Mirror of https://github.com/pytorch/pytorch.git (synced 2025-11-04 08:00:58 +08:00)

Compare commits: VariaT...csl/fix_in (2 commits)

Commits in this comparison:
- 2f26446210
- f36f3bc69f
@@ -8,8 +8,6 @@ if [[ "$GPU_ARCH_VERSION" == *"12.6"* ]]; then
    export TORCH_CUDA_ARCH_LIST="8.0;9.0"
elif [[ "$GPU_ARCH_VERSION" == *"12.8"* ]]; then
    export TORCH_CUDA_ARCH_LIST="8.0;9.0;10.0;12.0"
elif [[ "$GPU_ARCH_VERSION" == *"12.9"* ]]; then
    export TORCH_CUDA_ARCH_LIST="8.0;9.0;10.0;12.0"
elif [[ "$GPU_ARCH_VERSION" == *"13.0"* ]]; then
    export TORCH_CUDA_ARCH_LIST="8.0;9.0;10.0;11.0;12.0+PTX"
fi
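For context on the hunk above: TORCH_CUDA_ARCH_LIST tells PyTorch's build system which CUDA SM architectures to compile kernels for; "8.0" corresponds to A100-class GPUs (sm_80), "9.0" to H100 (sm_90), and a "+PTX" suffix additionally embeds PTX so newer GPUs can JIT-compile the kernels. A minimal illustration of how the variable is consumed (values hypothetical, not taken from this diff):

    # Build wheels only for sm_80 and sm_90, with a PTX fallback for newer GPUs
    export TORCH_CUDA_ARCH_LIST="8.0;9.0+PTX"
    python setup.py bdist_wheel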
@@ -37,9 +37,9 @@ case ${DOCKER_TAG_PREFIX} in
  rocm*)
    BASE_TARGET=rocm
    PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
    # add gfx950, gfx115x conditionally starting in ROCm 7.0
    # add gfx950 conditionally starting in ROCm 7.0
    if [[ "$ROCM_VERSION" == *"7.0"* ]]; then
        PYTORCH_ROCM_ARCH="${PYTORCH_ROCM_ARCH};gfx950;gfx1150;gfx1151"
        PYTORCH_ROCM_ARCH="${PYTORCH_ROCM_ARCH};gfx950"
    fi
    EXTRA_BUILD_ARGS="${EXTRA_BUILD_ARGS} --build-arg PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH}"
    ;;

@@ -181,7 +181,7 @@ case "$tag" in
    KATEX=yes
    UCX_COMMIT=${_UCX_COMMIT}
    UCC_COMMIT=${_UCC_COMMIT}
    PYTORCH_ROCM_ARCH="gfx90a;gfx942;gfx950;gfx1100"
    PYTORCH_ROCM_ARCH="gfx90a;gfx942;gfx950"
    if [[ $tag =~ "benchmarks" ]]; then
      INDUCTOR_BENCHMARKS=yes
    fi

@@ -344,7 +344,7 @@ docker build \
       --build-arg "NINJA_VERSION=${NINJA_VERSION:-}" \
       --build-arg "KATEX=${KATEX:-}" \
       --build-arg "ROCM_VERSION=${ROCM_VERSION:-}" \
       --build-arg "PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH}" \
       --build-arg "PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH:-gfx90a;gfx942}" \
       --build-arg "IMAGE_NAME=${IMAGE_NAME}" \
       --build-arg "UCX_COMMIT=${UCX_COMMIT}" \
       --build-arg "UCC_COMMIT=${UCC_COMMIT}" \

@@ -1 +1 @@
deb42f2a8e48f5032b4a98ee781a15fa87a157cf
e0dda9059d082537cee36be6c5e4fe3b18c880c0

@@ -1 +1 @@
7416ffcb92cdbe98d9f97e4e6f95247e46dfc9fd
27664085f804afc83df26f740bb46c365854f2c4

@@ -19,8 +19,8 @@ pip_install \
  transformers==4.36.2

pip_install coloredlogs packaging
pip_install onnxruntime==1.23.0
pip_install onnxscript==0.5.3
pip_install onnxruntime==1.22.1
pip_install onnxscript==0.4.0

# Cache the transformers model to be used later by ONNX tests. We need to run the transformers
# package to download the model. By default, the model is cached at ~/.cache/huggingface/hub/

@@ -46,9 +46,9 @@ case ${DOCKER_TAG_PREFIX} in
        BASE_TARGET=rocm
        GPU_IMAGE=rocm/dev-ubuntu-22.04:${GPU_ARCH_VERSION}-complete
        PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
        # add gfx950, gfx115x conditionally starting in ROCm 7.0
        # add gfx950 conditionally starting in ROCm 7.0
        if [[ "$GPU_ARCH_VERSION" == *"7.0"* ]]; then
            PYTORCH_ROCM_ARCH="${PYTORCH_ROCM_ARCH};gfx950;gfx1150;gfx1151"
            PYTORCH_ROCM_ARCH="${PYTORCH_ROCM_ARCH};gfx950"
        fi
        DOCKER_GPU_BUILD_ARG="--build-arg PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH} --build-arg ROCM_VERSION=${GPU_ARCH_VERSION}"
        ;;

@@ -115,9 +115,6 @@ RUN env GRPC_PYTHON_BUILD_SYSTEM_OPENSSL=True pip3 install grpcio
# cmake-3.28.0 from pip for onnxruntime
RUN python3 -mpip install cmake==3.28.0

ADD ./common/patch_libstdc.sh patch_libstdc.sh
RUN bash ./patch_libstdc.sh && rm patch_libstdc.sh

# build onnxruntime 1.21.0 from sources.
# it is not possible to build it from sources using pip,
# so just build it from upstream repository.

@@ -84,9 +84,9 @@ case ${image} in
        DEVTOOLSET_VERSION="11"
        GPU_IMAGE=rocm/dev-almalinux-8:${GPU_ARCH_VERSION}-complete
        PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
        # add gfx950, gfx115x conditionally starting in ROCm 7.0
        # add gfx950 conditionally starting in ROCm 7.0
        if [[ "$GPU_ARCH_VERSION" == *"7.0"* ]]; then
            PYTORCH_ROCM_ARCH="${PYTORCH_ROCM_ARCH};gfx950;gfx1150;gfx1151"
            PYTORCH_ROCM_ARCH="${PYTORCH_ROCM_ARCH};gfx950"
        fi
        DOCKER_GPU_BUILD_ARG="--build-arg ROCM_VERSION=${GPU_ARCH_VERSION} --build-arg PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH} --build-arg DEVTOOLSET_VERSION=${DEVTOOLSET_VERSION}"
        ;;

@@ -10,6 +10,11 @@ BAD_SSL = "https://self-signed.badssl.com"

print("Testing SSL certificate checking for Python:", sys.version)

if sys.version_info[:2] < (2, 7) or sys.version_info[:2] < (3, 4):
    print("This version never checks SSL certs; skipping tests")
    sys.exit(0)


EXC = OSError

print(f"Connecting to {GOOD_SSL} should work")
@@ -120,8 +120,9 @@ ninja==1.11.1.4
numba==0.55.2 ; python_version == "3.10" and platform_machine != "s390x"
numba==0.60.0 ; python_version == "3.12" and platform_machine != "s390x"
#Description: Just-In-Time Compiler for Numerical Functions
#Pinned versions: 0.55.2, 0.60.0
#Pinned versions: 0.54.1, 0.49.0, <=0.49.1
#test that import: test_numba_integration.py
#For numba issue see https://github.com/pytorch/pytorch/issues/51511
#Need release > 0.61.2 for s390x due to https://github.com/numba/numba/pull/10073

#numpy

@@ -241,9 +242,10 @@ pygments==2.15.0
#Pinned versions: 14.1.0
#test that import:

scikit-image==0.22.0
scikit-image==0.19.3 ; python_version < "3.10"
scikit-image==0.22.0 ; python_version >= "3.10"
#Description: image processing routines
#Pinned versions: 0.22.0
#Pinned versions:
#test that import: test_nn.py

#scikit-learn

@@ -339,7 +341,7 @@ onnx==1.18.0
#Pinned versions:
#test that import:

onnxscript==0.5.3
onnxscript==0.4.0
#Description: Required by mypy and test_public_bindings.py when checking torch.onnx._internal
#Pinned versions:
#test that import:
@@ -143,7 +143,7 @@ def sample_vllm_test_library():
                "pytest -v -s compile/test_decorator.py",
            ],
        },
        "vllm_language_model_test_extended_generation_28_failure_test": {
        "vllm_languagde_model_test_extended_generation_28_failure_test": {
            "title": "Language Models Test (Extended Generation) 2.8 release failure",
            "id": "vllm_languagde_model_test_extended_generation_28_failure_test",
            "package_install": [

@@ -63,7 +63,7 @@ class VllmBuildParameters:
    # DOCKERFILE_PATH: path to Dockerfile used when use_local_dockerfile is True"
    use_local_dockerfile: bool = env_bool_field("USE_LOCAL_DOCKERFILE", True)
    dockerfile_path: Path = env_path_field(
        "DOCKERFILE_PATH", ".github/ci_configs/vllm/Dockerfile"
        "DOCKERFILE_PATH", ".github/ci_configs/vllm/Dockerfile.tmp_vllm"
    )

    # the cleaning script to remove torch dependencies from pip
@@ -5,7 +5,7 @@ DESIRED_ROCM ?= 7.0
DESIRED_ROCM_SHORT = $(subst .,,$(DESIRED_ROCM))
PACKAGE_NAME = magma-rocm
# inherit this from underlying docker image, do not pass this env var to docker
#PYTORCH_ROCM_ARCH ?= gfx900;gfx906;gfx908;gfx90a;gfx942;gfx950;gfx1030;gfx1100;gfx1101;gfx1102;gfx1150;gfx1151;gfx1200;gfx1201
#PYTORCH_ROCM_ARCH ?= gfx900;gfx906;gfx908;gfx90a;gfx942;gfx950;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201

DOCKER_RUN = set -eou pipefail; ${DOCKER_CMD} run --rm -i \
	-v $(shell git rev-parse --show-toplevel)/.ci:/builder \

@@ -18,6 +18,7 @@ DOCKER_RUN = set -eou pipefail; ${DOCKER_CMD} run --rm -i \
.PHONY: all
all: magma-rocm70
all: magma-rocm64
all: magma-rocm63

.PHONY:
clean:

@@ -33,3 +34,8 @@ magma-rocm70:
magma-rocm64: DESIRED_ROCM := 6.4
magma-rocm64:
	$(DOCKER_RUN)

.PHONY: magma-rocm63
magma-rocm63: DESIRED_ROCM := 6.3
magma-rocm63:
	$(DOCKER_RUN)
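A note on the Makefile mechanics above: `magma-rocm64: DESIRED_ROCM := 6.4` is a GNU make target-specific variable, so the override applies only while that target (and its prerequisites) is being built, which lets every magma-rocmXX target share the same $(DOCKER_RUN) recipe. A minimal standalone sketch of the pattern (target name hypothetical):

	# VERSION applies only inside this target's recipe
	show: VERSION := 6.4
	show:
		@echo "building with $(VERSION)"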
@@ -187,22 +187,19 @@ if [[ $CUDA_VERSION == 12* || $CUDA_VERSION == 13* ]]; then
            export USE_CUFILE=0
        else
            DEPS_LIST+=(
                "/usr/local/cuda/lib64/libnvToolsExt.so.1"
                "/usr/local/cuda/lib64/libcublas.so.12"
                "/usr/local/cuda/lib64/libcublasLt.so.12"
                "/usr/local/cuda/lib64/libcudart.so.12"
                "/usr/local/cuda/lib64/libnvrtc.so.12"
                "/usr/local/cuda/extras/CUPTI/lib64/libcupti.so.12")
            DEPS_SONAME+=(
                "libnvToolsExt.so.1"
                "libcublas.so.12"
                "libcublasLt.so.12"
                "libcudart.so.12"
                "libnvrtc.so.12"
                "libcupti.so.12")

            if [[ $CUDA_VERSION != 12.9* ]]; then
                DEPS_LIST+=("/usr/local/cuda/lib64/libnvToolsExt.so.1")
                DEPS_SONAME+=("libnvToolsExt.so.1")
            fi
        fi
    else
        echo "Using nvidia libs from pypi."

@@ -233,9 +233,7 @@ if [[ "${BUILD_ENVIRONMENT}" != *cuda* ]]; then
  export BUILD_STATIC_RUNTIME_BENCHMARK=ON
fi

if [[ "$BUILD_ENVIRONMENT" == *-full-debug* ]]; then
  export CMAKE_BUILD_TYPE=Debug
elif [[ "$BUILD_ENVIRONMENT" == *-debug* ]]; then
if [[ "$BUILD_ENVIRONMENT" == *-debug* ]]; then
  export CMAKE_BUILD_TYPE=RelWithAssert
fi

@@ -301,11 +299,6 @@ else
      python -m build --wheel --no-isolation
    fi
    pip_install_whl "$(echo dist/*.whl)"
    if [[ "$BUILD_ENVIRONMENT" == *full-debug* ]]; then
      # Regression test for https://github.com/pytorch/pytorch/issues/164297
      # Torch should be importable and that's about it
      pushd /; python -c "import torch;print(torch.__config__.show(), torch.randn(5) + 1.7)"; popd
    fi

    if [[ "${BUILD_ADDITIONAL_PACKAGES:-}" == *vision* ]]; then
      install_torchvision

@@ -67,7 +67,7 @@ fi
#       wheels with cxx11-abi

echo "Checking that the gcc ABI is what we expect"
if [[ "$(uname)" != 'Darwin' ]]; then
if [[ "$(uname)" != 'Darwin' &&  "$(uname -m)" != "s390x" ]]; then
  # We also check that there are cxx11 symbols in libtorch
  #
  echo "Checking that symbols in libtorch.so have the right gcc abi"

@@ -256,7 +256,7 @@ test_torchbench_smoketest() {
  local device=mps
  local dtypes=(undefined float16 bfloat16 notset)
  local dtype=${dtypes[$1]}
  local models=(llama BERT_pytorch dcgan yolov3 resnet152 sam sam_fast pytorch_unet stable_diffusion_text_encoder speech_transformer Super_SloMo doctr_det_predictor doctr_reco_predictor vgg16)
  local models=(hf_T5 llama BERT_pytorch dcgan hf_GPT2 yolov3 resnet152 sam sam_fast pytorch_unet stable_diffusion_text_encoder speech_transformer Super_SloMo doctr_det_predictor doctr_reco_predictor timm_resnet timm_vovnet vgg16)

  for backend in eager inductor; do

@@ -319,7 +319,7 @@ test_aoti_torchbench_smoketest() {
  local device=mps
  local dtypes=(undefined float16 bfloat16 notset)
  local dtype=${dtypes[$1]}
  local models=(llama BERT_pytorch dcgan yolov3 resnet152 sam sam_fast pytorch_unet stable_diffusion_text_encoder speech_transformer Super_SloMo doctr_det_predictor doctr_reco_predictor vgg16)
  local models=(hf_T5 llama BERT_pytorch dcgan hf_GPT2 yolov3 resnet152 sam sam_fast pytorch_unet stable_diffusion_text_encoder speech_transformer Super_SloMo doctr_det_predictor doctr_reco_predictor timm_resnet timm_vovnet vgg16)

  echo "Launching torchbench inference performance run for AOT Inductor and dtype ${dtype}"
  local dtype_arg="--${dtype}"

@@ -34,14 +34,12 @@ fi


# Patch numba to avoid CUDA-13 crash, see https://github.com/pytorch/pytorch/issues/162878
if [[ "$BUILD_ENVIRONMENT" == *cuda* ]]; then
  NUMBA_CUDA_DIR=$(python -c "import os;import numba.cuda; print(os.path.dirname(numba.cuda.__file__))" 2>/dev/null || true)
  if [ -n "$NUMBA_CUDA_DIR" ]; then
    NUMBA_PATCH="$(dirname "$(realpath "${BASH_SOURCE[0]}")")/numba-cuda-13.patch"
    pushd "$NUMBA_CUDA_DIR"
    patch -p4 <"$NUMBA_PATCH"
    popd
  fi
NUMBA_CUDA_DIR=$(python -c "import os;import numba.cuda; print(os.path.dirname(numba.cuda.__file__))" 2>/dev/null || true)
if [ -n "$NUMBA_CUDA_DIR" ]; then
  NUMBA_PATCH="$(dirname "$(realpath "${BASH_SOURCE[0]}")")/numba-cuda-13.patch"
  pushd "$NUMBA_CUDA_DIR"
  patch -p4 <"$NUMBA_PATCH"
  popd
fi

echo "Environment variables:"

@@ -337,13 +335,13 @@ test_python() {

test_python_smoke() {
  # Smoke tests for H100/B200
  time python test/run_test.py --include test_matmul_cuda test_scaled_matmul_cuda inductor/test_fp8 inductor/test_max_autotune $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
  time python test/run_test.py --include test_matmul_cuda inductor/test_fp8 inductor/test_max_autotune $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
  assert_git_not_dirty
}

test_python_smoke_b200() {
  # Targeted smoke tests for B200 - staged approach to avoid too many failures
  time python test/run_test.py --include test_matmul_cuda test_scaled_matmul_cuda inductor/test_fp8 $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
  time python test/run_test.py --include test_matmul_cuda inductor/test_fp8 $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
  assert_git_not_dirty
}

@@ -838,7 +836,7 @@ test_dynamo_benchmark() {
      elif [[ "${suite}" == "timm_models" ]]; then
        export TORCHBENCH_ONLY_MODELS="inception_v3"
      elif [[ "${suite}" == "torchbench" ]]; then
        export TORCHBENCH_ONLY_MODELS="BERT_pytorch"
        export TORCHBENCH_ONLY_MODELS="hf_Bert"
      fi
    fi
    test_single_dynamo_benchmark "dashboard" "$suite" "$shard_id" "$@"

@@ -869,13 +867,13 @@ test_inductor_torchbench_smoketest_perf() {
  mkdir -p "$TEST_REPORTS_DIR"

  python benchmarks/dynamo/torchbench.py --device cuda --performance --backend inductor --float16 --training \
    --batch-size-file "$(realpath benchmarks/dynamo/torchbench_models_list.txt)" --only BERT_pytorch \
    --batch-size-file "$(realpath benchmarks/dynamo/torchbench_models_list.txt)" --only hf_Bert \
    --output "$TEST_REPORTS_DIR/inductor_training_smoketest.csv"
  # The threshold value needs to be actively maintained to make this check useful
  python benchmarks/dynamo/check_perf_csv.py -f "$TEST_REPORTS_DIR/inductor_training_smoketest.csv" -t 1.4

  # Check memory compression ratio for a few models
  for test in BERT_pytorch yolov3; do
  for test in hf_Albert timm_vision_transformer; do
    python benchmarks/dynamo/torchbench.py --device cuda --performance --backend inductor --amp --training \
      --disable-cudagraphs --batch-size-file "$(realpath benchmarks/dynamo/torchbench_models_list.txt)" \
      --only $test --output "$TEST_REPORTS_DIR/inductor_training_smoketest_$test.csv"

@@ -886,7 +884,7 @@ test_inductor_torchbench_smoketest_perf() {
  done

  # Perform some "warm-start" runs for a few huggingface models.
  for test in AllenaiLongformerBase DistilBertForMaskedLM DistillGPT2 GoogleFnet YituTechConvBert; do
  for test in AlbertForQuestionAnswering AllenaiLongformerBase DistilBertForMaskedLM DistillGPT2 GoogleFnet YituTechConvBert; do
    python benchmarks/dynamo/huggingface.py --accuracy --training --amp --inductor --device cuda --warm-start-latency \
      --only $test --output "$TEST_REPORTS_DIR/inductor_warm_start_smoketest_$test.csv"
    python benchmarks/dynamo/check_accuracy.py \
@@ -15,35 +15,37 @@ if errorlevel 1 exit /b 1
if not errorlevel 0 exit /b 1

cd %TMP_DIR_WIN%\build\torch\test

:: Enable delayed variable expansion to make the list
setlocal enabledelayedexpansion
set EXE_LIST=
for /r "." %%a in (*.exe) do (
  if "%%~na" == "c10_intrusive_ptr_benchmark" (
    @REM NB: This is not a gtest executable file, thus couldn't be handled by
    @REM pytest-cpp and is excluded from test discovery by run_test
    call "%%~fa"
    call :libtorch_check "%%~na" "%%~fa"
    if errorlevel 1 goto fail
    if not errorlevel 0 goto fail
  ) else (
    if "%%~na" == "verify_api_visibility" (
      @REM Skip verify_api_visibility as it is a compile-level test
    ) else (
      set EXE_LIST=!EXE_LIST! cpp/%%~na
    )
  )
)

goto :eof

:libtorch_check

cd %CWD%
set CPP_TESTS_DIR=%TMP_DIR_WIN%\build\torch\test

:: Run python test\run_test.py on the list
set NO_TD=True && python test\run_test.py --cpp --verbose -i !EXE_LIST!
if errorlevel 1 goto fail
if not errorlevel 0 goto fail
:: Skip verify_api_visibility as it a compile level test
if "%~1" == "verify_api_visibility" goto :eof

goto :eof
echo Running "%~2"
if "%~1" == "c10_intrusive_ptr_benchmark" (
  :: NB: This is not a gtest executable file, thus couldn't be handled by pytest-cpp
  call "%~2"
  goto :eof
)

python test\run_test.py --cpp --verbose -i "cpp/%~1"
if errorlevel 1 (
  echo %1 failed with exit code %errorlevel%
  goto fail
)
if not errorlevel 0 (
  echo %1 failed with exit code %errorlevel%
  goto fail
)

:eof
exit /b 0
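For reference on the batch syntax in the hunk above (background knowledge, not part of the diff): inside `for /r "." %%a in (*.exe)`, the modifier `%%~na` expands to the file name without its extension and `%%~fa` to the fully qualified path, so `call :libtorch_check "%%~na" "%%~fa"` hands the subroutine both values, which it reads back as %~1 and %~2. A minimal self-contained sketch of the same pattern:

    @echo off
    for /r "." %%a in (*.exe) do call :show "%%~na" "%%~fa"
    goto :eof

    :show
    echo name=%~1 path=%~2
    goto :eof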
@@ -38,7 +38,7 @@ if [[ "$BUILD_ENVIRONMENT" == *cuda* ]]; then
fi

# TODO: Move this to .ci/docker/requirements-ci.txt
python -m pip install "psutil==5.9.1" nvidia-ml-py "pytest-shard==0.1.2"
python -m pip install "psutil==5.9.1" "pynvml==11.4.1" "pytest-shard==0.1.2"

run_tests() {
    # Run nvidia-smi if available
@@ -71,7 +71,14 @@ export PYTORCH_BUILD_NUMBER=1

# Set triton version as part of PYTORCH_EXTRA_INSTALL_REQUIREMENTS
TRITON_VERSION=$(cat $PYTORCH_ROOT/.ci/docker/triton_version.txt)
TRITON_CONSTRAINT="platform_system == 'Linux'"

# Here PYTORCH_EXTRA_INSTALL_REQUIREMENTS is already set for the all the wheel builds hence append TRITON_CONSTRAINT
TRITON_CONSTRAINT="platform_system == 'Linux' and platform_machine == 'x86_64'"

# CUDA 12.9/13.0 builds have triton for Linux and Linux aarch64 binaries.
if [[ "$DESIRED_CUDA" == "cu129" ]] || [[ "$DESIRED_CUDA" == "cu130" ]]; then
  TRITON_CONSTRAINT="platform_system == 'Linux'"
fi

if [[ "$PACKAGE_TYPE" =~ .*wheel.* &&  -n "${PYTORCH_EXTRA_INSTALL_REQUIREMENTS:-}" && ! "$PYTORCH_BUILD_VERSION" =~ .*xpu.* ]]; then
  TRITON_REQUIREMENT="triton==${TRITON_VERSION}; ${TRITON_CONSTRAINT}"
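To illustrate the mechanism above (triton version hypothetical, not taken from this diff): with TRITON_VERSION=3.1.0 and the cu129/cu130 branch taken, the script would produce

    TRITON_REQUIREMENT="triton==3.1.0; platform_system == 'Linux'"

Everything after the semicolon is a PEP 508 environment marker, so pip installs the triton dependency only on Linux; the stricter default marker additionally restricts it to x86_64 machines.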
@@ -66,7 +66,6 @@ readability-simplify-subscript-expr,
readability-string-compare,
-readability-redundant-access-specifiers,
-readability-redundant-control-flow,
-readability-redundant-inline-specifier,
'
HeaderFilterRegex: '^(aten/|c10/|torch/).*$'
WarningsAsErrors: '*'

.flake8 (2 lines changed)
@@ -12,7 +12,7 @@ ignore =
    # to line this up with executable bit
    EXE001,
    # these ignores are from flake8-bugbear; please fix!
    B007,B008,B017,B019,B023,B028,B903,B905,B906,B907,B908,B910
    B007,B008,B017,B019,B023,B028,B903,B904,B905,B906,B907,B908,B910
    # these ignores are from flake8-comprehensions; please fix!
    C407,
    # these ignores are from flake8-logging-format; please fix!

.github/ISSUE_TEMPLATE/ci-sev.md (1 line changed)
@@ -8,7 +8,6 @@ assignees: ''
---

> NOTE: Remember to label this issue with "`ci: sev`"
>       If you want autorevert to be disabled, keep the ci: disable-autorevert label

 <!-- Add the `merge blocking` label to this PR to prevent PRs from being merged while this issue is open -->

.github/ISSUE_TEMPLATE/disable-autorevert.md (4 lines changed)
@@ -1,7 +1,7 @@
---
name: "D❌\U0001F519 ISABLE AUTOREVERT"
name: DISABLE AUTOREVERT
about: Disables autorevert when open
title: "[DISABLE AUTOREVERT]"
title: "❌\U0001F519 [DISABLE AUTOREVERT]"
labels: 'ci: disable-autorevert'
assignees: ''

.github/actions/linux-test/action.yml (2 lines changed)
@@ -274,6 +274,8 @@ runs:
          -w /var/lib/jenkins/workspace \
          "${DOCKER_IMAGE}"
        )
        # Propagate download.pytorch.org IP to container
        grep download.pytorch.org /etc/hosts | docker exec -i "${container_name}" sudo bash -c "/bin/cat >> /etc/hosts"
        echo "DOCKER_CONTAINER_ID=${container_name}" >> "${GITHUB_ENV}"
        docker exec -t "${container_name}" sh -c "pip install $(echo dist/*.whl)[opt-einsum] && ${TEST_COMMAND}"

.github/actions/setup-linux/action.yml (35 lines changed)
@@ -28,10 +28,6 @@ runs:
        echo "instance-type: $(get_ec2_metadata instance-type)"
        echo "system info $(uname -a)"

    - name: Print GPU info (if present)
      shell: bash
      run: if [ -f /usr/bin/nvidia-smi ]; then nvidia-smi; fi

    - name: Check if in a container runner
      shell: bash
      id: check_container_runner

@@ -86,6 +82,37 @@ runs:
        # Prune all of the docker images
        docker system prune -af

    - name: Manually resolve download.pytorch.org
      shell: bash
      continue-on-error: true
      run: |
        set +e
        set -x

        PT_DOMAIN=download.pytorch.org
        # TODO: Flaky access to download.pytorch.org https://github.com/pytorch/pytorch/issues/100400,
        # cleaning this up once the issue is fixed. There are more than one resolved IP here, the last
        # one is returned at random
        RESOLVED_IP=$(dig -4 +short "${PT_DOMAIN}" | tail -n1)

        if [ -z "${RESOLVED_IP}" ]; then
          echo "Couldn't resolve ${PT_DOMAIN}, retrying with Google DNS..."
          RESOLVED_IP=$(dig -4 +short "${PT_DOMAIN}" @8.8.8.8 | tail -n1)

          if [ -z "${RESOLVED_IP}" ]; then
            echo "Couldn't resolve ${PT_DOMAIN}, exiting..."
            exit 1
          fi
        fi

        if grep -r "${PT_DOMAIN}" /etc/hosts; then
          # Clean up any old records first
          sudo sed -i "/${PT_DOMAIN}/d" /etc/hosts
        fi

        echo "${RESOLVED_IP} ${PT_DOMAIN}" | sudo tee -a /etc/hosts
        cat /etc/hosts

    - name: Check that the docker daemon is running
      shell: bash
      continue-on-error: true
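For illustration of the added step's effect (IP address hypothetical): after it runs, /etc/hosts on the runner contains a line such as

    192.0.2.10 download.pytorch.org

so later pip and curl calls on the host, and containers that receive a copy of this entry, bypass DNS for that domain entirely; the dig query against 8.8.8.8 is only a fallback for when the runner's default resolver is the flaky part.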
.github/actions/setup-rocm/action.yml (13 lines changed)
@@ -111,16 +111,3 @@ runs:
        # This video group ID maps to subgid 1 inside the docker image due to the /etc/subgid entries.
        # The group name corresponding to group ID 1 can change depending on the OS, so both are necessary.
        echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd $DEVICE_FLAG --group-add video --group-add $render_gid --group-add daemon --group-add bin --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --network=host" >> "${GITHUB_ENV}"

    - name: configure aws credentials
      id: aws_creds
      uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722 # v4.1.0
      with:
        role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_s3_and_ecr_read_only
        aws-region: us-east-1
        role-duration-seconds: 18000

    - name: Login to Amazon ECR
      id: login-ecr
      continue-on-error: true
      uses: aws-actions/amazon-ecr-login@062b18b96a7aff071d4dc91bc00c4c1a7945b076 # v2.0.1

@@ -33,6 +33,10 @@ runs:
        )

        echo "CONTAINER_NAME=${container_name}" >> "$GITHUB_ENV"
        if [[ "${GPU_ARCH_TYPE}" != "rocm" && "${BUILD_ENVIRONMENT}" != "linux-aarch64-binary-manywheel" && "${BUILD_ENVIRONMENT}" != "linux-s390x-binary-manywheel" && "${GPU_ARCH_TYPE}" != "xpu" ]]; then
          # Propagate download.pytorch.org IP to container. This is only needed on Linux non aarch64 runner
          grep download.pytorch.org /etc/hosts | docker exec -i "${container_name}" bash -c "/bin/cat >> /etc/hosts"
        fi

        docker exec -t -w "${PYTORCH_ROOT}" "${container_name}" bash -c "bash .circleci/scripts/binary_populate_env.sh"
        # Generate test script

.github/ci_commit_pins/audio.txt (2 lines changed)
@@ -1 +1 @@
1b013f5b5a87a1882eb143c26d79d091150d6a37
87ff22e49ed0e92576c4935ccb8c143daac4a3cd

.github/ci_commit_pins/vision.txt (2 lines changed)
@@ -1 +1 @@
faffd5cf673615583da6517275e361cb3dbc77e6
966da7e46f65d6d49df3e31214470a4fe5cc8e66

.github/ci_commit_pins/vllm.txt (2 lines changed)
@@ -1 +1 @@
e5192819208c4d68194844b7dfafbc00020d0dea
78a47f87ce259a48f0391fa9ae15add05ea7432b

.github/ci_commit_pins/xla.txt (2 lines changed)
@@ -1 +1 @@
0fa6e3129e61143224663e1ec67980d12b7ec4eb
0fc62aa26a30ed7ca419d285f285cb5ba02c4394
@@ -1,41 +1,59 @@
# TODO(elainwy): remove this file after the torch nightly dockerfile is in sync in vllm repo
# The vLLM Dockerfile is used to construct vLLM image against torch nightly and torch main that can be directly used for testing

ARG CUDA_VERSION=12.8.1
ARG PYTHON_VERSION=3.12

# BUILD_BASE_IMAGE: used to setup python build xformers, and vllm wheels, It can be replaced with a different base image from local machine,
# by default, it uses the torch-nightly-base stage from this docker image
ARG BUILD_BASE_IMAGE=torch-nightly-base

# FINAL_BASE_IMAGE: used to set up vllm-instaled environment and build flashinfer,
# by default, it uses devel-ubuntu22.04 official image.
ARG FINAL_BASE_IMAGE=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04

# The logic is copied from https://github.com/vllm-project/vllm/blob/main/docker/Dockerfile
ARG GET_PIP_URL="https://bootstrap.pypa.io/get-pip.py"


#################### TORCH NIGHTLY BASE IMAGE ####################
# A base image for building vLLM with devel ubuntu 22.04, this is mainly used to build vllm in vllm builtkite ci
FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04 as torch-nightly-base

ARG CUDA_VERSION
ARG PYTHON_VERSION
ARG GET_PIP_URL

# Install system dependencies and uv, then create Python virtual environment
# Install Python and other dependencies
RUN apt-get update -y \
    && apt-get install -y ccache software-properties-common git curl sudo vim python3-pip \
    && curl -LsSf https://astral.sh/uv/install.sh | sh \
    && $HOME/.local/bin/uv venv /opt/venv --python ${PYTHON_VERSION} \
    && rm -f /usr/bin/python3 /usr/bin/python3-config /usr/bin/pip \
    && ln -s /opt/venv/bin/python3 /usr/bin/python3 \
    && ln -s /opt/venv/bin/python3-config /usr/bin/python3-config \
    && ln -s /opt/venv/bin/pip /usr/bin/pip \
    && apt-get install -y ccache software-properties-common git curl wget sudo vim \
    && add-apt-repository -y ppa:deadsnakes/ppa \
    && apt-get update -y \
    && apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv \
    && update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1 \
    && update-alternatives --set python3 /usr/bin/python${PYTHON_VERSION} \
    && ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config \
    && curl -sS ${GET_PIP_URL} | python${PYTHON_VERSION} \
    && python3 --version && python3 -m pip --version

# Upgrade to GCC 10 to avoid https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92519
# as it was causing spam when compiling the CUTLASS kernels
RUN apt-get install -y gcc-10 g++-10
RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 110 --slave /usr/bin/g++ g++ /usr/bin/g++-10
RUN <<EOF
gcc --version
EOF
# Ensure gcc >= 10 to avoid CUTLASS issues (bug 92519)
RUN current_gcc_version=$(gcc -dumpversion | cut -f1 -d.) && \
    if command -v apt-get >/dev/null; then \
        if [ "$current_gcc_version" -lt 10 ]; then \
            echo "GCC version is $current_gcc_version, installing gcc-10..."; \
            apt-get update \
            && apt-get install -y gcc-10 g++-10 \
            && update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 100 \
            && update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-10 100; \
        else \
            echo "GCC version is $current_gcc_version, no need to install gcc-10."; \
        fi \
    fi \
    && gcc --version && g++ --version

# Install uv for faster pip installs
# install uv for faster pip installs
RUN --mount=type=cache,target=/root/.cache/uv \
    python3 -m pip install uv==0.8.4
@@ -43,32 +61,36 @@ ENV UV_HTTP_TIMEOUT=500
ENV UV_INDEX_STRATEGY="unsafe-best-match"
# Use copy mode to avoid hardlink failures with Docker cache mounts
ENV UV_LINK_MODE=copy

#################### TORCH NIGHTLY  BASE IMAGE ####################


#################### BASE BUILD IMAGE ####################
# A base image for building vLLM with torch nightly or torch wheels
# prepare basic build environment
FROM ${BUILD_BASE_IMAGE} AS base
USER root

ARG CUDA_VERSION
ARG PYTHON_VERSION

# Only work with PyTorch manylinux builder
# TODO (huydhn): Only work with PyTorch manylinux builder
ENV PATH="/opt/python/cp312-cp312/bin:${PATH}"

# Install some system dependencies and double check python version
RUN if command -v apt-get >/dev/null; then \
        apt-get update -y \
        && apt-get install -y ccache software-properties-common git wget sudo vim; \
        && apt-get install -y ccache software-properties-common git curl wget sudo vim; \
    else \
        dnf install -y git wget sudo; \
        dnf install -y git curl wget sudo; \
    fi \
    && python3 --version && python3 -m pip --version

# Install uv for faster pip installs if not existed
RUN --mount=type=cache,target=/root/.cache/uv \
    python3 -m pip install uv==0.8.4

    if ! python3 -m uv --version >/dev/null 2>&1; then \
        python3 -m pip install uv==0.8.4; \
    fi
ENV UV_HTTP_TIMEOUT=500
ENV UV_INDEX_STRATEGY="unsafe-best-match"
# Use copy mode to avoid hardlink failures with Docker cache mounts

@@ -76,15 +98,15 @@ ENV UV_LINK_MODE=copy

WORKDIR /workspace

# Install build and runtime dependencies
# install build and runtime dependencies
COPY requirements/common.txt requirements/common.txt
COPY use_existing_torch.py use_existing_torch.py
COPY pyproject.toml pyproject.toml

# Install build and runtime dependencies without stable torch version
# install build and runtime dependencies without stable torch version
RUN python3 use_existing_torch.py

# Default mount file as placeholder, this just avoid the mount error
# default mount file as placeholder, this just avoid the mount error
# change to a different vllm folder if this does not exist anymore
ARG TORCH_WHEELS_PATH="./requirements"
ARG PINNED_TORCH_VERSION
@@ -116,36 +138,56 @@ RUN --mount=type=cache,target=/root/.cache/uv \
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system -r requirements/common.txt

# Must put before installing xformers, so it can install the correct version of xfomrers.
ARG xformers_cuda_arch_list='7.5;8.0+PTX;9.0a'
ENV TORCH_CUDA_ARCH_LIST=${xformers_cuda_arch_list}

ARG max_jobs=16
ENV MAX_JOBS=${max_jobs}

RUN --mount=type=cache,target=/root/.cache/uv bash - <<'BASH'
    export TORCH_CUDA_ARCH_LIST='7.5 8.0+PTX 9.0a'
    git clone https://github.com/facebookresearch/xformers.git
RUN echo ${TORCH_CUDA_ARCH_LIST}
RUN echo ${MAX_JOBS}
RUN pip freeze | grep -E 'ninja'

    pushd xformers
    git checkout v0.0.32.post2
    git submodule update --init --recursive
    python3 setup.py bdist_wheel --dist-dir=../xformers-dist --verbose
    popd
# Build xformers with cuda and torch nightly/wheel
# following official xformers guidance: https://github.com/facebookresearch/xformers#build
# sha for https://github.com/facebookresearch/xformers/tree/v0.0.32.post2
ARG XFORMERS_COMMIT=5d4b92a5e5a9c6c6d4878283f47d82e17995b468
ENV CCACHE_DIR=/root/.cache/ccache

    rm -rf xformers
BASH
RUN --mount=type=cache,target=/root/.cache/ccache \
    --mount=type=cache,target=/root/.cache/uv \
    echo 'git clone xformers...' \
    && git clone https://github.com/facebookresearch/xformers.git --recursive \
    && cd xformers \
    && git checkout ${XFORMERS_COMMIT} \
    && git submodule update --init --recursive \
    && echo 'finish git clone xformers...' \
    && rm -rf build \
    && python3 setup.py bdist_wheel --dist-dir=../xformers-dist --verbose \
    && cd .. \
    && rm -rf xformers

RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system xformers-dist/*.whl
    uv pip install --system xformers-dist/*.whl --verbose

# Build can take a long time, and the torch nightly version fetched from url can be different in next docker stage.
# track the nightly torch version used in the build, when we set up runtime environment we can make sure the version is the same
RUN uv pip freeze | grep -i '^torch\|^torchvision\|^torchaudio' > torch_build_versions.txt

RUN cat torch_build_versions.txt
RUN pip freeze | grep -E 'torch|xformers|torchvision|torchaudio'

#################### BASE BUILD IMAGE ####################


#################### WHEEL BUILD IMAGE ####################
# Image used to build vllm wheel
FROM base AS build
ARG TARGETPLATFORM

COPY . .

RUN python3 use_existing_torch.py

RUN --mount=type=cache,target=/root/.cache/uv \

@@ -155,17 +197,20 @@ ARG GIT_REPO_CHECK=0
RUN --mount=type=bind,source=.git,target=.git \
    if [ "$GIT_REPO_CHECK" != "0" ]; then bash tools/check_repo.sh ; fi

# Max jobs used by Ninja to build extensions
ARG max_jobs=16
ENV MAX_JOBS=${max_jobs}
ARG nvcc_threads=8
ARG nvcc_threads=4
ENV NVCC_THREADS=$nvcc_threads
ARG torch_cuda_arch_list='8.0;8.6;8.9;9.0'
ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list}

ARG USE_SCCACHE
ARG SCCACHE_BUCKET_NAME=vllm-build-sccache
ARG SCCACHE_REGION_NAME=us-west-2
ARG SCCACHE_S3_NO_CREDENTIALS=0

# Use sccache to speed up compilation
# if USE_SCCACHE is set, use sccache to speed up compilation
RUN --mount=type=cache,target=/root/.cache/uv \
    --mount=type=bind,source=.git,target=.git \
    if [ "$USE_SCCACHE" = "1" ]; then \

@@ -190,9 +235,6 @@ RUN --mount=type=cache,target=/root/.cache/uv \
        && sccache --show-stats; \
    fi

ARG torch_cuda_arch_list='8.0 8.6 8.9 9.0'
ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list}

ARG vllm_target_device="cuda"
ENV VLLM_TARGET_DEVICE=${vllm_target_device}
ENV CCACHE_DIR=/root/.cache/ccache
@@ -206,10 +248,17 @@ RUN --mount=type=cache,target=/root/.cache/ccache \
        export VLLM_DOCKER_BUILD_CONTEXT=1 && \
        python3 setup.py bdist_wheel --dist-dir=vllm-dist --py-limited-api=cp38; \
    fi

RUN echo "[INFO] Listing current directory:" && \
    ls -al && \
    echo "[INFO] Showing torch_build_versions.txt content:" && \
    cat torch_build_versions.txt

#################### WHEEL BUILD IMAGE ####################


################### VLLM INSTALLED IMAGE ####################
# Setup clean environment for vLLM for test and api server using ubuntu22.04 with AOT flashinfer
FROM ${FINAL_BASE_IMAGE} AS vllm-base
USER root

@@ -217,7 +266,7 @@ ARG CUDA_VERSION
ARG PYTHON_VERSION
ARG GET_PIP_URL

# Only work with PyTorch manylinux builder
# TODO (huydhn): Only work with PyTorch manylinux builder
ENV PATH="/opt/python/cp312-cp312/bin:${PATH}"

# prepare for environment starts

@@ -226,19 +275,20 @@ WORKDIR /workspace
# Install Python and other dependencies
RUN if command -v apt-get >/dev/null; then \
        apt-get update -y \
        && apt-get install -y ccache software-properties-common git sudo vim python3-pip; \
        && apt-get install -y ccache software-properties-common git curl wget sudo vim \
        && add-apt-repository -y ppa:deadsnakes/ppa \
        && apt-get update -y \
        && apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv \
        && update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1 \
        && update-alternatives --set python3 /usr/bin/python${PYTHON_VERSION} \
        && ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config \
        && curl -sS ${GET_PIP_URL} | python${PYTHON_VERSION}; \
    else \
        dnf install -y git wget sudo; \
        dnf install -y git curl wget sudo; \
    fi \
    && curl -LsSf https://astral.sh/uv/install.sh | sh \
    && $HOME/.local/bin/uv venv /opt/venv --python ${PYTHON_VERSION} \
    && rm -f /usr/bin/python3 /usr/bin/python3-config /usr/bin/pip \
    && ln -s /opt/venv/bin/python3 /usr/bin/python3 \
    && ln -s /opt/venv/bin/python3-config /usr/bin/python3-config \
    && ln -s /opt/venv/bin/pip /usr/bin/pip \
    && python3 --version && python3 -m pip --version

# Get the torch versions, and whls used in previous stage
# Get the torch versions, and whls used in previous stagtes for consistency
COPY --from=base /workspace/torch_build_versions.txt ./torch_build_versions.txt
COPY --from=base /workspace/xformers-dist /wheels/xformers
COPY --from=build /workspace/vllm-dist /wheels/vllm

@@ -249,27 +299,19 @@ RUN echo "[INFO] Listing current directory before torch install step:" && \

# Install uv for faster pip installs if not existed
RUN --mount=type=cache,target=/root/.cache/uv \
    python3 -m pip install uv==0.8.4

    if ! python3 -m uv --version > /dev/null 2>&1; then \
        python3 -m pip install uv==0.8.4; \
    fi
ENV UV_HTTP_TIMEOUT=500
ENV UV_INDEX_STRATEGY="unsafe-best-match"
# Use copy mode to avoid hardlink failures with Docker cache mounts
ENV UV_LINK_MODE=copy

# Install build and runtime dependencies, this is needed for flashinfer install
COPY requirements/build.txt requirements/build.txt
COPY use_existing_torch.py use_existing_torch.py
RUN python3 use_existing_torch.py
RUN cat requirements/build.txt

RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system -r requirements/build.txt

# Default mount file as placeholder, this just avoid the mount error
ARG TORCH_WHEELS_PATH="./requirements"
# Install torch, torchaudio and torchvision. If TORCH_WHEELS_PATH is default
# to ./requirements, it will pull the nightly versions using pip. Otherwise,
# it will use the local wheels from TORCH_WHEELS_PATH
# Install torch, torchaudio and torchvision
# if TORCH_WHEELS_PATH is default "./requirements", it will pull the nightly versions using pip using torch_build_versions.txt
# otherwise, it will use the whls from TORCH_WHEELS_PATH from the host machine
RUN --mount=type=bind,source=${TORCH_WHEELS_PATH},target=/dist \
    --mount=type=cache,target=/root/.cache/uv \
    if [ -n "$TORCH_WHEELS_PATH" ] && [ "$TORCH_WHEELS_PATH" != "./requirements" ] && [ -d "/dist" ] && ls /dist/torch*.whl >/dev/null 2>&1; then \
@@ -291,13 +333,19 @@ RUN --mount=type=cache,target=/root/.cache/uv \
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system /wheels/xformers/*.whl --verbose

# Build FlashInfer from source
# Build flashinfer from source.
ARG torch_cuda_arch_list='8.0;8.9;9.0a;10.0a;12.0'
# install package for build flashinfer
# see issue: https://github.com/flashinfer-ai/flashinfer/issues/738

RUN pip install build==1.3.0
RUN pip freeze | grep -E 'setuptools|packaging|build'

ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list}

# Build flashinfer for torch nightly from source around 10 mins
ARG FLASHINFER_GIT_REPO="https://github.com/flashinfer-ai/flashinfer.git"
# Keep this in sync with https://github.com/vllm-project/vllm/blob/main/requirements/cuda.txt
ARG FLASHINFER_GIT_REF="v0.2.14.post1"

RUN --mount=type=cache,target=/root/.cache/uv \
    git clone --depth 1 --recursive --shallow-submodules \
        --branch ${FLASHINFER_GIT_REF} \

@@ -309,7 +357,7 @@ RUN --mount=type=cache,target=/root/.cache/uv \
    && cd .. \
    && rm -rf flashinfer

# Install FlashInfer
# install flashinfer python
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system wheels/flashinfer/*.whl --verbose

@@ -319,6 +367,49 @@ RUN uv pip freeze | grep -i '^torch\|^torchvision\|^torchaudio\|^xformers\|^vllm
################### VLLM INSTALLED IMAGE ####################


#################### UNITTEST IMAGE #############################
FROM vllm-base as test

ENV UV_HTTP_TIMEOUT=500
ENV UV_INDEX_STRATEGY="unsafe-best-match"
# Use copy mode to avoid hardlink failures with Docker cache mounts
ENV UV_LINK_MODE=copy

COPY tests/ tests/
COPY examples examples
COPY benchmarks benchmarks
COPY ./vllm/collect_env.py .
COPY requirements/common.txt requirements/common.txt
COPY use_existing_torch.py use_existing_torch.py
COPY pyproject.toml pyproject.toml
# Install build and runtime dependencies without stable torch version
COPY requirements/nightly_torch_test.txt requirements/nightly_torch_test.txt

RUN python3 use_existing_torch.py

# install packages
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system -r requirements/common.txt
# enable fast downloads from hf (for testing)
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system hf_transfer
ENV HF_HUB_ENABLE_HF_TRANSFER 1

# install development dependencies (for testing)
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system -e tests/vllm_test_utils

RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system -r requirements/nightly_torch_test.txt

# Logging to confirm the torch versions
RUN pip freeze | grep -E 'torch|xformers|vllm|flashinfer'

# Logging to confirm all the packages are installed
RUN pip freeze

#################### UNITTEST IMAGE #############################

#################### EXPORT STAGE ####################
FROM scratch as export-wheels
@ -1,14 +1,9 @@
 | 
			
		||||
import glob
 | 
			
		||||
import os
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
requires_files = glob.glob("requirements/*.txt")
 | 
			
		||||
requires_files += ["pyproject.toml"]
 | 
			
		||||
 | 
			
		||||
for file in requires_files:
 | 
			
		||||
    if not os.path.exists(file):
 | 
			
		||||
        print(f"!!! skipping missing {file}")
 | 
			
		||||
        continue
 | 
			
		||||
    print(f">>> cleaning {file}")
 | 
			
		||||
    with open(file) as f:
 | 
			
		||||
        lines = f.readlines()
 | 
			
		||||
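The Python excerpt above is cut off by the diff context. Judging from the COPY use_existing_torch.py and RUN python3 use_existing_torch.py steps earlier in this Dockerfile, it is likely the requirements cleaner that strips stable torch pins so the nightly torch already installed in the image is not overwritten. A minimal standalone sketch of that idea follows; the filtering rule and the helper name are assumptions for illustration, not the script's elided body.

import glob
import os

def strip_torch_pins(path: str) -> None:
    # Assumed cleaning rule: drop any requirement line that would pin
    # torch, torchvision, or torchaudio to a stable release.
    with open(path) as f:
        lines = f.readlines()
    kept = [ln for ln in lines if not ln.lstrip().startswith("torch")]
    with open(path, "w") as f:
        f.writelines(kept)

for file in glob.glob("requirements/*.txt"):
    if os.path.exists(file):
        strip_torch_pins(file)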

.github/pytorch-probot.yml (4 changes, vendored)
@@ -15,8 +15,7 @@ ciflow_push_tags:
- ciflow/inductor-micro-benchmark
- ciflow/inductor-micro-benchmark-cpu-x86
- ciflow/inductor-perf-compare
- ciflow/inductor-perf-test-nightly-rocm-mi300
- ciflow/inductor-perf-test-nightly-rocm-mi355
- ciflow/inductor-perf-test-nightly-rocm
- ciflow/inductor-perf-test-nightly-x86-zen
- ciflow/inductor-periodic
- ciflow/inductor-rocm
@@ -31,7 +30,6 @@ ciflow_push_tags:
- ciflow/riscv64
- ciflow/rocm
- ciflow/rocm-mi300
- ciflow/rocm-mi355
- ciflow/s390
- ciflow/slow
- ciflow/torchbench

.github/scripts/drci_mocks.json.gz (vendored, binary file not shown)

.github/scripts/filter_test_configs.py (2 changes, vendored)
@@ -512,8 +512,6 @@ def perform_misc_tasks(
        "keep-going",
        branch == MAIN_BRANCH
        or bool(tag and re.match(r"^trunk/[a-f0-9]{40}$", tag))
        # Pattern for tags created via manual run on HUD
        or bool(tag and re.match(r"^ciflow/[^/]+/[a-f0-9]{40}$", tag))
        or check_for_setting(labels, pr_body, "keep-going"),
    )
    set_output(
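The two regular expressions above are what force-enable keep-going mode for trunk tags and for ciflow tags created via a manual run on HUD. A standalone illustration of what they accept and reject:

import re

sha = "0123456789abcdef0123456789abcdef01234567"  # any 40-char hex commit SHA

assert re.match(r"^trunk/[a-f0-9]{40}$", f"trunk/{sha}")
# HUD-created tags look like ciflow/<workflow>/<sha>:
assert re.match(r"^ciflow/[^/]+/[a-f0-9]{40}$", f"ciflow/periodic/{sha}")
# A missing middle segment or a short SHA is rejected:
assert not re.match(r"^ciflow/[^/]+/[a-f0-9]{40}$", f"ciflow/{sha}")
assert not re.match(r"^trunk/[a-f0-9]{40}$", "trunk/abc123")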

.github/scripts/generate_binary_build_matrix.py (26 changes, vendored)
@@ -16,18 +16,16 @@ from typing import Optional


# NOTE: Please also update the CUDA sources in `PIP_SOURCES` in tools/nightly.py when changing this
CUDA_ARCHES = ["12.6", "12.8", "12.9", "13.0"]
CUDA_ARCHES = ["12.6", "12.8", "13.0"]
CUDA_STABLE = "12.8"
CUDA_ARCHES_FULL_VERSION = {
    "12.6": "12.6.3",
    "12.8": "12.8.1",
    "12.9": "12.9.1",
    "13.0": "13.0.0",
}
CUDA_ARCHES_CUDNN_VERSION = {
    "12.6": "9",
    "12.8": "9",
    "12.9": "9",
    "13.0": "9",
}

@@ -40,7 +38,7 @@ CPU_AARCH64_ARCH = ["cpu-aarch64"]

CPU_S390X_ARCH = ["cpu-s390x"]

CUDA_AARCH64_ARCHES = ["12.6-aarch64", "12.8-aarch64", "12.9-aarch64", "13.0-aarch64"]
CUDA_AARCH64_ARCHES = ["12.6-aarch64", "12.8-aarch64", "13.0-aarch64"]


PYTORCH_EXTRA_INSTALL_REQUIREMENTS = {
@@ -78,23 +76,6 @@ PYTORCH_EXTRA_INSTALL_REQUIREMENTS = {
        "nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | "
        "nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'"
    ),
    "12.9": (
        "nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64'"
    ),
    "13.0": (
        "nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | "
        "nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | "
@@ -341,7 +322,7 @@ def generate_wheels_matrix(
            # cuda linux wheels require PYTORCH_EXTRA_INSTALL_REQUIREMENTS to install

            if (
                arch_version in ["13.0", "12.9", "12.8", "12.6"]
                arch_version in ["13.0", "12.8", "12.6"]
                and os == "linux"
                or arch_version in CUDA_AARCH64_ARCHES
            ):
@@ -405,6 +386,5 @@ def generate_wheels_matrix(


validate_nccl_dep_consistency("13.0")
validate_nccl_dep_consistency("12.9")
validate_nccl_dep_consistency("12.8")
validate_nccl_dep_consistency("12.6")
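With "12.9" dropped from CUDA_ARCHES, CUDA_AARCH64_ARCHES, and the gate inside generate_wheels_matrix, no wheel configuration injects the cu129 extra-install requirements anymore. A standalone sketch of the gating expression above; note the operator precedence, where the trailing or admits aarch64 arches regardless of the os value:

CUDA_AARCH64_ARCHES = ["12.6-aarch64", "12.8-aarch64", "13.0-aarch64"]

def gets_extra_install_requirements(arch_version: str, os: str) -> bool:
    # Mirrors the post-change condition in generate_wheels_matrix;
    # `os` shadows the matrix operating-system string, as in the source.
    return (
        arch_version in ["13.0", "12.8", "12.6"]
        and os == "linux"
        or arch_version in CUDA_AARCH64_ARCHES
    )

assert gets_extra_install_requirements("12.8", "linux")
assert not gets_extra_install_requirements("12.9", "linux")  # removed by this commit
assert gets_extra_install_requirements("12.8-aarch64", "windows")  # aarch64 bypasses the os check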

.github/scripts/github_utils.py (1 change, vendored)
@@ -18,7 +18,6 @@ class GitHubComment:
    body_text: str
    created_at: str
    author_login: str
    author_url: Optional[str]
    author_association: str
    editor_login: Optional[str]
    database_id: int

.github/scripts/gql_mocks.json.gz (vendored, binary file not shown)

.github/scripts/test_check_labels.py (2 changes, vendored)
@@ -38,7 +38,6 @@ def mock_get_comments() -> list[GitHubComment]:
            body_text="mock_body_text",
            created_at="",
            author_login="",
            author_url=None,
            author_association="",
            editor_login=None,
            database_id=1,
@@ -49,7 +48,6 @@ def mock_get_comments() -> list[GitHubComment]:
            body_text=" #" + LABEL_ERR_MSG_TITLE.replace("`", ""),
            created_at="",
            author_login=BOT_AUTHORS[1],
            author_url=None,
            author_association="",
            editor_login=None,
            database_id=2,

.github/scripts/test_trymerge.py (18 changes, vendored)
@@ -32,7 +32,6 @@ from trymerge import (
    main as trymerge_main,
    MandatoryChecksMissingError,
    MergeRule,
    PostCommentError,
    RE_GHSTACK_DESC,
    read_merge_rules,
    remove_job_name_suffix,
@@ -589,23 +588,6 @@ class TestTryMerge(TestCase):
            self.assertEqual(mock_merge_base, pr.get_merge_base())
            mocked_gh_fetch_merge_base.assert_called_once()

    def test_app_can_revert(self, *args: Any) -> None:
        pr = GitHubPR("pytorch", "pytorch", 164660)
        repo = DummyGitRepo()
        app_comment_id, impostor_comment_id = 3375785595, 3377647892
        # Check that app can revert
        self.assertIsNotNone(validate_revert(repo, pr, comment_id=app_comment_id))
        # But impostor can not
        self.assertRaises(
            PostCommentError,
            lambda: validate_revert(repo, pr, comment_id=impostor_comment_id),
        )
        # Despite its name being the name of the bot
        self.assertEqual(
            pr.get_comment_by_id(impostor_comment_id).author_login,
            "pytorch-auto-revert",
        )


@mock.patch("trymerge.gh_graphql", side_effect=mocked_gh_graphql)
@mock.patch("trymerge.gh_fetch_merge_base", return_value="")

.github/scripts/trymerge.py (11 changes, vendored)
@@ -234,7 +234,6 @@ query ($owner: String!, $name: String!, $number: Int!) {
          createdAt
          author {
            login
            url
          }
          authorAssociation
          editor {
@@ -1094,7 +1093,6 @@ class GitHubPR:
            body_text=node["bodyText"],
            created_at=node["createdAt"] if "createdAt" in node else "",
            author_login=node["author"]["login"],
            author_url=node["author"].get("url", None),
            author_association=node["authorAssociation"],
            editor_login=editor["login"] if editor else None,
            database_id=node["databaseId"],
@@ -2031,17 +2029,16 @@ def validate_revert(
    # For some reason, one cannot be a member of a private repo, only a CONTRIBUTOR
    if pr.is_base_repo_private():
        allowed_reverters.append("CONTRIBUTOR")
    # Special case the pytorch-auto-revert app, which does not have an association
    # but should be able to issue a revert command
    if comment.author_url == "https://github.com/apps/pytorch-auto-revert":
        allowed_reverters.append("NONE")

    if author_association not in allowed_reverters:
        raise PostCommentError(
            f"Will not revert as @{author_login} is not one of "
            f"[{', '.join(allowed_reverters)}], but instead is {author_association}."
        )

    # Raises exception if matching rule is not found, but ignores all status checks
    find_matching_merge_rule(
        pr, repo, skip_mandatory_checks=True, skip_internal_checks=True
    )
    commit_sha = get_pr_commit_sha(repo, pr)
    return (author_login, commit_sha)
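This hunk reworks the pytorch-auto-revert special case (its companion hunks drop author_url from the GraphQL query and the GitHubComment dataclass, and delete test_app_can_revert above). What the surrounding context pins down is the plain membership gate on the commenter's association. A condensed sketch of that gate; the baseline allowed_reverters list is an assumption, since it is defined outside this hunk:

class PostCommentError(Exception):
    pass

def check_reverter(author_association: str, base_repo_private: bool) -> None:
    allowed_reverters = ["COLLABORATOR", "MEMBER", "OWNER"]  # assumed baseline
    # One cannot be a MEMBER of a private repo, only a CONTRIBUTOR.
    if base_repo_private:
        allowed_reverters.append("CONTRIBUTOR")
    if author_association not in allowed_reverters:
        raise PostCommentError(
            f"{author_association} is not one of [{', '.join(allowed_reverters)}]"
        )

check_reverter("MEMBER", base_repo_private=False)       # passes
check_reverter("CONTRIBUTOR", base_repo_private=True)   # passes only for private repos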

@@ -177,9 +177,6 @@ jobs:
    runs-on: linux.rocm.gpu.mi250
    timeout-minutes: !{{ common.timeout_minutes }}
    !{{ upload.binary_env(config) }}
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm

.github/workflows/_docs.yml (2 changes, vendored)
@@ -72,7 +72,7 @@ jobs:
            # Let's try to figure out how this can be improved
            timeout-minutes: 360
          - docs_type: python
            runner: ${{ inputs.runner_prefix }}linux.c7i.2xlarge
            runner: ${{ inputs.runner_prefix }}linux.2xlarge
            # It takes less than 30m to finish python docs unless there are issues
            timeout-minutes: 30
    # Set a fixed name for this job instead of using the current matrix-generated name, i.e. build-docs (cpp, linux.12xlarge, 180)

.github/workflows/_get-changed-files.yml (9 changes, vendored)
@@ -40,15 +40,6 @@ jobs:
              # Use gh CLI to get changed files in the PR with explicit repo
              CHANGED_FILES=$(gh api repos/${{ github.repository }}/pulls/$PR_NUMBER/files --paginate --jq '.[] | select(.status != "removed") | .filename' | tr '\n' ' ' | sed 's/ $//')

              # See https://github.com/pytorch/pytorch/pull/134215#issuecomment-2332128790
              PYI_FILES_TO_ADD=""
              for file in ${CHANGED_FILES}; do
                if [[ "${file}" == *".pyi.in" ]]; then
                  PYI_FILES_TO_ADD="${PYI_FILES_TO_ADD} ${file//.in/}"
                fi
              done
              CHANGED_FILES="${CHANGED_FILES}${PYI_FILES_TO_ADD}"

              if [ -z "$CHANGED_FILES" ]; then
                echo "No changed files found, setting to '*'"
                CHANGED_FILES="*"
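The deleted shell block above mapped changed *.pyi.in stub templates onto their generated *.pyi outputs; bash ${file//.in/} deletes every ".in" substring, not just a trailing one. The same mapping in Python, for the record:

def pyi_target(template: str) -> str:
    # Faithful to bash ${file//.in/}: all ".in" occurrences are removed,
    # which is safe for the *.pyi.in templates this loop targeted.
    return template.replace(".in", "")

assert pyi_target("torch/_C/_VariableFunctions.pyi.in") == "torch/_C/_VariableFunctions.pyi"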

.github/workflows/_linux-test.yml (2 changes, vendored)
@@ -389,6 +389,8 @@ jobs:
            "${DOCKER_IMAGE}" \
            ${DOCKER_SHELL_CMD}
          )
          # Propagate download.pytorch.org IP to container
          grep download.pytorch.org /etc/hosts | docker exec -i "${container_name}" sudo bash -c "/bin/cat >> /etc/hosts"
          echo "DOCKER_CONTAINER_ID=${container_name}" >> "${GITHUB_ENV}"

          if [[ ${BUILD_ENVIRONMENT} == *"s390x"* ]]; then

.github/workflows/_rocm-test.yml (13 changes, vendored)
@@ -102,6 +102,19 @@ jobs:
            exit 1
          fi

      - name: configure aws credentials
        id: aws_creds
        uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722 # v4.1.0
        with:
          role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_s3_and_ecr_read_only
          aws-region: us-east-1
          role-duration-seconds: 18000

      - name: Login to Amazon ECR
        id: login-ecr
        continue-on-error: true
        uses: aws-actions/amazon-ecr-login@062b18b96a7aff071d4dc91bc00c4c1a7945b076 # v2.0.1

      - name: Calculate docker image
        id: calculate-docker-image
        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main

.github/workflows/build-manywheel-images.yml (4 changes, vendored)
@@ -46,12 +46,10 @@ jobs:
      fail-fast: false
      matrix:
        include: [
          { name: "manylinux2_28-builder",          tag: "cuda13.0",          runner: "linux.9xlarge.ephemeral" },
          { name: "manylinux2_28-builder",          tag: "cuda13.0",         runner: "linux.9xlarge.ephemeral" },
          { name: "manylinux2_28-builder",          tag: "cuda12.8",          runner: "linux.9xlarge.ephemeral" },
          { name: "manylinux2_28-builder",          tag: "cuda12.9",          runner: "linux.9xlarge.ephemeral" },
          { name: "manylinux2_28-builder",          tag: "cuda12.6",          runner: "linux.9xlarge.ephemeral" },
          { name: "manylinuxaarch64-builder",       tag: "cuda13.0",          runner: "linux.arm64.2xlarge.ephemeral" },
          { name: "manylinuxaarch64-builder",       tag: "cuda12.9",          runner: "linux.arm64.2xlarge.ephemeral" },
          { name: "manylinuxaarch64-builder",       tag: "cuda12.8",          runner: "linux.arm64.2xlarge.ephemeral" },
          { name: "manylinuxaarch64-builder",       tag: "cuda12.6",          runner: "linux.arm64.2xlarge.ephemeral" },
          { name: "manylinux2_28-builder",          tag: "rocm6.4",           runner: "linux.9xlarge.ephemeral" },
 | 
			
		||||
							
								
								
									
										322
									
								
								.github/workflows/generated-linux-aarch64-binary-manywheel-nightly.yml
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										322
									
								
								.github/workflows/generated-linux-aarch64-binary-manywheel-nightly.yml
									
									
									
										generated
									
									
										vendored
									
									
								
							@ -204,52 +204,6 @@ jobs:
 | 
			
		||||
      github-token: ${{ secrets.GITHUB_TOKEN }}
 | 
			
		||||
    uses: ./.github/workflows/_binary-upload.yml
 | 
			
		||||
 | 
			
		||||
  manywheel-py3_10-cuda-aarch64-12_9-build:
 | 
			
		||||
    if: ${{ github.repository_owner == 'pytorch' }}
 | 
			
		||||
    uses: ./.github/workflows/_binary-build-linux.yml
 | 
			
		||||
    needs: get-label-type
 | 
			
		||||
    with:
 | 
			
		||||
      PYTORCH_ROOT: /pytorch
 | 
			
		||||
      PACKAGE_TYPE: manywheel
 | 
			
		||||
      # TODO: This is a legacy variable that we eventually want to get rid of in
 | 
			
		||||
      #       favor of GPU_ARCH_VERSION
 | 
			
		||||
      DESIRED_CUDA: cu129
 | 
			
		||||
      GPU_ARCH_VERSION: "12.9-aarch64"
 | 
			
		||||
      GPU_ARCH_TYPE: cuda-aarch64
 | 
			
		||||
      DOCKER_IMAGE: manylinuxaarch64-builder
 | 
			
		||||
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
 | 
			
		||||
      DESIRED_PYTHON: "3.10"
 | 
			
		||||
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
 | 
			
		||||
      runs_on: linux.arm64.r7g.12xlarge.memory
 | 
			
		||||
      ALPINE_IMAGE: "arm64v8/alpine"
 | 
			
		||||
      build_name: manywheel-py3_10-cuda-aarch64-12_9
 | 
			
		||||
      build_environment: linux-aarch64-binary-manywheel
 | 
			
		||||
      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64'
 | 
			
		||||
      timeout-minutes: 420
 | 
			
		||||
    secrets:
 | 
			
		||||
      github-token: ${{ secrets.GITHUB_TOKEN }}
 | 
			
		||||
  manywheel-py3_10-cuda-aarch64-12_9-upload:  # Uploading
 | 
			
		||||
    if: ${{ github.repository_owner == 'pytorch' }}
 | 
			
		||||
    permissions:
 | 
			
		||||
      id-token: write
 | 
			
		||||
      contents: read
 | 
			
		||||
    needs: manywheel-py3_10-cuda-aarch64-12_9-build
 | 
			
		||||
    with:
 | 
			
		||||
      PYTORCH_ROOT: /pytorch
 | 
			
		||||
      PACKAGE_TYPE: manywheel
 | 
			
		||||
      # TODO: This is a legacy variable that we eventually want to get rid of in
 | 
			
		||||
      #       favor of GPU_ARCH_VERSION
 | 
			
		||||
      DESIRED_CUDA: cu129
 | 
			
		||||
      GPU_ARCH_VERSION: "12.9-aarch64"
 | 
			
		||||
      GPU_ARCH_TYPE: cuda-aarch64
 | 
			
		||||
      DOCKER_IMAGE: manylinuxaarch64-builder
 | 
			
		||||
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
 | 
			
		||||
      DESIRED_PYTHON: "3.10"
 | 
			
		||||
      build_name: manywheel-py3_10-cuda-aarch64-12_9
 | 
			
		||||
    secrets:
 | 
			
		||||
      github-token: ${{ secrets.GITHUB_TOKEN }}
 | 
			
		||||
    uses: ./.github/workflows/_binary-upload.yml
 | 
			
		||||
 | 
			
		||||
  manywheel-py3_10-cuda-aarch64-13_0-build:
 | 
			
		||||
    if: ${{ github.repository_owner == 'pytorch' }}
 | 
			
		||||
    uses: ./.github/workflows/_binary-build-linux.yml
 | 
			
		||||
@ -453,52 +407,6 @@ jobs:
 | 
			
		||||
      github-token: ${{ secrets.GITHUB_TOKEN }}
 | 
			
		||||
    uses: ./.github/workflows/_binary-upload.yml
 | 
			
		||||
 | 
			
		||||
  manywheel-py3_11-cuda-aarch64-12_9-build:
 | 
			
		||||
    if: ${{ github.repository_owner == 'pytorch' }}
 | 
			
		||||
    uses: ./.github/workflows/_binary-build-linux.yml
 | 
			
		||||
    needs: get-label-type
 | 
			
		||||
    with:
 | 
			
		||||
      PYTORCH_ROOT: /pytorch
 | 
			
		||||
      PACKAGE_TYPE: manywheel
 | 
			
		||||
      # TODO: This is a legacy variable that we eventually want to get rid of in
 | 
			
		||||
      #       favor of GPU_ARCH_VERSION
 | 
			
		||||
      DESIRED_CUDA: cu129
 | 
			
		||||
      GPU_ARCH_VERSION: "12.9-aarch64"
 | 
			
		||||
      GPU_ARCH_TYPE: cuda-aarch64
 | 
			
		||||
      DOCKER_IMAGE: manylinuxaarch64-builder
 | 
			
		||||
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
 | 
			
		||||
      DESIRED_PYTHON: "3.11"
 | 
			
		||||
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
 | 
			
		||||
      runs_on: linux.arm64.r7g.12xlarge.memory
 | 
			
		||||
      ALPINE_IMAGE: "arm64v8/alpine"
 | 
			
		||||
      build_name: manywheel-py3_11-cuda-aarch64-12_9
 | 
			
		||||
      build_environment: linux-aarch64-binary-manywheel
 | 
			
		||||
      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64'
 | 
			
		||||
      timeout-minutes: 420
 | 
			
		||||
    secrets:
 | 
			
		||||
      github-token: ${{ secrets.GITHUB_TOKEN }}
 | 
			
		||||
  manywheel-py3_11-cuda-aarch64-12_9-upload:  # Uploading
 | 
			
		||||
    if: ${{ github.repository_owner == 'pytorch' }}
 | 
			
		||||
    permissions:
 | 
			
		||||
      id-token: write
 | 
			
		||||
      contents: read
 | 
			
		||||
    needs: manywheel-py3_11-cuda-aarch64-12_9-build
 | 
			
		||||
    with:
 | 
			
		||||
      PYTORCH_ROOT: /pytorch
 | 
			
		||||
      PACKAGE_TYPE: manywheel
 | 
			
		||||
      # TODO: This is a legacy variable that we eventually want to get rid of in
 | 
			
		||||
      #       favor of GPU_ARCH_VERSION
 | 
			
		||||
      DESIRED_CUDA: cu129
 | 
			
		||||
      GPU_ARCH_VERSION: "12.9-aarch64"
 | 
			
		||||
      GPU_ARCH_TYPE: cuda-aarch64
 | 
			
		||||
      DOCKER_IMAGE: manylinuxaarch64-builder
 | 
			
		||||
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
 | 
			
		||||
      DESIRED_PYTHON: "3.11"
 | 
			
		||||
      build_name: manywheel-py3_11-cuda-aarch64-12_9
 | 
			
		||||
    secrets:
 | 
			
		||||
      github-token: ${{ secrets.GITHUB_TOKEN }}
 | 
			
		||||
    uses: ./.github/workflows/_binary-upload.yml
 | 
			
		||||
 | 
			
		||||
  manywheel-py3_11-cuda-aarch64-13_0-build:
 | 
			
		||||
    if: ${{ github.repository_owner == 'pytorch' }}
 | 
			
		||||
    uses: ./.github/workflows/_binary-build-linux.yml
 | 
			
		||||
@ -702,52 +610,6 @@ jobs:
 | 
			
		||||
      github-token: ${{ secrets.GITHUB_TOKEN }}
 | 
			
		||||
    uses: ./.github/workflows/_binary-upload.yml
 | 
			
		||||
 | 
			
		||||
  manywheel-py3_12-cuda-aarch64-12_9-build:
 | 
			
		||||
    if: ${{ github.repository_owner == 'pytorch' }}
 | 
			
		||||
    uses: ./.github/workflows/_binary-build-linux.yml
 | 
			
		||||
    needs: get-label-type
 | 
			
		||||
    with:
 | 
			
		||||
      PYTORCH_ROOT: /pytorch
 | 
			
		||||
      PACKAGE_TYPE: manywheel
 | 
			
		||||
      # TODO: This is a legacy variable that we eventually want to get rid of in
 | 
			
		||||
      #       favor of GPU_ARCH_VERSION
 | 
			
		||||
      DESIRED_CUDA: cu129
 | 
			
		||||
      GPU_ARCH_VERSION: "12.9-aarch64"
 | 
			
		||||
      GPU_ARCH_TYPE: cuda-aarch64
 | 
			
		||||
      DOCKER_IMAGE: manylinuxaarch64-builder
 | 
			
		||||
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
 | 
			
		||||
      DESIRED_PYTHON: "3.12"
 | 
			
		||||
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
 | 
			
		||||
      runs_on: linux.arm64.r7g.12xlarge.memory
 | 
			
		||||
      ALPINE_IMAGE: "arm64v8/alpine"
 | 
			
		||||
      build_name: manywheel-py3_12-cuda-aarch64-12_9
 | 
			
		||||
      build_environment: linux-aarch64-binary-manywheel
 | 
			
		||||
      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64'
 | 
			
		||||
      timeout-minutes: 420
 | 
			
		||||
    secrets:
 | 
			
		||||
      github-token: ${{ secrets.GITHUB_TOKEN }}
 | 
			
		||||
  manywheel-py3_12-cuda-aarch64-12_9-upload:  # Uploading
 | 
			
		||||
    if: ${{ github.repository_owner == 'pytorch' }}
 | 
			
		||||
    permissions:
 | 
			
		||||
      id-token: write
 | 
			
		||||
      contents: read
 | 
			
		||||
    needs: manywheel-py3_12-cuda-aarch64-12_9-build
 | 
			
		||||
    with:
 | 
			
		||||
      PYTORCH_ROOT: /pytorch
 | 
			
		||||
      PACKAGE_TYPE: manywheel
 | 
			
		||||
      # TODO: This is a legacy variable that we eventually want to get rid of in
 | 
			
		||||
      #       favor of GPU_ARCH_VERSION
 | 
			
		||||
      DESIRED_CUDA: cu129
 | 
			
		||||
      GPU_ARCH_VERSION: "12.9-aarch64"
 | 
			
		||||
      GPU_ARCH_TYPE: cuda-aarch64
 | 
			
		||||
      DOCKER_IMAGE: manylinuxaarch64-builder
 | 
			
		||||
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
 | 
			
		||||
      DESIRED_PYTHON: "3.12"
 | 
			
		||||
      build_name: manywheel-py3_12-cuda-aarch64-12_9
 | 
			
		||||
    secrets:
 | 
			
		||||
      github-token: ${{ secrets.GITHUB_TOKEN }}
 | 
			
		||||
    uses: ./.github/workflows/_binary-upload.yml
 | 
			
		||||
 | 
			
		||||
  manywheel-py3_12-cuda-aarch64-13_0-build:
 | 
			
		||||
    if: ${{ github.repository_owner == 'pytorch' }}
 | 
			
		||||
    uses: ./.github/workflows/_binary-build-linux.yml
 | 
			
		||||
@ -951,52 +813,6 @@ jobs:
 | 
			
		||||
      github-token: ${{ secrets.GITHUB_TOKEN }}
 | 
			
		||||
    uses: ./.github/workflows/_binary-upload.yml
 | 
			
		||||
 | 
			
		||||
  manywheel-py3_13-cuda-aarch64-12_9-build:
 | 
			
		||||
    if: ${{ github.repository_owner == 'pytorch' }}
 | 
			
		||||
    uses: ./.github/workflows/_binary-build-linux.yml
 | 
			
		||||
    needs: get-label-type
 | 
			
		||||
    with:
 | 
			
		||||
      PYTORCH_ROOT: /pytorch
 | 
			
		||||
      PACKAGE_TYPE: manywheel
 | 
			
		||||
      # TODO: This is a legacy variable that we eventually want to get rid of in
 | 
			
		||||
      #       favor of GPU_ARCH_VERSION
 | 
			
		||||
      DESIRED_CUDA: cu129
 | 
			
		||||
      GPU_ARCH_VERSION: "12.9-aarch64"
 | 
			
		||||
      GPU_ARCH_TYPE: cuda-aarch64
 | 
			
		||||
      DOCKER_IMAGE: manylinuxaarch64-builder
 | 
			
		||||
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
 | 
			
		||||
      DESIRED_PYTHON: "3.13"
 | 
			
		||||
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
 | 
			
		||||
      runs_on: linux.arm64.r7g.12xlarge.memory
 | 
			
		||||
      ALPINE_IMAGE: "arm64v8/alpine"
 | 
			
		||||
      build_name: manywheel-py3_13-cuda-aarch64-12_9
 | 
			
		||||
      build_environment: linux-aarch64-binary-manywheel
 | 
			
		||||
      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64'
 | 
			
		||||
      timeout-minutes: 420
 | 
			
		||||
    secrets:
 | 
			
		||||
      github-token: ${{ secrets.GITHUB_TOKEN }}
 | 
			
		||||
  manywheel-py3_13-cuda-aarch64-12_9-upload:  # Uploading
 | 
			
		||||
    if: ${{ github.repository_owner == 'pytorch' }}
 | 
			
		||||
    permissions:
 | 
			
		||||
      id-token: write
 | 
			
		||||
      contents: read
 | 
			
		||||
    needs: manywheel-py3_13-cuda-aarch64-12_9-build
 | 
			
		||||
    with:
 | 
			
		||||
      PYTORCH_ROOT: /pytorch
 | 
			
		||||
      PACKAGE_TYPE: manywheel
 | 
			
		||||
      # TODO: This is a legacy variable that we eventually want to get rid of in
 | 
			
		||||
      #       favor of GPU_ARCH_VERSION
 | 
			
		||||
      DESIRED_CUDA: cu129
 | 
			
		||||
      GPU_ARCH_VERSION: "12.9-aarch64"
 | 
			
		||||
      GPU_ARCH_TYPE: cuda-aarch64
 | 
			
		||||
      DOCKER_IMAGE: manylinuxaarch64-builder
 | 
			
		||||
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
 | 
			
		||||
      DESIRED_PYTHON: "3.13"
 | 
			
		||||
      build_name: manywheel-py3_13-cuda-aarch64-12_9
 | 
			
		||||
    secrets:
 | 
			
		||||
      github-token: ${{ secrets.GITHUB_TOKEN }}
 | 
			
		||||
    uses: ./.github/workflows/_binary-upload.yml
 | 
			
		||||
 | 
			
		||||
  manywheel-py3_13-cuda-aarch64-13_0-build:
 | 
			
		||||
    if: ${{ github.repository_owner == 'pytorch' }}
 | 
			
		||||
    uses: ./.github/workflows/_binary-build-linux.yml
 | 
			
		||||
@ -1200,52 +1016,6 @@ jobs:
 | 
			
		||||
      github-token: ${{ secrets.GITHUB_TOKEN }}
 | 
			
		||||
    uses: ./.github/workflows/_binary-upload.yml
 | 
			
		||||
 | 
			
		||||
  manywheel-py3_13t-cuda-aarch64-12_9-build:
 | 
			
		||||
    if: ${{ github.repository_owner == 'pytorch' }}
 | 
			
		||||
    uses: ./.github/workflows/_binary-build-linux.yml
 | 
			
		||||
    needs: get-label-type
 | 
			
		||||
    with:
 | 
			
		||||
      PYTORCH_ROOT: /pytorch
 | 
			
		||||
      PACKAGE_TYPE: manywheel
 | 
			
		||||
      # TODO: This is a legacy variable that we eventually want to get rid of in
 | 
			
		||||
      #       favor of GPU_ARCH_VERSION
 | 
			
		||||
      DESIRED_CUDA: cu129
 | 
			
		||||
      GPU_ARCH_VERSION: "12.9-aarch64"
 | 
			
		||||
      GPU_ARCH_TYPE: cuda-aarch64
 | 
			
		||||
      DOCKER_IMAGE: manylinuxaarch64-builder
 | 
			
		||||
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
 | 
			
		||||
      DESIRED_PYTHON: "3.13t"
 | 
			
		||||
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
 | 
			
		||||
      runs_on: linux.arm64.r7g.12xlarge.memory
 | 
			
		||||
      ALPINE_IMAGE: "arm64v8/alpine"
 | 
			
		||||
      build_name: manywheel-py3_13t-cuda-aarch64-12_9
 | 
			
		||||
      build_environment: linux-aarch64-binary-manywheel
 | 
			
		||||
      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64'
 | 
			
		||||
      timeout-minutes: 420
 | 
			
		||||
    secrets:
 | 
			
		||||
      github-token: ${{ secrets.GITHUB_TOKEN }}
 | 
			
		||||
  manywheel-py3_13t-cuda-aarch64-12_9-upload:  # Uploading
 | 
			
		||||
    if: ${{ github.repository_owner == 'pytorch' }}
 | 
			
		||||
    permissions:
 | 
			
		||||
      id-token: write
 | 
			
		||||
      contents: read
 | 
			
		||||
    needs: manywheel-py3_13t-cuda-aarch64-12_9-build
 | 
			
		||||
    with:
 | 
			
		||||
      PYTORCH_ROOT: /pytorch
 | 
			
		||||
      PACKAGE_TYPE: manywheel
 | 
			
		||||
      # TODO: This is a legacy variable that we eventually want to get rid of in
 | 
			
		||||
      #       favor of GPU_ARCH_VERSION
 | 
			
		||||
      DESIRED_CUDA: cu129
 | 
			
		||||
      GPU_ARCH_VERSION: "12.9-aarch64"
 | 
			
		||||
      GPU_ARCH_TYPE: cuda-aarch64
 | 
			
		||||
      DOCKER_IMAGE: manylinuxaarch64-builder
 | 
			
		||||
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
 | 
			
		||||
      DESIRED_PYTHON: "3.13t"
 | 
			
		||||
      build_name: manywheel-py3_13t-cuda-aarch64-12_9
 | 
			
		||||
    secrets:
 | 
			
		||||
      github-token: ${{ secrets.GITHUB_TOKEN }}
 | 
			
		||||
    uses: ./.github/workflows/_binary-upload.yml
 | 
			
		||||
 | 
			
		||||
  manywheel-py3_13t-cuda-aarch64-13_0-build:
 | 
			
		||||
    if: ${{ github.repository_owner == 'pytorch' }}
 | 
			
		||||
    uses: ./.github/workflows/_binary-build-linux.yml
 | 
			
		||||
@ -1449,52 +1219,6 @@ jobs:
 | 
			
		||||
      github-token: ${{ secrets.GITHUB_TOKEN }}
 | 
			
		||||
    uses: ./.github/workflows/_binary-upload.yml
 | 
			
		||||
 | 
			
		||||
  manywheel-py3_14-cuda-aarch64-12_9-build:
 | 
			
		||||
    if: ${{ github.repository_owner == 'pytorch' }}
 | 
			
		||||
    uses: ./.github/workflows/_binary-build-linux.yml
 | 
			
		||||
    needs: get-label-type
 | 
			
		||||
    with:
 | 
			
		||||
      PYTORCH_ROOT: /pytorch
 | 
			
		||||
      PACKAGE_TYPE: manywheel
 | 
			
		||||
      # TODO: This is a legacy variable that we eventually want to get rid of in
 | 
			
		||||
      #       favor of GPU_ARCH_VERSION
 | 
			
		||||
      DESIRED_CUDA: cu129
 | 
			
		||||
      GPU_ARCH_VERSION: "12.9-aarch64"
 | 
			
		||||
      GPU_ARCH_TYPE: cuda-aarch64
 | 
			
		||||
      DOCKER_IMAGE: manylinuxaarch64-builder
 | 
			
		||||
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
 | 
			
		||||
      DESIRED_PYTHON: "3.14"
 | 
			
		||||
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
 | 
			
		||||
      runs_on: linux.arm64.r7g.12xlarge.memory
 | 
			
		||||
      ALPINE_IMAGE: "arm64v8/alpine"
 | 
			
		||||
      build_name: manywheel-py3_14-cuda-aarch64-12_9
 | 
			
		||||
      build_environment: linux-aarch64-binary-manywheel
 | 
			
		||||
      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64'
 | 
			
		||||
      timeout-minutes: 420
 | 
			
		||||
    secrets:
 | 
			
		||||
      github-token: ${{ secrets.GITHUB_TOKEN }}
 | 
			
		||||
  manywheel-py3_14-cuda-aarch64-12_9-upload:  # Uploading
 | 
			
		||||
    if: ${{ github.repository_owner == 'pytorch' }}
 | 
			
		||||
    permissions:
 | 
			
		||||
      id-token: write
 | 
			
		||||
      contents: read
 | 
			
		||||
    needs: manywheel-py3_14-cuda-aarch64-12_9-build
 | 
			
		||||
    with:
 | 
			
		||||
      PYTORCH_ROOT: /pytorch
 | 
			
		||||
      PACKAGE_TYPE: manywheel
 | 
			
		||||
      # TODO: This is a legacy variable that we eventually want to get rid of in
 | 
			
		||||
      #       favor of GPU_ARCH_VERSION
 | 
			
		||||
      DESIRED_CUDA: cu129
 | 
			
		||||
      GPU_ARCH_VERSION: "12.9-aarch64"
 | 
			
		||||
      GPU_ARCH_TYPE: cuda-aarch64
 | 
			
		||||
      DOCKER_IMAGE: manylinuxaarch64-builder
 | 
			
		||||
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
 | 
			
		||||
      DESIRED_PYTHON: "3.14"
 | 
			
		||||
      build_name: manywheel-py3_14-cuda-aarch64-12_9
 | 
			
		||||
    secrets:
 | 
			
		||||
      github-token: ${{ secrets.GITHUB_TOKEN }}
 | 
			
		||||
    uses: ./.github/workflows/_binary-upload.yml
 | 
			
		||||
 | 
			
		||||
  manywheel-py3_14-cuda-aarch64-13_0-build:
 | 
			
		||||
    if: ${{ github.repository_owner == 'pytorch' }}
 | 
			
		||||
    uses: ./.github/workflows/_binary-build-linux.yml
 | 
			
		||||
@ -1698,52 +1422,6 @@ jobs:
 | 
			
		||||
      github-token: ${{ secrets.GITHUB_TOKEN }}
 | 
			
		||||
    uses: ./.github/workflows/_binary-upload.yml
 | 
			
		||||
 | 
			
		||||
  manywheel-py3_14t-cuda-aarch64-12_9-build:
 | 
			
		||||
    if: ${{ github.repository_owner == 'pytorch' }}
 | 
			
		||||
    uses: ./.github/workflows/_binary-build-linux.yml
 | 
			
		||||
    needs: get-label-type
 | 
			
		||||
    with:
 | 
			
		||||
      PYTORCH_ROOT: /pytorch
 | 
			
		||||
      PACKAGE_TYPE: manywheel
 | 
			
		||||
      # TODO: This is a legacy variable that we eventually want to get rid of in
 | 
			
		||||
      #       favor of GPU_ARCH_VERSION
 | 
			
		||||
      DESIRED_CUDA: cu129
 | 
			
		||||
      GPU_ARCH_VERSION: "12.9-aarch64"
 | 
			
		||||
      GPU_ARCH_TYPE: cuda-aarch64
 | 
			
		||||
      DOCKER_IMAGE: manylinuxaarch64-builder
 | 
			
		||||
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
 | 
			
		||||
      DESIRED_PYTHON: "3.14t"
 | 
			
		||||
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
 | 
			
		||||
      runs_on: linux.arm64.r7g.12xlarge.memory
 | 
			
		||||
      ALPINE_IMAGE: "arm64v8/alpine"
 | 
			
		||||
      build_name: manywheel-py3_14t-cuda-aarch64-12_9
 | 
			
		||||
      build_environment: linux-aarch64-binary-manywheel
 | 
			
		||||
      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64'
 | 
			
		||||
      timeout-minutes: 420
 | 
			
		||||
    secrets:
 | 
			
		||||
      github-token: ${{ secrets.GITHUB_TOKEN }}
 | 
			
		||||
  manywheel-py3_14t-cuda-aarch64-12_9-upload:  # Uploading
 | 
			
		||||
    if: ${{ github.repository_owner == 'pytorch' }}
 | 
			
		||||
    permissions:
 | 
			
		||||
      id-token: write
 | 
			
		||||
      contents: read
 | 
			
		||||
    needs: manywheel-py3_14t-cuda-aarch64-12_9-build
 | 
			
		||||
    with:
 | 
			
		||||
      PYTORCH_ROOT: /pytorch
 | 
			
		||||
      PACKAGE_TYPE: manywheel
 | 
			
		||||
      # TODO: This is a legacy variable that we eventually want to get rid of in
 | 
			
		||||
      #       favor of GPU_ARCH_VERSION
 | 
			
		||||
      DESIRED_CUDA: cu129
 | 
			
		||||
      GPU_ARCH_VERSION: "12.9-aarch64"
 | 
			
		||||
      GPU_ARCH_TYPE: cuda-aarch64
 | 
			
		||||
      DOCKER_IMAGE: manylinuxaarch64-builder
 | 
			
		||||
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
 | 
			
		||||
      DESIRED_PYTHON: "3.14t"
 | 
			
		||||
      build_name: manywheel-py3_14t-cuda-aarch64-12_9
 | 
			
		||||
    secrets:
 | 
			
		||||
      github-token: ${{ secrets.GITHUB_TOKEN }}
 | 
			
		||||
    uses: ./.github/workflows/_binary-upload.yml
 | 
			
		||||
 | 
			
		||||
  manywheel-py3_14t-cuda-aarch64-13_0-build:
 | 
			
		||||
    if: ${{ github.repository_owner == 'pytorch' }}
 | 
			
		||||
    uses: ./.github/workflows/_binary-build-linux.yml
 | 
			
		||||
 | 
			
		||||
							
								
								
									
										74
									
								
								.github/workflows/generated-linux-binary-libtorch-nightly.yml
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										74
									
								
								.github/workflows/generated-linux-binary-libtorch-nightly.yml
									
									
									
										generated
									
									
										vendored
									
									
								
							@ -248,74 +248,6 @@ jobs:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  libtorch-cuda12_9-shared-with-deps-release-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
    needs: get-label-type
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: libtorch
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: libtorch-cxx11-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      LIBTORCH_CONFIG: release
      LIBTORCH_VARIANT: shared-with-deps
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      build_name: libtorch-cuda12_9-shared-with-deps-release
      build_environment: linux-binary-libtorch
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
  libtorch-cuda12_9-shared-with-deps-release-test:  # Testing
    if: ${{ github.repository_owner == 'pytorch' }}
    needs:
      - libtorch-cuda12_9-shared-with-deps-release-build
      - get-label-type
    uses: ./.github/workflows/_binary-test-linux.yml
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: libtorch
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: libtorch-cxx11-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      LIBTORCH_CONFIG: release
      LIBTORCH_VARIANT: shared-with-deps
      build_name: libtorch-cuda12_9-shared-with-deps-release
      build_environment: linux-binary-libtorch
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
  libtorch-cuda12_9-shared-with-deps-release-upload:  # Uploading
    if: ${{ github.repository_owner == 'pytorch' }}
    permissions:
      id-token: write
      contents: read
    needs: libtorch-cuda12_9-shared-with-deps-release-test
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: libtorch
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: libtorch-cxx11-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      LIBTORCH_CONFIG: release
      LIBTORCH_VARIANT: shared-with-deps
      build_name: libtorch-cuda12_9-shared-with-deps-release
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml
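
The "12.8+ builds need sm_70+ runner" comment on runs_on above reflects that CUDA 12.8+ binaries target compute capability 7.0 and newer, so the test jobs are pinned to g4dn instances (NVIDIA T4, sm_75). Below is a minimal Python sketch of that kind of capability gate, assuming a CUDA-enabled PyTorch install; the threshold logic is illustrative, not the workflow's actual test code.

# Illustrative only: the kind of compute-capability check implied by the
# runs_on comment above. torch.cuda.get_device_capability is a real PyTorch
# API; the gating function itself is our own sketch.
import torch

def runner_can_test_cuda_binary(min_capability=(7, 0)):
    if not torch.cuda.is_available():
        return False
    # Returns e.g. (7, 5) on the T4 GPUs in g4dn runners; tuple comparison
    # gives the sm_70+ check.
    return torch.cuda.get_device_capability(0) >= min_capability

print(runner_can_test_cuda_binary())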

  libtorch-cuda13_0-shared-with-deps-release-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
@@ -426,9 +358,6 @@ jobs:
      DOCKER_IMAGE_TAG_PREFIX: rocm6.4
      LIBTORCH_CONFIG: release
      LIBTORCH_VARIANT: shared-with-deps
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm
@@ -544,9 +473,6 @@ jobs:
      DOCKER_IMAGE_TAG_PREFIX: rocm7.0
      LIBTORCH_CONFIG: release
      LIBTORCH_VARIANT: shared-with-deps
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm

504  .github/workflows/generated-linux-binary-manywheel-nightly.yml  generated  vendored

@@ -241,72 +241,6 @@ jobs:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  manywheel-py3_10-cuda12_9-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
    needs: get-label-type
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.10"
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      build_name: manywheel-py3_10-cuda12_9
      build_environment: linux-binary-manywheel
      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64'
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
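
Each " | "-separated segment of the PYTORCH_EXTRA_INSTALL_REQUIREMENTS value above is a PEP 508 requirement whose environment marker restricts installation to Linux x86_64. A minimal sketch of how such a list can be parsed, using the packaging library; splitting on " | " mirrors the format shown here, while the build scripts' actual handling is outside this diff.

# Sketch only: parse two segments of the pipe-separated requirement string and
# evaluate their environment markers against the current interpreter.
from packaging.requirements import Requirement

extra = (
    "nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64'"
    " | "
    "nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64'"
)

for segment in extra.split(" | "):
    req = Requirement(segment)
    # req.marker.evaluate() checks the marker against this machine's
    # environment, so it prints True only on Linux/x86_64.
    print(req.name, req.specifier, req.marker.evaluate())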
  manywheel-py3_10-cuda12_9-test:  # Testing
    if: ${{ github.repository_owner == 'pytorch' }}
    needs:
      - manywheel-py3_10-cuda12_9-build
      - get-label-type
    uses: ./.github/workflows/_binary-test-linux.yml
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.10"
      build_name: manywheel-py3_10-cuda12_9
      build_environment: linux-binary-manywheel
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
  manywheel-py3_10-cuda12_9-upload:  # Uploading
    if: ${{ github.repository_owner == 'pytorch' }}
    permissions:
      id-token: write
      contents: read
    needs: manywheel-py3_10-cuda12_9-test
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.10"
      build_name: manywheel-py3_10-cuda12_9
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  manywheel-py3_10-cuda13_0-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
@@ -413,9 +347,6 @@ jobs:
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: rocm6.4
      DESIRED_PYTHON: "3.10"
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm
@@ -528,9 +459,6 @@ jobs:
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: rocm7.0
      DESIRED_PYTHON: "3.10"
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm
@@ -907,72 +835,6 @@ jobs:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  manywheel-py3_11-cuda12_9-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
    needs: get-label-type
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.11"
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      build_name: manywheel-py3_11-cuda12_9
      build_environment: linux-binary-manywheel
      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64'
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
  manywheel-py3_11-cuda12_9-test:  # Testing
    if: ${{ github.repository_owner == 'pytorch' }}
    needs:
      - manywheel-py3_11-cuda12_9-build
      - get-label-type
    uses: ./.github/workflows/_binary-test-linux.yml
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.11"
      build_name: manywheel-py3_11-cuda12_9
      build_environment: linux-binary-manywheel
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
  manywheel-py3_11-cuda12_9-upload:  # Uploading
    if: ${{ github.repository_owner == 'pytorch' }}
    permissions:
      id-token: write
      contents: read
    needs: manywheel-py3_11-cuda12_9-test
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.11"
      build_name: manywheel-py3_11-cuda12_9
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  manywheel-py3_11-cuda13_0-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
@@ -1079,9 +941,6 @@ jobs:
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: rocm6.4
      DESIRED_PYTHON: "3.11"
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm
@@ -1194,9 +1053,6 @@ jobs:
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: rocm7.0
      DESIRED_PYTHON: "3.11"
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm
@@ -1573,72 +1429,6 @@ jobs:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  manywheel-py3_12-cuda12_9-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
    needs: get-label-type
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.12"
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      build_name: manywheel-py3_12-cuda12_9
      build_environment: linux-binary-manywheel
      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64'
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
  manywheel-py3_12-cuda12_9-test:  # Testing
    if: ${{ github.repository_owner == 'pytorch' }}
    needs:
      - manywheel-py3_12-cuda12_9-build
      - get-label-type
    uses: ./.github/workflows/_binary-test-linux.yml
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.12"
      build_name: manywheel-py3_12-cuda12_9
      build_environment: linux-binary-manywheel
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
  manywheel-py3_12-cuda12_9-upload:  # Uploading
    if: ${{ github.repository_owner == 'pytorch' }}
    permissions:
      id-token: write
      contents: read
    needs: manywheel-py3_12-cuda12_9-test
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.12"
      build_name: manywheel-py3_12-cuda12_9
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  manywheel-py3_12-cuda13_0-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
@@ -1745,9 +1535,6 @@ jobs:
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: rocm6.4
      DESIRED_PYTHON: "3.12"
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm
@@ -1860,9 +1647,6 @@ jobs:
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: rocm7.0
      DESIRED_PYTHON: "3.12"
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm
@@ -2239,72 +2023,6 @@ jobs:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  manywheel-py3_13-cuda12_9-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
    needs: get-label-type
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.13"
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      build_name: manywheel-py3_13-cuda12_9
      build_environment: linux-binary-manywheel
      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64'
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
  manywheel-py3_13-cuda12_9-test:  # Testing
    if: ${{ github.repository_owner == 'pytorch' }}
    needs:
      - manywheel-py3_13-cuda12_9-build
      - get-label-type
    uses: ./.github/workflows/_binary-test-linux.yml
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.13"
      build_name: manywheel-py3_13-cuda12_9
      build_environment: linux-binary-manywheel
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
  manywheel-py3_13-cuda12_9-upload:  # Uploading
    if: ${{ github.repository_owner == 'pytorch' }}
    permissions:
      id-token: write
      contents: read
    needs: manywheel-py3_13-cuda12_9-test
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.13"
      build_name: manywheel-py3_13-cuda12_9
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  manywheel-py3_13-cuda13_0-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
@@ -2411,9 +2129,6 @@ jobs:
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: rocm6.4
      DESIRED_PYTHON: "3.13"
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm
@@ -2526,9 +2241,6 @@ jobs:
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: rocm7.0
      DESIRED_PYTHON: "3.13"
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm
@@ -2905,72 +2617,6 @@ jobs:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  manywheel-py3_13t-cuda12_9-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
    needs: get-label-type
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.13t"
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      build_name: manywheel-py3_13t-cuda12_9
      build_environment: linux-binary-manywheel
      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64'
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
  manywheel-py3_13t-cuda12_9-test:  # Testing
    if: ${{ github.repository_owner == 'pytorch' }}
    needs:
      - manywheel-py3_13t-cuda12_9-build
      - get-label-type
    uses: ./.github/workflows/_binary-test-linux.yml
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.13t"
      build_name: manywheel-py3_13t-cuda12_9
      build_environment: linux-binary-manywheel
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
  manywheel-py3_13t-cuda12_9-upload:  # Uploading
    if: ${{ github.repository_owner == 'pytorch' }}
    permissions:
      id-token: write
      contents: read
    needs: manywheel-py3_13t-cuda12_9-test
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.13t"
      build_name: manywheel-py3_13t-cuda12_9
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  manywheel-py3_13t-cuda13_0-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
@@ -3077,9 +2723,6 @@ jobs:
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: rocm6.4
      DESIRED_PYTHON: "3.13t"
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm
@@ -3192,9 +2835,6 @@ jobs:
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: rocm7.0
      DESIRED_PYTHON: "3.13t"
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm
@@ -3571,72 +3211,6 @@ jobs:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  manywheel-py3_14-cuda12_9-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
    needs: get-label-type
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.14"
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      build_name: manywheel-py3_14-cuda12_9
      build_environment: linux-binary-manywheel
      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64'
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
  manywheel-py3_14-cuda12_9-test:  # Testing
    if: ${{ github.repository_owner == 'pytorch' }}
    needs:
      - manywheel-py3_14-cuda12_9-build
      - get-label-type
    uses: ./.github/workflows/_binary-test-linux.yml
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.14"
      build_name: manywheel-py3_14-cuda12_9
      build_environment: linux-binary-manywheel
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
  manywheel-py3_14-cuda12_9-upload:  # Uploading
    if: ${{ github.repository_owner == 'pytorch' }}
    permissions:
      id-token: write
      contents: read
    needs: manywheel-py3_14-cuda12_9-test
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.14"
      build_name: manywheel-py3_14-cuda12_9
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  manywheel-py3_14-cuda13_0-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
@@ -3743,9 +3317,6 @@ jobs:
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: rocm6.4
      DESIRED_PYTHON: "3.14"
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm
@@ -3858,9 +3429,6 @@ jobs:
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: rocm7.0
      DESIRED_PYTHON: "3.14"
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm
@@ -4237,72 +3805,6 @@ jobs:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  manywheel-py3_14t-cuda12_9-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
    needs: get-label-type
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.14t"
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      build_name: manywheel-py3_14t-cuda12_9
      build_environment: linux-binary-manywheel
      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64'
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
  manywheel-py3_14t-cuda12_9-test:  # Testing
    if: ${{ github.repository_owner == 'pytorch' }}
    needs:
      - manywheel-py3_14t-cuda12_9-build
      - get-label-type
    uses: ./.github/workflows/_binary-test-linux.yml
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.14t"
      build_name: manywheel-py3_14t-cuda12_9
      build_environment: linux-binary-manywheel
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
  manywheel-py3_14t-cuda12_9-upload:  # Uploading
    if: ${{ github.repository_owner == 'pytorch' }}
    permissions:
      id-token: write
      contents: read
    needs: manywheel-py3_14t-cuda12_9-test
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.14t"
      build_name: manywheel-py3_14t-cuda12_9
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  manywheel-py3_14t-cuda13_0-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
@@ -4409,9 +3911,6 @@ jobs:
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: rocm6.4
      DESIRED_PYTHON: "3.14t"
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm
@@ -4524,9 +4023,6 @@ jobs:
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: rocm7.0
      DESIRED_PYTHON: "3.14t"
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm

250  .github/workflows/generated-windows-binary-libtorch-debug-nightly.yml  generated  vendored

@@ -788,256 +788,6 @@ jobs:
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml
  libtorch-cuda12_9-shared-with-deps-debug-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      PACKAGE_TYPE: libtorch
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      SKIP_ALL_TESTS: 1
      LIBTORCH_CONFIG: debug
      LIBTORCH_VARIANT: shared-with-deps
      # This is a dummy value for libtorch to work correctly with our batch scripts
      # without this value pip does not get installed for some reason
      DESIRED_PYTHON: "3.10"
    steps:
      # NOTE: These environment variables are put here so that they can be applied on every job equally
      #       They are also here because setting them at a workflow level doesn't give us access to the
      #       runner.temp variable, which we need.
      - name: Populate binary env
        shell: bash
        run: |
          echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
          echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
          echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
      - name: Display EC2 information
        shell: bash
        run: |
          set -euo pipefail
          function get_ec2_metadata() {
            # Pulled from instance metadata endpoint for EC2
            # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
            category=$1
            curl -H "X-aws-ec2-metadata-token: $(curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 30")" -fsSL "http://169.254.169.254/latest/meta-data/${category}"
          }
          echo "ami-id: $(get_ec2_metadata ami-id)"
          echo "instance-id: $(get_ec2_metadata instance-id)"
          echo "instance-type: $(get_ec2_metadata instance-type)"
          echo "system info $(uname -a)"
      - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
        uses: pytorch/test-infra/.github/actions/setup-ssh@main
        continue-on-error: true
        with:
          github-secret: ${{ secrets.GITHUB_TOKEN }}
      - name: Enable git long paths and symlinks on Windows and disable fsmonitor daemon
        shell: bash
        run: |
          git config --global core.longpaths true
          git config --global core.symlinks true

          # https://git-scm.com/docs/git-fsmonitor--daemon.  The daemon could lock
          # the directory on Windows and prevent GHA from checking out as reported
          # in https://github.com/actions/checkout/issues/1018
          git config --global core.fsmonitor false
      # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
      - name: Enable long paths on Windows
        shell: powershell
        run: |
          Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
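The registry write above enables NTFS long-path support by setting the LongPathsEnabled value under HKLM. A small Python verification sketch that reads the value back, using the standard winreg module (Windows only; not part of the workflow itself):

# Read back HKLM\SYSTEM\CurrentControlSet\Control\FileSystem\LongPathsEnabled.
import winreg

with winreg.OpenKey(
    winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Control\FileSystem"
) as key:
    value, _type = winreg.QueryValueEx(key, "LongPathsEnabled")

print("LongPathsEnabled =", value)  # expect 1 after the step above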
      # Since it's just a defensive command, the workflow should continue even the command fails. This step can be
      # removed once Windows Defender is removed from the AMI
      - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
        continue-on-error: true
        shell: powershell
        run: |
          Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
          # Let's both exclude the path and disable Windows Defender completely just to be sure
          # that it doesn't interfere
          Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
      - name: Checkout PyTorch
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
          submodules: recursive
          path: pytorch
          show-progress: false
      - name: Clean PyTorch checkout
        run: |
          # Remove any artifacts from the previous checkouts
          git clean -fxd
        working-directory: pytorch
      - name: Populate binary env
        shell: bash
        run: |
          "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
      - name: Build PyTorch binary
        shell: bash
        run: |
          "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
      - uses: actions/upload-artifact@v4.4.0
        if: always()
        with:
          name: libtorch-cuda12_9-shared-with-deps-debug
          retention-days: 14
          if-no-files-found: error
          path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
      - name: Wait until all sessions have drained
        shell: powershell
        working-directory: pytorch
        if: always()
        timeout-minutes: 120
        run: |
          .github\scripts\wait_for_ssh_to_drain.ps1
      - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
        shell: powershell
        working-directory: pytorch
        if: always()
        run: |
          .github\scripts\kill_active_ssh_sessions.ps1

  libtorch-cuda12_9-shared-with-deps-debug-test:  # Testing
    if: ${{ github.repository_owner == 'pytorch' }}
    needs:
      - libtorch-cuda12_9-shared-with-deps-debug-build
      - get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.g4dn.xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      PACKAGE_TYPE: libtorch
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      SKIP_ALL_TESTS: 1
      LIBTORCH_CONFIG: debug
      LIBTORCH_VARIANT: shared-with-deps
      # This is a dummy value for libtorch to work correctly with our batch scripts
      # without this value pip does not get installed for some reason
      DESIRED_PYTHON: "3.10"
    steps:
      - name: Display EC2 information
        shell: bash
        run: |
          set -euo pipefail
          function get_ec2_metadata() {
            # Pulled from instance metadata endpoint for EC2
            # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
            category=$1
            curl -H "X-aws-ec2-metadata-token: $(curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 30")" -fsSL "http://169.254.169.254/latest/meta-data/${category}"
          }
          echo "ami-id: $(get_ec2_metadata ami-id)"
          echo "instance-id: $(get_ec2_metadata instance-id)"
          echo "instance-type: $(get_ec2_metadata instance-type)"
          echo "system info $(uname -a)"
      - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
        uses: pytorch/test-infra/.github/actions/setup-ssh@main
        continue-on-error: true
        with:
          github-secret: ${{ secrets.GITHUB_TOKEN }}
      - name: Enable git long paths and symlinks on Windows and disable fsmonitor daemon
        shell: bash
        run: |
          git config --global core.longpaths true
          git config --global core.symlinks true

          # https://git-scm.com/docs/git-fsmonitor--daemon.  The daemon could lock
          # the directory on Windows and prevent GHA from checking out as reported
          # in https://github.com/actions/checkout/issues/1018
          git config --global core.fsmonitor false
      # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
      - name: Enable long paths on Windows
        shell: powershell
        run: |
          Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
      # Since it's just a defensive command, the workflow should continue even the command fails. This step can be
      # removed once Windows Defender is removed from the AMI
      - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
        continue-on-error: true
        shell: powershell
        run: |
          Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
          # Let's both exclude the path and disable Windows Defender completely just to be sure
          # that it doesn't interfere
          Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
      - name: Checkout PyTorch
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
          submodules: recursive
          path: pytorch
          show-progress: false
      - name: Clean PyTorch checkout
        run: |
          # Remove any artifacts from the previous checkouts
          git clean -fxd
        working-directory: pytorch
      # NOTE: These environment variables are put here so that they can be applied on every job equally
      #       They are also here because setting them at a workflow level doesn't give us access to the
      #       runner.temp variable, which we need.
      - name: Populate binary env
        shell: bash
        run: |
          echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
          echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
          echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
      - uses: actions/download-artifact@v4.1.7
        name: Download Build Artifacts
        with:
          name: libtorch-cuda12_9-shared-with-deps-debug
          path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
      - name: Populate binary env
        shell: bash
        run: |
          "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
      - name: Test PyTorch binary
        shell: bash
        run: |
          "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
      - name: Wait until all sessions have drained
        shell: powershell
        working-directory: pytorch
        if: always()
        timeout-minutes: 120
        run: |
          .github\scripts\wait_for_ssh_to_drain.ps1
      - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
        shell: powershell
        working-directory: pytorch
        if: always()
 | 
			
		||||
        run: |
 | 
			
		||||
          .github\scripts\kill_active_ssh_sessions.ps1
 | 
			
		||||
  libtorch-cuda12_9-shared-with-deps-debug-upload:  # Uploading
 | 
			
		||||
    if: ${{ github.repository_owner == 'pytorch' }}
 | 
			
		||||
    permissions:
 | 
			
		||||
      id-token: write
 | 
			
		||||
      contents: read
 | 
			
		||||
    needs: libtorch-cuda12_9-shared-with-deps-debug-test
 | 
			
		||||
    with:
 | 
			
		||||
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
 | 
			
		||||
      PACKAGE_TYPE: libtorch
 | 
			
		||||
      # TODO: This is a legacy variable that we eventually want to get rid of in
 | 
			
		||||
      #       favor of GPU_ARCH_VERSION
 | 
			
		||||
      DESIRED_CUDA: cu129
 | 
			
		||||
      GPU_ARCH_VERSION: "12.9"
 | 
			
		||||
      GPU_ARCH_TYPE: cuda
 | 
			
		||||
      LIBTORCH_CONFIG: debug
 | 
			
		||||
      LIBTORCH_VARIANT: shared-with-deps
 | 
			
		||||
      # This is a dummy value for libtorch to work correctly with our batch scripts
 | 
			
		||||
      # without this value pip does not get installed for some reason
 | 
			
		||||
      DESIRED_PYTHON: "3.10"
 | 
			
		||||
      build_name: libtorch-cuda12_9-shared-with-deps-debug
 | 
			
		||||
    secrets:
 | 
			
		||||
      github-token: ${{ secrets.GITHUB_TOKEN }}
 | 
			
		||||
    uses: ./.github/workflows/_binary-upload.yml
 | 
			
		||||
  libtorch-cuda13_0-shared-with-deps-debug-build:
 | 
			
		||||
    if: ${{ github.repository_owner == 'pytorch' }}
 | 
			
		||||
    needs: get-label-type
 | 
			
		||||
 | 
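
Note: every runs-on in these jobs is assembled from a prefix emitted by the get-label-type job. A minimal sketch of the pattern, assuming an illustrative "lf." prefix and step id in place of whatever the _runner-determinator.yml reusable workflow actually computes:

# Sketch only: the step id and prefix value below are hypothetical.
jobs:
  get-label-type:
    runs-on: ubuntu-latest
    outputs:
      label-type: ${{ steps.pick.outputs.label-type }}
    steps:
      - id: pick
        run: echo "label-type=lf." >> "$GITHUB_OUTPUT"
  build:
    needs: get-label-type
    # Expands to e.g. "lf.windows.g4dn.xlarge" when the job is scheduled.
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.g4dn.xlarge"
    steps:
      - run: echo "picked a prefixed runner label"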
			

.github/workflows/generated-windows-binary-libtorch-release-nightly.yml (generated, vendored, 250 lines changed)
@ -788,256 +788,6 @@ jobs:
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml
  libtorch-cuda12_9-shared-with-deps-release-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      PACKAGE_TYPE: libtorch
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      SKIP_ALL_TESTS: 1
      LIBTORCH_CONFIG: release
      LIBTORCH_VARIANT: shared-with-deps
      # This is a dummy value for libtorch to work correctly with our batch scripts
      # without this value pip does not get installed for some reason
      DESIRED_PYTHON: "3.10"
    steps:
      # NOTE: These environment variables are put here so that they can be applied on every job equally
      #       They are also here because setting them at a workflow level doesn't give us access to the
      #       runner.temp variable, which we need.
      - name: Populate binary env
        shell: bash
        run: |
          echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
          echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
          echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
      - name: Display EC2 information
        shell: bash
        run: |
          set -euo pipefail
          function get_ec2_metadata() {
            # Pulled from instance metadata endpoint for EC2
            # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
            category=$1
            curl -H "X-aws-ec2-metadata-token: $(curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 30")" -fsSL "http://169.254.169.254/latest/meta-data/${category}"
          }
          echo "ami-id: $(get_ec2_metadata ami-id)"
          echo "instance-id: $(get_ec2_metadata instance-id)"
          echo "instance-type: $(get_ec2_metadata instance-type)"
          echo "system info $(uname -a)"
      - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
        uses: pytorch/test-infra/.github/actions/setup-ssh@main
        continue-on-error: true
        with:
          github-secret: ${{ secrets.GITHUB_TOKEN }}
      - name: Enable git long paths and symlinks on Windows and disable fsmonitor daemon
        shell: bash
        run: |
          git config --global core.longpaths true
          git config --global core.symlinks true

          # https://git-scm.com/docs/git-fsmonitor--daemon.  The daemon could lock
          # the directory on Windows and prevent GHA from checking out as reported
          # in https://github.com/actions/checkout/issues/1018
          git config --global core.fsmonitor false
      # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
      - name: Enable long paths on Windows
        shell: powershell
        run: |
          Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
      # Since it's just a defensive command, the workflow should continue even the command fails. This step can be
      # removed once Windows Defender is removed from the AMI
      - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
        continue-on-error: true
        shell: powershell
        run: |
          Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
          # Let's both exclude the path and disable Windows Defender completely just to be sure
          # that it doesn't interfere
          Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
      - name: Checkout PyTorch
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
          submodules: recursive
          path: pytorch
          show-progress: false
      - name: Clean PyTorch checkout
        run: |
          # Remove any artifacts from the previous checkouts
          git clean -fxd
        working-directory: pytorch
      - name: Populate binary env
        shell: bash
        run: |
          "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
      - name: Build PyTorch binary
        shell: bash
        run: |
          "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
      - uses: actions/upload-artifact@v4.4.0
        if: always()
        with:
          name: libtorch-cuda12_9-shared-with-deps-release
          retention-days: 14
          if-no-files-found: error
          path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
      - name: Wait until all sessions have drained
        shell: powershell
        working-directory: pytorch
        if: always()
        timeout-minutes: 120
        run: |
          .github\scripts\wait_for_ssh_to_drain.ps1
      - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
        shell: powershell
        working-directory: pytorch
        if: always()
        run: |
          .github\scripts\kill_active_ssh_sessions.ps1

  libtorch-cuda12_9-shared-with-deps-release-test:  # Testing
    if: ${{ github.repository_owner == 'pytorch' }}
    needs:
      - libtorch-cuda12_9-shared-with-deps-release-build
      - get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.g4dn.xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      PACKAGE_TYPE: libtorch
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      SKIP_ALL_TESTS: 1
      LIBTORCH_CONFIG: release
      LIBTORCH_VARIANT: shared-with-deps
      # This is a dummy value for libtorch to work correctly with our batch scripts
      # without this value pip does not get installed for some reason
      DESIRED_PYTHON: "3.10"
    steps:
      - name: Display EC2 information
        shell: bash
        run: |
          set -euo pipefail
          function get_ec2_metadata() {
            # Pulled from instance metadata endpoint for EC2
            # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
            category=$1
            curl -H "X-aws-ec2-metadata-token: $(curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 30")" -fsSL "http://169.254.169.254/latest/meta-data/${category}"
          }
          echo "ami-id: $(get_ec2_metadata ami-id)"
          echo "instance-id: $(get_ec2_metadata instance-id)"
          echo "instance-type: $(get_ec2_metadata instance-type)"
          echo "system info $(uname -a)"
      - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
        uses: pytorch/test-infra/.github/actions/setup-ssh@main
        continue-on-error: true
        with:
          github-secret: ${{ secrets.GITHUB_TOKEN }}
      - name: Enable git long paths and symlinks on Windows and disable fsmonitor daemon
        shell: bash
        run: |
          git config --global core.longpaths true
          git config --global core.symlinks true

          # https://git-scm.com/docs/git-fsmonitor--daemon.  The daemon could lock
          # the directory on Windows and prevent GHA from checking out as reported
          # in https://github.com/actions/checkout/issues/1018
          git config --global core.fsmonitor false
      # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
      - name: Enable long paths on Windows
        shell: powershell
        run: |
          Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
      # Since it's just a defensive command, the workflow should continue even the command fails. This step can be
      # removed once Windows Defender is removed from the AMI
      - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
        continue-on-error: true
        shell: powershell
        run: |
          Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
          # Let's both exclude the path and disable Windows Defender completely just to be sure
          # that it doesn't interfere
          Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
      - name: Checkout PyTorch
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
          submodules: recursive
          path: pytorch
          show-progress: false
      - name: Clean PyTorch checkout
        run: |
          # Remove any artifacts from the previous checkouts
          git clean -fxd
        working-directory: pytorch
      # NOTE: These environment variables are put here so that they can be applied on every job equally
      #       They are also here because setting them at a workflow level doesn't give us access to the
      #       runner.temp variable, which we need.
      - name: Populate binary env
        shell: bash
        run: |
          echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
          echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
          echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
      - uses: actions/download-artifact@v4.1.7
        name: Download Build Artifacts
        with:
          name: libtorch-cuda12_9-shared-with-deps-release
          path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
      - name: Populate binary env
        shell: bash
        run: |
          "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
      - name: Test PyTorch binary
        shell: bash
        run: |
          "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
      - name: Wait until all sessions have drained
        shell: powershell
        working-directory: pytorch
        if: always()
        timeout-minutes: 120
        run: |
          .github\scripts\wait_for_ssh_to_drain.ps1
      - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
        shell: powershell
        working-directory: pytorch
        if: always()
        run: |
          .github\scripts\kill_active_ssh_sessions.ps1
  libtorch-cuda12_9-shared-with-deps-release-upload:  # Uploading
    if: ${{ github.repository_owner == 'pytorch' }}
    permissions:
      id-token: write
      contents: read
    needs: libtorch-cuda12_9-shared-with-deps-release-test
    with:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      PACKAGE_TYPE: libtorch
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      LIBTORCH_CONFIG: release
      LIBTORCH_VARIANT: shared-with-deps
      # This is a dummy value for libtorch to work correctly with our batch scripts
      # without this value pip does not get installed for some reason
      DESIRED_PYTHON: "3.10"
      build_name: libtorch-cuda12_9-shared-with-deps-release
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml
  libtorch-cuda13_0-shared-with-deps-release-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
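
Note: each build/test/upload triple in this file follows one hand-off pattern: the build job publishes the package as a workflow artifact, the test job downloads it again under the same name, and the upload job delegates to the reusable _binary-upload.yml. A reduced sketch, with the PyTorch build/test scripts replaced by hypothetical placeholders:

jobs:
  build:
    runs-on: windows-latest
    steps:
      - run: ./build.sh   # stands in for binary_windows_build.sh
      - uses: actions/upload-artifact@v4.4.0
        with:
          name: libtorch-cuda12_9-shared-with-deps-release
          path: artifacts/
  test:
    needs: build
    runs-on: windows-latest
    steps:
      - uses: actions/download-artifact@v4.1.7
        with:
          name: libtorch-cuda12_9-shared-with-deps-release
          path: artifacts/
      - run: ./test.sh    # stands in for binary_windows_test.sh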
			

.github/workflows/generated-windows-binary-wheel-nightly.yml (generated, vendored, 1666 lines changed)
(File diff suppressed because it is too large)

.github/workflows/h100-distributed.yml (vendored, 2 lines changed)
@ -37,7 +37,7 @@ jobs:
    needs: get-label-type
    with:
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      runner: "linux.c7i.12xlarge"
      runner: "linux.12xlarge"
      build-environment: linux-jammy-cuda12.8-py3.10-gcc11-sm90-dist
      docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc11
      cuda-arch-list: '9.0'

@ -2,7 +2,7 @@ name: inductor-perf-nightly-h100

on:
  schedule:
    - cron: 15 0 * * 1-6
    - cron: 15 0,12 * * 1-6
    - cron: 0 7 * * 0
  # NB: GitHub has an upper limit of 10 inputs here, so before we can sort it
  # out, let try to run torchao cudagraphs_low_precision as part of cudagraphs
@ -130,7 +130,7 @@ jobs:
    name: test-periodically
    uses: ./.github/workflows/_linux-test.yml
    needs: build
    if: github.event.schedule == '15 0 * * 1-6'
    if: github.event.schedule == '15 0,12 * * 1-6'
    with:
      build-environment: linux-jammy-cuda12.8-py3.10-gcc9-sm90
      dashboard-tag: training-true-inference-true-default-true-dynamic-true-cudagraphs-true-cppwrapper-true-aotinductor-true-freezing_cudagraphs-true-cudagraphs_low_precision-true
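
Note: github.event.schedule carries the exact cron string that fired, so the if: guard has to change in lockstep with on.schedule; that is why this hunk edits the expression in both places, from 15 0 * * 1-6 to 15 0,12 * * 1-6. A self-contained sketch of the coupling:

on:
  schedule:
    - cron: 15 0,12 * * 1-6   # 00:15 and 12:15 UTC, Monday through Saturday
    - cron: 0 7 * * 0         # 07:00 UTC on Sundays
jobs:
  test-periodically:
    # Must equal the cron expression above character for character.
    if: github.event.schedule == '15 0,12 * * 1-6'
    runs-on: ubuntu-latest
    steps:
      - run: echo "twice-daily run"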
			
@ -63,7 +63,6 @@ jobs:
      # Same as the build job
      python-version: 3.12.7
      test-matrix: ${{ needs.macos-perf-py3-arm64-build.outputs.test-matrix }}
      timeout-minutes: 300
      disable-monitor: false
      monitor-log-interval: 15
      monitor-data-collect-interval: 4

@ -1,132 +0,0 @@
name: inductor-perf-nightly-rocm-mi300

on:
  push:
    tags:
      - ciflow/inductor-perf-test-nightly-rocm-mi300/*
  schedule:
    - cron: 15 0 * * *
  # NB: GitHub has an upper limit of 10 inputs here, so before we can sort it
  # out, let try to run torchao cudagraphs_low_precision as part of cudagraphs
  workflow_dispatch:
    inputs:
      training:
        description: Run training (on by default)?
        required: false
        type: boolean
        default: true
      inference:
        description: Run inference (on by default)?
        required: false
        type: boolean
        default: true
      default:
        description: Run inductor_default?
        required: false
        type: boolean
        default: false
      dynamic:
        description: Run inductor_dynamic_shapes?
        required: false
        type: boolean
        default: false
      cppwrapper:
        description: Run inductor_cpp_wrapper?
        required: false
        type: boolean
        default: false
      cudagraphs:
        description: Run inductor_cudagraphs?
        required: false
        type: boolean
        default: true
      freezing_cudagraphs:
        description: Run inductor_cudagraphs with freezing for inference?
        required: false
        type: boolean
        default: false
      aotinductor:
        description: Run aot_inductor for inference?
        required: false
        type: boolean
        default: false
      maxautotune:
        description: Run inductor_max_autotune?
        required: false
        type: boolean
        default: false
      benchmark_configs:
        description: The list of configs used the benchmark
        required: false
        type: string
        default: inductor_huggingface_perf_rocm_mi300,inductor_timm_perf_rocm_mi300,inductor_torchbench_perf_rocm_mi300

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}
  cancel-in-progress: true

permissions: read-all

jobs:
  get-label-type:
    name: get-label-type
    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
    if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }}
    with:
      triggering_actor: ${{ github.triggering_actor }}
      issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
      curr_branch: ${{ github.head_ref || github.ref_name }}
      curr_ref_type: ${{ github.ref_type }}
      opt_out_experiments: lf

  linux-jammy-rocm-py3_10-inductor-benchmark-build:
    if: github.repository_owner == 'pytorch'
    name: rocm-py3_10-inductor-benchmark-build
    uses: ./.github/workflows/_linux-build.yml
    with:
      build-environment: linux-jammy-rocm-py3_10
      docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3-benchmarks
      test-matrix: |
        { include: [
          { config: "inductor_huggingface_perf_rocm_mi300", shard: 1, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_huggingface_perf_rocm_mi300", shard: 2, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_huggingface_perf_rocm_mi300", shard: 3, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_huggingface_perf_rocm_mi300", shard: 4, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_huggingface_perf_rocm_mi300", shard: 5, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_timm_perf_rocm_mi300", shard: 1, num_shards: 7, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_timm_perf_rocm_mi300", shard: 2, num_shards: 7, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_timm_perf_rocm_mi300", shard: 3, num_shards: 7, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_timm_perf_rocm_mi300", shard: 4, num_shards: 7, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_timm_perf_rocm_mi300", shard: 5, num_shards: 7, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_timm_perf_rocm_mi300", shard: 6, num_shards: 7, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_timm_perf_rocm_mi300", shard: 7, num_shards: 7, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_torchbench_perf_rocm_mi300", shard: 1, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_torchbench_perf_rocm_mi300", shard: 2, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_torchbench_perf_rocm_mi300", shard: 3, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_torchbench_perf_rocm_mi300", shard: 4, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_torchbench_perf_rocm_mi300", shard: 5, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_torchbench_perf_rocm_mi300", shard: 6, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_torchbench_perf_rocm_mi300", shard: 7, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_torchbench_perf_rocm_mi300", shard: 8, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_torchbench_perf_rocm_mi300", shard: 9, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" },
        ]}
    secrets: inherit

  linux-jammy-rocm-py3_10-inductor-benchmark-test:
    permissions:
      id-token: write
      contents: read
    name: rocm-py3_10-inductor-benchmark-test
    uses: ./.github/workflows/_rocm-test.yml
    needs: linux-jammy-rocm-py3_10-inductor-benchmark-build
    with:
      build-environment: linux-jammy-rocm-py3_10
      dashboard-tag: training-true-inference-true-default-true-dynamic-true-cudagraphs-true-cppwrapper-true-aotinductor-true-freezing_cudagraphs-true-cudagraphs_low_precision-true
      docker-image: ${{ needs.linux-jammy-rocm-py3_10-inductor-benchmark-build.outputs.docker-image }}
      test-matrix: ${{ needs.linux-jammy-rocm-py3_10-inductor-benchmark-build.outputs.test-matrix }}
      timeout-minutes: 720
      # Disable monitor in perf tests for more investigation
      disable-monitor: true
      monitor-log-interval: 10
      monitor-data-collect-interval: 2
    secrets: inherit
@ -1,11 +1,11 @@
name: inductor-perf-nightly-rocm-mi355
name: inductor-perf-nightly-rocm

on:
  push:
    tags:
      - ciflow/inductor-perf-test-nightly-rocm-mi355/*
      - ciflow/inductor-perf-test-nightly-rocm/*
  schedule:
    - cron: 15 0 * * *
    - cron: 0 7 * * 0,3
  # NB: GitHub has an upper limit of 10 inputs here, so before we can sort it
  # out, let try to run torchao cudagraphs_low_precision as part of cudagraphs
  workflow_dispatch:
@ -59,7 +59,7 @@ on:
        description: The list of configs used the benchmark
        required: false
        type: string
        default: inductor_huggingface_perf_rocm_mi355,inductor_timm_perf_rocm_mi355,inductor_torchbench_perf_rocm_mi355
        default: inductor_huggingface_perf_rocm,inductor_timm_perf_rocm,inductor_torchbench_perf_rocm

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}
@ -88,27 +88,23 @@ jobs:
      docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3-benchmarks
      test-matrix: |
        { include: [
          { config: "inductor_huggingface_perf_rocm_mi355", shard: 1, num_shards: 5, runner: "linux.rocm.gpu.mi355.2" },
          { config: "inductor_huggingface_perf_rocm_mi355", shard: 2, num_shards: 5, runner: "linux.rocm.gpu.mi355.2" },
          { config: "inductor_huggingface_perf_rocm_mi355", shard: 3, num_shards: 5, runner: "linux.rocm.gpu.mi355.2" },
          { config: "inductor_huggingface_perf_rocm_mi355", shard: 4, num_shards: 5, runner: "linux.rocm.gpu.mi355.2" },
          { config: "inductor_huggingface_perf_rocm_mi355", shard: 5, num_shards: 5, runner: "linux.rocm.gpu.mi355.2" },
          { config: "inductor_timm_perf_rocm_mi355", shard: 1, num_shards: 7, runner: "linux.rocm.gpu.mi355.2" },
          { config: "inductor_timm_perf_rocm_mi355", shard: 2, num_shards: 7, runner: "linux.rocm.gpu.mi355.2" },
          { config: "inductor_timm_perf_rocm_mi355", shard: 3, num_shards: 7, runner: "linux.rocm.gpu.mi355.2" },
          { config: "inductor_timm_perf_rocm_mi355", shard: 4, num_shards: 7, runner: "linux.rocm.gpu.mi355.2" },
          { config: "inductor_timm_perf_rocm_mi355", shard: 5, num_shards: 7, runner: "linux.rocm.gpu.mi355.2" },
          { config: "inductor_timm_perf_rocm_mi355", shard: 6, num_shards: 7, runner: "linux.rocm.gpu.mi355.2" },
          { config: "inductor_timm_perf_rocm_mi355", shard: 7, num_shards: 7, runner: "linux.rocm.gpu.mi355.2" },
          { config: "inductor_torchbench_perf_rocm_mi355", shard: 1, num_shards: 9, runner: "linux.rocm.gpu.mi355.2" },
          { config: "inductor_torchbench_perf_rocm_mi355", shard: 2, num_shards: 9, runner: "linux.rocm.gpu.mi355.2" },
          { config: "inductor_torchbench_perf_rocm_mi355", shard: 3, num_shards: 9, runner: "linux.rocm.gpu.mi355.2" },
          { config: "inductor_torchbench_perf_rocm_mi355", shard: 4, num_shards: 9, runner: "linux.rocm.gpu.mi355.2" },
          { config: "inductor_torchbench_perf_rocm_mi355", shard: 5, num_shards: 9, runner: "linux.rocm.gpu.mi355.2" },
          { config: "inductor_torchbench_perf_rocm_mi355", shard: 6, num_shards: 9, runner: "linux.rocm.gpu.mi355.2" },
          { config: "inductor_torchbench_perf_rocm_mi355", shard: 7, num_shards: 9, runner: "linux.rocm.gpu.mi355.2" },
          { config: "inductor_torchbench_perf_rocm_mi355", shard: 8, num_shards: 9, runner: "linux.rocm.gpu.mi355.2" },
          { config: "inductor_torchbench_perf_rocm_mi355", shard: 9, num_shards: 9, runner: "linux.rocm.gpu.mi355.2" },
          { config: "inductor_huggingface_perf_rocm", shard: 1, num_shards: 4, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_huggingface_perf_rocm", shard: 2, num_shards: 4, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_huggingface_perf_rocm", shard: 3, num_shards: 4, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_huggingface_perf_rocm", shard: 4, num_shards: 4, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_timm_perf_rocm", shard: 1, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_timm_perf_rocm", shard: 2, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_timm_perf_rocm", shard: 3, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_timm_perf_rocm", shard: 4, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_timm_perf_rocm", shard: 5, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_torchbench_perf_rocm", shard: 1, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_torchbench_perf_rocm", shard: 2, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_torchbench_perf_rocm", shard: 3, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_torchbench_perf_rocm", shard: 4, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_torchbench_perf_rocm", shard: 5, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_torchbench_perf_rocm", shard: 6, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_torchbench_perf_rocm", shard: 7, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_torchbench_perf_rocm", shard: 8, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" },
        ]}
    secrets: inherit
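
Note: these test-matrix blocks are JSON strings parsed by the reusable _linux-build.yml and _rocm-test.yml workflows rather than a native strategy.matrix, but the shape is the same: each entry pins a config name, a shard out of num_shards, and a runner label. An equivalent sketch using a native matrix (hypothetical consumer, for illustration only):

jobs:
  bench:
    strategy:
      matrix:
        include:
          - { config: inductor_huggingface_perf_rocm, shard: 1, num_shards: 4 }
          - { config: inductor_huggingface_perf_rocm, shard: 2, num_shards: 4 }
    runs-on: ubuntu-latest
    steps:
      - run: echo "${{ matrix.config }} shard ${{ matrix.shard }} of ${{ matrix.num_shards }}"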
			

.github/workflows/inductor-periodic.yml (vendored, 10 lines changed)
@ -106,16 +106,6 @@ jobs:
          { config: "dynamic_aot_eager_huggingface", shard: 1, num_shards: 1, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "dynamic_aot_eager_timm", shard: 1, num_shards: 2, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "dynamic_aot_eager_timm", shard: 2, num_shards: 2, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "dynamic_inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "dynamic_inductor_timm", shard: 1, num_shards: 2, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "dynamic_inductor_timm", shard: 2, num_shards: 2, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "dynamic_inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "dynamic_inductor_torchbench", shard: 2, num_shards: 2, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "aot_inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "aot_inductor_timm", shard: 1, num_shards: 2, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "aot_inductor_timm", shard: 2, num_shards: 2, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "aot_inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "aot_inductor_torchbench", shard: 2, num_shards: 2, runner: "linux.rocm.gpu.gfx942.1" },
        ]}
    secrets: inherit

.github/workflows/lint.yml (vendored, 7 lines changed)
@ -12,7 +12,6 @@ on:
      - landchecks/*
    tags:
      - ciflow/pull/*
      - ciflow/trunk/*
  workflow_dispatch:

permissions: read-all
@ -33,12 +32,10 @@ jobs:
    name: Get changed files
    uses: ./.github/workflows/_get-changed-files.yml
    with:
      all_files: ${{ contains(github.event.pull_request.labels.*.name, 'lint-all-files') || contains(github.event.pull_request.labels.*.name, 'Reverted') || github.event_name == 'push' }}
      all_files: ${{ contains(github.event.pull_request.labels.*.name, 'lint-all-files') || contains(github.event.pull_request.labels.*.name, 'Reverted') }}

  lintrunner-clang:
    uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
    # Needed to prevent deduping on HUD
    name: lintrunner-clang-${{ needs.get-changed-files.outputs.changed-files == '*' && 'all' || 'partial' }}
    needs: [get-label-type, get-changed-files]
    # Only run if there are changed files relevant to clangtidy / clangformat
    if: |
@ -78,7 +75,6 @@ jobs:
  #       fails to find types when it should
  lintrunner-mypy:
    uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
    name: lintrunner-mypy-${{ needs.get-changed-files.outputs.changed-files == '*' && 'all' || 'partial' }}
    needs: [get-label-type, get-changed-files]
    # Only run if there are changed files relevant to mypy
    if: |
@ -103,7 +99,6 @@ jobs:

  lintrunner-noclang:
    uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
    name: lintrunner-noclang-${{ needs.get-changed-files.outputs.changed-files == '*' && 'all' || 'partial' }}
    needs: [get-label-type, get-changed-files]
    with:
      timeout: 120
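
Note: the lint job names above lean on the A && B || C idiom, GitHub expressions' stand-in for a ternary; it is safe here because 'all' is a non-empty, truthy string. A runnable sketch of the same construct:

jobs:
  demo:
    runs-on: ubuntu-latest
    steps:
      # Prints "all" on main and "partial" everywhere else.
      - run: echo "${{ github.ref == 'refs/heads/main' && 'all' || 'partial' }}"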
			

.github/workflows/operator_microbenchmark.yml (vendored, 25 lines changed)
@ -73,28 +73,3 @@ jobs:
      test-matrix: ${{ needs.opmicrobenchmark-build-b200.outputs.test-matrix }}
      aws-role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_s3_and_ecr_read_only
    secrets: inherit

  # ROCM MI300 runner
  opmicrobenchmark-build-rocm:
    if: github.repository_owner == 'pytorch'
    name: opmicrobenchmark-build-rocm
    uses: ./.github/workflows/_linux-build.yml
    with:
      build-environment: linux-jammy-rocm-py3_10
      docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3-benchmarks
      test-matrix: |
        { include: [
          { config: "operator_microbenchmark_test", shard: 1, num_shards: 1, runner: "linux.rocm.gpu.gfx942.1" },
        ]}
    secrets: inherit

  opmicrobenchmark-test-rocm:
    name: opmicrobenchmark-test-rocm
    uses: ./.github/workflows/_rocm-test.yml
    needs: opmicrobenchmark-build-rocm
    with:
      timeout-minutes: 500
      build-environment: linux-jammy-rocm-py3_10
      docker-image: ${{ needs.opmicrobenchmark-build-rocm.outputs.docker-image }}
      test-matrix: ${{ needs.opmicrobenchmark-build-rocm.outputs.test-matrix }}
    secrets: inherit
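
Note: the removed ROCm microbenchmark pair is wired like the other build/test pairs in this diff: the test job consumes the build job's docker-image and test-matrix outputs, so only the build job names the image and shard list. A reduced sketch of that coupling (job names here are illustrative):

jobs:
  bench-build:
    uses: ./.github/workflows/_linux-build.yml
    with:
      build-environment: linux-jammy-rocm-py3_10
      docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3-benchmarks
      test-matrix: |
        { include: [
          { config: "operator_microbenchmark_test", shard: 1, num_shards: 1 },
        ]}
    secrets: inherit
  bench-test:
    uses: ./.github/workflows/_rocm-test.yml
    needs: bench-build
    with:
      build-environment: linux-jammy-rocm-py3_10
      docker-image: ${{ needs.bench-build.outputs.docker-image }}
      test-matrix: ${{ needs.bench-build.outputs.test-matrix }}
    secrets: inherit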
			

.github/workflows/periodic.yml (vendored, 16 lines changed)
@ -182,11 +182,11 @@ jobs:
      docker-image-name: ci-image:pytorch-linux-jammy-cuda13.0-cudnn9-py3-gcc11
      test-matrix: |
        { include: [
          { config: "nogpu_AVX512", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
          { config: "nogpu_AVX512", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
          { config: "nogpu_AVX512", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
          { config: "nogpu_NO_AVX2", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
          { config: "nogpu_NO_AVX2", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
          { config: "nogpu_AVX512", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g4dn.4xlarge.nvidia.gpu" },
          { config: "nogpu_AVX512", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g4dn.4xlarge.nvidia.gpu" },
          { config: "nogpu_AVX512", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g4dn.4xlarge.nvidia.gpu" },
          { config: "nogpu_NO_AVX2", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g4dn.4xlarge.nvidia.gpu" },
          { config: "nogpu_NO_AVX2", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g4dn.4xlarge.nvidia.gpu" },
          { config: "jit_legacy", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g4dn.4xlarge.nvidia.gpu" },
        ]}
    secrets: inherit
@ -213,9 +213,9 @@ jobs:
      docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3
      test-matrix: |
        { include: [
          { config: "distributed", shard: 1, num_shards: 3, runner: "linux.rocm.gpu.mi250.4", owners: ["module:rocm", "oncall:distributed"] },
          { config: "distributed", shard: 2, num_shards: 3, runner: "linux.rocm.gpu.mi250.4", owners: ["module:rocm", "oncall:distributed"] },
          { config: "distributed", shard: 3, num_shards: 3, runner: "linux.rocm.gpu.mi250.4", owners: ["module:rocm", "oncall:distributed"] },
          { config: "distributed", shard: 1, num_shards: 3, runner: "linux.rocm.gpu.4", owners: ["module:rocm", "oncall:distributed"] },
          { config: "distributed", shard: 2, num_shards: 3, runner: "linux.rocm.gpu.4", owners: ["module:rocm", "oncall:distributed"] },
          { config: "distributed", shard: 3, num_shards: 3, runner: "linux.rocm.gpu.4", owners: ["module:rocm", "oncall:distributed"] },
        ]}
    secrets: inherit
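
Note: because every include entry carries its own runner label, this hunk can move the nogpu shards from linux.4xlarge CPU hosts onto linux.g4dn.4xlarge.nvidia.gpu hosts without touching config names or shard counts. A condensed sketch of the per-entry override (the build-environment value below is illustrative):

jobs:
  periodic-build:
    uses: ./.github/workflows/_linux-build.yml
    with:
      build-environment: linux-jammy-cuda13.0-py3.10-gcc11
      docker-image-name: ci-image:pytorch-linux-jammy-cuda13.0-cudnn9-py3-gcc11
      test-matrix: |
        { include: [
          { config: "nogpu_AVX512", shard: 1, num_shards: 1, runner: "linux.g4dn.4xlarge.nvidia.gpu" },
          { config: "jit_legacy", shard: 1, num_shards: 1, runner: "linux.g4dn.4xlarge.nvidia.gpu" },
        ]}
    secrets: inherit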
			

.github/workflows/pull.yml (vendored, 1 line changed)
@ -127,6 +127,7 @@ jobs:
    uses: ./.github/workflows/_linux-build.yml
    needs: get-label-type
    with:
      # More memory is needed to build with asan
      runner: linux.2xlarge.memory
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      build-environment: linux-jammy-py3.10-clang18-asan
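
Note: the added line pins the asan build to a higher-memory instance; address-sanitizer instrumentation is memory-hungry at compile time, which is why the comment travels with the override. The full call shape, with an illustrative job name:

jobs:
  asan-build:
    uses: ./.github/workflows/_linux-build.yml
    needs: get-label-type
    with:
      # More memory is needed to build with asan
      runner: linux.2xlarge.memory
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      build-environment: linux-jammy-py3.10-clang18-asan
    secrets: inherit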
			

.github/workflows/rocm-mi355.yml (vendored, 7 lines changed)
@ -1,9 +1,6 @@
name: rocm-mi355

on:
  push:
    tags:
      - ciflow/rocm-mi355/*
  workflow_dispatch:
  schedule:
    - cron: 30 11,1 * * *  # about 4:30am PDT and 6:30pm PDT
@ -67,7 +64,5 @@ jobs:
      build-environment: linux-noble-rocm-py3.12-mi355
      docker-image: ${{ needs.linux-noble-rocm-py3_12-build.outputs.docker-image }}
      test-matrix: ${{ needs.linux-noble-rocm-py3_12-build.outputs.test-matrix }}
      tests-to-include: >-
                        ${{ github.event_name == 'schedule' && 'test_nn test_torch test_cuda test_ops test_unary_ufuncs test_binary_ufuncs test_autograd inductor/test_torchinductor test_matmul_cuda test_scaled_matmul_cuda'
                           || '' }}
      tests-to-include: "test_nn test_torch test_cuda test_ops test_unary_ufuncs test_binary_ufuncs test_autograd inductor/test_torchinductor"
    secrets: inherit
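
Note: the replaced tests-to-include uses a YAML folded block scalar (>-), which joins the wrapped lines into one string before GitHub evaluates the expression; scheduled runs get the explicit test list, everything else gets an empty string, presumably deferring to the downstream workflow's default selection. A sketch of the construct with illustrative job names and a shortened test list:

jobs:
  mi355-test:
    uses: ./.github/workflows/_rocm-test.yml
    needs: mi355-build
    with:
      build-environment: linux-noble-rocm-py3.12-mi355
      docker-image: ${{ needs.mi355-build.outputs.docker-image }}
      test-matrix: ${{ needs.mi355-build.outputs.test-matrix }}
      # Folds to one line before the ${{ }} expression is evaluated.
      tests-to-include: >-
        ${{ github.event_name == 'schedule'
            && 'test_nn test_torch test_cuda'
            || '' }}
    secrets: inherit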
			

.github/workflows/rocm.yml (vendored, 26 lines changed)
@ -59,29 +59,3 @@ jobs:
      docker-image: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.docker-image }}
      test-matrix: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.test-matrix }}
    secrets: inherit

  linux-jammy-rocm-py3_10-gfx1100-test:
    if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }}
    permissions:
      id-token: write
      contents: read
    name: linux-jammy-rocm-py3_10-gfx1100
    uses: ./.github/workflows/_rocm-test.yml
    needs:
      - linux-jammy-rocm-py3_10-build
      - target-determination
    with:
      build-environment: linux-jammy-rocm-py3.10
      docker-image: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.docker-image }}
      test-matrix: |
        { include: [
          { config: "default", shard: 1, num_shards: 2, runner: "linux.rocm.gpu.gfx1100" },
          { config: "default", shard: 2, num_shards: 2, runner: "linux.rocm.gpu.gfx1100" },
        ]}
      tests-to-include: >
         test_nn test_torch test_cuda test_ops test_unary_ufuncs test_binary_ufuncs
         test_autograd inductor/test_torchinductor inductor/test_kernel_benchmark
         inductor/test_pad_mm inductor/test_benchmark_fusion inductor/test_aot_inductor
         inductor/test_torchinductor inductor/test_decompose_mem_bound_mm
         inductor/test_flex_attention inductor/test_max_autotune
    secrets: inherit

.github/workflows/slow.yml (vendored, 1 line changed)
@ -140,6 +140,7 @@ jobs:
    uses: ./.github/workflows/_linux-build.yml
    needs: get-label-type
    with:
      # More memory is needed to build with asan
      runner: linux.2xlarge.memory
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      build-environment: linux-jammy-py3.10-clang18-asan

.github/workflows/trunk.yml (vendored, 63 lines changed)
@ -56,7 +56,7 @@ jobs:
 | 
			
		||||
      docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc11
      build-generates-artifacts: false
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      runner: "linux.c7i.4xlarge"
      runner: "linux.4xlarge"
      test-matrix: |
        { include: [
          { config: "default", shard: 1, num_shards: 1 },
@@ -160,10 +160,9 @@ jobs:
      runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral"
      test-matrix: |
        { include: [
          { config: "default", shard: 1, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
          { config: "default", shard: 2, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
          { config: "default", shard: 3, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
          { config: "default", shard: 4, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
          { config: "default", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
          { config: "default", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
          { config: "default", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
        ]}
    secrets: inherit

@@ -180,16 +179,51 @@ jobs:
      disable-monitor: false
    secrets: inherit

  win-vs2022-cuda12_8-py3-build:
    name: win-vs2022-cuda12.8-py3
  win-vs2022-cuda12_6-py3-build:
    name: win-vs2022-cuda12.6-py3
    uses: ./.github/workflows/_win-build.yml
    needs: get-label-type
    with:
      build-environment: win-vs2022-cuda12.8-py3
      cuda-version: "12.8"
      build-environment: win-vs2022-cuda12.6-py3
      cuda-version: "12.6"
      runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral"
    secrets: inherit

  linux-jammy-rocm-py3_10-build:
    if: ${{ startsWith(github.event.ref, 'refs/tags/ciflow/trunk') }}
    name: linux-jammy-rocm-py3.10
    uses: ./.github/workflows/_linux-build.yml
    needs: get-label-type
    with:
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      build-environment: linux-jammy-rocm-py3.10
      docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3
      sync-tag: rocm-build
      test-matrix: |
        { include: [
          { config: "default", shard: 1, num_shards: 2, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "default", shard: 2, num_shards: 2, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "distributed", shard: 1, num_shards: 1, runner: "linux.rocm.gpu.gfx942.4" },
        ]}
    secrets: inherit

  linux-jammy-rocm-py3_10-test:
    if: ${{ startsWith(github.event.ref, 'refs/tags/ciflow/trunk') }}
    permissions:
      id-token: write
      contents: read
    name: linux-jammy-rocm-py3.10
    uses: ./.github/workflows/_rocm-test.yml
    needs:
      - linux-jammy-rocm-py3_10-build
      - target-determination
    with:
      build-environment: linux-jammy-rocm-py3.10
      docker-image: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.docker-image }}
      test-matrix: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.test-matrix }}
      tests-to-include: "test_nn test_torch test_cuda test_ops test_unary_ufuncs test_binary_ufuncs test_autograd inductor/test_torchinductor distributed/test_c10d_common distributed/test_c10d_nccl"
    secrets: inherit

  inductor-build:
    name: inductor-build
    uses: ./.github/workflows/_linux-build.yml
@@ -249,14 +283,3 @@ jobs:
      docker-image: ${{ needs.linux-jammy-py3-clang12-executorch-build.outputs.docker-image }}
      test-matrix: ${{ needs.linux-jammy-py3-clang12-executorch-build.outputs.test-matrix }}
    secrets: inherit

  linux-jammy-py3_10-gcc11-full-debug-build-only:
    name: linux-jammy-py3.10-gcc11-full-debug-build-only
    uses: ./.github/workflows/_linux-build.yml
    needs: get-label-type
    with:
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      runner: linux.2xlarge.memory
      build-environment: linux-jammy-py3.10-gcc11-full-debug-build-only
      docker-image-name: ci-image:pytorch-linux-jammy-py3.10-gcc11
    secrets: inherit

.github/workflows/vllm.yml (6 changed lines, vendored)

@@ -42,11 +42,11 @@ jobs:
      build-external-packages: "vllm"
      build-environment: linux-jammy-cuda12.8-py3.12-gcc11
      docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3.12-gcc11-vllm
      cuda-arch-list: '8.0 8.9 9.0'
      cuda-arch-list: '8.0;8.9;9.0'
      runner: linux.24xlarge.memory
      test-matrix: |
        { include: [
          { config: "vllm_basic_correctness_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
          { config:  "vllm_basic_correctness_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
          { config: "vllm_basic_models_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
          { config: "vllm_entrypoints_test", shard: 1, num_shards: 1,runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
          { config: "vllm_regression_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
@@ -54,7 +54,7 @@ jobs:
          { config: "vllm_pytorch_compilation_unit_tests", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
          { config: "vllm_lora_28_failure_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
          { config: "vllm_multi_model_test_28_failure_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu"},
          { config: "vllm_language_model_test_extended_generation_28_failure_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu"},
          { config: "vllm_languagde_model_test_extended_generation_28_failure_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu"},
          { config: "vllm_distributed_test_2_gpu_28_failure_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
          { config: "vllm_lora_test", shard: 0, num_shards: 4, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
          { config: "vllm_lora_test", shard: 1, num_shards: 4, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },

.github/workflows/xpu.yml (4 changed lines, vendored)

@@ -35,7 +35,7 @@ jobs:
      runner_prefix: ${{ needs.get-label-type.outputs.label-type }}
      build-environment: linux-jammy-xpu-n-1-py3.10
      docker-image-name: ci-image:pytorch-linux-jammy-xpu-n-1-py3
      runner: linux.c7i.12xlarge
      runner: linux.12xlarge
      test-matrix: |
        { include: [
          { config: "default", shard: 1, num_shards: 6, runner: "linux.idc.xpu" },
@@ -56,7 +56,7 @@ jobs:
      runner_prefix: ${{ needs.get-label-type.outputs.label-type }}
      build-environment: linux-jammy-xpu-n-py3.10
      docker-image-name: ci-image:pytorch-linux-jammy-xpu-n-py3
      runner: linux.c7i.12xlarge
      runner: linux.12xlarge
      test-matrix: |
        { include: [
          { config: "default", shard: 1, num_shards: 8, runner: "linux.idc.xpu" },

.gitignore (3 changed lines, vendored)

@@ -88,7 +88,7 @@ torch_compile_debug/
# Listed manually because some files in this directory are not generated
torch/testing/_internal/generated/annotated_fn_args.py
torch/testing/_internal/data/*.pt
torch/headeronly/version.h
torch/csrc/api/include/torch/version.h
torch/csrc/cudnn/cuDNN.cpp
torch/csrc/generated
torch/csrc/generic/TensorMethods.cpp
@@ -395,4 +395,3 @@ android/pytorch_android_torchvision/.cxx
CLAUDE.local.md
/test_*.py
/debug_*.py
CLAUDE_CONTEXT/

@@ -28,7 +28,6 @@ exclude_patterns = [
    'torch/lib/**',
    'venv/**',
    '**/*.pyi',
    "tools/experimental/torchfuzz/**",
    'tools/test/test_selective_build.py',
]
command = [
@@ -198,7 +197,7 @@ exclude_patterns = [
    'tools/test/gen_operators_yaml_test.py',
    'tools/test/gen_oplist_test.py',
    'tools/test/test_selective_build.py',
    'tools/experimental/torchfuzz/**',
    'tools/experimental/dynamic_shapes/torchfuzz/**',
]
command = [
    'python3',
@@ -1573,7 +1572,6 @@ exclude_patterns = [
    'torch/_inductor/fx_passes/serialized_patterns/**',
    'torch/_inductor/autoheuristic/artifacts/**',
    'test/dynamo/cpython/**',
    'test/test_torchfuzz_repros.py',
    'scripts/**',
    'third_party/**',
    'fb/**',

@@ -13,9 +13,6 @@ load(":build_variables.bzl", "jit_core_sources", "lazy_tensor_ts_sources", "libt
load(":ufunc_defs.bzl", "aten_ufunc_generated_cpu_kernel_sources", "aten_ufunc_generated_cpu_sources", "aten_ufunc_generated_cuda_sources")
load("//:tools/bazel.bzl", "rules")

# Export files for use by torch/headeronly (where version.h generation now lives)
exports_files(["version.txt"])

define_targets(rules = rules)

COMMON_COPTS = [
@@ -693,9 +690,7 @@ cc_library(
            "torch/csrc/*/generated/*.h",
            "torch/csrc/jit/serialization/mobile_bytecode_generated.h",
        ] + torch_cuda_headers,
    ) + GENERATED_AUTOGRAD_CPP + [
        "//torch/headeronly:version_h",
    ],
    ) + GENERATED_AUTOGRAD_CPP + [":version_h"],
    includes = [
        "third_party/kineto/libkineto/include",
        "torch/csrc",

@@ -388,9 +388,9 @@ cmake_dependent_option(USE_PRIORITIZED_TEXT_FOR_LD "Use prioritized text linker

option(USE_MIMALLOC "Use mimalloc" OFF)
# Enable third party mimalloc library to improve memory allocation performance
# on Windows and AArch64.
# on Windows.
option(USE_MIMALLOC_ON_MKL "Use mimalloc on MKL" OFF)
if(WIN32 OR (CPU_AARCH64 AND NOT APPLE))
if(WIN32)
  set(USE_MIMALLOC ON)

  # Not enable USE_MIMALLOC_ON_MKL due to it caused issue:

@@ -50,10 +50,11 @@ RUN git submodule update --init --recursive
FROM conda as conda-installs
ARG PYTHON_VERSION=3.11
ARG CUDA_PATH=cu121
ARG CUDA_CHANNEL=nvidia
ARG INSTALL_CHANNEL=whl/nightly
# Automatically set by buildx
# pinning version of conda here see: https://github.com/pytorch/pytorch/issues/164574
RUN /opt/conda/bin/conda install -y python=${PYTHON_VERSION} conda=25.7.0
RUN /opt/conda/bin/conda update -y -n base -c defaults conda
RUN /opt/conda/bin/conda install -y python=${PYTHON_VERSION}

ARG TARGETPLATFORM

@@ -28,19 +28,4 @@ inline std::ostream& operator<<(std::ostream& stream, at::BlasBackend backend) {
  return stream << BlasBackendToString(backend);
}

namespace blas {

enum class ScalingType : std::uint8_t {
  TensorWise, // fp32 scales
  RowWise, // fp32 scales
  BlockWise1x16, // fp8_e4m3fn scales
  BlockWise1x32, // fp8_e8m0fnu scales
  BlockWise1x128, // fp32 scales
  BlockWise128x128, // fp32 scales
};

enum class SwizzleType : std::uint8_t { NO_SWIZZLE = 0, SWIZZLE_32_4_4 = 1 };

} // namespace blas

} // namespace at

@@ -144,7 +144,8 @@ inline std::string _all_equal_numel_error(at::ArrayRef<Tensor> tensors) {
inline bool _apply_preamble(ArrayRef<Tensor> tensors) {
  checkDeviceType("CPU_tensor_apply", tensors, kCPU);
  checkLayout("CPU_tensor_apply", tensors, kStrided);
  TORCH_CHECK(_all_equal_numel(tensors), _all_equal_numel_error(tensors));
  if (!_all_equal_numel(tensors))
    TORCH_CHECK(false, _all_equal_numel_error(tensors));
  // An empty tensor has no elements
  for (auto& t : tensors)
    if (t.numel() == 0)

@@ -40,6 +40,41 @@ namespace {
                ->conv
                ->rnn
*/
const std::map<std::string, std::vector<std::string>> _fp32_precisions = {
    {"generic", {{"ieee", "tf32", "bf16", "none"}}},
    {"mkldnn", {{"ieee", "tf32", "bf16", "none"}}},
    {"cuda", {{"ieee", "tf32", "none"}}}};

// Check whether the backend and op are legal
void check_fp32_prec_backend_and_op(
    const std::string& backend,
    const std::string& op) {
  static std::vector<std::string> backends = {"generic", "mkldnn", "cuda"};
  static std::vector<std::string> operators = {"conv", "matmul", "rnn", "all"};
  TORCH_CHECK(
      std::find(backends.begin(), backends.end(), backend) != backends.end(),
      "Invalid backend: ",
      backend);
  TORCH_CHECK(
      std::find(operators.begin(), operators.end(), op) != operators.end(),
      "Invalid operator: ",
      op);
  if (backend == "generic") {
    TORCH_CHECK(op == "all", "Invalid operation for generic backend: ", op);
  }
}

// Return whether the precision is supported by backends
bool validate_fp32_prec(
    const std::string& backend,
    const std::string& precision) {
  auto iterp = _fp32_precisions.find(backend);
  TORCH_CHECK(iterp != _fp32_precisions.end());
  auto precisions = iterp->second;
  bool valid = std::find(precisions.begin(), precisions.end(), precision) !=
      precisions.end();
  return valid;
}

C10_ALWAYS_INLINE void warn_deprecated_fp32_precision_api(){
  TORCH_WARN_ONCE(
@@ -51,54 +86,6 @@ namespace {
}
} // namespace

Float32Backend str2backend(const std::string& name) {
  if (name == "generic")
    return Float32Backend::GENERIC;
  else if (name == "cuda")
    return Float32Backend::CUDA;
  else if (name == "mkldnn")
    return Float32Backend::MKLDNN;
  TORCH_CHECK(false, "Unknown backend: ", name);
}

Float32Op str2op(const std::string& name) {
  if (name == "all")
    return Float32Op::ALL;
  else if (name == "conv")
    return Float32Op::CONV;
  else if (name == "rnn")
    return Float32Op::RNN;
  else if (name == "matmul")
    return Float32Op::MATMUL;
  TORCH_CHECK(false, "Unknown op: ", name);
}

Float32Precision str2precision(const std::string& name) {
  if (name == "none")
    return Float32Precision::NONE;
  else if (name == "ieee")
    return Float32Precision::IEEE;
  else if (name == "tf32")
    return Float32Precision::TF32;
  else if (name == "bf16")
    return Float32Precision::BF16;
  TORCH_CHECK(false, "Unknown precision: ", name);
}

std::string precision2str(Float32Precision prec) {
  switch (prec) {
    case Float32Precision::NONE:
      return "none";
    case Float32Precision::IEEE:
      return "ieee";
    case Float32Precision::TF32:
      return "tf32";
    case Float32Precision::BF16:
      return "bf16";
  }
  TORCH_CHECK(false, "Invalid enum Float32Precision(", static_cast<int>(prec), ")");
}

Context::Context() = default;

// TODO: This could be bad juju if someone calls globalContext() in the
@@ -192,10 +179,10 @@ void Context::setUserEnabledNNPACK(bool e) {
  enabled_nnpack = e;
}

bool Context::allowTF32CuDNN(std::optional<Float32Op> op) const {
  if (!op.has_value()) {
    bool allow_tf32_rnn = float32Precision(Float32Backend::CUDA, Float32Op::RNN) == Float32Precision::TF32;
    bool allow_tf32_conv = float32Precision(Float32Backend::CUDA, Float32Op::CONV) == Float32Precision::TF32;
bool Context::allowTF32CuDNN(const std::string& op) const {
  if (op.empty()){
    bool allow_tf32_rnn = float32Precision("cuda", "rnn") == "tf32";
    bool allow_tf32_conv = float32Precision("cuda", "conv") == "tf32";
    TORCH_CHECK(
        allow_tf32_rnn == allow_tf32_conv && allow_tf32_rnn == allow_tf32_cudnn,
        "PyTorch is checking whether allow_tf32 is enabled for cuDNN without a specific operator name,",
@@ -204,15 +191,15 @@ bool Context::allowTF32CuDNN(const std::string& op) const {
        "We suggest only using the new API to set the TF32 flag(s). See also: ",
        "https://pytorch.org/docs/main/notes/cuda.html#tensorfloat-32-tf32-on-ampere-and-later-devices");
  } else {
    return float32Precision(Float32Backend::CUDA, op.value()) == Float32Precision::TF32;
    return float32Precision("cuda", op) == "tf32";
  }
  warn_deprecated_fp32_precision_api();
  return allow_tf32_cudnn;
}

void Context::setAllowTF32CuDNN(bool b) {
  setFloat32Precision(Float32Backend::CUDA, Float32Op::RNN, b ? Float32Precision::TF32 : Float32Precision::NONE);
  setFloat32Precision(Float32Backend::CUDA, Float32Op::CONV, b ? Float32Precision::TF32 : Float32Precision::NONE);
  setFloat32Precision("cuda", "rnn", b ? "tf32" : "none");
  setFloat32Precision("cuda", "conv", b ? "tf32" : "none");
  allow_tf32_cudnn = b;
  warn_deprecated_fp32_precision_api();
}
@@ -292,6 +279,42 @@ bool Context::userEnabledOverrideableSDP() const {
  return enabled_overrideable;
}

static constexpr const auto cublas_config_var_name = "CUBLAS_WORKSPACE_CONFIG";
static constexpr const std::array<const char*, 2> cublas_deterministic_configs = {":4096:8", ":16:8"};

bool Context::checkCuBLASConfigDeterministic() {
  // If using CUDA 10.2 or greater, need to make sure CuBLAS workspace config
  // is set to deterministic setting
  if (hasCUDART()) {
    const auto workspace_config = c10::utils::get_env(cublas_config_var_name);
    return (workspace_config == cublas_deterministic_configs[0] || workspace_config == cublas_deterministic_configs[1]);
  }
  return true;
}

void Context::alertCuBLASConfigNotDeterministic() const {
  static const bool cublas_config_deterministic = checkCuBLASConfigDeterministic();
  if (C10_LIKELY(!deterministicAlgorithms() || cublas_config_deterministic)) {
    return;
  }

  auto msg = c10::str(
    "Deterministic behavior was enabled with either `torch.use_deterministic_algorithms(True)` or ",
    "`at::Context::setDeterministicAlgorithms(true)`, but this operation is not deterministic because ",
    "it uses CuBLAS and you have CUDA >= 10.2. To enable deterministic behavior in this ",
    "case, you must set an environment variable before running your PyTorch application: ",
    cublas_config_var_name, "=", cublas_deterministic_configs[0], " or ",
    cublas_config_var_name, "=", cublas_deterministic_configs[1], ". For more information, go to ",
    "https://docs.nvidia.com/cuda/cublas/index.html#results-reproducibility"
  );

  if (deterministicAlgorithmsWarnOnly()) {
    TORCH_WARN(msg);
  } else {
    TORCH_CHECK(false, msg);
  }
}

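Aside: the deterministic check above only accepts the two CUBLAS_WORKSPACE_CONFIG values that the cuBLAS documentation lists as reproducible. A standalone C++ sketch of the same test, assuming plain std::getenv in place of c10::utils::get_env and omitting the hasCUDART() probe (names here are ours, not PyTorch's):

#include <array>
#include <cstdio>
#include <cstdlib>
#include <string>

// Returns true only when CUBLAS_WORKSPACE_CONFIG matches one of the two
// documented deterministic settings. (The real checkCuBLASConfigDeterministic
// also returns true when no CUDA runtime is present.)
static bool workspace_config_is_deterministic() {
  static const std::array<const char*, 2> ok = {":4096:8", ":16:8"};
  const char* cfg = std::getenv("CUBLAS_WORKSPACE_CONFIG");
  if (cfg == nullptr) return false; // unset counts as nondeterministic
  return std::string(cfg) == ok[0] || std::string(cfg) == ok[1];
}

int main() {
  std::printf("deterministic cuBLAS config: %s\n",
              workspace_config_is_deterministic() ? "yes" : "no");
}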
bool Context::benchmarkCuDNN() const {
  return benchmark_cudnn;
}
@@ -318,7 +341,7 @@ void Context::setImmediateMiopen(bool b) {

bool Context::allowTF32CuBLAS() const {
  bool legacy_allow_tf32 = float32_matmul_precision != at::Float32MatmulPrecision::HIGHEST;
  bool allow_tf32_new = float32Precision(Float32Backend::CUDA, Float32Op::MATMUL) == Float32Precision::TF32;
  bool allow_tf32_new = float32Precision("cuda", "matmul") == "tf32";
  TORCH_CHECK(
      legacy_allow_tf32 == allow_tf32_new,
      "PyTorch is checking whether allow_tf32_new is enabled for cuBlas matmul,",
@@ -331,17 +354,17 @@ bool Context::allowTF32CuBLAS() const {

void Context::setAllowTF32CuBLAS(bool b) {
  float32_matmul_precision = b ? at::Float32MatmulPrecision::HIGH : at::Float32MatmulPrecision::HIGHEST;
  setFloat32Precision(Float32Backend::CUDA, Float32Op::MATMUL, b ? Float32Precision::TF32 : Float32Precision::IEEE);
  setFloat32Precision("cuda", "matmul", b ? "tf32" : "ieee");
}

Float32MatmulPrecision Context::float32MatmulPrecision() const {
  bool invalid = float32Precision(Float32Backend::CUDA, Float32Op::MATMUL) == Float32Precision::TF32 &&
  bool invalid = float32Precision("cuda", "matmul") == "tf32" &&
      float32_matmul_precision == at::Float32MatmulPrecision::HIGHEST;
  invalid = invalid ||
      (float32Precision(Float32Backend::MKLDNN, Float32Op::MATMUL) == Float32Precision::BF16 &&
      (float32Precision("mkldnn", "matmul") == "bf16" &&
       float32_matmul_precision != at::Float32MatmulPrecision::MEDIUM);
  invalid = invalid ||
      (float32Precision(Float32Backend::MKLDNN, Float32Op::MATMUL) == Float32Precision::TF32 &&
      (float32Precision("mkldnn", "matmul") == "tf32" &&
       float32_matmul_precision != at::Float32MatmulPrecision::HIGH);
  TORCH_CHECK(
      !invalid,
@@ -353,26 +376,15 @@ Float32MatmulPrecision Context::float32MatmulPrecision() const {
  return float32_matmul_precision;
}

Float32Precision Context::float32Precision(Float32Backend backend, Float32Op op) const {
  std::pair<Float32Backend, Float32Op> key{backend, op};
  auto it = fp32_precision.find(key);
  TORCH_CHECK(it != fp32_precision.end(), "Invalid (backend, op) pair: (", backend, ", ", op, ")");

  Float32Precision precision = it->second;
  if (precision == Float32Precision::NONE) {
    key.second = Float32Op::ALL;
    precision = fp32_precision.find(key)->second;
  }
  if (precision == Float32Precision::NONE) {
    key.first = Float32Backend::GENERIC;
    precision = fp32_precision.find(key)->second;
  }

  // "cuda" does not support "bf16"
  if (backend == Float32Backend::CUDA && precision == Float32Precision::BF16) {
    return Float32Precision::NONE;
  }
  return precision;
std::string Context::float32Precision(const std::string& backend, const std::string& op) const {
  check_fp32_prec_backend_and_op(backend, op);
  auto precision = fp32_precision.find(backend)->second.find(op)->second;
  if (precision == "none")
    precision = fp32_precision.find(backend)->second.find("all")->second;
  if (precision == "none")
    precision = fp32_precision.find("generic")->second.find("all")->second;
  bool valid_prec = validate_fp32_prec(backend, precision);
  return valid_prec ? precision : "none";
}

void Context::setFloat32MatmulPrecision(const std::string &s) {
@@ -381,18 +393,18 @@ void Context::setFloat32MatmulPrecision(const std::string &s) {
    // TODO: consider if CuDNN field needs to also be set for potential future CuDNN ops like multi-headed attention
    if (s_ == "highest") {
      float32_matmul_precision = at::Float32MatmulPrecision::HIGHEST;
      setFloat32Precision(Float32Backend::CUDA, Float32Op::MATMUL, Float32Precision::IEEE);
      setFloat32Precision(Float32Backend::MKLDNN, Float32Op::MATMUL, Float32Precision::IEEE);
      setFloat32Precision("cuda", "matmul", "ieee");
      setFloat32Precision("mkldnn", "matmul", "ieee");
      return true;
    } else if (s_ == "high") {
      float32_matmul_precision = at::Float32MatmulPrecision::HIGH;
      setFloat32Precision(Float32Backend::CUDA, Float32Op::MATMUL, Float32Precision::TF32);
      setFloat32Precision(Float32Backend::MKLDNN, Float32Op::MATMUL, Float32Precision::TF32);
      setFloat32Precision("cuda", "matmul", "tf32");
      setFloat32Precision("mkldnn", "matmul", "tf32");
      return true;
    } else if (s_ == "medium") {
      float32_matmul_precision = at::Float32MatmulPrecision::MEDIUM;
      setFloat32Precision(Float32Backend::CUDA, Float32Op::MATMUL, Float32Precision::TF32);
      setFloat32Precision(Float32Backend::MKLDNN, Float32Op::MATMUL, Float32Precision::BF16);
      setFloat32Precision("cuda", "matmul", "tf32");
      setFloat32Precision("mkldnn", "matmul", "bf16");
      return true;
    }
    return false;
@@ -406,16 +418,25 @@ void Context::setFloat32MatmulPrecision(const std::string &s) {
    "setFloat32MatmulPrecision call has no effect.");
}

void Context::setFloat32Precision(Float32Backend backend, Float32Op op, Float32Precision p) {
  auto it = fp32_precision.find(std::make_pair(backend, op));
  TORCH_CHECK(
      it != fp32_precision.end(),
      "Invalid (backend, op) pair: (", backend, ", ", op, ")");
  TORCH_CHECK(
      !(backend == Float32Backend::CUDA && p == Float32Precision::BF16),
      "backend 'cuda' does not support precision 'bf16'");

  it->second = p;
void Context::setFloat32Precision(const std::string& backend, const std::string& op, const std::string& p) {
  check_fp32_prec_backend_and_op(backend, op);
  if (validate_fp32_prec(backend, p)) {
    fp32_precision[backend][op] = p;
  } else {
    std::string msg;
    auto iterp = _fp32_precisions.find(backend);
    TORCH_CHECK(iterp != _fp32_precisions.end());
    for (const auto& p : iterp->second) {
      msg += p;
      msg += " ";
    }
    TORCH_WARN(
        "you have set wrong precision for backend:",
        backend,
        " setFloat32Precision call has no effect.",
        "Please choose precision from: ",
        msg);
  }
}

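For reference, a minimal caller of the string-keyed precision API in this hunk. The member signatures are taken verbatim from the diff; the demo function and the commented return values are illustrative only, and the rendered compare does not mark which side of the diff this API belongs to:

#include <ATen/Context.h>
#include <iostream>

void demo_fp32_precision() {
  auto& ctx = at::globalContext();
  // Direct hit: "cuda"/"matmul" has its own entry in fp32_precision.
  ctx.setFloat32Precision("cuda", "matmul", "tf32");
  std::cout << ctx.float32Precision("cuda", "matmul") << "\n"; // "tf32"
  // Fallback: an op left at "none" resolves via <backend>/"all" and then
  // via "generic"/"all", exactly as float32Precision() above implements.
  std::cout << ctx.float32Precision("mkldnn", "conv") << "\n"; // "none" by default
}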
at::LinalgBackend Context::linalgPreferredBackend() const {
@@ -483,8 +504,8 @@ at::BlasBackend Context::blasPreferredBackend() {
#if ROCM_VERSION >= 60300
          "gfx1100", "gfx1101", "gfx1200", "gfx1201", "gfx908",
#endif
#if ROCM_VERSION >= 70000
          "gfx950", "gfx1150", "gfx1151"
#if ROCM_VERSION >= 60500
          "gfx950"
#endif
      };
      for (auto index: c10::irange(detail::getCUDAHooks().deviceCount())) {
@@ -587,33 +608,20 @@ void Context::setROCmFAPreferredBackend(at::ROCmFABackend b) {
  rocm_fa_preferred_backend = b;
}

CuBLASReductionOption Context::allowFP16ReductionCuBLAS() const {
bool Context::allowFP16ReductionCuBLAS() const {
  return allow_fp16_reduction_cublas;
}

CuBLASReductionOption inline get_reduction_option(bool allow_reduced_precision, bool allow_splitk) {
  TORCH_CHECK(
      !(allow_reduced_precision && !allow_splitk),
      "allow_splitk=False is not supported when reduced precision reductions are enabled");
  if (allow_reduced_precision) {
    return CuBLASReductionOption::AllowReducedPrecisionWithSplitK;
  } else if (allow_splitk) {
    return CuBLASReductionOption::DisallowReducedPrecisionAllowSplitK;
  } else {
    return CuBLASReductionOption::DisallowReducedPrecisionDisallowSplitK;
  }
void Context::setAllowFP16ReductionCuBLAS(bool b) {
  allow_fp16_reduction_cublas = b;
}

void Context::setAllowFP16ReductionCuBLAS(bool allow_reduced_precision, bool allow_splitk) {
  allow_fp16_reduction_cublas = get_reduction_option(allow_reduced_precision, allow_splitk);
}

CuBLASReductionOption Context::allowBF16ReductionCuBLAS() const {
bool Context::allowBF16ReductionCuBLAS() const {
  return allow_bf16_reduction_cublas;
}

void Context::setAllowBF16ReductionCuBLAS(bool allow_reduced_precision, bool allow_splitk) {
  allow_bf16_reduction_cublas = get_reduction_option(allow_reduced_precision, allow_splitk);
void Context::setAllowBF16ReductionCuBLAS(bool b) {
  allow_bf16_reduction_cublas = b;
}

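One side of this hunk models the reduction policy as CuBLASReductionOption, the other as plain bool toggles. With the bool variant shown here, usage reduces to the following hypothetical snippet (the setter and getter signatures are from the diff; the demo function is ours):

#include <ATen/Context.h>

void demo_reduction_flags() {
  auto& ctx = at::globalContext();
  ctx.setAllowFP16ReductionCuBLAS(false); // require full-precision fp16 reductions
  ctx.setAllowBF16ReductionCuBLAS(true);  // allow reduced-precision bf16 reductions
  bool fp16_reduced_ok = ctx.allowFP16ReductionCuBLAS();
  (void)fp16_reduced_ok; // query result would feed kernel selection
}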
bool Context::allowFP16AccumulationCuBLAS() const {

@@ -25,13 +25,11 @@
#include <c10/util/CallOnce.h>
#include <c10/util/Exception.h>
#include <c10/util/env.h>
#include <c10/util/hash.h>
#include <c10/util/irange.h>

#include <cstdint>
#include <map>
#include <mutex>
#include <unordered_map>

namespace at {

@@ -39,20 +37,6 @@ class Tensor;

enum class TORCH_API Float32MatmulPrecision { HIGHEST, HIGH, MEDIUM };

enum class CuBLASReductionOption : uint8_t {
  AllowReducedPrecisionWithSplitK = 0,
  DisallowReducedPrecisionAllowSplitK = 1,
  DisallowReducedPrecisionDisallowSplitK = 2,
};
enum class TORCH_API Float32Backend { GENERIC, CUDA, MKLDNN };
enum class TORCH_API Float32Op { ALL, CONV, RNN, MATMUL };
enum class TORCH_API Float32Precision { NONE, IEEE, TF32, BF16 };

TORCH_API Float32Backend str2backend(const std::string& name);
TORCH_API Float32Op str2op(const std::string& name);
TORCH_API Float32Precision str2precision(const std::string& name);
TORCH_API std::string precision2str(Float32Precision prec);

class TORCH_API Context {
 public:
  Context();
@@ -226,15 +210,15 @@ class TORCH_API Context {
  bool userEnabledMkldnn() const;
  void setUserEnabledMkldnn(bool e);
  bool benchmarkCuDNN() const;
  void setBenchmarkCuDNN(bool /*b*/);
  void setBenchmarkCuDNN(bool);
  int benchmarkLimitCuDNN() const;
  void setBenchmarkLimitCuDNN(int /*b*/);
  void setBenchmarkLimitCuDNN(int);
  bool immediateMiopen() const;
  void setImmediateMiopen(bool /*b*/);
  void setImmediateMiopen(bool);
  bool deterministicCuDNN() const;
  void setDeterministicCuDNN(bool /*b*/);
  void setDeterministicCuDNN(bool);
  bool deterministicMkldnn() const;
  void setDeterministicMkldnn(bool /*b*/);
  void setDeterministicMkldnn(bool);
  bool userEnabledNNPACK() const;
  void setUserEnabledNNPACK(bool e);

@@ -252,32 +236,32 @@ class TORCH_API Context {
  void setSDPPriorityOrder(const std::vector<int64_t>& order);
  std::array<at::SDPBackend, at::num_sdp_backends> sDPPriorityOrder();

  void setSDPUseFlash(bool /*e*/);
  void setSDPUseFlash(bool);
  bool userEnabledFlashSDP() const;

  void setSDPUseMemEfficient(bool /*e*/);
  void setSDPUseMemEfficient(bool);
  bool userEnabledMemEfficientSDP() const;

  void setSDPUseMath(bool /*e*/);
  void setSDPUseMath(bool);
  bool userEnabledMathSDP() const;

  void setSDPUseCuDNN(bool /*e*/);
  void setSDPUseCuDNN(bool);
  bool userEnabledCuDNNSDP() const;

  void setAllowFP16BF16ReductionMathSDP(bool /*e*/);
  void setAllowFP16BF16ReductionMathSDP(bool);
  bool allowFP16BF16ReductionMathSDP() const;

  void setSDPUseOverrideable(bool /*e*/);
  void setSDPUseOverrideable(bool);
  bool userEnabledOverrideableSDP() const;

  at::LinalgBackend linalgPreferredBackend() const;
  void setLinalgPreferredBackend(at::LinalgBackend /*b*/);
  void setLinalgPreferredBackend(at::LinalgBackend);

  at::BlasBackend blasPreferredBackend();
  void setBlasPreferredBackend(at::BlasBackend /*b*/);
  void setBlasPreferredBackend(at::BlasBackend);

  at::ROCmFABackend getROCmFAPreferredBackend();
  void setROCmFAPreferredBackend(at::ROCmFABackend /*b*/);
  void setROCmFAPreferredBackend(at::ROCmFABackend);

  // Note [Enabling Deterministic Operations]
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -310,9 +294,9 @@ class TORCH_API Context {

  bool deterministicAlgorithms() const;
  bool deterministicAlgorithmsWarnOnly() const;
  void setDeterministicAlgorithms(bool /*b*/, bool /*warn_only*/);
  void setDeterministicAlgorithms(bool, bool);
  bool deterministicFillUninitializedMemory() const;
  void setDeterministicFillUninitializedMemory(bool /*b*/);
  void setDeterministicFillUninitializedMemory(bool);

  // Note [Writing Nondeterministic Operations]
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -326,7 +310,13 @@ class TORCH_API Context {
  //
  // * Throw an error when `Context::deterministicAlgorithms()` is true. Most
  //   of the time, this should be accomplished by calling
  //   `at::globalContext().alertNotDeterminstic().
  //   `at::globalContext().alertNotDeterminstic()`.  However, if the
  //   nondeterministic behavior is caused by the CuBLAS workspace
  //   configuration in CUDA >= 10.2,
  //   `at::globalContext().alertCuBLASConfigNotDeterministic()` should be
  //   called instead (in this case, a comment explaining why the operation is
  //   nondeterministic is not necessary). See below for details on these
  //   methods.
  //
  // * Have an entry in the list of nondeterministic PyTorch operations in the
  //   docstring of `use_deterministic_algorithms()` in torch/__init__.py
@@ -350,29 +340,33 @@ class TORCH_API Context {
  // Throws an error if `Context::deterministicAlgorithms()` is true
  static void alertNotDeterministic(std::string_view const& caller);

  // Throws an error if `Context::deterministicAlgorithms()` is true, CUDA
  // >= 10.2, and CUBLAS_WORKSPACE_CONFIG is not set to either ":16:8" or
  // ":4096:8". For more details:
  // https://docs.nvidia.com/cuda/cublas/index.html#results-reproducibility
  void alertCuBLASConfigNotDeterministic() const;

  void setFloat32MatmulPrecision(const std::string& s);
  void setFloat32Precision(
      Float32Backend backend,
      Float32Op op,
      Float32Precision p);
  bool allowTF32CuDNN(std::optional<Float32Op> op = std::nullopt) const;
  void setAllowTF32CuDNN(bool /*b*/);
      const std::string& backend,
      const std::string& op,
      const std::string& s);
  bool allowTF32CuDNN(const std::string& op = std::string()) const;
  void setAllowTF32CuDNN(bool);
  bool allowTF32OneDNN() const;
  void setAllowTF32OneDNN(bool /*b*/);
  void setAllowTF32OneDNN(bool);
  bool allowTF32CuBLAS() const;
  void setAllowTF32CuBLAS(bool /*b*/);
  void setAllowTF32CuBLAS(bool);
  Float32MatmulPrecision float32MatmulPrecision() const;
  Float32Precision float32Precision(Float32Backend backend, Float32Op op) const;
  CuBLASReductionOption allowFP16ReductionCuBLAS() const;
  void setAllowFP16ReductionCuBLAS(
      bool allow_reduced_precision,
      bool allow_splitk = true);
  CuBLASReductionOption allowBF16ReductionCuBLAS() const;
  void setAllowBF16ReductionCuBLAS(
      bool allow_reduced_precision,
      bool allow_splitk = true);
  std::string float32Precision(
      const std::string& backend,
      const std::string& op) const;
  bool allowFP16ReductionCuBLAS() const;
  void setAllowFP16ReductionCuBLAS(bool);
  bool allowBF16ReductionCuBLAS() const;
  void setAllowBF16ReductionCuBLAS(bool);
  bool allowFP16AccumulationCuBLAS() const;
  void setAllowFP16AccumulationCuBLAS(bool /*b*/);
  void setAllowFP16AccumulationCuBLAS(bool);

  // Matmuls can use a so-called "persistent" kernel which launches one CUDA
  // block for each SM on the GPU, and each block then iterates over multiple
@@ -384,7 +378,7 @@ class TORCH_API Context {
  // to make matmuls target only a subset of the SMs, so they can fully schedule
  // even next to a comms kernel, and only be a few percent slower.
  std::optional<int32_t> _SMCarveout_EXPERIMENTAL() const;
  void _setSMCarveout_EXPERIMENTAL(std::optional<int32_t> /*c*/);
  void _setSMCarveout_EXPERIMENTAL(std::optional<int32_t>);

  at::QEngine qEngine() const;
  void setQEngine(at::QEngine e);
@@ -405,7 +399,7 @@ class TORCH_API Context {
  void setDefaultMobileCPUAllocator();
  void unsetDefaultMobileCPUAllocator();
  bool allowFP16ReductionCPU() const;
  void setAllowFP16ReductionCPU(bool /*b*/);
  void setAllowFP16ReductionCPU(bool);

  // Preserved for BC
  void lazyInitCUDA() {
@@ -435,6 +429,7 @@ class TORCH_API Context {
  }

 private:
  static bool checkCuBLASConfigDeterministic();
  std::array<c10::once_flag, at::COMPILE_TIME_MAX_DEVICE_TYPES> init_;
  bool enabled_cudnn = true;
  bool deterministic_cudnn = false;
@@ -462,10 +457,8 @@ class TORCH_API Context {
      : at::Float32MatmulPrecision::HIGHEST;
  int benchmark_limit_cudnn = 10;
  bool allow_tf32_cudnn = true;
  CuBLASReductionOption allow_fp16_reduction_cublas =
      CuBLASReductionOption::AllowReducedPrecisionWithSplitK;
  CuBLASReductionOption allow_bf16_reduction_cublas =
      CuBLASReductionOption::AllowReducedPrecisionWithSplitK;
  bool allow_fp16_reduction_cublas = true;
  bool allow_bf16_reduction_cublas = true;
  bool allow_fp16_accumulation_cublas = false;
  std::optional<int32_t> sm_carveout = std::nullopt;
  bool enabled_mkldnn = true;
@@ -495,20 +488,21 @@ class TORCH_API Context {
  bool enable_sparse_tensor_invariant_checks = false;
  bool allow_fp16_reduction_cpu = false;

  using Key = std::pair<Float32Backend, Float32Op>;
  std::unordered_map<Key, Float32Precision, c10::hash<Key>> fp32_precision = {
      {{Float32Backend::GENERIC, Float32Op::ALL}, Float32Precision::NONE},
      {{Float32Backend::MKLDNN, Float32Op::ALL}, Float32Precision::NONE},
      {{Float32Backend::MKLDNN, Float32Op::CONV}, Float32Precision::NONE},
      {{Float32Backend::MKLDNN, Float32Op::RNN}, Float32Precision::NONE},
      {{Float32Backend::MKLDNN, Float32Op::MATMUL}, Float32Precision::NONE},
      {{Float32Backend::CUDA, Float32Op::ALL}, Float32Precision::NONE},
      {{Float32Backend::CUDA, Float32Op::CONV}, Float32Precision::TF32},
      {{Float32Backend::CUDA, Float32Op::RNN}, Float32Precision::TF32},
      {{Float32Backend::CUDA, Float32Op::MATMUL},
       float32_matmul_precision == at::Float32MatmulPrecision::HIGHEST
           ? Float32Precision::NONE
           : Float32Precision::TF32},
  std::map<std::string, std::map<std::string, std::string>> fp32_precision = {
      {"generic", {{"all", "none"}}},
      {"mkldnn",
       {{"matmul", "none"},
        {"conv", "none"},
        {"rnn", "none"},
        {"all", "none"}}},
      {"cuda",
       {{"matmul",
         float32_matmul_precision == at::Float32MatmulPrecision::HIGHEST
             ? "none"
             : "tf32"},
        {"conv", "tf32"},
        {"rnn", "tf32"},
        {"all", "none"}}},
  };

  Allocator* prev_allocator_ptr_{nullptr};
@@ -690,4 +684,5 @@ struct TORCH_API ROCmBackwardPassGuard {
  ~ROCmBackwardPassGuard();
  static bool is_backward_pass();
};

} // namespace at

@@ -389,16 +389,37 @@ void fillVersion<DLManagedTensorVersioned>(
// constructed out of ATen tensor
template <class T>
T* toDLPackImpl(const Tensor& src) {
  auto view = src;

  // Detect whether there is need to normalize the strides
  // Background: gh-83069
  //
  // However, normalizing strides can come at a high-cost
  // to slow down toDLPack conversion 3x, so we
  // only normalize if needed.
  //
  // The following code detects whether the src follows
  // a continuous pattern. If the src follows such pattern (common-case)
  // then we do not need to normalize the strides.
  bool need_normalize_strides = src.dim() == 1 && src.size(0) == 1 && src.stride(0) != 1;
  // less common case, try normalizing the strides
  if (need_normalize_strides) {
    // create a new tensor with possibly normalized strides
    // gh-83069
    auto shape = src.sizes();
    view = src.as_strided(shape, {1}, src.storage_offset());
  }

  ATenDLMTensor<T>* atDLMTensor(new ATenDLMTensor<T>);
  atDLMTensor->handle = src;
  atDLMTensor->handle = view;
  atDLMTensor->tensor.manager_ctx = atDLMTensor;
  atDLMTensor->tensor.deleter = &deleter<T>;
  atDLMTensor->tensor.dl_tensor.data = src.data_ptr();
  atDLMTensor->tensor.dl_tensor.data = view.data_ptr();
  atDLMTensor->tensor.dl_tensor.device = torchDeviceToDLDevice(src.device());
  atDLMTensor->tensor.dl_tensor.ndim = static_cast<int32_t>(src.dim());
  atDLMTensor->tensor.dl_tensor.dtype = getDLDataType(src);
  atDLMTensor->tensor.dl_tensor.shape = const_cast<int64_t*>(src.sizes().data());
  atDLMTensor->tensor.dl_tensor.strides = const_cast<int64_t*>(src.strides().data());
  atDLMTensor->tensor.dl_tensor.shape = const_cast<int64_t*>(view.sizes().data());
  atDLMTensor->tensor.dl_tensor.strides = const_cast<int64_t*>(view.strides().data());
  atDLMTensor->tensor.dl_tensor.byte_offset = 0;
  fillVersion(&atDLMTensor->tensor);

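The normalization in this hunk targets the gh-83069 case: a one-element tensor whose stride is not 1, which some DLPack consumers reject. A small sketch of how such a tensor arises and is exported; the demo function name is ours, while at::toDLPack is the existing exporter declared in ATen/DLConvertor.h:

#include <ATen/ATen.h>
#include <ATen/DLConvertor.h>

void demo_dlpack_stride_normalization() {
  // slice(dim=0, start=0, end=1, step=2) yields size {1} with stride {2}.
  at::Tensor t = at::zeros({2}).slice(0, 0, 1, 2);
  // With the change above, toDLPackImpl rewrites this to stride {1} via
  // as_strided before export, so the DLManagedTensor looks contiguous.
  DLManagedTensor* m = at::toDLPack(t);
  m->deleter(m); // release the exported tensor when done
}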
@@ -52,16 +52,16 @@ struct DLPackTraits {};

template <>
struct DLPackTraits<DLManagedTensor> {
  inline static constexpr const char* capsule = "dltensor";
  inline static constexpr const char* used = "used_dltensor";
  inline static const char* capsule = "dltensor";
  inline static const char* used = "used_dltensor";
  inline static auto toDLPack = at::toDLPack;
  inline static auto fromDLPack = at::fromDLPack;
};

template <>
struct DLPackTraits<DLManagedTensorVersioned> {
  inline static constexpr const char* capsule = "dltensor_versioned";
  inline static constexpr const char* used = "used_dltensor_versioned";
  inline static const char* capsule = "dltensor_versioned";
  inline static const char* used = "used_dltensor_versioned";
  inline static auto toDLPack = at::toDLPackVersioned;
  inline static auto fromDLPack = at::fromDLPackVersioned;
};

@@ -16,8 +16,8 @@ inline void check_size_nonnegative(ArrayRef<int64_t> size) {

inline void check_size_nonnegative(ArrayRef<c10::SymInt> size) {
  for (const auto& x : size) {
    TORCH_SYM_CHECK(
        x.sym_ge(0),
    TORCH_CHECK(
        x.expect_size(__FILE__, __LINE__),
        "Trying to create tensor with negative dimension ",
        x,
        ": ",
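Both sides of this hunk enforce the same invariant, differing only in how a symbolic size is proven nonnegative. In miniature, as a hypothetical standalone version over concrete sizes:

#include <cstdint>
#include <stdexcept>
#include <string>
#include <vector>

// Reject any negative dimension, mirroring the check above for plain int64_t.
void check_size_nonnegative(const std::vector<int64_t>& size) {
  for (int64_t x : size) {
    if (x < 0)
      throw std::invalid_argument(
          "Trying to create tensor with negative dimension " + std::to_string(x));
  }
}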
@@ -4,7 +4,6 @@
#include <c10/core/ScalarType.h>
#include <c10/core/SymIntArrayRef.h>
#include <c10/util/DimVector.h>
#include <c10/util/Exception.h>
#include <optional>
#include <sstream>
#include <vector>
@@ -27,7 +26,9 @@ inline void infer_size_impl(
  std::optional<int64_t> infer_dim;
  for (int64_t dim = 0, ndim = shape.size(); dim != ndim; dim++) {
    if (TORCH_GUARD_OR_FALSE(sym_eq(shape[dim], -1))) {
      TORCH_CHECK(!infer_dim, "only one dimension can be inferred");
      if (infer_dim) {
        throw std::runtime_error("only one dimension can be inferred");
      }
      infer_dim = dim;
    } else {
      // in case of unbacked shape[dim] we assume it's not -1 and add a runtime

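Restated outside of ATen, the size-inference rule in this hunk amounts to the following self-contained helper (hypothetical names, concrete int64_t sizes, no SymInt guards):

#include <cstdint>
#include <stdexcept>
#include <vector>

// Resolve a single -1 placeholder so the shape's product equals numel.
std::vector<int64_t> infer_shape(std::vector<int64_t> shape, int64_t numel) {
  int64_t known = 1;
  int64_t infer_dim = -1;
  for (size_t d = 0; d < shape.size(); ++d) {
    if (shape[d] == -1) {
      if (infer_dim != -1)
        throw std::runtime_error("only one dimension can be inferred");
      infer_dim = static_cast<int64_t>(d);
    } else {
      known *= shape[d];
    }
  }
  if (infer_dim != -1) {
    if (known == 0 || numel % known != 0)
      throw std::runtime_error("shape is invalid for input size");
    shape[infer_dim] = numel / known;
  }
  return shape;
}
// e.g. infer_shape({-1, 4}, 12) yields {3, 4}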
@@ -58,7 +58,7 @@ namespace at {
namespace{

// PyTorch allows operations to specify dim 0 and dim -1 on a scalar tensor.
bool is_allowed_dim_on_scalar_tensor(int64_t dim) {
static bool is_allowed_dim_on_scalar_tensor(int64_t dim) {
  return dim == 0 || dim == -1;
}

@@ -365,7 +365,7 @@ Tensor select_batching_rule(const Tensor& self, int64_t dim, int64_t index) {
  return self_physical.getPhysicalToLogicalMap().apply(result);
}

int64_t getGradInputPhysicalDim(int64_t dim, IntArrayRef input_sizes, int64_t num_batch_dims) {
static int64_t getGradInputPhysicalDim(int64_t dim, IntArrayRef input_sizes, int64_t num_batch_dims) {
  return maybe_wrap_dim(dim, static_cast<int64_t>(input_sizes.size())) + num_batch_dims;
}

@@ -488,7 +488,7 @@ Tensor view_as_complex_batching_rule(const Tensor& self) {
// Checks that the smallest batch stride is greater than the largest example
// stride. This is something we can support but we choose not to because it's
// potentially error prone.
void checkBatchDimsAtFrontInLayout(IntArrayRef physical_strides, int64_t num_batch_dims) {
static void checkBatchDimsAtFrontInLayout(IntArrayRef physical_strides, int64_t num_batch_dims) {
  auto smallest_batch_stride = std::min_element(
      physical_strides.begin(), physical_strides.begin() + num_batch_dims);
  auto largest_example_stride = std::max_element(
@@ -508,7 +508,7 @@ void checkBatchDimsAtFrontInLayout(IntArrayRef physical_strides, int64_t num_batch_dims) {
// given (sizes, strides, storage_offset) returns the maximum location that
// can be indexed (or nullopt if such a location doesn't exist, e.g., tensors
// with zero-size dims).
std::optional<int64_t> maximum_indexable_location(
static std::optional<int64_t> maximum_indexable_location(
    IntArrayRef sizes, IntArrayRef strides, int64_t storage_offset) {
  auto result = native::storage_size_for(sizes, strides);
  if (result == 0) {
@@ -521,7 +521,7 @@ std::optional<int64_t> maximum_indexable_location(
// This checks that the range of possible memory locations accessible by
// x.as_strided(sizes, strides, maybe_storage_offset)
// are within the bounds of possible memory locations accessible by x.
void checkBasicAsStridedValidForSlice(
static void checkBasicAsStridedValidForSlice(
    const Tensor& physical_tensor,
    int64_t num_batch_dims,
    IntArrayRef sizes,

@ -62,7 +62,7 @@ constexpr const char* unknown_eventname = "eventname not specified";
#endif
}  // namespace (anonymous)

MapAllocator::MapAllocator(WithFd /*unused*/, std::string_view filename, int fd, int flags, size_t size)
MapAllocator::MapAllocator(WithFd, std::string_view filename, int fd, int flags, size_t size)
  : filename_(filename.empty() ? unknown_filename : filename)
  , size_(0) // to be filled later
#ifdef _WIN32
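The WithFd parameter whose /*unused*/ comment this hunk drops is a dispatch tag: it exists only to select the constructor that adopts an already-open file descriptor. A generic illustration of the idiom with invented types (the real tag is defined in the MapAllocator header):

#include <string>

struct WithFdTag {};                  // empty tag type
inline constexpr WithFdTag kWithFd{};

class Mapping {
 public:
  explicit Mapping(const std::string& /*filename*/) {}                // open by name
  Mapping(WithFdTag, const std::string& /*filename*/, int /*fd*/) {}  // adopt an fd
};

// Usage: the tag makes intent explicit and picks the fd overload:
//   Mapping m(kWithFd, "/dev/shm/buf", 3);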
@ -494,7 +494,7 @@ RefcountedMapAllocator::RefcountedMapAllocator(const char *filename, int flags,

    initializeAlloc();
}
RefcountedMapAllocator::RefcountedMapAllocator(WithFd /*unused*/, const char *filename, int fd, int flags, size_t size)
RefcountedMapAllocator::RefcountedMapAllocator(WithFd, const char *filename, int fd, int flags, size_t size)
  : RefcountedMapAllocatorArgCheck(flags)
  , MapAllocator(WITH_FD, filename, flags, fd, size + map_alloc_alignment) {

@ -614,7 +614,7 @@ at::DataPtr MapAllocator::makeDataPtr(std::string_view filename, int flags, size
  return {context->data(), context, &deleteMapAllocator, at::DeviceType::CPU};
}

at::DataPtr MapAllocator::makeDataPtr(WithFd /*unused*/, const char *filename, int fd, int flags, size_t size, size_t* actual_size_out) {
at::DataPtr MapAllocator::makeDataPtr(WithFd, const char *filename, int fd, int flags, size_t size, size_t* actual_size_out) {
  auto* context = new MapAllocator(WITH_FD, filename, fd, flags, size);
  if (actual_size_out) *actual_size_out = context->size();
  return {context->data(), context, &deleteMapAllocator, at::DeviceType::CPU};
@ -626,7 +626,7 @@ at::DataPtr RefcountedMapAllocator::makeDataPtr(const char *filename, int flags,
  return {context->data(), context, &deleteRefcountedMapAllocator, at::DeviceType::CPU};
}

at::DataPtr RefcountedMapAllocator::makeDataPtr(WithFd /*unused*/, const char *filename, int fd, int flags, size_t size, size_t* actual_size_out) {
at::DataPtr RefcountedMapAllocator::makeDataPtr(WithFd, const char *filename, int fd, int flags, size_t size, size_t* actual_size_out) {
  auto* context = new RefcountedMapAllocator(WITH_FD, filename, fd, flags, size);
  if (actual_size_out) *actual_size_out = context->size() - map_alloc_alignment;
  return {context->data(), context, &deleteRefcountedMapAllocator, at::DeviceType::CPU};

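The makeDataPtr bodies above all follow one ownership pattern: the allocator object doubles as the context pointer, and a plain-function deleter tears it down when the DataPtr is released. A rough sketch with invented types, loosely mirroring at::DataPtr's (data, context, deleter, device) quadruple:

#include <memory>

struct Alloc {
  char buf[64];
};

void deleteAlloc(void* ctx) {
  delete static_cast<Alloc*>(ctx);
}

// Hand out the context as the owning pointer; the real DataPtr carries
// the data pointer and the context separately.
std::unique_ptr<void, void (*)(void*)> makeDataPtrSketch() {
  auto* ctx = new Alloc();
  return {static_cast<void*>(ctx), &deleteAlloc};
}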
@ -25,7 +25,7 @@ class TORCH_API MapAllocator {
 public:
  MapAllocator(std::string_view filename, int flags, size_t size);
  MapAllocator(
      WithFd /*unused*/,
      WithFd,
      std::string_view filename,
      int fd,
      int flags,
@ -59,14 +59,14 @@ class TORCH_API MapAllocator {
    return flags_;
  }

  static MapAllocator* fromDataPtr(const at::DataPtr& /*dptr*/);
  static MapAllocator* fromDataPtr(const at::DataPtr&);
  static at::DataPtr makeDataPtr(
      std::string_view filename,
      int flags,
      size_t size,
      size_t* actual_size_out);
  static at::DataPtr makeDataPtr(
      WithFd /*unused*/,
      WithFd,
      const char* filename,
      int fd,
      int flags,
@ -105,13 +105,13 @@ class TORCH_API RefcountedMapAllocator : private RefcountedMapAllocatorArgCheck,
 public:
  RefcountedMapAllocator(const char* filename, int flags, size_t size);
  RefcountedMapAllocator(
      WithFd /*unused*/,
      WithFd,
      const char* filename,
      int fd,
      int flags,
      size_t size);

  static RefcountedMapAllocator* fromDataPtr(const at::DataPtr& /*dptr*/);
  static RefcountedMapAllocator* fromDataPtr(const at::DataPtr&);
  RefcountedMapAllocator(const RefcountedMapAllocator&) = delete;
  RefcountedMapAllocator(RefcountedMapAllocator&&) = delete;
  RefcountedMapAllocator& operator=(const RefcountedMapAllocator&) = delete;
@ -122,7 +122,7 @@ class TORCH_API RefcountedMapAllocator : private RefcountedMapAllocatorArgCheck,
      size_t size,
      size_t* actual_size_out);
  static at::DataPtr makeDataPtr(
      WithFd /*unused*/,
      WithFd,
      const char* filename,
      int fd,
      int flags,

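Aside on the recurring edit in this header: both spellings declare the same function, and the /*name*/ form is clang-tidy's readability-named-parameter style for keeping a parameter's name visible to readers. Judging by the hunk line counts, this diff moves toward the fully anonymous form; a minimal illustration (invented function):

void resize(int /*new_size*/);  // parameter name preserved as a comment
void resize(int);               // anonymous; redeclaring is legal and identical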
@ -179,7 +179,7 @@ void propagate_names_except(const Tensor& result, const Tensor& src, IntArrayRef
    return;
  }
  const auto src_names = src.names();
  const auto result_dim = result.dim();
  const auto result_dim = static_cast<int64_t>(result.dim());
  const auto src_dim = static_cast<int64_t>(src_names.size());
  const auto excluded_dim = static_cast<int64_t>(excluded_idxs.size());
  TORCH_INTERNAL_ASSERT(src_dim - excluded_dim == result_dim);

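A worked instance of the assert above, with invented numbers, to make the invariant concrete: removing the excluded dims from the source must leave exactly the result's rank.

#include <cstdint>

constexpr int64_t src_dim = 4;       // e.g. names = {N, C, H, W}
constexpr int64_t excluded_dim = 1;  // e.g. dropping H
constexpr int64_t result_dim = 3;
static_assert(src_dim - excluded_dim == result_dim);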
@ -273,7 +273,7 @@ c10::SymInt NestedTensorImpl::sym_numel_custom() const {
  return NestedTensorImpl::numel_custom();
}

c10::SymBool NestedTensorImpl::sym_is_contiguous_custom(MemoryFormat /*memory_format*/) const {
c10::SymBool NestedTensorImpl::sym_is_contiguous_custom(MemoryFormat) const {
  return nested_tensor_impl_is_contiguous(this);
}
IntArrayRef NestedTensorImpl::sizes_custom() const {

@ -115,8 +115,7 @@ struct TORCH_API NestedTensorImpl : public c10::TensorImpl {
  // with real implementations
  int64_t numel_custom() const override;
  c10::SymInt sym_numel_custom() const override;
  c10::SymBool sym_is_contiguous_custom(
      MemoryFormat /*memory_format*/) const override;
  c10::SymBool sym_is_contiguous_custom(MemoryFormat) const override;
  int64_t size_custom(int64_t d) const override {
    return this->size(d);
  }

@ -14,7 +14,7 @@ inline int64_t divup(int64_t x, int64_t y) {
TORCH_API void init_num_threads();

// Sets the number of threads to be used in parallel region
TORCH_API void set_num_threads(int /*nthreads*/);
TORCH_API void set_num_threads(int);

// Returns the maximum number of threads that may be used in a parallel region
TORCH_API int get_num_threads();
@ -37,7 +37,7 @@ inline void lazy_init_num_threads() {
  }
}

TORCH_API void set_thread_num(int /*id*/);
TORCH_API void set_thread_num(int);

class TORCH_API ThreadIdGuard {
 public:
@ -130,7 +130,7 @@ inline scalar_t parallel_reduce(
TORCH_API std::string get_parallel_info();

// Sets number of threads used for inter-op parallelism
TORCH_API void set_num_interop_threads(int /*nthreads*/);
TORCH_API void set_num_interop_threads(int);

// Returns the number of threads used for inter-op parallelism
TORCH_API size_t get_num_interop_threads();

@ -42,14 +42,8 @@ const PythonTorchFunctionTLS& PythonTorchFunctionTLS::get_state() {
}

bool torch_function_mode_enabled() {
  // Manually flatten because gcc is refusing to inline here.  Note
  // that we are still calling __tls_get_addr twice here with GCC,
  // presumably because of
  // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81501 (which says
  // the fix ships in GCC 16), but forcing inlining still improves
  // performance.
  const auto& ptfs = pythonTorchFunctionState;
  return ptfs.disabled_state_ != TorchFunctionDisabledState::ALL_DISABLED && !ptfs.stack_.empty();
  return PythonTorchFunctionTLS::get_disabled_state() != TorchFunctionDisabledState::ALL_DISABLED &&
         PythonTorchFunctionTLS::stack_len() > 0;
}

// This is needed to disambiguate the ternary torch function disabled states

@ -27,7 +27,6 @@ struct TORCH_API PythonTorchFunctionTLS {
  TorchFunctionDisabledState disabled_state_ =
      TorchFunctionDisabledState::ENABLED;
  std::vector<std::shared_ptr<c10::SafePyObject>> stack_;
  friend TORCH_API bool torch_function_mode_enabled();
};

TORCH_API bool torch_function_mode_enabled();

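These two hunks undo a hand-flattening of the TLS read: the removed version befriended torch_function_mode_enabled so it could touch disabled_state_ and stack_ directly, paying one thread-local address lookup instead of two accessor calls. A sketch of that removed pattern, with invented names:

#include <vector>

class FunctionTLS {
 private:
  bool disabled_ = false;
  std::vector<int> stack_;
  friend bool mode_enabled();  // grants direct member access
};

static thread_local FunctionTLS tls_state;

bool mode_enabled() {
  const auto& s = tls_state;  // single TLS address computation
  return !s.disabled_ && !s.stack_.empty();
}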
@ -13,7 +13,7 @@ namespace {
  // and left at true for the rest of the execution.
  // It's an optimization so that users who never use default hooks don't need to
  // read the thread_local variables pack_hook_ and unpack_hook_.
  bool is_initialized(false);
  static bool is_initialized(false);
}

static void assertSavedTensorHooksNotDisabled() {

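Aside on the one-word change above: inside an unnamed namespace both spellings already have internal linkage, so the added static is stylistic (some style checkers request it explicitly). A minimal illustration:

namespace {
bool flag_a = false;         // internal linkage already
static bool flag_b = false;  // `static` adds nothing beyond style
}  // namespace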
@ -252,7 +252,7 @@ void SparseCsrTensorImpl::set_stride(int64_t dim, int64_t new_stride) {
void SparseCsrTensorImpl::set_storage_offset(int64_t storage_offset) {
  TORCH_CHECK(false, "Sparse ", at::sparse_csr::layoutToString(layout_, /*upper=*/true), " tensors do not have set_storage_offset.");
}
c10::SymBool SparseCsrTensorImpl::sym_is_contiguous_custom(MemoryFormat /*memory_format*/) const {
c10::SymBool SparseCsrTensorImpl::sym_is_contiguous_custom(MemoryFormat) const {
  TORCH_CHECK(false, "Sparse ", at::sparse_csr::layoutToString(layout_, /*upper=*/true), " tensors do not have is_contiguous");
}
} // namespace at

@ -32,10 +32,10 @@ struct TORCH_API SparseCsrTensorImpl : public TensorImpl {

 public:
  explicit SparseCsrTensorImpl(
      at::DispatchKeySet /*key_set*/,
      at::DispatchKeySet,
      at::Device device,
      Layout layout,
      const caffe2::TypeMeta /*data_type*/);
      const caffe2::TypeMeta);

  void resize_(int64_t nnz, IntArrayRef size);
  void resize_and_clear_(
@ -86,8 +86,7 @@ struct TORCH_API SparseCsrTensorImpl : public TensorImpl {
 protected:
  IntArrayRef strides_custom() const override;
  SymIntArrayRef sym_strides_custom() const override;
  SymBool sym_is_contiguous_custom(
      MemoryFormat /*memory_format*/) const override;
  SymBool sym_is_contiguous_custom(MemoryFormat) const override;

 public:
  void set_size(int64_t dim, int64_t new_size) override;

@ -46,9 +46,7 @@ struct TORCH_API SparseTensorImpl : public TensorImpl {

 public:
  // Public for now...
  explicit SparseTensorImpl(
      at::DispatchKeySet /*key_set*/,
      const caffe2::TypeMeta /*data_type*/);
  explicit SparseTensorImpl(at::DispatchKeySet, const caffe2::TypeMeta);

  void release_resources() override;

@ -231,14 +229,14 @@ struct TORCH_API SparseTensorImpl : public TensorImpl {
  }

  void resize_(int64_t sparse_dim, int64_t dense_dim, ArrayRef<int64_t> size) {
    _resize_(sparse_dim, dense_dim, size);
    return _resize_(sparse_dim, dense_dim, size);
  }

  void resize_(
      int64_t sparse_dim,
      int64_t dense_dim,
      ArrayRef<c10::SymInt> size) {
    _resize_(sparse_dim, dense_dim, size);
    return _resize_(sparse_dim, dense_dim, size);
  }

  // NOTE: this function will resize the sparse tensor and also set `indices`

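Aside (my own illustration): `return expr;` is valid in a void function when expr itself has type void, which is why the forwarding wrappers above stay legal after gaining an explicit return, and read uniformly with value-returning wrappers:

void impl(int /*n*/) {}

void wrapper(int n) {
  return impl(n);  // returning a void expression from a void function
}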
@ -386,8 +384,8 @@ struct TORCH_API SparseTensorImpl : public TensorImpl {

 private:
  explicit SparseTensorImpl(
      at::DispatchKeySet /*key_set*/,
      const caffe2::TypeMeta /*data_type*/,
      at::DispatchKeySet,
      const caffe2::TypeMeta,
      at::Tensor indices,
      at::Tensor values);

@ -59,7 +59,7 @@ static inline void set_item(const Tensor& self, ArrayRef<TensorIndex> indices, c
    }
  }

  set_item(self, indices, value);
  return set_item(self, indices, value);
}

} // namespace indexing