Mirror of https://github.com/pytorch/pytorch.git (synced 2025-11-04 08:00:58 +08:00)

Compare commits: cpp-docs-d...gh/tugsbay (1 commit)

| Author | SHA1 | Date |
|---|---|---|
|  | 5169c0e551 |  |
@@ -13,4 +13,3 @@ exclude:
  - "**/benchmarks/**"
  - "**/test_*.py"
  - "**/*_test.py"
  - "tools/**"
@@ -195,16 +195,13 @@ case "$tag" in
     NINJA_VERSION=1.9.0
     TRITON=yes
     ;;
-  pytorch-linux-jammy-xpu-n-py3 | pytorch-linux-jammy-xpu-n-py3-inductor-benchmarks)
+  pytorch-linux-jammy-xpu-n-py3)
     ANACONDA_PYTHON_VERSION=3.10
     GCC_VERSION=11
     VISION=yes
     XPU_VERSION=2025.2
     NINJA_VERSION=1.9.0
     TRITON=yes
-    if [[ $tag =~ "benchmarks" ]]; then
-      INDUCTOR_BENCHMARKS=yes
-    fi
     ;;
   pytorch-linux-jammy-py3-gcc11-inductor-benchmarks)
     ANACONDA_PYTHON_VERSION=3.10

@@ -3,7 +3,7 @@
 
 set -eux
 
-ACL_VERSION=${ACL_VERSION:-"v52.6.0"}
+ACL_VERSION=${ACL_VERSION:-"v25.02"}
 ACL_INSTALL_DIR="/acl"
 
 # Clone ACL

@@ -49,20 +49,12 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
     export SYSROOT_DEP="sysroot_linux-64=2.17"
   fi
 
 # Install correct Python version
 # Also ensure sysroot is using a modern GLIBC to match system compilers
-if [ "$ANACONDA_PYTHON_VERSION" = "3.14" ]; then
-  as_jenkins conda create -n py_$ANACONDA_PYTHON_VERSION -y\
-             python="3.14.0" \
-             ${SYSROOT_DEP} \
-             -c conda-forge
-else
-  # Install correct Python version
-  # Also ensure sysroot is using a modern GLIBC to match system compilers
   as_jenkins conda create -n py_$ANACONDA_PYTHON_VERSION -y\
              python="$ANACONDA_PYTHON_VERSION" \
              ${SYSROOT_DEP}
-fi
 
   # libstdcxx from conda default channels are too old, we need GLIBCXX_3.4.30
   # which is provided in libstdcxx 12 and up.
   conda_install libstdcxx-ng=12.3.0 --update-deps -c conda-forge
@@ -10,7 +10,7 @@ else
   arch_path='sbsa'
 fi
 
-NVSHMEM_VERSION=3.4.5
+NVSHMEM_VERSION=3.3.24
 
 function install_cuda {
   version=$1

@@ -150,7 +150,7 @@ function install_130 {
   CUDNN_VERSION=9.13.0.50
   echo "Installing CUDA 13.0 and cuDNN ${CUDNN_VERSION} and NVSHMEM and NCCL and cuSparseLt-0.7.1"
   # install CUDA 13.0 in the same container
-  install_cuda 13.0.2 cuda_13.0.2_580.95.05_linux
+  install_cuda 13.0.0 cuda_13.0.0_580.65.06_linux
 
   # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
   install_cudnn 13 $CUDNN_VERSION

@@ -19,7 +19,7 @@ pip_install \
   transformers==4.36.2
 
 pip_install coloredlogs packaging
-pip_install onnxruntime==1.23.1
+pip_install onnxruntime==1.23.0
 pip_install onnxscript==0.5.4
 
 # Cache the transformers model to be used later by ONNX tests. We need to run the transformers

@@ -40,7 +40,11 @@ EOF
 
     # Default url values
     rocm_baseurl="http://repo.radeon.com/rocm/apt/${ROCM_VERSION}"
+    amdgpu_baseurl="https://repo.radeon.com/amdgpu/${ROCM_VERSION}/ubuntu"
 
+    # Add amdgpu repository
+    UBUNTU_VERSION_NAME=`cat /etc/os-release | grep UBUNTU_CODENAME | awk -F= '{print $2}'`
+    echo "deb [arch=amd64] ${amdgpu_baseurl} ${UBUNTU_VERSION_NAME} main" > /etc/apt/sources.list.d/amdgpu.list
 
     # Add rocm repository
     wget -qO - http://repo.radeon.com/rocm/rocm.gpg.key | apt-key add -

@@ -12,8 +12,8 @@ function do_install() {
 
     rocm_version_nodot=${rocm_version//./}
 
-    # post merge of https://github.com/icl-utk-edu/magma/pull/65
-    MAGMA_VERSION=c0792ae825fb36872784892ea643dd6f3456bc5f
+    # https://github.com/icl-utk-edu/magma/pull/65
+    MAGMA_VERSION=d6e4117bc88e73f06d26c6c2e14f064e8fc3d1ec
     magma_archive="magma-rocm${rocm_version_nodot}-${MAGMA_VERSION}-1.tar.bz2"
 
     rocm_dir="/opt/rocm"

@@ -97,7 +97,7 @@ case ${image} in
     manylinux2_28-builder:xpu)
         TARGET=xpu_final
        GPU_IMAGE=amd64/almalinux:8
-        DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=13"
+        DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=11"
         MANY_LINUX_VERSION="2_28"
         ;;
     *)

@@ -138,12 +138,10 @@ numba==0.60.0 ; python_version == "3.12" and platform_machine != "s390x"
 #test_binary_ufuncs.py
 numpy==1.22.4; python_version == "3.10"
 numpy==1.26.2; python_version == "3.11" or python_version == "3.12"
-numpy==2.1.2; python_version >= "3.13" and python_version < "3.14"
-numpy==2.3.4; python_version >= "3.14"
+numpy==2.1.2; python_version >= "3.13"
 
 pandas==2.0.3; python_version < "3.13"
-pandas==2.2.3; python_version >= "3.13" and python_version < "3.14"
-pandas==2.3.3; python_version >= "3.14"
+pandas==2.2.3; python_version >= "3.13"
 
 #onnxruntime
 #Description: scoring engine for Open Neural Network Exchange (ONNX) models

@@ -155,8 +153,7 @@ opt-einsum==3.3
 #Pinned versions: 3.3
 #test that import: test_linalg.py
 
-optree==0.13.0 ; python_version < "3.14"
-optree==0.17.0 ; python_version >= "3.14"
+optree==0.13.0
 #Description: A library for tree manipulation
 #Pinned versions: 0.13.0
 #test that import: test_vmap.py, test_aotdispatch.py, test_dynamic_shapes.py,

@@ -255,8 +252,7 @@ scikit-image==0.22.0
 #test that import:
 
 scipy==1.10.1 ; python_version <= "3.11"
-scipy==1.14.1 ; python_version > "3.11" and python_version < "3.14"
-scipy==1.16.2 ; python_version >= "3.14"
+scipy==1.14.1 ; python_version >= "3.12"
 # Pin SciPy because of failing distribution tests (see #60347)
 #Description: scientific python
 #Pinned versions: 1.10.1

@@ -328,8 +324,7 @@ pywavelets==1.7.0 ; python_version >= "3.12"
 #Pinned versions: 1.4.1
 #test that import:
 
-lxml==5.3.0 ; python_version < "3.14"
-lxml==6.0.2 ; python_version >= "3.14"
+lxml==5.3.0
 #Description: This is a requirement of unittest-xml-reporting
 
 PyGithub==2.3.0

@@ -339,14 +334,12 @@ sympy==1.13.3
 #Pinned versions:
 #test that import:
 
-onnx==1.19.1 ; python_version < "3.14"
-# Unpin once Python 3.14 is supported. See  onnxruntime issue 26309.
-onnx==1.18.0 ; python_version == "3.14"
+onnx==1.18.0
 #Description: Required by onnx tests, and mypy and test_public_bindings.py when checking torch.onnx._internal
 #Pinned versions:
 #test that import:
 
-onnxscript==0.5.4
+onnxscript==0.5.3
 #Description: Required by mypy and test_public_bindings.py when checking torch.onnx._internal
 #Pinned versions:
 #test that import:

@@ -366,7 +359,7 @@ pwlf==2.2.1
 #test that import: test_sac_estimator.py
 
 # To build PyTorch itself
-pyyaml==6.0.3
+pyyaml==6.0.2
 pyzstd
 setuptools==78.1.1
 packaging==23.1

@@ -1,11 +1,15 @@
-sphinx==7.2.6
+sphinx==5.3.0
 #Description: This is used to generate PyTorch docs
-#Pinned versions: 7.2.6
+#Pinned versions: 5.3.0
 
-pytorch_sphinx_theme2==0.2.0
-#Description: This is needed to generate PyTorch docs
-#Pinned versions: 0.2.0
+standard-imghdr==3.13.0; python_version >= "3.13"
+#Description: This is needed by Sphinx, so it needs to be added here.
+# The reasons are as follows:
+# 1) This module has been removed from the Python standard library since Python 3.13(https://peps.python.org/pep-0594/#imghdr);
+# 2) The current version of Sphinx (5.3.0) is not compatible with Python 3.13.
+# Once Sphinx is upgraded to a version compatible with Python 3.13 or later, we can remove this dependency.
 
+-e git+https://github.com/pytorch/pytorch_sphinx_theme.git@71e55749be14ceb56e7f8211a9fb649866b87ad4#egg=pytorch_sphinx_theme2
 # TODO: sphinxcontrib.katex 0.9.0 adds a local KaTeX server to speed up pre-rendering
 # but it doesn't seem to work and hangs around idly. The initial thought that it is probably
 # something related to Docker setup. We can investigate this later.

@@ -32,17 +36,17 @@ tensorboard==2.18.0 ; python_version >= "3.13"
 #Description: This is used to generate PyTorch docs
 #Pinned versions: 2.13.0
 
-breathe==4.36.0
+breathe==4.34.0
 #Description: This is used to generate PyTorch C++ docs
-#Pinned versions: 4.36.0
+#Pinned versions: 4.34.0
 
-exhale==0.3.7
+exhale==0.2.3
 #Description: This is used to generate PyTorch C++ docs
-#Pinned versions: 0.3.7
+#Pinned versions: 0.2.3
 
-docutils==0.20
+docutils==0.16
 #Description: This is used to generate PyTorch C++ docs
-#Pinned versions: 0.20
+#Pinned versions: 0.16
 
 bs4==0.0.1
 #Description: This is used to generate PyTorch C++ docs

@@ -52,13 +56,13 @@ IPython==8.12.0
 #Description: This is used to generate PyTorch functorch docs
 #Pinned versions: 8.12.0
 
-myst-nb==1.3.0
+myst-nb==0.17.2
 #Description: This is used to generate PyTorch functorch and torch.compile docs.
-#Pinned versions: 1.3.0
+#Pinned versions: 0.17.2
 
 # The following are required to build torch.distributed.elastic.rendezvous.etcd* docs
 python-etcd==0.4.5
 sphinx-copybutton==0.5.0
-sphinx-design==0.6.1
+sphinx-design==0.4.0
 sphinxcontrib-mermaid==1.0.0
-myst-parser==4.0.1
+myst-parser==0.18.1
@@ -54,15 +54,12 @@ ENV OPENSSL_DIR /opt/openssl
 RUN rm install_openssl.sh
 
 ARG INDUCTOR_BENCHMARKS
-ARG ANACONDA_PYTHON_VERSION
-ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION
 COPY ./common/install_inductor_benchmark_deps.sh install_inductor_benchmark_deps.sh
 COPY ./common/common_utils.sh common_utils.sh
 COPY ci_commit_pins/huggingface-requirements.txt huggingface-requirements.txt
 COPY ci_commit_pins/timm.txt timm.txt
-COPY ci_commit_pins/torchbench.txt torchbench.txt
 RUN if [ -n "${INDUCTOR_BENCHMARKS}" ]; then bash ./install_inductor_benchmark_deps.sh; fi
-RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface-requirements.txt torchbench.txt
+RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface-requirements.txt
 
 # Install XPU Dependencies
 ARG XPU_VERSION

@@ -100,8 +100,6 @@ COPY ./common/common_utils.sh common_utils.sh
 COPY ci_commit_pins/huggingface-requirements.txt huggingface-requirements.txt
 COPY ci_commit_pins/timm.txt timm.txt
 COPY ci_commit_pins/torchbench.txt torchbench.txt
-# Only build aoti cpp tests when INDUCTOR_BENCHMARKS is set to True
-ENV BUILD_AOT_INDUCTOR_TEST ${INDUCTOR_BENCHMARKS}
 RUN if [ -n "${INDUCTOR_BENCHMARKS}" ]; then bash ./install_inductor_benchmark_deps.sh; fi
 RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface-requirements.txt torchbench.txt
@@ -6,7 +6,7 @@ dependencies = [
     "GitPython==3.1.45",
     "docker==7.1.0",
     "pytest==7.3.2",
-    "uv==0.9.6"
+    "uv==0.8.6"
 ]
 
 [tool.setuptools]

@@ -1,7 +1,7 @@
 SHELL=/usr/bin/env bash
 
 DOCKER_CMD ?= docker
-DESIRED_ROCM ?= 7.1
+DESIRED_ROCM ?= 7.0
 DESIRED_ROCM_SHORT = $(subst .,,$(DESIRED_ROCM))
 PACKAGE_NAME = magma-rocm
 # inherit this from underlying docker image, do not pass this env var to docker

@@ -16,7 +16,6 @@ DOCKER_RUN = set -eou pipefail; ${DOCKER_CMD} run --rm -i \
 	magma-rocm/build_magma.sh
 
 .PHONY: all
-all: magma-rocm71
 all: magma-rocm70
 all: magma-rocm64

@@ -25,11 +24,6 @@ clean:
 	$(RM) -r magma-*
 	$(RM) -r output
 
-.PHONY: magma-rocm71
-magma-rocm71: DESIRED_ROCM := 7.1
-magma-rocm71:
-	$(DOCKER_RUN)
-
 .PHONY: magma-rocm70
 magma-rocm70: DESIRED_ROCM := 7.0
 magma-rocm70:

@@ -6,8 +6,8 @@ set -eou pipefail
 # The script expects DESIRED_CUDA and PACKAGE_NAME to be set
 ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
 
-# post merge of https://github.com/icl-utk-edu/magma/pull/65
-MAGMA_VERSION=c0792ae825fb36872784892ea643dd6f3456bc5f
+# https://github.com/icl-utk-edu/magma/pull/65
+MAGMA_VERSION=d6e4117bc88e73f06d26c6c2e14f064e8fc3d1ec
 
 # Folders for the build
 PACKAGE_FILES=${ROOT_DIR}/magma-rocm/package_files # metadata

@@ -20,7 +20,7 @@ mkdir -p ${PACKAGE_DIR} ${PACKAGE_OUTPUT}/linux-64 ${PACKAGE_BUILD} ${PACKAGE_RE
 
 # Fetch magma sources and verify checksum
 pushd ${PACKAGE_DIR}
-git clone https://github.com/icl-utk-edu/magma
+git clone https://github.com/jeffdaily/magma
 pushd magma
 git checkout ${MAGMA_VERSION}
 popd

@@ -426,7 +426,7 @@ fi
 if [[ "$BUILD_ENVIRONMENT" != *libtorch* && "$BUILD_ENVIRONMENT" != *bazel* ]]; then
   # export test times so that potential sharded tests that'll branch off this build will use consistent data
   # don't do this for libtorch as libtorch is C++ only and thus won't have python tests run on its build
-  PYTHONPATH=. python tools/stats/export_test_times.py
+  python tools/stats/export_test_times.py
 fi
 # don't do this for bazel or s390x or riscv64 as they don't use sccache
 if [[ "$BUILD_ENVIRONMENT" != *s390x* && "$BUILD_ENVIRONMENT" != *riscv64* && "$BUILD_ENVIRONMENT" != *-bazel-* ]]; then
@@ -89,23 +89,20 @@ if [ "$is_main_doc" = true ]; then
 
   make coverage
   # Now we have the coverage report, we need to make sure it is empty.
-  # Sphinx 7.2.6+ format: python.txt contains a statistics table with a TOTAL row
-  # showing the undocumented count in the third column.
-  # Example: | TOTAL | 99.83% | 2 |
+  # Count the number of lines in the file and turn that number into a variable
+  # $lines. The `cut -f1 ...` is to only parse the number, not the filename
+  # Skip the report header by subtracting 2: the header will be output even if
+  # there are no undocumented items.
   #
   # Also: see docs/source/conf.py for "coverage_ignore*" items, which should
   # be documented then removed from there.
 
-  # Extract undocumented count from TOTAL row in Sphinx 7.2.6 statistics table
-  # The table format is: | Module | Coverage | Undocumented |
-  # Extract the third column (undocumented count) from the TOTAL row
-  undocumented=$(grep "| TOTAL" build/coverage/python.txt | awk -F'|' '{print $4}' | tr -d ' ')
-
-  if [ -z "$undocumented" ] || ! [[ "$undocumented" =~ ^[0-9]+$ ]]; then
+  lines=$(wc -l build/coverage/python.txt 2>/dev/null |cut -f1 -d' ')
+  undocumented=$((lines - 2))
+  if [ $undocumented -lt 0 ]; then
     echo coverage output not found
     exit 1
-  elif [ "$undocumented" -gt 0 ]; then
-    echo "undocumented objects found:"
+  elif [ $undocumented -gt 0 ]; then
+    echo undocumented objects found:
     cat build/coverage/python.txt
     echo "Make sure you've updated relevant .rsts in docs/source!"
     echo "You can reproduce locally by running 'cd docs && make coverage && cat build/coverage/python.txt'"
@@ -460,18 +460,28 @@ test_inductor_shard() {
     --verbose
 }
 
-test_inductor_aoti_cpp() {
+test_inductor_aoti() {
   # docker build uses bdist_wheel which does not work with test_aot_inductor
   # TODO: need a faster way to build
+  if [[ "$BUILD_ENVIRONMENT" == *rocm* ]]; then
+    # We need to hipify before building again
+    python3 tools/amd_build/build_amd.py
+  fi
   if [[ "$BUILD_ENVIRONMENT" == *sm86* ]]; then
     BUILD_COMMAND=(TORCH_CUDA_ARCH_LIST=8.6 USE_FLASH_ATTENTION=OFF python -m pip install --no-build-isolation -v -e .)
     # TODO: Replace me completely, as one should not use conda libstdc++, nor need special path to TORCH_LIB
     TEST_ENVS=(CPP_TESTS_DIR="${BUILD_BIN_DIR}" LD_LIBRARY_PATH="/opt/conda/envs/py_3.10/lib:${TORCH_LIB_DIR}:${LD_LIBRARY_PATH}")
   else
     BUILD_COMMAND=(python -m pip install --no-build-isolation -v -e .)
     TEST_ENVS=(CPP_TESTS_DIR="${BUILD_BIN_DIR}" LD_LIBRARY_PATH="${TORCH_LIB_DIR}")
   fi
 
+  # aoti cmake custom command requires `torch` to be installed
+  # initialize the cmake build cache and install torch
+  /usr/bin/env "${BUILD_COMMAND[@]}"
+  # rebuild with the build cache with `BUILD_AOT_INDUCTOR_TEST` enabled
+  /usr/bin/env CMAKE_FRESH=1 BUILD_AOT_INDUCTOR_TEST=1 "${BUILD_COMMAND[@]}"
+
   /usr/bin/env "${TEST_ENVS[@]}" python test/run_test.py --cpp --verbose -i cpp/test_aoti_abi_check cpp/test_aoti_inference cpp/test_vec_half_AVX2 -dist=loadfile
 }
@@ -572,8 +582,6 @@ fi
 
 if [[ "${TEST_CONFIG}" == *cpu* ]]; then
   DYNAMO_BENCHMARK_FLAGS+=(--device cpu)
-elif [[ "${TEST_CONFIG}" == *xpu* ]]; then
-  DYNAMO_BENCHMARK_FLAGS+=(--device xpu)
 else
   DYNAMO_BENCHMARK_FLAGS+=(--device cuda)
 fi

@@ -667,8 +675,6 @@ test_perf_for_dashboard() {
     device=cuda_b200
   elif [[ "${TEST_CONFIG}" == *rocm* ]]; then
     device=rocm
-  elif [[ "${TEST_CONFIG}" == *xpu* ]]; then
-    device=xpu
   fi
 
   for mode in "${modes[@]}"; do

@@ -1761,7 +1767,7 @@ elif [[ "${TEST_CONFIG}" == *torchbench* ]]; then
   else
     # Do this after checkout_install_torchbench to ensure we clobber any
     # nightlies that torchbench may pull in
-    if [[ "${TEST_CONFIG}" != *cpu* && "${TEST_CONFIG}" != *xpu* ]]; then
+    if [[ "${TEST_CONFIG}" != *cpu* ]]; then
       install_torchrec_and_fbgemm
     fi
     PYTHONPATH=/torchbench test_dynamo_benchmark torchbench "$id"

@@ -1770,7 +1776,7 @@ elif [[ "${TEST_CONFIG}" == *inductor_cpp_wrapper* ]]; then
   install_torchvision
   PYTHONPATH=/torchbench test_inductor_cpp_wrapper_shard "$SHARD_NUMBER"
   if [[ "$SHARD_NUMBER" -eq "1" ]]; then
-    test_inductor_aoti_cpp
+    test_inductor_aoti
   fi
 elif [[ "${TEST_CONFIG}" == *inductor* ]]; then
   install_torchvision
@@ -7,9 +7,12 @@ if "%DESIRED_PYTHON%" == "3.13t" (
     set "PYTHON_INSTALLER_URL=https://www.python.org/ftp/python/3.13.0/python-3.13.0-amd64.exe"
     set ADDITIONAL_OPTIONS="Include_freethreaded=1"
     set PYTHON_EXEC="python3.13t"
+) else if "%DESIRED_PYTHON%"=="3.14" (
+    echo Python version is set to 3.14 or 3.14t
+    set "PYTHON_INSTALLER_URL=https://www.python.org/ftp/python/3.14.0/python-3.14.0rc1-amd64.exe"
 ) else if "%DESIRED_PYTHON%"=="3.14t" (
     echo Python version is set to 3.14 or 3.14t
-    set "PYTHON_INSTALLER_URL=https://www.python.org/ftp/python/3.14.0/python-3.14.0-amd64.exe"
+    set "PYTHON_INSTALLER_URL=https://www.python.org/ftp/python/3.14.0/python-3.14.0rc1-amd64.exe"
     set ADDITIONAL_OPTIONS="Include_freethreaded=1"
     set PYTHON_EXEC="python3.14t"
 ) else (
@@ -163,13 +163,8 @@ if [[ "$(uname)" != Darwin ]]; then
   MEMORY_LIMIT_MAX_JOBS=12
   NUM_CPUS=$(( $(nproc) - 2 ))
 
-  if [[ "$(uname)" == Linux ]]; then
-    # Defaults here for **binary** linux builds so they can be changed in one place
-    export MAX_JOBS=${MAX_JOBS:-$(( ${NUM_CPUS} > ${MEMORY_LIMIT_MAX_JOBS} ? ${MEMORY_LIMIT_MAX_JOBS} : ${NUM_CPUS} ))}
-  else
-    # For other builds
-    export MAX_JOBS=${NUM_CPUS}
-  fi
+  # Defaults here for **binary** linux builds so they can be changed in one place
+  export MAX_JOBS=${MAX_JOBS:-$(( ${NUM_CPUS} > ${MEMORY_LIMIT_MAX_JOBS} ? ${MEMORY_LIMIT_MAX_JOBS} : ${NUM_CPUS} ))}
 
   cat >>"$envfile" <<EOL
   export MAX_JOBS="${MAX_JOBS}"
@@ -60,11 +60,9 @@ performance-*,
readability-container-size-empty,
readability-delete-null-pointer,
readability-duplicate-include,
readability-named-parameter,
readability-misplaced-array-index,
readability-redundant*,
readability-simplify-subscript-expr,
readability-static-definition-in-anonymous-namespace
readability-string-compare,
-readability-redundant-access-specifiers,
-readability-redundant-control-flow,
@@ -1,319 +0,0 @@
---
name: add-uint-support
description: Add unsigned integer (uint) type support to PyTorch operators by updating AT_DISPATCH macros. Use when adding support for uint16, uint32, uint64 types to operators, kernels, or when user mentions enabling unsigned types, barebones unsigned types, or uint support.
---

# Add Unsigned Integer (uint) Support to Operators

This skill helps add support for unsigned integer types (uint16, uint32, uint64) to PyTorch operators by updating their AT_DISPATCH macros.

## When to use this skill

Use this skill when:
- Adding uint16, uint32, or uint64 support to an operator
- User mentions "unsigned types", "uint support", "barebones unsigned types"
- Enabling support for kUInt16, kUInt32, kUInt64 in kernels
- Working with operator implementations that need expanded type coverage

## Quick reference

**Add unsigned types to existing dispatch:**
```cpp
// Before
AT_DISPATCH_V2(dtype, "op", AT_WRAP([&]() {
  kernel<scalar_t>();
}), AT_EXPAND(AT_ALL_TYPES));

// After (method 1: add unsigned types explicitly)
AT_DISPATCH_V2(dtype, "op", AT_WRAP([&]() {
  kernel<scalar_t>();
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES));

// After (method 2: use V2 integral types if AT_INTEGRAL_TYPES present)
AT_DISPATCH_V2(dtype, "op", AT_WRAP([&]() {
  kernel<scalar_t>();
}), AT_EXPAND(AT_INTEGRAL_TYPES_V2), AT_EXPAND(AT_FLOATING_TYPES));
```

## Type group reference

**Unsigned type groups:**
- `AT_BAREBONES_UNSIGNED_TYPES`: kUInt16, kUInt32, kUInt64
- `AT_INTEGRAL_TYPES_V2`: AT_INTEGRAL_TYPES + AT_BAREBONES_UNSIGNED_TYPES

**Relationship:**
```cpp
AT_INTEGRAL_TYPES          // kByte, kChar, kInt, kLong, kShort
AT_BAREBONES_UNSIGNED_TYPES  // kUInt16, kUInt32, kUInt64
AT_INTEGRAL_TYPES_V2       // INTEGRAL_TYPES + BAREBONES_UNSIGNED_TYPES
```

## Instructions

### Step 1: Determine if conversion to V2 is needed

Check if the file uses AT_DISPATCH_V2:

**If using old AT_DISPATCH:**
- First convert to AT_DISPATCH_V2 using the at-dispatch-v2 skill
- Then proceed with adding uint support

**If already using AT_DISPATCH_V2:**
- Proceed directly to Step 2

### Step 2: Analyze the current dispatch macro

Identify what type groups are currently in use:

```cpp
AT_DISPATCH_V2(dtype, "op", AT_WRAP([&]() {
  // body
}), AT_EXPAND(AT_ALL_TYPES), kHalf, kBFloat16);
    ^^^^^^^^^^^^^^^^^^^^^^^^^
    Current type coverage
```

Common patterns:
- `AT_EXPAND(AT_ALL_TYPES)` → includes AT_INTEGRAL_TYPES + AT_FLOATING_TYPES
- `AT_EXPAND(AT_INTEGRAL_TYPES)` → signed integers only
- `AT_EXPAND(AT_FLOATING_TYPES)` → floating point types

### Step 3: Choose the uint addition method

Two approaches:

**Method 1: Add AT_BAREBONES_UNSIGNED_TYPES explicitly**
- Use when: You want to be explicit about adding uint support
- Add `AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES)` to the type list

**Method 2: Substitute AT_INTEGRAL_TYPES with AT_INTEGRAL_TYPES_V2**
- Use when: The dispatch already uses `AT_EXPAND(AT_INTEGRAL_TYPES)`
- More concise: replaces one type group with its superset
- Only applicable if AT_INTEGRAL_TYPES is present

### Step 4: Apply the transformation

**Method 1 example:**
```cpp
// Before
AT_DISPATCH_V2(
    dtype,
    "min_values_cuda",
    AT_WRAP([&]() {
      kernel_impl<scalar_t>(iter);
    }),
    AT_EXPAND(AT_ALL_TYPES),
    kBFloat16, kHalf, kBool
);

// After (add unsigned types)
AT_DISPATCH_V2(
    dtype,
    "min_values_cuda",
    AT_WRAP([&]() {
      kernel_impl<scalar_t>(iter);
    }),
    AT_EXPAND(AT_ALL_TYPES),
    AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES),
    kBFloat16, kHalf, kBool
);
```

**Method 2 example:**
```cpp
// Before
AT_DISPATCH_V2(
    dtype,
    "integral_op",
    AT_WRAP([&]() {
      kernel<scalar_t>();
    }),
    AT_EXPAND(AT_INTEGRAL_TYPES)
);

// After (substitute with V2)
AT_DISPATCH_V2(
    dtype,
    "integral_op",
    AT_WRAP([&]() {
      kernel<scalar_t>();
    }),
    AT_EXPAND(AT_INTEGRAL_TYPES_V2)
);
```

### Step 5: Handle AT_ALL_TYPES vs individual type groups

If the dispatch uses `AT_EXPAND(AT_ALL_TYPES)`:
- `AT_ALL_TYPES` = `AT_INTEGRAL_TYPES` + `AT_FLOATING_TYPES`
- To add uint: add `AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES)` to the list

If the dispatch separately lists INTEGRAL and FLOATING:
```cpp
// Before
AT_EXPAND(AT_INTEGRAL_TYPES), AT_EXPAND(AT_FLOATING_TYPES)

// After (Method 2 preferred)
AT_EXPAND(AT_INTEGRAL_TYPES_V2), AT_EXPAND(AT_FLOATING_TYPES)
```

### Step 6: Verify all dispatch sites

Check the file for ALL dispatch macros that need uint support:
- Some operators have multiple dispatch sites (CPU, CUDA, different functions)
- Apply the transformation consistently across all sites
- Ensure each gets the same type coverage updates

### Step 7: Validate the changes

Check that:
- [ ] AT_DISPATCH_V2 format is used (not old AT_DISPATCH)
- [ ] Unsigned types are added via one of the two methods
- [ ] All relevant dispatch sites in the file are updated
- [ ] Type groups use `AT_EXPAND()`
- [ ] Arguments are properly formatted and comma-separated

## Common patterns

### Pattern 1: AT_ALL_TYPES + extras

```cpp
// Before
AT_DISPATCH_V2(dtype, "op", AT_WRAP([&]() {
  kernel<scalar_t>();
}), AT_EXPAND(AT_ALL_TYPES), kHalf, kBFloat16);

// After
AT_DISPATCH_V2(dtype, "op", AT_WRAP([&]() {
  kernel<scalar_t>();
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kHalf, kBFloat16);
```

### Pattern 2: Separate INTEGRAL + FLOATING

```cpp
// Before
AT_DISPATCH_V2(dtype, "op", AT_WRAP([&]() {
  kernel<scalar_t>();
}), AT_EXPAND(AT_INTEGRAL_TYPES), AT_EXPAND(AT_FLOATING_TYPES));

// After
AT_DISPATCH_V2(dtype, "op", AT_WRAP([&]() {
  kernel<scalar_t>();
}), AT_EXPAND(AT_INTEGRAL_TYPES_V2), AT_EXPAND(AT_FLOATING_TYPES));
```

### Pattern 3: Old dispatch needs conversion first

```cpp
// Before (needs v2 conversion first)
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, dtype, "op", [&]() {
  kernel<scalar_t>();
});

// After v2 conversion
AT_DISPATCH_V2(dtype, "op", AT_WRAP([&]() {
  kernel<scalar_t>();
}), AT_EXPAND(AT_ALL_TYPES), kHalf, kBFloat16);

// After adding uint support
AT_DISPATCH_V2(dtype, "op", AT_WRAP([&]() {
  kernel<scalar_t>();
}), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kHalf, kBFloat16);
```

## Multiple dispatch sites example

For a file with multiple functions:

```cpp
void min_values_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_V2(iter.dtype(), "min_values_cuda", AT_WRAP([&]() {
    impl<scalar_t>(iter);
  }), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kBFloat16, kHalf);
  //                           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  //                           Added uint support
}

void min_launch_kernel(TensorIterator &iter) {
  AT_DISPATCH_V2(iter.input_dtype(), "min_cuda", AT_WRAP([&]() {
    gpu_reduce_kernel<scalar_t>(iter);
  }), AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES), kBFloat16, kHalf);
  //                           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  //                           Added uint support here too
}
```

## Decision tree

Use this decision tree to determine the approach:

```
Is the file using AT_DISPATCH_V2?
├─ No → Use at-dispatch-v2 skill first, then continue
└─ Yes
   └─ Does it use AT_EXPAND(AT_INTEGRAL_TYPES)?
      ├─ Yes → Replace with AT_EXPAND(AT_INTEGRAL_TYPES_V2)
      └─ No → Add AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES) to type list
```

## Edge cases

### Case 1: Dispatch with only floating types

If the operator only supports floating point types, don't add uint support:

```cpp
// Leave as-is - floating point only operator
AT_DISPATCH_V2(dtype, "float_op", AT_WRAP([&]() {
  kernel<scalar_t>();
}), AT_EXPAND(AT_FLOATING_TYPES), kHalf);
```

### Case 2: Complex types present

Unsigned types work alongside complex types:

```cpp
AT_DISPATCH_V2(dtype, "op", AT_WRAP([&]() {
  kernel<scalar_t>();
}), AT_EXPAND(AT_ALL_TYPES),
    AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES),
    AT_EXPAND(AT_COMPLEX_TYPES),
    kHalf, kBFloat16);
```

### Case 3: Already has uint support

Check if uint types are already present:
- If `AT_INTEGRAL_TYPES_V2` is used → already has uint support
- If `AT_BAREBONES_UNSIGNED_TYPES` is already in list → already has uint support
- Skip the file if uint support is already present (see the sketch below)
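
For instance, a dispatch like this minimal sketch (the op name and `kernel` are placeholders, not from a real file) already covers the unsigned types and should be left unchanged:

```cpp
// Nothing to do here: AT_INTEGRAL_TYPES_V2 already includes
// kUInt16, kUInt32, and kUInt64.
AT_DISPATCH_V2(dtype, "already_covered_op", AT_WRAP([&]() {
  kernel<scalar_t>();
}), AT_EXPAND(AT_INTEGRAL_TYPES_V2), AT_EXPAND(AT_FLOATING_TYPES));
```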

## Workflow

When asked to add uint support:

1. Read the target file
2. Check if using AT_DISPATCH_V2:
   - If not → use at-dispatch-v2 skill first
3. Identify all dispatch macro sites
4. For each dispatch:
   - Analyze current type groups
   - Choose method (add BAREBONES_UNSIGNED or upgrade to V2)
   - Apply transformation with Edit tool
5. Show the user the changes
6. Explain what was modified

## Important notes

- Always check if v2 conversion is needed first
- Apply changes consistently across all dispatch sites in the file
- Method 2 (AT_INTEGRAL_TYPES_V2) is cleaner when applicable
- Method 1 (explicit AT_BAREBONES_UNSIGNED_TYPES) is more explicit
- Unsigned types are: kUInt16, kUInt32, kUInt64 (not kByte, which is uint8)
- Some operators may not semantically support unsigned types - use judgment

## Testing

After adding uint support, the operator should accept uint16, uint32, and uint64 tensors. The user is responsible for functional testing.
@@ -1,305 +0,0 @@
---
name: at-dispatch-v2
description: Convert PyTorch AT_DISPATCH macros to AT_DISPATCH_V2 format in ATen C++ code. Use when porting AT_DISPATCH_ALL_TYPES_AND*, AT_DISPATCH_FLOATING_TYPES*, or other dispatch macros to the new v2 API. For ATen kernel files, CUDA kernels, and native operator implementations.
---

# AT_DISPATCH to AT_DISPATCH_V2 Converter

This skill helps convert PyTorch's legacy AT_DISPATCH macros to the new AT_DISPATCH_V2 format, as defined in `aten/src/ATen/Dispatch_v2.h`.

## When to use this skill

Use this skill when:
- Converting AT_DISPATCH_* macros to AT_DISPATCH_V2
- Porting ATen kernels to use the new dispatch API
- Working with files in `aten/src/ATen/native/` that use dispatch macros
- User mentions "AT_DISPATCH", "dispatch v2", "Dispatch_v2.h", or macro conversion

## Quick reference

**Old format:**
```cpp
AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, dtype, "kernel_name", [&]() {
  // lambda body
});
```

**New format:**
```cpp
AT_DISPATCH_V2(dtype, "kernel_name", AT_WRAP([&]() {
  // lambda body
}), AT_EXPAND(AT_ALL_TYPES), kBFloat16, kHalf, kBool);
```

## Key transformations

1. **Reorder arguments**: `scalar_type` and `name` come first, then lambda, then types
2. **Wrap the lambda**: Use `AT_WRAP(lambda)` to handle internal commas
3. **Expand type groups**: Use `AT_EXPAND(AT_ALL_TYPES)` instead of implicit expansion
4. **List individual types**: Add extra types (kHalf, kBFloat16, etc.) after expanded groups
5. **Add include**: `#include <ATen/Dispatch_v2.h>` near other Dispatch includes

## Instructions

### Step 1: Add the Dispatch_v2.h include

Add the v2 header near the existing `#include <ATen/Dispatch.h>`:

```cpp
#include <ATen/Dispatch.h>
#include <ATen/Dispatch_v2.h>
```

Keep the old Dispatch.h include for now (other code may still need it).

### Step 2: Identify the old dispatch pattern

Common patterns to convert (an annotated sketch follows this list):

- `AT_DISPATCH_ALL_TYPES_AND{2,3,4}(type1, type2, ..., scalar_type, name, lambda)`
- `AT_DISPATCH_FLOATING_TYPES_AND{2,3}(type1, type2, ..., scalar_type, name, lambda)`
- `AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND{2,3}(type1, ..., scalar_type, name, lambda)`
- `AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND{2,3}(type1, ..., scalar_type, name, lambda)`
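
To make the components easier to spot, here is a made-up `AND2` call (the op name, tensor, and kernel are hypothetical), labeled argument by argument:

```cpp
// Anatomy of a legacy dispatch call:
AT_DISPATCH_ALL_TYPES_AND2(
    kHalf, kBool,          // individual extra types (the "AND2" part)
    self.scalar_type(),    // scalar_type expression
    "my_op",               // debug name
    [&]() {                // lambda body
      kernel<scalar_t>(self);
    });
```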

### Step 3: Map the old macro to type groups

Identify which type group macro corresponds to the base types:

| Old macro base | AT_DISPATCH_V2 type group |
|----------------|---------------------------|
| `ALL_TYPES` | `AT_EXPAND(AT_ALL_TYPES)` |
| `FLOATING_TYPES` | `AT_EXPAND(AT_FLOATING_TYPES)` |
| `INTEGRAL_TYPES` | `AT_EXPAND(AT_INTEGRAL_TYPES)` |
| `COMPLEX_TYPES` | `AT_EXPAND(AT_COMPLEX_TYPES)` |
| `ALL_TYPES_AND_COMPLEX` | `AT_EXPAND(AT_ALL_TYPES_AND_COMPLEX)` |

For combined patterns, use multiple `AT_EXPAND()` entries:
```cpp
// Old: AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(...)
// New: AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_COMPLEX_TYPES), type1, type2
```

### Step 4: Extract the individual types

From `AT_DISPATCH_*_AND2(type1, type2, ...)` or `AT_DISPATCH_*_AND3(type1, type2, type3, ...)`, extract the individual types (type1, type2, etc.).

These become the trailing arguments after the type group:
```cpp
AT_DISPATCH_V2(..., AT_EXPAND(AT_ALL_TYPES), kBFloat16, kHalf, kBool)
                                             ^^^^^^^^^^^^^^^^^^^^^^^^
                                             Individual types from AND3
```

### Step 5: Transform to AT_DISPATCH_V2

Apply the transformation:

**Pattern:**
```cpp
AT_DISPATCH_V2(
  scalar_type,           // 1st: The dtype expression
  "name",                // 2nd: The debug string
  AT_WRAP(lambda),       // 3rd: The lambda wrapped in AT_WRAP
  type_groups,           // 4th+: Type groups with AT_EXPAND()
  individual_types       // Last: Individual types
)
```

**Example transformation:**
```cpp
// BEFORE
AT_DISPATCH_ALL_TYPES_AND3(
    kBFloat16, kHalf, kBool,
    iter.dtype(),
    "min_values_cuda",
    [&]() {
      min_values_kernel_cuda_impl<scalar_t>(iter);
    }
);

// AFTER
AT_DISPATCH_V2(
    iter.dtype(),
    "min_values_cuda",
    AT_WRAP([&]() {
      min_values_kernel_cuda_impl<scalar_t>(iter);
    }),
    AT_EXPAND(AT_ALL_TYPES),
    kBFloat16, kHalf, kBool
);
```

### Step 6: Handle multi-line lambdas

For lambdas with internal commas or complex expressions, AT_WRAP is essential:

```cpp
AT_DISPATCH_V2(
    dtype,
    "complex_kernel",
    AT_WRAP([&]() {
      gpu_reduce_kernel<scalar_t, scalar_t>(
        iter,
        MinOps<scalar_t>{},
        thrust::pair<scalar_t, int64_t>(upper_bound(), 0)  // Commas inside!
      );
    }),
    AT_EXPAND(AT_ALL_TYPES)
);
```

### Step 7: Verify the conversion

Check that:
- [ ] `AT_WRAP()` wraps the entire lambda
- [ ] Type groups use `AT_EXPAND()`
- [ ] Individual types don't have `AT_EXPAND()` (just `kBFloat16`, not `AT_EXPAND(kBFloat16)`)
- [ ] Argument order is: scalar_type, name, lambda, types
- [ ] Include added: `#include <ATen/Dispatch_v2.h>`

## Type group reference

Available type group macros (use with `AT_EXPAND()`):

```cpp
AT_INTEGRAL_TYPES      // kByte, kChar, kInt, kLong, kShort
AT_FLOATING_TYPES      // kDouble, kFloat
AT_COMPLEX_TYPES       // kComplexDouble, kComplexFloat
AT_QINT_TYPES         // kQInt8, kQUInt8, kQInt32
AT_ALL_TYPES          // INTEGRAL_TYPES + FLOATING_TYPES
AT_ALL_TYPES_AND_COMPLEX  // ALL_TYPES + COMPLEX_TYPES
AT_INTEGRAL_TYPES_V2  // INTEGRAL_TYPES + unsigned types
AT_BAREBONES_UNSIGNED_TYPES  // kUInt16, kUInt32, kUInt64
AT_FLOAT8_TYPES       // Float8 variants
```

## Common patterns

### Pattern: AT_DISPATCH_ALL_TYPES_AND2

```cpp
// Before
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, dtype, "op", [&]() {
  kernel<scalar_t>(data);
});

// After
AT_DISPATCH_V2(dtype, "op", AT_WRAP([&]() {
  kernel<scalar_t>(data);
}), AT_EXPAND(AT_ALL_TYPES), kHalf, kBFloat16);
```

### Pattern: AT_DISPATCH_FLOATING_TYPES_AND3

```cpp
// Before
AT_DISPATCH_FLOATING_TYPES_AND3(kHalf, kBFloat16, kFloat8_e4m3fn,
    tensor.scalar_type(), "float_op", [&] {
  process<scalar_t>(tensor);
});

// After
AT_DISPATCH_V2(tensor.scalar_type(), "float_op", AT_WRAP([&] {
  process<scalar_t>(tensor);
}), AT_EXPAND(AT_FLOATING_TYPES), kHalf, kBFloat16, kFloat8_e4m3fn);
```

### Pattern: AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2

```cpp
// Before
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
    kComplexHalf, kHalf,
    self.scalar_type(),
    "complex_op",
    [&] {
      result = compute<scalar_t>(self);
    }
);

// After
AT_DISPATCH_V2(
    self.scalar_type(),
    "complex_op",
    AT_WRAP([&] {
      result = compute<scalar_t>(self);
    }),
    AT_EXPAND(AT_ALL_TYPES),
    AT_EXPAND(AT_COMPLEX_TYPES),
    kComplexHalf,
    kHalf
);
```

## Edge cases

### Case 1: No extra types (rare)

```cpp
// Before
AT_DISPATCH_ALL_TYPES(dtype, "op", [&]() { kernel<scalar_t>(); });

// After
AT_DISPATCH_V2(dtype, "op", AT_WRAP([&]() {
  kernel<scalar_t>();
}), AT_EXPAND(AT_ALL_TYPES));
```

### Case 2: Many individual types (AND4, AND5, etc.)

```cpp
// Before
AT_DISPATCH_FLOATING_TYPES_AND4(kHalf, kBFloat16, kFloat8_e4m3fn, kFloat8_e5m2,
    dtype, "float8_op", [&]() { kernel<scalar_t>(); });

// After
AT_DISPATCH_V2(dtype, "float8_op", AT_WRAP([&]() {
  kernel<scalar_t>();
}), AT_EXPAND(AT_FLOATING_TYPES), kHalf, kBFloat16, kFloat8_e4m3fn, kFloat8_e5m2);
```

### Case 3: Lambda with no captures

```cpp
// Before
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBool, dtype, "op", []() {
  static_kernel<scalar_t>();
});

// After
AT_DISPATCH_V2(dtype, "op", AT_WRAP([]() {
  static_kernel<scalar_t>();
}), AT_EXPAND(AT_ALL_TYPES), kHalf, kBool);
```

## Benefits of AT_DISPATCH_V2

1. **No arity in macro name**: Don't need different macros for AND2, AND3, AND4
2. **Composable type sets**: Mix and match type groups with `AT_EXPAND()` (sketched below)
3. **Extensible**: Easy to add more types without hitting macro limits
4. **Clearer**: Type groups are explicit, not implicit in macro name
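
As a quick illustration of point 2, a sketch with a placeholder op name and kernel, mixing two type groups and one extra type:

```cpp
// Floating + complex coverage composed from two type groups plus kHalf:
AT_DISPATCH_V2(dtype, "my_op", AT_WRAP([&]() {
  kernel<scalar_t>();
}), AT_EXPAND(AT_FLOATING_TYPES), AT_EXPAND(AT_COMPLEX_TYPES), kHalf);
```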

## Important notes

- Keep `#include <ATen/Dispatch.h>` - other code may need it
- The `AT_WRAP()` is mandatory - prevents comma parsing issues in the lambda
- Type groups need `AT_EXPAND()`, individual types don't
- The v2 API is in `aten/src/ATen/Dispatch_v2.h` - refer to it for full docs
- See the header file for the Python script to regenerate the macro implementation

## Workflow

When asked to convert AT_DISPATCH macros:

1. Read the file to identify all AT_DISPATCH uses
2. Add `#include <ATen/Dispatch_v2.h>` if not present
3. For each dispatch macro:
   - Identify the pattern and extract components
   - Map the base type group
   - Extract individual types
   - Construct the AT_DISPATCH_V2 call
   - Apply with Edit tool
4. Show the user the complete converted file
5. Explain what was changed

Do NOT compile or test the code - focus on accurate conversion only.
@ -1,359 +0,0 @@
 | 
			
		||||
---
 | 
			
		||||
name: docstring
 | 
			
		||||
description: Write docstrings for PyTorch functions and methods following PyTorch conventions. Use when writing or updating docstrings in PyTorch code.
 | 
			
		||||
---
 | 
			
		||||
 | 
			
		||||
# PyTorch Docstring Writing Guide

This skill describes how to write docstrings for functions and methods in the PyTorch project, following the conventions in `torch/_tensor_docs.py` and `torch/nn/functional.py`.

## General Principles

- Use **raw strings** (`r"""..."""`) for all docstrings to avoid issues with LaTeX/math backslashes
- Follow **Sphinx/reStructuredText** (reST) format for documentation
- Be **concise but complete** - include all essential information
- Always include **examples** when possible
- Use **cross-references** to related functions/classes

## Docstring Structure

### 1. Function Signature (First Line)

Start with the function signature showing all parameters:

```python
r"""function_name(param1, param2, *, kwarg1=default1, kwarg2=default2) -> ReturnType
```

**Notes:**
- Include the function name
- Show positional and keyword-only arguments (use `*` separator)
- Include default values
- Show return type annotation
- This line should NOT end with a period

### 2. Brief Description

Provide a one-line description of what the function does:

```python
r"""conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor

Applies a 2D convolution over an input image composed of several input
planes.
```

### 3. Mathematical Formulas (if applicable)

Use Sphinx math directives for mathematical expressions:

```python
.. math::
    \text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}
```

Or inline math: `:math:\`x^2\``

### 4. Cross-References

Link to related classes and functions using Sphinx roles:

- `:class:\`~torch.nn.ModuleName\`` - Link to a class
- `:func:\`torch.function_name\`` - Link to a function
- `:meth:\`~Tensor.method_name\`` - Link to a method
- `:attr:\`attribute_name\`` - Reference an attribute
- The `~` prefix shows only the last component (e.g., `Conv2d` instead of `torch.nn.Conv2d`)

**Example:**
```python
See :class:`~torch.nn.Conv2d` for details and output shape.
```

### 5. Notes and Warnings

Use admonitions for important information:

```python
.. note::
    This function doesn't work directly with NLLLoss,
    which expects the Log to be computed between the Softmax and itself.
    Use log_softmax instead (it's faster and has better numerical properties).

.. warning::
    :func:`new_tensor` always copies :attr:`data`. If you have a Tensor
    ``data`` and want to avoid a copy, use :func:`torch.Tensor.requires_grad_`
    or :func:`torch.Tensor.detach`.
```

### 6. Args Section

Document all parameters with type annotations and descriptions:

```python
Args:
    input (Tensor): input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)`
    weight (Tensor): filters of shape :math:`(\text{out\_channels} , kH , kW)`
    bias (Tensor, optional): optional bias tensor of shape :math:`(\text{out\_channels})`. Default: ``None``
    stride (int or tuple): the stride of the convolving kernel. Can be a single number or a
      tuple `(sH, sW)`. Default: 1
```

**Formatting rules:**
- Parameter name in **lowercase**
- Type in parentheses: `(Type)`, `(Type, optional)` for optional parameters
- Description follows the type
- For optional parameters, include "Default: ``value``" at the end
- Use double backticks for inline code: ``` ``None`` ```
- Indent continuation lines by 2 spaces

### 7. Keyword Args Section (if applicable)

Sometimes keyword arguments are documented separately:

```python
Keyword args:
    dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
        Default: if None, same :class:`torch.dtype` as this tensor.
    device (:class:`torch.device`, optional): the desired device of returned tensor.
        Default: if None, same :class:`torch.device` as this tensor.
    requires_grad (bool, optional): If autograd should record operations on the
        returned tensor. Default: ``False``.
```

### 8. Returns Section (if needed)

Document the return value:

```python
Returns:
    Tensor: Sampled tensor of same shape as `logits` from the Gumbel-Softmax distribution.
        If ``hard=True``, the returned samples will be one-hot, otherwise they will
        be probability distributions that sum to 1 across `dim`.
```

Or simply include it in the function signature line if obvious from context.
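
For instance, a minimal sketch (the function and its docstring are hypothetical, shown only to illustrate the convention):

```python
r"""scale_add(input, other, *, alpha=1) -> Tensor

Returns a new tensor equal to ``input + alpha * other``; the signature
line already states that a Tensor is returned, so no separate Returns
section is needed.
"""
```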

### 9. Examples Section

Always include examples when possible:

```python
Examples::

    >>> inputs = torch.randn(33, 16, 30)
    >>> filters = torch.randn(20, 16, 5)
    >>> F.conv1d(inputs, filters)

    >>> # With square kernels and equal stride
    >>> filters = torch.randn(8, 4, 3, 3)
    >>> inputs = torch.randn(1, 4, 5, 5)
    >>> F.conv2d(inputs, filters, padding=1)
```

**Formatting rules:**
- Use `Examples::` with double colon
- Use `>>>` prompt for Python code
- Include comments with `#` when helpful
- Show actual output when it helps understanding (indent without `>>>`)
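
For example, a short sketch showing output lines indented at the same level as the code, without the prompt (the values shown are illustrative):

```python
Examples::

    >>> a = torch.tensor([1.0, 2.0, 3.0])
    >>> a.sum()
    tensor(6.)
    >>> torch.arange(3)
    tensor([0, 1, 2])
```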

### 10. External References

Link to papers or external documentation:

```python
.. _Link Name:
    https://arxiv.org/abs/1611.00712
```

Reference them in text: ```See `Link Name`_```

## Method Types

### Native Python Functions

For regular Python functions, use a standard docstring:

```python
def relu(input: Tensor, inplace: bool = False) -> Tensor:
    r"""relu(input, inplace=False) -> Tensor

    Applies the rectified linear unit function element-wise. See
    :class:`~torch.nn.ReLU` for more details.
    """
    # implementation
```

### C-Bound Functions (using add_docstr)

For C-bound functions, use `_add_docstr`:

```python
conv1d = _add_docstr(
    torch.conv1d,
    r"""
conv1d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor

Applies a 1D convolution over an input signal composed of several input
planes.

See :class:`~torch.nn.Conv1d` for details and output shape.

Args:
    input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iW)`
    weight: filters of shape :math:`(\text{out\_channels} , kW)`
    ...
""",
)
```

### In-Place Variants

For in-place operations (ending with `_`), reference the original:

```python
add_docstr_all(
    "abs_",
    r"""
abs_() -> Tensor

In-place version of :meth:`~Tensor.abs`
""",
)
```

### Alias Functions

For aliases, simply reference the original:

```python
add_docstr_all(
    "absolute",
    r"""
absolute() -> Tensor

Alias for :func:`abs`
""",
)
```

## Common Patterns

### Shape Documentation

Use LaTeX math notation for tensor shapes:

```python
:math:`(\text{minibatch} , \text{in\_channels} , iH , iW)`
```

### Reusable Argument Definitions

For commonly used arguments, define them once and reuse:

```python
common_args = parse_kwargs(
    """
    dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
        Default: if None, same as this tensor.
"""
)

# Then use with .format():
r"""
...

Keyword args:
    {dtype}
    {device}
""".format(**common_args)
```

### Template Insertion

Insert reproducibility notes or other common text:

```python
r"""
{tf32_note}

{cudnn_reproducibility_note}
""".format(**reproducibility_notes, **tf32_notes)
```

## Complete Example

Here's a complete example showing all elements:

```python
def gumbel_softmax(
    logits: Tensor,
    tau: float = 1,
    hard: bool = False,
    eps: float = 1e-10,
    dim: int = -1,
) -> Tensor:
    r"""
    Sample from the Gumbel-Softmax distribution and optionally discretize.

    Args:
        logits (Tensor): `[..., num_features]` unnormalized log probabilities
        tau (float): non-negative scalar temperature
        hard (bool): if ``True``, the returned samples will be discretized as one-hot vectors,
              but will be differentiated as if it is the soft sample in autograd. Default: ``False``
        dim (int): A dimension along which softmax will be computed. Default: -1

    Returns:
        Tensor: Sampled tensor of same shape as `logits` from the Gumbel-Softmax distribution.
            If ``hard=True``, the returned samples will be one-hot, otherwise they will
            be probability distributions that sum to 1 across `dim`.

    .. note::
        This function is here for legacy reasons and may be removed from nn.Functional in the future.

    Examples::
        >>> logits = torch.randn(20, 32)
        >>> # Sample soft categorical using reparametrization trick:
        >>> F.gumbel_softmax(logits, tau=1, hard=False)
        >>> # Sample hard categorical using "Straight-through" trick:
        >>> F.gumbel_softmax(logits, tau=1, hard=True)

    .. _Link 1:
        https://arxiv.org/abs/1611.00712
    """
    # implementation
```

## Quick Checklist

When writing a PyTorch docstring, ensure:

- [ ] Use raw string (`r"""`)
- [ ] Include function signature on first line
- [ ] Provide brief description
- [ ] Document all parameters in Args section with types
- [ ] Include default values for optional parameters
- [ ] Use Sphinx cross-references (`:func:`, `:class:`, `:meth:`)
- [ ] Add mathematical formulas if applicable
- [ ] Include at least one example in Examples section
- [ ] Add warnings/notes for important caveats
- [ ] Link to related module class with `:class:`
- [ ] Use proper math notation for tensor shapes
- [ ] Follow consistent formatting and indentation

## Common Sphinx Roles Reference

- `:class:\`~torch.nn.Module\`` - Class reference
- `:func:\`torch.function\`` - Function reference
- `:meth:\`~Tensor.method\`` - Method reference
- `:attr:\`attribute\`` - Attribute reference
- `:math:\`equation\`` - Inline math
- `:ref:\`label\`` - Internal reference
- ``` ``code`` ``` - Inline code (use double backticks)
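
As a quick illustration, several of these roles combined in one description (the sentence itself is hypothetical, though the referenced APIs exist):

```python
See :class:`~torch.nn.Softmax` for the module form and :func:`torch.softmax`
for the functional form; the result sums to 1 along :attr:`dim`, i.e.
:math:`\sum_j \text{Softmax}(x)_j = 1`. Use ``dim=-1`` for the last dimension.
```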

## Additional Notes

- **Indentation**: Use 4 spaces for code, 2 spaces for continuation of parameter descriptions
- **Line length**: Try to keep lines under 100 characters when possible
- **Periods**: End sentences with periods, but not the signature line
- **Backticks**: Use double backticks for code: ``` ``True`` ``None`` ``False`` ```
- **Types**: Common types are `Tensor`, `int`, `float`, `bool`, `str`, `tuple`, `list`, etc.

@ -1,385 +0,0 @@
---
name: skill-writer
description: Guide users through creating Agent Skills for Claude Code. Use when the user wants to create, write, author, or design a new Skill, or needs help with SKILL.md files, frontmatter, or skill structure.
---

# Skill Writer

This Skill helps you create well-structured Agent Skills for Claude Code that follow best practices and validation requirements.

## When to use this Skill

Use this Skill when:
- Creating a new Agent Skill
- Writing or updating SKILL.md files
- Designing skill structure and frontmatter
- Troubleshooting skill discovery issues
- Converting existing prompts or workflows into Skills

## Instructions

### Step 1: Determine Skill scope

First, understand what the Skill should do:

1. **Ask clarifying questions**:
   - What specific capability should this Skill provide?
   - When should Claude use this Skill?
   - What tools or resources does it need?
   - Is this for personal use or team sharing?

2. **Keep it focused**: One Skill = one capability
   - Good: "PDF form filling", "Excel data analysis"
   - Too broad: "Document processing", "Data tools"

### Step 2: Choose Skill location

Determine where to create the Skill:

**Personal Skills** (`~/.claude/skills/`):
- Individual workflows and preferences
- Experimental Skills
- Personal productivity tools

**Project Skills** (`.claude/skills/`):
- Team workflows and conventions
- Project-specific expertise
- Shared utilities (committed to git)

### Step 3: Create Skill structure

Create the directory and files:

```bash
# Personal
mkdir -p ~/.claude/skills/skill-name

# Project
mkdir -p .claude/skills/skill-name
```

For multi-file Skills:
```
skill-name/
├── SKILL.md (required)
├── reference.md (optional)
├── examples.md (optional)
├── scripts/
│   └── helper.py (optional)
└── templates/
    └── template.txt (optional)
```

### Step 4: Write SKILL.md frontmatter

Create YAML frontmatter with required fields:

```yaml
---
name: skill-name
description: Brief description of what this does and when to use it
---
```

**Field requirements**:

- **name**:
  - Lowercase letters, numbers, hyphens only
  - Max 64 characters
  - Must match directory name
  - Good: `pdf-processor`, `git-commit-helper`
  - Bad: `PDF_Processor`, `Git Commits!`

- **description**:
  - Max 1024 characters
  - Include BOTH what it does AND when to use it
  - Use specific trigger words users would say
  - Mention file types, operations, and context

**Optional frontmatter fields**:

- **allowed-tools**: Restrict tool access (comma-separated list)
  ```yaml
  allowed-tools: Read, Grep, Glob
  ```
  Use for:
  - Read-only Skills
  - Security-sensitive workflows
  - Limited-scope operations

### Step 5: Write effective descriptions

The description is critical for Claude to discover your Skill.

**Formula**: `[What it does] + [When to use it] + [Key triggers]`

**Examples**:

✅ **Good**:
```yaml
description: Extract text and tables from PDF files, fill forms, merge documents. Use when working with PDF files or when the user mentions PDFs, forms, or document extraction.
```

✅ **Good**:
```yaml
description: Analyze Excel spreadsheets, create pivot tables, and generate charts. Use when working with Excel files, spreadsheets, or analyzing tabular data in .xlsx format.
```

❌ **Too vague**:
```yaml
description: Helps with documents
description: For data analysis
```

**Tips**:
- Include specific file extensions (.pdf, .xlsx, .json)
- Mention common user phrases ("analyze", "extract", "generate")
- List concrete operations (not generic verbs)
- Add context clues ("Use when...", "For...")

### Step 6: Structure the Skill content

Use clear Markdown sections:

```markdown
# Skill Name

Brief overview of what this Skill does.

## Quick start

Provide a simple example to get started immediately.

## Instructions

Step-by-step guidance for Claude:
1. First step with clear action
2. Second step with expected outcome
3. Handle edge cases

## Examples

Show concrete usage examples with code or commands.

## Best practices

- Key conventions to follow
- Common pitfalls to avoid
- When to use vs. not use

## Requirements

List any dependencies or prerequisites:
\`\`\`bash
pip install package-name
\`\`\`

## Advanced usage

For complex scenarios, see [reference.md](reference.md).
```

### Step 7: Add supporting files (optional)

Create additional files for progressive disclosure:

**reference.md**: Detailed API docs, advanced options
**examples.md**: Extended examples and use cases
**scripts/**: Helper scripts and utilities
**templates/**: File templates or boilerplate

Reference them from SKILL.md:
```markdown
For advanced usage, see [reference.md](reference.md).

Run the helper script:
\`\`\`bash
python scripts/helper.py input.txt
\`\`\`
```

### Step 8: Validate the Skill

Check these requirements:

✅ **File structure**:
- [ ] SKILL.md exists in correct location
- [ ] Directory name matches frontmatter `name`

✅ **YAML frontmatter**:
- [ ] Opening `---` on line 1
- [ ] Closing `---` before content
- [ ] Valid YAML (no tabs, correct indentation)
- [ ] `name` follows naming rules
- [ ] `description` is specific and < 1024 chars

✅ **Content quality**:
- [ ] Clear instructions for Claude
- [ ] Concrete examples provided
- [ ] Edge cases handled
- [ ] Dependencies listed (if any)

✅ **Testing**:
- [ ] Description matches user questions
- [ ] Skill activates on relevant queries
- [ ] Instructions are clear and actionable
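
As a rough aid for the frontmatter checks above, here is a minimal Python sketch (not part of Claude Code itself; it assumes PyYAML is installed):

```python
import re
import sys

import yaml  # assumed dependency: pip install pyyaml


def validate_skill(path: str) -> list[str]:
    """Return a list of problems found in a SKILL.md file's frontmatter."""
    errors = []
    text = open(path, encoding="utf-8").read()
    # Frontmatter must open with '---' on line 1 and close with another '---'.
    match = re.match(r"^---\n(.*?)\n---\n", text, re.DOTALL)
    if not match:
        return ["missing or malformed YAML frontmatter"]
    meta = yaml.safe_load(match.group(1)) or {}
    name = meta.get("name") or ""
    description = meta.get("description") or ""
    if not re.fullmatch(r"[a-z0-9-]{1,64}", name):
        errors.append("name: lowercase letters, numbers, hyphens only; max 64 chars")
    if not description or len(description) > 1024:
        errors.append("description: required and must be under 1024 characters")
    return errors


if __name__ == "__main__":
    problems = validate_skill(sys.argv[1])
    print("\n".join(problems) if problems else "OK")
```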

### Step 9: Test the Skill

1. **Restart Claude Code** (if running) to load the Skill

2. **Ask relevant questions** that match the description:
   ```
   Can you help me extract text from this PDF?
   ```

3. **Verify activation**: Claude should use the Skill automatically

4. **Check behavior**: Confirm Claude follows the instructions correctly

### Step 10: Debug if needed

If Claude doesn't use the Skill:

1. **Make description more specific**:
   - Add trigger words
   - Include file types
   - Mention common user phrases

2. **Check file location**:
   ```bash
   ls ~/.claude/skills/skill-name/SKILL.md
   ls .claude/skills/skill-name/SKILL.md
   ```

3. **Validate YAML**:
   ```bash
   cat SKILL.md | head -n 10
   ```

4. **Run debug mode**:
   ```bash
   claude --debug
   ```

## Common patterns

### Read-only Skill

```yaml
---
name: code-reader
description: Read and analyze code without making changes. Use for code review, understanding codebases, or documentation.
allowed-tools: Read, Grep, Glob
---
```

### Script-based Skill

```yaml
---
name: data-processor
description: Process CSV and JSON data files with Python scripts. Use when analyzing data files or transforming datasets.
---

# Data Processor

## Instructions

1. Use the processing script:
\`\`\`bash
python scripts/process.py input.csv --output results.json
\`\`\`

2. Validate output with:
\`\`\`bash
python scripts/validate.py results.json
\`\`\`
```

### Multi-file Skill with progressive disclosure

```yaml
---
name: api-designer
description: Design REST APIs following best practices. Use when creating API endpoints, designing routes, or planning API architecture.
---

# API Designer

Quick start: See [examples.md](examples.md)

Detailed reference: See [reference.md](reference.md)

## Instructions

1. Gather requirements
2. Design endpoints (see examples.md)
3. Document with OpenAPI spec
4. Review against best practices (see reference.md)
```

## Best practices for Skill authors

1. **One Skill, one purpose**: Don't create mega-Skills
2. **Specific descriptions**: Include trigger words users will say
3. **Clear instructions**: Write for Claude, not humans
4. **Concrete examples**: Show real code, not pseudocode
5. **List dependencies**: Mention required packages in description
6. **Test with teammates**: Verify activation and clarity
7. **Version your Skills**: Document changes in content
8. **Use progressive disclosure**: Put advanced details in separate files

## Validation checklist

Before finalizing a Skill, verify:

- [ ] Name is lowercase, hyphens only, max 64 chars
- [ ] Description is specific and < 1024 chars
- [ ] Description includes "what" and "when"
- [ ] YAML frontmatter is valid
- [ ] Instructions are step-by-step
- [ ] Examples are concrete and realistic
- [ ] Dependencies are documented
- [ ] File paths use forward slashes
- [ ] Skill activates on relevant queries
- [ ] Claude follows instructions correctly

## Troubleshooting

**Skill doesn't activate**:
- Make description more specific with trigger words
- Include file types and operations in description
- Add "Use when..." clause with user phrases

**Multiple Skills conflict**:
- Make descriptions more distinct
- Use different trigger words
- Narrow the scope of each Skill

**Skill has errors**:
- Check YAML syntax (no tabs, proper indentation)
- Verify file paths (use forward slashes)
- Ensure scripts have execute permissions
- List all dependencies

## Examples

See the documentation for complete examples:
- Simple single-file Skill (commit-helper)
- Skill with tool permissions (code-reviewer)
- Multi-file Skill (pdf-processing)

## Output format

When creating a Skill, I will:

1. Ask clarifying questions about scope and requirements
2. Suggest a Skill name and location
3. Create the SKILL.md file with proper frontmatter
4. Include clear instructions and examples
5. Add supporting files if needed
6. Provide testing instructions
7. Validate against all requirements

The result will be a complete, working Skill that follows all best practices and validation rules.
		||||
							
								
								
									
										4
									
								
								.github/actions/diskspace-cleanup/action.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										4
									
								
								.github/actions/diskspace-cleanup/action.yml
									
									
									
									
										vendored
									
									
								
							@ -27,9 +27,7 @@ runs:
 | 
			
		||||
            docker system prune -af
 | 
			
		||||
            diskspace_new=$(df -H --output=pcent ${docker_root_dir} | sed -n 2p | sed 's/%//' | sed 's/ //')
 | 
			
		||||
            if [[ "$diskspace_new" -gt "$diskspace_cutoff" ]] ; then
 | 
			
		||||
                diskspace_cutoff_int=$((diskspace_cutoff + 0))
 | 
			
		||||
                difference=$((100 - diskspace_cutoff_int))
 | 
			
		||||
                echo "Error: Available diskspace is less than $difference percent. Not enough diskspace."
 | 
			
		||||
                echo "Error: Available diskspace is less than $diskspace_cutoff percent. Not enough diskspace."
 | 
			
		||||
                echo "$msg"
 | 
			
		||||
                exit 1
 | 
			
		||||
            else
 | 
			
		||||
 | 
			
		||||
							
								
								
									
										7
									
								
								.github/actions/setup-rocm/action.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										7
									
								
								.github/actions/setup-rocm/action.yml
									
									
									
									
										vendored
									
									
								
							@ -124,10 +124,3 @@ runs:
 | 
			
		||||
      id: login-ecr
 | 
			
		||||
      continue-on-error: true
 | 
			
		||||
      uses: aws-actions/amazon-ecr-login@062b18b96a7aff071d4dc91bc00c4c1a7945b076 # v2.0.1
 | 
			
		||||
 | 
			
		||||
    - name: Preserve github env variables for use in docker
 | 
			
		||||
      shell: bash
 | 
			
		||||
      run: |
 | 
			
		||||
        env | grep '^GITHUB' >> "${RUNNER_TEMP}/github_env_${GITHUB_RUN_ID}"
 | 
			
		||||
        env | grep '^CI' >> "${RUNNER_TEMP}/github_env_${GITHUB_RUN_ID}"
 | 
			
		||||
        env | grep '^RUNNER' >> "${RUNNER_TEMP}/github_env_${GITHUB_RUN_ID}"
 | 
			
		||||
 | 
			
		||||
							
								
								
									
										2
									
								
								.github/ci_commit_pins/audio.txt
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										2
									
								
								.github/ci_commit_pins/audio.txt
									
									
									
									
										vendored
									
									
								
							@ -1 +1 @@
 | 
			
		||||
3b0e7a6f192ca2715e7e6cbe5db007aea7165fe2
 | 
			
		||||
69bbe7363897764f9e758d851cd0340147d27f94
 | 
			
		||||
 | 
			
		||||
							
								
								
									
										2
									
								
								.github/ci_commit_pins/vision.txt
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										2
									
								
								.github/ci_commit_pins/vision.txt
									
									
									
									
										vendored
									
									
								
							@ -1 +1 @@
 | 
			
		||||
cfbc5c2f1c798991715a6b06bb3ce46478c4487c
 | 
			
		||||
faffd5cf673615583da6517275e361cb3dbc77e6
 | 
			
		||||
 | 
			
		||||
							
								
								
									
										2
									
								
								.github/ci_commit_pins/xla.txt
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										2
									
								
								.github/ci_commit_pins/xla.txt
									
									
									
									
										vendored
									
									
								
							@ -1 +1 @@
 | 
			
		||||
c8b09f5f77d6bf6fb7ed7a9aa83e5d8156b3a5e9
 | 
			
		||||
0fa6e3129e61143224663e1ec67980d12b7ec4eb
 | 
			
		||||
 | 
			
		||||
							
								
								
									
										5
									
								
								.github/ci_configs/vllm/Dockerfile
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										5
									
								
								.github/ci_configs/vllm/Dockerfile
									
									
									
									
										vendored
									
									
								
							@ -283,9 +283,6 @@ RUN --mount=type=bind,source=${TORCH_WHEELS_PATH},target=/dist \
 | 
			
		||||
        uv pip install --system $(cat torch_build_versions.txt | xargs) --index-url https://download.pytorch.org/whl/nightly/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.'); \
 | 
			
		||||
    fi
 | 
			
		||||
 | 
			
		||||
RUN --mount=type=cache,target=/root/.cache/uv \
 | 
			
		||||
    uv pip install --system --pre apache-tvm-ffi==0.1.0b15
 | 
			
		||||
 | 
			
		||||
# Install the vllm wheel from previous stage
 | 
			
		||||
RUN --mount=type=cache,target=/root/.cache/uv \
 | 
			
		||||
    uv pip install --system /wheels/vllm/*.whl --verbose
 | 
			
		||||
@ -298,8 +295,6 @@ RUN --mount=type=cache,target=/root/.cache/uv \
 | 
			
		||||
ARG torch_cuda_arch_list='8.0;8.9;9.0a;10.0a;12.0'
 | 
			
		||||
ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list}
 | 
			
		||||
 | 
			
		||||
# TODO(elainewy): remove this once vllm commit is updated, and install flashinfer from pip
 | 
			
		||||
# see https://github.com/pytorch/pytorch/pull/165274#issuecomment-3408531784
 | 
			
		||||
ARG FLASHINFER_GIT_REPO="https://github.com/flashinfer-ai/flashinfer.git"
 | 
			
		||||
ARG FLASHINFER_GIT_REF="v0.2.14.post1"
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
							
								
								
									
										9
									
								
								.github/label_to_label.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										9
									
								
								.github/label_to_label.yml
									
									
									
									
										vendored
									
									
								
							@ -15,11 +15,6 @@
 | 
			
		||||
  - "module: reinplacing"
 | 
			
		||||
  then:
 | 
			
		||||
  - "module: pt2-dispatcher"
 | 
			
		||||
- any:
 | 
			
		||||
  - "vllm-compile"
 | 
			
		||||
  then:
 | 
			
		||||
  - "module: vllm"
 | 
			
		||||
  - "oncall: pt2"
 | 
			
		||||
- any:
 | 
			
		||||
  - "module: vmap"
 | 
			
		||||
  then:
 | 
			
		||||
@ -32,6 +27,10 @@
 | 
			
		||||
  - "module: pt2 optimizer"
 | 
			
		||||
  then:
 | 
			
		||||
  - "module: dynamo"
 | 
			
		||||
- any:
 | 
			
		||||
  - "module: flex attention"
 | 
			
		||||
  then:
 | 
			
		||||
  - "module: higher order operators"
 | 
			
		||||
- any:
 | 
			
		||||
  - "module: aotinductor"
 | 
			
		||||
  then:
 | 
			
		||||
 | 
			
		||||
							
								
								
									
										20
									
								
								.github/merge_rules.yaml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										20
									
								
								.github/merge_rules.yaml
									
									
									
									
										vendored
									
									
								
							@ -540,26 +540,6 @@
 | 
			
		||||
  - Lint
 | 
			
		||||
  - pull
 | 
			
		||||
 | 
			
		||||
- name: PrivateUse1
 | 
			
		||||
  patterns:
 | 
			
		||||
  - torch/accelerator/**
 | 
			
		||||
  - torch/utils/backend_registration.py
 | 
			
		||||
  - torch/csrc/acc/**
 | 
			
		||||
  - torch/csrc/DeviceAccelerator.*
 | 
			
		||||
  - torch/csrc/profiler/standalone/privateuse1_observer.*
 | 
			
		||||
  - aten/src/ATen/DeviceAccelerator.*
 | 
			
		||||
  - aten/src/ATen/core/GeneratorForPrivateuseone.*
 | 
			
		||||
  - aten/src/ATen/detail/PrivateUse1HooksInterface.*
 | 
			
		||||
  - docs/source/accelerator/**
 | 
			
		||||
  - test/cpp_extensions/open_registration_extension/torch_openreg/**
 | 
			
		||||
  approved_by:
 | 
			
		||||
  - albanD
 | 
			
		||||
  - fffrog
 | 
			
		||||
  mandatory_checks_name:
 | 
			
		||||
  - EasyCLA
 | 
			
		||||
  - Lint
 | 
			
		||||
  - pull
 | 
			
		||||
 | 
			
		||||
- name: superuser
 | 
			
		||||
  patterns:
 | 
			
		||||
  - '*'
 | 
			
		||||
 | 
			
		||||
							
								
								
									
										2
									
								
								.github/pytorch-probot.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										2
									
								
								.github/pytorch-probot.yml
									
									
									
									
										vendored
									
									
								
							@ -19,7 +19,6 @@ ciflow_push_tags:
 | 
			
		||||
- ciflow/inductor-perf-test-nightly-rocm-mi300
 | 
			
		||||
- ciflow/inductor-perf-test-nightly-rocm-mi355
 | 
			
		||||
- ciflow/inductor-perf-test-nightly-x86-zen
 | 
			
		||||
- ciflow/inductor-perf-test-nightly-xpu
 | 
			
		||||
- ciflow/inductor-periodic
 | 
			
		||||
- ciflow/inductor-rocm
 | 
			
		||||
- ciflow/linux-aarch64
 | 
			
		||||
@ -27,7 +26,6 @@ ciflow_push_tags:
 | 
			
		||||
- ciflow/nightly
 | 
			
		||||
- ciflow/op-benchmark
 | 
			
		||||
- ciflow/periodic
 | 
			
		||||
- ciflow/periodic-rocm-mi200
 | 
			
		||||
- ciflow/periodic-rocm-mi300
 | 
			
		||||
- ciflow/pull
 | 
			
		||||
- ciflow/quantization-periodic
 | 
			
		||||
 | 
			
		||||
							
								
								
									
										119
									
								
								.github/scripts/generate_binary_build_matrix.py
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										119
									
								
								.github/scripts/generate_binary_build_matrix.py
									
									
									
									
										vendored
									
									
								
							@ -11,24 +11,18 @@ architectures:
 | 
			
		||||
    * Latest XPU
 | 
			
		||||
"""
 | 
			
		||||
 | 
			
		||||
import json
 | 
			
		||||
import os
 | 
			
		||||
import re
 | 
			
		||||
from pathlib import Path
 | 
			
		||||
from typing import Optional
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
SCRIPT_DIR = Path(__file__).absolute().parent
 | 
			
		||||
REPO_ROOT = SCRIPT_DIR.parent.parent
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
# NOTE: Please also update the CUDA sources in `PIP_SOURCES` in tools/nightly.py when changing this
 | 
			
		||||
CUDA_ARCHES = ["12.6", "12.8", "12.9", "13.0"]
 | 
			
		||||
CUDA_STABLE = "12.8"
 | 
			
		||||
CUDA_ARCHES_FULL_VERSION = {
 | 
			
		||||
    "12.6": "12.6.3",
 | 
			
		||||
    "12.8": "12.8.1",
 | 
			
		||||
    "12.9": "12.9.1",
 | 
			
		||||
    "13.0": "13.0.2",
 | 
			
		||||
    "13.0": "13.0.0",
 | 
			
		||||
}
 | 
			
		||||
CUDA_ARCHES_CUDNN_VERSION = {
 | 
			
		||||
    "12.6": "9",
 | 
			
		||||
@ -37,7 +31,8 @@ CUDA_ARCHES_CUDNN_VERSION = {
 | 
			
		||||
    "13.0": "9",
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
ROCM_ARCHES = ["7.0", "7.1"]
 | 
			
		||||
# NOTE: Please also update the ROCm sources in `PIP_SOURCES` in tools/nightly.py when changing this
 | 
			
		||||
ROCM_ARCHES = ["6.4", "7.0"]
 | 
			
		||||
 | 
			
		||||
XPU_ARCHES = ["xpu"]
 | 
			
		||||
 | 
			
		||||
@ -61,7 +56,7 @@ PYTORCH_EXTRA_INSTALL_REQUIREMENTS = {
 | 
			
		||||
        "nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-nvshmem-cu12==3.4.5; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'"
 | 
			
		||||
@ -78,7 +73,7 @@ PYTORCH_EXTRA_INSTALL_REQUIREMENTS = {
 | 
			
		||||
        "nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-nvshmem-cu12==3.4.5; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'"
 | 
			
		||||
@ -95,27 +90,27 @@ PYTORCH_EXTRA_INSTALL_REQUIREMENTS = {
 | 
			
		||||
        "nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-nvshmem-cu12==3.4.5; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux'"
 | 
			
		||||
    ),
 | 
			
		||||
    "13.0": (
 | 
			
		||||
        "nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-cublas==13.1.0.3; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-cufft==12.0.0.61; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-cublas==13.0.0.19; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-cufft==12.0.0.15; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-curand==10.4.0.35; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-nvshmem-cu13==3.4.5; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-nvtx==13.0.85; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-cufile==1.15.1.6; platform_system == 'Linux'"
 | 
			
		||||
        "nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-nvtx==13.0.39; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | "
 | 
			
		||||
        "nvidia-cufile==1.15.0.42; platform_system == 'Linux'"
 | 
			
		||||
    ),
 | 
			
		||||
    "xpu": (
 | 
			
		||||
        "intel-cmplr-lib-rt==2025.2.1 | "
 | 
			
		||||
@ -142,48 +137,9 @@ PYTORCH_EXTRA_INSTALL_REQUIREMENTS = {
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
# Used by tools/nightly.py
 | 
			
		||||
PYTORCH_NIGHTLY_PIP_INDEX_URL = "https://download.pytorch.org/whl/nightly"
 | 
			
		||||
NIGHTLY_SOURCE_MATRIX = {
 | 
			
		||||
    "cpu": dict(
 | 
			
		||||
        name="cpu",
 | 
			
		||||
        index_url=f"{PYTORCH_NIGHTLY_PIP_INDEX_URL}/cpu",
 | 
			
		||||
        supported_platforms=["Linux", "macOS", "Windows"],
 | 
			
		||||
        accelerator="cpu",
 | 
			
		||||
    )
 | 
			
		||||
}
 | 
			
		||||
CUDA_NIGHTLY_SOURCE_MATRIX = {
 | 
			
		||||
    f"cuda-{major}.{minor}": dict(
 | 
			
		||||
        name=f"cuda-{major}.{minor}",
 | 
			
		||||
        index_url=f"{PYTORCH_NIGHTLY_PIP_INDEX_URL}/cu{major}{minor}",
 | 
			
		||||
        supported_platforms=["Linux", "Windows"],
 | 
			
		||||
        accelerator="cuda",
 | 
			
		||||
    )
 | 
			
		||||
    for major, minor in (map(int, version.split(".")) for version in CUDA_ARCHES)
 | 
			
		||||
}
 | 
			
		||||
ROCM_NIGHTLY_SOURCE_MATRIX = {
 | 
			
		||||
    f"rocm-{major}.{minor}": dict(
 | 
			
		||||
        name=f"rocm-{major}.{minor}",
 | 
			
		||||
        index_url=f"{PYTORCH_NIGHTLY_PIP_INDEX_URL}/rocm{major}.{minor}",
 | 
			
		||||
        supported_platforms=["Linux"],
 | 
			
		||||
        accelerator="rocm",
 | 
			
		||||
    )
 | 
			
		||||
    for major, minor in (map(int, version.split(".")) for version in ROCM_ARCHES)
 | 
			
		||||
}
 | 
			
		||||
XPU_NIGHTLY_SOURCE_MATRIX = {
 | 
			
		||||
    "xpu": dict(
 | 
			
		||||
        name="xpu",
 | 
			
		||||
        index_url=f"{PYTORCH_NIGHTLY_PIP_INDEX_URL}/xpu",
 | 
			
		||||
        supported_platforms=["Linux"],
 | 
			
		||||
        accelerator="xpu",
 | 
			
		||||
    )
 | 
			
		||||
}
 | 
			
		||||
NIGHTLY_SOURCE_MATRIX.update(CUDA_NIGHTLY_SOURCE_MATRIX)
 | 
			
		||||
NIGHTLY_SOURCE_MATRIX.update(ROCM_NIGHTLY_SOURCE_MATRIX)
 | 
			
		||||
NIGHTLY_SOURCE_MATRIX.update(XPU_NIGHTLY_SOURCE_MATRIX)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def get_nccl_wheel_version(arch_version: str) -> str:
 | 
			
		||||
    import re
 | 
			
		||||
 | 
			
		||||
    requirements = map(
 | 
			
		||||
        str.strip, re.split("[;|]", PYTORCH_EXTRA_INSTALL_REQUIREMENTS[arch_version])
 | 
			
		||||
    )
 | 
			
		||||
@ -191,14 +147,17 @@ def get_nccl_wheel_version(arch_version: str) -> str:
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def read_nccl_pin(arch_version: str) -> str:
 | 
			
		||||
    nccl_pin_path = (
 | 
			
		||||
        REPO_ROOT
 | 
			
		||||
        / ".ci"
 | 
			
		||||
        / "docker"
 | 
			
		||||
        / "ci_commit_pins"
 | 
			
		||||
        / f"nccl-cu{arch_version[:2]}.txt"
 | 
			
		||||
    from pathlib import Path
 | 
			
		||||
 | 
			
		||||
    nccl_pin_path = os.path.join(
 | 
			
		||||
        Path(__file__).absolute().parents[2],
 | 
			
		||||
        ".ci",
 | 
			
		||||
        "docker",
 | 
			
		||||
        "ci_commit_pins",
 | 
			
		||||
        f"nccl-cu{arch_version[:2]}.txt",
 | 
			
		||||
    )
 | 
			
		||||
    return nccl_pin_path.read_text().strip()
 | 
			
		||||
    with open(nccl_pin_path) as f:
 | 
			
		||||
        return f.read().strip()
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def validate_nccl_dep_consistency(arch_version: str) -> None:
 | 
			
		||||
@ -206,8 +165,7 @@ def validate_nccl_dep_consistency(arch_version: str) -> None:
 | 
			
		||||
    wheel_ver = get_nccl_wheel_version(arch_version)
 | 
			
		||||
    if not nccl_release_tag.startswith(f"v{wheel_ver}"):
 | 
			
		||||
        raise RuntimeError(
 | 
			
		||||
            f"{arch_version} NCCL release tag version {nccl_release_tag} "
 | 
			
		||||
            f"does not correspond to wheel version {wheel_ver}"
 | 
			
		||||
            f"{arch_version} NCCL release tag version {nccl_release_tag} does not correspond to wheel version {wheel_ver}"
 | 
			
		||||
        )
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@ -454,14 +412,7 @@ def generate_wheels_matrix(
 | 
			
		||||
    return ret
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
arch_version = ""
 | 
			
		||||
for arch_version in CUDA_ARCHES:
 | 
			
		||||
    validate_nccl_dep_consistency(arch_version)
 | 
			
		||||
del arch_version
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
if __name__ == "__main__":
 | 
			
		||||
    # Used by tools/nightly.py
 | 
			
		||||
    (SCRIPT_DIR / "nightly_source_matrix.json").write_text(
 | 
			
		||||
        json.dumps(NIGHTLY_SOURCE_MATRIX, indent=4) + "\n"
 | 
			
		||||
    )
 | 
			
		||||
validate_nccl_dep_consistency("13.0")
 | 
			
		||||
validate_nccl_dep_consistency("12.9")
 | 
			
		||||
validate_nccl_dep_consistency("12.8")
 | 
			
		||||
validate_nccl_dep_consistency("12.6")
 | 
			
		||||
 | 
			
		||||
@ -79,9 +79,9 @@ jobs:
 | 
			
		||||
    runs-on: "windows-11-arm64-preview"
 | 
			
		||||
    {%- else %}
 | 
			
		||||
    {%- if branches == "nightly" %}
 | 
			
		||||
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
 | 
			
		||||
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 | 
			
		||||
    {%- else %}
 | 
			
		||||
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge.nonephemeral"
 | 
			
		||||
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral"
 | 
			
		||||
    {%- endif %}
 | 
			
		||||
    {%- endif %}
 | 
			
		||||
    timeout-minutes: !{{ common.timeout_minutes_windows_binary }}
 | 
			
		||||
 | 
			
		||||
							
								
								
									
										13
									
								
								.github/workflows/_xpu-test.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										13
									
								
								.github/workflows/_xpu-test.yml
									
									
									
									
										vendored
									
									
								
							@ -38,10 +38,6 @@ on:
 | 
			
		||||
        default: ""
 | 
			
		||||
        description: |
 | 
			
		||||
          List of tests to include (empty string implies default list)
 | 
			
		||||
      dashboard-tag:
 | 
			
		||||
        required: false
 | 
			
		||||
        type: string
 | 
			
		||||
        default: ""
 | 
			
		||||
      disable-monitor:
 | 
			
		||||
        description: |
 | 
			
		||||
          [Experimental] Disable utilization monitoring for tests.
 | 
			
		||||
@ -62,11 +58,6 @@ on:
 | 
			
		||||
        required: false
 | 
			
		||||
        type: number
 | 
			
		||||
        default: 1
 | 
			
		||||
    secrets:
 | 
			
		||||
      HUGGING_FACE_HUB_TOKEN:
 | 
			
		||||
        required: false
 | 
			
		||||
        description: |
 | 
			
		||||
          HF Auth token to avoid rate limits when downloading models or datasets from hub
 | 
			
		||||
permissions:
 | 
			
		||||
  id-token: write
 | 
			
		||||
  contents: read
 | 
			
		||||
@ -205,8 +196,6 @@ jobs:
 | 
			
		||||
          PYTORCH_TEST_CUDA_MEM_LEAK_CHECK: ${{ matrix.mem_leak_check && '1' || '0' }}
 | 
			
		||||
          PYTORCH_TEST_RERUN_DISABLED_TESTS: ${{ matrix.rerun_disabled_tests && '1' || '0' }}
 | 
			
		||||
          TESTS_TO_INCLUDE: ${{ inputs.tests-to-include }}
 | 
			
		||||
          DASHBOARD_TAG: ${{ inputs.dashboard-tag }}
 | 
			
		||||
          HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
 | 
			
		||||
        timeout-minutes: ${{ fromJson(steps.test-timeout.outputs.timeout) }}
 | 
			
		||||
        run: |
 | 
			
		||||
          # Fetch aws credential from IMDs
 | 
			
		||||
@ -257,8 +246,6 @@ jobs:
 | 
			
		||||
            -e PYTORCH_TEST_RERUN_DISABLED_TESTS \
 | 
			
		||||
            -e TESTS_TO_INCLUDE \
 | 
			
		||||
            -e ZE_AFFINITY_MASK \
 | 
			
		||||
            -e HUGGING_FACE_HUB_TOKEN \
 | 
			
		||||
            -e DASHBOARD_TAG \
 | 
			
		||||
            --env-file="/tmp/github_env_${GITHUB_RUN_ID}" \
 | 
			
		||||
            --ulimit stack=10485760:83886080 \
 | 
			
		||||
            --ulimit core=0 \
 | 
			
		||||
 | 
			
		||||
							
								
								
									
										2
									
								
								.github/workflows/build-almalinux-images.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										2
									
								
								.github/workflows/build-almalinux-images.yml
									
									
									
									
										vendored
									
									
								
							@ -36,7 +36,7 @@ jobs:
 | 
			
		||||
    runs-on: linux.9xlarge.ephemeral
 | 
			
		||||
    strategy:
 | 
			
		||||
      matrix:
 | 
			
		||||
        tag: ["cuda12.6", "cuda12.8", "cuda12.9", "cuda13.0", "rocm7.0", "rocm7.1", "cpu"]
 | 
			
		||||
        tag: ["cuda12.6", "cuda12.8", "cuda12.9", "cuda13.0", "rocm6.4", "rocm7.0", "cpu"]
 | 
			
		||||
    steps:
 | 
			
		||||
      - name: Build docker image
 | 
			
		||||
        uses: pytorch/pytorch/.github/actions/binary-docker-build@main
 | 
			
		||||
 | 
			
		||||
							
								
								
									
										2
									
								
								.github/workflows/build-libtorch-images.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										2
									
								
								.github/workflows/build-libtorch-images.yml
									
									
									
									
										vendored
									
									
								
							@ -52,8 +52,8 @@ jobs:
 | 
			
		||||
          { tag: "cuda12.9" },
 | 
			
		||||
          { tag: "cuda12.8" },
 | 
			
		||||
          { tag: "cuda12.6" },
 | 
			
		||||
          { tag: "rocm6.4"  },
 | 
			
		||||
          { tag: "rocm7.0"  },
 | 
			
		||||
          { tag: "rocm7.1"  },
 | 
			
		||||
          { tag: "cpu"      },
 | 
			
		||||
        ]
 | 
			
		||||
    steps:
 | 
			
		||||
 | 
			
		||||
							
								
								
									
										2
									
								
								.github/workflows/build-magma-rocm-linux.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										2
									
								
								.github/workflows/build-magma-rocm-linux.yml
									
									
									
									
										vendored
									
									
								
							@ -34,7 +34,7 @@ jobs:
 | 
			
		||||
      id-token: write
 | 
			
		||||
    strategy:
 | 
			
		||||
      matrix:
 | 
			
		||||
        rocm_version: ["71", "70"]
 | 
			
		||||
        rocm_version: ["70", "64"]
 | 
			
		||||
    steps:
 | 
			
		||||
      - name: Checkout PyTorch
 | 
			
		||||
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
 | 
			
		||||
 | 
			
		||||
							
								
								
									
										2
									
								
								.github/workflows/build-manywheel-images.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										2
									
								
								.github/workflows/build-manywheel-images.yml
									
									
									
									
										vendored
									
									
								
							@ -54,8 +54,8 @@ jobs:
 | 
			
		||||
          { name: "manylinuxaarch64-builder",       tag: "cuda12.9",          runner: "linux.arm64.2xlarge.ephemeral" },
 | 
			
		||||
          { name: "manylinuxaarch64-builder",       tag: "cuda12.8",          runner: "linux.arm64.2xlarge.ephemeral" },
 | 
			
		||||
          { name: "manylinuxaarch64-builder",       tag: "cuda12.6",          runner: "linux.arm64.2xlarge.ephemeral" },
 | 
			
		||||
          { name: "manylinux2_28-builder",          tag: "rocm6.4",           runner: "linux.9xlarge.ephemeral" },
 | 
			
		||||
          { name: "manylinux2_28-builder",          tag: "rocm7.0",           runner: "linux.9xlarge.ephemeral" },
 | 
			
		||||
          { name: "manylinux2_28-builder",          tag: "rocm7.1",           runner: "linux.9xlarge.ephemeral" },
 | 
			
		||||
          { name: "manylinux2_28-builder",          tag: "cpu",               runner: "linux.9xlarge.ephemeral" },
 | 
			
		||||
          { name: "manylinux2_28_aarch64-builder",  tag: "cpu-aarch64",       runner: "linux.arm64.2xlarge.ephemeral" },
 | 
			
		||||
          { name: "manylinux2_28-builder",          tag: "xpu",               runner: "linux.9xlarge.ephemeral" },
 | 
			
		||||
 | 
			
		||||
							
								
								
									
										9
									
								
								.github/workflows/build-triton-wheel.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										9
									
								
								.github/workflows/build-triton-wheel.yml
									
									
									
									
										vendored
									
									
								
							@ -55,7 +55,7 @@ jobs:
 | 
			
		||||
        docker-image: ["pytorch/manylinux2_28-builder:cpu"]
 | 
			
		||||
        include:
 | 
			
		||||
          - device: "rocm"
 | 
			
		||||
            rocm_version: "7.1"
 | 
			
		||||
            rocm_version: "7.0"
 | 
			
		||||
            runs_on: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge"
 | 
			
		||||
          - device: "cuda"
 | 
			
		||||
            rocm_version: ""
 | 
			
		||||
@ -159,7 +159,12 @@ jobs:
 | 
			
		||||
             WITH_CLANG_LDD="--with-clang-ldd"
           fi

-          docker exec -t "${container_name}" bash -c "${PYTHON_EXECUTABLE} /pytorch/.github/scripts/build_triton_wheel.py --device=$BUILD_DEVICE $RELEASE $WITH_CLANG_LDD"
+          if [[ "${BUILD_DEVICE}" == xpu ]]; then
+            docker exec -t "${container_name}" bash -c "dnf install -y gcc-toolset-13-gcc-c++"
+            docker exec -t "${container_name}" bash -c "source /opt/rh/gcc-toolset-13/enable && ${PYTHON_EXECUTABLE} /pytorch/.github/scripts/build_triton_wheel.py --device=$BUILD_DEVICE $RELEASE"
+          else
+            docker exec -t "${container_name}" bash -c "${PYTHON_EXECUTABLE} /pytorch/.github/scripts/build_triton_wheel.py --device=$BUILD_DEVICE $RELEASE $WITH_CLANG_LDD"
+          fi

           if [[ ("${{ matrix.device }}" == "cuda" || "${{ matrix.device }}" == "xpu") ]]; then
             docker exec -t "${container_name}"  bash -c "auditwheel repair --plat ${PLATFORM} //artifacts/*.whl"

.github/workflows/docker-builds.yml
@@ -57,7 +57,6 @@ jobs:
           pytorch-linux-jammy-cuda12.4-cudnn9-py3-gcc11,
           pytorch-linux-jammy-py3.10-clang12,
           pytorch-linux-jammy-py3.13-clang12,
-          pytorch-linux-jammy-py3.14-clang12,
           pytorch-linux-jammy-rocm-n-py3,
           pytorch-linux-noble-rocm-n-py3,
           pytorch-linux-jammy-rocm-n-py3-benchmarks,
@@ -67,7 +66,6 @@ jobs:
           pytorch-linux-jammy-py3.12-halide,
           pytorch-linux-jammy-xpu-n-1-py3,
           pytorch-linux-jammy-xpu-n-py3,
-          pytorch-linux-jammy-xpu-n-py3-inductor-benchmarks,
           pytorch-linux-jammy-py3-clang18-asan,
           pytorch-linux-jammy-py3-clang12-onnx,
           pytorch-linux-jammy-linter,

.github/workflows/generated-linux-aarch64-binary-manywheel-nightly.yml
@@ -132,7 +132,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_10-cuda-aarch64-12_6
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.4.5; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -178,7 +178,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_10-cuda-aarch64-12_8
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.4.5; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -224,7 +224,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_10-cuda-aarch64-12_9
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.4.5; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -270,7 +270,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_10-cuda-aarch64-13_0
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.1.0.3; platform_system == 'Linux' | nvidia-cufft==12.0.0.61; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.4.5; platform_system == 'Linux' | nvidia-nvtx==13.0.85; platform_system == 'Linux' | nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | nvidia-cufile==1.15.1.6; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -381,7 +381,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_11-cuda-aarch64-12_6
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.4.5; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -427,7 +427,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_11-cuda-aarch64-12_8
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.4.5; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -473,7 +473,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_11-cuda-aarch64-12_9
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.4.5; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -519,7 +519,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_11-cuda-aarch64-13_0
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.1.0.3; platform_system == 'Linux' | nvidia-cufft==12.0.0.61; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.4.5; platform_system == 'Linux' | nvidia-nvtx==13.0.85; platform_system == 'Linux' | nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | nvidia-cufile==1.15.1.6; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -630,7 +630,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_12-cuda-aarch64-12_6
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.4.5; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -676,7 +676,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_12-cuda-aarch64-12_8
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.4.5; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -722,7 +722,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_12-cuda-aarch64-12_9
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.4.5; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -768,7 +768,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_12-cuda-aarch64-13_0
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.1.0.3; platform_system == 'Linux' | nvidia-cufft==12.0.0.61; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.4.5; platform_system == 'Linux' | nvidia-nvtx==13.0.85; platform_system == 'Linux' | nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | nvidia-cufile==1.15.1.6; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -879,7 +879,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_13-cuda-aarch64-12_6
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.4.5; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -925,7 +925,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_13-cuda-aarch64-12_8
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.4.5; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -971,7 +971,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_13-cuda-aarch64-12_9
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.4.5; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -1017,7 +1017,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_13-cuda-aarch64-13_0
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.1.0.3; platform_system == 'Linux' | nvidia-cufft==12.0.0.61; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.4.5; platform_system == 'Linux' | nvidia-nvtx==13.0.85; platform_system == 'Linux' | nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | nvidia-cufile==1.15.1.6; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -1128,7 +1128,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_13t-cuda-aarch64-12_6
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.4.5; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -1174,7 +1174,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_13t-cuda-aarch64-12_8
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.4.5; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -1220,7 +1220,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_13t-cuda-aarch64-12_9
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.4.5; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -1266,7 +1266,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_13t-cuda-aarch64-13_0
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.1.0.3; platform_system == 'Linux' | nvidia-cufft==12.0.0.61; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.4.5; platform_system == 'Linux' | nvidia-nvtx==13.0.85; platform_system == 'Linux' | nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | nvidia-cufile==1.15.1.6; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -1377,7 +1377,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_14-cuda-aarch64-12_6
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.4.5; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -1423,7 +1423,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_14-cuda-aarch64-12_8
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.4.5; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -1469,7 +1469,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_14-cuda-aarch64-12_9
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.4.5; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -1515,7 +1515,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_14-cuda-aarch64-13_0
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.1.0.3; platform_system == 'Linux' | nvidia-cufft==12.0.0.61; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.4.5; platform_system == 'Linux' | nvidia-nvtx==13.0.85; platform_system == 'Linux' | nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | nvidia-cufile==1.15.1.6; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -1626,7 +1626,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_14t-cuda-aarch64-12_6
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.4.5; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -1672,7 +1672,7 @@ jobs:
      ALPINE_IMAGE: "arm64v8/alpine"
 | 
			
		||||
      build_name: manywheel-py3_14t-cuda-aarch64-12_8
 | 
			
		||||
      build_environment: linux-aarch64-binary-manywheel
 | 
			
		||||
      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.4.5; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
 | 
			
		||||
      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
 | 
			
		||||
      timeout-minutes: 420
 | 
			
		||||
    secrets:
 | 
			
		||||
      github-token: ${{ secrets.GITHUB_TOKEN }}
 | 
			
		||||
@ -1718,7 +1718,7 @@ jobs:
 | 
			
		||||
      ALPINE_IMAGE: "arm64v8/alpine"
 | 
			
		||||
      build_name: manywheel-py3_14t-cuda-aarch64-12_9
 | 
			
		||||
      build_environment: linux-aarch64-binary-manywheel
 | 
			
		||||
      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.4.5; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux'
 | 
			
		||||
      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux'
 | 
			
		||||
      timeout-minutes: 420
 | 
			
		||||
    secrets:
 | 
			
		||||
      github-token: ${{ secrets.GITHUB_TOKEN }}
 | 
			
		||||
@ -1764,7 +1764,7 @@ jobs:
 | 
			
		||||
      ALPINE_IMAGE: "arm64v8/alpine"
 | 
			
		||||
      build_name: manywheel-py3_14t-cuda-aarch64-13_0
 | 
			
		||||
      build_environment: linux-aarch64-binary-manywheel
 | 
			
		||||
      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.1.0.3; platform_system == 'Linux' | nvidia-cufft==12.0.0.61; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.4.5; platform_system == 'Linux' | nvidia-nvtx==13.0.85; platform_system == 'Linux' | nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | nvidia-cufile==1.15.1.6; platform_system == 'Linux'
 | 
			
		||||
      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux'
 | 
			
		||||
      timeout-minutes: 420
 | 
			
		||||
    secrets:
 | 
			
		||||
      github-token: ${{ secrets.GITHUB_TOKEN }}
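
Each PYTORCH_EXTRA_INSTALL_REQUIREMENTS pair above is one before/after change: the nvidia-nvshmem pin drops from 3.4.5 to 3.3.24 (3.3.20 on the CUDA 12.9 build), and the CUDA 13.0 component pins fall back from the 13.0.88-era to the 13.0.48-era releases, in line with the NVSHMEM_VERSION and install_cuda downgrades earlier in this compare. The value itself is a single string of PEP 508 requirement specs joined by " | ". A minimal sketch, assuming that join convention, of how such a string can be split and its environment markers evaluated with the packaging library; the two-entry sample string is a shortened stand-in, not code from this repository:

# Hedged sketch: parse a PYTORCH_EXTRA_INSTALL_REQUIREMENTS-style string.
# The sample value is a two-entry stand-in for the long strings above.
from packaging.requirements import Requirement

extra = (
    "nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | "
    "nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux'"
)

for chunk in extra.split(" | "):
    req = Requirement(chunk)  # parses the name, the ==pin, and the marker
    # On non-Linux platforms the marker evaluates to False and pip skips the pin.
    applies = req.marker is None or req.marker.evaluate()
    print(f"{req.name} {req.specifier} applies_here={applies}")

On a Linux interpreter both markers evaluate true; elsewhere pip would ignore these extra pins entirely.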

236  .github/workflows/generated-linux-binary-libtorch-nightly.yml  generated  vendored

@ -384,6 +384,124 @@ jobs:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  libtorch-rocm6_4-shared-with-deps-release-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
    needs: get-label-type
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: libtorch
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: rocm6.4
      GPU_ARCH_VERSION: "6.4"
      GPU_ARCH_TYPE: rocm
      DOCKER_IMAGE: libtorch-cxx11-builder
      DOCKER_IMAGE_TAG_PREFIX: rocm6.4
      LIBTORCH_CONFIG: release
      LIBTORCH_VARIANT: shared-with-deps
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      timeout-minutes: 300
      build_name: libtorch-rocm6_4-shared-with-deps-release
      build_environment: linux-binary-libtorch
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
  libtorch-rocm6_4-shared-with-deps-release-test:  # Testing
    if: ${{ github.repository_owner == 'pytorch' }}
    needs:
      - libtorch-rocm6_4-shared-with-deps-release-build
      - get-label-type
    runs-on: linux.rocm.gpu.mi250
    timeout-minutes: 240
    env:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: libtorch
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: rocm6.4
      GPU_ARCH_VERSION: "6.4"
      GPU_ARCH_TYPE: rocm
      SKIP_ALL_TESTS: 1
      DOCKER_IMAGE: libtorch-cxx11-builder
      DOCKER_IMAGE_TAG_PREFIX: rocm6.4
      LIBTORCH_CONFIG: release
      LIBTORCH_VARIANT: shared-with-deps
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm
      - uses: actions/download-artifact@v4.1.7
        name: Download Build Artifacts
        with:
          name: libtorch-rocm6_4-shared-with-deps-release
          path: "${{ runner.temp }}/artifacts/"
      - name: Checkout PyTorch
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
          submodules: recursive
          path: pytorch
          show-progress: false
      - name: Clean PyTorch checkout
        run: |
          # Remove any artifacts from the previous checkouts
          git clean -fxd
        working-directory: pytorch
      - name: ROCm set GPU_FLAG
        run: |
          echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri --group-add video --group-add daemon" >> "${GITHUB_ENV}"
      - name: configure aws credentials
        id: aws_creds
        if: ${{ startsWith(github.event.ref, 'refs/tags/ciflow/') }}
        uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_s3_and_ecr_read_only
          aws-region: us-east-1
          role-duration-seconds: 18000
      - name: Calculate docker image
        id: calculate-docker-image
        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
        with:
          docker-registry: ${{ startsWith(github.event.ref, 'refs/tags/ciflow/') && '308535385114.dkr.ecr.us-east-1.amazonaws.com' || 'docker.io' }}
          docker-image-name: libtorch-cxx11-builder
          custom-tag-prefix: rocm6.4
          docker-build-dir: .ci/docker
          working-directory: pytorch
      - name: Pull Docker image
        uses: pytorch/test-infra/.github/actions/pull-docker-image@main
        with:
          docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
      - name: Test Pytorch binary
        uses: ./pytorch/.github/actions/test-pytorch-binary
        env:
          DOCKER_IMAGE: ${{ steps.calculate-docker-image.outputs.docker-image }}
      - name: Teardown ROCm
        uses: ./.github/actions/teardown-rocm
  libtorch-rocm6_4-shared-with-deps-release-upload:  # Uploading
    if: ${{ github.repository_owner == 'pytorch' }}
    permissions:
      id-token: write
      contents: read
    needs: libtorch-rocm6_4-shared-with-deps-release-test
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: libtorch
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: rocm6.4
      GPU_ARCH_VERSION: "6.4"
      GPU_ARCH_TYPE: rocm
      DOCKER_IMAGE: libtorch-cxx11-builder
      DOCKER_IMAGE_TAG_PREFIX: rocm6.4
      LIBTORCH_CONFIG: release
      LIBTORCH_VARIANT: shared-with-deps
      build_name: libtorch-rocm6_4-shared-with-deps-release
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  libtorch-rocm7_0-shared-with-deps-release-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
@ -501,121 +619,3 @@ jobs:
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  libtorch-rocm7_1-shared-with-deps-release-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
    needs: get-label-type
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: libtorch
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: rocm7.1
      GPU_ARCH_VERSION: "7.1"
      GPU_ARCH_TYPE: rocm
      DOCKER_IMAGE: libtorch-cxx11-builder
      DOCKER_IMAGE_TAG_PREFIX: rocm7.1
      LIBTORCH_CONFIG: release
      LIBTORCH_VARIANT: shared-with-deps
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      timeout-minutes: 300
      build_name: libtorch-rocm7_1-shared-with-deps-release
      build_environment: linux-binary-libtorch
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
  libtorch-rocm7_1-shared-with-deps-release-test:  # Testing
    if: ${{ github.repository_owner == 'pytorch' }}
    needs:
      - libtorch-rocm7_1-shared-with-deps-release-build
      - get-label-type
    runs-on: linux.rocm.gpu.mi250
    timeout-minutes: 240
    env:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: libtorch
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: rocm7.1
      GPU_ARCH_VERSION: "7.1"
      GPU_ARCH_TYPE: rocm
      SKIP_ALL_TESTS: 1
      DOCKER_IMAGE: libtorch-cxx11-builder
      DOCKER_IMAGE_TAG_PREFIX: rocm7.1
      LIBTORCH_CONFIG: release
      LIBTORCH_VARIANT: shared-with-deps
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm
      - uses: actions/download-artifact@v4.1.7
        name: Download Build Artifacts
        with:
          name: libtorch-rocm7_1-shared-with-deps-release
          path: "${{ runner.temp }}/artifacts/"
      - name: Checkout PyTorch
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
          submodules: recursive
          path: pytorch
          show-progress: false
      - name: Clean PyTorch checkout
        run: |
          # Remove any artifacts from the previous checkouts
          git clean -fxd
        working-directory: pytorch
      - name: ROCm set GPU_FLAG
        run: |
          echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri --group-add video --group-add daemon" >> "${GITHUB_ENV}"
      - name: configure aws credentials
        id: aws_creds
        if: ${{ startsWith(github.event.ref, 'refs/tags/ciflow/') }}
        uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_s3_and_ecr_read_only
          aws-region: us-east-1
          role-duration-seconds: 18000
      - name: Calculate docker image
        id: calculate-docker-image
        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
        with:
          docker-registry: ${{ startsWith(github.event.ref, 'refs/tags/ciflow/') && '308535385114.dkr.ecr.us-east-1.amazonaws.com' || 'docker.io' }}
          docker-image-name: libtorch-cxx11-builder
          custom-tag-prefix: rocm7.1
          docker-build-dir: .ci/docker
          working-directory: pytorch
      - name: Pull Docker image
        uses: pytorch/test-infra/.github/actions/pull-docker-image@main
        with:
          docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
      - name: Test Pytorch binary
        uses: ./pytorch/.github/actions/test-pytorch-binary
        env:
          DOCKER_IMAGE: ${{ steps.calculate-docker-image.outputs.docker-image }}
      - name: Teardown ROCm
        uses: ./.github/actions/teardown-rocm
  libtorch-rocm7_1-shared-with-deps-release-upload:  # Uploading
    if: ${{ github.repository_owner == 'pytorch' }}
    permissions:
      id-token: write
      contents: read
    needs: libtorch-rocm7_1-shared-with-deps-release-test
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: libtorch
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: rocm7.1
      GPU_ARCH_VERSION: "7.1"
      GPU_ARCH_TYPE: rocm
      DOCKER_IMAGE: libtorch-cxx11-builder
      DOCKER_IMAGE_TAG_PREFIX: rocm7.1
      LIBTORCH_CONFIG: release
      LIBTORCH_VARIANT: shared-with-deps
      build_name: libtorch-rocm7_1-shared-with-deps-release
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml
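
Every ROCm variant in this file follows the same three-stage chain wired through needs:: the -build job produces the artifact, -test downloads it and runs it inside the matching rocmX.Y container, and -upload publishes it. A minimal sketch of that ordering as a tiny dependency graph; the job names are abbreviated, the graph shape is read off the needs: lines above, and the code is illustrative only:

# Hedged sketch: the needs: edges of one libtorch ROCm variant, walked in
# topological order with the standard library.
from graphlib import TopologicalSorter

needs = {
    "libtorch-rocm7_1-build": {"get-label-type"},
    "libtorch-rocm7_1-test": {"libtorch-rocm7_1-build", "get-label-type"},
    "libtorch-rocm7_1-upload": {"libtorch-rocm7_1-test"},
}
# Prints a valid execution order: get-label-type, then build, test, upload.
print(list(TopologicalSorter(needs).static_order()))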
1666  .github/workflows/generated-linux-binary-manywheel-nightly.yml  generated  vendored
    (File diff suppressed because it is too large)

8  .github/workflows/generated-windows-binary-libtorch-debug-nightly.yml  generated  vendored

@ -44,7 +44,7 @@ jobs:
  libtorch-cpu-shared-with-deps-debug-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -291,7 +291,7 @@ jobs:
  libtorch-cuda12_6-shared-with-deps-debug-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -541,7 +541,7 @@ jobs:
  libtorch-cuda12_8-shared-with-deps-debug-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -791,7 +791,7 @@ jobs:
  libtorch-cuda13_0-shared-with-deps-debug-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
8  .github/workflows/generated-windows-binary-libtorch-release-nightly.yml  generated  vendored

@ -44,7 +44,7 @@ jobs:
  libtorch-cpu-shared-with-deps-release-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -291,7 +291,7 @@ jobs:
  libtorch-cuda12_6-shared-with-deps-release-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -541,7 +541,7 @@ jobs:
  libtorch-cuda12_8-shared-with-deps-release-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -791,7 +791,7 @@ jobs:
  libtorch-cuda13_0-shared-with-deps-release-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
70  .github/workflows/generated-windows-binary-wheel-nightly.yml  generated  vendored

@ -44,7 +44,7 @@ jobs:
  wheel-py3_10-cpu-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -279,7 +279,7 @@ jobs:
  wheel-py3_10-cuda12_6-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -517,7 +517,7 @@ jobs:
  wheel-py3_10-cuda12_8-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -755,7 +755,7 @@ jobs:
  wheel-py3_10-cuda13_0-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -993,7 +993,7 @@ jobs:
  wheel-py3_10-xpu-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -1229,7 +1229,7 @@ jobs:
  wheel-py3_11-cpu-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -1464,7 +1464,7 @@ jobs:
  wheel-py3_11-cuda12_6-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -1702,7 +1702,7 @@ jobs:
  wheel-py3_11-cuda12_8-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -1940,7 +1940,7 @@ jobs:
  wheel-py3_11-cuda13_0-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -2178,7 +2178,7 @@ jobs:
  wheel-py3_11-xpu-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -2414,7 +2414,7 @@ jobs:
  wheel-py3_12-cpu-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -2649,7 +2649,7 @@ jobs:
  wheel-py3_12-cuda12_6-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -2887,7 +2887,7 @@ jobs:
  wheel-py3_12-cuda12_8-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -3125,7 +3125,7 @@ jobs:
  wheel-py3_12-cuda13_0-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -3363,7 +3363,7 @@ jobs:
  wheel-py3_12-xpu-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -3599,7 +3599,7 @@ jobs:
  wheel-py3_13-cpu-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -3834,7 +3834,7 @@ jobs:
  wheel-py3_13-cuda12_6-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -4072,7 +4072,7 @@ jobs:
  wheel-py3_13-cuda12_8-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -4310,7 +4310,7 @@ jobs:
  wheel-py3_13-cuda13_0-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -4548,7 +4548,7 @@ jobs:
  wheel-py3_13-xpu-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -4784,7 +4784,7 @@ jobs:
  wheel-py3_13t-cpu-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -5019,7 +5019,7 @@ jobs:
  wheel-py3_13t-cuda12_6-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -5257,7 +5257,7 @@ jobs:
  wheel-py3_13t-cuda12_8-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -5495,7 +5495,7 @@ jobs:
  wheel-py3_13t-cuda13_0-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -5733,7 +5733,7 @@ jobs:
  wheel-py3_13t-xpu-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -5969,7 +5969,7 @@ jobs:
  wheel-py3_14-cpu-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -6204,7 +6204,7 @@ jobs:
  wheel-py3_14-cuda12_6-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -6442,7 +6442,7 @@ jobs:
  wheel-py3_14-cuda12_8-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -6680,7 +6680,7 @@ jobs:
  wheel-py3_14-cuda13_0-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -6918,7 +6918,7 @@ jobs:
  wheel-py3_14-xpu-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -7154,7 +7154,7 @@ jobs:
  wheel-py3_14t-cpu-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -7389,7 +7389,7 @@ jobs:
  wheel-py3_14t-cuda12_6-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -7627,7 +7627,7 @@ jobs:
  wheel-py3_14t-cuda12_8-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -7865,7 +7865,7 @@ jobs:
  wheel-py3_14t-cuda13_0-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@ -8103,7 +8103,7 @@ jobs:
  wheel-py3_14t-xpu-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
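
Every hunk in this file makes the same one-line change: the Windows build jobs move from windows.12xlarge to windows.4xlarge, with the runs-on label assembled by prefixing the get-label-type output. A minimal sketch of that composition; the example prefix values are assumptions, since the real ones come from _runner-determinator.yml:

# Hedged sketch: compose a runs-on label the way these workflows do.
def runs_on(label_prefix: str, base: str = "windows.4xlarge") -> str:
    # label_prefix is the get-label-type output; "" and "lf." are assumed examples.
    return f"{label_prefix}{base}"

print(runs_on(""))     # windows.4xlarge
print(runs_on("lf."))  # lf.windows.4xlarge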
148  .github/workflows/inductor-perf-test-nightly-xpu.yml  vendored

@ -1,148 +0,0 @@
name: inductor-perf-nightly-xpu

on:
  push:
    tags:
      - ciflow/inductor-perf-test-nightly-xpu/*
  schedule:
    - cron: 30 17 * * *
  workflow_dispatch:
    inputs:
      training:
        description: Run training (on by default)?
        required: false
        type: boolean
        default: true
      inference:
        description: Run inference (on by default)?
        required: false
        type: boolean
        default: true
      default:
        description: Run inductor_default?
        required: false
        type: boolean
        default: false
      dynamic:
        description: Run inductor_dynamic_shapes?
        required: false
        type: boolean
        default: false
      cppwrapper:
        description: Run inductor_cpp_wrapper?
        required: false
        type: boolean
        default: false
      cudagraphs:
        description: Run inductor_cudagraphs?
        required: false
        type: boolean
        default: false
      freezing_cudagraphs:
        description: Run inductor_cudagraphs with freezing for inference?
        required: false
        type: boolean
        default: false
      aotinductor:
        description: Run aot_inductor for inference?
        required: false
        type: boolean
        default: false
      maxautotune:
        description: Run inductor_max_autotune?
        required: false
        type: boolean
        default: false
      benchmark_configs:
        description: The list of configs used the benchmark
        required: false
        type: string
        default: inductor_huggingface_perf,inductor_timm_perf,inductor_torchbench_perf,cachebench

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}
  cancel-in-progress: true

permissions: read-all

jobs:
  get-label-type:
    name: get-label-type
    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
    if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }}
    with:
      triggering_actor: ${{ github.triggering_actor }}
      issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
      curr_branch: ${{ github.head_ref || github.ref_name }}
      curr_ref_type: ${{ github.ref_type }}
      opt_out_experiments: lf

  xpu-n-py3_10-inductor-benchmark-build:
    name: xpu-n-py3.10-inductor-benchmark
    uses: ./.github/workflows/_linux-build.yml
    needs: get-label-type
    with:
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      build-environment: linux-jammy-xpu-n-py3.10
      docker-image-name: ci-image:pytorch-linux-jammy-xpu-n-py3-inductor-benchmarks
      runner: linux.c7i.12xlarge
      test-matrix: |
        { include: [
          { config: "inductor_huggingface_perf_xpu", shard: 1, num_shards: 5, runner: "linux.idc.xpu" },
          { config: "inductor_huggingface_perf_xpu", shard: 2, num_shards: 5, runner: "linux.idc.xpu" },
          { config: "inductor_huggingface_perf_xpu", shard: 3, num_shards: 5, runner: "linux.idc.xpu" },
          { config: "inductor_huggingface_perf_xpu", shard: 4, num_shards: 5, runner: "linux.idc.xpu" },
          { config: "inductor_huggingface_perf_xpu", shard: 5, num_shards: 5, runner: "linux.idc.xpu" },
          { config: "inductor_timm_perf_xpu", shard: 1, num_shards: 6, runner: "linux.idc.xpu" },
          { config: "inductor_timm_perf_xpu", shard: 2, num_shards: 6, runner: "linux.idc.xpu" },
          { config: "inductor_timm_perf_xpu", shard: 3, num_shards: 6, runner: "linux.idc.xpu" },
          { config: "inductor_timm_perf_xpu", shard: 4, num_shards: 6, runner: "linux.idc.xpu" },
          { config: "inductor_timm_perf_xpu", shard: 5, num_shards: 6, runner: "linux.idc.xpu" },
          { config: "inductor_timm_perf_xpu", shard: 6, num_shards: 6, runner: "linux.idc.xpu" },
          { config: "inductor_torchbench_perf_xpu", shard: 1, num_shards: 6, runner: "linux.idc.xpu" },
          { config: "inductor_torchbench_perf_xpu", shard: 2, num_shards: 6, runner: "linux.idc.xpu" },
          { config: "inductor_torchbench_perf_xpu", shard: 3, num_shards: 6, runner: "linux.idc.xpu" },
          { config: "inductor_torchbench_perf_xpu", shard: 4, num_shards: 6, runner: "linux.idc.xpu" },
          { config: "inductor_torchbench_perf_xpu", shard: 5, num_shards: 6, runner: "linux.idc.xpu" },
          { config: "inductor_torchbench_perf_xpu", shard: 6, num_shards: 6, runner: "linux.idc.xpu" },
        ]}
    secrets: inherit

  xpu-n-py3_10-inductor-benchmark-test-nightly:
    permissions:
      id-token: write
      contents: read
    if: github.event_name != 'workflow_dispatch'
    name: xpu-n-py3.10-inductor-benchmark
    uses: ./.github/workflows/_xpu-test.yml
    needs: xpu-n-py3_10-inductor-benchmark-build
    with:
      build-environment: linux-jammy-xpu-n-py3.10
      dashboard-tag: training-true-inference-true-default-true-dynamic-true-cudagraphs-false-cppwrapper-true-aotinductor-true-freezing_cudagraphs-false-cudagraphs_low_precision-false
      docker-image: ${{ needs.xpu-n-py3_10-inductor-benchmark-build.outputs.docker-image }}
      test-matrix: ${{ needs.xpu-n-py3_10-inductor-benchmark-build.outputs.test-matrix }}
      timeout-minutes: 720
      # Disable monitor in perf tests for more investigation
      disable-monitor: true
      monitor-log-interval: 10
      monitor-data-collect-interval: 2
    secrets: inherit

  xpu-n-py3_10-inductor-benchmark-test:
    permissions:
      id-token: write
      contents: read
    if: github.event_name == 'workflow_dispatch'
    name: xpu-n-py3.10-inductor-test
    uses: ./.github/workflows/_xpu-test.yml
    needs: xpu-n-py3_10-inductor-benchmark-build
    with:
      build-environment: linux-jammy-xpu-n-py3.10
      dashboard-tag: training-${{ inputs.training }}-inference-${{ inputs.inference }}-default-${{ inputs.default }}-dynamic-${{ inputs.dynamic }}-cudagraphs-${{ inputs.cudagraphs }}-cppwrapper-${{ inputs.cppwrapper }}-aotinductor-${{ inputs.aotinductor }}-maxautotune-${{ inputs.maxautotune }}-freezing_cudagraphs-${{ inputs.freezing_cudagraphs }}-cudagraphs_low_precision-${{ inputs.cudagraphs }}
      docker-image: ${{ needs.xpu-n-py3_10-inductor-benchmark-build.outputs.docker-image }}
      test-matrix: ${{ needs.xpu-n-py3_10-inductor-benchmark-build.outputs.test-matrix }}
      timeout-minutes: 720
      disable-monitor: false
      monitor-log-interval: 15
      monitor-data-collect-interval: 4
    secrets: inherit
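
The deleted workflow's test-matrix fanned each benchmark suite out over fixed shard counts: 5 huggingface shards plus 6 timm and 6 torchbench shards, all on linux.idc.xpu runners. A minimal sketch of that fan-out, with two entries re-quoted as strict JSON; the real expansion happens inside the reusable _linux-build.yml and _xpu-test.yml workflows and is not shown in this diff:

# Hedged sketch: expand a PyTorch-style test-matrix into per-shard job labels.
# Two entries stand in for the full 17-entry matrix above.
import json

test_matrix = json.loads("""
{ "include": [
  { "config": "inductor_huggingface_perf_xpu", "shard": 1, "num_shards": 5, "runner": "linux.idc.xpu" },
  { "config": "inductor_huggingface_perf_xpu", "shard": 2, "num_shards": 5, "runner": "linux.idc.xpu" }
] }
""")

for entry in test_matrix["include"]:
    # One CI job per entry; the runner label routes it to the XPU fleet.
    print(f"{entry['config']} shard {entry['shard']}/{entry['num_shards']} on {entry['runner']}")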

1  .github/workflows/inductor-periodic.yml  vendored

@ -88,6 +88,7 @@ jobs:
    with:
      build-environment: linux-jammy-rocm-py3_10
      docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3-benchmarks
      sync-tag: rocm-build
      test-matrix: |
        { include: [
          { config: "dynamo_eager_torchbench", shard: 1, num_shards: 2, runner: "linux.rocm.gpu.gfx942.1" },
15  .github/workflows/lint.yml  vendored

@ -76,12 +76,11 @@ jobs:

  # NOTE: mypy needs its own job because it depends on --all-files, without assessing all files it sometimes
  #       fails to find types when it should
  # NOTE: We should be able to disable this and consolidate with Pyrefly
  lintrunner-pyrefly:
  lintrunner-mypy:
    uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
    name: lintrunner-pyrefly-${{ needs.get-changed-files.outputs.changed-files == '*' && 'all' || 'partial' }}
    name: lintrunner-mypy-${{ needs.get-changed-files.outputs.changed-files == '*' && 'all' || 'partial' }}
    needs: [get-label-type, get-changed-files]
    # Only run if there are changed files relevant to pyrefly
    # Only run if there are changed files relevant to mypy
    if: |
      github.repository_owner == 'pytorch' && (
        needs.get-changed-files.outputs.changed-files == '*' ||
@ -99,8 +98,8 @@ jobs:
      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
      script: |
        CHANGED_FILES="${{ needs.get-changed-files.outputs.changed-files }}"
        echo "Running pyrefly"
        ADDITIONAL_LINTRUNNER_ARGS="--take PYREFLY --all-files" .github/scripts/lintrunner.sh
        echo "Running mypy"
        ADDITIONAL_LINTRUNNER_ARGS="--take MYPY,MYPYSTRICT --all-files" .github/scripts/lintrunner.sh

  lintrunner-noclang:
    uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
@ -119,9 +118,9 @@ jobs:
        CHANGED_FILES="${{ needs.get-changed-files.outputs.changed-files }}"
        echo "Running all other linters"
        if [ "$CHANGED_FILES" = '*' ]; then
          ADDITIONAL_LINTRUNNER_ARGS="--skip CLANGTIDY,CLANGFORMAT,PYREFLY --all-files" .github/scripts/lintrunner.sh
          ADDITIONAL_LINTRUNNER_ARGS="--skip CLANGTIDY,CLANGFORMAT,MYPY,MYPYSTRICT,PYREFLY --all-files" .github/scripts/lintrunner.sh
        else
          ADDITIONAL_LINTRUNNER_ARGS="--skip CLANGTIDY,CLANGFORMAT,PYREFLY ${CHANGED_FILES}" .github/scripts/lintrunner.sh
          ADDITIONAL_LINTRUNNER_ARGS="--skip CLANGTIDY,CLANGFORMAT,MYPY,MYPYSTRICT,PYREFLY ${CHANGED_FILES}" .github/scripts/lintrunner.sh
        fi

  quick-checks:

.github/workflows/periodic-rocm-mi200.yml (vendored, 84 changes)
@ -1,84 +0,0 @@
name: periodic-rocm-mi200

on:
  schedule:
    # We have several schedules so jobs can check github.event.schedule to activate only for a fraction of the runs.
    # Also run less frequently on weekends.
    - cron: 45 0,8,16 * * 1-5
    - cron: 45 4 * * 0,6
    - cron: 45 4,12,20 * * 1-5
    - cron: 45 12 * * 0,6
    - cron: 29 8 * * *  # about 1:29am PDT, for mem leak check and rerun disabled tests
  push:
    tags:
      - ciflow/periodic/*
      - ciflow/periodic-rocm-mi200/*
    branches:
      - release/*
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}-${{ github.event.schedule }}
  cancel-in-progress: true

permissions:
  id-token: write
  contents: read

jobs:
  llm-td:
    if: github.repository_owner == 'pytorch'
    name: before-test
    uses: ./.github/workflows/llm_td_retrieval.yml
    permissions:
      id-token: write
      contents: read

  target-determination:
    name: before-test
    uses: ./.github/workflows/target_determination.yml
    needs: llm-td
    permissions:
      id-token: write
      contents: read

  get-label-type:
    name: get-label-type
    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
    if: (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch'
    with:
      triggering_actor: ${{ github.triggering_actor }}
      issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
      curr_branch: ${{ github.head_ref || github.ref_name }}
      curr_ref_type: ${{ github.ref_type }}

  linux-jammy-rocm-py3_10-build:
    name: linux-jammy-rocm-py3.10
    uses: ./.github/workflows/_linux-build.yml
    needs: get-label-type
    with:
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      build-environment: linux-jammy-rocm-py3.10
      docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3
      test-matrix: |
        { include: [
          { config: "distributed", shard: 1, num_shards: 3, runner: "linux.rocm.gpu.4", owners: ["module:rocm", "oncall:distributed"] },
          { config: "distributed", shard: 2, num_shards: 3, runner: "linux.rocm.gpu.4", owners: ["module:rocm", "oncall:distributed"] },
          { config: "distributed", shard: 3, num_shards: 3, runner: "linux.rocm.gpu.4", owners: ["module:rocm", "oncall:distributed"] },
        ]}
    secrets: inherit

  linux-jammy-rocm-py3_10-test:
    permissions:
      id-token: write
      contents: read
    name: linux-jammy-rocm-py3.10
    uses: ./.github/workflows/_rocm-test.yml
    needs:
      - linux-jammy-rocm-py3_10-build
      - target-determination
    with:
      build-environment: linux-jammy-rocm-py3.10
      docker-image: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.docker-image }}
      test-matrix: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.test-matrix }}
    secrets: inherit

.github/workflows/periodic.yml (vendored, 46 changes)
@ -147,16 +147,15 @@ jobs:
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      build-environment: linux-jammy-cuda12.8-py3.10-gcc9-debug
      docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc9
      cuda-arch-list: 8.9
      test-matrix: |
        { include: [
          { config: "default", shard: 1, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g6.4xlarge.experimental.nvidia.gpu", owners: ["oncall:debug-build"] },
          { config: "default", shard: 2, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g6.4xlarge.experimental.nvidia.gpu", owners: ["oncall:debug-build"] },
          { config: "default", shard: 3, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g6.4xlarge.experimental.nvidia.gpu", owners: ["oncall:debug-build"] },
          { config: "default", shard: 4, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g6.4xlarge.experimental.nvidia.gpu", owners: ["oncall:debug-build"] },
          { config: "default", shard: 5, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g6.4xlarge.experimental.nvidia.gpu", owners: ["oncall:debug-build"] },
          { config: "default", shard: 6, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g6.4xlarge.experimental.nvidia.gpu", owners: ["oncall:debug-build"] },
          { config: "default", shard: 7, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g6.4xlarge.experimental.nvidia.gpu", owners: ["oncall:debug-build"] },
          { config: "default", shard: 1, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu", owners: ["oncall:debug-build"] },
          { config: "default", shard: 2, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu", owners: ["oncall:debug-build"] },
          { config: "default", shard: 3, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu", owners: ["oncall:debug-build"] },
          { config: "default", shard: 4, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu", owners: ["oncall:debug-build"] },
          { config: "default", shard: 5, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu", owners: ["oncall:debug-build"] },
          { config: "default", shard: 6, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu", owners: ["oncall:debug-build"] },
          { config: "default", shard: 7, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu", owners: ["oncall:debug-build"] },
        ]}
    secrets: inherit

@ -204,6 +203,37 @@ jobs:
      test-matrix: ${{ needs.linux-jammy-cuda13_0-py3_10-gcc11-build.outputs.test-matrix }}
    secrets: inherit

  linux-jammy-rocm-py3_10-build:
    name: linux-jammy-rocm-py3.10
    uses: ./.github/workflows/_linux-build.yml
    needs: get-label-type
    with:
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      build-environment: linux-jammy-rocm-py3.10
      docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3
      test-matrix: |
        { include: [
          { config: "distributed", shard: 1, num_shards: 3, runner: "linux.rocm.gpu.mi250.4", owners: ["module:rocm", "oncall:distributed"] },
          { config: "distributed", shard: 2, num_shards: 3, runner: "linux.rocm.gpu.mi250.4", owners: ["module:rocm", "oncall:distributed"] },
          { config: "distributed", shard: 3, num_shards: 3, runner: "linux.rocm.gpu.mi250.4", owners: ["module:rocm", "oncall:distributed"] },
        ]}
    secrets: inherit

  linux-jammy-rocm-py3_10-test:
    permissions:
      id-token: write
      contents: read
    name: linux-jammy-rocm-py3.10
    uses: ./.github/workflows/_rocm-test.yml
    needs:
      - linux-jammy-rocm-py3_10-build
      - target-determination
    with:
      build-environment: linux-jammy-rocm-py3.10
      docker-image: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.docker-image }}
      test-matrix: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.test-matrix }}
    secrets: inherit

  linux-jammy-cuda12_8-py3-gcc11-slow-gradcheck-build:
    name: linux-jammy-cuda12.8-py3-gcc11-slow-gradcheck
    uses: ./.github/workflows/_linux-build.yml

.github/workflows/pull.yml (vendored, 3 changes)
@ -347,8 +347,7 @@ jobs:
    uses: ./.github/workflows/_linux-build.yml
    needs: get-label-type
    with:
      # This should sync with the build in xpu.yml but xpu uses a larger runner
      # sync-tag: linux-xpu-n-build
      sync-tag: linux-xpu-n-build
      runner_prefix: ${{ needs.get-label-type.outputs.label-type }}
      build-environment: linux-jammy-xpu-n-py3.10
      docker-image-name: ci-image:pytorch-linux-jammy-xpu-n-py3

.github/workflows/rocm-mi300.yml (vendored, 1 change)
@ -45,6 +45,7 @@ jobs:
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      build-environment: linux-noble-rocm-py3.12-mi300
      docker-image-name: ci-image:pytorch-linux-noble-rocm-n-py3
      sync-tag: rocm-build
      test-matrix: |
        { include: [
          { config: "default", shard: 1, num_shards: 6, runner: "linux.rocm.gpu.gfx942.1" },

.github/workflows/rocm-mi355.yml (vendored, 1 change)
@ -42,6 +42,7 @@ jobs:
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      build-environment: linux-noble-rocm-py3.12-mi355
      docker-image-name: ci-image:pytorch-linux-noble-rocm-n-py3
      sync-tag: rocm-build
      test-matrix: |
        { include: [
          { config: "default", shard: 1, num_shards: 6, runner: "linux.rocm.gpu.mi355.1" },

.github/workflows/rocm-navi31.yml (vendored, 12 changes)
@ -26,23 +26,11 @@ jobs:
      id-token: write
      contents: read

  get-label-type:
    name: get-label-type
    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
    if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }}
    with:
      triggering_actor: ${{ github.triggering_actor }}
      issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
      curr_branch: ${{ github.head_ref || github.ref_name }}
      curr_ref_type: ${{ github.ref_type }}

  linux-jammy-rocm-py3_10-build:
    if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }}
    name: linux-jammy-rocm-py3.10
    uses: ./.github/workflows/_linux-build.yml
    needs: get-label-type
    with:
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      build-environment: linux-jammy-rocm-py3.10
      docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3
      sync-tag: rocm-build

.github/workflows/rocm.yml (vendored, 12 changes)
@ -26,23 +26,11 @@ jobs:
      id-token: write
      contents: read

  get-label-type:
    name: get-label-type
    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
    if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }}
    with:
      triggering_actor: ${{ github.triggering_actor }}
      issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
      curr_branch: ${{ github.head_ref || github.ref_name }}
      curr_ref_type: ${{ github.ref_type }}

  linux-jammy-rocm-py3_10-build:
    if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }}
    name: linux-jammy-rocm-py3.10
    uses: ./.github/workflows/_linux-build.yml
    needs: get-label-type
    with:
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      build-environment: linux-jammy-rocm-py3.10
      docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3
      sync-tag: rocm-build

.github/workflows/trunk-tagging.yml (vendored, 149 changes)
@ -58,10 +58,8 @@ jobs:
          else
            COMMIT_SHA="${{ github.sha }}"
          fi
          {
            echo "sha=${COMMIT_SHA}"
            echo "tag_name=trunk/${COMMIT_SHA}"
          } >> "${GITHUB_OUTPUT}"
          echo "sha=${COMMIT_SHA}" >> "${GITHUB_OUTPUT}"
          echo "tag_name=trunk/${COMMIT_SHA}" >> "${GITHUB_OUTPUT}"

      - name: Validate commit SHA
        run: |
@ -89,7 +87,7 @@ jobs:
            echo "✅ Commit ${COMMIT_SHA} is valid (automatic push trigger)"
          fi

      - name: Create and push tag(s) with retry
      - name: Create and push tag with retry
        id: check_tag
        env:
          TAG_NAME: ${{ steps.commit.outputs.tag_name }}
@ -114,23 +112,14 @@ jobs:
            return 1
          }

          # Counters for summary reporting
          created_count=0
          skipped_count=0
          failed_count=0
          # Exit early if tag already exists
          if check_tag_exists; then
            echo "✅ Tag already exists - no action needed"
            echo "exists=true" >> "${GITHUB_OUTPUT}"
            exit 0
          fi

          # Always write outputs once on exit
          finish() {
            set +e
            if [ -n "${GITHUB_OUTPUT:-}" ]; then
              {
                echo "created_count=${created_count}"
                echo "skipped_count=${skipped_count}"
                echo "failed_count=${failed_count}"
              } >> "${GITHUB_OUTPUT}"
            fi
          }
          trap finish EXIT
          echo "Tag ${TAG_NAME} does not exist, proceeding with creation"

          # Retry configuration
          MAX_RETRIES=5
@ -205,111 +194,31 @@ jobs:
            }
          }

          # New behavior for push events: enumerate commits in the push and tag each one.
          # For workflow_dispatch, retain existing single-SHA behavior.

          # Always fetch tags once up front to improve idempotency in loops
          git fetch origin --tags --quiet || true

          if [ "${{ github.event_name }}" = "push" ]; then
            BEFORE_SHA="${{ github.event.before }}"
            AFTER_SHA="${{ github.sha }}"  # same as event.after

            # List commits introduced by this push (old..new), oldest first for stable ordering
            commits_file="$(mktemp)"
            git rev-list --reverse "${BEFORE_SHA}..${AFTER_SHA}" > "${commits_file}"

            if [ ! -s "${commits_file}" ]; then
              echo "No new commits found between ${BEFORE_SHA}..${AFTER_SHA}; nothing to tag."
              rm -f "${commits_file}"
              exit 0
            fi

            commit_count="$(wc -l < "${commits_file}" | tr -d ' ')"
            echo "Found ${commit_count} commit(s) to tag for push:"
            while IFS= read -r sha; do
              printf '  %s\n' "${sha}"
            done < "${commits_file}"

            while IFS= read -r sha; do
              TAG_NAME="trunk/${sha}"
              COMMIT_SHA="${sha}"

              # If tag already exists locally or remotely, skip (idempotent)
              if check_tag_exists; then
                echo "✅ Tag ${TAG_NAME} already exists - skipping"
                skipped_count=$((skipped_count + 1))
                continue
              fi

              echo "Tag ${TAG_NAME} does not exist, proceeding with creation"

              if retry_with_backoff "tag_with_retry" "Creating tag ${TAG_NAME} for commit ${COMMIT_SHA}"; then
                created_count=$((created_count + 1))
              else
                echo "Tag creation failed after all retry attempts for ${TAG_NAME}"
                failed_count=$((failed_count + 1))
              fi
            done < "${commits_file}"

            rm -f "${commits_file}"

            if [ "${failed_count}" -gt 0 ]; then
              exit 1
            fi
          # Execute with retry
          if retry_with_backoff "tag_with_retry" "Creating tag ${TAG_NAME} for commit ${COMMIT_SHA}"; then
            echo "exists=false" >> "${GITHUB_OUTPUT}"
            exit 0
          else
            # workflow_dispatch path (single SHA tagging preserved)

            # Exit early if tag already exists
            if check_tag_exists; then
              echo "✅ Tag already exists - no action needed"
              skipped_count=1
              exit 0
            fi

            echo "Tag ${TAG_NAME} does not exist, proceeding with creation"

            if retry_with_backoff "tag_with_retry" "Creating tag ${TAG_NAME} for commit ${COMMIT_SHA}"; then
              created_count=1
              exit 0
            else
              echo "Tag creation failed after all retry attempts"
              failed_count=1
              exit 1
            fi
            echo "Tag creation failed after all retry attempts"
            exit 1
          fi

      - name: Tag creation summary
        if: always()
        run: |
          if [ "${{ github.event_name }}" = "push" ]; then
            echo "Trigger: push on main"
            echo "Created: ${{ steps.check_tag.outputs.created_count }}"
            echo "Skipped (already existed): ${{ steps.check_tag.outputs.skipped_count }}"
            echo "Failed: ${{ steps.check_tag.outputs.failed_count }}"
            if [ "${{ steps.check_tag.outputs.failed_count }}" = "0" ]; then
              echo "✅ Completed tagging for push range ${{ github.event.before }}..${{ github.sha }}"
            else
              echo "❌ Some tags failed to create for push range ${{ github.event.before }}..${{ github.sha }}"
            fi
          if [ "${{ steps.check_tag.outputs.exists }}" = "true" ]; then
            echo "✅ Tag ${{ steps.commit.outputs.tag_name }} already existed - no action needed"
          elif [ "${{ job.status }}" = "success" ]; then
            echo "✅ Successfully created tag ${{ steps.commit.outputs.tag_name }} for commit ${{ steps.commit.outputs.sha }}"
          else
            if [ "${{ steps.check_tag.outputs.failed_count }}" = "0" ]; then
              if [ "${{ steps.check_tag.outputs.created_count }}" = "0" ]; then
                echo "✅ Tag ${{ steps.commit.outputs.tag_name }} already existed - no action needed"
              else
                echo "✅ Successfully created tag ${{ steps.commit.outputs.tag_name }} for commit ${{ steps.commit.outputs.sha }}"
              fi
            else
              echo "❌ Failed to create tag ${{ steps.commit.outputs.tag_name }} for commit ${{ steps.commit.outputs.sha }}"
            fi

            echo ""
            echo "Tag details:"
            echo "  Name: ${{ steps.commit.outputs.tag_name }}"
            echo "  Commit: ${{ steps.commit.outputs.sha }}"
            echo "  Trigger: ${{ github.event_name }}"
            if [ -n "${{ github.event.inputs.commit_sha }}" ]; then
              echo "  Manual commit: ${{ github.event.inputs.commit_sha }}"
            fi
            echo "❌ Failed to create tag ${{ steps.commit.outputs.tag_name }} for commit ${{ steps.commit.outputs.sha }}"
          fi

          echo ""
          echo "Tag details:"
          echo "  Name: ${{ steps.commit.outputs.tag_name }}"
          echo "  Commit: ${{ steps.commit.outputs.sha }}"
          echo "  Trigger: ${{ github.event_name }}"
          if [ -n "${{ github.event.inputs.commit_sha }}" ]; then
            echo "  Manual commit: ${{ github.event.inputs.commit_sha }}"
          fi

.github/workflows/upload-test-stats.yml (vendored, 1 change)
@ -6,7 +6,6 @@ on:
      - pull
      - trunk
      - periodic
      - periodic-rocm-mi200
      - periodic-rocm-mi300
      - inductor
      - unstable

.github/workflows/xpu.yml (vendored, 20 changes)
@ -59,18 +59,14 @@ jobs:
      runner: linux.c7i.12xlarge
      test-matrix: |
        { include: [
          { config: "default", shard: 1, num_shards: 12, runner: "linux.idc.xpu" },
          { config: "default", shard: 2, num_shards: 12, runner: "linux.idc.xpu" },
          { config: "default", shard: 3, num_shards: 12, runner: "linux.idc.xpu" },
          { config: "default", shard: 4, num_shards: 12, runner: "linux.idc.xpu" },
          { config: "default", shard: 5, num_shards: 12, runner: "linux.idc.xpu" },
          { config: "default", shard: 6, num_shards: 12, runner: "linux.idc.xpu" },
          { config: "default", shard: 7, num_shards: 12, runner: "linux.idc.xpu" },
          { config: "default", shard: 8, num_shards: 12, runner: "linux.idc.xpu" },
          { config: "default", shard: 9, num_shards: 12, runner: "linux.idc.xpu" },
          { config: "default", shard: 10, num_shards: 12, runner: "linux.idc.xpu" },
          { config: "default", shard: 11, num_shards: 12, runner: "linux.idc.xpu" },
          { config: "default", shard: 12, num_shards: 12, runner: "linux.idc.xpu" },
          { config: "default", shard: 1, num_shards: 8, runner: "linux.idc.xpu" },
          { config: "default", shard: 2, num_shards: 8, runner: "linux.idc.xpu" },
          { config: "default", shard: 3, num_shards: 8, runner: "linux.idc.xpu" },
          { config: "default", shard: 4, num_shards: 8, runner: "linux.idc.xpu" },
          { config: "default", shard: 5, num_shards: 8, runner: "linux.idc.xpu" },
          { config: "default", shard: 6, num_shards: 8, runner: "linux.idc.xpu" },
          { config: "default", shard: 7, num_shards: 8, runner: "linux.idc.xpu" },
          { config: "default", shard: 8, num_shards: 8, runner: "linux.idc.xpu" },
        ]}
    secrets: inherit

.gitignore (vendored, 2 changes)
@ -143,7 +143,6 @@ scripts/release_notes/*.json
sccache-stats*.json
lint.json
merge_record.json
.github/scripts/nightly_source_matrix.json

# These files get copied over on invoking setup.py
torchgen/packaged/*
@ -398,4 +397,3 @@ CLAUDE.local.md
/test_*.py
/debug_*.py
CLAUDE_CONTEXT/
/.claude/settings.local.json

@ -121,6 +121,94 @@ command = [
]
is_formatter = true

[[linter]]
code = 'MYPY'
include_patterns = [
    'setup.py',
    'functorch/dim/**/*.py',
    'torch/**/*.py',
    'torch/**/*.pyi',
    'caffe2/**/*.py',
    'caffe2/**/*.pyi',
    'test/test_bundled_images.py',
    'test/test_bundled_inputs.py',
    'test/test_complex.py',
    'test/test_datapipe.py',
    'test/test_futures.py',
    'test/test_numpy_interop.py',
    'test/test_torch.py',
    'test/test_type_hints.py',
    'test/test_type_info.py',
    'test/test_utils.py',
]
exclude_patterns = [
    '**/fb/**',
]
command = [
    'python3',
    'tools/linter/adapters/mypy_linter.py',
    '--config=mypy.ini',
    '--',
    '@{{PATHSFILE}}'
]
init_command = [
    'python3',
    'tools/linter/adapters/pip_init.py',
    '--dry-run={{DRYRUN}}',
    'numpy==1.26.4 ; python_version >= "3.10" and python_version <= "3.11"',
    'numpy==2.1.0 ; python_version >= "3.12"',
    'expecttest==0.3.0',
    'mypy==1.16.0',
    'sympy==1.13.3',
    'types-requests==2.27.25',
    'types-pyyaml==6.0.2',
    'types-tabulate==0.8.8',
    'types-protobuf==5.29.1.20250403',
    'types-setuptools==79.0.0.20250422',
    'types-jinja2==2.11.9',
    'types-colorama==0.4.6',
    'filelock==3.18.0',
    'junitparser==2.1.1',
    'rich==14.1.0',
    'pyyaml==6.0.2',
    'optree==0.13.0',
    'dataclasses-json==0.6.7',
    'pandas==2.2.3',
]

[[linter]]
code = 'MYPYSTRICT'
include_patterns = [
    '.github/**/*.py',
    'benchmarks/instruction_counts/**/*.py',
    'tools/**/*.py',
    'torchgen/**/*.py',
    'torch/utils/_pytree.py',
    'torch/utils/_cxx_pytree.py',
    'torch/utils/benchmark/utils/common.py',
    'torch/utils/benchmark/utils/timer.py',
    'torch/utils/benchmark/utils/valgrind_wrapper/**/*.py',
]
exclude_patterns = [
    # (linbinyu) copied from internal repo
    '**/fb/**',
    'tools/code_analyzer/gen_operators_yaml.py',
    'tools/dynamo/verify_dynamo.py',
    'tools/gen_vulkan_spv.py',
    'tools/test/gen_operators_yaml_test.py',
    'tools/test/gen_oplist_test.py',
    'tools/test/test_selective_build.py',
    'tools/experimental/torchfuzz/**',
]
command = [
    'python3',
    'tools/linter/adapters/mypy_linter.py',
    '--config=mypy-strict.ini',
    '--code=MYPYSTRICT',
    '--',
    '@{{PATHSFILE}}'
]


[[linter]]
code = 'PYREFLY'
@ -142,7 +230,6 @@ init_command = [
    'python3',
    'tools/linter/adapters/pip_init.py',
    '--dry-run={{DRYRUN}}',
    'numpy==1.26.4 ; python_version >= "3.10" and python_version <= "3.11"',
    'numpy==2.1.0 ; python_version >= "3.12"',
    'expecttest==0.3.0',
    'pyrefly==0.36.2',
@ -746,7 +833,8 @@ exclude_patterns = [
command = [
    'python3',
    'tools/linter/adapters/grep_linter.py',
    '--pattern=(cudaSetDevice|cudaGetDevice)\\(',
    '--pattern=cudaSetDevice(',
    '--pattern=cudaGetDevice(',
    '--linter-name=RAWCUDADEVICE',
    '--error-name=raw CUDA API usage',
    """--error-description=\
@ -1050,8 +1138,11 @@ command = [
[[linter]]
code = 'WORKFLOWSYNC'
include_patterns = [
    '.github/workflows/*.yml',
    '.github/workflows/*.yaml',
    '.github/workflows/pull.yml',
    '.github/workflows/trunk.yml',
    '.github/workflows/periodic.yml',
    '.github/workflows/mac-mps.yml',
    '.github/workflows/slow.yml',
]
command = [
    'python3',

@ -374,7 +374,7 @@ cmake_dependent_option(
  "Build the lazy Torchscript backend, not compatible with mobile builds" ON
  "NOT INTERN_BUILD_MOBILE" OFF)
cmake_dependent_option(BUILD_FUNCTORCH "Build Functorch" ON "BUILD_PYTHON" OFF)
cmake_dependent_option(BUILD_BUNDLE_PTXAS "Bundle PTX into torch/bin folder"
cmake_dependent_option(BUILD_BUNDLE_PTXAS "Bundle PTX into torch/bin fodler"
                       OFF "USE_CUDA" OFF)
cmake_dependent_option(USE_KLEIDIAI "Use KleidiAI for the ARM CPU & AARCH64 architecture." ON
                        "CPU_AARCH64" OFF)

@ -11,6 +11,7 @@ aspects of contributing to PyTorch.
<!-- toc -->

- [Developing PyTorch](#developing-pytorch)
  - [Setup the development environment](#setup-the-development-environment)
  - [Tips and Debugging](#tips-and-debugging)
- [Nightly Checkout & Pull](#nightly-checkout--pull)
- [Codebase structure](#codebase-structure)
@ -66,6 +67,23 @@ aspects of contributing to PyTorch.

Follow the instructions for [installing PyTorch from source](https://github.com/pytorch/pytorch#from-source). If you get stuck when developing PyTorch on your machine, check out the [tips and debugging](#tips-and-debugging) section below for common solutions.

### Setup the development environment

First, you need to [fork the PyTorch project on GitHub](https://github.com/pytorch/pytorch/fork) and follow the instructions at [Connecting to GitHub with SSH](https://docs.github.com/en/authentication/connecting-to-github-with-ssh) to set up your SSH authentication credentials.

Then clone the PyTorch project and set up the development environment:

```bash
git clone git@github.com:<USERNAME>/pytorch.git
cd pytorch
git remote add upstream git@github.com:pytorch/pytorch.git

make setup-env
# Or run `make setup-env-cuda` for pre-built CUDA binaries
# Or run `make setup-env-rocm` for pre-built ROCm binaries
source venv/bin/activate  # or `. .\venv\Scripts\activate` on Windows
```

### Tips and Debugging

* If you want to have no-op incremental rebuilds (which are fast), see [Make no-op build fast](#make-no-op-build-fast) below.

@ -1,4 +1,4 @@
[image: PyTorch logo]
[image: PyTorch logo]

--------------------------------------------------------------------------------

@ -72,7 +72,7 @@ Elaborating Further:

If you use NumPy, then you have used Tensors (a.k.a. ndarray).

[image: tensor illustration]
[image: tensor illustration]

PyTorch provides Tensors that can live either on the CPU or the GPU and accelerates the
computation by a huge amount.
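
As a minimal sketch of that claim (assuming PyTorch is installed and a CUDA device may or may not be present), the same tensor API runs on either device:

```python
import torch

x = torch.randn(1024, 1024)      # CPU tensor with a NumPy-like API
if torch.cuda.is_available():    # move to the GPU only if one exists
    x = x.to("cuda")
y = x @ x                        # matmul runs on whichever device holds x
```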
@ -99,7 +99,7 @@ from several research papers on this topic, as well as current and past work suc
While this technique is not unique to PyTorch, it's one of the fastest implementations of it to date.
You get the best of speed and flexibility for your crazy research.

[image: dynamic graph]
[image: dynamic graph]

### Python First


@ -31,9 +31,9 @@ Be careful when running untrusted models. This classification includes models cr

**Prefer to execute untrusted models within a secure, isolated environment such as a sandbox** (e.g., containers, virtual machines). This helps protect your system from potentially malicious code. You can find further details and instructions in [this page](https://developers.google.com/code-sandboxing).

**Be mindful of risky model formats**. Give preference to share and load weights with the appropriate format for your use case. [safetensors](https://huggingface.co/docs/safetensors/en/index) gives the most safety but is the most restricted in what it supports. [`torch.load`](https://pytorch.org/docs/stable/generated/torch.load.html#torch.load) has a significantly larger surface of attack but is more flexible in what it can serialize. See the documentation for more details.
**Be mindful of risky model formats**. Give preference to share and load weights with the appropriate format for your use case. [safetensors](https://huggingface.co/docs/safetensors/en/index) gives the most safety but is the most restricted in what it supports. [`torch.load`](https://pytorch.org/docs/stable/generated/torch.load.html#torch.load) with `weights_only=True` is also secure to our knowledge even though it offers significantly larger surface of attack. Loading un-trusted checkpoint with `weights_only=False` MUST never be done.


Even for more secure serialization formats, unexpected inputs to the downstream system can cause diverse security threats (e.g. denial of service, out of bound reads/writes) and thus we recommend extensive validation of any untrusted inputs.

Important Note: The trustworthiness of a model is not binary. You must always determine the proper level of caution depending on the specific model and how it matches your use case and risk tolerance.
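
A minimal sketch of the two loading paths described above (the checkpoint path `model.pt` is a placeholder for an untrusted file):

```python
import torch

# Restrict unpickling to tensors and primitive containers: the safer default
# for checkpoints you did not produce yourself.
state_dict = torch.load("model.pt", map_location="cpu", weights_only=True)

# Full pickle deserialization can execute arbitrary code on load; reserve it
# for checkpoints from sources you fully trust.
# trusted_obj = torch.load("model.pt", weights_only=False)
```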


@ -38,7 +38,7 @@ set_bool(AT_HIPSPARSELT_ENABLED CAFFE2_USE_HIPSPARSELT)

configure_file(Config.h.in "${CMAKE_CURRENT_SOURCE_DIR}/Config.h")
# TODO: Do not generate CUDAConfig.h for ROCm BUILDS
# At the moment, `jit_macros.h` include CUDAConfig.h for both CUDA and HIP builds
# At the moment, `jit_macors.h` include CUDAConfig.h for both CUDA and HIP builds
if(USE_CUDA OR USE_ROCM)
  configure_file(cuda/CUDAConfig.h.in "${CMAKE_CURRENT_SOURCE_DIR}/cuda/CUDAConfig.h")
endif()
@ -260,7 +260,7 @@ IF(USE_FBGEMM_GENAI)
  if(USE_CUDA)
    # To avoid increasing the build time/binary size unnecessarily, use an allow-list of kernels to build.
    # If you want to integrate a kernel from FBGEMM into torch, you have to add it here.
    set(FBGEMM_CUTLASS_KERNELS_REGEX ".*(mx8mx8bf16_grouped|f4f4bf16_grouped).*")
    set(FBGEMM_CUTLASS_KERNELS_REGEX ".*mx8mx8bf16_grouped.*")
    file(GLOB_RECURSE fbgemm_genai_native_cuda_cu
      "${FBGEMM_GENAI_SRCS}/cutlass_extensions/*.cu"
      "${FBGEMM_GENAI_SRCS}/cutlass_extensions/**/*.cu")
@ -289,16 +289,14 @@ IF(USE_FBGEMM_GENAI)

    set_target_properties(fbgemm_genai PROPERTIES POSITION_INDEPENDENT_CODE ON)

    set(fbgemm_genai_cuh
    set(fbgemm_genai_mx8mx8bf16_grouped
      "${FBGEMM_GENAI_SRCS}/cutlass_extensions/mx8mx8bf16_grouped/"
      "${FBGEMM_GENAI_SRCS}/cutlass_extensions/f4f4bf16_grouped/"
      "${FBGEMM_GENAI_SRCS}/"
    )

    target_include_directories(fbgemm_genai PRIVATE
      ${FBGEMM_THIRD_PARTY}/cutlass/include
      ${FBGEMM_THIRD_PARTY}/cutlass/tools/util/include
      ${fbgemm_genai_cuh}
      ${fbgemm_genai_mx8mx8bf16_grouped}
      ${FBGEMM_GENAI_SRCS}/common/include/   # includes fbgemm_gpu/quantize/utils.h, fbgemm_gpu/quantize/tuning_cache.hpp
      ${FBGEMM_GENAI_SRCS}/include/          # includes fbgemm_gpu/torch_ops.h
    )
@ -315,14 +313,13 @@ IF(USE_FBGEMM_GENAI)

    # Add additional HIPCC compiler flags for performance
    set(FBGEMM_GENAI_EXTRA_HIPCC_FLAGS
      -mllvm
      -amdgpu-coerce-illegal-types=1
      -mllvm
      -enable-post-misched=0
      -mllvm
      -greedy-reverse-local-assignment=1
      -fhip-new-launch-api)
    if(DEFINED ROCM_VERSION_DEV AND ROCM_VERSION_DEV VERSION_LESS "7.2.0")
        list(PREPEND FBGEMM_GENAI_EXTRA_HIPCC_FLAGS -mllvm -amdgpu-coerce-illegal-types=1)
      endif()

    # Only compile for gfx942 for now.
    # This is rather hacky, I could not figure out a clean solution :(

@ -181,7 +181,7 @@ c10::intrusive_ptr<c10::TensorImpl> CPUGeneratorImpl::get_state() const {
  static const size_t size = sizeof(CPUGeneratorImplState);
  static_assert(std::is_standard_layout_v<CPUGeneratorImplState>, "CPUGeneratorImplState is not a PODType");

  auto state_tensor = at::detail::empty_cpu({static_cast<int64_t>(size)}, ScalarType::Byte, std::nullopt, std::nullopt, std::nullopt, std::nullopt);
  auto state_tensor = at::detail::empty_cpu({(int64_t)size}, ScalarType::Byte, std::nullopt, std::nullopt, std::nullopt, std::nullopt);
  auto rng_state = state_tensor.data_ptr();

  // accumulate generator data to be copied into byte tensor

@ -223,7 +223,7 @@ void Context::setSDPPriorityOrder(const std::vector<int64_t>& order) {
    "setSDPPriority order expected ", sdp_priority_order.size() - 1, " but got ",
    at::num_sdp_backends, " unique backends specified in priority order.");
  for (uint32_t i = 0; i < order.size(); i++) {
    sdp_priority_order[i] = static_cast<at::SDPBackend>(order[i]);
    sdp_priority_order[i] = (at::SDPBackend) order[i];
  }
}

@ -825,14 +825,6 @@ void Context::setDisplayVmapFallbackWarnings(bool enabled) {
  display_vmap_fallback_warnings_ = enabled;
}

bool Context::warnOnAccumulateGradStreamMismatch() const {
  return warn_on_accumulate_grad_stream_mismatch_;
}

void Context::setWarnOnAccumulateGradStreamMismatch(bool enabled) {
  warn_on_accumulate_grad_stream_mismatch_ = enabled;
}

bool Context::isDefaultMobileCPUAllocatorSet() {
  return prev_allocator_ptr_ != nullptr;
}

@ -19,7 +19,6 @@
#include <ATen/detail/MPSHooksInterface.h>
#include <ATen/detail/MTIAHooksInterface.h>
#include <ATen/detail/PrivateUse1HooksInterface.h>
#include <ATen/detail/XLAHooksInterface.h>
#include <ATen/detail/XPUHooksInterface.h>
#include <c10/core/QEngine.h>
#include <c10/core/impl/DeviceGuardImplInterface.h>
@ -89,8 +88,6 @@ class TORCH_API Context {
      return at::detail::getHIPHooks();
    } else if (opt_device_type == at::kHPU) {
      return at::detail::getHPUHooks();
    } else if (opt_device_type == at::kXLA) {
      return at::detail::getXLAHooks();
    } else {
      TORCH_CHECK(
          false,
@ -199,7 +196,7 @@ class TORCH_API Context {
    return c10::impl::hasDeviceGuardImpl(c10::DeviceType::IPU);
  }
  static bool hasXLA() {
    return detail::getXLAHooks().hasXLA();
    return c10::impl::hasDeviceGuardImpl(c10::DeviceType::XLA);
  }
  static bool hasXPU() {
    return detail::getXPUHooks().hasXPU();
@ -404,9 +401,6 @@ class TORCH_API Context {
  void setDisplayVmapFallbackWarnings(bool enabled);
  bool areVmapFallbackWarningsEnabled() const;

  void setWarnOnAccumulateGradStreamMismatch(bool enabled);
  bool warnOnAccumulateGradStreamMismatch() const;

  bool isDefaultMobileCPUAllocatorSet();
  void setDefaultMobileCPUAllocator();
  void unsetDefaultMobileCPUAllocator();
@ -497,7 +491,6 @@ class TORCH_API Context {
  bool release_original_weights = false;
#endif
  bool display_vmap_fallback_warnings_ = false;
  bool warn_on_accumulate_grad_stream_mismatch_ = true;
  std::atomic<at::QEngine> quantized_engine = at::QEngine::NoQEngine;
  bool enable_sparse_tensor_invariant_checks = false;
  bool allow_fp16_reduction_cpu = false;

@ -197,7 +197,6 @@ inline at::ScalarType scalar_type(at::ScalarType s) {
    /* don't use TYPE again in case it is an expensive or side-effect op */ \
    at::ScalarType _st = ::detail::scalar_type(the_type);                   \
    RECORD_KERNEL_FUNCTION_DTYPE(at_dispatch_name, _st);                    \
    C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wswitch-enum")             \
    switch (_st) {                                                          \
      __VA_ARGS__                                                           \
      default:                                                              \
@ -209,7 +208,6 @@ inline at::ScalarType scalar_type(at::ScalarType s) {
            toString(_st),                                                  \
            "'");                                                           \
    }                                                                       \
    C10_DIAGNOSTIC_POP()                                                    \
  }()

#define AT_DISPATCH_CASE_FLOATING_TYPES(...)            \

@ -122,7 +122,7 @@ void FunctionalTensorWrapper::freeze_storage() const {
 | 
			
		||||
//          |   have their own storages, but backends like functorch      |
 | 
			
		||||
//         \/   are allowed to re-alias underneath the pass               \/
 | 
			
		||||
// . - - - - - - - - - - - - - .                             . - - - - - - - - - - - - - - - .
 | 
			
		||||
// |    underlying_storage     |                             |      underlying_storage       |
 | 
			
		||||
// |    underyling_storage     |                             |      underyling_storage       |
 | 
			
		||||
// . - - - - - - - - - - - - - .                             . - - - - - - - - - - - - - - - .
 | 
			
		||||
//
 | 
			
		||||
// This constructor is only used by view ops.
 | 
			
		||||
 | 
			
		||||
@ -252,13 +252,13 @@ MapAllocator::MapAllocator(WithFd /*unused*/, std::string_view filename, int fd,
 | 
			
		||||
    if (!(flags_ & ALLOCATOR_MAPPED_FROMFD)) {
 | 
			
		||||
      if (flags_ & ALLOCATOR_MAPPED_SHARED) {
 | 
			
		||||
        // NOLINTNEXTLINE(bugprone-assignment-in-if-condition)
 | 
			
		||||
        if ((fd = open(filename_.c_str(), flags, static_cast<mode_t>(0600))) == -1) {
 | 
			
		||||
        if ((fd = open(filename_.c_str(), flags, (mode_t)0600)) == -1) {
 | 
			
		||||
          TORCH_CHECK(false, "unable to open file <", filename_, "> in read-write mode: ", c10::utils::str_error(errno), " (", errno, ")");
 | 
			
		||||
        }
 | 
			
		||||
      } else if (flags_ & ALLOCATOR_MAPPED_SHAREDMEM) {
 | 
			
		||||
#ifdef HAVE_SHM_OPEN
 | 
			
		||||
        // NOLINTNEXTLINE(bugprone-assignment-in-if-condition)
 | 
			
		||||
        if((fd = shm_open(filename_.c_str(), flags, static_cast<mode_t>(0600))) == -1) {
 | 
			
		||||
        if((fd = shm_open(filename_.c_str(), flags, (mode_t)0600)) == -1) {
 | 
			
		||||
          TORCH_CHECK(false, "unable to open shared memory object <", filename_, "> in read-write mode: ", c10::utils::str_error(errno), " (", errno, ")");
 | 
			
		||||
        }
 | 
			
		||||
#else

@ -503,7 +503,7 @@ RefcountedMapAllocator::RefcountedMapAllocator(WithFd /*unused*/, const char *fi

void RefcountedMapAllocator::initializeAlloc() {
  TORCH_CHECK(base_ptr_, "base_ptr_ is null");
  MapInfo *map_info = static_cast<MapInfo*>(base_ptr_);
  MapInfo *map_info = (MapInfo*)base_ptr_;

#ifdef _WIN32
  ReleaseContext* r_ctx = new ReleaseContext;

@ -539,7 +539,7 @@ void RefcountedMapAllocator::close() {
  }
#else /* _WIN32 */

  MapInfo *info = static_cast<MapInfo*>(data);
  MapInfo *info = (MapInfo*)(data);
  if (--info->refcount == 0) {
#ifdef HAVE_SHM_UNLINK
    if (shm_unlink(filename_.c_str()) == -1) {

@ -862,7 +862,7 @@ void TensorIteratorBase::narrow(int dim, int64_t start, int64_t size) {
  shape_[dim] = size;
  view_offsets_[dim] += start;
  for (auto& op : operands_) {
    op.data = (static_cast<char*>(op.data)) + op.stride_bytes[dim] * start;
    op.data = ((char*)op.data) + op.stride_bytes[dim] * start;
  }
  if (size == 1 && !is_reduction_) {
    coalesce_dimensions();
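The casts being changed above exist because op.stride_bytes is measured in bytes, so the untyped data pointer must be advanced in char units; both spellings compute the same address. A standalone illustration:

#include <cstdint>

// Advance an untyped pointer by a byte offset. void* has no arithmetic,
// so the pointer is first viewed as char* (one-byte elements).
inline void* advance_bytes(void* p, int64_t offset_bytes) {
  return static_cast<char*>(p) + offset_bytes;
}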

@ -873,7 +873,7 @@ void TensorIteratorBase::select_all_keeping_dim(int start_dim, IntArrayRef indic
  TORCH_INTERNAL_ASSERT(start_dim <= ndim());
  for (const auto i : c10::irange(start_dim, ndim())) {
    for (auto& op : operands_) {
      op.data = (static_cast<char*>(op.data)) + op.stride_bytes[i] * indices[i - start_dim];
      op.data = ((char*)op.data) + op.stride_bytes[i] * indices[i - start_dim];
    }
    shape_[i] = 1;
  }

@ -1534,7 +1534,7 @@ void TensorIteratorBase::build(TensorIteratorConfig& config) {

  // XLA and lazy tensors don't have storage, so they don't have an underlying data pointer.
  // Nothing beyond this point is important for meta functions, so it's fine to exit early here.
  // Extend the condition to MAIA tensors as MAIA tensors also don't have storage.
  // Extend the condition to MAIA tesnors as MAIA tensors also don't have storage.
  if (privateuse1_without_storage  ||
      common_device_.type() == DeviceType::XLA  ||
      common_device_.type() == DeviceType::IPU  ||

@ -41,7 +41,7 @@ inline void serial_for_each(
    IntArrayRef strides,
    char** base_ptrs,
    size_t ntensors,
    TensorIteratorBase::loop2d_t loop,
    typename TensorIteratorBase::loop2d_t loop,
    Range range) {
  const auto ndim = shape.size();
  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(

@ -39,7 +39,7 @@ struct HostBlock {
};

template <typename B>
struct alignas(hardware_destructive_interference_size) FreeBlockList {
struct alignas(64) FreeBlockList {
  std::mutex mutex_;
  std::deque<B*> list_;
};
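Both spellings of the alignment above target the same goal: padding each per-size free list (and its mutex) out to its own cache line so that threads hammering different lists do not false-share. A minimal sketch of the idiom (std::hardware_destructive_interference_size needs C++17 and a recent standard library; 64 is the common x86-64 fallback):

#include <mutex>
#include <new>

// Each slot lives on its own cache line; concurrent writers to
// different slots no longer invalidate each other's lines.
struct alignas(std::hardware_destructive_interference_size) PaddedSlot {
  std::mutex mutex_;
  long hits_ = 0;
};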

@ -94,11 +94,11 @@ struct PinnedReserveSegment {
struct TORCH_API HostStats {
  // COUNT: total allocations (active)
  Stat active_requests;
  // SUM: bytes allocated/reserved by this memory allocator. (active)
  // SUM: bytes allocated/reserved by this memory alocator. (active)
  Stat active_bytes;
  // COUNT: total allocations (active + free)
  Stat allocations;
  // SUM: bytes allocated/reserved by this memory allocator. This accounts
  // SUM: bytes allocated/reserved by this memory alocator. This accounts
  // for both free and in-use blocks.
  Stat allocated_bytes;

@ -122,12 +122,12 @@ struct TORCH_API HostStats {
// Struct containing memory allocator summary statistics for host, as they
// are staged for reporting. This is a temporary struct that is used to
// avoid locking the allocator while collecting stats.
struct alignas(hardware_destructive_interference_size) HostStatsStaged {
struct alignas(64) HostStatsStaged {
  std::mutex timing_mutex_;
  // COUNT: total allocations (active + free)
  // LOCK: access to this stat is protected by the allocator's blocks_mutex_
  Stat allocations;
  // SUM: bytes allocated/reserved by this memory allocator. This accounts
  // SUM: bytes allocated/reserved by this memory alocator. This accounts
  // for both free and in-use blocks.
  Stat allocated_bytes;
  // COUNT: number of allocations per bucket (active)

@ -455,7 +455,7 @@ struct CachingHostAllocatorImpl {
  }

  void resetAccumulatedStats() {
    // Resetting accumulated memory stats requires concurrently holding both the
    // Reseting accumulated memory stats requires concurrently holding both the
    // free list mutexes and the blocks mutex. Previously, this was only done in
    // empty_cache function.
    for (size_t i = 0; i < free_list_.size(); ++i) {

@ -482,7 +482,7 @@ struct CachingHostAllocatorImpl {
  }

  void resetPeakStats() {
    // Resetting peak memory stats requires concurrently holding both the
    // Reseting peak memory stats requires concurrently holding both the
    // free list mutexes and the blocks mutex. Previously, this was only done in
    // empty_cache function.
    for (size_t i = 0; i < free_list_.size(); ++i) {

@ -669,7 +669,7 @@ struct CachingHostAllocatorImpl {
    TORCH_CHECK_NOT_IMPLEMENTED(false, "Not implemented for query_event");
  }

  alignas(hardware_destructive_interference_size) std::mutex blocks_mutex_;
  alignas(64) std::mutex blocks_mutex_;
  ska::flat_hash_set<B*> blocks_; // block list
  ska::flat_hash_map<void*, B*> ptr_to_block_;

@ -677,17 +677,17 @@ struct CachingHostAllocatorImpl {
  // size. This allows us to quickly find a free block of the right size.
  // We use deque to store per size free list and guard the list with its own
  // mutex.
  alignas(hardware_destructive_interference_size) std::vector<FreeBlockList<B>>
      free_list_{MAX_SIZE_INDEX};
  alignas(64) std::vector<FreeBlockList<B>> free_list_ =
      std::vector<FreeBlockList<B>>(MAX_SIZE_INDEX);

  alignas(hardware_destructive_interference_size) std::mutex events_mutex_;
  alignas(64) std::mutex events_mutex_;
  std::deque<std::pair<E, B*>> events_; // event queue paired with block

  // Indicates whether the object is active.
  // Set to false in the destructor to signal background threads to stop.
  std::atomic<bool> active_{true};
protected:
  alignas(hardware_destructive_interference_size) HostStatsStaged stats_;
  alignas(64) HostStatsStaged stats_;
};

struct TORCH_API HostAllocator : public at::Allocator {

@ -59,7 +59,9 @@ struct TORCH_API Generator {

  explicit Generator(c10::intrusive_ptr<c10::GeneratorImpl> gen_impl)
   : impl_(std::move(gen_impl)) {
    TORCH_CHECK(impl_.get(), "GeneratorImpl with nullptr is not supported");
    if (impl_.get() == nullptr) {
      throw std::runtime_error("GeneratorImpl with nullptr is not supported");
    }
  }

  bool operator==(const Generator& rhs) const {

@ -190,14 +190,12 @@ class IListRef;
 * it to a function (e.g. `ImplT::<dispatch-function>(this_)`).
 */
#define TORCH_ILISTREF_UNWRAP(TAG, BODY)                         \
  C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wswitch-enum")    \
  switch (TAG) {                                                 \
    TORCH_ILISTREF_FORALL_TAGS(TORCH_ILISTREF_UNWRAP_CASE, BODY) \
    break;                                                       \
    default:                                                     \
      TORCH_INTERNAL_ASSERT(false, "invalid IListRef tag.");     \
  } \
  C10_DIAGNOSTIC_POP()
  }

enum class IListRefTag {
#define DEFINE_TAG(tag, ...) tag,

@ -111,7 +111,9 @@ class TORCH_API TensorBase {
  explicit TensorBase(
      c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> tensor_impl)
      : impl_(std::move(tensor_impl)) {
    TORCH_CHECK(impl_.get(), "TensorImpl with nullptr is not supported");
    if (impl_.get() == nullptr) {
      throw std::runtime_error("TensorImpl with nullptr is not supported");
    }
  }
  TensorBase(const TensorBase&) = default;
  TensorBase(TensorBase&&) noexcept = default;

@ -56,7 +56,7 @@ C10_HOST_DEVICE inline T uniform_int_full_range(V val) {
 * in this overloaded version
 */
template <typename T, typename V>
C10_HOST_DEVICE inline std::enable_if_t<!std::is_floating_point_v<T>, T>uniform_int(V val) {
C10_HOST_DEVICE inline std::enable_if_t<!(std::is_floating_point_v<T>), T>uniform_int(V val) {
  if constexpr (std::is_same_v<T, bool>) {
    return static_cast<bool>(val & 1);
  } else if constexpr (std::is_same_v<T, int64_t>) {
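The return type being touched above is the usual enable_if SFINAE gate: the overload drops out of the candidate set whenever T is a floating-point type, and the extra parentheses on one side of the diff are purely stylistic. A standalone sketch of the same gate:

#include <type_traits>

// Selected only for non-floating-point T; a floating-point T removes
// this overload from overload resolution instead of causing a hard error.
template <typename T, typename V>
std::enable_if_t<!std::is_floating_point_v<T>, T> narrow_to(V val) {
  return static_cast<T>(val);
}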

@ -109,10 +109,6 @@ TORCH_LIBRARY_IMPL(_, AutogradHPU, m) {
  m.fallback(AUTOGRAD_FALLBACK);
}

TORCH_LIBRARY_IMPL(_, AutogradPrivateUse1, m) {
  m.fallback(AUTOGRAD_FALLBACK);
}

#undef AUTOGRAD_FALLBACK

} // namespace

@ -114,25 +114,25 @@ inline typename remove_symint<T>::type unpackSymInt(T x) {
}

template <>
inline remove_symint<c10::SymInt>::type unpackSymInt(c10::SymInt x) {
inline typename remove_symint<c10::SymInt>::type unpackSymInt(c10::SymInt x) {
  return x.guard_int(__FILE__, __LINE__);
}

template <>
inline remove_symint<c10::SymIntArrayRef>::type unpackSymInt(
inline typename remove_symint<c10::SymIntArrayRef>::type unpackSymInt(
    c10::SymIntArrayRef x) {
  return C10_AS_INTARRAYREF_SLOW(x);
}

template <>
inline remove_symint<std::optional<c10::SymInt>>::type unpackSymInt(
inline typename remove_symint<std::optional<c10::SymInt>>::type unpackSymInt(
    std::optional<c10::SymInt> x) {
  return x.has_value() ? std::make_optional(x->guard_int(__FILE__, __LINE__))
                       : std::nullopt;
}

template <>
inline remove_symint<at::OptionalSymIntArrayRef>::type unpackSymInt(
inline typename remove_symint<at::OptionalSymIntArrayRef>::type unpackSymInt(
    at::OptionalSymIntArrayRef x) {
  return x.has_value() ? std::make_optional(C10_AS_INTARRAYREF_SLOW(*x))
                       : std::nullopt;
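The `typename` keyword being added or dropped in these specializations is redundant either way: in a full specialization the template argument is concrete, so `remove_symint<...>::type` is not a dependent name. A standalone illustration:

// Primary template and a partial specialization of a small trait.
template <typename T> struct remove_ref { using type = T; };
template <typename T> struct remove_ref<T&> { using type = T; };

// Outside a template nothing is dependent, so no `typename` is needed;
// inside a template, remove_ref<U>::type would require it (pre-C++20).
remove_ref<int&>::type global_value = 0;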

@ -631,8 +631,8 @@ call_functor_with_args_from_stack_(
    Stack* stack,
    std::index_sequence<ivalue_arg_indices...> /*unused*/,
    guts::typelist::typelist<ArgTypes...>* /*unused*/) {
  (void)stack; // when sizeof...(ivalue_arg_indices) == 0, this argument would
               // be unused and we have to silence the compiler warning.
  (void)(stack); // when sizeof...(ivalue_arg_indices) == 0, this argument would
                 // be unused and we have to silence the compiler warning.

  // We're explicitly filtering out DispatchKeySet from the argument list.
  // Some kernels take a DispatchKeySet as their first argument in order to

@ -148,7 +148,7 @@ struct TORCH_API ClassType : public NamedType {

  void checkNotExist(const std::string& name, const std::string& what) const;

  // Attributes are stored in a specific slot at runtime for efficiency.
  // Attributes are stored in a specific slot at runtime for effiency.
  // When emitting instructions we specify the slot so that attribute access is
  // a constant lookup
  std::optional<size_t> findAttributeSlot(const std::string& name) const {

@ -412,7 +412,7 @@ struct TORCH_API ClassType : public NamedType {
  // Holds method attributes
  std::weak_ptr<CompilationUnit> compilation_unit_;

  // Holds all attributes, attribute details are found on ClassAttribute
  // Holds all atrributes, attribute details are found on ClassAttribute
  std::vector<ClassAttribute> attributes_;
  // Construct mirroring attributes_, only around due to the fact that `containedTypes()` method returns an ArrayRef.
  // Never fill this without using the appropriate provideNewClassAttribute method

@ -442,17 +442,11 @@ RegistrationHandleRAII Dispatcher::registerFallback(DispatchKey dispatchKey, Ker

  auto idx = getDispatchTableIndexForDispatchKey(dispatchKey);
  TORCH_CHECK(idx >= 0 && static_cast<uint64_t>(idx) < backendFallbackKernels_.size(), "idx=", idx);
  // NB: Perserve BC for registering fallback for AutogradPrivateUse1 multiple time,
  // refer to https://github.com/pytorch/pytorch/issues/163979 for more informations.
  TORCH_CHECK(
      dispatchKey == DispatchKey::AutogradPrivateUse1 ||
          !backendFallbackKernels_[idx].kernel.isValid(),
      "Tried to register multiple backend fallbacks for the same dispatch key ",
      dispatchKey,
      "; previous registration ",
      backendFallbackKernels_[idx].debug,
      ", new registration ",
      debug);
    !backendFallbackKernels_[idx].kernel.isValid(),
    "Tried to register multiple backend fallbacks for the same dispatch key ", dispatchKey, "; previous registration ",
    backendFallbackKernels_[idx].debug, ", new registration ", debug
  );
  // NB: inferred function schema is always nullptr for fallbacks, as fallbacks
  // cannot be unboxed
  backendFallbackKernels_[idx] = impl::AnnotatedKernel(std::move(kernel), nullptr, std::move(debug));
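For context, the fallback table written above is what user-visible registrations feed into. A hedged sketch of how a backend typically registers a boxed fallback (the function name is illustrative; the TORCH_LIBRARY_IMPL pattern mirrors the AutogradPrivateUse1 registration earlier in this diff):

#include <torch/library.h>

// Boxed fallback: receives every op that reaches PrivateUse1 without a
// more specific kernel, with its arguments on the stack.
void my_backend_fallback(const c10::OperatorHandle& op, torch::jit::Stack* stack) {
  TORCH_CHECK(false, "NYI: ", op.schema().operator_name(), " on my backend");
}

TORCH_LIBRARY_IMPL(_, PrivateUse1, m) {
  m.fallback(torch::CppFunction::makeFromBoxedFunction<&my_backend_fallback>());
}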

@ -537,7 +531,7 @@ int64_t Dispatcher::sequenceNumberForRunningRecordFunction(DispatchKey dispatchK

  // Note: this records a sequence number for both Autograd keys, and for
  // non-Autograd keys where the dispatchKeySet still contains an autograd key.
  // This means that we might collect the same sequence number two different
  // This means that we might collect the same sequence nubmer two different
  // events if they all occurred above Autograd and still had the Autograd
  // dispatch key in the dispatch key set.
  // However, this usually doesn't happen: normally the first call will

@ -585,7 +585,7 @@ class TORCH_API OperatorHandle {

  // We need to store this iterator in order to make
  // Dispatcher::cleanup() fast -- it runs a lot on program
  // termination (and presumably library unloading).
  // termination (and presuambly library unloading).
  std::list<Dispatcher::OperatorDef>::iterator operatorIterator_;
};

@ -365,7 +365,7 @@ std::pair<const AnnotatedKernel&, const char*> OperatorEntry::computeDispatchTab
  //          For autograd keys, we only use kernel from CompositeImplicitAutograd when there's no direct registration
  //          to its corresponding backend key or CompositeExplicitAutograd. See Note [CompositeExplicitAutograd and CompositeImplicitAutograd].
  //          For AutogradOther, we eagerly return ambiguousAutogradOtherKernel() if there's registration to any of
  //          its backends and ask backend extender to request a dedicated Autograd key for the backend.
  //          its backends and ask backend extender to request a decicated Autograd key for the backend.
  //          See Note [Ambiguity in AutogradOther kernel] for more details.
  //          A CompositeExplicitAutograd kernel prevents CompositeImplicitAutograd kernel being used for Autograd keys, but it doesn't
  //          cause confusion for AutogradOther. It's pretty straightforward to use Autograd (if available)

@ -18,7 +18,6 @@ struct TORCH_API EnumType : public NamedType {
      TypePtr value,
      std::vector<EnumNameValue> enum_names_values,
      std::weak_ptr<::torch::jit::CompilationUnit> cu) {
    C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wswitch-enum")
    switch (value->kind()) {
      case TypeKind::IntType:
      case TypeKind::FloatType:

@ -35,7 +34,6 @@ struct TORCH_API EnumType : public NamedType {
            value->str(),
            "', only int, float and string are supported");
    }
    C10_DIAGNOSTIC_POP()
  }

  std::string str() const override {

@ -261,7 +261,7 @@ std::ostream& operator<<(std::ostream& out, const FunctionSchema& schema) {
    //
    // There are 2 cases
    // 1. something like 'aten::items.str(Dict(str, t) self) -> ((str, t)[])'.
    // without the extra parenthesis, the c++ scheme parser can not parse it.
    // without the extra parenthesis, the c++ schem parser can not parse it.
    // 2. something like '-> ((str, str))'. Need extra parenthesis so the return
    // type is a single tuple rather than two strings.
    // PR (https://github.com/pytorch/pytorch/pull/23204) has more context about

@ -68,7 +68,11 @@ Symbol InternedStrings::_symbol(const std::string& s) {
    return it->second;

  auto pos = s.find("::");
  TORCH_CHECK(pos != std::string::npos, "all symbols must have a namespace, <namespace>::<string>, but found: ", s);
  if (pos == std::string::npos) {
    std::stringstream ss;
    ss << "all symbols must have a namespace, <namespace>::<string>, but found: " << s;
    throw std::runtime_error(ss.str());
  }
  Symbol ns = _symbol("namespaces::" + s.substr(0, pos));

  Symbol sym(sym_to_info_.size());

@ -117,7 +121,12 @@ std::string Symbol::domainString() const {
}

Symbol Symbol::fromDomainAndUnqualString(const std::string & d, const std::string & s) {
  TORCH_CHECK(d.compare(0, domain_prefix().size(), domain_prefix()) == 0, "Symbol: domain string is expected to be prefixed with '", domain_prefix(), "', e.g. 'org.pytorch.aten'");
  if (d.compare(0, domain_prefix().size(), domain_prefix()) != 0) {
    std::ostringstream ss;
    ss << "Symbol: domain string is expected to be prefixed with '"
       << domain_prefix() << "', e.g. 'org.pytorch.aten'";
    throw std::runtime_error(ss.str());
  }
  std::string qualString = d.substr(domain_prefix().size()) + "::" + s;
  return fromQualString(qualString);
}
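Both error paths above enforce the same invariant: every interned symbol is namespace-qualified. A short usage sketch (illustrative):

#include <ATen/core/interned_strings.h>

void symbol_demo() {
  // Parses "<namespace>::<string>"; an unqualified string would take the
  // error path shown above.
  c10::Symbol add = c10::Symbol::fromQualString("aten::add");
  const char* unqual = add.toUnqualString();  // "add"
  (void)unqual;
}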

@ -7,7 +7,6 @@
#include <ATen/core/jit_type.h>
#include <ATen/core/stack.h>
#include <ATen/core/type_factory.h>
#include <c10/util/Exception.h>
#include <c10/util/StringUtil.h>
#include <c10/util/hash.h>
#include <c10/util/irange.h>

@ -413,7 +412,7 @@ size_t IValue::hash(const IValue& v) {
    case Tag::Enum:
    case Tag::Stream:
    case Tag::Uninitialized:
      TORCH_CHECK(false,
      throw std::runtime_error(
          "unhashable type: '" + v.type()->repr_str() + "'");
  }
  // the above switch should be exhaustive

@ -601,8 +600,8 @@ std::ostream& IValue::repr(
      double d = v.toDouble();
      int c = std::fpclassify(d);
      if ((c == FP_NORMAL || c == FP_ZERO ) && std::abs(d) < 1e10) {
        int64_t i = static_cast<int64_t>(d);
        if (static_cast<double>(i) == d) {
        int64_t i = int64_t(d);
        if (double(i) == d) {
          // -0.0 (signed zero) needs to be parsed as -0.
          if (i == 0 && std::signbit(d)) {
            return out << "-" << i << ".";

@ -799,8 +798,8 @@ std::ostream& operator<<(std::ostream & out, const IValue & v) {
      double d = v.toDouble();
      int c = std::fpclassify(d);
      if (c == FP_NORMAL || c == FP_ZERO) {
        int64_t i = static_cast<int64_t>(d);
        if (static_cast<double>(i) == d) {
        int64_t i = int64_t(d);
        if (double(i) == d) {
          return out << i << ".";
        }
      }
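Aside from the cast style, both sides of these hunks implement the same printing rule: a finite double with an exact integer value is printed with a trailing dot so that it re-parses as a float rather than an int. A standalone sketch of the rule:

#include <cmath>
#include <cstdint>
#include <iostream>

void print_double_like_repr(double d) {
  int c = std::fpclassify(d);
  if ((c == FP_NORMAL || c == FP_ZERO) && std::abs(d) < 1e10) {
    auto i = static_cast<int64_t>(d);
    if (static_cast<double>(i) == d) {
      // -0.0 keeps its sign: printed as "-0."
      if (i == 0 && std::signbit(d)) { std::cout << "-0.\n"; return; }
      std::cout << i << ".\n";  // e.g. 3.0 -> "3."
      return;
    }
  }
  std::cout << d << '\n';  // non-integral or huge values print normally
}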

@ -1176,7 +1176,7 @@ struct TORCH_API IValue final {
  using HashIdentityIValueMap =
      std::unordered_map<IValue, IValue, HashIdentityIValue, CompIdentityIValues>;

  // Checks if this and rhs has a subvalues in common.
  // Chechs if this and rhs has a subvalues in common.
  // [t1,t2] and [t2, t3] returns true.
  bool overlaps(const IValue& rhs) const;

@ -1501,7 +1501,7 @@ struct C10_EXPORT ivalue::Object final : c10::intrusive_ptr_target {
  // However, the CompilationUnit holds ownership of the type's graphs, so
  // inserting a constant object into a Graph would create a reference cycle if
  // that constant object held a shared_ptr to its CU. For these objects we
  // instantiate them with non-owning references to its CU
  // instatiate them with non-owning references to its CU
  Object(WeakOrStrongTypePtr type, size_t numSlots) : type_(std::move(type)) {
    slots_.resize(numSlots);
  }

@ -8,7 +8,6 @@
#include <ATen/core/type_factory.h>
#include <ATen/core/qualified_name.h>
#include <c10/util/TypeList.h>
#include <c10/util/Exception.h>
#include <optional>
#include <c10/core/SymFloat.h>
#include <c10/core/SymBool.h>

@ -41,7 +40,7 @@ void standardizeVectorForUnion(std::vector<TypePtr>* to_flatten);
inline bool is_contiguous_strides(
    const IntArrayRef sizes,
    const IntArrayRef strides) {
  size_t n_dim = sizes.size();
  int n_dim = static_cast<int>(sizes.size());
  if (n_dim == 0) {
    return true;
  }

@ -50,7 +49,7 @@ inline bool is_contiguous_strides(
    return false;
  }

  for (int i = static_cast<int>(n_dim) - 2; i >= 0; i--) {
  for (int i = n_dim - 2; i >= 0; i--) {
    if (strides[i] != strides[i + 1] * sizes[i + 1]) {
      return false;
    }
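The loop above encodes the usual row-major contiguity rule: the last stride must be 1 (checked just before this hunk) and each earlier stride must equal the next stride times the next size. Worked example: sizes [2, 3, 4] are contiguous exactly for strides [12, 4, 1]. A standalone sketch of the same check:

#include <cstdint>
#include <vector>

bool contiguous_row_major(const std::vector<int64_t>& sizes,
                          const std::vector<int64_t>& strides) {
  int64_t expected = 1;  // stride of the innermost dimension
  for (auto i = static_cast<int64_t>(sizes.size()) - 1; i >= 0; --i) {
    if (strides[i] != expected) {
      return false;
    }
    expected *= sizes[i];
  }
  return true;  // vacuously true for rank 0, matching the code above
}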

@ -117,8 +116,10 @@ struct SingleElementType : public SharedType {

 protected:
  SingleElementType(TypePtr elem) : SharedType(Kind), elem(std::move(elem)) {
    TORCH_CHECK(this->elem, c10::str(
    if (!this->elem) {
      throw std::runtime_error(c10::str(
            "Can not create ", typeKindToString(Kind), " with None type"));
    }
  }

 private:

@ -373,7 +374,7 @@ struct TORCH_API SymbolicShape {
  // Unranked shape constructor.
  SymbolicShape() : dims_(std::nullopt) {}

  // Known rank but unknown dimensions.
  // Known rank but unknown dimentions.
  SymbolicShape(std::optional<size_t> rank) : dims_(std::nullopt) {
    if(!rank) {
      return;

@ -415,12 +416,16 @@ struct TORCH_API SymbolicShape {
  }

  ShapeSymbol operator[](size_t i) const {
    TORCH_CHECK(dims_, "Rank isn't fixed");
    if (!dims_) {
      throw std::runtime_error("Rank isn't fixed");
    }
    return (*dims_).at(i);
  }

  ShapeSymbol at(size_t i) const {
    TORCH_CHECK(dims_, "Rank isn't fixed");
    if (!dims_) {
      throw std::runtime_error("Rank isn't fixed");
    }
    return (*dims_).at(i);
  }

@ -515,7 +520,9 @@ struct VaryingShape {
  }

  const std::optional<T> &operator[](size_t i) const {
    TORCH_CHECK(dims_, "Rank isn't fixed");
    if (!dims_) {
      throw std::runtime_error("Rank isn't fixed");
    }
    return (*dims_).at(i);
  }

@ -884,9 +891,9 @@ struct TORCH_API ListType

  // global singleton
  // Given an inner type T and an identifier,
  // this function will return the global singleton type pointer
  // this function wil return the global singleton type pointer
  // the type List<T>.
  // The extra "identifier" argument is needed because we have multiple container types
  // The extra "identifier" argument is needed beccause we have multiple container types
  // that all re-use this function (List<T>, array<T, N>, etc.)
  static TypePtr get(const std::string& identifier, TypePtr inner);

@ -922,7 +929,6 @@ struct TORCH_API DictType : public SharedType {
    if (auto dyn = key->castRaw<DynamicType>()) {
      kind = dyn->dynamicKind();
    }
    C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wswitch-enum")
    switch (kind) {
      case TypeKind::AnyType:
      case TypeKind::IntType:

@ -939,7 +945,6 @@ struct TORCH_API DictType : public SharedType {
            key->str(),
            "', only int, float, complex, Tensor, device and string keys are supported");
    }
    C10_DIAGNOSTIC_POP()
  }

  // aligned with the format in FunctionSchema

@ -952,7 +957,9 @@ struct TORCH_API DictType : public SharedType {

  TypePtr createWithContained(
      std::vector<TypePtr> contained_types) const override {
    TORCH_CHECK(contained_types.size() == 2, "Expected 2 contained types");
    if (contained_types.size() != 2) {
      throw std::runtime_error("Expected 2 contained types");
    }
    return create(std::move(contained_types.at(0)), std::move(contained_types.at(1)));
  }

@ -2373,7 +2380,7 @@ private:
};

template<>
inline detail::CastReturnType<NamedType>::type Type::cast<NamedType>() {
inline typename detail::CastReturnType<NamedType>::type Type::cast<NamedType>() {
  if (kind() == TypeKind::TupleType || kind() == TypeKind::FunctionType ||
      kind() == TypeKind::ClassType || kind() == TypeKind::InterfaceType) {
    return std::static_pointer_cast<NamedType>(static_cast<NamedType *>(this)->shared_from_this());

@ -2382,7 +2389,7 @@ inline detail::CastReturnType<NamedType>::type Type::cast<NamedType>() {
}

template<>
inline detail::CastConstReturnType<NamedType>::type Type::cast<NamedType>() const {
inline typename detail::CastConstReturnType<NamedType>::type Type::cast<NamedType>() const {
  if (kind() == TypeKind::TupleType || kind() == TypeKind::FunctionType ||
      kind() == TypeKind::ClassType || kind() == TypeKind::InterfaceType) {
    return std::static_pointer_cast<const NamedType>(static_cast<const NamedType *>(this)->shared_from_this());

@ -185,11 +185,11 @@ struct TORCH_API Type {
        : repr_(nullptr) {}

    /* implicit */ SingletonOrSharedTypePtr(SingletonTypePtr<T> p)
        : repr_(makeSingletonSharedPtr(p.get())) {}
        : repr_(p) {}

    template <typename U, std::enable_if_t<std::is_convertible_v<U*, T*>, bool> = true>
    /* implicit */ SingletonOrSharedTypePtr(SingletonTypePtr<U> p)
        : repr_(makeSingletonSharedPtr(static_cast<T*>(p.get()))) {}
        : repr_(SingletonTypePtr<T>(p.get())) {}

    // We need to support construction from T* for pybind. The problem

@ -202,8 +202,8 @@ struct TORCH_API Type {
    // Case 2: if T is exactly Type, we need to do a dynamic_cast to
    // check if it's a SharedType and do the right thing.
    //
    // Case 3: Otherwise, T is not a SharedType. Use a singleton
    // pointer.
    // Case 3: Otherwise, T is not a SharedType. (debug-check this
    // assumption!) Use a singleton pointer.

    template <typename U = T, std::enable_if_t<std::is_base_of_v<SharedType, U>, bool> = true>
    /* implicit */ SingletonOrSharedTypePtr(T* p) : SingletonOrSharedTypePtr(static_cast<typename detail::as_shared_type<U>::type>(p)->shared_from_this()) {}

@ -211,15 +211,15 @@ struct TORCH_API Type {
    template <typename U = T, std::enable_if_t<std::is_same_v<Type, U>, bool> = true>
    /* implicit */ SingletonOrSharedTypePtr(T* p) {
      if (auto* shared_p = dynamic_cast<typename detail::as_shared_type<U>::type>(p)) {
        repr_ = shared_p->shared_from_this();
        repr_ = Repr(shared_p->shared_from_this());
      } else {
        repr_ = makeSingletonSharedPtr(p);
        repr_ = Repr(p);
      }
    }

    template <typename U = T, std::enable_if_t<!std::is_same_v<Type, U> && !std::is_base_of_v<SharedType, U>, bool> = true>
    /* implicit */ SingletonOrSharedTypePtr(T* p)
        : repr_(makeSingletonSharedPtr(p)) {
        : repr_(p) {
      TORCH_INTERNAL_ASSERT_DEBUG_ONLY(dynamic_cast<typename detail::as_shared_type<U>::type>(p) == nullptr);
    }

@ -230,19 +230,19 @@ struct TORCH_API Type {
    ~SingletonOrSharedTypePtr() = default;

    T* get() const {
      return repr_.get();
      return repr_.isSharedAndNonNull() ? repr_.shared_.repr_.get() : static_cast<T*>(repr_.rawRepr().first);
    }

    operator bool() const {
      return repr_ != nullptr;
      return repr_.isNonNull();
    }

    bool operator==(std::nullptr_t) const {
      return repr_ == nullptr;
      return !repr_.isNonNull();
    }

    bool operator!=(std::nullptr_t) const {
      return repr_ != nullptr;
      return repr_.isNonNull();
    }

    template <typename U = T, std::enable_if_t<!std::is_same_v<std::remove_const_t<U>, void>, bool> = true>

@ -255,14 +255,138 @@ struct TORCH_API Type {
    }

  private:
    // Use shared_ptr's aliasing constructor to create a non-owning pointer
    // to a singleton. The lifetime is tied to the null shared_ptr, so there's
    // no reference counting overhead for the singleton itself.
    static std::shared_ptr<T> makeSingletonSharedPtr(T* ptr) {
      return std::shared_ptr<T>(std::shared_ptr<T>(), ptr);
    }
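makeSingletonSharedPtr above leans on shared_ptr's aliasing constructor: the new pointer shares its (empty) control block with a default-constructed shared_ptr, so get() returns the singleton but no deleter will ever run. A standalone demo:

#include <cassert>
#include <memory>

struct Widget { int x = 42; };

std::shared_ptr<Widget> non_owning(Widget* raw) {
  // Aliasing constructor: the owner is empty, the stored pointer is `raw`.
  return std::shared_ptr<Widget>(std::shared_ptr<Widget>(), raw);
}

void demo() {
  static Widget singleton;
  auto p = non_owning(&singleton);
  assert(p.get() == &singleton);
  assert(p.use_count() == 0);  // empty control block: nothing to free
}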

    // NOTE: SharedPtrWrapper exists to work around a baffling bug in
    // nvcc; see comment in destroy() below.
    struct SharedPtrWrapper {
      SharedPtrWrapper(std::shared_ptr<T> &&x)
          : repr_(std::move(x)) {}
      std::shared_ptr<T> repr_;
    };
    union Repr {
      Repr() : Repr(nullptr) {}

    std::shared_ptr<T> repr_;
      explicit Repr(std::shared_ptr<T> x)
          : shared_(std::move(x)) {}

      explicit Repr(std::nullptr_t)
          : singletonRepr_(nullptr) {}

      explicit Repr(SingletonTypePtr<T> p)
          : singletonRepr_(p.get()) {}

      ~Repr() {
        destroy();
      }

      // NOTE: the only non-UB way to access our null state is through
      // rawRepr(), because our copy operation doesn't preserve which
      // union member is active for null pointers.
      Repr(const Repr& rhs) {
        if (rhs.isSharedAndNonNull()) {
          new (&shared_) SharedPtrWrapper(rhs.shared_);
        } else {
          singletonRepr_.singleton_ = static_cast<T*>(rhs.rawRepr().first);
          TORCH_INTERNAL_ASSERT_DEBUG_ONLY(rhs.singletonRepr_.unused_ == nullptr);
          singletonRepr_.unused_ = nullptr;
        }
      }

      Repr(Repr&& rhs) noexcept {
        if (rhs.isSharedAndNonNull()) {
          new (&shared_) SharedPtrWrapper(std::move(rhs.shared_));
        } else {
          singletonRepr_.singleton_ = static_cast<T*>(rhs.rawRepr().first);
          TORCH_INTERNAL_ASSERT_DEBUG_ONLY(rhs.singletonRepr_.unused_ == nullptr);
          singletonRepr_.unused_ = nullptr;
        }
      }

      Repr& operator=(const Repr& rhs) {
        if (&rhs == this) {
          return *this;
        }
        if (rhs.isSharedAndNonNull()) {
          if (isSharedAndNonNull()) {
            shared_ = rhs.shared_;
          } else {
            new (&shared_) SharedPtrWrapper(rhs.shared_);
          }
        } else {
          if (isSharedAndNonNull()) {
            destroy();
          }
          singletonRepr_.singleton_ = static_cast<T*>(rhs.rawRepr().first);
          TORCH_INTERNAL_ASSERT_DEBUG_ONLY(rhs.rawRepr().nullIfSingleton_ == nullptr);
          singletonRepr_.unused_ = nullptr;
        }
        return *this;
      }

      Repr& operator=(Repr&& rhs) noexcept {
        if (&rhs == this) {
          return *this;
        }
        if (rhs.isSharedAndNonNull()) {
          if (isSharedAndNonNull()) {
            shared_ = std::move(rhs.shared_);
          } else {
            new (&shared_) SharedPtrWrapper(std::move(rhs.shared_));
          }
        } else {
          if (isSharedAndNonNull()) {
            destroy();
          }
          singletonRepr_.singleton_ = static_cast<T*>(rhs.rawRepr().first);
          TORCH_INTERNAL_ASSERT_DEBUG_ONLY(rhs.rawRepr().nullIfSingleton_ == nullptr);
          singletonRepr_.unused_ = nullptr;
        }
        return *this;
      }

      SharedPtrWrapper shared_;

      struct SingletonRepr {
        explicit SingletonRepr(T* s) : singleton_(s) {}
        T* singleton_;
        void* unused_ = nullptr;
      } singletonRepr_;
      struct RawRepr {
        void* first;
        void* nullIfSingleton_;
      };

      // It is UB to read the singleton part of Repr if it was
      // constructed as a shared_ptr and vice versa, but memcpying out
      // the representation is always OK, so here's an accessor to obey
      // the letter of the law.
      RawRepr rawRepr() const {
        RawRepr repr{};
        memcpy(&repr, reinterpret_cast<const char *>(this), sizeof(RawRepr));
        return repr;
      }
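rawRepr() above is the standard memcpy escape hatch for unions: reading an inactive member directly is UB, but copying the object representation into a plain struct is always allowed. A standalone sketch of the idiom:

#include <cstdint>
#include <cstring>

union Slot {
  void* ptr;
  uint64_t bits;
};

// Inspect the first word of a trivially-copyable object without
// touching an inactive union member.
inline uint64_t first_word(const Slot& s) {
  uint64_t w = 0;
  static_assert(sizeof(w) <= sizeof(Slot), "Slot too small");
  std::memcpy(&w, &s, sizeof(w));
  return w;
}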

      bool isNonNull() const {
        auto repr = rawRepr();
        TORCH_INTERNAL_ASSERT_DEBUG_ONLY(repr.nullIfSingleton_ == nullptr || repr.first != nullptr);
        return repr.first != nullptr;
      }

      bool isSharedAndNonNull() const {
        return rawRepr().nullIfSingleton_ != nullptr;
      }

     private:
      void destroy() {
        if (isSharedAndNonNull()) {
          // Without SharedPtrWrapper, this line would read
          // `shared_.~shared_ptr()` and nvcc would complain with
          // "error: expected primary-expression before '>' token"
          // referring to the "t" in "shared_ptr". SharedPtrWrapper
          // exists to work around this compiler bug.
          shared_.~SharedPtrWrapper();
        }
      }
    } repr_;
  };

  using TypePtr = SingletonOrSharedTypePtr<Type>;

@ -21,7 +21,7 @@ namespace c10 {

namespace detail {
// The first argument of the schema might be of type DispatchKeySet, in which case we remove it.
// We do this because every argument in a function schema is expected to be convertible
// We do this because every argument in a function schema is expected to be convertable
// to an ivalue, but DispatchKeySet is not a type we want the jit to be aware of.
// See Note [Plumbing Keys Through The Dispatcher]
template<class KernelFunctor>

@ -251,7 +251,7 @@ TEST(OperatorRegistrationTest, whenRegisteringCPUTensorType_thenCanOnlyCallUnbox
  callOpUnboxedWithPrecomputedDispatchKeySet<void, Tensor>(*op, c10::DispatchKeySet(c10::DispatchKey::CPU), dummyTensor(c10::DispatchKey::CUDA));
  EXPECT_TRUE(called_kernel_cpu);

  // Ensure that dispatch key from tensor is not used here.
  // Ensure that disptach key from tensor is not used here.
  called_kernel_cpu = false;
  expectThrows<c10::Error>([&] {
    callOpUnboxedWithPrecomputedDispatchKeySet<void, Tensor>(*op, c10::DispatchKeySet(c10::DispatchKey::CUDA), dummyTensor(c10::DispatchKey::CPU));
Some files were not shown because too many files have changed in this diff