Mirror of https://github.com/pytorch/pytorch.git (synced 2025-11-04 08:00:58 +08:00)

Compare commits: csl/revert ... fixbugh100 (1 commit)
Commit 56b58b1b6e

@@ -8,8 +8,6 @@ if [[ "$GPU_ARCH_VERSION" == *"12.6"* ]]; then
     export TORCH_CUDA_ARCH_LIST="8.0;9.0"
 elif [[ "$GPU_ARCH_VERSION" == *"12.8"* ]]; then
     export TORCH_CUDA_ARCH_LIST="8.0;9.0;10.0;12.0"
-elif [[ "$GPU_ARCH_VERSION" == *"12.9"* ]]; then
-    export TORCH_CUDA_ARCH_LIST="8.0;9.0;10.0;12.0"
 elif [[ "$GPU_ARCH_VERSION" == *"13.0"* ]]; then
     export TORCH_CUDA_ARCH_LIST="8.0;9.0;10.0;11.0;12.0+PTX"
 fi
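
The arch-list selection above is just a CUDA-version-to-arch-list lookup; the same logic in Python, as a sketch for illustration only (the mapping values are taken verbatim from the hunk, the function name is made up):

    # Sketch: mirrors the shell branches above; the new side of the diff
    # simply drops the "12.9" entry.
    ARCH_LISTS = {
        "12.6": "8.0;9.0",
        "12.8": "8.0;9.0;10.0;12.0",
        "13.0": "8.0;9.0;10.0;11.0;12.0+PTX",
    }

    def torch_cuda_arch_list(gpu_arch_version: str) -> str | None:
        # Substring match, like the [[ ... == *"12.8"* ]] tests in the script.
        for key, archs in ARCH_LISTS.items():
            if key in gpu_arch_version:
                return archs
        return None
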
@@ -143,7 +143,7 @@ def sample_vllm_test_library():
                 "pytest -v -s compile/test_decorator.py",
             ],
         },
-        "vllm_language_model_test_extended_generation_28_failure_test": {
+        "vllm_languagde_model_test_extended_generation_28_failure_test": {
             "title": "Language Models Test (Extended Generation) 2.8 release failure",
             "id": "vllm_languagde_model_test_extended_generation_28_failure_test",
             "package_install": [

@@ -63,7 +63,7 @@ class VllmBuildParameters:
     # DOCKERFILE_PATH: path to Dockerfile used when use_local_dockerfile is True"
     use_local_dockerfile: bool = env_bool_field("USE_LOCAL_DOCKERFILE", True)
     dockerfile_path: Path = env_path_field(
-        "DOCKERFILE_PATH", ".github/ci_configs/vllm/Dockerfile"
+        "DOCKERFILE_PATH", ".github/ci_configs/vllm/Dockerfile.tmp_vllm"
     )

     # the cleaning script to remove torch dependencies from pip
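
The env_path_field/env_bool_field helpers themselves are not part of this diff; a minimal sketch of what such a dataclass field factory plausibly looks like (only the names and call sites come from the hunk — the implementation here is assumed):

    import os
    from dataclasses import field
    from pathlib import Path

    def env_path_field(env_var: str, default: str):
        # Assumed behavior: resolve the path from the environment at dataclass
        # construction time, falling back to the given default.
        return field(default_factory=lambda: Path(os.environ.get(env_var, default)))
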

.github/actions/setup-rocm/action.yml (13 lines changed)
@@ -111,16 +111,3 @@ runs:
         # This video group ID maps to subgid 1 inside the docker image due to the /etc/subgid entries.
         # The group name corresponding to group ID 1 can change depending on the OS, so both are necessary.
         echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd $DEVICE_FLAG --group-add video --group-add $render_gid --group-add daemon --group-add bin --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --network=host" >> "${GITHUB_ENV}"
-
-    - name: configure aws credentials
-      id: aws_creds
-      uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722 # v4.1.0
-      with:
-        role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_s3_and_ecr_read_only
-        aws-region: us-east-1
-        role-duration-seconds: 18000
-
-    - name: Login to Amazon ECR
-      id: login-ecr
-      continue-on-error: true
-      uses: aws-actions/amazon-ecr-login@062b18b96a7aff071d4dc91bc00c4c1a7945b076 # v2.0.1

.github/ci_commit_pins/audio.txt (2 lines changed)
@@ -1 +1 @@
-8ad2aa5d354d1bf432339113860185d5a5d1abbd
+87ff22e49ed0e92576c4935ccb8c143daac4a3cd

.github/ci_commit_pins/vision.txt (2 lines changed)
@@ -1 +1 @@
-f5c6c2ec6490455e86f67b2a25c10390d60a27f7
+966da7e46f65d6d49df3e31214470a4fe5cc8e66

.github/ci_commit_pins/vllm.txt (2 lines changed)
@@ -1 +1 @@
-e5192819208c4d68194844b7dfafbc00020d0dea
+0ad9951c416d33c5da4f7a504fb162cbe62386f5

.github/ci_commit_pins/xla.txt (2 lines changed)
@@ -1 +1 @@
-0fa6e3129e61143224663e1ec67980d12b7ec4eb
+2a9138a26ee257fef05310ad3fecf7c55fe80d73
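
Each pin file is a single line holding the exact upstream commit CI builds against; bumping a pin is what moves CI to a new snapshot of that dependency. A sketch of the typical consumption pattern (paths from the diff; the checkout step itself is an assumption about how the CI scripts use these files):

    from pathlib import Path
    import subprocess

    def checkout_pinned(repo_dir: str, pin_file: str) -> None:
        # Read the pinned SHA (single line) and check the dependency out at it.
        sha = Path(pin_file).read_text().strip()
        subprocess.run(["git", "-C", repo_dir, "checkout", sha], check=True)

    # e.g. checkout_pinned("xla", ".github/ci_commit_pins/xla.txt")
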

@@ -1,41 +1,59 @@
+# TODO(elainwy): remove this file after the torch nightly dockerfile is in sync in vllm repo
 # The vLLM Dockerfile is used to construct vLLM image against torch nightly and torch main that can be directly used for testing

 ARG CUDA_VERSION=12.8.1
 ARG PYTHON_VERSION=3.12

 # BUILD_BASE_IMAGE: used to setup python build xformers, and vllm wheels, It can be replaced with a different base image from local machine,
 # by default, it uses the torch-nightly-base stage from this docker image
 ARG BUILD_BASE_IMAGE=torch-nightly-base

 # FINAL_BASE_IMAGE: used to set up vllm-instaled environment and build flashinfer,
 # by default, it uses devel-ubuntu22.04 official image.
 ARG FINAL_BASE_IMAGE=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04

 # The logic is copied from https://github.com/vllm-project/vllm/blob/main/docker/Dockerfile
 ARG GET_PIP_URL="https://bootstrap.pypa.io/get-pip.py"


 #################### TORCH NIGHTLY BASE IMAGE ####################
 # A base image for building vLLM with devel ubuntu 22.04, this is mainly used to build vllm in vllm builtkite ci
 FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04 as torch-nightly-base

 ARG CUDA_VERSION
 ARG PYTHON_VERSION
 ARG GET_PIP_URL

-# Install system dependencies and uv, then create Python virtual environment
+# Install Python and other dependencies
 RUN apt-get update -y \
-    && apt-get install -y ccache software-properties-common git curl sudo vim python3-pip \
-    && curl -LsSf https://astral.sh/uv/install.sh | sh \
-    && $HOME/.local/bin/uv venv /opt/venv --python ${PYTHON_VERSION} \
-    && rm -f /usr/bin/python3 /usr/bin/python3-config /usr/bin/pip \
-    && ln -s /opt/venv/bin/python3 /usr/bin/python3 \
-    && ln -s /opt/venv/bin/python3-config /usr/bin/python3-config \
-    && ln -s /opt/venv/bin/pip /usr/bin/pip \
+    && apt-get install -y ccache software-properties-common git curl wget sudo vim \
+    && add-apt-repository -y ppa:deadsnakes/ppa \
+    && apt-get update -y \
+    && apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv \
+    && update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1 \
+    && update-alternatives --set python3 /usr/bin/python${PYTHON_VERSION} \
+    && ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config \
+    && curl -sS ${GET_PIP_URL} | python${PYTHON_VERSION} \
     && python3 --version && python3 -m pip --version

-# Upgrade to GCC 10 to avoid https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92519
-# as it was causing spam when compiling the CUTLASS kernels
-RUN apt-get install -y gcc-10 g++-10
-RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 110 --slave /usr/bin/g++ g++ /usr/bin/g++-10
-RUN <<EOF
-gcc --version
-EOF
+# Ensure gcc >= 10 to avoid CUTLASS issues (bug 92519)
+RUN current_gcc_version=$(gcc -dumpversion | cut -f1 -d.) && \
+    if command -v apt-get >/dev/null; then \
+        if [ "$current_gcc_version" -lt 10 ]; then \
+            echo "GCC version is $current_gcc_version, installing gcc-10..."; \
+            apt-get update \
+            && apt-get install -y gcc-10 g++-10 \
+            && update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 100 \
+            && update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-10 100; \
+        else \
+            echo "GCC version is $current_gcc_version, no need to install gcc-10."; \
+        fi \
+    fi \
+    && gcc --version && g++ --version

-# Install uv for faster pip installs
+# install uv for faster pip installs
 RUN --mount=type=cache,target=/root/.cache/uv \
     python3 -m pip install uv==0.8.4
@@ -43,32 +61,36 @@ ENV UV_HTTP_TIMEOUT=500
 ENV UV_INDEX_STRATEGY="unsafe-best-match"
 # Use copy mode to avoid hardlink failures with Docker cache mounts
 ENV UV_LINK_MODE=copy

 #################### TORCH NIGHTLY  BASE IMAGE ####################


 #################### BASE BUILD IMAGE ####################
 # A base image for building vLLM with torch nightly or torch wheels
 # prepare basic build environment
 FROM ${BUILD_BASE_IMAGE} AS base
 USER root

 ARG CUDA_VERSION
 ARG PYTHON_VERSION

-# Only work with PyTorch manylinux builder
+# TODO (huydhn): Only work with PyTorch manylinux builder
 ENV PATH="/opt/python/cp312-cp312/bin:${PATH}"

 # Install some system dependencies and double check python version
 RUN if command -v apt-get >/dev/null; then \
         apt-get update -y \
-        && apt-get install -y ccache software-properties-common git wget sudo vim; \
+        && apt-get install -y ccache software-properties-common git curl wget sudo vim; \
     else \
-        dnf install -y git wget sudo; \
+        dnf install -y git curl wget sudo; \
     fi \
     && python3 --version && python3 -m pip --version

 # Install uv for faster pip installs if not existed
 RUN --mount=type=cache,target=/root/.cache/uv \
-    python3 -m pip install uv==0.8.4
+    if ! python3 -m uv --version >/dev/null 2>&1; then \
+        python3 -m pip install uv==0.8.4; \
+    fi
 ENV UV_HTTP_TIMEOUT=500
 ENV UV_INDEX_STRATEGY="unsafe-best-match"
 # Use copy mode to avoid hardlink failures with Docker cache mounts
@@ -76,15 +98,15 @@ ENV UV_LINK_MODE=copy

 WORKDIR /workspace

-# Install build and runtime dependencies
+# install build and runtime dependencies
 COPY requirements/common.txt requirements/common.txt
 COPY use_existing_torch.py use_existing_torch.py
 COPY pyproject.toml pyproject.toml

-# Install build and runtime dependencies without stable torch version
+# install build and runtime dependencies without stable torch version
 RUN python3 use_existing_torch.py

-# Default mount file as placeholder, this just avoid the mount error
+# default mount file as placeholder, this just avoid the mount error
 # change to a different vllm folder if this does not exist anymore
 ARG TORCH_WHEELS_PATH="./requirements"
 ARG PINNED_TORCH_VERSION
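
use_existing_torch.py is invoked in several stages but its implementation is not part of this diff; per the surrounding comments, its role is to strip pinned torch requirements so the torch already present in the image is reused. A plausible minimal sketch (entirely assumed, shown only to make the build flow concrete):

    # Hypothetical sketch of use_existing_torch.py's job: drop torch pins
    # from the requirements files so the preinstalled nightly torch survives.
    import glob
    import re

    TORCH_PIN = re.compile(r"^(torch|torchvision|torchaudio)\b", re.IGNORECASE)

    for path in glob.glob("requirements/*.txt"):
        with open(path) as f:
            lines = f.readlines()
        with open(path, "w") as f:
            f.writelines(line for line in lines if not TORCH_PIN.match(line.strip()))
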
@@ -116,36 +138,56 @@ RUN --mount=type=cache,target=/root/.cache/uv \
 RUN --mount=type=cache,target=/root/.cache/uv \
     uv pip install --system -r requirements/common.txt

+# Must put before installing xformers, so it can install the correct version of xfomrers.
+ARG xformers_cuda_arch_list='7.5;8.0+PTX;9.0a'
+ENV TORCH_CUDA_ARCH_LIST=${xformers_cuda_arch_list}

+ARG max_jobs=16
+ENV MAX_JOBS=${max_jobs}

-RUN --mount=type=cache,target=/root/.cache/uv bash - <<'BASH'
-    export TORCH_CUDA_ARCH_LIST='7.5 8.0+PTX 9.0a'
-    git clone https://github.com/facebookresearch/xformers.git
+RUN echo ${TORCH_CUDA_ARCH_LIST}
+RUN echo ${MAX_JOBS}
+RUN pip freeze | grep -E 'ninja'

-    pushd xformers
-    git checkout v0.0.32.post2
-    git submodule update --init --recursive
-    python3 setup.py bdist_wheel --dist-dir=../xformers-dist --verbose
-    popd
+# Build xformers with cuda and torch nightly/wheel
+# following official xformers guidance: https://github.com/facebookresearch/xformers#build
+# sha for https://github.com/facebookresearch/xformers/tree/v0.0.32.post2
+ARG XFORMERS_COMMIT=5d4b92a5e5a9c6c6d4878283f47d82e17995b468
+ENV CCACHE_DIR=/root/.cache/ccache

-    rm -rf xformers
-BASH
+RUN --mount=type=cache,target=/root/.cache/ccache \
+    --mount=type=cache,target=/root/.cache/uv \
+    echo 'git clone xformers...' \
+    && git clone https://github.com/facebookresearch/xformers.git --recursive \
+    && cd xformers \
+    && git checkout ${XFORMERS_COMMIT} \
+    && git submodule update --init --recursive \
+    && echo 'finish git clone xformers...' \
+    && rm -rf build \
+    && python3 setup.py bdist_wheel --dist-dir=../xformers-dist --verbose \
+    && cd .. \
+    && rm -rf xformers

 RUN --mount=type=cache,target=/root/.cache/uv \
-    uv pip install --system xformers-dist/*.whl
+    uv pip install --system xformers-dist/*.whl --verbose

 # Build can take a long time, and the torch nightly version fetched from url can be different in next docker stage.
 # track the nightly torch version used in the build, when we set up runtime environment we can make sure the version is the same
 RUN uv pip freeze | grep -i '^torch\|^torchvision\|^torchaudio' > torch_build_versions.txt

 RUN cat torch_build_versions.txt
 RUN pip freeze | grep -E 'torch|xformers|torchvision|torchaudio'

 #################### BASE BUILD IMAGE ####################


 #################### WHEEL BUILD IMAGE ####################
 # Image used to build vllm wheel
 FROM base AS build
 ARG TARGETPLATFORM

 COPY . .

 RUN python3 use_existing_torch.py

 RUN --mount=type=cache,target=/root/.cache/uv \
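
torch_build_versions.txt exists so the later vllm-base stage can install exactly the torch/torchvision/torchaudio nightlies this stage was built against. The same bookkeeping in plain Python, as a sketch (the Dockerfile does it with "uv pip freeze | grep"):

    from importlib.metadata import version

    # Record the exact torch stack seen at build time.
    with open("torch_build_versions.txt", "w") as f:
        for name in ("torch", "torchvision", "torchaudio"):
            f.write(f"{name}=={version(name)}\n")
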
@@ -155,17 +197,20 @@ ARG GIT_REPO_CHECK=0
 RUN --mount=type=bind,source=.git,target=.git \
     if [ "$GIT_REPO_CHECK" != "0" ]; then bash tools/check_repo.sh ; fi

 # Max jobs used by Ninja to build extensions
 ARG max_jobs=16
 ENV MAX_JOBS=${max_jobs}
-ARG nvcc_threads=8
+ARG nvcc_threads=4
 ENV NVCC_THREADS=$nvcc_threads
+ARG torch_cuda_arch_list='8.0 8.6 8.9 9.0'
+ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list}

 ARG USE_SCCACHE
 ARG SCCACHE_BUCKET_NAME=vllm-build-sccache
 ARG SCCACHE_REGION_NAME=us-west-2
 ARG SCCACHE_S3_NO_CREDENTIALS=0

-# Use sccache to speed up compilation
+# if USE_SCCACHE is set, use sccache to speed up compilation
 RUN --mount=type=cache,target=/root/.cache/uv \
     --mount=type=bind,source=.git,target=.git \
     if [ "$USE_SCCACHE" = "1" ]; then \

@@ -190,9 +235,6 @@ RUN --mount=type=cache,target=/root/.cache/uv \
         && sccache --show-stats; \
     fi

-ARG torch_cuda_arch_list='8.0 8.6 8.9 9.0'
-ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list}
-
 ARG vllm_target_device="cuda"
 ENV VLLM_TARGET_DEVICE=${vllm_target_device}
 ENV CCACHE_DIR=/root/.cache/ccache
@@ -206,10 +248,17 @@ RUN --mount=type=cache,target=/root/.cache/ccache \
         export VLLM_DOCKER_BUILD_CONTEXT=1 && \
         python3 setup.py bdist_wheel --dist-dir=vllm-dist --py-limited-api=cp38; \
     fi

+RUN echo "[INFO] Listing current directory:" && \
+    ls -al && \
+    echo "[INFO] Showing torch_build_versions.txt content:" && \
+    cat torch_build_versions.txt

 #################### WHEEL BUILD IMAGE ####################


 ################### VLLM INSTALLED IMAGE ####################
 # Setup clean environment for vLLM for test and api server using ubuntu22.04 with AOT flashinfer
 FROM ${FINAL_BASE_IMAGE} AS vllm-base
 USER root
@@ -217,7 +266,7 @@ ARG CUDA_VERSION
 ARG PYTHON_VERSION
 ARG GET_PIP_URL

-# Only work with PyTorch manylinux builder
+# TODO (huydhn): Only work with PyTorch manylinux builder
 ENV PATH="/opt/python/cp312-cp312/bin:${PATH}"

 # prepare for environment starts
@@ -226,19 +275,20 @@ WORKDIR /workspace
 # Install Python and other dependencies
 RUN if command -v apt-get >/dev/null; then \
         apt-get update -y \
-        && apt-get install -y ccache software-properties-common git sudo vim python3-pip; \
+        && apt-get install -y ccache software-properties-common git curl wget sudo vim \
+        && add-apt-repository -y ppa:deadsnakes/ppa \
+        && apt-get update -y \
+        && apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv \
+        && update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1 \
+        && update-alternatives --set python3 /usr/bin/python${PYTHON_VERSION} \
+        && ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config \
+        && curl -sS ${GET_PIP_URL} | python${PYTHON_VERSION}; \
     else \
-        dnf install -y git wget sudo; \
+        dnf install -y git curl wget sudo; \
     fi \
-    && curl -LsSf https://astral.sh/uv/install.sh | sh \
-    && $HOME/.local/bin/uv venv /opt/venv --python ${PYTHON_VERSION} \
-    && rm -f /usr/bin/python3 /usr/bin/python3-config /usr/bin/pip \
-    && ln -s /opt/venv/bin/python3 /usr/bin/python3 \
-    && ln -s /opt/venv/bin/python3-config /usr/bin/python3-config \
-    && ln -s /opt/venv/bin/pip /usr/bin/pip \
     && python3 --version && python3 -m pip --version

-# Get the torch versions, and whls used in previous stage
+# Get the torch versions, and whls used in previous stagtes for consistency
 COPY --from=base /workspace/torch_build_versions.txt ./torch_build_versions.txt
 COPY --from=base /workspace/xformers-dist /wheels/xformers
 COPY --from=build /workspace/vllm-dist /wheels/vllm
@@ -247,29 +297,33 @@ RUN echo "[INFO] Listing current directory before torch install step:" && \
     echo "[INFO] Showing torch_build_versions.txt content:" && \
     cat torch_build_versions.txt

-# Install uv for faster pip installs if not existed
-RUN --mount=type=cache,target=/root/.cache/uv \
-    python3 -m pip install uv==0.8.4
-
-ENV UV_HTTP_TIMEOUT=500
-ENV UV_INDEX_STRATEGY="unsafe-best-match"
-# Use copy mode to avoid hardlink failures with Docker cache mounts
-ENV UV_LINK_MODE=copy
-
 # Install build and runtime dependencies, this is needed for flashinfer install
 COPY requirements/build.txt requirements/build.txt
 COPY use_existing_torch.py use_existing_torch.py
 RUN python3 use_existing_torch.py
 RUN cat requirements/build.txt

+# Install uv for faster pip installs if not existed
+RUN --mount=type=cache,target=/root/.cache/uv \
+    if ! python3 -m uv --version > /dev/null 2>&1; then \
+        python3 -m pip install uv==0.8.4; \
+    fi
+
+ENV UV_HTTP_TIMEOUT=500
+ENV UV_INDEX_STRATEGY="unsafe-best-match"
+# Use copy mode to avoid hardlink failures with Docker cache mounts
+ENV UV_LINK_MODE=copy


 RUN --mount=type=cache,target=/root/.cache/uv \
     uv pip install --system -r requirements/build.txt


 # Default mount file as placeholder, this just avoid the mount error
 ARG TORCH_WHEELS_PATH="./requirements"
-# Install torch, torchaudio and torchvision. If TORCH_WHEELS_PATH is default
-# to ./requirements, it will pull the nightly versions using pip. Otherwise,
-# it will use the local wheels from TORCH_WHEELS_PATH
+# Install torch, torchaudio and torchvision
+# if TORCH_WHEELS_PATH is default "./requirements", it will pull the nightly versions using pip using torch_build_versions.txt
+# otherwise, it will use the whls from TORCH_WHEELS_PATH from the host machine
 RUN --mount=type=bind,source=${TORCH_WHEELS_PATH},target=/dist \
     --mount=type=cache,target=/root/.cache/uv \
     if [ -n "$TORCH_WHEELS_PATH" ] && [ "$TORCH_WHEELS_PATH" != "./requirements" ] && [ -d "/dist" ] && ls /dist/torch*.whl >/dev/null 2>&1; then \
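
The branch above either installs host-provided wheels mounted at /dist or falls back to the pinned nightlies. The same decision in Python, as a sketch (the real logic is the shell in the RUN instruction; the nightly path also points pip at the PyTorch nightly index, omitted here):

    import glob
    import subprocess

    def install_torch(torch_wheels_path: str = "./requirements") -> None:
        # Prefer host-provided wheels; otherwise reuse the pins recorded
        # in torch_build_versions.txt during the build stage.
        wheels = []
        if torch_wheels_path != "./requirements":
            wheels = glob.glob("/dist/torch*.whl")
        if wheels:
            subprocess.run(["uv", "pip", "install", "--system", *wheels], check=True)
        else:
            with open("torch_build_versions.txt") as f:
                pins = f.read().split()
            subprocess.run(["uv", "pip", "install", "--system", *pins], check=True)
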
@@ -290,14 +344,18 @@ RUN --mount=type=cache,target=/root/.cache/uv \
 # Install xformers wheel from previous stage
 RUN --mount=type=cache,target=/root/.cache/uv \
     uv pip install --system /wheels/xformers/*.whl --verbose

-# Build FlashInfer from source
+# Build flashinfer from source.
 ARG torch_cuda_arch_list='8.0;8.9;9.0a;10.0a;12.0'
+# install package for build flashinfer
+# see issue: https://github.com/flashinfer-ai/flashinfer/issues/738

+RUN pip freeze | grep -E 'setuptools|packaging|build'

 ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list}

 # Build flashinfer for torch nightly from source around 10 mins
 ARG FLASHINFER_GIT_REPO="https://github.com/flashinfer-ai/flashinfer.git"
 # Keep this in sync with https://github.com/vllm-project/vllm/blob/main/requirements/cuda.txt
 ARG FLASHINFER_GIT_REF="v0.2.14.post1"

 RUN --mount=type=cache,target=/root/.cache/uv \
     git clone --depth 1 --recursive --shallow-submodules \
         --branch ${FLASHINFER_GIT_REF} \
@@ -309,7 +367,7 @@ RUN --mount=type=cache,target=/root/.cache/uv \
     && cd .. \
     && rm -rf flashinfer

-# Install FlashInfer
+# install flashinfer python
 RUN --mount=type=cache,target=/root/.cache/uv \
     uv pip install --system wheels/flashinfer/*.whl --verbose
@@ -319,6 +377,49 @@ RUN uv pip freeze | grep -i '^torch\|^torchvision\|^torchaudio\|^xformers\|^vllm
 ################### VLLM INSTALLED IMAGE ####################


+#################### UNITTEST IMAGE #############################
+FROM vllm-base as test
+
+ENV UV_HTTP_TIMEOUT=500
+ENV UV_INDEX_STRATEGY="unsafe-best-match"
+# Use copy mode to avoid hardlink failures with Docker cache mounts
+ENV UV_LINK_MODE=copy
+
+COPY tests/ tests/
+COPY examples examples
+COPY benchmarks benchmarks
+COPY ./vllm/collect_env.py .
+COPY requirements/common.txt requirements/common.txt
+COPY use_existing_torch.py use_existing_torch.py
+COPY pyproject.toml pyproject.toml
+# Install build and runtime dependencies without stable torch version
+COPY requirements/nightly_torch_test.txt requirements/nightly_torch_test.txt
+
+RUN python3 use_existing_torch.py
+
+# install packages
+RUN --mount=type=cache,target=/root/.cache/uv \
+    uv pip install --system -r requirements/common.txt
+# enable fast downloads from hf (for testing)
+RUN --mount=type=cache,target=/root/.cache/uv \
+    uv pip install --system hf_transfer
+ENV HF_HUB_ENABLE_HF_TRANSFER 1
+
+# install development dependencies (for testing)
+RUN --mount=type=cache,target=/root/.cache/uv \
+    uv pip install --system -e tests/vllm_test_utils
+
+RUN --mount=type=cache,target=/root/.cache/uv \
+    uv pip install --system -r requirements/nightly_torch_test.txt
+
+# Logging to confirm the torch versions
+RUN pip freeze | grep -E 'torch|xformers|vllm|flashinfer'
+
+# Logging to confirm all the packages are installed
+RUN pip freeze
+
+#################### UNITTEST IMAGE #############################
+
 #################### EXPORT STAGE ####################
 FROM scratch as export-wheels
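
The test image sets HF_HUB_ENABLE_HF_TRANSFER so Hugging Face downloads go through the Rust-based hf_transfer backend during tests. A minimal usage sketch (gpt2/config.json is an arbitrary example; huggingface_hub honors the flag when the hf_transfer package is installed, as it is in this image):

    import os
    os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"

    from huggingface_hub import hf_hub_download

    # Large files are now fetched with the accelerated downloader.
    print(hf_hub_download("gpt2", "config.json"))
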

.github/scripts/filter_test_configs.py (2 lines changed)
@@ -512,8 +512,6 @@ def perform_misc_tasks(
         "keep-going",
         branch == MAIN_BRANCH
         or bool(tag and re.match(r"^trunk/[a-f0-9]{40}$", tag))
-        # Pattern for tags created via manual run on HUD
-        or bool(tag and re.match(r"^ciflow/[^/]+/[a-f0-9]{40}$", tag))
         or check_for_setting(labels, pr_body, "keep-going"),
     )
     set_output(
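
The effect of the removal is that only trunk/<sha> tags (and the main branch) force keep-going; ciflow/<workflow>/<sha> tags created via manual runs on HUD no longer do. The two patterns behave like this (regexes copied from the code above):

    import re

    TRUNK_TAG = re.compile(r"^trunk/[a-f0-9]{40}$")
    CIFLOW_TAG = re.compile(r"^ciflow/[^/]+/[a-f0-9]{40}$")

    assert TRUNK_TAG.match("trunk/" + "a" * 40)
    assert CIFLOW_TAG.match("ciflow/periodic/" + "0" * 40)
    assert not CIFLOW_TAG.match("ciflow/" + "0" * 40)  # workflow segment required
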

.github/scripts/generate_binary_build_matrix.py (26 lines changed)
@@ -16,18 +16,16 @@ from typing import Optional


 # NOTE: Please also update the CUDA sources in `PIP_SOURCES` in tools/nightly.py when changing this
-CUDA_ARCHES = ["12.6", "12.8", "12.9", "13.0"]
+CUDA_ARCHES = ["12.6", "12.8", "13.0"]
 CUDA_STABLE = "12.8"
 CUDA_ARCHES_FULL_VERSION = {
     "12.6": "12.6.3",
     "12.8": "12.8.1",
-    "12.9": "12.9.1",
     "13.0": "13.0.0",
 }
 CUDA_ARCHES_CUDNN_VERSION = {
     "12.6": "9",
     "12.8": "9",
-    "12.9": "9",
     "13.0": "9",
 }

@@ -40,7 +38,7 @@ CPU_AARCH64_ARCH = ["cpu-aarch64"]

 CPU_S390X_ARCH = ["cpu-s390x"]

-CUDA_AARCH64_ARCHES = ["12.6-aarch64", "12.8-aarch64", "12.9-aarch64", "13.0-aarch64"]
+CUDA_AARCH64_ARCHES = ["12.6-aarch64", "12.8-aarch64", "13.0-aarch64"]


 PYTORCH_EXTRA_INSTALL_REQUIREMENTS = {
@@ -78,23 +76,6 @@ PYTORCH_EXTRA_INSTALL_REQUIREMENTS = {
         "nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | "
         "nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'"
     ),
-    "12.9": (
-        "nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | "
-        "nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | "
-        "nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | "
-        "nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | "
-        "nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | "
-        "nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | "
-        "nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | "
-        "nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | "
-        "nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | "
-        "nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | "
-        "nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | "
-        "nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | "
-        "nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | "
-        "nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | "
-        "nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64'"
-    ),
     "13.0": (
         "nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | "
         "nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | "
@@ -341,7 +322,7 @@ def generate_wheels_matrix(
             # cuda linux wheels require PYTORCH_EXTRA_INSTALL_REQUIREMENTS to install

             if (
-                arch_version in ["13.0", "12.9", "12.8", "12.6"]
+                arch_version in ["13.0", "12.8", "12.6"]
                 and os == "linux"
                 or arch_version in CUDA_AARCH64_ARCHES
             ):
@@ -405,6 +386,5 @@ def generate_wheels_matrix(


 validate_nccl_dep_consistency("13.0")
-validate_nccl_dep_consistency("12.9")
 validate_nccl_dep_consistency("12.8")
 validate_nccl_dep_consistency("12.6")
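
Each value in PYTORCH_EXTRA_INSTALL_REQUIREMENTS is a string of PEP 508 requirements joined with " | " (the joining convention is visible in the generated workflow below). A quick way to inspect one entry, as a sketch (packaging is a third-party library, not something this script is shown using):

    from packaging.requirements import Requirement

    entry = "nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64'"
    req = Requirement(entry)
    print(req.name)            # nvidia-nccl-cu12
    print(str(req.specifier))  # ==2.27.5
    print(req.marker.evaluate({"platform_system": "Linux", "platform_machine": "x86_64"}))  # True
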

.github/scripts/trymerge.py (4 lines changed)
@@ -2042,6 +2042,10 @@ def validate_revert(
             f"[{', '.join(allowed_reverters)}], but instead is {author_association}."
         )

+    # Raises exception if matching rule is not found, but ignores all status checks
+    find_matching_merge_rule(
+        pr, repo, skip_mandatory_checks=True, skip_internal_checks=True
+    )
     commit_sha = get_pr_commit_sha(repo, pr)
     return (author_login, commit_sha)

@@ -177,9 +177,6 @@ jobs:
     runs-on: linux.rocm.gpu.mi250
     timeout-minutes: !{{ common.timeout_minutes }}
     !{{ upload.binary_env(config) }}
-    permissions:
-      id-token: write
-      contents: read
     steps:
       - name: Setup ROCm
         uses: ./.github/actions/setup-rocm

.github/workflows/_docs.yml (2 lines changed)
@@ -72,7 +72,7 @@ jobs:
             # Let's try to figure out how this can be improved
             timeout-minutes: 360
           - docs_type: python
-            runner: ${{ inputs.runner_prefix }}linux.c7i.2xlarge
+            runner: ${{ inputs.runner_prefix }}linux.2xlarge
             # It takes less than 30m to finish python docs unless there are issues
             timeout-minutes: 30
     # Set a fixed name for this job instead of using the current matrix-generated name, i.e. build-docs (cpp, linux.12xlarge, 180)

.github/workflows/_rocm-test.yml (13 lines changed)
@@ -102,6 +102,19 @@ jobs:
             exit 1
           fi

+      - name: configure aws credentials
+        id: aws_creds
+        uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722 # v4.1.0
+        with:
+          role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_s3_and_ecr_read_only
+          aws-region: us-east-1
+          role-duration-seconds: 18000
+
+      - name: Login to Amazon ECR
+        id: login-ecr
+        continue-on-error: true
+        uses: aws-actions/amazon-ecr-login@062b18b96a7aff071d4dc91bc00c4c1a7945b076 # v2.0.1
+
       - name: Calculate docker image
         id: calculate-docker-image
         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main

.github/workflows/build-manywheel-images.yml (4 lines changed)
@@ -46,12 +46,10 @@ jobs:
       fail-fast: false
       matrix:
         include: [
-          { name: "manylinux2_28-builder",          tag: "cuda13.0",          runner: "linux.9xlarge.ephemeral" },
+          { name: "manylinux2_28-builder",          tag: "cuda13.0",         runner: "linux.9xlarge.ephemeral" },
           { name: "manylinux2_28-builder",          tag: "cuda12.8",          runner: "linux.9xlarge.ephemeral" },
-          { name: "manylinux2_28-builder",          tag: "cuda12.9",          runner: "linux.9xlarge.ephemeral" },
           { name: "manylinux2_28-builder",          tag: "cuda12.6",          runner: "linux.9xlarge.ephemeral" },
           { name: "manylinuxaarch64-builder",       tag: "cuda13.0",          runner: "linux.arm64.2xlarge.ephemeral" },
-          { name: "manylinuxaarch64-builder",       tag: "cuda12.9",          runner: "linux.arm64.2xlarge.ephemeral" },
           { name: "manylinuxaarch64-builder",       tag: "cuda12.8",          runner: "linux.arm64.2xlarge.ephemeral" },
           { name: "manylinuxaarch64-builder",       tag: "cuda12.6",          runner: "linux.arm64.2xlarge.ephemeral" },
           { name: "manylinux2_28-builder",          tag: "rocm6.4",           runner: "linux.9xlarge.ephemeral" },

.github/workflows/generated-linux-aarch64-binary-manywheel-nightly.yml (322 lines changed, generated)
@@ -204,52 +204,6 @@ jobs:
       github-token: ${{ secrets.GITHUB_TOKEN }}
     uses: ./.github/workflows/_binary-upload.yml

-  manywheel-py3_10-cuda-aarch64-12_9-build:
-    if: ${{ github.repository_owner == 'pytorch' }}
-    uses: ./.github/workflows/_binary-build-linux.yml
-    needs: get-label-type
-    with:
-      PYTORCH_ROOT: /pytorch
-      PACKAGE_TYPE: manywheel
-      # TODO: This is a legacy variable that we eventually want to get rid of in
-      #       favor of GPU_ARCH_VERSION
-      DESIRED_CUDA: cu129
-      GPU_ARCH_VERSION: "12.9-aarch64"
-      GPU_ARCH_TYPE: cuda-aarch64
-      DOCKER_IMAGE: manylinuxaarch64-builder
-      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
-      DESIRED_PYTHON: "3.10"
-      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
-      runs_on: linux.arm64.r7g.12xlarge.memory
-      ALPINE_IMAGE: "arm64v8/alpine"
-      build_name: manywheel-py3_10-cuda-aarch64-12_9
-      build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64'
-      timeout-minutes: 420
-    secrets:
-      github-token: ${{ secrets.GITHUB_TOKEN }}
-  manywheel-py3_10-cuda-aarch64-12_9-upload:  # Uploading
-    if: ${{ github.repository_owner == 'pytorch' }}
-    permissions:
-      id-token: write
-      contents: read
-    needs: manywheel-py3_10-cuda-aarch64-12_9-build
-    with:
-      PYTORCH_ROOT: /pytorch
-      PACKAGE_TYPE: manywheel
-      # TODO: This is a legacy variable that we eventually want to get rid of in
-      #       favor of GPU_ARCH_VERSION
-      DESIRED_CUDA: cu129
-      GPU_ARCH_VERSION: "12.9-aarch64"
-      GPU_ARCH_TYPE: cuda-aarch64
-      DOCKER_IMAGE: manylinuxaarch64-builder
-      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
-      DESIRED_PYTHON: "3.10"
-      build_name: manywheel-py3_10-cuda-aarch64-12_9
-    secrets:
-      github-token: ${{ secrets.GITHUB_TOKEN }}
-    uses: ./.github/workflows/_binary-upload.yml
-
   manywheel-py3_10-cuda-aarch64-13_0-build:
     if: ${{ github.repository_owner == 'pytorch' }}
     uses: ./.github/workflows/_binary-build-linux.yml
@@ -453,52 +407,6 @@ jobs:
       github-token: ${{ secrets.GITHUB_TOKEN }}
     uses: ./.github/workflows/_binary-upload.yml

-  manywheel-py3_11-cuda-aarch64-12_9-build:
-    if: ${{ github.repository_owner == 'pytorch' }}
-    uses: ./.github/workflows/_binary-build-linux.yml
-    needs: get-label-type
-    with:
-      PYTORCH_ROOT: /pytorch
-      PACKAGE_TYPE: manywheel
-      # TODO: This is a legacy variable that we eventually want to get rid of in
-      #       favor of GPU_ARCH_VERSION
-      DESIRED_CUDA: cu129
-      GPU_ARCH_VERSION: "12.9-aarch64"
-      GPU_ARCH_TYPE: cuda-aarch64
-      DOCKER_IMAGE: manylinuxaarch64-builder
-      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
-      DESIRED_PYTHON: "3.11"
-      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
-      runs_on: linux.arm64.r7g.12xlarge.memory
-      ALPINE_IMAGE: "arm64v8/alpine"
-      build_name: manywheel-py3_11-cuda-aarch64-12_9
-      build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64'
-      timeout-minutes: 420
-    secrets:
-      github-token: ${{ secrets.GITHUB_TOKEN }}
-  manywheel-py3_11-cuda-aarch64-12_9-upload:  # Uploading
-    if: ${{ github.repository_owner == 'pytorch' }}
-    permissions:
-      id-token: write
-      contents: read
-    needs: manywheel-py3_11-cuda-aarch64-12_9-build
-    with:
-      PYTORCH_ROOT: /pytorch
-      PACKAGE_TYPE: manywheel
-      # TODO: This is a legacy variable that we eventually want to get rid of in
-      #       favor of GPU_ARCH_VERSION
-      DESIRED_CUDA: cu129
-      GPU_ARCH_VERSION: "12.9-aarch64"
-      GPU_ARCH_TYPE: cuda-aarch64
-      DOCKER_IMAGE: manylinuxaarch64-builder
-      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
-      DESIRED_PYTHON: "3.11"
-      build_name: manywheel-py3_11-cuda-aarch64-12_9
-    secrets:
-      github-token: ${{ secrets.GITHUB_TOKEN }}
-    uses: ./.github/workflows/_binary-upload.yml
-
   manywheel-py3_11-cuda-aarch64-13_0-build:
     if: ${{ github.repository_owner == 'pytorch' }}
     uses: ./.github/workflows/_binary-build-linux.yml
@@ -702,52 +610,6 @@ jobs:
       github-token: ${{ secrets.GITHUB_TOKEN }}
     uses: ./.github/workflows/_binary-upload.yml

-  manywheel-py3_12-cuda-aarch64-12_9-build:
-    if: ${{ github.repository_owner == 'pytorch' }}
-    uses: ./.github/workflows/_binary-build-linux.yml
-    needs: get-label-type
-    with:
-      PYTORCH_ROOT: /pytorch
-      PACKAGE_TYPE: manywheel
-      # TODO: This is a legacy variable that we eventually want to get rid of in
-      #       favor of GPU_ARCH_VERSION
-      DESIRED_CUDA: cu129
-      GPU_ARCH_VERSION: "12.9-aarch64"
-      GPU_ARCH_TYPE: cuda-aarch64
-      DOCKER_IMAGE: manylinuxaarch64-builder
-      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
-      DESIRED_PYTHON: "3.12"
-      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
-      runs_on: linux.arm64.r7g.12xlarge.memory
-      ALPINE_IMAGE: "arm64v8/alpine"
-      build_name: manywheel-py3_12-cuda-aarch64-12_9
-      build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64'
-      timeout-minutes: 420
-    secrets:
-      github-token: ${{ secrets.GITHUB_TOKEN }}
-  manywheel-py3_12-cuda-aarch64-12_9-upload:  # Uploading
-    if: ${{ github.repository_owner == 'pytorch' }}
-    permissions:
-      id-token: write
-      contents: read
-    needs: manywheel-py3_12-cuda-aarch64-12_9-build
-    with:
-      PYTORCH_ROOT: /pytorch
-      PACKAGE_TYPE: manywheel
-      # TODO: This is a legacy variable that we eventually want to get rid of in
-      #       favor of GPU_ARCH_VERSION
-      DESIRED_CUDA: cu129
-      GPU_ARCH_VERSION: "12.9-aarch64"
-      GPU_ARCH_TYPE: cuda-aarch64
-      DOCKER_IMAGE: manylinuxaarch64-builder
-      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
-      DESIRED_PYTHON: "3.12"
-      build_name: manywheel-py3_12-cuda-aarch64-12_9
-    secrets:
-      github-token: ${{ secrets.GITHUB_TOKEN }}
-    uses: ./.github/workflows/_binary-upload.yml
-
   manywheel-py3_12-cuda-aarch64-13_0-build:
     if: ${{ github.repository_owner == 'pytorch' }}
     uses: ./.github/workflows/_binary-build-linux.yml
@ -951,52 +813,6 @@ jobs:
 | 
			
		||||
      github-token: ${{ secrets.GITHUB_TOKEN }}
 | 
			
		||||
    uses: ./.github/workflows/_binary-upload.yml
 | 
			
		||||
 | 
			
		||||
  manywheel-py3_13-cuda-aarch64-12_9-build:
 | 
			
		||||
    if: ${{ github.repository_owner == 'pytorch' }}
 | 
			
		||||
    uses: ./.github/workflows/_binary-build-linux.yml
 | 
			
		||||
    needs: get-label-type
 | 
			
		||||
    with:
 | 
			
		||||
      PYTORCH_ROOT: /pytorch
 | 
			
		||||
      PACKAGE_TYPE: manywheel
 | 
			
		||||
      # TODO: This is a legacy variable that we eventually want to get rid of in
 | 
			
		||||
      #       favor of GPU_ARCH_VERSION
 | 
			
		||||
      DESIRED_CUDA: cu129
 | 
			
		||||
      GPU_ARCH_VERSION: "12.9-aarch64"
 | 
			
		||||
      GPU_ARCH_TYPE: cuda-aarch64
 | 
			
		||||
      DOCKER_IMAGE: manylinuxaarch64-builder
 | 
			
		||||
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
 | 
			
		||||
      DESIRED_PYTHON: "3.13"
 | 
			
		||||
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
 | 
			
		||||
      runs_on: linux.arm64.r7g.12xlarge.memory
 | 
			
		||||
      ALPINE_IMAGE: "arm64v8/alpine"
 | 
			
		||||
      build_name: manywheel-py3_13-cuda-aarch64-12_9
 | 
			
		||||
      build_environment: linux-aarch64-binary-manywheel
 | 
			
		||||
      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64'
 | 
			
		||||
      timeout-minutes: 420
 | 
			
		||||
    secrets:
 | 
			
		||||
      github-token: ${{ secrets.GITHUB_TOKEN }}
 | 
			
		||||
  manywheel-py3_13-cuda-aarch64-12_9-upload:  # Uploading
 | 
			
		||||
    if: ${{ github.repository_owner == 'pytorch' }}
 | 
			
		||||
    permissions:
 | 
			
		||||
      id-token: write
 | 
			
		||||
      contents: read
 | 
			
		||||
    needs: manywheel-py3_13-cuda-aarch64-12_9-build
 | 
			
		||||
    with:
 | 
			
		||||
      PYTORCH_ROOT: /pytorch
 | 
			
		||||
      PACKAGE_TYPE: manywheel
 | 
			
		||||
      # TODO: This is a legacy variable that we eventually want to get rid of in
 | 
			
		||||
      #       favor of GPU_ARCH_VERSION
 | 
			
		||||
      DESIRED_CUDA: cu129
 | 
			
		||||
      GPU_ARCH_VERSION: "12.9-aarch64"
 | 
			
		||||
      GPU_ARCH_TYPE: cuda-aarch64
 | 
			
		||||
      DOCKER_IMAGE: manylinuxaarch64-builder
 | 
			
		||||
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
 | 
			
		||||
      DESIRED_PYTHON: "3.13"
 | 
			
		||||
      build_name: manywheel-py3_13-cuda-aarch64-12_9
 | 
			
		||||
    secrets:
 | 
			
		||||
      github-token: ${{ secrets.GITHUB_TOKEN }}
 | 
			
		||||
    uses: ./.github/workflows/_binary-upload.yml
 | 
			
		||||
 | 
			
		||||
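Note: every pin in the PYTORCH_EXTRA_INSTALL_REQUIREMENTS line above carries a PEP 508 marker restricting it to x86_64 Linux, so none of these nvidia-*-cu12 wheels are installed alongside the aarch64 wheels built here. A minimal sketch of how such markers evaluate, assuming the `packaging` library and using one shortened requirement from the list:

```python
# Sketch: how the PEP 508 markers in PYTORCH_EXTRA_INSTALL_REQUIREMENTS evaluate.
# Assumes the `packaging` library; the requirement string is one entry from the list above.
from packaging.requirements import Requirement

req = Requirement(
    "nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64'"
)

# Evaluate the marker against explicit environments rather than this interpreter's own.
for machine in ("x86_64", "aarch64"):
    env = {"platform_system": "Linux", "platform_machine": machine}
    print(machine, "->", req.marker.evaluate(environment=env))
# x86_64 -> True, aarch64 -> False: the pins are a no-op for these arm64 builds.
```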
  manywheel-py3_13-cuda-aarch64-13_0-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
@ -1200,52 +1016,6 @@ jobs:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  manywheel-py3_13t-cuda-aarch64-12_9-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
    needs: get-label-type
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9-aarch64"
      GPU_ARCH_TYPE: cuda-aarch64
      DOCKER_IMAGE: manylinuxaarch64-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.13t"
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      runs_on: linux.arm64.r7g.12xlarge.memory
      ALPINE_IMAGE: "arm64v8/alpine"
      build_name: manywheel-py3_13t-cuda-aarch64-12_9
      build_environment: linux-aarch64-binary-manywheel
      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64'
      timeout-minutes: 420
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
  manywheel-py3_13t-cuda-aarch64-12_9-upload:  # Uploading
    if: ${{ github.repository_owner == 'pytorch' }}
    permissions:
      id-token: write
      contents: read
    needs: manywheel-py3_13t-cuda-aarch64-12_9-build
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9-aarch64"
      GPU_ARCH_TYPE: cuda-aarch64
      DOCKER_IMAGE: manylinuxaarch64-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.13t"
      build_name: manywheel-py3_13t-cuda-aarch64-12_9
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  manywheel-py3_13t-cuda-aarch64-13_0-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
@ -1449,52 +1219,6 @@ jobs:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  manywheel-py3_14-cuda-aarch64-12_9-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
    needs: get-label-type
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9-aarch64"
      GPU_ARCH_TYPE: cuda-aarch64
      DOCKER_IMAGE: manylinuxaarch64-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.14"
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      runs_on: linux.arm64.r7g.12xlarge.memory
      ALPINE_IMAGE: "arm64v8/alpine"
      build_name: manywheel-py3_14-cuda-aarch64-12_9
      build_environment: linux-aarch64-binary-manywheel
      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64'
      timeout-minutes: 420
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
  manywheel-py3_14-cuda-aarch64-12_9-upload:  # Uploading
    if: ${{ github.repository_owner == 'pytorch' }}
    permissions:
      id-token: write
      contents: read
    needs: manywheel-py3_14-cuda-aarch64-12_9-build
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9-aarch64"
      GPU_ARCH_TYPE: cuda-aarch64
      DOCKER_IMAGE: manylinuxaarch64-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.14"
      build_name: manywheel-py3_14-cuda-aarch64-12_9
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  manywheel-py3_14-cuda-aarch64-13_0-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
@ -1698,52 +1422,6 @@ jobs:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  manywheel-py3_14t-cuda-aarch64-12_9-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
    needs: get-label-type
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9-aarch64"
      GPU_ARCH_TYPE: cuda-aarch64
      DOCKER_IMAGE: manylinuxaarch64-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.14t"
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      runs_on: linux.arm64.r7g.12xlarge.memory
      ALPINE_IMAGE: "arm64v8/alpine"
      build_name: manywheel-py3_14t-cuda-aarch64-12_9
      build_environment: linux-aarch64-binary-manywheel
      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64'
      timeout-minutes: 420
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
  manywheel-py3_14t-cuda-aarch64-12_9-upload:  # Uploading
    if: ${{ github.repository_owner == 'pytorch' }}
    permissions:
      id-token: write
      contents: read
    needs: manywheel-py3_14t-cuda-aarch64-12_9-build
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9-aarch64"
      GPU_ARCH_TYPE: cuda-aarch64
      DOCKER_IMAGE: manylinuxaarch64-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.14t"
      build_name: manywheel-py3_14t-cuda-aarch64-12_9
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  manywheel-py3_14t-cuda-aarch64-13_0-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml

74 .github/workflows/generated-linux-binary-libtorch-nightly.yml (generated, vendored)
@ -248,74 +248,6 @@ jobs:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  libtorch-cuda12_9-shared-with-deps-release-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
    needs: get-label-type
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: libtorch
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: libtorch-cxx11-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      LIBTORCH_CONFIG: release
      LIBTORCH_VARIANT: shared-with-deps
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      build_name: libtorch-cuda12_9-shared-with-deps-release
      build_environment: linux-binary-libtorch
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
  libtorch-cuda12_9-shared-with-deps-release-test:  # Testing
    if: ${{ github.repository_owner == 'pytorch' }}
    needs:
      - libtorch-cuda12_9-shared-with-deps-release-build
      - get-label-type
    uses: ./.github/workflows/_binary-test-linux.yml
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: libtorch
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: libtorch-cxx11-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      LIBTORCH_CONFIG: release
      LIBTORCH_VARIANT: shared-with-deps
      build_name: libtorch-cuda12_9-shared-with-deps-release
      build_environment: linux-binary-libtorch
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
  libtorch-cuda12_9-shared-with-deps-release-upload:  # Uploading
    if: ${{ github.repository_owner == 'pytorch' }}
    permissions:
      id-token: write
      contents: read
    needs: libtorch-cuda12_9-shared-with-deps-release-test
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: libtorch
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: libtorch-cxx11-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      LIBTORCH_CONFIG: release
      LIBTORCH_VARIANT: shared-with-deps
      build_name: libtorch-cuda12_9-shared-with-deps-release
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

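The cu129 libtorch jobs above are chained build → test → upload through `needs:`. As an illustration (a mirror of the declared edges, not how Actions itself is implemented), the same ordering falls out of a plain topological sort:

```python
# Illustrative sketch: the `needs:` edges of the libtorch cu129 jobs above,
# ordered the way GitHub Actions schedules them. Job names are taken from the diff.
from graphlib import TopologicalSorter

needs = {
    "libtorch-cuda12_9-shared-with-deps-release-build": {"get-label-type"},
    "libtorch-cuda12_9-shared-with-deps-release-test": {
        "libtorch-cuda12_9-shared-with-deps-release-build",
        "get-label-type",
    },
    "libtorch-cuda12_9-shared-with-deps-release-upload": {
        "libtorch-cuda12_9-shared-with-deps-release-test",
    },
}

print(list(TopologicalSorter(needs).static_order()))
# get-label-type first, then build, then test, then upload.
```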
  libtorch-cuda13_0-shared-with-deps-release-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
@ -426,9 +358,6 @@ jobs:
      DOCKER_IMAGE_TAG_PREFIX: rocm6.4
      LIBTORCH_CONFIG: release
      LIBTORCH_VARIANT: shared-with-deps
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm
@ -544,9 +473,6 @@ jobs:
      DOCKER_IMAGE_TAG_PREFIX: rocm7.0
      LIBTORCH_CONFIG: release
      LIBTORCH_VARIANT: shared-with-deps
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm

504 .github/workflows/generated-linux-binary-manywheel-nightly.yml (generated, vendored)
@ -241,72 +241,6 @@ jobs:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  manywheel-py3_10-cuda12_9-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
    needs: get-label-type
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.10"
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      build_name: manywheel-py3_10-cuda12_9
      build_environment: linux-binary-manywheel
      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64'
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
  manywheel-py3_10-cuda12_9-test:  # Testing
    if: ${{ github.repository_owner == 'pytorch' }}
    needs:
      - manywheel-py3_10-cuda12_9-build
      - get-label-type
    uses: ./.github/workflows/_binary-test-linux.yml
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.10"
      build_name: manywheel-py3_10-cuda12_9
      build_environment: linux-binary-manywheel
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
  manywheel-py3_10-cuda12_9-upload:  # Uploading
    if: ${{ github.repository_owner == 'pytorch' }}
    permissions:
      id-token: write
      contents: read
    needs: manywheel-py3_10-cuda12_9-test
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.10"
      build_name: manywheel-py3_10-cuda12_9
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

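Each of these generated blocks repeats one naming convention: DESIRED_CUDA "cu129" pairs with GPU_ARCH_VERSION "12.9" and DOCKER_IMAGE_TAG_PREFIX "cuda12.9". A hypothetical helper (not the repo's actual generator) makes the mapping explicit for two-part CUDA versions:

```python
# Hypothetical helper, not part of PyTorch's workflow generator: shows the
# naming convention the generated jobs follow for two-part CUDA versions.
def cuda_fields(desired_cuda: str) -> dict[str, str]:
    digits = desired_cuda.removeprefix("cu")   # "cu129" -> "129"
    version = f"{digits[:-1]}.{digits[-1]}"    # "129" -> "12.9"
    return {
        "DESIRED_CUDA": desired_cuda,
        "GPU_ARCH_VERSION": version,
        "DOCKER_IMAGE_TAG_PREFIX": f"cuda{version}",
    }

assert cuda_fields("cu129")["GPU_ARCH_VERSION"] == "12.9"
assert cuda_fields("cu130")["DOCKER_IMAGE_TAG_PREFIX"] == "cuda13.0"
```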
  manywheel-py3_10-cuda13_0-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
@ -413,9 +347,6 @@ jobs:
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: rocm6.4
      DESIRED_PYTHON: "3.10"
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm
@ -528,9 +459,6 @@ jobs:
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: rocm7.0
      DESIRED_PYTHON: "3.10"
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm
@ -907,72 +835,6 @@ jobs:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  manywheel-py3_11-cuda12_9-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
    needs: get-label-type
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.11"
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      build_name: manywheel-py3_11-cuda12_9
      build_environment: linux-binary-manywheel
      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64'
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
  manywheel-py3_11-cuda12_9-test:  # Testing
    if: ${{ github.repository_owner == 'pytorch' }}
    needs:
      - manywheel-py3_11-cuda12_9-build
      - get-label-type
    uses: ./.github/workflows/_binary-test-linux.yml
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.11"
      build_name: manywheel-py3_11-cuda12_9
      build_environment: linux-binary-manywheel
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
  manywheel-py3_11-cuda12_9-upload:  # Uploading
    if: ${{ github.repository_owner == 'pytorch' }}
    permissions:
      id-token: write
      contents: read
    needs: manywheel-py3_11-cuda12_9-test
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.11"
      build_name: manywheel-py3_11-cuda12_9
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  manywheel-py3_11-cuda13_0-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
@ -1079,9 +941,6 @@ jobs:
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: rocm6.4
      DESIRED_PYTHON: "3.11"
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm
@ -1194,9 +1053,6 @@ jobs:
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: rocm7.0
      DESIRED_PYTHON: "3.11"
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm
@ -1573,72 +1429,6 @@ jobs:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  manywheel-py3_12-cuda12_9-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
    needs: get-label-type
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.12"
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      build_name: manywheel-py3_12-cuda12_9
      build_environment: linux-binary-manywheel
      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64'
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
  manywheel-py3_12-cuda12_9-test:  # Testing
    if: ${{ github.repository_owner == 'pytorch' }}
    needs:
      - manywheel-py3_12-cuda12_9-build
      - get-label-type
    uses: ./.github/workflows/_binary-test-linux.yml
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.12"
      build_name: manywheel-py3_12-cuda12_9
      build_environment: linux-binary-manywheel
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
  manywheel-py3_12-cuda12_9-upload:  # Uploading
    if: ${{ github.repository_owner == 'pytorch' }}
    permissions:
      id-token: write
      contents: read
    needs: manywheel-py3_12-cuda12_9-test
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.12"
      build_name: manywheel-py3_12-cuda12_9
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  manywheel-py3_12-cuda13_0-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
@ -1745,9 +1535,6 @@ jobs:
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: rocm6.4
      DESIRED_PYTHON: "3.12"
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm
@ -1860,9 +1647,6 @@ jobs:
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: rocm7.0
      DESIRED_PYTHON: "3.12"
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm
@ -2239,72 +2023,6 @@ jobs:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  manywheel-py3_13-cuda12_9-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
    needs: get-label-type
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.13"
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      build_name: manywheel-py3_13-cuda12_9
      build_environment: linux-binary-manywheel
      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64'
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
  manywheel-py3_13-cuda12_9-test:  # Testing
    if: ${{ github.repository_owner == 'pytorch' }}
    needs:
      - manywheel-py3_13-cuda12_9-build
      - get-label-type
    uses: ./.github/workflows/_binary-test-linux.yml
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.13"
      build_name: manywheel-py3_13-cuda12_9
      build_environment: linux-binary-manywheel
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
  manywheel-py3_13-cuda12_9-upload:  # Uploading
    if: ${{ github.repository_owner == 'pytorch' }}
    permissions:
      id-token: write
      contents: read
    needs: manywheel-py3_13-cuda12_9-test
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.13"
      build_name: manywheel-py3_13-cuda12_9
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  manywheel-py3_13-cuda13_0-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
@ -2411,9 +2129,6 @@ jobs:
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: rocm6.4
      DESIRED_PYTHON: "3.13"
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm
@ -2526,9 +2241,6 @@ jobs:
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: rocm7.0
      DESIRED_PYTHON: "3.13"
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm
@ -2905,72 +2617,6 @@ jobs:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  manywheel-py3_13t-cuda12_9-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
    needs: get-label-type
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.13t"
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      build_name: manywheel-py3_13t-cuda12_9
      build_environment: linux-binary-manywheel
      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64'
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
  manywheel-py3_13t-cuda12_9-test:  # Testing
    if: ${{ github.repository_owner == 'pytorch' }}
    needs:
      - manywheel-py3_13t-cuda12_9-build
      - get-label-type
    uses: ./.github/workflows/_binary-test-linux.yml
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.13t"
      build_name: manywheel-py3_13t-cuda12_9
      build_environment: linux-binary-manywheel
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
  manywheel-py3_13t-cuda12_9-upload:  # Uploading
    if: ${{ github.repository_owner == 'pytorch' }}
    permissions:
      id-token: write
      contents: read
    needs: manywheel-py3_13t-cuda12_9-test
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.13t"
      build_name: manywheel-py3_13t-cuda12_9
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  manywheel-py3_13t-cuda13_0-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
@ -3077,9 +2723,6 @@ jobs:
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: rocm6.4
      DESIRED_PYTHON: "3.13t"
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm
@ -3192,9 +2835,6 @@ jobs:
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: rocm7.0
      DESIRED_PYTHON: "3.13t"
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm
@ -3571,72 +3211,6 @@ jobs:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

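The recurring `runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner` line in the test jobs encodes a hardware floor; a g4dn runner's T4 (sm_75) clears it. A small sketch of checking that same floor at runtime with PyTorch's public CUDA API:

```python
# Sketch of the "12.8+ builds need sm_70+ runner" floor from the test jobs above:
# verify the visible GPU is at least sm_70 before running a GPU test shard.
import torch

if not torch.cuda.is_available():
    raise SystemExit("no CUDA device visible; cannot run the GPU test shard")

major, minor = torch.cuda.get_device_capability(0)
print(f"compute capability sm_{major}{minor}")
assert (major, minor) >= (7, 0), "runner GPU is older than sm_70"
# A g4dn runner's T4 reports sm_75, which satisfies the floor.
```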
  manywheel-py3_14-cuda12_9-build:
 | 
			
		||||
    if: ${{ github.repository_owner == 'pytorch' }}
 | 
			
		||||
    uses: ./.github/workflows/_binary-build-linux.yml
 | 
			
		||||
    needs: get-label-type
 | 
			
		||||
    with:
 | 
			
		||||
      PYTORCH_ROOT: /pytorch
 | 
			
		||||
      PACKAGE_TYPE: manywheel
 | 
			
		||||
      # TODO: This is a legacy variable that we eventually want to get rid of in
 | 
			
		||||
      #       favor of GPU_ARCH_VERSION
 | 
			
		||||
      DESIRED_CUDA: cu129
 | 
			
		||||
      GPU_ARCH_VERSION: "12.9"
 | 
			
		||||
      GPU_ARCH_TYPE: cuda
 | 
			
		||||
      DOCKER_IMAGE: manylinux2_28-builder
 | 
			
		||||
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
 | 
			
		||||
      DESIRED_PYTHON: "3.14"
 | 
			
		||||
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
 | 
			
		||||
      build_name: manywheel-py3_14-cuda12_9
 | 
			
		||||
      build_environment: linux-binary-manywheel
 | 
			
		||||
      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64'
 | 
			
		||||
    secrets:
 | 
			
		||||
      github-token: ${{ secrets.GITHUB_TOKEN }}
 | 
			
		||||
  manywheel-py3_14-cuda12_9-test:  # Testing
 | 
			
		||||
    if: ${{ github.repository_owner == 'pytorch' }}
 | 
			
		||||
    needs:
 | 
			
		||||
      - manywheel-py3_14-cuda12_9-build
 | 
			
		||||
      - get-label-type
 | 
			
		||||
    uses: ./.github/workflows/_binary-test-linux.yml
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.14"
      build_name: manywheel-py3_14-cuda12_9
      build_environment: linux-binary-manywheel
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
  manywheel-py3_14-cuda12_9-upload:  # Uploading
    if: ${{ github.repository_owner == 'pytorch' }}
    permissions:
      id-token: write
      contents: read
    needs: manywheel-py3_14-cuda12_9-test
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.14"
      build_name: manywheel-py3_14-cuda12_9
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  manywheel-py3_14-cuda13_0-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
@ -3743,9 +3317,6 @@ jobs:
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: rocm6.4
      DESIRED_PYTHON: "3.14"
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm
@ -3858,9 +3429,6 @@ jobs:
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: rocm7.0
      DESIRED_PYTHON: "3.14"
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm
@ -4237,72 +3805,6 @@ jobs:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  manywheel-py3_14t-cuda12_9-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
    needs: get-label-type
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.14t"
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      build_name: manywheel-py3_14t-cuda12_9
      build_environment: linux-binary-manywheel
      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64'
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
  manywheel-py3_14t-cuda12_9-test:  # Testing
    if: ${{ github.repository_owner == 'pytorch' }}
    needs:
      - manywheel-py3_14t-cuda12_9-build
      - get-label-type
    uses: ./.github/workflows/_binary-test-linux.yml
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.14t"
      build_name: manywheel-py3_14t-cuda12_9
      build_environment: linux-binary-manywheel
      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
      runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
  manywheel-py3_14t-cuda12_9-upload:  # Uploading
    if: ${{ github.repository_owner == 'pytorch' }}
    permissions:
      id-token: write
      contents: read
    needs: manywheel-py3_14t-cuda12_9-test
    with:
      PYTORCH_ROOT: /pytorch
      PACKAGE_TYPE: manywheel
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
      DESIRED_PYTHON: "3.14t"
      build_name: manywheel-py3_14t-cuda12_9
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml

  manywheel-py3_14t-cuda13_0-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    uses: ./.github/workflows/_binary-build-linux.yml
@ -4409,9 +3911,6 @@ jobs:
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: rocm6.4
      DESIRED_PYTHON: "3.14t"
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm
@ -4524,9 +4023,6 @@ jobs:
      DOCKER_IMAGE: manylinux2_28-builder
      DOCKER_IMAGE_TAG_PREFIX: rocm7.0
      DESIRED_PYTHON: "3.14t"
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm

250 .github/workflows/generated-windows-binary-libtorch-debug-nightly.yml generated vendored
@ -788,256 +788,6 @@ jobs:
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml
  libtorch-cuda12_9-shared-with-deps-debug-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      PACKAGE_TYPE: libtorch
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      SKIP_ALL_TESTS: 1
      LIBTORCH_CONFIG: debug
      LIBTORCH_VARIANT: shared-with-deps
      # This is a dummy value for libtorch to work correctly with our batch scripts
      # without this value pip does not get installed for some reason
      DESIRED_PYTHON: "3.10"
    steps:
      # NOTE: These environment variables are put here so that they can be applied on every job equally
      #       They are also here because setting them at a workflow level doesn't give us access to the
      #       runner.temp variable, which we need.
      - name: Populate binary env
        shell: bash
        run: |
          echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
          echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
          echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
      - name: Display EC2 information
        shell: bash
        run: |
          set -euo pipefail
          function get_ec2_metadata() {
            # Pulled from instance metadata endpoint for EC2
            # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
            category=$1
            curl -H "X-aws-ec2-metadata-token: $(curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 30")" -fsSL "http://169.254.169.254/latest/meta-data/${category}"
          }
          echo "ami-id: $(get_ec2_metadata ami-id)"
          echo "instance-id: $(get_ec2_metadata instance-id)"
          echo "instance-type: $(get_ec2_metadata instance-type)"
          echo "system info $(uname -a)"
      - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
        uses: pytorch/test-infra/.github/actions/setup-ssh@main
        continue-on-error: true
        with:
          github-secret: ${{ secrets.GITHUB_TOKEN }}
      - name: Enable git long paths and symlinks on Windows and disable fsmonitor daemon
        shell: bash
        run: |
          git config --global core.longpaths true
          git config --global core.symlinks true

          # https://git-scm.com/docs/git-fsmonitor--daemon.  The daemon could lock
          # the directory on Windows and prevent GHA from checking out as reported
          # in https://github.com/actions/checkout/issues/1018
          git config --global core.fsmonitor false
      # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
      - name: Enable long paths on Windows
        shell: powershell
        run: |
          Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
      # Since it's just a defensive command, the workflow should continue even the command fails. This step can be
      # removed once Windows Defender is removed from the AMI
      - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
        continue-on-error: true
        shell: powershell
        run: |
          Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
          # Let's both exclude the path and disable Windows Defender completely just to be sure
          # that it doesn't interfere
          Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
      - name: Checkout PyTorch
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
          submodules: recursive
          path: pytorch
          show-progress: false
      - name: Clean PyTorch checkout
        run: |
          # Remove any artifacts from the previous checkouts
          git clean -fxd
        working-directory: pytorch
      - name: Populate binary env
        shell: bash
        run: |
          "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
      - name: Build PyTorch binary
        shell: bash
        run: |
          "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
      - uses: actions/upload-artifact@v4.4.0
        if: always()
        with:
          name: libtorch-cuda12_9-shared-with-deps-debug
          retention-days: 14
          if-no-files-found: error
          path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
      - name: Wait until all sessions have drained
        shell: powershell
        working-directory: pytorch
        if: always()
        timeout-minutes: 120
        run: |
          .github\scripts\wait_for_ssh_to_drain.ps1
      - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
        shell: powershell
        working-directory: pytorch
        if: always()
        run: |
          .github\scripts\kill_active_ssh_sessions.ps1

  libtorch-cuda12_9-shared-with-deps-debug-test:  # Testing
    if: ${{ github.repository_owner == 'pytorch' }}
    needs:
      - libtorch-cuda12_9-shared-with-deps-debug-build
      - get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.g4dn.xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      PACKAGE_TYPE: libtorch
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      SKIP_ALL_TESTS: 1
      LIBTORCH_CONFIG: debug
      LIBTORCH_VARIANT: shared-with-deps
      # This is a dummy value for libtorch to work correctly with our batch scripts
      # without this value pip does not get installed for some reason
      DESIRED_PYTHON: "3.10"
    steps:
      - name: Display EC2 information
        shell: bash
        run: |
          set -euo pipefail
          function get_ec2_metadata() {
            # Pulled from instance metadata endpoint for EC2
            # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
            category=$1
            curl -H "X-aws-ec2-metadata-token: $(curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 30")" -fsSL "http://169.254.169.254/latest/meta-data/${category}"
          }
          echo "ami-id: $(get_ec2_metadata ami-id)"
          echo "instance-id: $(get_ec2_metadata instance-id)"
          echo "instance-type: $(get_ec2_metadata instance-type)"
          echo "system info $(uname -a)"
      - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
        uses: pytorch/test-infra/.github/actions/setup-ssh@main
        continue-on-error: true
        with:
          github-secret: ${{ secrets.GITHUB_TOKEN }}
      - name: Enable git long paths and symlinks on Windows and disable fsmonitor daemon
        shell: bash
        run: |
          git config --global core.longpaths true
          git config --global core.symlinks true

          # https://git-scm.com/docs/git-fsmonitor--daemon.  The daemon could lock
          # the directory on Windows and prevent GHA from checking out as reported
          # in https://github.com/actions/checkout/issues/1018
          git config --global core.fsmonitor false
      # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
      - name: Enable long paths on Windows
        shell: powershell
        run: |
          Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
      # Since it's just a defensive command, the workflow should continue even the command fails. This step can be
      # removed once Windows Defender is removed from the AMI
      - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
        continue-on-error: true
        shell: powershell
        run: |
          Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
          # Let's both exclude the path and disable Windows Defender completely just to be sure
          # that it doesn't interfere
          Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
      - name: Checkout PyTorch
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
          submodules: recursive
          path: pytorch
          show-progress: false
      - name: Clean PyTorch checkout
        run: |
          # Remove any artifacts from the previous checkouts
          git clean -fxd
        working-directory: pytorch
      # NOTE: These environment variables are put here so that they can be applied on every job equally
      #       They are also here because setting them at a workflow level doesn't give us access to the
      #       runner.temp variable, which we need.
      - name: Populate binary env
        shell: bash
        run: |
          echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
          echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
          echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
      - uses: actions/download-artifact@v4.1.7
        name: Download Build Artifacts
        with:
          name: libtorch-cuda12_9-shared-with-deps-debug
          path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
      - name: Populate binary env
        shell: bash
        run: |
          "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
      - name: Test PyTorch binary
        shell: bash
        run: |
          "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
      - name: Wait until all sessions have drained
        shell: powershell
        working-directory: pytorch
        if: always()
        timeout-minutes: 120
        run: |
          .github\scripts\wait_for_ssh_to_drain.ps1
      - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
        shell: powershell
        working-directory: pytorch
        if: always()
        run: |
          .github\scripts\kill_active_ssh_sessions.ps1
  libtorch-cuda12_9-shared-with-deps-debug-upload:  # Uploading
    if: ${{ github.repository_owner == 'pytorch' }}
    permissions:
      id-token: write
      contents: read
    needs: libtorch-cuda12_9-shared-with-deps-debug-test
    with:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      PACKAGE_TYPE: libtorch
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      LIBTORCH_CONFIG: debug
      LIBTORCH_VARIANT: shared-with-deps
      # This is a dummy value for libtorch to work correctly with our batch scripts
      # without this value pip does not get installed for some reason
      DESIRED_PYTHON: "3.10"
      build_name: libtorch-cuda12_9-shared-with-deps-debug
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml
  libtorch-cuda13_0-shared-with-deps-debug-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type

250 .github/workflows/generated-windows-binary-libtorch-release-nightly.yml generated vendored
@ -788,256 +788,6 @@ jobs:
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml
  libtorch-cuda12_9-shared-with-deps-release-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      PACKAGE_TYPE: libtorch
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      SKIP_ALL_TESTS: 1
      LIBTORCH_CONFIG: release
      LIBTORCH_VARIANT: shared-with-deps
      # This is a dummy value for libtorch to work correctly with our batch scripts
      # without this value pip does not get installed for some reason
      DESIRED_PYTHON: "3.10"
    steps:
      # NOTE: These environment variables are put here so that they can be applied on every job equally
      #       They are also here because setting them at a workflow level doesn't give us access to the
      #       runner.temp variable, which we need.
      - name: Populate binary env
        shell: bash
        run: |
          echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
          echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
          echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
      - name: Display EC2 information
        shell: bash
        run: |
          set -euo pipefail
          function get_ec2_metadata() {
            # Pulled from instance metadata endpoint for EC2
            # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
            category=$1
            curl -H "X-aws-ec2-metadata-token: $(curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 30")" -fsSL "http://169.254.169.254/latest/meta-data/${category}"
          }
          echo "ami-id: $(get_ec2_metadata ami-id)"
          echo "instance-id: $(get_ec2_metadata instance-id)"
          echo "instance-type: $(get_ec2_metadata instance-type)"
          echo "system info $(uname -a)"
      - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
        uses: pytorch/test-infra/.github/actions/setup-ssh@main
        continue-on-error: true
        with:
          github-secret: ${{ secrets.GITHUB_TOKEN }}
      - name: Enable git long paths and symlinks on Windows and disable fsmonitor daemon
        shell: bash
        run: |
          git config --global core.longpaths true
          git config --global core.symlinks true

          # https://git-scm.com/docs/git-fsmonitor--daemon.  The daemon could lock
          # the directory on Windows and prevent GHA from checking out as reported
          # in https://github.com/actions/checkout/issues/1018
          git config --global core.fsmonitor false
      # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
      - name: Enable long paths on Windows
        shell: powershell
        run: |
          Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
      # Since it's just a defensive command, the workflow should continue even the command fails. This step can be
      # removed once Windows Defender is removed from the AMI
      - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
        continue-on-error: true
        shell: powershell
        run: |
          Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
          # Let's both exclude the path and disable Windows Defender completely just to be sure
          # that it doesn't interfere
          Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
      - name: Checkout PyTorch
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
          submodules: recursive
          path: pytorch
          show-progress: false
      - name: Clean PyTorch checkout
        run: |
          # Remove any artifacts from the previous checkouts
          git clean -fxd
        working-directory: pytorch
      - name: Populate binary env
        shell: bash
        run: |
          "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
      - name: Build PyTorch binary
        shell: bash
        run: |
          "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
      - uses: actions/upload-artifact@v4.4.0
        if: always()
        with:
          name: libtorch-cuda12_9-shared-with-deps-release
          retention-days: 14
          if-no-files-found: error
          path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
      - name: Wait until all sessions have drained
        shell: powershell
        working-directory: pytorch
        if: always()
        timeout-minutes: 120
        run: |
          .github\scripts\wait_for_ssh_to_drain.ps1
      - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
        shell: powershell
        working-directory: pytorch
        if: always()
        run: |
          .github\scripts\kill_active_ssh_sessions.ps1

  libtorch-cuda12_9-shared-with-deps-release-test:  # Testing
    if: ${{ github.repository_owner == 'pytorch' }}
    needs:
      - libtorch-cuda12_9-shared-with-deps-release-build
      - get-label-type
    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.g4dn.xlarge"
    timeout-minutes: 360
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      PACKAGE_TYPE: libtorch
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      SKIP_ALL_TESTS: 1
      LIBTORCH_CONFIG: release
      LIBTORCH_VARIANT: shared-with-deps
      # This is a dummy value for libtorch to work correctly with our batch scripts
      # without this value pip does not get installed for some reason
      DESIRED_PYTHON: "3.10"
    steps:
      - name: Display EC2 information
        shell: bash
        run: |
          set -euo pipefail
          function get_ec2_metadata() {
            # Pulled from instance metadata endpoint for EC2
            # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
            category=$1
            curl -H "X-aws-ec2-metadata-token: $(curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 30")" -fsSL "http://169.254.169.254/latest/meta-data/${category}"
          }
          echo "ami-id: $(get_ec2_metadata ami-id)"
          echo "instance-id: $(get_ec2_metadata instance-id)"
          echo "instance-type: $(get_ec2_metadata instance-type)"
          echo "system info $(uname -a)"
      - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
        uses: pytorch/test-infra/.github/actions/setup-ssh@main
        continue-on-error: true
        with:
          github-secret: ${{ secrets.GITHUB_TOKEN }}
      - name: Enable git long paths and symlinks on Windows and disable fsmonitor daemon
        shell: bash
        run: |
          git config --global core.longpaths true
          git config --global core.symlinks true

          # https://git-scm.com/docs/git-fsmonitor--daemon.  The daemon could lock
          # the directory on Windows and prevent GHA from checking out as reported
          # in https://github.com/actions/checkout/issues/1018
          git config --global core.fsmonitor false
      # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
      - name: Enable long paths on Windows
        shell: powershell
        run: |
          Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
      # Since it's just a defensive command, the workflow should continue even the command fails. This step can be
      # removed once Windows Defender is removed from the AMI
      - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
        continue-on-error: true
        shell: powershell
        run: |
          Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
          # Let's both exclude the path and disable Windows Defender completely just to be sure
          # that it doesn't interfere
          Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
      - name: Checkout PyTorch
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
          submodules: recursive
          path: pytorch
          show-progress: false
      - name: Clean PyTorch checkout
        run: |
          # Remove any artifacts from the previous checkouts
          git clean -fxd
        working-directory: pytorch
      # NOTE: These environment variables are put here so that they can be applied on every job equally
      #       They are also here because setting them at a workflow level doesn't give us access to the
      #       runner.temp variable, which we need.
      - name: Populate binary env
        shell: bash
        run: |
          echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
          echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
          echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
      - uses: actions/download-artifact@v4.1.7
        name: Download Build Artifacts
        with:
          name: libtorch-cuda12_9-shared-with-deps-release
          path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
      - name: Populate binary env
        shell: bash
        run: |
          "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
      - name: Test PyTorch binary
        shell: bash
        run: |
          "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
      - name: Wait until all sessions have drained
        shell: powershell
        working-directory: pytorch
        if: always()
        timeout-minutes: 120
        run: |
          .github\scripts\wait_for_ssh_to_drain.ps1
      - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
        shell: powershell
        working-directory: pytorch
        if: always()
        run: |
          .github\scripts\kill_active_ssh_sessions.ps1
  libtorch-cuda12_9-shared-with-deps-release-upload:  # Uploading
    if: ${{ github.repository_owner == 'pytorch' }}
    permissions:
      id-token: write
      contents: read
    needs: libtorch-cuda12_9-shared-with-deps-release-test
    with:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      PACKAGE_TYPE: libtorch
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: cu129
      GPU_ARCH_VERSION: "12.9"
      GPU_ARCH_TYPE: cuda
      LIBTORCH_CONFIG: release
      LIBTORCH_VARIANT: shared-with-deps
      # This is a dummy value for libtorch to work correctly with our batch scripts
      # without this value pip does not get installed for some reason
      DESIRED_PYTHON: "3.10"
      build_name: libtorch-cuda12_9-shared-with-deps-release
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
    uses: ./.github/workflows/_binary-upload.yml
  libtorch-cuda13_0-shared-with-deps-release-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type

1666 .github/workflows/generated-windows-binary-wheel-nightly.yml generated vendored
File diff suppressed because it is too large
4 .github/workflows/vllm.yml vendored
@ -46,7 +46,7 @@ jobs:
      runner: linux.24xlarge.memory
      test-matrix: |
        { include: [
          { config: "vllm_basic_correctness_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
          { config:  "vllm_basic_correctness_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
          { config: "vllm_basic_models_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
          { config: "vllm_entrypoints_test", shard: 1, num_shards: 1,runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
          { config: "vllm_regression_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
@ -54,7 +54,7 @@ jobs:
          { config: "vllm_pytorch_compilation_unit_tests", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
          { config: "vllm_lora_28_failure_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
          { config: "vllm_multi_model_test_28_failure_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu"},
          { config: "vllm_language_model_test_extended_generation_28_failure_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu"},
          { config: "vllm_languagde_model_test_extended_generation_28_failure_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu"},
          { config: "vllm_distributed_test_2_gpu_28_failure_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
          { config: "vllm_lora_test", shard: 0, num_shards: 4, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
          { config: "vllm_lora_test", shard: 1, num_shards: 4, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },

@ -58,7 +58,7 @@ namespace at {
namespace{

// PyTorch allows operations to specify dim 0 and dim -1 on a scalar tensor.
bool is_allowed_dim_on_scalar_tensor(int64_t dim) {
static bool is_allowed_dim_on_scalar_tensor(int64_t dim) {
  return dim == 0 || dim == -1;
}

@ -365,7 +365,7 @@ Tensor select_batching_rule(const Tensor& self, int64_t dim, int64_t index) {
  return self_physical.getPhysicalToLogicalMap().apply(result);
}

int64_t getGradInputPhysicalDim(int64_t dim, IntArrayRef input_sizes, int64_t num_batch_dims) {
static int64_t getGradInputPhysicalDim(int64_t dim, IntArrayRef input_sizes, int64_t num_batch_dims) {
  return maybe_wrap_dim(dim, static_cast<int64_t>(input_sizes.size())) + num_batch_dims;
}

@ -488,7 +488,7 @@ Tensor view_as_complex_batching_rule(const Tensor& self) {
// Checks that the smallest batch stride is greater than the largest example
// stride. This is something we can support but we choose not to because it's
// potentially error prone.
void checkBatchDimsAtFrontInLayout(IntArrayRef physical_strides, int64_t num_batch_dims) {
static void checkBatchDimsAtFrontInLayout(IntArrayRef physical_strides, int64_t num_batch_dims) {
  auto smallest_batch_stride = std::min_element(
      physical_strides.begin(), physical_strides.begin() + num_batch_dims);
  auto largest_example_stride = std::max_element(
@ -508,7 +508,7 @@ void checkBatchDimsAtFrontInLayout(IntArrayRef physical_strides, int64_t num_bat
// given (sizes, strides, storage_offset) returns the maximum location that
// can be indexed (or nullopt if such a location doesn't exist, e.g., tensors
// with zero-size dims).
std::optional<int64_t> maximum_indexable_location(
static std::optional<int64_t> maximum_indexable_location(
    IntArrayRef sizes, IntArrayRef strides, int64_t storage_offset) {
  auto result = native::storage_size_for(sizes, strides);
  if (result == 0) {
@ -521,7 +521,7 @@ std::optional<int64_t> maximum_indexable_location(
// This checks that the range of possible memory locations accessible by
// x.as_strided(sizes, strides, maybe_storage_offset)
// are within the bounds of possible memory locations accessible by x.
void checkBasicAsStridedValidForSlice(
static void checkBasicAsStridedValidForSlice(
    const Tensor& physical_tensor,
    int64_t num_batch_dims,
    IntArrayRef sizes,

@ -13,7 +13,7 @@ namespace {
  // and left at true for the rest of the execution.
  // It's an optimization so that users who never use default hooks don't need to
  // read the thread_local variables pack_hook_ and unpack_hook_.
  bool is_initialized(false);
  static bool is_initialized(false);
}

static void assertSavedTensorHooksNotDisabled() {

@ -56,7 +56,7 @@ inline void get_strides(int64_t* strides, ArrayRef<OperandInfo> operands, int64_
  }
}

OptionalTensorRef make_otr(const TensorBase &tensor) {
static OptionalTensorRef make_otr(const TensorBase &tensor) {
  if (tensor.defined()) {
    return OptionalTensorRef(tensor);
  } else {

@ -36,7 +36,7 @@ namespace {
using weakref_type = c10::weak_intrusive_ptr<TensorImpl, UndefinedTensorImpl>;
using val_type = std::tuple<weakref_type, Tensor>;

ska::flat_hash_map<TensorImpl*, val_type>& get_cached_casts() {
static ska::flat_hash_map<TensorImpl*, val_type>& get_cached_casts() {
  static ska::flat_hash_map<TensorImpl*, val_type> cached_casts;
  return cached_casts;
}

@ -6,9 +6,9 @@ namespace at {

namespace {

std::array<HostAllocator*, at::COMPILE_TIME_MAX_DEVICE_TYPES>
static std::array<HostAllocator*, at::COMPILE_TIME_MAX_DEVICE_TYPES>
    allocator_array{};
std::array<uint8_t, at::COMPILE_TIME_MAX_DEVICE_TYPES>
static std::array<uint8_t, at::COMPILE_TIME_MAX_DEVICE_TYPES>
    allocator_priority{};

} // anonymous namespace

@ -108,7 +108,7 @@ static hipblasStatus_t rocBLASStatusToHIPStatus(rocblas_status error)

namespace {

cublasOperation_t _cublasOpFromChar(char op) {
static cublasOperation_t _cublasOpFromChar(char op) {
  // NOLINTNEXTLINE(bugprone-switch-missing-default-case)
  switch (op) {
    case 'n':
@ -128,7 +128,7 @@ cublasOperation_t _cublasOpFromChar(char op) {
      "_cublasOpFromChar input should be 't', 'n' or 'c' but got `", op, "`");
}

void _cublasAdjustLdLevel2(int64_t m, int64_t n, int64_t* lda) {
static void _cublasAdjustLdLevel2(int64_t m, int64_t n, int64_t* lda) {
  // Note: leading dimensions generally are checked that they are > 0
  // and at least as big the result requires (even if the value won't
  // be used).
@ -142,7 +142,7 @@ void _cublasAdjustLdLevel2(int64_t m, int64_t n, int64_t* lda) {
    *lda = std::max<int64_t>(m, 1);
}

void _cublasAdjustLdLevel3(
static void _cublasAdjustLdLevel3(
    char transa,
    char transb,
    int64_t m,

@ -15,19 +15,19 @@ namespace cuda::detail {
namespace {

// Total number of gpus in the system.
int64_t num_gpus;
static int64_t num_gpus;

// Ensures default_gens_cuda is initialized once.
std::deque<c10::once_flag> cuda_gens_init_flag;
static std::deque<c10::once_flag> cuda_gens_init_flag;

// Default, global CUDA generators, one per GPU.
std::vector<Generator> default_gens_cuda;
static std::vector<Generator> default_gens_cuda;

/*
 * Populates the global variables related to CUDA generators
 * Warning: this function must only be called once!
 */
void initCUDAGenVector() {
static void initCUDAGenVector() {
  // Ensures we only call cudaGetDeviceCount only once.
  static bool num_gpu_init_flag [[maybe_unused]] = []() {
    num_gpus = static_cast<int32_t>(c10::cuda::device_count());

@ -39,7 +39,7 @@ Tensor vdot_decomp(const Tensor& A, const Tensor& B) {
// NB: I wrote this like this because we *might* want its for a future matmul
// batch rule that isn't decomposed...
// "tv" = tensor @ vector
std::tuple<Tensor, std::optional<int64_t>> tv_batch_rule(
static std::tuple<Tensor, std::optional<int64_t>> tv_batch_rule(
    const Tensor& self, std::optional<int64_t> self_bdim,
    const Tensor& other, std::optional<int64_t> other_bdim) {
  if (self_bdim && other_bdim) {
@ -66,7 +66,7 @@ std::tuple<Tensor, std::optional<int64_t>> tv_batch_rule(
  TORCH_INTERNAL_ASSERT(false, "can't get here");
}

std::tuple<Tensor, std::optional<int64_t>> mv_batch_rule(
static std::tuple<Tensor, std::optional<int64_t>> mv_batch_rule(
    const Tensor& self, std::optional<int64_t> self_bdim,
    const Tensor& other, std::optional<int64_t> other_bdim) {
  auto self_logical_rank = rankWithoutBatchDim(self, self_bdim);
@ -79,7 +79,7 @@ std::tuple<Tensor, std::optional<int64_t>> mv_batch_rule(
  return tv_batch_rule(self, self_bdim, other, other_bdim);
}

std::tuple<Tensor, std::optional<int64_t>> mm_batch_rule(
static std::tuple<Tensor, std::optional<int64_t>> mm_batch_rule(
    const Tensor& self, std::optional<int64_t> self_bdim,
    const Tensor& other, std::optional<int64_t> other_bdim) {
  auto self_logical_rank = rankWithoutBatchDim(self, self_bdim);
@ -94,7 +94,7 @@ std::tuple<Tensor, std::optional<int64_t>> mm_batch_rule(
  return std::make_tuple( at::matmul(self_, other_), 0 );
}

std::tuple<Tensor, std::optional<int64_t>> bmm_batch_rule(
static std::tuple<Tensor, std::optional<int64_t>> bmm_batch_rule(
    const Tensor& self, std::optional<int64_t> self_bdim,
    const Tensor& other, std::optional<int64_t> other_bdim) {
  auto self_logical_rank = rankWithoutBatchDim(self, self_bdim);
@ -250,7 +250,7 @@ struct LinalgCheckMatrixBinaryRuleHelper<op_name, F, Func, typelist<A, B, T...>>
  }
};

void expect_at_least_rank(
static void expect_at_least_rank(
    const Tensor& tensor,
    std::optional<int64_t> tensor_bdim,
    int64_t expected_rank,
@ -472,7 +472,7 @@ atol_rtol_tensor_batch_rule(
  return std::make_tuple(Func(input_, atol_, rtol_, hermitian), 0);
}

std::tuple<Tensor, std::optional<int64_t>>
static std::tuple<Tensor, std::optional<int64_t>>
pinv_batch_rule(
    const Tensor& input, std::optional<int64_t> input_bdim, const std::optional<Tensor>& atol,
    const std::optional<int64_t> atol_bdim, const std::optional<Tensor>& rtol,

@ -19,7 +19,7 @@
namespace at::functorch {

namespace {
bool any_has_value(ArrayRef<std::optional<int64_t>> bdims) {
static bool any_has_value(ArrayRef<std::optional<int64_t>> bdims) {
  for (const auto& bdim : bdims) {
    if (bdim.has_value()) {
      return true;
@ -28,7 +28,7 @@ bool any_has_value(ArrayRef<std::optional<int64_t>> bdims) {
  return false;
}

int64_t get_num_leading_nones(ArrayRef<std::optional<Tensor>> indices) {
static int64_t get_num_leading_nones(ArrayRef<std::optional<Tensor>> indices) {
  int64_t result = 0;
  for (const auto& idx : indices) {
    if (!idx.has_value() || !idx->defined()) {
@ -40,7 +40,7 @@ int64_t get_num_leading_nones(ArrayRef<std::optional<Tensor>> indices) {
  return result;
}

int64_t get_max_index_logical_dim(
static int64_t get_max_index_logical_dim(
    ArrayRef<std::optional<Tensor>> indices,
    ArrayRef<std::optional<int64_t>> indices_bdims) {
  int64_t max_logical_dim = -1;
@ -57,7 +57,7 @@ int64_t get_max_index_logical_dim(
  return max_logical_dim;
}

std::vector<std::optional<Tensor>> batchIndices(
static std::vector<std::optional<Tensor>> batchIndices(
  at::TensorOptions options,
  ArrayRef<std::optional<Tensor>> indices,
  ArrayRef<std::optional<int64_t>> indices_bdims,
@ -126,7 +126,7 @@ std::vector<std::optional<Tensor>> batchIndices(

// Define an "advanced index" to be a selection object that is
// a non-trivial Tensor (i.e. it does not represent :).
bool is_advanced_index(const std::optional<Tensor>& idx) {
static bool is_advanced_index(const std::optional<Tensor>& idx) {
  if (!idx.has_value()) {
    return false;
  }
@ -137,7 +137,7 @@ bool is_advanced_index(const std::optional<Tensor>& idx) {
}

// See NOTE: [advanced indices adjacent] for definition
bool are_advanced_indices_adjacent(ArrayRef<std::optional<Tensor>> indices) {
static bool are_advanced_indices_adjacent(ArrayRef<std::optional<Tensor>> indices) {
  int64_t num_advanced_indices_regions = 0;
  bool in_advanced_indices_region = false;
  for (const auto& idx : indices) {
@ -165,7 +165,7 @@ bool are_advanced_indices_adjacent(ArrayRef<std::optional<Tensor>> indices) {
// - result: Tensor[B, 4, 5, 6, 2, 3, 7, 8]
//                     -------  ----
//                     region2  region1
Tensor swap_regions(const Tensor& tensor, int64_t first_region_size, int64_t second_region_size) {
static Tensor swap_regions(const Tensor& tensor, int64_t first_region_size, int64_t second_region_size) {
  VmapDimVector permutation(tensor.dim(), 0);
  std::iota(permutation.begin(), permutation.end(), 0);
  std::rotate(
@ -553,7 +553,7 @@ Tensor &_index_put_impl__plumbing(Tensor &self, const List<std::optional<Tensor>
  return self;
}

Tensor maybe_permute_values(
static Tensor maybe_permute_values(
    const Tensor& values,
    ArrayRef<std::optional<Tensor>> orig_indices,
    ArrayRef<std::optional<int64_t>> orig_indices_bdims) {
@ -1052,7 +1052,7 @@ std::tuple<Tensor, std::optional<int64_t>> index_add_batch_rule(
                                   other, other_bdim, alpha, false);
}

std::tuple<Tensor,Tensor> binary_pointwise_align(
static std::tuple<Tensor,Tensor> binary_pointwise_align(
    const Tensor & self,
    std::optional<int64_t> self_bdim,
    const Tensor & mask,

@ -346,7 +346,7 @@ std::tuple<Tensor, std::optional<int64_t>> slice_batch_rule(
  return std::make_tuple(std::move(result), 0);
}

bool is_allowed_dim_on_scalar_tensor(int64_t dim) {
static bool is_allowed_dim_on_scalar_tensor(int64_t dim) {
  return dim == 0 || dim == -1;
}


@ -68,18 +68,18 @@ namespace at::functorch {

namespace{
// PyTorch allows operations to specify dim 0 and dim -1 on a scalar tensor.
bool is_allowed_dim_on_scalar_tensor(int64_t dim) {
static bool is_allowed_dim_on_scalar_tensor(int64_t dim) {
  return dim == 0 || dim == -1;
}

int64_t get_current_level() {
static int64_t get_current_level() {
  auto maybe_level = maybeCurrentDynamicLayer();
  TORCH_INTERNAL_ASSERT(maybe_level.has_value());
  return maybe_level->layerId();
}

// This check should probably go into the dispatcher...
bool participatesInCurrentLevel(const Tensor& self) {
static bool participatesInCurrentLevel(const Tensor& self) {
  auto current_level = get_current_level();
  auto* maybe_batched_impl = maybeGetBatchedImpl(self);
  if (!maybe_batched_impl) {
@ -90,7 +90,7 @@ bool participatesInCurrentLevel(const Tensor& self) {
  return self_level == current_level;
}

bool participatesInCurrentLevel(ITensorListRef self) {
static bool participatesInCurrentLevel(ITensorListRef self) {
  for (const Tensor& tensor : self) {
    if (participatesInCurrentLevel(tensor)) {
      return true;
@ -285,7 +285,7 @@ std::vector<Tensor> unbind_batching_rule(const Tensor& self, int64_t dim) {
// given (sizes, strides, storage_offset) returns the maximum location that
// can be indexed (or nullopt if such a location doesn't exist, e.g., tensors
// with zero-size dims).
std::optional<c10::SymInt> maximum_indexable_location(
static std::optional<c10::SymInt> maximum_indexable_location(
    c10::SymIntArrayRef sizes, c10::SymIntArrayRef strides, const c10::SymInt& storage_offset) {
  auto result = native::storage_size_for(sizes, strides);
  if (result == 0) {
@ -298,7 +298,7 @@ std::optional<c10::SymInt> maximum_indexable_location(
// This checks that the range of possible memory locations accessible by
// x.as_strided(sizes, strides, maybe_storage_offset)
// are within the bounds of possible memory locations accessible by x.
void checkBasicAsStridedValidForSlice(
static void checkBasicAsStridedValidForSlice(
    const Tensor& physical_tensor,
    int64_t num_batch_dims,
    c10::SymIntArrayRef sizes,

@ -71,7 +71,7 @@ Tensor linear_hack(const Tensor& input, const Tensor& weight, const std::optiona
  return output;
}

inline at::Tensor apply_loss_reduction(const at::Tensor& unreduced, int64_t reduction) {
static inline at::Tensor apply_loss_reduction(const at::Tensor& unreduced, int64_t reduction) {
 | 
			
		||||
  if (reduction == at::Reduction::Mean) {
 | 
			
		||||
    return unreduced.mean();
 | 
			
		||||
  } else if (reduction == at::Reduction::Sum) {
 | 
			
		||||
@ -127,7 +127,7 @@ namespace {
 | 
			
		||||
template<bool inplace>
 | 
			
		||||
using Ctype = std::conditional_t<inplace, Tensor&, Tensor>;
 | 
			
		||||
 | 
			
		||||
Tensor make_feature_noise(const Tensor& input) {
 | 
			
		||||
static Tensor make_feature_noise(const Tensor& input) {
 | 
			
		||||
  auto input_sizes = input.sizes();
 | 
			
		||||
  TORCH_CHECK(input.dim() >= 2, "Feature dropout requires at least 2 dimensions in the input");
 | 
			
		||||
  std::vector<int64_t> sizes;
 | 
			
		||||
@ -141,7 +141,7 @@ Tensor make_feature_noise(const Tensor& input) {
 | 
			
		||||
  return at::empty(sizes, input.options());
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
bool is_fused_kernel_acceptable(const Tensor& input, double p) {
 | 
			
		||||
static bool is_fused_kernel_acceptable(const Tensor& input, double p) {
 | 
			
		||||
  return (input.is_cuda() || input.is_xpu() || input.is_lazy() || input.is_privateuseone()) && p > 0 && p < 1 && input.numel() > 0;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@ -210,7 +210,7 @@ ALIAS_SPECIALIZATION(_feature_dropout,       true,  false)
 | 
			
		||||
ALIAS_SPECIALIZATION(_alpha_dropout,         false, true )
 | 
			
		||||
ALIAS_SPECIALIZATION(_feature_alpha_dropout, true,  true )
 | 
			
		||||
 | 
			
		||||
Tensor dropout(const Tensor& input, double p, bool train) {
 | 
			
		||||
static Tensor dropout(const Tensor& input, double p, bool train) {
 | 
			
		||||
  auto result = [&]() {
 | 
			
		||||
    NoNamesGuard guard;
 | 
			
		||||
    if (train && is_fused_kernel_acceptable(input, p)) {
 | 
			
		||||
 | 
			
		||||
@ -24,7 +24,7 @@ namespace at::native {
namespace {

template <typename scalar_t>
void adaptive_avg_pool3d_out_frame(
static void adaptive_avg_pool3d_out_frame(
    const scalar_t* input_p,
    scalar_t* output_p,
    int64_t sizeD,
@ -176,7 +176,7 @@ void adaptive_avg_pool3d_out_cpu_template(
}

template <typename scalar_t>
void adaptive_avg_pool3d_backward_out_frame(
static void adaptive_avg_pool3d_backward_out_frame(
    scalar_t* gradInput_p,
    const scalar_t* gradOutput_p,
    int64_t sizeD,

@ -93,7 +93,7 @@ namespace {
// 5d tensor B x D x T x H x W

template <typename scalar_t>
void adaptive_max_pool3d_single_out_frame(
static void adaptive_max_pool3d_single_out_frame(
          const scalar_t *input_p,
          scalar_t *output_p,
          int64_t *ind_p,
@ -170,7 +170,7 @@ void adaptive_max_pool3d_single_out_frame(
}

template <typename scalar_t>
void adaptive_max_pool3d_out_frame(
static void adaptive_max_pool3d_out_frame(
          const scalar_t *input_data,
          scalar_t *output_data,
          int64_t *indices_data,
@ -202,7 +202,7 @@ void adaptive_max_pool3d_out_frame(
}

template <typename scalar_t>
void adaptive_max_pool3d_backward_single_out_frame(
static void adaptive_max_pool3d_backward_single_out_frame(
          scalar_t *gradInput_p,
          const scalar_t *gradOutput_p,
          const int64_t *ind_p,
@ -241,7 +241,7 @@ void adaptive_max_pool3d_backward_single_out_frame(
}

template <typename scalar_t>
void adaptive_max_pool3d_backward_out_frame(
static void adaptive_max_pool3d_backward_out_frame(
          scalar_t *gradInput_data,
          const scalar_t *gradOutput_data,
          const int64_t *indices_data,

@ -153,7 +153,7 @@ namespace at::native {
namespace {

template <typename scalar_t>
void avg_pool3d_out_frame(
static void avg_pool3d_out_frame(
          const scalar_t *input_p,
          scalar_t *output_p,
          int64_t nslices,
@ -333,7 +333,7 @@ TORCH_IMPL_FUNC(avg_pool3d_out_cpu) (
namespace {

template <typename scalar_t>
void avg_pool3d_backward_out_frame(
static void avg_pool3d_backward_out_frame(
          scalar_t *gradInput_p,
          const scalar_t *gradOutput_p,
          int64_t nslices,

@ -143,13 +143,13 @@ Tensor& cholesky_inverse_kernel_impl(Tensor& result, Tensor& infos, bool upper)
 For more info see https://github.com/pytorch/pytorch/issues/145801#issuecomment-2631781776
*/
template <typename T>
inline
static inline
std::enable_if_t<std::is_floating_point_v<T>, int> lapack_work_to_int(const T val) {
    const auto next_after = std::nextafter(val, std::numeric_limits<T>::infinity());
    return std::max<int>(1, std::ceil(next_after));
}
template <typename T>
inline
static inline
std::enable_if_t<c10::is_complex<T>::value, int> lapack_work_to_int(const T val) {
    return lapack_work_to_int(val.real());
}
@ -343,7 +343,7 @@ void linalg_eigh_kernel(const Tensor& eigenvalues, const Tensor& eigenvectors, c
  For further details, please see the LAPACK documentation for GEQRF.
*/
template <typename scalar_t>
void apply_geqrf(const Tensor& input, const Tensor& tau) {
static void apply_geqrf(const Tensor& input, const Tensor& tau) {
#if !AT_BUILD_WITH_LAPACK()
  TORCH_CHECK(
      false,
@ -1039,7 +1039,7 @@ void lu_solve_kernel(const Tensor& LU, const Tensor& pivots, const Tensor& B, Tr
}

template <typename scalar_t>
void apply_svd(const Tensor& A,
static void apply_svd(const Tensor& A,
                      const bool full_matrices,
                      const bool compute_uv,
                      const Tensor& U,

@ -71,7 +71,7 @@
namespace at::native {
namespace {

void col2im_out_cpu_template(
static void col2im_out_cpu_template(
    Tensor& output,
    const Tensor& input_,
    IntArrayRef output_size,

@ -25,7 +25,7 @@ namespace at::native {

namespace {

Tensor compute_columns2d(
static Tensor compute_columns2d(
    const Tensor& input,
    IntArrayRef padding,
    IntArrayRef stride,
@ -93,7 +93,7 @@ Tensor compute_columns2d(
  return columns.contiguous();
}

inline void slow_conv2d_shape_check(
static inline void slow_conv2d_shape_check(
    const Tensor& input,
    const Tensor& grad_output,
    const Tensor& weight,
@ -205,7 +205,7 @@ inline void slow_conv2d_shape_check(
  }
}

inline Tensor view_weight_2d(const Tensor& weight_,
static inline Tensor view_weight_2d(const Tensor& weight_,
    at::MemoryFormat memory_format = at::MemoryFormat::Contiguous) {
  Tensor weight = weight_.contiguous(memory_format);
  if (weight.dim() == 4) {
@ -220,7 +220,7 @@ inline Tensor view_weight_2d(const Tensor& weight_,
}

template <typename scalar_t>
void slow_conv2d_update_output_frame(
static void slow_conv2d_update_output_frame(
    TensorAccessor<const scalar_t, 3> input,
    TensorAccessor<scalar_t, 3> output,
    TensorAccessor<const scalar_t, 2> weight,
@ -480,7 +480,7 @@ void slow_conv2d_backward_weight_frame(
  }
}

void slow_conv2d_backward_weight_out_cpu_template(
static void slow_conv2d_backward_weight_out_cpu_template(
    Tensor& grad_weight,
    const Tensor& input,
    const Tensor& grad_output_,

@ -28,7 +28,7 @@ namespace at::native {

namespace {

Tensor compute_columns3d(
static Tensor compute_columns3d(
    const Tensor& input_,
    IntArrayRef stride,
    IntArrayRef padding,
@ -108,7 +108,7 @@ Tensor compute_columns3d(
  return columns;
}

inline void slow_conv3d_shape_check(
static inline void slow_conv3d_shape_check(
    const Tensor& input,
    const Tensor& grad_output,
    const Tensor& weight,
@ -273,7 +273,7 @@ inline void slow_conv3d_shape_check(
  }
}

Tensor view_weight_2d(const Tensor& weight_) {
static Tensor view_weight_2d(const Tensor& weight_) {
  Tensor weight = weight_.contiguous();
  if (weight.dim() == 5) {
    const int64_t s1 = weight.size(0);
@ -286,7 +286,7 @@ Tensor view_weight_2d(const Tensor& weight_) {
}

template <typename scalar_t>
void slow_conv3d_update_output_frame(
static void slow_conv3d_update_output_frame(
    TensorAccessor<const scalar_t, 4> input,
    TensorAccessor<scalar_t, 4> output,
    TensorAccessor<const scalar_t, 2> weight,
@ -515,7 +515,7 @@ void slow_conv3d_backward_weight_frame(
      grad_weight.data(), ldc, grad_weight.stride(0) * n);
}

void slow_conv3d_backward_parameters_out_cpu_template(
static void slow_conv3d_backward_parameters_out_cpu_template(
    Tensor& grad_weight,
    const Tensor& input,
    const Tensor& grad_output,

@ -108,7 +108,7 @@ bool is_fast_path(const Tensor& src, const std::optional<Tensor>& scale, Tensor&
// index_add (using add_indices as the index), without creating an intermediary
// tensor to hold the selected embeddings
template <typename data_t, typename index_t>
std::enable_if_t<std::is_same_v<data_t, double>, void>
static std::enable_if_t<std::is_same_v<data_t, double>, void>
index_select_add(
    const Tensor& select_indices,
    const Tensor& add_indices,
@ -494,7 +494,7 @@ index_select_add(const Tensor &select_indices,
// mul (scaling by per_sample_weights)
// index_add (using add_indices as the index)
template <typename data_t, typename index_t>
std::enable_if_t<std::is_same_v<data_t, double>, void>
static std::enable_if_t<std::is_same_v<data_t, double>, void>
index_select_scale_add(
    const Tensor& select_indices,
    const Tensor& add_indices,

@ -130,7 +130,7 @@ namespace native {
namespace {

template <typename scalar_t>
void fractional_max_pool2d_out_single_batch_frame(
static void fractional_max_pool2d_out_single_batch_frame(
  const scalar_t* input,
  scalar_t* output,
  int64_t* indices,
@ -188,7 +188,7 @@ void fractional_max_pool2d_out_single_batch_frame(
}

template <typename scalar_t>
void fractional_max_pool2d_out_frame(
static void fractional_max_pool2d_out_frame(
  const scalar_t* input,
  scalar_t* output,
  int64_t* indices,
@ -220,7 +220,7 @@ void fractional_max_pool2d_out_frame(
  }

template <typename scalar_t>
void fractional_max_pool2d_backward_out_single_batch_frame(
static void fractional_max_pool2d_backward_out_single_batch_frame(
  scalar_t* gradInput,
  const scalar_t* gradOutput,
  const int64_t* indices,
@ -247,7 +247,7 @@ void fractional_max_pool2d_backward_out_single_batch_frame(
}

template <typename scalar_t>
void fractional_max_pool2d_backward_out_frame(
static void fractional_max_pool2d_backward_out_frame(
  scalar_t* gradInput,
  const scalar_t* gradOutput,
  const int64_t* indices,

@ -99,7 +99,7 @@ namespace at::native {
namespace {

template<typename scalar_t>
void fractional_max_pool3d_out_single_batch_frame(
static void fractional_max_pool3d_out_single_batch_frame(
  const scalar_t* input,
  scalar_t* output,
  int64_t* indices,
@ -169,7 +169,7 @@ void fractional_max_pool3d_out_single_batch_frame(
}

template<typename scalar_t>
void fractional_max_pool3d_out_frame(
static void fractional_max_pool3d_out_frame(
  const scalar_t* input,
  scalar_t* output,
  int64_t* indices,
@ -257,7 +257,7 @@ TORCH_IMPL_FUNC(fractional_max_pool3d_out_cpu)(
namespace {

template<typename scalar_t>
void fractional_max_pool3d_backward_out_single_batch_frame(
static void fractional_max_pool3d_backward_out_single_batch_frame(
  scalar_t* gradInput,
  const scalar_t* gradOutput,
  const int64_t* indices,
@ -287,7 +287,7 @@ void fractional_max_pool3d_backward_out_single_batch_frame(
}

template<typename scalar_t>
void fractional_max_pool3d_backward_out_frame(
static void fractional_max_pool3d_backward_out_frame(
  scalar_t* gradInput,
  const scalar_t* gradOutput,
  const int64_t* indices,

@ -19,7 +19,7 @@
namespace at::native {
namespace {

void im2col_out_cpu_template(
static void im2col_out_cpu_template(
    Tensor& output,
    const Tensor& input_,
    IntArrayRef kernel_size,

@ -61,7 +61,7 @@
constexpr float EPSILON = 1e-12;

namespace {
  inline at::Tensor apply_loss_reduction(const at::Tensor& unreduced, int64_t reduction) {
  static inline at::Tensor apply_loss_reduction(const at::Tensor& unreduced, int64_t reduction) {
    if (reduction == at::Reduction::Mean) {
      return unreduced.mean();
    } else if (reduction == at::Reduction::Sum) {

@ -44,7 +44,7 @@ namespace {

// this ad-hoc converts from targets (l in [1]) to augmented targets (l' in [1]) note that no bound-checking is done
template<typename target_t>
inline int64_t get_target_prime(target_t* target, int64_t offset, int64_t stride, int64_t idx, int64_t BLANK) {
static inline int64_t get_target_prime(target_t* target, int64_t offset, int64_t stride, int64_t idx, int64_t BLANK) {
  if (idx % 2 == 0) {
    return BLANK;
  } else {

@ -58,7 +58,7 @@ inline scalar_t multilabel_margin_loss_forward_inner_sum_cpu(
}

template <typename scalar_t>
void multilabel_margin_loss_forward_out_frame(
static void multilabel_margin_loss_forward_out_frame(
    const Tensor& input_contiguous,
    const Tensor& target_contiguous,
    Tensor& output,
@ -108,7 +108,7 @@ void multilabel_margin_loss_forward_out_frame(
  }
}

void multilabel_margin_loss_forward_out_cpu_template(
static void multilabel_margin_loss_forward_out_cpu_template(
    const Tensor& input,
    const Tensor& target,
    Tensor& output,
@ -153,7 +153,7 @@ void multilabel_margin_loss_forward_out_cpu_template(
}

template <typename scalar_t>
void multilabel_margin_loss_backward_out_frame(
static void multilabel_margin_loss_backward_out_frame(
    Tensor& grad_input,
    const Tensor& grad_output,
    const Tensor& input_contiguous,
@ -222,7 +222,7 @@ void multilabel_margin_loss_backward_out_frame(
  }
}

void multilabel_margin_loss_backward_out_cpu_template(
static void multilabel_margin_loss_backward_out_cpu_template(
    Tensor& grad_input,
    const Tensor& grad_output,
    const Tensor& input,

@ -57,7 +57,7 @@ inline int64_t target_index_checked(
}

template <typename scalar_t>
inline void multi_margin_loss_cpu_kernel(
static inline void multi_margin_loss_cpu_kernel(
    Tensor& output,
    const scalar_t* input_data,
    const int64_t* target_data,
@ -148,7 +148,7 @@ void multi_margin_loss_out_cpu_template(
}

template <typename scalar_t>
void multi_margin_loss_backward_cpu_kernel(
static void multi_margin_loss_backward_cpu_kernel(
    scalar_t* grad_input_data,
    const Tensor& grad_output,
    const scalar_t* input_data,

@ -159,7 +159,7 @@ inline scalar_t* optional_data(const Tensor& source) {
}

template <typename scalar_t, typename target_t>
void nll_loss_out_frame(
static void nll_loss_out_frame(
    const Tensor& output,
    const Tensor& total_weight,
    const Tensor& input,
@ -338,7 +338,7 @@ void nll_loss_forward_out_cpu_template(
}

template <typename scalar_t, typename target_t>
void nll_loss_backward_out_frame(
static void nll_loss_backward_out_frame(
    const Tensor& grad_input,
    const Tensor& grad_output,
    const Tensor& input,

@ -99,7 +99,7 @@ inline void check_gradout_shape_nll_loss2d(


template <typename scalar_t>
void nll_loss2d_forward_out_frame(
static void nll_loss2d_forward_out_frame(
    Tensor& output,
    Tensor& total_weight,
    const Tensor& input,
@ -280,7 +280,7 @@ void nll_loss2d_forward_out_cpu_template(
}

template <typename scalar_t>
void nll_loss2d_backward_out_frame(
static void nll_loss2d_backward_out_frame(
    Tensor& grad_input,
    const Tensor& grad_output,
    const Tensor& input,

@ -24,7 +24,7 @@

namespace at {
namespace {
inline void slow_conv_transpose2d_shape_check(
static inline void slow_conv_transpose2d_shape_check(
    const Tensor& input,
    const Tensor& grad_output,
    const Tensor& weight,
@ -386,7 +386,7 @@ void slow_conv_transpose2d_out_cpu_template(
  }
}

void slow_conv_transpose2d_backward_out_cpu_template(
static void slow_conv_transpose2d_backward_out_cpu_template(
    const Tensor& input_,
    const Tensor& grad_output_,
    Tensor& grad_input,

@ -22,7 +22,7 @@ namespace at::native {

namespace {

inline void slow_conv_transpose3d_shape_check(
static inline void slow_conv_transpose3d_shape_check(
    const Tensor& input,
    const Tensor& grad_output,
    const Tensor& weight,

@ -92,7 +92,7 @@ namespace {
             arg_name, " should contain ", expected, " elements not ", actual);
  }

  inline Tensor repeat_if_defined(const Tensor& t, const SymInt& repeat) {
  static inline Tensor repeat_if_defined(const Tensor& t, const SymInt& repeat) {
    if (t.defined()) {
      return t.repeat_symint(repeat);
    }

@ -538,7 +538,7 @@ c10::intrusive_ptr<CellParamsBase> make_quantized_cell_params_fp16(
      std::move(w_ih_packed), std::move(w_hh_packed));
}

std::unordered_map<
static std::unordered_map<
    std::string,
    c10::intrusive_ptr<CellParamsBase> (*)(CellParamsSerializationType)>
    cell_params_deserializers = {
@ -578,7 +578,7 @@ struct QRNNCellParamsWrapper {

// Gathers every two elements of a vector in a vector of pairs
template<typename T>
std::vector<pair_of<T>> pair_vec(const std::vector<T>& vals) {
static std::vector<pair_of<T>> pair_vec(const std::vector<T>& vals) {
  TORCH_CHECK(vals.size() % 2 == 0, "Odd number of params or hiddens given to a bidirectional RNN");
  std::vector<pair_of<T>> result;
  result.reserve(vals.size() / 2);
@ -590,7 +590,7 @@ std::vector<pair_of<T>> pair_vec(const std::vector<T>& vals) {

// Flattens a vector of pairs
template<typename T>
std::vector<T> unpair_vec(std::vector<pair_of<T>>&& vals) {
static std::vector<T> unpair_vec(std::vector<pair_of<T>>&& vals) {
  std::vector<T> result;
  result.reserve(vals.size() * 2);
  for (const auto i : c10::irange(vals.size())) {
@ -601,7 +601,7 @@ std::vector<T> unpair_vec(std::vector<pair_of<T>>&& vals) {
}

// Parses a flat list of parameter tensors into a list of CellParams
std::vector<CellParams> gather_params(TensorList params, bool has_biases, bool has_projections = false) {
static std::vector<CellParams> gather_params(TensorList params, bool has_biases, bool has_projections = false) {
  static at::Tensor undefined;
  std::vector<CellParams> result;
  if (has_biases) {
@ -1894,10 +1894,10 @@ static DEFINE_QUANTIZED_RNN_CELL_DYNAMIC(quantized_rnn_tanh_cell_dynamic, simple

namespace {

[[maybe_unused]] auto ensure_linear_params_registered =
[[maybe_unused]] static auto ensure_linear_params_registered =
    register_linear_params();

auto cell_params_base_registry =
static auto cell_params_base_registry =
    torch::selective_class_<CellParamsBase>("rnn", TORCH_SELECTIVE_CLASS("CellParamsBase"))
        .def_pickle(
            [](const c10::intrusive_ptr<CellParamsBase>& self)

@ -2676,7 +2676,7 @@ inline std::tuple<Tensor, Tensor, int64_t> _take_along_dim_helper(
      std::move(dim));
}

inline void checkDevice(CheckedFrom c, const Tensor& t, Device device) {
static inline void checkDevice(CheckedFrom c, const Tensor& t, Device device) {
  TORCH_CHECK(
      !t.defined() || t.device() == device,
      "Expected tensor to have ",
@ -2689,7 +2689,7 @@ inline void checkDevice(CheckedFrom c, const Tensor& t, Device device) {
      ")");
}

inline void checkDevice(
static inline void checkDevice(
    CheckedFrom c,
    at::ArrayRef<Tensor> tensors,
    Device device) {

@ -3641,7 +3641,7 @@ Tensor& transpose_(Tensor& self, int64_t dim0, int64_t dim1) {
namespace {
// Transpose implementation for sparse compressed layouts
// NB: We assume that dim1,dim0 have already been wrapped
inline Tensor sparse_compressed_transpose(
static inline Tensor sparse_compressed_transpose(
    const Tensor& self,
    int64_t dim0,
    int64_t dim1) {

@ -29,7 +29,7 @@ namespace {
// grad_in does not mean that it is a gradient wrt to input,
// grad_in/grad_out is just an input/output of unfold_backward kernel.

[[maybe_unused]] TensorIterator _make_unfold_backward_iter_over_grad_out(
[[maybe_unused]] static TensorIterator _make_unfold_backward_iter_over_grad_out(
    Tensor& grad_out,
    const Tensor& grad_in,
    int64_t dim,

@ -105,7 +105,7 @@ namespace at::native {
namespace {

template <typename scalar_t>
void upsample_bicubic2d_backward_out_frame(
static void upsample_bicubic2d_backward_out_frame(
    const scalar_t* odata,
    scalar_t* idata,
    int64_t input_height,
@ -177,7 +177,7 @@ void upsample_bicubic2d_backward_out_frame(
  });
}

void upsample_bicubic2d_backward_kernel(
static void upsample_bicubic2d_backward_kernel(
    const Tensor& grad_input,
    const Tensor& grad_output_,
    IntArrayRef output_size,

@ -39,6 +39,6 @@ int register_linear_params() {
}

namespace {
[[maybe_unused]] auto linear_params = register_linear_params();
[[maybe_unused]] static auto linear_params = register_linear_params();
} // namespace
}  // namespace ao::sparse

@ -30,7 +30,7 @@ namespace {
// Workaround for gcc-14.2.0 ICE during RTL pass: expand when compiling for NEON
__attribute__((optimize("no-tree-vectorize")))
#endif
void log_sigmoid_cpu_kernel(TensorBase &output, TensorBase &buffer, const TensorBase &input) {
static void log_sigmoid_cpu_kernel(TensorBase &output, TensorBase &buffer, const TensorBase &input) {
  if (at::isReducedFloatingType(input.scalar_type())) {
    AT_DISPATCH_REDUCED_FLOATING_TYPES(input.scalar_type(), "log_sigmoid_cpu", [&]() {
    using Vec = Vectorized<scalar_t>;
@ -96,7 +96,7 @@ void log_sigmoid_cpu_kernel(TensorBase &output, TensorBase &buffer, const Tensor
  }
}

void log_sigmoid_backward_cpu_kernel(TensorIterator& iter) {
static void log_sigmoid_backward_cpu_kernel(TensorIterator& iter) {
  if (at::isReducedFloatingType(iter.dtype())) {
    AT_DISPATCH_REDUCED_FLOATING_TYPES(iter.dtype(), "log_sigmoid_backward_cpu", [&]() {
      using Vec = Vectorized<scalar_t>;
@ -150,7 +150,7 @@ void log_sigmoid_backward_cpu_kernel(TensorIterator& iter) {
  }
}

void threshold_kernel(
static void threshold_kernel(
    TensorIteratorBase& iter,
    const Scalar& threshold_scalar,
    const Scalar& value_scalar) {
@ -868,7 +868,7 @@ void hardswish_backward_kernel(TensorIterator& iter) {
  }
}

void leaky_relu_kernel(TensorIteratorBase& iter, const Scalar& negval_) {
static void leaky_relu_kernel(TensorIteratorBase& iter, const Scalar& negval_) {
  if (at::isReducedFloatingType(iter.dtype())) {
    AT_DISPATCH_REDUCED_FLOATING_TYPES(iter.dtype(), "leaky_relu_cpu", [&]() {
    auto zero_vec = Vectorized<float>((float)(0));
@ -907,7 +907,7 @@ void leaky_relu_kernel(TensorIteratorBase& iter, const Scalar& negval_) {
  }
}

void leaky_relu_backward_kernel(TensorIteratorBase& iter, const Scalar& negval_) {
static void leaky_relu_backward_kernel(TensorIteratorBase& iter, const Scalar& negval_) {
  if (at::isReducedFloatingType(iter.dtype())) {
    AT_DISPATCH_REDUCED_FLOATING_TYPES(iter.dtype(), "leaky_relu_backward_cpu", [&]() {
    auto zero_vec = Vectorized<float>((float)(0));

@ -369,7 +369,7 @@ void gemm_notrans_(
#endif // defined(__aarch64__) && !defined(C10_MOBILE)

#if !defined(C10_MOBILE)
float compute_dot(const at::Half* a, const at::Half* b, int64_t len) {
static float compute_dot(const at::Half* a, const at::Half* b, int64_t len) {
  return at::native::CPU_CAPABILITY::fp16_dot_with_fp32_arith(
      a, b, len);
}
@ -406,7 +406,7 @@ void gemm_transa_(
  });
}

float compute_dot(const at::BFloat16* a, const at::BFloat16* b, int64_t len) {
static float compute_dot(const at::BFloat16* a, const at::BFloat16* b, int64_t len) {
  return at::native::CPU_CAPABILITY::bf16_dot_with_fp32_arith(a, b, len);
}


@ -15,12 +15,12 @@ namespace at::native {
inline namespace CPU_CAPABILITY {

namespace {
bool reduced_input(ScalarType input_t, ScalarType output_t) {
static bool reduced_input(ScalarType input_t, ScalarType output_t) {
  return !at::isFloat8Type(input_t) && at::isReducedFloatingType(input_t) &&
      output_t == kFloat;
}

bool reduced_output(ScalarType input_t, ScalarType output_t) {
static bool reduced_output(ScalarType input_t, ScalarType output_t) {
  return !at::isFloat8Type(output_t) && at::isReducedFloatingType(output_t) &&
      input_t == kFloat;
}

@ -15,7 +15,7 @@ namespace at::native {
namespace {

template<typename scalar_t>
void apply_cross(const Tensor& result, const Tensor& a, const Tensor& b, const int64_t dim) {
static void apply_cross(const Tensor& result, const Tensor& a, const Tensor& b, const int64_t dim) {
  int64_t total = a.numel() / 3;
  int64_t a_stride = a.stride(dim);
  int64_t b_stride = b.stride(dim);
@ -68,7 +68,7 @@ void apply_cross(const Tensor& result, const Tensor& a, const Tensor& b, const i
  });
}

void cross_kernel_impl(const Tensor& result, const Tensor& a, const Tensor& b, const int64_t dim) {
static void cross_kernel_impl(const Tensor& result, const Tensor& a, const Tensor& b, const int64_t dim) {
  AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kBFloat16, kHalf, result.scalar_type(), "cross", [&]() {
    apply_cross<scalar_t>(result, a, b, dim);
  });

@ -422,19 +422,19 @@ void pdist_forward_kernel_impl(Tensor& result, const Tensor& self, const double
  });
}

void pdist_backward_kernel_impl(Tensor& result, const Tensor& grad, const Tensor& self, const double p, const Tensor& dist) {
static void pdist_backward_kernel_impl(Tensor& result, const Tensor& grad, const Tensor& self, const double p, const Tensor& dist) {
  AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "pdist_backward", [&] {
    Dist<scalar_t>::apply_backward_pdist(result, grad, self, p, dist);
  });
}

void cdist_kernel_impl(Tensor& result, const Tensor& x1, const Tensor& x2, const double p) {
static void cdist_kernel_impl(Tensor& result, const Tensor& x1, const Tensor& x2, const double p) {
  AT_DISPATCH_FLOATING_TYPES(result.scalar_type(), "cdist", [&] {
    Dist<scalar_t>::apply_cdist(result, x1, x2, p);
  });
}

void cdist_backward_kernel_impl(Tensor& result, const Tensor& grad, const Tensor& x1, const Tensor& x2, const double p, const Tensor& dist) {
static void cdist_backward_kernel_impl(Tensor& result, const Tensor& grad, const Tensor& x1, const Tensor& x2, const double p, const Tensor& dist) {
  AT_DISPATCH_FLOATING_TYPES(result.scalar_type(), "cdist_backward", [&] {
    Dist<scalar_t>::apply_backward_cdist(result, grad, x1, x2, p, dist);
  });

@ -27,7 +27,7 @@
namespace at::native {
namespace {

void cauchy_kernel(TensorIteratorBase& iter, double median, double sigma, std::optional<Generator> gen) {
static void cauchy_kernel(TensorIteratorBase& iter, double median, double sigma, std::optional<Generator> gen) {
  CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
  templates::cpu::cauchy_kernel(iter, median, sigma, generator);
}
@ -101,7 +101,7 @@ void bernoulli_scalar_kernel(const TensorBase &self, double p, std::optional<Gen
}
#endif

void exponential_kernel_default(TensorIteratorBase& iter, double lambda, std::optional<Generator> gen) {
static void exponential_kernel_default(TensorIteratorBase& iter, double lambda, std::optional<Generator> gen) {
  CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
  templates::cpu::exponential_kernel(iter, lambda, generator);
}
@ -198,12 +198,12 @@ void exponential_kernel(TensorIteratorBase &iter, double lambda, std::optional<G
}
#endif

void geometric_kernel(TensorIteratorBase& iter, double p, std::optional<Generator> gen) {
static void geometric_kernel(TensorIteratorBase& iter, double p, std::optional<Generator> gen) {
  CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
  templates::cpu::geometric_kernel(iter, p, generator);
}

void log_normal_kernel(TensorIteratorBase& iter, double mean, double std, std::optional<Generator> gen) {
static void log_normal_kernel(TensorIteratorBase& iter, double mean, double std, std::optional<Generator> gen) {
  CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
  templates::cpu::log_normal_kernel(iter, mean, std, generator);
}
@ -218,12 +218,12 @@ void normal_kernel(const TensorBase &self, double mean, double std, std::optiona
  templates::cpu::normal_kernel(self, mean, std, generator);
}

void random_from_to_kernel(TensorIteratorBase& iter, uint64_t range, int64_t base, std::optional<Generator> gen) {
static void random_from_to_kernel(TensorIteratorBase& iter, uint64_t range, int64_t base, std::optional<Generator> gen) {
  CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
  templates::cpu::random_from_to_kernel(iter, range, base, generator);
}

void random_kernel(TensorIteratorBase& iter, std::optional<Generator> gen) {
static void random_kernel(TensorIteratorBase& iter, std::optional<Generator> gen) {
  CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
  templates::cpu::random_kernel(iter, generator);
}
@ -231,7 +231,7 @@ void random_kernel(TensorIteratorBase& iter, std::optional<Generator> gen) {
// This is the special kernel to handle single specific case:
// from(inclusive) = std::numeric_limits<int64_t>::lowest()
// to(exclusive) = None (= std::numeric_limits<int64_t>::max() + 1)
void random_full_64_bits_range_kernel(TensorIteratorBase& iter, std::optional<Generator> gen) {
static void random_full_64_bits_range_kernel(TensorIteratorBase& iter, std::optional<Generator> gen) {
  CPUGeneratorImpl* generator = get_generator_or_default<CPUGeneratorImpl>(gen, detail::getDefaultCPUGenerator());
  templates::cpu::random_full_64_bits_range_kernel(iter, generator);
}

@ -85,7 +85,7 @@ struct RandomKernel {
// ==================================================== Normal ========================================================

#ifdef CPU_CAPABILITY_AVX2
void normal_fill_16_AVX2(float *data,
static void normal_fill_16_AVX2(float *data,
                         const __m256* two_pi,
                         const __m256* one,
                         const __m256* minus_two,
@ -136,7 +136,7 @@ void normal_fill_AVX2(const TensorBase &self, const float mean, const float std,
#endif

template <typename scalar_t>
void normal_fill_16(scalar_t *data, const scalar_t mean, const scalar_t std) {
static void normal_fill_16(scalar_t *data, const scalar_t mean, const scalar_t std) {
  for (const auto j : c10::irange(8)) {
    const scalar_t u1 = 1 - data[j]; // [0, 1) -> (0, 1] for log.
    const scalar_t u2 = data[j + 8];

@ -158,14 +158,14 @@ inline void _mul_reduce_max_fusion_kernel(
}

template <typename scalar_t>
inline scalar_t* conditional_data_ptr(scalar_t* ptr, scalar_t* ptr2) {
static inline scalar_t* conditional_data_ptr(scalar_t* ptr, scalar_t* ptr2) {
  TORCH_CHECK(ptr2 == nullptr);
  return ptr;
}

template <typename scalar_t,
          typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
inline scalar_t* conditional_data_ptr(float* ptr, scalar_t* ptr2) {
static inline scalar_t* conditional_data_ptr(float* ptr, scalar_t* ptr2) {
  return ptr2;
}


@ -441,7 +441,7 @@ struct ComputeLocation<scalar_t, GridSamplerPadding::Reflection, align_corners>
// See NOTE [ Grid Sample CPU Kernels ] for details.

template<typename scalar_t>
inline void
static inline void
mask_scatter_add(const scalar_t *src, scalar_t* base_addr,
                 const int_same_size_t<scalar_t> *offsets,
                 const int_same_size_t<scalar_t> *mask, int64_t len) {
@ -1030,7 +1030,7 @@ struct ApplyGridSample<scalar_t, 2, GridSamplerInterpolation::Bicubic,
// See NOTE [ Grid Sample CPU Kernels ] for details.

template<typename scalar_t, typename ApplyFn>
inline void grid_sample_2d_grid_slice_iterator(
static inline void grid_sample_2d_grid_slice_iterator(
    const TensorAccessor<const scalar_t, 3>& grid_slice, const ApplyFn &apply_fn) {
  int64_t out_H = grid_slice.size(0);
  int64_t out_W = grid_slice.size(1);

@ -259,7 +259,7 @@ void histogramdd_out_cpu_template(const Tensor& self, const std::optional<Tensor
 *
 * Refer to histogramdd_out_cpu_template for more details.
 */
void histogramdd_kernel_impl(const Tensor& self, const std::optional<Tensor>& weight, bool density,
static void histogramdd_kernel_impl(const Tensor& self, const std::optional<Tensor>& weight, bool density,
        Tensor& hist, const TensorList& bin_edges) {
    histogramdd_out_cpu_template<BINARY_SEARCH>(self, weight, density, hist, bin_edges);
}
@ -269,7 +269,7 @@ void histogramdd_kernel_impl(const Tensor& self, const std::optional<Tensor>& we
 *
 * Refer to histogramdd_out_cpu_template for more details.
 */
void histogramdd_linear_kernel_impl(const Tensor& self, const std::optional<Tensor>& weight,
static void histogramdd_linear_kernel_impl(const Tensor& self, const std::optional<Tensor>& weight,
        bool density, Tensor& hist, const TensorList& bin_edges, bool local_search) {
    if (local_search) {
        // histogramdd codepath: both hist and bin_edges are eventually returned as output,
@ -298,7 +298,7 @@ void infer_bin_edges_from_input(const Tensor& input, const int64_t N,
    std::copy(max_data, max_data + N, rightmost_edges.begin());
}

void histogram_select_outer_bin_edges_impl(const Tensor& input, const int64_t N,
static void histogram_select_outer_bin_edges_impl(const Tensor& input, const int64_t N,
        std::vector<double> &leftmost_edges, std::vector<double> &rightmost_edges) {
    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "histogramdd", [&]() {
        infer_bin_edges_from_input<scalar_t>(input, N, leftmost_edges, rightmost_edges);

@ -210,7 +210,7 @@ multinomial_with_replacement_apply(
  }
}

void multinomial_with_replacement_kernel_impl(
static void multinomial_with_replacement_kernel_impl(
    Tensor& result,
    const Tensor& self,
    const int64_t n_sample,

@ -96,7 +96,7 @@ struct ReplicationPad {
};

template <typename scalar_t>
inline void copy_stub(scalar_t* out, const scalar_t* in, int64_t size) {
static inline void copy_stub(scalar_t* out, const scalar_t* in, int64_t size) {
  using Vec = Vectorized<scalar_t>;
  int64_t d = 0;
  for (; d < size - (size % Vec::size()); d += Vec::size()) {
@ -112,7 +112,7 @@ inline void copy_stub(scalar_t* out, const scalar_t* in, int64_t size) {
}

template <typename scalar_t>
inline void add_stub(scalar_t* grad_in, const scalar_t* grad_out, int64_t size) {
static inline void add_stub(scalar_t* grad_in, const scalar_t* grad_out, int64_t size) {
  using Vec = Vectorized<scalar_t>;
  int64_t d = 0;
  for (; d < size - (size % Vec::size()); d += Vec::size()) {

@ -9,7 +9,7 @@
namespace at::native {
namespace {

void addcmul_cpu_kernel(TensorIteratorBase& iter, const Scalar& value) {
static void addcmul_cpu_kernel(TensorIteratorBase& iter, const Scalar& value) {
  ScalarType dtype = iter.common_dtype();
  if (at::isReducedFloatingType(dtype)) {
    AT_DISPATCH_REDUCED_FLOATING_TYPES(dtype, "addcmul_cpu_out", [&]() {
@ -50,7 +50,7 @@ void addcmul_cpu_kernel(TensorIteratorBase& iter, const Scalar& value) {
  }
}

void addcdiv_cpu_kernel(TensorIteratorBase& iter, const Scalar& value) {
static void addcdiv_cpu_kernel(TensorIteratorBase& iter, const Scalar& value) {
  ScalarType dtype = iter.common_dtype();
  if (at::isReducedFloatingType(dtype)) {
    AT_DISPATCH_REDUCED_FLOATING_TYPES(dtype, "addcdiv_cpu_out", [&]() {
@ -90,7 +90,7 @@ void addcdiv_cpu_kernel(TensorIteratorBase& iter, const Scalar& value) {
  }
}

void smooth_l1_backward_cpu_kernel(TensorIterator& iter, const Scalar& norm, double beta) {
static void smooth_l1_backward_cpu_kernel(TensorIterator& iter, const Scalar& norm, double beta) {
  ScalarType dtype = iter.dtype(0);
  if (dtype == kBFloat16) {
    auto norm_val = norm.to<float>();
@ -176,7 +176,7 @@ void smooth_l1_backward_cpu_kernel(TensorIterator& iter, const Scalar& norm, dou
  }
}

void huber_backward_cpu_kernel(TensorIterator& iter, const Scalar& norm, double delta) {
static void huber_backward_cpu_kernel(TensorIterator& iter, const Scalar& norm, double delta) {
  ScalarType dtype = iter.dtype(0);
  AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, dtype, "huber_backward_cpu_out", [&] {
    auto norm_val = norm.to<scalar_t>();
@ -215,7 +215,7 @@ void huber_backward_cpu_kernel(TensorIterator& iter, const Scalar& norm, double
  });
}

void mse_backward_cpu_kernel(TensorIterator& iter, const Scalar& value) {
static void mse_backward_cpu_kernel(TensorIterator& iter, const Scalar& value) {
  ScalarType dtype = iter.dtype(0);
  AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, dtype, "mse_backward_cpu_out", [&] {
    scalar_t scalar_val = value.to<scalar_t>();

@ -18,7 +18,7 @@ namespace {

using namespace vec;

void arange_kernel(TensorIterator& iter, const Scalar& scalar_start, const Scalar& scalar_steps, const Scalar& scalar_step) {
static void arange_kernel(TensorIterator& iter, const Scalar& scalar_start, const Scalar& scalar_steps, const Scalar& scalar_step) {
  AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "arange_cpu", [&]() {
    using accscalar_t = at::acc_type<scalar_t, false>;
    auto start = scalar_start.to<accscalar_t>();
@ -42,7 +42,7 @@ void arange_kernel(TensorIterator& iter, const Scalar& scalar_start, const Scala
  });
}

void linspace_kernel(TensorIterator& iter, const Scalar& scalar_start, const Scalar& scalar_end, int64_t steps) {
static void linspace_kernel(TensorIterator& iter, const Scalar& scalar_start, const Scalar& scalar_end, int64_t steps) {
  AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kHalf, kBFloat16, iter.dtype(), "linspace_cpu", [&]() {
    // step should be of double type for all integral types
    using step_t = std::conditional_t<std::is_integral_v<scalar_t>, double, scalar_t>;

@ -62,7 +62,7 @@ inline void reduce_all_impl(
  output.fill_(result);
}

void min_all_kernel_impl(Tensor& result, const Tensor& input) {
static void min_all_kernel_impl(Tensor& result, const Tensor& input) {
  if (input.scalar_type() == ScalarType::Bool) {
    TensorIterator iter = TensorIteratorConfig()
      .add_input(input)
@ -87,7 +87,7 @@ void min_all_kernel_impl(Tensor& result, const Tensor& input) {
  }
}

void max_all_kernel_impl(Tensor& result, const Tensor& input) {
static void max_all_kernel_impl(Tensor& result, const Tensor& input) {
  if (input.scalar_type() == ScalarType::Bool) {
    TensorIterator iter = TensorIteratorConfig()
      .add_input(input)
@ -167,7 +167,7 @@ inline void reduce_all_impl_vec_two_outputs(
  output2.fill_(result.second);
}

void aminmax_allreduce_kernel(
static void aminmax_allreduce_kernel(
    const Tensor& input,
    Tensor& min_result,
    Tensor& max_result) {

@ -28,7 +28,7 @@ namespace at::native { namespace {
using namespace vec;

template <typename scalar_t, typename func_t>
inline void cpu_cum_base_kernel(const Tensor& result,
static inline void cpu_cum_base_kernel(const Tensor& result,
    const Tensor& self,
    int64_t dim,
    const func_t& f,
@ -76,7 +76,7 @@ inline void cpu_cum_base_kernel(const Tensor& result,
  iter.for_each(loop, grain_size);
}

void cumsum_cpu_kernel(const Tensor& result, const Tensor& self, int64_t dim) {
static void cumsum_cpu_kernel(const Tensor& result, const Tensor& self, int64_t dim) {
  auto wrap_dim = maybe_wrap_dim(dim, self.dim());
  int64_t self_dim_size = ensure_nonempty_size(self, wrap_dim);

@ -95,7 +95,7 @@ void cumsum_cpu_kernel(const Tensor& result, const Tensor& self, int64_t dim) {
  });
}

void cumprod_cpu_kernel(const Tensor& result, const Tensor& self, int64_t dim) {
static void cumprod_cpu_kernel(const Tensor& result, const Tensor& self, int64_t dim) {
  auto wrap_dim = maybe_wrap_dim(dim, self.dim());
  int64_t self_dim_size = ensure_nonempty_size(self, wrap_dim);

@ -114,7 +114,7 @@ void cumprod_cpu_kernel(const Tensor& result, const Tensor& self, int64_t dim) {
  });
}

void logcumsumexp_cpu_kernel(Tensor& result, const Tensor& self, int64_t dim) {
static void logcumsumexp_cpu_kernel(Tensor& result, const Tensor& self, int64_t dim) {
  auto wrap_dim = maybe_wrap_dim(dim, self.dim());
  int64_t self_dim_size = ensure_nonempty_size(self, wrap_dim);

@ -135,7 +135,7 @@ void logcumsumexp_cpu_kernel(Tensor& result, const Tensor& self, int64_t dim) {
  });
}

void std_var_kernel_impl(TensorIterator& iter, double correction, bool take_sqrt) {
static void std_var_kernel_impl(TensorIterator& iter, double correction, bool take_sqrt) {
  AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "std_cpu", [&] {
    binary_kernel_reduce(
        iter,
@ -148,7 +148,7 @@ void std_var_kernel_impl(TensorIterator& iter, double correction, bool take_sqrt
  });
}

void prod_kernel_impl(TensorIterator& iter) {
static void prod_kernel_impl(TensorIterator& iter) {
  // Workaround for the error: '*' in boolean context, suggest '&&' instead
  if (iter.dtype() == ScalarType::Bool) {
    using scalar_t = bool;
@ -203,7 +203,7 @@ void norm_kernel_cpu_impl(TensorIterator& iter, const double& val) {
  }
}

void norm_kernel_tensor_iterator_impl(
static void norm_kernel_tensor_iterator_impl(
    TensorIterator& iter,
    const Scalar& p) {
  double val = 0;
@ -274,7 +274,7 @@ void norm_kernel_tensor_iterator_impl(
  }
}

void and_kernel_impl(TensorIterator& iter) {
static void and_kernel_impl(TensorIterator& iter) {
  if (iter.dtype() == ScalarType::Byte) {
    // Refer [all, any : uint8 compatibility]
    binary_kernel_reduce_vec(
@ -312,7 +312,7 @@ void and_kernel_impl(TensorIterator& iter) {
  }
}

void or_kernel_impl(TensorIterator& iter) {
static void or_kernel_impl(TensorIterator& iter) {
  if (iter.dtype() == ScalarType::Byte) {
    // Refer [all, any : uint8 compatibility]
    binary_kernel_reduce_vec(
@ -346,7 +346,7 @@ struct MinValuesOps: public at::native::MinOps<scalar_t> {
  }
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
void min_values_kernel_impl(TensorIterator& iter) {
 | 
			
		||||
static void min_values_kernel_impl(TensorIterator& iter) {
 | 
			
		||||
  if (iter.dtype() == kLong) {
 | 
			
		||||
    // This case is special because of Vectorized<int64_t> does not
 | 
			
		||||
    // handle upper_bound<int64_t>().
 | 
			
		||||
@ -367,7 +367,7 @@ void min_values_kernel_impl(TensorIterator& iter) {
 | 
			
		||||
  });
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void max_values_kernel_impl(TensorIterator& iter) {
 | 
			
		||||
static void max_values_kernel_impl(TensorIterator& iter) {
 | 
			
		||||
  AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.dtype(), "max_values_cpu", [&iter] {
 | 
			
		||||
    binary_kernel_reduce_vec(
 | 
			
		||||
      iter,
 | 
			
		||||
@ -377,7 +377,7 @@ void max_values_kernel_impl(TensorIterator& iter) {
 | 
			
		||||
  });
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void argmax_kernel_impl(TensorIterator &iter) {
 | 
			
		||||
static void argmax_kernel_impl(TensorIterator &iter) {
 | 
			
		||||
  AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(1), "argmax_cpu", [&] {
 | 
			
		||||
    if (is_reduce_lastdim(iter)) {
 | 
			
		||||
      using arg_t = std::pair<scalar_t, int64_t>;
 | 
			
		||||
@ -401,7 +401,7 @@ void argmax_kernel_impl(TensorIterator &iter) {
 | 
			
		||||
  });
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void argmin_kernel_impl(TensorIterator &iter) {
 | 
			
		||||
static void argmin_kernel_impl(TensorIterator &iter) {
 | 
			
		||||
  AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(1), "argmin_cpu", [&] {
 | 
			
		||||
    if (is_reduce_lastdim(iter)) {
 | 
			
		||||
      using arg_t = std::pair<scalar_t, int64_t>;
 | 
			
		||||
@ -459,7 +459,7 @@ struct XorSumOps {
 | 
			
		||||
  }
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
void xor_sum_kernel_impl(TensorIterator& iter) {
 | 
			
		||||
static void xor_sum_kernel_impl(TensorIterator& iter) {
 | 
			
		||||
  // Use iter.dtype(1) to dispatch based on the type of the input tensor
 | 
			
		||||
  AT_DISPATCH_ALL_TYPES_AND3(
 | 
			
		||||
      kBFloat16, kHalf, kBool, iter.dtype(1), "xor_sum_cpu", [&] {
 | 
			
		||||
 | 
			
		||||
@ -41,7 +41,7 @@ public:
    *self_data = c10::load(self_data) && c10::load(src_data);
  }
};
ReduceMultiply reduce_multiply;
static ReduceMultiply reduce_multiply;

class ReduceAdd {
public:
@ -51,7 +51,7 @@ public:
    *self_data += opmath_t(c10::load(src_data));
  }
};
ReduceAdd reduce_add;
static ReduceAdd reduce_add;

class ReduceMean {
public:
@ -61,7 +61,7 @@ public:
    *self_data += opmath_t(c10::load(src_data));
  }
};
ReduceMean reduce_mean;
static ReduceMean reduce_mean;

class ReduceMaximum {
public:
@ -73,7 +73,7 @@ public:
    *self_data = at::_isnan<scalar_t>(src_value) ? opmath_t(src_value) : std::max(self_value, opmath_t(src_value));
  }
};
ReduceMaximum reduce_maximum;
static ReduceMaximum reduce_maximum;

class ReduceMinimum {
public:
@ -85,7 +85,7 @@ public:
    *self_data = at::_isnan<scalar_t>(src_value) ? opmath_t(src_value) : std::min(self_value, opmath_t(src_value));
  }
};
ReduceMinimum reduce_minimum;
static ReduceMinimum reduce_minimum;

class TensorAssign {
public:
@ -95,7 +95,7 @@ public:
    *self_data = opmath_t(c10::load(src_data));
  }
};
TensorAssign tensor_assign;
static TensorAssign tensor_assign;

template <bool is_scatter_like = true>
struct _cpu_scatter_gather_dim_loop {

@ -968,7 +968,7 @@ struct vec_host_softmax_backward {
  }
};

void softmax_lastdim_kernel_impl(
static void softmax_lastdim_kernel_impl(
    const Tensor& result,
    const Tensor& self) {
  AT_DISPATCH_FLOATING_TYPES_AND2(
@ -977,13 +977,13 @@ void softmax_lastdim_kernel_impl(
      [&] { vec_host_softmax_lastdim<scalar_t, false>::apply(result, self); });
}

void softmax_kernel_impl(const Tensor& result, const Tensor& self, int64_t dim) {
static void softmax_kernel_impl(const Tensor& result, const Tensor& self, int64_t dim) {
  AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::BFloat16, at::ScalarType::Half, self.scalar_type(),
    "softmax_kernel_impl",
    [&] { vec_softmax<scalar_t, false>::apply(result, self, dim); });
}

void log_softmax_lastdim_kernel_impl(
static void log_softmax_lastdim_kernel_impl(
    const Tensor& result,
    const Tensor& self) {
  AT_DISPATCH_FLOATING_TYPES_AND2(
@ -992,13 +992,13 @@ void log_softmax_lastdim_kernel_impl(
      [&] { vec_host_softmax_lastdim<scalar_t, true>::apply(result, self); });
}

void log_softmax_kernel_impl(const Tensor& result, const Tensor& self, int64_t dim) {
static void log_softmax_kernel_impl(const Tensor& result, const Tensor& self, int64_t dim) {
  AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::BFloat16, at::ScalarType::Half, self.scalar_type(),
    "softmax_kernel_impl",
    [&] { vec_softmax<scalar_t, true>::apply(result, self, dim); });
}

void softmax_backward_lastdim_kernel_impl(
static void softmax_backward_lastdim_kernel_impl(
    const Tensor& grad_input,
    const Tensor& grad,
    const Tensor& output) {
@ -1010,7 +1010,7 @@ void softmax_backward_lastdim_kernel_impl(
      });
}

void log_softmax_backward_lastdim_kernel_impl(
static void log_softmax_backward_lastdim_kernel_impl(
    const Tensor& grad_input,
    const Tensor& grad,
    const Tensor& output) {
@ -1022,7 +1022,7 @@ void log_softmax_backward_lastdim_kernel_impl(
      });
}

void softmax_backward_kernel_impl(
static void softmax_backward_kernel_impl(
    const Tensor& grad_input,
    const Tensor& grad,
    const Tensor& output,
@ -1038,7 +1038,7 @@ void softmax_backward_kernel_impl(
      });
}

void log_softmax_backward_kernel_impl(
static void log_softmax_backward_kernel_impl(
    const Tensor& grad_input,
    const Tensor& grad,
    const Tensor& output,

@ -90,7 +90,7 @@ struct KeyValueCompDesc {
};

#ifdef USE_FBGEMM
bool can_use_radix_sort(const TensorBase& values, const bool descending) {
static bool can_use_radix_sort(const TensorBase& values, const bool descending) {
  // radix_sort can be used only for 1D data
  if (values.dim() != 1) return false;
  // radix_sort sorts in ascending order
@ -106,7 +106,7 @@ bool can_use_radix_sort(const TensorBase& values, const bool descending) {
  return true;
}

void parallel_sort1d_kernel(
static void parallel_sort1d_kernel(
    const TensorBase& values,
    const TensorBase& indices) {
  AT_DISPATCH_INTEGRAL_TYPES(values.scalar_type(), "parallel_sort1d_kernel", [&] {
@ -140,7 +140,7 @@ void parallel_sort1d_kernel(
#endif

template <typename scalar_t, typename value_accessor_t, typename indices_accessor_t>
inline void sort_kernel_impl(const value_accessor_t& value_accessor,
static inline void sort_kernel_impl(const value_accessor_t& value_accessor,
            const indices_accessor_t& indices_accessor,
            int64_t dim_size, bool descending, bool stable) {
  auto composite_accessor = CompositeRandomAccessorCPU<
@ -165,7 +165,7 @@ inline void sort_kernel_impl(const value_accessor_t& value_accessor,
  }
}

void sort_kernel(
static void sort_kernel(
    const TensorBase& self,
    const TensorBase& values,
    const TensorBase& indices,
@ -222,7 +222,7 @@ void sort_kernel(
  );
}

void topk_kernel(
static void topk_kernel(
    const TensorBase &values,
    const TensorBase &indices,
    const TensorBase &self,

@ -286,12 +286,12 @@ struct CastStoreAccumulate {
};

template <typename StorePolicy, typename scalar_t>
void store(char * C10_RESTRICT data, int64_t stride, int64_t index, scalar_t value) {
static void store(char * C10_RESTRICT data, int64_t stride, int64_t index, scalar_t value) {
  StorePolicy::store(data, stride, index, value);
}

template <typename StorePolicy, typename scalar_t, size_t numel>
void store(char * C10_RESTRICT data, int64_t stride, int64_t index,
static void store(char * C10_RESTRICT data, int64_t stride, int64_t index,
                  const std::array<scalar_t, numel> &values) {
  auto *base_ptr = data + stride * index;
  for (const auto k : c10::irange(numel)) {
@ -301,7 +301,7 @@ void store(char * C10_RESTRICT data, int64_t stride, int64_t index,
}

template <typename StorePolicy, typename scalar_t>
void store(char * C10_RESTRICT data, int64_t stride, int64_t index,
static void store(char * C10_RESTRICT data, int64_t stride, int64_t index,
                  const Vectorized<scalar_t> &values) {
  using vec_t = Vectorized<scalar_t>;
  alignas(64) std::array<scalar_t, vec_t::size()> array_values{};

@ -29,7 +29,7 @@
namespace at::native { namespace {

template <typename scalar_t, typename scalar_t_2 = int64_t, typename loop1d_t>
inline void compare_base_kernel_core(
static inline void compare_base_kernel_core(
    const Tensor& result1,
    const Tensor& result2,
    const Tensor& self,
@ -71,7 +71,7 @@ inline void compare_base_kernel_core(
}

template <typename scalar_t, typename scalar_t_2=int64_t, typename func_t>
inline void compare_base_kernel(const Tensor& result1, const Tensor& result2,
static inline void compare_base_kernel(const Tensor& result1, const Tensor& result2,
    const Tensor& self,
    int64_t dim,
    bool keepdim,
@ -98,7 +98,7 @@ inline void compare_base_kernel(const Tensor& result1, const Tensor& result2,
      result1, result2, self, dim, keepdim, loop);
}

void min_kernel_impl(
static void min_kernel_impl(
    const Tensor& result,
    const Tensor& indice,
    const Tensor& self,
@ -131,7 +131,7 @@ void min_kernel_impl(
  });
}

void max_kernel_impl(
static void max_kernel_impl(
    const Tensor& result,
    const Tensor& indice,
    const Tensor& self,
@ -164,7 +164,7 @@ void max_kernel_impl(
  });
}

void aminmax_kernel(
static void aminmax_kernel(
    const Tensor& self,
    int64_t dim,
    bool keepdim,
@ -212,7 +212,7 @@ void aminmax_kernel(
  });
}

void where_kernel_impl(TensorIterator &iter) {
static void where_kernel_impl(TensorIterator &iter) {
  AT_DISPATCH_V2(
    iter.dtype(), "where_cpu", [&] {
      cpu_kernel(
@ -224,19 +224,19 @@ void where_kernel_impl(TensorIterator &iter) {
  kComplexHalf, kHalf, kBFloat16, kBool, AT_EXPAND(AT_ALL_TYPES_AND_COMPLEX), AT_EXPAND(AT_FLOAT8_TYPES));
}

void isposinf_kernel_impl(TensorIteratorBase& iter) {
static void isposinf_kernel_impl(TensorIteratorBase& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.input_dtype(), "isposinf_cpu", [&]() {
    cpu_kernel(iter, [](scalar_t a) -> bool { return a == std::numeric_limits<scalar_t>::infinity(); });
  });
}

void isneginf_kernel_impl(TensorIteratorBase& iter) {
static void isneginf_kernel_impl(TensorIteratorBase& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.input_dtype(), "isneginf_cpu", [&]() {
    cpu_kernel(iter, [](scalar_t a) -> bool { return a == -std::numeric_limits<scalar_t>::infinity(); });
  });
}

void mode_kernel_impl(
static void mode_kernel_impl(
    Tensor& values,
    Tensor& indices,
    const Tensor& self,
@ -308,7 +308,7 @@ void mode_kernel_impl(

// Default brute force implementation of isin(). Used when the number of test elements is small.
// Iterates through each element and checks it against each test element.
void isin_default_kernel_cpu(
static void isin_default_kernel_cpu(
    const Tensor& elements,
    const Tensor& test_elements,
    bool invert,
@ -339,7 +339,7 @@ void isin_default_kernel_cpu(
  });
}

void clamp_kernel_impl(TensorIteratorBase& iter) {
static void clamp_kernel_impl(TensorIteratorBase& iter) {
  AT_DISPATCH_ALL_TYPES_AND2(kBFloat16, kHalf, iter.common_dtype(), "clamp_cpu", [&]() {
    cpu_kernel_vec(iter,
      [](scalar_t a, scalar_t min, scalar_t max) -> scalar_t {
@ -355,7 +355,7 @@ void clamp_kernel_impl(TensorIteratorBase& iter) {
  });
}

void clamp_scalar_kernel_impl(TensorIteratorBase& iter, const Scalar& min_, const Scalar& max_) {
static void clamp_scalar_kernel_impl(TensorIteratorBase& iter, const Scalar& min_, const Scalar& max_) {
  AT_DISPATCH_ALL_TYPES_AND2(kBFloat16, kHalf, iter.common_dtype(), "clamp_scalar_cpu", [&]() {
    const auto min = min_.to<scalar_t>();
    const auto max = max_.to<scalar_t>();
@ -371,7 +371,7 @@ void clamp_scalar_kernel_impl(TensorIteratorBase& iter, const Scalar& min_, cons
  });
}

void clamp_max_scalar_kernel_impl(TensorIteratorBase& iter, Scalar max_) {
static void clamp_max_scalar_kernel_impl(TensorIteratorBase& iter, Scalar max_) {
  AT_DISPATCH_ALL_TYPES_AND2(kBFloat16, kHalf, iter.common_dtype(), "clamp_max_scalar_cpu", [&]() {
    const auto max = max_.to<scalar_t>();
    const Vectorized<scalar_t> max_vec(max);
@ -385,7 +385,7 @@ void clamp_max_scalar_kernel_impl(TensorIteratorBase& iter, Scalar max_) {
  });
}

void clamp_min_scalar_kernel_impl(TensorIteratorBase& iter, Scalar min_) {
static void clamp_min_scalar_kernel_impl(TensorIteratorBase& iter, Scalar min_) {
  AT_DISPATCH_ALL_TYPES_AND2(kBFloat16, kHalf, iter.common_dtype(), "clamp_min_scalar_cpu", [&]() {
    const auto min = min_.to<scalar_t>();
    const Vectorized<scalar_t> min_vec(min);

@ -13,7 +13,7 @@ namespace at::native {
namespace {

template <typename scalar_t>
inline void cadd(
static inline void cadd(
    scalar_t* z,
    const scalar_t* x,
    const scalar_t* y,
@ -34,7 +34,7 @@ inline void cadd(
}

template <typename scalar_t>
void unfolded2d_acc(
static void unfolded2d_acc(
    scalar_t* finput_data,
    scalar_t* input_data,
    int64_t kH,
@ -113,7 +113,7 @@ void unfolded2d_acc(
}

template <typename scalar_t>
void unfolded2d_acc_channels_last(
static void unfolded2d_acc_channels_last(
    scalar_t* finput_data,
    scalar_t* input_data,
    int64_t kH,
@ -225,7 +225,7 @@ void unfolded2d_acc_kernel(
}

template <typename scalar_t>
void unfolded2d_copy(
static void unfolded2d_copy(
    const scalar_t* input_data,
    scalar_t* finput_data,
    int64_t kH,
@ -326,7 +326,7 @@ void unfolded2d_copy(
}

template <typename scalar_t>
void unfolded2d_copy_channels_last(
static void unfolded2d_copy_channels_last(
    const scalar_t* input_data,
    scalar_t* finput_data,
    int64_t kH,

@ -157,13 +157,13 @@ struct Interpolate<1, scalar_t, opmath_t, index_t, 2> {
};

template <int n, typename scalar_t, typename index_t, int interp_size>
inline scalar_t interpolate(char* src, char** data, const int64_t* strides, int64_t i) {
static inline scalar_t interpolate(char* src, char** data, const int64_t* strides, int64_t i) {
  using opmath_t = at::opmath_type<scalar_t>;
  return Interpolate<n, scalar_t, opmath_t, index_t, interp_size>::eval(src, data, strides, i);
}

template <typename scalar_t, typename index_t>
inline scalar_t interpolate_aa_single_dim_zero_strides(
static inline scalar_t interpolate_aa_single_dim_zero_strides(
    char* src,
    char** data,
    const index_t ids_stride) {
@ -187,7 +187,7 @@ inline scalar_t interpolate_aa_single_dim_zero_strides(
}

template <typename scalar_t, typename index_t>
inline scalar_t interpolate_aa_single_dim(
static inline scalar_t interpolate_aa_single_dim(
    char* src,
    char** data,
    const int64_t* strides,
@ -213,7 +213,7 @@ inline scalar_t interpolate_aa_single_dim(
}

template<int m>
inline bool is_zero_stride(const int64_t* strides) {
static inline bool is_zero_stride(const int64_t* strides) {
  bool output = strides[0] == 0;
  for (const auto i : c10::irange(1, m)) {
    output &= (strides[i] == 0);
@ -222,7 +222,7 @@ inline bool is_zero_stride(const int64_t* strides) {
}

template <typename scalar_t, typename index_t, int interp_size>
inline bool is_contiguous_stride(const int64_t* strides) {
static inline bool is_contiguous_stride(const int64_t* strides) {
  bool output = (strides[0] == sizeof(index_t)) && (strides[1] == sizeof(scalar_t));
  for (int i=2; i<2 * interp_size; i+=2) {
    output &= (strides[i] == sizeof(index_t)) && (strides[i + 1] == sizeof(scalar_t));
@ -282,13 +282,13 @@ struct CheckAlmostAllZeroStrides<0, non_zero_stride_dim, scalar_t, index_t, inte
};

template <int n, int s, typename scalar_t, typename index_t, int interp_size>
inline bool check_almost_all_zero_stride(const int64_t* strides) {
static inline bool check_almost_all_zero_stride(const int64_t* strides) {
  return CheckAlmostAllZeroStrides<n, s, scalar_t, index_t, interp_size>::eval(strides);
}

// Helper method to compute interpolation for nearest, linear, cubic modes
template <typename scalar_t, typename index_t, int out_ndims, int interp_size>
inline void basic_loop(char** data, const int64_t* strides, int64_t n) {
static inline void basic_loop(char** data, const int64_t* strides, int64_t n) {
  char* dst = data[0];
  char* src = data[1];
  for (const auto i : c10::irange(n)) {
@ -298,7 +298,7 @@ inline void basic_loop(char** data, const int64_t* strides, int64_t n) {
}

template <typename scalar_t>
inline void basic_loop_aa_vertical(
static inline void basic_loop_aa_vertical(
    char** data,
    const int64_t* strides,
    int64_t n,
@ -354,7 +354,7 @@ inline void basic_loop_aa_vertical<uint8_t>(
}

template <typename scalar_t>
inline void basic_loop_aa_horizontal(
static inline void basic_loop_aa_horizontal(
    char** data,
    const int64_t* strides,
    int64_t n,

@ -35,7 +35,7 @@ Like PIL, Pillow is licensed under the open source HPND License

namespace {

inline __m128i mm_cvtsi32_si128(const uint8_t* C10_RESTRICT ptr, bool i32_aligned) {
static inline __m128i mm_cvtsi32_si128(const uint8_t* C10_RESTRICT ptr, bool i32_aligned) {
  int32_t v;
  if (i32_aligned) {
    v = *(const int32_t*)ptr;
@ -45,11 +45,11 @@ inline __m128i mm_cvtsi32_si128(const uint8_t* C10_RESTRICT ptr, bool i32_aligne
  return _mm_cvtsi32_si128(v);
}

inline __m128i mm_cvtepu8_epi32(const uint8_t* C10_RESTRICT ptr, bool i32_aligned) {
static inline __m128i mm_cvtepu8_epi32(const uint8_t* C10_RESTRICT ptr, bool i32_aligned) {
  return _mm_cvtepu8_epi32(mm_cvtsi32_si128(ptr, i32_aligned));
}

inline void _write_endline_rgb_as_uint32(
static inline void _write_endline_rgb_as_uint32(
    uint8_t* C10_RESTRICT output,
    uint32_t data
) {

@ -838,7 +838,7 @@ void dyn_quant_pack_4bit_weight_kernel(
  }
}

void ref_dyn_quant_matmul_4bit_channelwise_kernel(
static void ref_dyn_quant_matmul_4bit_channelwise_kernel(
    size_t m,
    size_t n,
    size_t k,
@ -997,7 +997,7 @@ void ref_dyn_quant_matmul_4bit_channelwise_kernel(
  }
}

void ref_dyn_quant_matmul_4bit_groupwise_kernel(
static void ref_dyn_quant_matmul_4bit_groupwise_kernel(
    size_t m,
    size_t n,
    size_t k,

@ -100,7 +100,7 @@ inline void tinygemm_kernel(

#elif defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)

inline float _mm256_reduce_add_ps(__m256& v) {
static inline float _mm256_reduce_add_ps(__m256& v) {
  __m256 v1 = _mm256_permute2f128_ps(v, v, 0x1);
  v = _mm256_add_ps(v, v1);
  v1 = _mm256_shuffle_ps(v, v, 0x4E);
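
For reference, the AVX2 sequence above is a horizontal reduction: it folds the upper 128-bit half of `v` onto the lower half, then keeps pairing lanes until a single lane holds the total of all eight floats. A scalar C++ sketch of the value it computes (illustrative helper, not part of this commit):

#include <array>

// Sketch: same result as the permute/add/shuffle ladder in
// _mm256_reduce_add_ps, computed one lane at a time.
static float reduce_add_scalar(const std::array<float, 8>& lanes) {
  float sum = 0.0f;
  for (float lane : lanes) {
    sum += lane;
  }
  return sum;
}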

@ -296,7 +296,7 @@ static bool isSupportedHipLtROCmArch(int index) {
#endif

template <typename scalar_t>
void launchTunableGemmAndBias(cublasCommonArgs &args, const Scalar& alpha, const scalar_t* bias, cuda::blas::GEMMAndBiasActivationEpilogue activation) {
static void launchTunableGemmAndBias(cublasCommonArgs &args, const Scalar& alpha, const scalar_t* bias, cuda::blas::GEMMAndBiasActivationEpilogue activation) {
  bool transa_ = ((args.transa != 'n') && (args.transa != 'N'));
  bool transb_ = ((args.transb != 'n') && (args.transb != 'N'));
  at::cuda::tunable::GemmAndBiasParams<scalar_t> params;

@ -163,7 +163,7 @@ bool has_large_prime_factor(int64_t n) {
}

// Execute a general fft operation (can be c2c, onesided r2c or onesided c2r)
const Tensor& _exec_fft(Tensor& out, const Tensor& self, IntArrayRef out_sizes,
static const Tensor& _exec_fft(Tensor& out, const Tensor& self, IntArrayRef out_sizes,
                         IntArrayRef dim, bool forward) {
  const auto ndim = self.dim();
  const int64_t signal_ndim = dim.size();

@ -252,7 +252,7 @@ struct CacheKeyFusedWrapper : ParamsWrapper<CacheKeyFused> {
  }
};

int getLRUCacheLimit() {
static int getLRUCacheLimit() {
  constexpr int DEFAULT_LIMIT =
      10000; // roughly corresponds to 2GiB assuming 200KiB per ExecutionPlan
  // 0 is used to indicate no limit
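
(The quoted figure checks out: 10000 plans × 200 KiB per plan = 2,000,000 KiB, about 1.9 GiB, i.e. roughly the 2 GiB named in the comment.)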

@ -99,9 +99,6 @@ Tensor getTensorView(const Tensor& t, MPSShape* shape);
MPSShape* getMPSShape(const TensorBase& t, c10::MemoryFormat memory_format = MemoryFormat::Contiguous);
MPSShape* getMPSShape(IntArrayRef sizes, c10::MemoryFormat memory_format = MemoryFormat::Contiguous);

// Determines whether a tensor is too large to use MPSGraph
bool isTooLargeForMPSGraph(const Tensor& tensor, bool useMPSStridedAPI = true);

static inline id<MTLBuffer> getMTLBufferStorage(const TensorBase& tensor) {
  return __builtin_bit_cast(id<MTLBuffer>, tensor.storage().data());
}

@ -439,22 +439,6 @@ static void check_mps_shape(MPSShape* shape) {
  }
}

bool isTooLargeForMPSGraph(const Tensor& tensor, bool useMPSStridedAPI) {
  static const bool is_macOS_15_0_or_newer = is_macos_13_or_newer(MacOSVersion::MACOS_VER_15_0_PLUS);
  if ((!tensor.is_contiguous() || tensor.storage_offset()) && useMPSStridedAPI && is_macOS_15_0_or_newer) {
    auto storage_numel = tensor.storage().nbytes() / tensor.element_size() - tensor.storage_offset();
    if (storage_numel > std::numeric_limits<int32_t>::max()) {
      return true;
    }
  }
  for (auto size : tensor.sizes()) {
    if (size > std::numeric_limits<int32_t>::max()) {
      return true;
    }
  }
  return false;
}

MPSNDArray* getMPSNDArray(const TensorBase& t, MPSShape* sizes, MPSShape* strides) {
  id<MTLBuffer> srcBuf = getMTLBufferStorage(t);


@ -1,18 +0,0 @@
#pragma once
#include <c10/metal/common.h>

template <unsigned N = c10::metal::max_ndim, typename idx_type_t = int64_t>
struct CatLargeSharedParams {
  int32_t ndim;
  int32_t cat_dim;
  ::c10::metal::array<idx_type_t, N> output_strides;
  ::c10::metal::array<idx_type_t, N> output_sizes;
};

template <unsigned N = c10::metal::max_ndim, typename idx_type_t = int64_t>
struct CatLargeInputParams {
  idx_type_t cat_dim_offset;
  idx_type_t input_element_offset;
  ::c10::metal::array<idx_type_t, N> input_strides;
  ::c10::metal::array<idx_type_t, N> input_sizes;
};
@ -1,82 +0,0 @@
#include <ATen/native/mps/kernels/Shape.h>
#include <c10/metal/utils.h>
#include <metal_array>
#include <metal_stdlib>

using namespace metal;
using namespace c10::metal;

template <typename T_in, typename T_out>
kernel void cat_large(
    constant T_in* input [[buffer(0)]],
    device T_out* output [[buffer(1)]],
    constant CatLargeSharedParams<>& shared_params [[buffer(2)]],
    constant CatLargeInputParams<>& input_params [[buffer(3)]],
    uint tid [[thread_position_in_grid]]) {
  auto ndim = shared_params.ndim;
  auto cat_dim = shared_params.cat_dim;
  constant auto& output_strides = shared_params.output_strides;
  constant auto& output_sizes = shared_params.output_sizes;

  auto cat_dim_offset = input_params.cat_dim_offset;
  auto input_element_offset = input_params.input_element_offset;
  constant auto& input_strides = input_params.input_strides;
  constant auto& input_sizes = input_params.input_sizes;

  auto input_element_idx = static_cast<int64_t>(tid) + input_element_offset;
  int64_t input_offset = 0;
  int64_t output_offset = 0;

  for (auto dim = ndim - 1; dim >= 0; dim--) {
    auto dim_size = input_sizes[dim];
    auto input_dim_idx = input_element_idx % dim_size;
    auto output_dim_idx =
        input_dim_idx + ((dim == cat_dim) ? cat_dim_offset : 0);

    input_offset += input_strides[dim] * input_dim_idx;
    output_offset += output_strides[dim] * output_dim_idx;

    input_element_idx = input_element_idx / dim_size;
  }

  output[output_offset] = static_cast<T_out>(input[input_offset]);
}

#define REGISTER_CAT_LARGE_OP(T_in, T_out)                           \
  template [[host_name("cat_large_" #T_in "_" #T_out)]]              \
  kernel void cat_large<T_in, T_out>(                                \
      constant T_in * input [[buffer(0)]],                           \
      device T_out * output [[buffer(1)]],                           \
      constant CatLargeSharedParams<> & shared_params [[buffer(2)]], \
      constant CatLargeInputParams<> & input_params [[buffer(3)]],   \
      uint tid [[thread_position_in_grid]]);

#define REGISTER_CAT_LARGE_OP_ALL_INPUT_TYPES(T_out) \
  REGISTER_CAT_LARGE_OP(float, T_out);               \
  REGISTER_CAT_LARGE_OP(half, T_out);                \
  REGISTER_CAT_LARGE_OP(bfloat, T_out);              \
  REGISTER_CAT_LARGE_OP(int, T_out);                 \
  REGISTER_CAT_LARGE_OP(uint, T_out);                \
  REGISTER_CAT_LARGE_OP(long, T_out);                \
  REGISTER_CAT_LARGE_OP(ulong, T_out);               \
  REGISTER_CAT_LARGE_OP(short, T_out);               \
  REGISTER_CAT_LARGE_OP(ushort, T_out);              \
  REGISTER_CAT_LARGE_OP(char, T_out);                \
  REGISTER_CAT_LARGE_OP(uchar, T_out);               \
  REGISTER_CAT_LARGE_OP(bool, T_out);

REGISTER_CAT_LARGE_OP_ALL_INPUT_TYPES(float);
REGISTER_CAT_LARGE_OP_ALL_INPUT_TYPES(half);
REGISTER_CAT_LARGE_OP_ALL_INPUT_TYPES(bfloat);
REGISTER_CAT_LARGE_OP_ALL_INPUT_TYPES(int);
REGISTER_CAT_LARGE_OP_ALL_INPUT_TYPES(uint);
REGISTER_CAT_LARGE_OP_ALL_INPUT_TYPES(long);
REGISTER_CAT_LARGE_OP_ALL_INPUT_TYPES(ulong);
REGISTER_CAT_LARGE_OP_ALL_INPUT_TYPES(short);
REGISTER_CAT_LARGE_OP_ALL_INPUT_TYPES(ushort);
REGISTER_CAT_LARGE_OP_ALL_INPUT_TYPES(char);
REGISTER_CAT_LARGE_OP_ALL_INPUT_TYPES(uchar);
REGISTER_CAT_LARGE_OP_ALL_INPUT_TYPES(bool);

REGISTER_CAT_LARGE_OP(float2, float2);
REGISTER_CAT_LARGE_OP(half2, half2);
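
The deleted kernel above turns a flat thread id into a multi-dimensional coordinate by repeated mod/div over the input sizes (least-significant dimension first), then shifts that coordinate by `cat_dim_offset` along the concatenation dimension. A host-side C++ sketch of the same index arithmetic (hypothetical helper, for illustration only):

#include <cstdint>
#include <vector>

// Sketch: flat input/output offsets for one element, mirroring cat_large.
// `idx` plays the role of tid + input_element_offset in the Metal kernel.
static void cat_large_offsets(int64_t idx, int cat_dim, int64_t cat_dim_offset,
                              const std::vector<int64_t>& input_sizes,
                              const std::vector<int64_t>& input_strides,
                              const std::vector<int64_t>& output_strides,
                              int64_t& input_offset, int64_t& output_offset) {
  input_offset = 0;
  output_offset = 0;
  for (int dim = static_cast<int>(input_sizes.size()) - 1; dim >= 0; dim--) {
    const int64_t dim_size = input_sizes[dim];
    const int64_t input_dim_idx = idx % dim_size;  // coordinate within the input
    const int64_t output_dim_idx =
        input_dim_idx + (dim == cat_dim ? cat_dim_offset : 0);  // shifted in output
    input_offset += input_strides[dim] * input_dim_idx;
    output_offset += output_strides[dim] * output_dim_idx;
    idx /= dim_size;  // move on to the next, more significant dimension
  }
}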
@ -2,13 +2,9 @@
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/MemoryOverlap.h>
#include <ATen/WrapDimUtils.h>
#include <ATen/mps/MPSProfiler.h>
#include <ATen/native/TensorShape.h>
#include <ATen/native/TypeProperties.h>
#include <ATen/native/mps/OperationUtils.h>
#include <ATen/native/mps/kernels/Shape.h>

#include <fmt/format.h>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
@ -20,13 +16,6 @@
#endif

namespace at::native {

#ifndef PYTORCH_JIT_COMPILE_SHADERS
static auto& lib = mps::MetalShaderLibrary::getBundledLibrary();
#else
#include <ATen/native/mps/Shape_metallib.h>
#endif

namespace mps {

// Produces a shape with the `dim` dimension set to 0.
@ -68,70 +57,6 @@ static void check_shape_except_dim(const Tensor& first, const Tensor& second, in
                ")");
  }
}

// This implementation of cat is used only if one of the inputs or the output is
// too large to use MPSGraph.
// NOTE: `output` is expected to already have the correct size.
static void cat_out_large_tensor_mps(const ITensorListRef& inputs, int64_t dimension, const Tensor& output) {
  CatLargeSharedParams shared_params;

  shared_params.ndim = output.dim();
  shared_params.cat_dim = dimension;

  for (const auto dim : c10::irange(output.dim())) {
    shared_params.output_strides[dim] = output.stride(dim);
    shared_params.output_sizes[dim] = output.size(dim);
  }

  int64_t cat_dim_offset = 0;
  size_t input_idx = 0;
  MPSStream* stream = getCurrentMPSStream();

  // Launch a separate kernels for each input. This will produce some overhead,
  // but that should be relatively minimal since at least one of the inputs is
  // very large. In order to launch only one kernel to process all inputs, we
  // would have to copy all the input tensor data into a packed buffer, which
  // would not be ideal.
  for (const Tensor& input : inputs) {
    if (input.numel() == 0) {
      continue;
    }

    // Metal can only launch up to MAX_INT threads at one time. If the input has
    // more than that number of elements, launch multiple kernels with different
    // offsets into the data.
    const int64_t max_num_threads = static_cast<int64_t>(std::numeric_limits<int32_t>::max());

    for (int64_t numel_remaining = input.numel(); numel_remaining > 0; numel_remaining -= max_num_threads) {
      auto num_threads = std::min(max_num_threads, numel_remaining);
      CatLargeInputParams input_params;

      input_params.cat_dim_offset = cat_dim_offset;
      input_params.input_element_offset = input.numel() - numel_remaining;

      for (const auto dim : c10::irange(input.dim())) {
        input_params.input_strides[dim] = input.stride(dim);
        input_params.input_sizes[dim] = input.size(dim);
      }

      dispatch_sync_with_rethrow(stream->queue(), ^() {
        @autoreleasepool {
          id<MTLComputeCommandEncoder> computeEncoder = stream->commandEncoder();
          auto pipeline_state = lib.getPipelineStateForFunc(
              fmt::format("cat_large_{}_{}", scalarToMetalTypeString(input), scalarToMetalTypeString(output)));
          getMPSProfiler().beginProfileKernel(pipeline_state, "cat", {input});
          [computeEncoder setComputePipelineState:pipeline_state];
          mtl_setArgs(computeEncoder, input, output, shared_params, input_params);
          mtl_dispatch1DJob(computeEncoder, pipeline_state, num_threads);
          getMPSProfiler().endProfileKernel(pipeline_state);
        }
      });
    }

    cat_dim_offset += input.size(dimension);
    input_idx++;
  }
}
} // namespace mps

// topk
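
The removed implementation above also documents its dispatch strategy: one kernel launch per input, and because Metal caps a single dispatch at INT32_MAX threads, any input larger than that is processed in slices, with `input_element_offset` telling the kernel where each slice begins. A minimal C++ sketch of that slicing loop (hypothetical `launch` callback standing in for the Metal dispatch):

#include <algorithm>
#include <cstdint>
#include <functional>
#include <limits>

// Sketch: cover `numel` elements in slices of at most INT32_MAX, mirroring
// the numel_remaining loop in the removed cat_out_large_tensor_mps.
static void launch_in_slices(int64_t numel,
                             const std::function<void(int64_t, int64_t)>& launch) {
  const int64_t max_threads = std::numeric_limits<int32_t>::max();
  for (int64_t remaining = numel; remaining > 0; remaining -= max_threads) {
    const int64_t count = std::min(max_threads, remaining);
    launch(numel - remaining, count);  // (offset of this slice, slice length)
  }
}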
@ -306,11 +231,7 @@ TORCH_IMPL_FUNC(cat_out_mps)
  // Compute size of the result in the cat dimension
  int64_t cat_dim_size = 0;
  idx = 0;
  bool has_large_tensor = false;
  for (const Tensor& tensor : materialized_inputs) {
    if (isTooLargeForMPSGraph(tensor)) {
      has_large_tensor |= true;
    }
    if (!should_skip(tensor)) {
      // TODO: Factor out `check_shape_except_dim`
      check_shape_except_dim(notSkippedTensor, tensor, dimension, idx);
@ -328,12 +249,6 @@ TORCH_IMPL_FUNC(cat_out_mps)
    return;
  }

  has_large_tensor |= isTooLargeForMPSGraph(out);

  if (has_large_tensor) {
    return mps::cat_out_large_tensor_mps(materialized_inputs, dimension, out);
  }

  struct CachedGraph : public MPSCachedGraph {
    CachedGraph(MPSGraph* graph) : MPSCachedGraph(graph) {}
    std::vector<MPSGraphTensor*> inputTensors_;

@ -14,7 +14,7 @@ DEFINE_DISPATCH(index_put_kernel_quantized_stub);
DEFINE_DISPATCH(index_put_with_sort_quantized_stub);

namespace {
TensorIterator make_index_put_iterator(const AdvancedIndex& info, const Tensor& value) {
static TensorIterator make_index_put_iterator(const AdvancedIndex& info, const Tensor& value) {
  TORCH_CHECK(is_expandable_to(value.sizes(), info.src.sizes()), "shape mismatch: value tensor of shape ", value.sizes(),
             " cannot be broadcast to indexing result of shape ", info.src.sizes());
  TensorIteratorConfig config;
@ -30,7 +30,7 @@ TensorIterator make_index_put_iterator(const AdvancedIndex& info, const Tensor&
  return config.build();
}

Tensor & masked_fill_impl_quantized_cpu(Tensor & self, const Tensor & mask, const Scalar& value) {
static Tensor & masked_fill_impl_quantized_cpu(Tensor & self, const Tensor & mask, const Scalar& value) {
  NoNamesGuard guard;
  TORCH_CHECK(mask.dtype() == ScalarType::Bool, "masked_fill only supports boolean masks, "
    "but got dtype ", mask.dtype());

@ -54,7 +54,7 @@ inline int end_index(int out_idx, int out_len, int in_len) {

// adaptive avg pool for 2D and 3D inputs
template <typename scalar_t>
void adaptive_avg_pool_single_out_frame(
static void adaptive_avg_pool_single_out_frame(
    scalar_t* input_p,
    scalar_t* output_p,
    int64_t sizeC,

@ -31,7 +31,7 @@ DEFINE_DISPATCH(qavg_pool2d_nhwc_stub);
namespace {

template <typename scalar_t>
void avg_pool2d_out_frame(
static void avg_pool2d_out_frame(
    const Tensor& input,
    Tensor& output,
    int64_t nInputPlane,

@ -35,7 +35,7 @@ struct UpsampleBilinearParamW {

// at::native functions for the native_functions.yaml
template <typename scalar_t>
void upsample_bilinear2d_out_frame(
static void upsample_bilinear2d_out_frame(
    Tensor& output,
    const Tensor& input,
    int64_t input_height,

@ -543,9 +543,9 @@ int register_embedding_params() {

namespace {

[[maybe_unused]] auto conv2d_params = register_conv_params<2>();
[[maybe_unused]] auto conv3d_params = register_conv_params<3>();
[[maybe_unused]] auto linear_params = register_linear_params();
[[maybe_unused]] auto embedding_params = register_embedding_params();
[[maybe_unused]] static auto conv2d_params = register_conv_params<2>();
[[maybe_unused]] static auto conv3d_params = register_conv_params<3>();
[[maybe_unused]] static auto linear_params = register_linear_params();
[[maybe_unused]] static auto embedding_params = register_embedding_params();

} // namespace

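
Taken together, the recurring edit in this diff marks translation-unit-local helpers `static` (or `static inline`). Inside an anonymous namespace the symbols already have internal linkage, so the keyword is mostly about making that intent explicit and satisfying diagnostics such as -Wmissing-prototypes that flag definitions without a prior declaration. A minimal sketch of the pattern (assumed file and names, not from this diff):

// some_kernel.cpp -- sketch of the pattern applied throughout this commit
namespace {

// `static` here is redundant for linkage purposes (the anonymous namespace
// already makes the symbol internal) but documents that the function is
// file-local and quiets missing-prototype style warnings.
static void helper_kernel() {
  // ... kernel body ...
}

}  // namespace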