Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-31 12:15:03 +08:00)

Compare commits: csl/build_...windows_mm (1 commit)

| Author | SHA1 | Date |
|---|---|---|
|  | 26f67ef050 |  |
@@ -8,8 +8,6 @@ if [[ "$GPU_ARCH_VERSION" == *"12.6"* ]]; then
    export TORCH_CUDA_ARCH_LIST="8.0;9.0"
elif [[ "$GPU_ARCH_VERSION" == *"12.8"* ]]; then
    export TORCH_CUDA_ARCH_LIST="8.0;9.0;10.0;12.0"
elif [[ "$GPU_ARCH_VERSION" == *"12.9"* ]]; then
    export TORCH_CUDA_ARCH_LIST="8.0;9.0;10.0;12.0"
elif [[ "$GPU_ARCH_VERSION" == *"13.0"* ]]; then
    export TORCH_CUDA_ARCH_LIST="8.0;9.0;10.0;11.0;12.0+PTX"
fi

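For context: this case ladder maps the CUDA toolkit version embedded in GPU_ARCH_VERSION to the semicolon-separated compute-capability list that TORCH_CUDA_ARCH_LIST expects, and the `+PTX` suffix on the 13.0 entry also embeds PTX so newer GPUs can JIT-compile the kernels. A minimal Python sketch of the same substring-match selection (the `pick_arch_list` helper is hypothetical, for illustration only):

```python
# Hypothetical mirror of the bash case ladder above.
ARCH_LISTS = {
    "12.6": "8.0;9.0",
    "12.8": "8.0;9.0;10.0;12.0",
    "13.0": "8.0;9.0;10.0;11.0;12.0+PTX",  # +PTX adds forward-compatible PTX
}

def pick_arch_list(gpu_arch_version: str) -> str | None:
    # Same semantics as [[ "$GPU_ARCH_VERSION" == *"12.8"* ]]: substring match.
    for version, archs in ARCH_LISTS.items():
        if version in gpu_arch_version:
            return archs
    return None

assert pick_arch_list("cuda12.8") == "8.0;9.0;10.0;12.0"
```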
@@ -181,7 +181,7 @@ case "$tag" in
    KATEX=yes
    UCX_COMMIT=${_UCX_COMMIT}
    UCC_COMMIT=${_UCC_COMMIT}
    PYTORCH_ROCM_ARCH="gfx90a;gfx942;gfx950;gfx1100"
    PYTORCH_ROCM_ARCH="gfx90a;gfx942;gfx950"
    if [[ $tag =~ "benchmarks" ]]; then
      INDUCTOR_BENCHMARKS=yes
    fi

@@ -344,7 +344,7 @@ docker build \
       --build-arg "NINJA_VERSION=${NINJA_VERSION:-}" \
       --build-arg "KATEX=${KATEX:-}" \
       --build-arg "ROCM_VERSION=${ROCM_VERSION:-}" \
       --build-arg "PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH}" \
       --build-arg "PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH:-gfx90a;gfx942;gfx1100}" \
       --build-arg "IMAGE_NAME=${IMAGE_NAME}" \
       --build-arg "UCX_COMMIT=${UCX_COMMIT}" \
       --build-arg "UCC_COMMIT=${UCC_COMMIT}" \

@@ -9,7 +9,7 @@ install_ubuntu() {
  # Instead use lib and headers from OpenSSL1.1 installed in `install_openssl.sh``
  apt-get install -y cargo
  echo "Checking out sccache repo"
  git clone https://github.com/mozilla/sccache -b v0.11.0
  git clone https://github.com/mozilla/sccache -b v0.10.0
  cd sccache
  echo "Building sccache"
  cargo build --release

@@ -10,6 +10,11 @@ BAD_SSL = "https://self-signed.badssl.com"

print("Testing SSL certificate checking for Python:", sys.version)

if sys.version_info[:2] < (2, 7) or sys.version_info[:2] < (3, 4):
    print("This version never checks SSL certs; skipping tests")
    sys.exit(0)


EXC = OSError

print(f"Connecting to {GOOD_SSL} should work")

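One thing worth noting about the added guard: Python compares version tuples lexicographically, and since (2, 7) < (3, 4), the two `or`-ed tests collapse into the single bound `< (3, 4)`. A quick sketch of that behavior:

```python
import sys

# Tuple comparison is lexicographic: (2, 7) < (3, 4), so
# `v < (2, 7) or v < (3, 4)` is equivalent to `v < (3, 4)`.
for v in [(2, 6), (2, 7), (3, 3), (3, 4), (3, 12)]:
    combined = v < (2, 7) or v < (3, 4)
    assert combined == (v < (3, 4))

print("skip?", sys.version_info[:2] < (3, 4))  # False on any modern CPython
```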
@@ -143,7 +143,7 @@ def sample_vllm_test_library():
                "pytest -v -s compile/test_decorator.py",
            ],
        },
        "vllm_language_model_test_extended_generation_28_failure_test": {
        "vllm_languagde_model_test_extended_generation_28_failure_test": {
            "title": "Language Models Test (Extended Generation) 2.8 release failure",
            "id": "vllm_languagde_model_test_extended_generation_28_failure_test",
            "package_install": [

@@ -63,7 +63,7 @@ class VllmBuildParameters:
    # DOCKERFILE_PATH: path to Dockerfile used when use_local_dockerfile is True"
    use_local_dockerfile: bool = env_bool_field("USE_LOCAL_DOCKERFILE", True)
    dockerfile_path: Path = env_path_field(
        "DOCKERFILE_PATH", ".github/ci_configs/vllm/Dockerfile"
        "DOCKERFILE_PATH", ".github/ci_configs/vllm/Dockerfile.tmp_vllm"
    )

    # the cleaning script to remove torch dependencies from pip

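For readers unfamiliar with the field helpers used here: `env_bool_field` and `env_path_field` default a dataclass attribute from an environment variable. Their implementations are not shown in this diff; a plausible sketch under that assumption:

```python
import os
from dataclasses import dataclass, field
from pathlib import Path

# Assumed implementations -- the diff only shows these helpers being called.
def env_bool_field(name: str, default: bool):
    return field(default_factory=lambda: os.getenv(name, str(default)).lower()
                 in ("1", "true", "yes"))

def env_path_field(name: str, default: str):
    return field(default_factory=lambda: Path(os.getenv(name, default)))

@dataclass
class BuildParams:  # simplified stand-in for VllmBuildParameters
    use_local_dockerfile: bool = env_bool_field("USE_LOCAL_DOCKERFILE", True)
    dockerfile_path: Path = env_path_field(
        "DOCKERFILE_PATH", ".github/ci_configs/vllm/Dockerfile.tmp_vllm"
    )

print(BuildParams())  # environment variables override the defaults above
```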
@@ -187,22 +187,19 @@ if [[ $CUDA_VERSION == 12* || $CUDA_VERSION == 13* ]]; then
            export USE_CUFILE=0
        else
            DEPS_LIST+=(
                "/usr/local/cuda/lib64/libnvToolsExt.so.1"
                "/usr/local/cuda/lib64/libcublas.so.12"
                "/usr/local/cuda/lib64/libcublasLt.so.12"
                "/usr/local/cuda/lib64/libcudart.so.12"
                "/usr/local/cuda/lib64/libnvrtc.so.12"
                "/usr/local/cuda/extras/CUPTI/lib64/libcupti.so.12")
            DEPS_SONAME+=(
                "libnvToolsExt.so.1"
                "libcublas.so.12"
                "libcublasLt.so.12"
                "libcudart.so.12"
                "libnvrtc.so.12"
                "libcupti.so.12")

            if [[ $CUDA_VERSION != 12.9* ]]; then
                DEPS_LIST+=("/usr/local/cuda/lib64/libnvToolsExt.so.1")
                DEPS_SONAME+=("libnvToolsExt.so.1")
            fi
        fi
    else
        echo "Using nvidia libs from pypi."

@@ -337,13 +337,13 @@ test_python() {

test_python_smoke() {
  # Smoke tests for H100/B200
  time python test/run_test.py --include test_matmul_cuda test_scaled_matmul_cuda inductor/test_fp8 inductor/test_max_autotune $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
  time python test/run_test.py --include test_matmul_cuda inductor/test_fp8 inductor/test_max_autotune $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
  assert_git_not_dirty
}

test_python_smoke_b200() {
  # Targeted smoke tests for B200 - staged approach to avoid too many failures
  time python test/run_test.py --include test_matmul_cuda test_scaled_matmul_cuda inductor/test_fp8 $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
  time python test/run_test.py --include test_matmul_cuda inductor/test_fp8 $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
  assert_git_not_dirty
}

							
								
								
									
.flake8 (2 changes)
@@ -12,7 +12,7 @@ ignore =
    # to line this up with executable bit
    EXE001,
    # these ignores are from flake8-bugbear; please fix!
    B007,B008,B017,B019,B023,B028,B903,B905,B906,B907,B908,B910
    B007,B008,B017,B019,B023,B028,B903,B904,B905,B906,B907,B908,B910
    # these ignores are from flake8-comprehensions; please fix!
    C407,
    # these ignores are from flake8-logging-format; please fix!

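The changed ignore line toggles B904, flake8-bugbear's check for a `raise` inside an `except` block that neither chains the original exception nor suppresses it. A minimal example of the pattern B904 flags and the chained form it asks for:

```python
# Flagged by B904: re-raising inside `except` without exception chaining.
def parse_port_bad(s: str) -> int:
    try:
        return int(s)
    except ValueError:
        raise RuntimeError(f"bad port: {s}")

# B904-clean: chain with `from err` (or use `from None` to suppress context).
def parse_port_good(s: str) -> int:
    try:
        return int(s)
    except ValueError as err:
        raise RuntimeError(f"bad port: {s}") from err
```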
							
								
								
									
.github/ISSUE_TEMPLATE/ci-sev.md (1 change, vendored)
@@ -8,7 +8,6 @@ assignees: ''
---

> NOTE: Remember to label this issue with "`ci: sev`"
>       If you want autorevert to be disabled, keep the ci: disable-autorevert label

 <!-- Add the `merge blocking` label to this PR to prevent PRs from being merged while this issue is open -->

							
								
								
									
.github/ISSUE_TEMPLATE/disable-autorevert.md (4 changes, vendored)
@@ -1,7 +1,7 @@
---
name: "D❌\U0001F519 ISABLE AUTOREVERT"
name: DISABLE AUTOREVERT
about: Disables autorevert when open
title: "[DISABLE AUTOREVERT]"
title: "❌\U0001F519 [DISABLE AUTOREVERT]"
labels: 'ci: disable-autorevert'
assignees: ''

							
								
								
									
.github/actions/setup-rocm/action.yml (13 changes, vendored)
@@ -111,16 +111,3 @@ runs:
        # This video group ID maps to subgid 1 inside the docker image due to the /etc/subgid entries.
        # The group name corresponding to group ID 1 can change depending on the OS, so both are necessary.
        echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd $DEVICE_FLAG --group-add video --group-add $render_gid --group-add daemon --group-add bin --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --network=host" >> "${GITHUB_ENV}"

    - name: configure aws credentials
      id: aws_creds
      uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722 # v4.1.0
      with:
        role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_s3_and_ecr_read_only
        aws-region: us-east-1
        role-duration-seconds: 18000

    - name: Login to Amazon ECR
      id: login-ecr
      continue-on-error: true
      uses: aws-actions/amazon-ecr-login@062b18b96a7aff071d4dc91bc00c4c1a7945b076 # v2.0.1

							
								
								
									
.github/ci_commit_pins/audio.txt (2 changes, vendored)
@@ -1 +1 @@
1b013f5b5a87a1882eb143c26d79d091150d6a37
87ff22e49ed0e92576c4935ccb8c143daac4a3cd

							
								
								
									
.github/ci_commit_pins/vision.txt (2 changes, vendored)
@@ -1 +1 @@
faffd5cf673615583da6517275e361cb3dbc77e6
966da7e46f65d6d49df3e31214470a4fe5cc8e66

							
								
								
									
.github/ci_commit_pins/vllm.txt (2 changes, vendored)
@@ -1 +1 @@
e5192819208c4d68194844b7dfafbc00020d0dea
0ad9951c416d33c5da4f7a504fb162cbe62386f5

							
								
								
									
.github/ci_commit_pins/xla.txt (2 changes, vendored)
@@ -1 +1 @@
0fa6e3129e61143224663e1ec67980d12b7ec4eb
2a9138a26ee257fef05310ad3fecf7c55fe80d73

@@ -1,41 +1,59 @@
# TODO(elainwy): remove this file after the torch nightly dockerfile is in sync in vllm repo
# The vLLM Dockerfile is used to construct vLLM image against torch nightly and torch main that can be directly used for testing

ARG CUDA_VERSION=12.8.1
ARG PYTHON_VERSION=3.12

# BUILD_BASE_IMAGE: used to setup python build xformers, and vllm wheels, It can be replaced with a different base image from local machine,
# by default, it uses the torch-nightly-base stage from this docker image
ARG BUILD_BASE_IMAGE=torch-nightly-base

# FINAL_BASE_IMAGE: used to set up vllm-instaled environment and build flashinfer,
# by default, it uses devel-ubuntu22.04 official image.
ARG FINAL_BASE_IMAGE=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04

# The logic is copied from https://github.com/vllm-project/vllm/blob/main/docker/Dockerfile
ARG GET_PIP_URL="https://bootstrap.pypa.io/get-pip.py"


#################### TORCH NIGHTLY BASE IMAGE ####################
# A base image for building vLLM with devel ubuntu 22.04, this is mainly used to build vllm in vllm builtkite ci
FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04 as torch-nightly-base

ARG CUDA_VERSION
ARG PYTHON_VERSION
ARG GET_PIP_URL

# Install system dependencies and uv, then create Python virtual environment
# Install Python and other dependencies
RUN apt-get update -y \
    && apt-get install -y ccache software-properties-common git curl sudo vim python3-pip \
    && curl -LsSf https://astral.sh/uv/install.sh | sh \
    && $HOME/.local/bin/uv venv /opt/venv --python ${PYTHON_VERSION} \
    && rm -f /usr/bin/python3 /usr/bin/python3-config /usr/bin/pip \
    && ln -s /opt/venv/bin/python3 /usr/bin/python3 \
    && ln -s /opt/venv/bin/python3-config /usr/bin/python3-config \
    && ln -s /opt/venv/bin/pip /usr/bin/pip \
    && apt-get install -y ccache software-properties-common git curl wget sudo vim \
    && add-apt-repository -y ppa:deadsnakes/ppa \
    && apt-get update -y \
    && apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv \
    && update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1 \
    && update-alternatives --set python3 /usr/bin/python${PYTHON_VERSION} \
    && ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config \
    && curl -sS ${GET_PIP_URL} | python${PYTHON_VERSION} \
    && python3 --version && python3 -m pip --version

# Upgrade to GCC 10 to avoid https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92519
# as it was causing spam when compiling the CUTLASS kernels
RUN apt-get install -y gcc-10 g++-10
RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 110 --slave /usr/bin/g++ g++ /usr/bin/g++-10
RUN <<EOF
gcc --version
EOF
# Ensure gcc >= 10 to avoid CUTLASS issues (bug 92519)
RUN current_gcc_version=$(gcc -dumpversion | cut -f1 -d.) && \
    if command -v apt-get >/dev/null; then \
        if [ "$current_gcc_version" -lt 10 ]; then \
            echo "GCC version is $current_gcc_version, installing gcc-10..."; \
            apt-get update \
            && apt-get install -y gcc-10 g++-10 \
            && update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 100 \
            && update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-10 100; \
        else \
            echo "GCC version is $current_gcc_version, no need to install gcc-10."; \
        fi \
    fi \
    && gcc --version && g++ --version

# Install uv for faster pip installs
# install uv for faster pip installs
RUN --mount=type=cache,target=/root/.cache/uv \
    python3 -m pip install uv==0.8.4

@@ -43,32 +61,36 @@ ENV UV_HTTP_TIMEOUT=500
ENV UV_INDEX_STRATEGY="unsafe-best-match"
# Use copy mode to avoid hardlink failures with Docker cache mounts
ENV UV_LINK_MODE=copy

#################### TORCH NIGHTLY  BASE IMAGE ####################


#################### BASE BUILD IMAGE ####################
# A base image for building vLLM with torch nightly or torch wheels
# prepare basic build environment
FROM ${BUILD_BASE_IMAGE} AS base
USER root

ARG CUDA_VERSION
ARG PYTHON_VERSION

# Only work with PyTorch manylinux builder
# TODO (huydhn): Only work with PyTorch manylinux builder
ENV PATH="/opt/python/cp312-cp312/bin:${PATH}"

# Install some system dependencies and double check python version
RUN if command -v apt-get >/dev/null; then \
        apt-get update -y \
        && apt-get install -y ccache software-properties-common git wget sudo vim; \
        && apt-get install -y ccache software-properties-common git curl wget sudo vim; \
    else \
        dnf install -y git wget sudo; \
        dnf install -y git curl wget sudo; \
    fi \
    && python3 --version && python3 -m pip --version

# Install uv for faster pip installs if not existed
RUN --mount=type=cache,target=/root/.cache/uv \
    python3 -m pip install uv==0.8.4

    if ! python3 -m uv --version >/dev/null 2>&1; then \
        python3 -m pip install uv==0.8.4; \
    fi
ENV UV_HTTP_TIMEOUT=500
ENV UV_INDEX_STRATEGY="unsafe-best-match"
# Use copy mode to avoid hardlink failures with Docker cache mounts
@@ -76,15 +98,15 @@ ENV UV_LINK_MODE=copy

WORKDIR /workspace

# Install build and runtime dependencies
# install build and runtime dependencies
COPY requirements/common.txt requirements/common.txt
COPY use_existing_torch.py use_existing_torch.py
COPY pyproject.toml pyproject.toml

# Install build and runtime dependencies without stable torch version
# install build and runtime dependencies without stable torch version
RUN python3 use_existing_torch.py

# Default mount file as placeholder, this just avoid the mount error
# default mount file as placeholder, this just avoid the mount error
# change to a different vllm folder if this does not exist anymore
ARG TORCH_WHEELS_PATH="./requirements"
ARG PINNED_TORCH_VERSION
@@ -116,36 +138,56 @@ RUN --mount=type=cache,target=/root/.cache/uv \
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system -r requirements/common.txt

# Must put before installing xformers, so it can install the correct version of xfomrers.
ARG xformers_cuda_arch_list='7.5;8.0+PTX;9.0a'
ENV TORCH_CUDA_ARCH_LIST=${xformers_cuda_arch_list}

ARG max_jobs=16
ENV MAX_JOBS=${max_jobs}

RUN --mount=type=cache,target=/root/.cache/uv bash - <<'BASH'
    export TORCH_CUDA_ARCH_LIST='7.5 8.0+PTX 9.0a'
    git clone https://github.com/facebookresearch/xformers.git
RUN echo ${TORCH_CUDA_ARCH_LIST}
RUN echo ${MAX_JOBS}
RUN pip freeze | grep -E 'ninja'

    pushd xformers
    git checkout v0.0.32.post2
    git submodule update --init --recursive
    python3 setup.py bdist_wheel --dist-dir=../xformers-dist --verbose
    popd
# Build xformers with cuda and torch nightly/wheel
# following official xformers guidance: https://github.com/facebookresearch/xformers#build
# sha for https://github.com/facebookresearch/xformers/tree/v0.0.32.post2
ARG XFORMERS_COMMIT=5d4b92a5e5a9c6c6d4878283f47d82e17995b468
ENV CCACHE_DIR=/root/.cache/ccache

    rm -rf xformers
BASH
RUN --mount=type=cache,target=/root/.cache/ccache \
    --mount=type=cache,target=/root/.cache/uv \
    echo 'git clone xformers...' \
    && git clone https://github.com/facebookresearch/xformers.git --recursive \
    && cd xformers \
    && git checkout ${XFORMERS_COMMIT} \
    && git submodule update --init --recursive \
    && echo 'finish git clone xformers...' \
    && rm -rf build \
    && python3 setup.py bdist_wheel --dist-dir=../xformers-dist --verbose \
    && cd .. \
    && rm -rf xformers

RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system xformers-dist/*.whl
    uv pip install --system xformers-dist/*.whl --verbose

# Build can take a long time, and the torch nightly version fetched from url can be different in next docker stage.
# track the nightly torch version used in the build, when we set up runtime environment we can make sure the version is the same
RUN uv pip freeze | grep -i '^torch\|^torchvision\|^torchaudio' > torch_build_versions.txt

RUN cat torch_build_versions.txt
RUN pip freeze | grep -E 'torch|xformers|torchvision|torchaudio'

#################### BASE BUILD IMAGE ####################


#################### WHEEL BUILD IMAGE ####################
# Image used to build vllm wheel
FROM base AS build
ARG TARGETPLATFORM

COPY . .

RUN python3 use_existing_torch.py

RUN --mount=type=cache,target=/root/.cache/uv \
@@ -155,17 +197,20 @@ ARG GIT_REPO_CHECK=0
RUN --mount=type=bind,source=.git,target=.git \
    if [ "$GIT_REPO_CHECK" != "0" ]; then bash tools/check_repo.sh ; fi

# Max jobs used by Ninja to build extensions
ARG max_jobs=16
ENV MAX_JOBS=${max_jobs}
ARG nvcc_threads=8
ARG nvcc_threads=4
ENV NVCC_THREADS=$nvcc_threads
ARG torch_cuda_arch_list='8.0 8.6 8.9 9.0'
ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list}

ARG USE_SCCACHE
ARG SCCACHE_BUCKET_NAME=vllm-build-sccache
ARG SCCACHE_REGION_NAME=us-west-2
ARG SCCACHE_S3_NO_CREDENTIALS=0

# Use sccache to speed up compilation
# if USE_SCCACHE is set, use sccache to speed up compilation
RUN --mount=type=cache,target=/root/.cache/uv \
    --mount=type=bind,source=.git,target=.git \
    if [ "$USE_SCCACHE" = "1" ]; then \
@@ -190,9 +235,6 @@ RUN --mount=type=cache,target=/root/.cache/uv \
        && sccache --show-stats; \
    fi

ARG torch_cuda_arch_list='8.0 8.6 8.9 9.0'
ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list}

ARG vllm_target_device="cuda"
ENV VLLM_TARGET_DEVICE=${vllm_target_device}
ENV CCACHE_DIR=/root/.cache/ccache
@@ -206,10 +248,17 @@ RUN --mount=type=cache,target=/root/.cache/ccache \
        export VLLM_DOCKER_BUILD_CONTEXT=1 && \
        python3 setup.py bdist_wheel --dist-dir=vllm-dist --py-limited-api=cp38; \
    fi

RUN echo "[INFO] Listing current directory:" && \
    ls -al && \
    echo "[INFO] Showing torch_build_versions.txt content:" && \
    cat torch_build_versions.txt

#################### WHEEL BUILD IMAGE ####################


################### VLLM INSTALLED IMAGE ####################
# Setup clean environment for vLLM for test and api server using ubuntu22.04 with AOT flashinfer
FROM ${FINAL_BASE_IMAGE} AS vllm-base
USER root

@@ -217,7 +266,7 @@ ARG CUDA_VERSION
ARG PYTHON_VERSION
ARG GET_PIP_URL

# Only work with PyTorch manylinux builder
# TODO (huydhn): Only work with PyTorch manylinux builder
ENV PATH="/opt/python/cp312-cp312/bin:${PATH}"

# prepare for environment starts
@@ -226,19 +275,20 @@ WORKDIR /workspace
# Install Python and other dependencies
RUN if command -v apt-get >/dev/null; then \
        apt-get update -y \
        && apt-get install -y ccache software-properties-common git sudo vim python3-pip; \
        && apt-get install -y ccache software-properties-common git curl wget sudo vim \
        && add-apt-repository -y ppa:deadsnakes/ppa \
        && apt-get update -y \
        && apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv \
        && update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1 \
        && update-alternatives --set python3 /usr/bin/python${PYTHON_VERSION} \
        && ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config \
        && curl -sS ${GET_PIP_URL} | python${PYTHON_VERSION}; \
    else \
        dnf install -y git wget sudo; \
        dnf install -y git curl wget sudo; \
    fi \
    && curl -LsSf https://astral.sh/uv/install.sh | sh \
    && $HOME/.local/bin/uv venv /opt/venv --python ${PYTHON_VERSION} \
    && rm -f /usr/bin/python3 /usr/bin/python3-config /usr/bin/pip \
    && ln -s /opt/venv/bin/python3 /usr/bin/python3 \
    && ln -s /opt/venv/bin/python3-config /usr/bin/python3-config \
    && ln -s /opt/venv/bin/pip /usr/bin/pip \
    && python3 --version && python3 -m pip --version

# Get the torch versions, and whls used in previous stage
# Get the torch versions, and whls used in previous stagtes for consistency
COPY --from=base /workspace/torch_build_versions.txt ./torch_build_versions.txt
COPY --from=base /workspace/xformers-dist /wheels/xformers
COPY --from=build /workspace/vllm-dist /wheels/vllm
@@ -247,29 +297,33 @@ RUN echo "[INFO] Listing current directory before torch install step:" && \
    echo "[INFO] Showing torch_build_versions.txt content:" && \
    cat torch_build_versions.txt

# Install uv for faster pip installs if not existed
RUN --mount=type=cache,target=/root/.cache/uv \
    python3 -m pip install uv==0.8.4

ENV UV_HTTP_TIMEOUT=500
ENV UV_INDEX_STRATEGY="unsafe-best-match"
# Use copy mode to avoid hardlink failures with Docker cache mounts
ENV UV_LINK_MODE=copy

# Install build and runtime dependencies, this is needed for flashinfer install
COPY requirements/build.txt requirements/build.txt
COPY use_existing_torch.py use_existing_torch.py
RUN python3 use_existing_torch.py
RUN cat requirements/build.txt

# Install uv for faster pip installs if not existed
RUN --mount=type=cache,target=/root/.cache/uv \
    if ! python3 -m uv --version > /dev/null 2>&1; then \
        python3 -m pip install uv==0.8.4; \
    fi

ENV UV_HTTP_TIMEOUT=500
ENV UV_INDEX_STRATEGY="unsafe-best-match"
# Use copy mode to avoid hardlink failures with Docker cache mounts
ENV UV_LINK_MODE=copy


RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system -r requirements/build.txt


# Default mount file as placeholder, this just avoid the mount error
ARG TORCH_WHEELS_PATH="./requirements"
# Install torch, torchaudio and torchvision. If TORCH_WHEELS_PATH is default
# to ./requirements, it will pull the nightly versions using pip. Otherwise,
# it will use the local wheels from TORCH_WHEELS_PATH
# Install torch, torchaudio and torchvision
# if TORCH_WHEELS_PATH is default "./requirements", it will pull the nightly versions using pip using torch_build_versions.txt
# otherwise, it will use the whls from TORCH_WHEELS_PATH from the host machine
RUN --mount=type=bind,source=${TORCH_WHEELS_PATH},target=/dist \
    --mount=type=cache,target=/root/.cache/uv \
    if [ -n "$TORCH_WHEELS_PATH" ] && [ "$TORCH_WHEELS_PATH" != "./requirements" ] && [ -d "/dist" ] && ls /dist/torch*.whl >/dev/null 2>&1; then \
@@ -290,14 +344,18 @@ RUN --mount=type=cache,target=/root/.cache/uv \
# Install xformers wheel from previous stage
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system /wheels/xformers/*.whl --verbose

# Build FlashInfer from source
# Build flashinfer from source.
ARG torch_cuda_arch_list='8.0;8.9;9.0a;10.0a;12.0'
# install package for build flashinfer
# see issue: https://github.com/flashinfer-ai/flashinfer/issues/738

RUN pip freeze | grep -E 'setuptools|packaging|build'

ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list}

# Build flashinfer for torch nightly from source around 10 mins
ARG FLASHINFER_GIT_REPO="https://github.com/flashinfer-ai/flashinfer.git"
# Keep this in sync with https://github.com/vllm-project/vllm/blob/main/requirements/cuda.txt
ARG FLASHINFER_GIT_REF="v0.2.14.post1"

RUN --mount=type=cache,target=/root/.cache/uv \
    git clone --depth 1 --recursive --shallow-submodules \
        --branch ${FLASHINFER_GIT_REF} \
@@ -309,7 +367,7 @@ RUN --mount=type=cache,target=/root/.cache/uv \
    && cd .. \
    && rm -rf flashinfer

# Install FlashInfer
# install flashinfer python
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system wheels/flashinfer/*.whl --verbose

@@ -319,6 +377,49 @@ RUN uv pip freeze | grep -i '^torch\|^torchvision\|^torchaudio\|^xformers\|^vllm
################### VLLM INSTALLED IMAGE ####################


#################### UNITTEST IMAGE #############################
FROM vllm-base as test

ENV UV_HTTP_TIMEOUT=500
ENV UV_INDEX_STRATEGY="unsafe-best-match"
# Use copy mode to avoid hardlink failures with Docker cache mounts
ENV UV_LINK_MODE=copy

COPY tests/ tests/
COPY examples examples
COPY benchmarks benchmarks
COPY ./vllm/collect_env.py .
COPY requirements/common.txt requirements/common.txt
COPY use_existing_torch.py use_existing_torch.py
COPY pyproject.toml pyproject.toml
# Install build and runtime dependencies without stable torch version
COPY requirements/nightly_torch_test.txt requirements/nightly_torch_test.txt

RUN python3 use_existing_torch.py

# install packages
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system -r requirements/common.txt
# enable fast downloads from hf (for testing)
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system hf_transfer
ENV HF_HUB_ENABLE_HF_TRANSFER 1

# install development dependencies (for testing)
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system -e tests/vllm_test_utils

RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system -r requirements/nightly_torch_test.txt

# Logging to confirm the torch versions
RUN pip freeze | grep -E 'torch|xformers|vllm|flashinfer'

# Logging to confirm all the packages are installed
RUN pip freeze

#################### UNITTEST IMAGE #############################

#################### EXPORT STAGE ####################
FROM scratch as export-wheels

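The torch_build_versions.txt handoff in this Dockerfile (written via `uv pip freeze` in the base stage, then copied into vllm-base) is what pins the runtime stage to the exact nightly torch/torchvision/torchaudio builds used at compile time. A hedged sketch of consuming such a pin file (the file name comes from the diff; the helper itself is illustrative, not from the Dockerfile):

```python
from pathlib import Path

def read_pins(path: str = "torch_build_versions.txt") -> list[str]:
    # One `pkg==version` requirement per line, as produced by
    # `uv pip freeze | grep -i '^torch\|^torchvision\|^torchaudio'`.
    return [ln.strip() for ln in Path(path).read_text().splitlines() if ln.strip()]

# The pins can then be handed to the installer (e.g. `uv pip install --system
# <pins...>`) so the runtime stage resolves exactly the versions the build
# stage compiled against.
print(read_pins())
```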
							
								
								
									
.github/pytorch-probot.yml (3 changes, vendored)
@@ -15,8 +15,7 @@ ciflow_push_tags:
- ciflow/inductor-micro-benchmark
- ciflow/inductor-micro-benchmark-cpu-x86
- ciflow/inductor-perf-compare
- ciflow/inductor-perf-test-nightly-rocm-mi300
- ciflow/inductor-perf-test-nightly-rocm-mi355
- ciflow/inductor-perf-test-nightly-rocm
- ciflow/inductor-perf-test-nightly-x86-zen
- ciflow/inductor-periodic
- ciflow/inductor-rocm

							
								
								
									
.github/scripts/filter_test_configs.py (2 changes, vendored)
@@ -512,8 +512,6 @@ def perform_misc_tasks(
        "keep-going",
        branch == MAIN_BRANCH
        or bool(tag and re.match(r"^trunk/[a-f0-9]{40}$", tag))
        # Pattern for tags created via manual run on HUD
        or bool(tag and re.match(r"^ciflow/[^/]+/[a-f0-9]{40}$", tag))
        or check_for_setting(labels, pr_body, "keep-going"),
    )
    set_output(

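For reference, the removed branch matched tags of the form `ciflow/<label>/<40-hex-sha>`, which are created when a workflow is dispatched manually from HUD; the `trunk/<sha>` branch above it is untouched. The two patterns behave like this:

```python
import re

# Tag patterns from perform_misc_tasks(); the ciflow form is the one
# this diff drops from the keep-going check.
trunk = re.compile(r"^trunk/[a-f0-9]{40}$")
ciflow = re.compile(r"^ciflow/[^/]+/[a-f0-9]{40}$")

sha = "a" * 40
assert trunk.match(f"trunk/{sha}")
assert ciflow.match(f"ciflow/inductor/{sha}")
assert not ciflow.match(f"ciflow/{sha}")  # requires a label segment
```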
							
								
								
									
.github/scripts/generate_binary_build_matrix.py (26 changes, vendored)
@@ -16,18 +16,16 @@ from typing import Optional


# NOTE: Please also update the CUDA sources in `PIP_SOURCES` in tools/nightly.py when changing this
CUDA_ARCHES = ["12.6", "12.8", "12.9", "13.0"]
CUDA_ARCHES = ["12.6", "12.8", "13.0"]
CUDA_STABLE = "12.8"
CUDA_ARCHES_FULL_VERSION = {
    "12.6": "12.6.3",
    "12.8": "12.8.1",
    "12.9": "12.9.1",
    "13.0": "13.0.0",
}
CUDA_ARCHES_CUDNN_VERSION = {
    "12.6": "9",
    "12.8": "9",
    "12.9": "9",
    "13.0": "9",
}

@@ -40,7 +38,7 @@ CPU_AARCH64_ARCH = ["cpu-aarch64"]

CPU_S390X_ARCH = ["cpu-s390x"]

CUDA_AARCH64_ARCHES = ["12.6-aarch64", "12.8-aarch64", "12.9-aarch64", "13.0-aarch64"]
CUDA_AARCH64_ARCHES = ["12.6-aarch64", "12.8-aarch64", "13.0-aarch64"]


PYTORCH_EXTRA_INSTALL_REQUIREMENTS = {
@@ -78,23 +76,6 @@ PYTORCH_EXTRA_INSTALL_REQUIREMENTS = {
        "nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | "
        "nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'"
    ),
    "12.9": (
        "nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64'"
    ),
    "13.0": (
        "nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | "
        "nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | "
@@ -341,7 +322,7 @@ def generate_wheels_matrix(
            # cuda linux wheels require PYTORCH_EXTRA_INSTALL_REQUIREMENTS to install

            if (
                arch_version in ["13.0", "12.9", "12.8", "12.6"]
                arch_version in ["13.0", "12.8", "12.6"]
                and os == "linux"
                or arch_version in CUDA_AARCH64_ARCHES
            ):
@@ -405,6 +386,5 @@ def generate_wheels_matrix(


validate_nccl_dep_consistency("13.0")
validate_nccl_dep_consistency("12.9")
validate_nccl_dep_consistency("12.8")
validate_nccl_dep_consistency("12.6")

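Dropping an arch version such as 12.9 has to be done consistently across several parallel tables (CUDA_ARCHES, the full-version and cuDNN maps, PYTORCH_EXTRA_INSTALL_REQUIREMENTS, and the validate_nccl_dep_consistency calls). A small illustrative cross-check, using the post-change values shown above (the assertion is mine, not part of the script):

```python
CUDA_ARCHES = ["12.6", "12.8", "13.0"]
CUDA_ARCHES_FULL_VERSION = {"12.6": "12.6.3", "12.8": "12.8.1", "13.0": "13.0.0"}
CUDA_ARCHES_CUDNN_VERSION = {"12.6": "9", "12.8": "9", "13.0": "9"}

# Every advertised arch must have a full version and a cuDNN version,
# and vice versa -- i.e. no 12.9 entries left behind anywhere.
for table in (CUDA_ARCHES_FULL_VERSION, CUDA_ARCHES_CUDNN_VERSION):
    assert set(table) == set(CUDA_ARCHES), table
```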
							
								
								
									
.github/scripts/trymerge.py (4 changes, vendored)
@@ -2042,6 +2042,10 @@ def validate_revert(
            f"[{', '.join(allowed_reverters)}], but instead is {author_association}."
        )

    # Raises exception if matching rule is not found, but ignores all status checks
    find_matching_merge_rule(
        pr, repo, skip_mandatory_checks=True, skip_internal_checks=True
    )
    commit_sha = get_pr_commit_sha(repo, pr)
    return (author_login, commit_sha)

@@ -177,9 +177,6 @@ jobs:
    runs-on: linux.rocm.gpu.mi250
    timeout-minutes: !{{ common.timeout_minutes }}
    !{{ upload.binary_env(config) }}
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm

							
								
								
									
.github/workflows/_docs.yml (2 changes, vendored)
@@ -72,7 +72,7 @@ jobs:
            # Let's try to figure out how this can be improved
            timeout-minutes: 360
          - docs_type: python
            runner: ${{ inputs.runner_prefix }}linux.c7i.2xlarge
            runner: ${{ inputs.runner_prefix }}linux.2xlarge
            # It takes less than 30m to finish python docs unless there are issues
            timeout-minutes: 30
    # Set a fixed name for this job instead of using the current matrix-generated name, i.e. build-docs (cpp, linux.12xlarge, 180)

							
								
								
									
.github/workflows/_linux-test.yml (820 changes, vendored)
@@ -107,457 +107,457 @@ jobs:
            All testing is done inside the container, to start an interactive session run:
              docker exec -it $(docker container ps --format '{{.ID}}') bash

      # - name: Checkout PyTorch
      #   uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
      #   with:
      #     no-sudo: true
      - name: Checkout PyTorch
        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
        with:
          no-sudo: true

      # - name: Setup Python
      #   if: contains(matrix.runner, 'b200')
      #   uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
      #   with:
      #     python-version: '3.12'
      #     cache: pip
      - name: Setup Python
        if: contains(matrix.runner, 'b200')
        uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
        with:
          python-version: '3.12'
          cache: pip

      # - name: Setup Linux
      #   uses: ./.github/actions/setup-linux
      #   if: inputs.build-environment != 'linux-s390x-binary-manywheel' && !contains(matrix.runner, 'b200')
      - name: Setup Linux
        uses: ./.github/actions/setup-linux
        if: inputs.build-environment != 'linux-s390x-binary-manywheel' && !contains(matrix.runner, 'b200')

      # - name: configure aws credentials
      #   if: ${{ inputs.aws-role-to-assume != '' && inputs.build-environment != 'linux-s390x-binary-manywheel' }}
      #   uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722 # v4.1.0
      #   with:
      #     role-to-assume: ${{ inputs.aws-role-to-assume }}
      #     role-session-name: gha-linux-test
      #     aws-region: us-east-1
      - name: configure aws credentials
        if: ${{ inputs.aws-role-to-assume != '' && inputs.build-environment != 'linux-s390x-binary-manywheel' }}
        uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722 # v4.1.0
        with:
          role-to-assume: ${{ inputs.aws-role-to-assume }}
          role-session-name: gha-linux-test
          aws-region: us-east-1

      # - name: Login to Amazon ECR
      #   if: ${{ inputs.aws-role-to-assume != '' && contains(matrix.runner, 'b200') }}
      #   id: login-ecr
      #   continue-on-error: true
      #   uses: aws-actions/amazon-ecr-login@062b18b96a7aff071d4dc91bc00c4c1a7945b076 # v2.0.1
      - name: Login to Amazon ECR
        if: ${{ inputs.aws-role-to-assume != '' && contains(matrix.runner, 'b200') }}
        id: login-ecr
        continue-on-error: true
        uses: aws-actions/amazon-ecr-login@062b18b96a7aff071d4dc91bc00c4c1a7945b076 # v2.0.1

      # - name: Calculate docker image
      #   id: calculate-docker-image
      #   uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
      #   if: inputs.build-environment != 'linux-s390x-binary-manywheel'
      #   with:
      #     docker-image-name: ${{ inputs.docker-image }}
      - name: Calculate docker image
        id: calculate-docker-image
        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
        if: inputs.build-environment != 'linux-s390x-binary-manywheel'
        with:
          docker-image-name: ${{ inputs.docker-image }}

      # - name: Use following to pull public copy of the image
      #   id: print-ghcr-mirror
      #   if: inputs.build-environment != 'linux-s390x-binary-manywheel'
      #   env:
      #     ECR_DOCKER_IMAGE: ${{ steps.calculate-docker-image.outputs.docker-image }}
      #   shell: bash
      #   run: |
      #     tag=${ECR_DOCKER_IMAGE##*:}
      #     echo "docker pull ghcr.io/pytorch/ci-image:${tag/:/-}"
      - name: Use following to pull public copy of the image
        id: print-ghcr-mirror
        if: inputs.build-environment != 'linux-s390x-binary-manywheel'
        env:
          ECR_DOCKER_IMAGE: ${{ steps.calculate-docker-image.outputs.docker-image }}
        shell: bash
        run: |
          tag=${ECR_DOCKER_IMAGE##*:}
          echo "docker pull ghcr.io/pytorch/ci-image:${tag/:/-}"

      # - name: Pull docker image
      #   uses: pytorch/test-infra/.github/actions/pull-docker-image@main
      #   if: inputs.build-environment != 'linux-s390x-binary-manywheel'
      #   with:
      #     docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
      - name: Pull docker image
        uses: pytorch/test-infra/.github/actions/pull-docker-image@main
        if: inputs.build-environment != 'linux-s390x-binary-manywheel'
        with:
          docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}

      # - name: Check if in a container runner
      #   shell: bash
      #   id: check_container_runner
      #   run: echo "IN_CONTAINER_RUNNER=$(if [ -f /.inarc ] || [ -f /.incontainer ]; then echo true ; else echo false; fi)" >> "$GITHUB_OUTPUT"
      - name: Check if in a container runner
        shell: bash
        id: check_container_runner
        run: echo "IN_CONTAINER_RUNNER=$(if [ -f /.inarc ] || [ -f /.incontainer ]; then echo true ; else echo false; fi)" >> "$GITHUB_OUTPUT"

      # - name: Install nvidia driver, nvidia-docker runtime, set GPU_FLAG
      #   id: install-nvidia-driver
      #   uses: pytorch/test-infra/.github/actions/setup-nvidia@main
      #   with:
      #     driver-version: ${{ matrix.config == 'legacy_nvidia_driver' && '525.105.17' || '580.82.07' }}
      #   if: ${{ contains(inputs.build-environment, 'cuda') && !contains(matrix.config, 'nogpu') && steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'false' && !contains(matrix.runner, 'b200') }}
      - name: Install nvidia driver, nvidia-docker runtime, set GPU_FLAG
        id: install-nvidia-driver
        uses: pytorch/test-infra/.github/actions/setup-nvidia@main
        with:
          driver-version: ${{ matrix.config == 'legacy_nvidia_driver' && '525.105.17' || '580.82.07' }}
        if: ${{ contains(inputs.build-environment, 'cuda') && !contains(matrix.config, 'nogpu') && steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'false' && !contains(matrix.runner, 'b200') }}

      # - name: Setup GPU_FLAG for docker run
      #   id: setup-gpu-flag
      #   run: echo "GPU_FLAG=--gpus all -e NVIDIA_DRIVER_CAPABILITIES=all" >> "${GITHUB_ENV}"
      #   if: ${{ contains(inputs.build-environment, 'cuda') && !contains(matrix.config, 'nogpu') && (steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'true' || contains(matrix.runner, 'b200')) }}
      - name: Setup GPU_FLAG for docker run
        id: setup-gpu-flag
        run: echo "GPU_FLAG=--gpus all -e NVIDIA_DRIVER_CAPABILITIES=all" >> "${GITHUB_ENV}"
        if: ${{ contains(inputs.build-environment, 'cuda') && !contains(matrix.config, 'nogpu') && (steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'true' || contains(matrix.runner, 'b200')) }}

      # - name: Setup SCCACHE_SERVER_PORT environment for docker run when on container
      #   id: setup-sscache-port-flag
      #   run: echo "SCCACHE_SERVER_PORT_DOCKER_FLAG=-e SCCACHE_SERVER_PORT=$((RUNNER_UID + 4226))" >> "${GITHUB_ENV}"
      #   if: ${{ steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'true' && !contains(matrix.runner, 'b200') }}
      - name: Setup SCCACHE_SERVER_PORT environment for docker run when on container
        id: setup-sscache-port-flag
        run: echo "SCCACHE_SERVER_PORT_DOCKER_FLAG=-e SCCACHE_SERVER_PORT=$((RUNNER_UID + 4226))" >> "${GITHUB_ENV}"
        if: ${{ steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'true' && !contains(matrix.runner, 'b200') }}

      # - name: Lock NVIDIA A100 40GB Frequency
      #   run: |
      #     sudo nvidia-smi -pm 1
      #     sudo nvidia-smi -ac 1215,1410
      #     nvidia-smi
      #   if: ${{ contains(matrix.runner, 'a100') && steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'false' }}
      - name: Lock NVIDIA A100 40GB Frequency
        run: |
          sudo nvidia-smi -pm 1
          sudo nvidia-smi -ac 1215,1410
          nvidia-smi
        if: ${{ contains(matrix.runner, 'a100') && steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'false' }}

      # - name: Get workflow job id
      #   id: get-job-id
      #   uses: ./.github/actions/get-workflow-job-id
      #   if: always()
      #   with:
      #     github-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Get workflow job id
        id: get-job-id
        uses: ./.github/actions/get-workflow-job-id
        if: always()
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}

      # - name: Start monitoring script
      #   id: monitor-script
      #   if: ${{ !inputs.disable-monitor }}
      #   shell: bash
      #   continue-on-error: true
      #   env:
      #     JOB_ID: ${{ steps.get-job-id.outputs.job-id }}
      #     JOB_NAME: ${{ steps.get-job-id.outputs.job-name }}
      #     WORKFLOW_NAME: ${{ github.workflow }}
      #     WORKFLOW_RUN_ID: ${{github.run_id}}
      #     MONITOR_LOG_INTERVAL: ${{ inputs.monitor-log-interval }}
      #     MONITOR_DATA_COLLECT_INTERVAL: ${{ inputs.monitor-data-collect-interval }}
      #   run: |
      #     python3 -m pip install psutil==5.9.8 dataclasses_json==0.6.7 nvidia-ml-py==11.525.84
      #     python3 -m tools.stats.monitor --log-interval "$MONITOR_LOG_INTERVAL" --data-collect-interval "$MONITOR_DATA_COLLECT_INTERVAL" > usage_log.txt 2>&1 &
      #     echo "monitor-script-pid=${!}" >> "${GITHUB_OUTPUT}"
      - name: Start monitoring script
        id: monitor-script
        if: ${{ !inputs.disable-monitor }}
        shell: bash
        continue-on-error: true
        env:
          JOB_ID: ${{ steps.get-job-id.outputs.job-id }}
          JOB_NAME: ${{ steps.get-job-id.outputs.job-name }}
          WORKFLOW_NAME: ${{ github.workflow }}
          WORKFLOW_RUN_ID: ${{github.run_id}}
          MONITOR_LOG_INTERVAL: ${{ inputs.monitor-log-interval }}
          MONITOR_DATA_COLLECT_INTERVAL: ${{ inputs.monitor-data-collect-interval }}
        run: |
          python3 -m pip install psutil==5.9.8 dataclasses_json==0.6.7 nvidia-ml-py==11.525.84
          python3 -m tools.stats.monitor --log-interval "$MONITOR_LOG_INTERVAL" --data-collect-interval "$MONITOR_DATA_COLLECT_INTERVAL" > usage_log.txt 2>&1 &
          echo "monitor-script-pid=${!}" >> "${GITHUB_OUTPUT}"

      # - name: Download build artifacts
      #   uses: ./.github/actions/download-build-artifacts
      #   with:
      #     name: ${{ inputs.build-environment }}
      #     s3-bucket: ${{ inputs.s3-bucket }}
      #     use-gha: ${{ inputs.use-gha }}
      - name: Download build artifacts
        uses: ./.github/actions/download-build-artifacts
        with:
          name: ${{ inputs.build-environment }}
          s3-bucket: ${{ inputs.s3-bucket }}
          use-gha: ${{ inputs.use-gha }}

      # - name: Download TD artifacts
      #   continue-on-error: true
      #   uses: ./.github/actions/download-td-artifacts
      - name: Download TD artifacts
        continue-on-error: true
        uses: ./.github/actions/download-td-artifacts

      # - name: Parse ref
      #   id: parse-ref
      #   run: .github/scripts/parse_ref.py
      - name: Parse ref
        id: parse-ref
        run: .github/scripts/parse_ref.py

      # - name: Check for keep-going label and re-enabled test issues
      #   # This uses the filter-test-configs action because it conveniently
      #   # checks for labels and re-enabled test issues.  It does not actually do
      #   # any filtering.  All filtering is done in the build step.
      #   id: keep-going
      #   uses: ./.github/actions/filter-test-configs
      #   with:
      #     github-token: ${{ secrets.GITHUB_TOKEN }}
      #     test-matrix: ${{ inputs.test-matrix }}
      #     job-name: ${{ steps.get-job-id.outputs.job-name }}
      - name: Check for keep-going label and re-enabled test issues
        # This uses the filter-test-configs action because it conveniently
        # checks for labels and re-enabled test issues.  It does not actually do
        # any filtering.  All filtering is done in the build step.
        id: keep-going
        uses: ./.github/actions/filter-test-configs
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          test-matrix: ${{ inputs.test-matrix }}
          job-name: ${{ steps.get-job-id.outputs.job-name }}

      # - name: Set Test step time
      #   id: test-timeout
      #   shell: bash
      #   env:
      #     JOB_TIMEOUT: ${{ matrix.mem_leak_check == 'mem_leak_check' && 600 || inputs.timeout-minutes }}
      #   run: |
      #     echo "timeout=$((JOB_TIMEOUT-30))" >> "${GITHUB_OUTPUT}"
      - name: Set Test step time
        id: test-timeout
        shell: bash
        env:
          JOB_TIMEOUT: ${{ matrix.mem_leak_check == 'mem_leak_check' && 600 || inputs.timeout-minutes }}
        run: |
          echo "timeout=$((JOB_TIMEOUT-30))" >> "${GITHUB_OUTPUT}"

      # - name: Preserve github env variables for use in docker
      #   shell: bash
      #   run: |
      #     env | grep '^GITHUB' >> "/tmp/github_env_${GITHUB_RUN_ID}"
      #     env | grep '^CI' >> "/tmp/github_env_${GITHUB_RUN_ID}"
      - name: Preserve github env variables for use in docker
        shell: bash
        run: |
          env | grep '^GITHUB' >> "/tmp/github_env_${GITHUB_RUN_ID}"
          env | grep '^CI' >> "/tmp/github_env_${GITHUB_RUN_ID}"

      # - name: Test
      #   id: test
      #   timeout-minutes: ${{ fromJson(steps.test-timeout.outputs.timeout) }}
      #   env:
      #     BUILD_ENVIRONMENT: ${{ inputs.build-environment }}
      #     PR_NUMBER: ${{ github.event.pull_request.number }}
      #     GITHUB_REPOSITORY: ${{ github.repository }}
      #     GITHUB_WORKFLOW: ${{ github.workflow }}
      #     GITHUB_JOB: ${{ github.job }}
      #     GITHUB_RUN_ID: ${{ github.run_id }}
      #     GITHUB_RUN_NUMBER: ${{ github.run_number }}
      #     GITHUB_RUN_ATTEMPT: ${{ github.run_attempt }}
      #     JOB_ID: ${{ steps.get-job-id.outputs.job-id }}
      #     JOB_NAME: ${{ steps.get-job-id.outputs.job-name }}
      #     BRANCH: ${{ steps.parse-ref.outputs.branch }}
      #     SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
      #     BASE_SHA: ${{ github.event.pull_request.base.sha || github.sha }}
      #     TEST_CONFIG: ${{ matrix.config }}
      #     SHARD_NUMBER: ${{ matrix.shard }}
      #     NUM_TEST_SHARDS: ${{ matrix.num_shards }}
      #     EXTRA_FLAGS: ${{ matrix.extra_flags || '' }}
      #     OP_BENCHMARK_TESTS: ${{ matrix.op_benchmark_tests }}
      #     REENABLED_ISSUES: ${{ steps.keep-going.outputs.reenabled-issues }}
      #     CONTINUE_THROUGH_ERROR: ${{ steps.keep-going.outputs.keep-going }}
      #     VERBOSE_TEST_LOGS: ${{ steps.keep-going.outputs.ci-verbose-test-logs }}
      #     TEST_SHOWLOCALS: ${{ steps.keep-going.outputs.ci-test-showlocals }}
      #     NO_TEST_TIMEOUT: ${{ steps.keep-going.outputs.ci-no-test-timeout }}
      #     NO_TD: ${{ steps.keep-going.outputs.ci-no-td }}
      #     TD_DISTRIBUTED: ${{ steps.keep-going.outputs.ci-td-distributed }}
      #     # Do not set SCCACHE_S3_KEY_PREFIX to share the cache between all build jobs
      #     SCCACHE_BUCKET: ${{ !contains(matrix.runner, 'b200') && 'ossci-compiler-cache-circleci-v2' || '' }}
      #     SCCACHE_REGION: ${{ !contains(matrix.runner, 'b200') && 'us-east-1' || '' }}
      #     SHM_SIZE: ${{ contains(inputs.build-environment, 'cuda') && '2g' || '1g' }}
      #     DOCKER_IMAGE: ${{ inputs.docker-image }}
      #     XLA_CUDA: ${{ contains(inputs.build-environment, 'xla') && '0' || '' }}
      #     XLA_CLANG_CACHE_S3_BUCKET_NAME: ossci-compiler-clang-cache-circleci-xla
      #     PYTORCH_TEST_CUDA_MEM_LEAK_CHECK: ${{ matrix.mem_leak_check && '1' || '0' }}
      #     PYTORCH_TEST_RERUN_DISABLED_TESTS: ${{ matrix.rerun_disabled_tests && '1' || '0' }}
      #     DASHBOARD_TAG: ${{ inputs.dashboard-tag }}
|       #     VLLM_TEST_HUGGING_FACE_TOKEN: ${{ secrets.VLLM_TEST_HUGGING_FACE_TOKEN }} | ||||
|       #     HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }} | ||||
|       #     SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }} | ||||
|       #     ARTIFACTS_FILE_SUFFIX: ${{ github.job }}-${{ matrix.config }}-${{ matrix.shard }}-${{ matrix.num_shards }}-${{ matrix.runner }}_${{ steps.get-job-id.outputs.job-id }} | ||||
|       #   run: | | ||||
|       #     set -x | ||||
|       - name: Test | ||||
|         id: test | ||||
|         timeout-minutes: ${{ fromJson(steps.test-timeout.outputs.timeout) }} | ||||
|         env: | ||||
|           BUILD_ENVIRONMENT: ${{ inputs.build-environment }} | ||||
|           PR_NUMBER: ${{ github.event.pull_request.number }} | ||||
|           GITHUB_REPOSITORY: ${{ github.repository }} | ||||
|           GITHUB_WORKFLOW: ${{ github.workflow }} | ||||
|           GITHUB_JOB: ${{ github.job }} | ||||
|           GITHUB_RUN_ID: ${{ github.run_id }} | ||||
|           GITHUB_RUN_NUMBER: ${{ github.run_number }} | ||||
|           GITHUB_RUN_ATTEMPT: ${{ github.run_attempt }} | ||||
|           JOB_ID: ${{ steps.get-job-id.outputs.job-id }} | ||||
|           JOB_NAME: ${{ steps.get-job-id.outputs.job-name }} | ||||
|           BRANCH: ${{ steps.parse-ref.outputs.branch }} | ||||
|           SHA1: ${{ github.event.pull_request.head.sha || github.sha }} | ||||
|           BASE_SHA: ${{ github.event.pull_request.base.sha || github.sha }} | ||||
|           TEST_CONFIG: ${{ matrix.config }} | ||||
|           SHARD_NUMBER: ${{ matrix.shard }} | ||||
|           NUM_TEST_SHARDS: ${{ matrix.num_shards }} | ||||
|           EXTRA_FLAGS: ${{ matrix.extra_flags || '' }} | ||||
|           OP_BENCHMARK_TESTS: ${{ matrix.op_benchmark_tests }} | ||||
|           REENABLED_ISSUES: ${{ steps.keep-going.outputs.reenabled-issues }} | ||||
|           CONTINUE_THROUGH_ERROR: ${{ steps.keep-going.outputs.keep-going }} | ||||
|           VERBOSE_TEST_LOGS: ${{ steps.keep-going.outputs.ci-verbose-test-logs }} | ||||
|           TEST_SHOWLOCALS: ${{ steps.keep-going.outputs.ci-test-showlocals }} | ||||
|           NO_TEST_TIMEOUT: ${{ steps.keep-going.outputs.ci-no-test-timeout }} | ||||
|           NO_TD: ${{ steps.keep-going.outputs.ci-no-td }} | ||||
|           TD_DISTRIBUTED: ${{ steps.keep-going.outputs.ci-td-distributed }} | ||||
|           # Do not set SCCACHE_S3_KEY_PREFIX to share the cache between all build jobs | ||||
|           SCCACHE_BUCKET: ${{ !contains(matrix.runner, 'b200') && 'ossci-compiler-cache-circleci-v2' || '' }} | ||||
|           SCCACHE_REGION: ${{ !contains(matrix.runner, 'b200') && 'us-east-1' || '' }} | ||||
|           SHM_SIZE: ${{ contains(inputs.build-environment, 'cuda') && '2g' || '1g' }} | ||||
|           DOCKER_IMAGE: ${{ inputs.docker-image }} | ||||
|           XLA_CUDA: ${{ contains(inputs.build-environment, 'xla') && '0' || '' }} | ||||
|           XLA_CLANG_CACHE_S3_BUCKET_NAME: ossci-compiler-clang-cache-circleci-xla | ||||
|           PYTORCH_TEST_CUDA_MEM_LEAK_CHECK: ${{ matrix.mem_leak_check && '1' || '0' }} | ||||
|           PYTORCH_TEST_RERUN_DISABLED_TESTS: ${{ matrix.rerun_disabled_tests && '1' || '0' }} | ||||
|           DASHBOARD_TAG: ${{ inputs.dashboard-tag }} | ||||
|           VLLM_TEST_HUGGING_FACE_TOKEN: ${{ secrets.VLLM_TEST_HUGGING_FACE_TOKEN }} | ||||
|           HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }} | ||||
|           SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }} | ||||
|           ARTIFACTS_FILE_SUFFIX: ${{ github.job }}-${{ matrix.config }}-${{ matrix.shard }}-${{ matrix.num_shards }}-${{ matrix.runner }}_${{ steps.get-job-id.outputs.job-id }} | ||||
|         run: | | ||||
|           set -x | ||||
|  | ||||
|       #     if [[ $TEST_CONFIG == 'multigpu' ]]; then | ||||
|       #       TEST_COMMAND=.ci/pytorch/multigpu-test.sh | ||||
|       #     elif [[ $BUILD_ENVIRONMENT == *onnx* ]]; then | ||||
|       #       TEST_COMMAND=.ci/onnx/test.sh | ||||
|       #     else | ||||
|       #       TEST_COMMAND=.ci/pytorch/test.sh | ||||
|       #     fi | ||||
|           if [[ $TEST_CONFIG == 'multigpu' ]]; then | ||||
|             TEST_COMMAND=.ci/pytorch/multigpu-test.sh | ||||
|           elif [[ $BUILD_ENVIRONMENT == *onnx* ]]; then | ||||
|             TEST_COMMAND=.ci/onnx/test.sh | ||||
|           else | ||||
|             TEST_COMMAND=.ci/pytorch/test.sh | ||||
|           fi | ||||
|  | ||||
|       #     # Leaving 1GB for the runner and other things | ||||
|       #     TOTAL_AVAILABLE_MEMORY_IN_GB=$(awk '/MemTotal/ { printf "%.3f \n", $2/1024/1024 - 1 }' /proc/meminfo) | ||||
|       #     # https://docs.docker.com/engine/containers/resource_constraints/#--memory-swap-details, the 3GB swap | ||||
|       #     # comes from https://github.com/pytorch/test-infra/pull/6058 | ||||
|       #     TOTAL_MEMORY_WITH_SWAP=$(("${TOTAL_AVAILABLE_MEMORY_IN_GB%.*}" + 3)) | ||||
|           # Leaving 1GB for the runner and other things | ||||
|           TOTAL_AVAILABLE_MEMORY_IN_GB=$(awk '/MemTotal/ { printf "%.3f \n", $2/1024/1024 - 1 }' /proc/meminfo) | ||||
|           # https://docs.docker.com/engine/containers/resource_constraints/#--memory-swap-details, the 3GB swap | ||||
|           # comes from https://github.com/pytorch/test-infra/pull/6058 | ||||
|           TOTAL_MEMORY_WITH_SWAP=$(("${TOTAL_AVAILABLE_MEMORY_IN_GB%.*}" + 3)) | ||||
|  | ||||
|       #     if [[ ${BUILD_ENVIRONMENT} == *"s390x"* ]]; then | ||||
|       #       SHM_OPTS= | ||||
|       #       JENKINS_USER= | ||||
|       #       # ensure that docker container cleanly exits in 12 hours | ||||
|       #       # if for some reason cleanup action doesn't stop container | ||||
|       #       # when job is cancelled | ||||
|       #       DOCKER_SHELL_CMD="sleep 12h" | ||||
|       #     else | ||||
|       #       SHM_OPTS="--shm-size=${SHM_SIZE}" | ||||
|       #       JENKINS_USER="--user jenkins" | ||||
|       #       DOCKER_SHELL_CMD= | ||||
|       #     fi | ||||
|           if [[ ${BUILD_ENVIRONMENT} == *"s390x"* ]]; then | ||||
|             SHM_OPTS= | ||||
|             JENKINS_USER= | ||||
|             # ensure that docker container cleanly exits in 12 hours | ||||
|             # if for some reason cleanup action doesn't stop container | ||||
|             # when job is cancelled | ||||
|             DOCKER_SHELL_CMD="sleep 12h" | ||||
|           else | ||||
|             SHM_OPTS="--shm-size=${SHM_SIZE}" | ||||
|             JENKINS_USER="--user jenkins" | ||||
|             DOCKER_SHELL_CMD= | ||||
|           fi | ||||
|  | ||||
|       #     # detached container should get cleaned up by teardown_ec2_linux | ||||
|       #     # TODO: Stop building test binaries as part of the build phase | ||||
|       #     # Used for GPU_FLAG, SHM_OPTS, JENKINS_USER and DOCKER_SHELL_CMD since that doesn't play nice | ||||
|       #     # shellcheck disable=SC2086,SC2090 | ||||
|       #     container_name=$(docker run \ | ||||
|       #       ${GPU_FLAG:-} \ | ||||
|       #       ${SCCACHE_SERVER_PORT_DOCKER_FLAG:-} \ | ||||
|       #       -e BUILD_ENVIRONMENT \ | ||||
|       #       -e PR_NUMBER \ | ||||
|       #       -e GITHUB_ACTIONS \ | ||||
|       #       -e GITHUB_REPOSITORY \ | ||||
|       #       -e GITHUB_WORKFLOW \ | ||||
|       #       -e GITHUB_JOB \ | ||||
|       #       -e GITHUB_RUN_ID \ | ||||
|       #       -e GITHUB_RUN_NUMBER \ | ||||
|       #       -e GITHUB_RUN_ATTEMPT \ | ||||
|       #       -e JOB_ID \ | ||||
|       #       -e JOB_NAME \ | ||||
|       #       -e BASE_SHA \ | ||||
|       #       -e BRANCH \ | ||||
|       #       -e SHA1 \ | ||||
|       #       -e AWS_DEFAULT_REGION \ | ||||
|       #       -e IN_WHEEL_TEST \ | ||||
|       #       -e SHARD_NUMBER \ | ||||
|       #       -e TEST_CONFIG \ | ||||
|       #       -e NUM_TEST_SHARDS \ | ||||
|       #       -e REENABLED_ISSUES \ | ||||
|       #       -e CONTINUE_THROUGH_ERROR \ | ||||
|       #       -e VERBOSE_TEST_LOGS \ | ||||
|       #       -e TEST_SHOWLOCALS \ | ||||
|       #       -e NO_TEST_TIMEOUT \ | ||||
|       #       -e NO_TD \ | ||||
|       #       -e TD_DISTRIBUTED \ | ||||
|       #       -e PR_LABELS \ | ||||
|       #       -e MAX_JOBS="$(nproc --ignore=2)" \ | ||||
|       #       -e SCCACHE_BUCKET \ | ||||
|       #       -e SCCACHE_REGION \ | ||||
|       #       -e XLA_CUDA \ | ||||
|       #       -e XLA_CLANG_CACHE_S3_BUCKET_NAME \ | ||||
|       #       -e PYTORCH_TEST_CUDA_MEM_LEAK_CHECK \ | ||||
|       #       -e PYTORCH_TEST_RERUN_DISABLED_TESTS \ | ||||
|       #       -e SKIP_SCCACHE_INITIALIZATION=1 \ | ||||
|       #       -e HUGGING_FACE_HUB_TOKEN \ | ||||
|       #       -e VLLM_TEST_HUGGING_FACE_TOKEN \ | ||||
|       #       -e SCRIBE_GRAPHQL_ACCESS_TOKEN \ | ||||
|       #       -e DASHBOARD_TAG \ | ||||
|       #       -e ARTIFACTS_FILE_SUFFIX \ | ||||
|       #       --memory="${TOTAL_AVAILABLE_MEMORY_IN_GB%.*}g" \ | ||||
|       #       --memory-swap="${TOTAL_MEMORY_WITH_SWAP}g" \ | ||||
|       #       --env-file="/tmp/github_env_${GITHUB_RUN_ID}" \ | ||||
|       #       --security-opt seccomp=unconfined \ | ||||
|       #       --cap-add=SYS_PTRACE \ | ||||
|       #       --ipc=host \ | ||||
|       #       ${SHM_OPTS} \ | ||||
|       #       --tty \ | ||||
|       #       --detach \ | ||||
|       #       --name="${container_name}" \ | ||||
|       #       ${JENKINS_USER} \ | ||||
|       #       -v "${GITHUB_WORKSPACE}:/var/lib/jenkins/workspace" \ | ||||
|       #       -w /var/lib/jenkins/workspace \ | ||||
|       #       "${DOCKER_IMAGE}" \ | ||||
|       #       ${DOCKER_SHELL_CMD} | ||||
|       #     ) | ||||
|       #     echo "DOCKER_CONTAINER_ID=${container_name}" >> "${GITHUB_ENV}" | ||||
|           # detached container should get cleaned up by teardown_ec2_linux | ||||
|           # TODO: Stop building test binaries as part of the build phase | ||||
|           # Used for GPU_FLAG, SHM_OPTS, JENKINS_USER and DOCKER_SHELL_CMD since that doesn't play nice | ||||
|           # shellcheck disable=SC2086,SC2090 | ||||
|           container_name=$(docker run \ | ||||
|             ${GPU_FLAG:-} \ | ||||
|             ${SCCACHE_SERVER_PORT_DOCKER_FLAG:-} \ | ||||
|             -e BUILD_ENVIRONMENT \ | ||||
|             -e PR_NUMBER \ | ||||
|             -e GITHUB_ACTIONS \ | ||||
|             -e GITHUB_REPOSITORY \ | ||||
|             -e GITHUB_WORKFLOW \ | ||||
|             -e GITHUB_JOB \ | ||||
|             -e GITHUB_RUN_ID \ | ||||
|             -e GITHUB_RUN_NUMBER \ | ||||
|             -e GITHUB_RUN_ATTEMPT \ | ||||
|             -e JOB_ID \ | ||||
|             -e JOB_NAME \ | ||||
|             -e BASE_SHA \ | ||||
|             -e BRANCH \ | ||||
|             -e SHA1 \ | ||||
|             -e AWS_DEFAULT_REGION \ | ||||
|             -e IN_WHEEL_TEST \ | ||||
|             -e SHARD_NUMBER \ | ||||
|             -e TEST_CONFIG \ | ||||
|             -e NUM_TEST_SHARDS \ | ||||
|             -e REENABLED_ISSUES \ | ||||
|             -e CONTINUE_THROUGH_ERROR \ | ||||
|             -e VERBOSE_TEST_LOGS \ | ||||
|             -e TEST_SHOWLOCALS \ | ||||
|             -e NO_TEST_TIMEOUT \ | ||||
|             -e NO_TD \ | ||||
|             -e TD_DISTRIBUTED \ | ||||
|             -e PR_LABELS \ | ||||
|             -e MAX_JOBS="$(nproc --ignore=2)" \ | ||||
|             -e SCCACHE_BUCKET \ | ||||
|             -e SCCACHE_REGION \ | ||||
|             -e XLA_CUDA \ | ||||
|             -e XLA_CLANG_CACHE_S3_BUCKET_NAME \ | ||||
|             -e PYTORCH_TEST_CUDA_MEM_LEAK_CHECK \ | ||||
|             -e PYTORCH_TEST_RERUN_DISABLED_TESTS \ | ||||
|             -e SKIP_SCCACHE_INITIALIZATION=1 \ | ||||
|             -e HUGGING_FACE_HUB_TOKEN \ | ||||
|             -e VLLM_TEST_HUGGING_FACE_TOKEN \ | ||||
|             -e SCRIBE_GRAPHQL_ACCESS_TOKEN \ | ||||
|             -e DASHBOARD_TAG \ | ||||
|             -e ARTIFACTS_FILE_SUFFIX \ | ||||
|             --memory="${TOTAL_AVAILABLE_MEMORY_IN_GB%.*}g" \ | ||||
|             --memory-swap="${TOTAL_MEMORY_WITH_SWAP}g" \ | ||||
|             --env-file="/tmp/github_env_${GITHUB_RUN_ID}" \ | ||||
|             --security-opt seccomp=unconfined \ | ||||
|             --cap-add=SYS_PTRACE \ | ||||
|             --ipc=host \ | ||||
|             ${SHM_OPTS} \ | ||||
|             --tty \ | ||||
|             --detach \ | ||||
|             --name="${container_name}" \ | ||||
|             ${JENKINS_USER} \ | ||||
|             -v "${GITHUB_WORKSPACE}:/var/lib/jenkins/workspace" \ | ||||
|             -w /var/lib/jenkins/workspace \ | ||||
|             "${DOCKER_IMAGE}" \ | ||||
|             ${DOCKER_SHELL_CMD} | ||||
|           ) | ||||
|           echo "DOCKER_CONTAINER_ID=${container_name}" >> "${GITHUB_ENV}" | ||||
|  | ||||
|       #     if [[ ${BUILD_ENVIRONMENT} == *"s390x"* ]]; then | ||||
|       #       docker exec -t "${container_name}" sh -c "python3 -m pip install -r .ci/docker/requirements-ci.txt" | ||||
|       #     fi | ||||
|           if [[ ${BUILD_ENVIRONMENT} == *"s390x"* ]]; then | ||||
|             docker exec -t "${container_name}" sh -c "python3 -m pip install -r .ci/docker/requirements-ci.txt" | ||||
|           fi | ||||
|  | ||||
|       #     docker exec -t "${container_name}" sh -c "python3 -m pip install $(echo dist/*.whl)[opt-einsum] && ${TEST_COMMAND}" | ||||
|           docker exec -t "${container_name}" sh -c "python3 -m pip install $(echo dist/*.whl)[opt-einsum] && ${TEST_COMMAND}" | ||||
|  | ||||
|       # - name: Upload pytest cache if tests failed | ||||
|       #   uses: ./.github/actions/pytest-cache-upload | ||||
|       #   continue-on-error: true | ||||
|       #   if: failure() && steps.test.conclusion && steps.test.conclusion == 'failure' && inputs.build-environment != 'linux-s390x-binary-manywheel' | ||||
|       #   with: | ||||
|       #     cache_dir: .pytest_cache | ||||
|       #     shard: ${{ matrix.shard }} | ||||
|       #     sha: ${{ github.event.pull_request.head.sha || github.sha }} | ||||
|       #     test_config: ${{ matrix.config }} | ||||
|       #     job_identifier: ${{ github.workflow }}_${{ inputs.build-environment }} | ||||
|       - name: Upload pytest cache if tests failed | ||||
|         uses: ./.github/actions/pytest-cache-upload | ||||
|         continue-on-error: true | ||||
|         if: failure() && steps.test.conclusion && steps.test.conclusion == 'failure' && inputs.build-environment != 'linux-s390x-binary-manywheel' | ||||
|         with: | ||||
|           cache_dir: .pytest_cache | ||||
|           shard: ${{ matrix.shard }} | ||||
|           sha: ${{ github.event.pull_request.head.sha || github.sha }} | ||||
|           test_config: ${{ matrix.config }} | ||||
|           job_identifier: ${{ github.workflow }}_${{ inputs.build-environment }} | ||||
|  | ||||
|       # - name: Authenticate with AWS | ||||
|       #   if: ${{ always() && contains(matrix.runner, 'b200') }} | ||||
|       #   uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722 # v4.1.0 | ||||
|       #   with: | ||||
|       #     role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_upload-benchmark-results | ||||
|       #     # The max duration enforced by the server side | ||||
|       #     role-duration-seconds: 18000 | ||||
|       #     aws-region: us-east-1 | ||||
|       - name: Authenticate with AWS | ||||
|         if: ${{ always() && contains(matrix.runner, 'b200') }} | ||||
|         uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722 # v4.1.0 | ||||
|         with: | ||||
|           role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_upload-benchmark-results | ||||
|           # The max duration enforced by the server side | ||||
|           role-duration-seconds: 18000 | ||||
|           aws-region: us-east-1 | ||||
|  | ||||
|       # - name: Upload the benchmark results | ||||
|       #   uses: pytorch/test-infra/.github/actions/upload-benchmark-results@main | ||||
|       #   if: inputs.build-environment != 'linux-s390x-binary-manywheel' | ||||
|       #   with: | ||||
|       #     benchmark-results-dir: test/test-reports | ||||
|       #     dry-run: false | ||||
|       #     schema-version: v3 | ||||
|       #     github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|       - name: Upload the benchmark results | ||||
|         uses: pytorch/test-infra/.github/actions/upload-benchmark-results@main | ||||
|         if: inputs.build-environment != 'linux-s390x-binary-manywheel' | ||||
|         with: | ||||
|           benchmark-results-dir: test/test-reports | ||||
|           dry-run: false | ||||
|           schema-version: v3 | ||||
|           github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|  | ||||
|       # - name: Print remaining test logs | ||||
|       #   shell: bash | ||||
|       #   if: always() && steps.test.conclusion | ||||
|       #   run: | | ||||
|       #     cat test/**/*_toprint.log || true | ||||
|       - name: Print remaining test logs | ||||
|         shell: bash | ||||
|         if: always() && steps.test.conclusion | ||||
|         run: | | ||||
|           cat test/**/*_toprint.log || true | ||||
|  | ||||
|       # - name: Stop monitoring script | ||||
|       #   if: ${{ always() && steps.monitor-script.outputs.monitor-script-pid }} | ||||
|       #   shell: bash | ||||
|       #   continue-on-error: true | ||||
|       #   env: | ||||
|       #     MONITOR_SCRIPT_PID: ${{ steps.monitor-script.outputs.monitor-script-pid }} | ||||
|       #   run: | | ||||
|       #     kill "$MONITOR_SCRIPT_PID" | ||||
|       - name: Stop monitoring script | ||||
|         if: ${{ always() && steps.monitor-script.outputs.monitor-script-pid }} | ||||
|         shell: bash | ||||
|         continue-on-error: true | ||||
|         env: | ||||
|           MONITOR_SCRIPT_PID: ${{ steps.monitor-script.outputs.monitor-script-pid }} | ||||
|         run: | | ||||
|           kill "$MONITOR_SCRIPT_PID" | ||||
|  | ||||
|       # - name: Upload test artifacts | ||||
|       #   uses: ./.github/actions/upload-test-artifacts | ||||
|       #   if: always() && steps.test.conclusion && steps.test.conclusion != 'skipped' | ||||
|       #   with: | ||||
|       #     file-suffix: ${{ github.job }}-${{ matrix.config }}-${{ matrix.shard }}-${{ matrix.num_shards }}-${{ matrix.runner }}_${{ steps.get-job-id.outputs.job-id }} | ||||
|       #     use-gha: ${{ inputs.use-gha }} | ||||
|       #     s3-bucket: ${{ inputs.s3-bucket }} | ||||
|       - name: Upload test artifacts | ||||
|         uses: ./.github/actions/upload-test-artifacts | ||||
|         if: always() && steps.test.conclusion && steps.test.conclusion != 'skipped' | ||||
|         with: | ||||
|           file-suffix: ${{ github.job }}-${{ matrix.config }}-${{ matrix.shard }}-${{ matrix.num_shards }}-${{ matrix.runner }}_${{ steps.get-job-id.outputs.job-id }} | ||||
|           use-gha: ${{ inputs.use-gha }} | ||||
|           s3-bucket: ${{ inputs.s3-bucket }} | ||||
|  | ||||
|       # - name: Collect backtraces from coredumps (if any) | ||||
|       #   if: always() | ||||
|       #   run: | | ||||
|       #     # shellcheck disable=SC2156 | ||||
|       #     find . -iname "core.[1-9]*" -exec docker exec "${DOCKER_CONTAINER_ID}" sh -c "gdb python {} -ex 'bt' -ex 'q'" \; | ||||
|       - name: Collect backtraces from coredumps (if any) | ||||
|         if: always() | ||||
|         run: | | ||||
|           # shellcheck disable=SC2156 | ||||
|           find . -iname "core.[1-9]*" -exec docker exec "${DOCKER_CONTAINER_ID}" sh -c "gdb python {} -ex 'bt' -ex 'q'" \; | ||||
|  | ||||
|       # - name: Store Core dumps on S3 | ||||
|       #   uses: seemethere/upload-artifact-s3@baba72d0712b404f646cebe0730933554ebce96a # v5.1.0 | ||||
|       #   if: failure() | ||||
|       #   with: | ||||
|       #     name: coredumps-${{ matrix.config }}-${{ matrix.shard }}-${{ matrix.num_shards }}-${{ matrix.runner }} | ||||
|       #     retention-days: 14 | ||||
|       #     if-no-files-found: ignore | ||||
|       #     path: ./**/core.[1-9]* | ||||
|       - name: Store Core dumps on S3 | ||||
|         uses: seemethere/upload-artifact-s3@baba72d0712b404f646cebe0730933554ebce96a # v5.1.0 | ||||
|         if: failure() | ||||
|         with: | ||||
|           name: coredumps-${{ matrix.config }}-${{ matrix.shard }}-${{ matrix.num_shards }}-${{ matrix.runner }} | ||||
|           retention-days: 14 | ||||
|           if-no-files-found: ignore | ||||
|           path: ./**/core.[1-9]* | ||||
|  | ||||
|       # - name: Upload utilization stats | ||||
|       #   if: ${{ always() && steps.test.conclusion && steps.test.conclusion != 'skipped' && !inputs.disable-monitor && inputs.build-environment != 'linux-s390x-binary-manywheel' }} | ||||
|       #   continue-on-error: true | ||||
|       #   uses: ./.github/actions/upload-utilization-stats | ||||
|       #   with: | ||||
|       #     job_id: ${{ steps.get-job-id.outputs.job-id }} | ||||
|       #     job_name: ${{ steps.get-job-id.outputs.job-name }} | ||||
|       #     workflow_name: ${{ github.workflow }} | ||||
|       #     workflow_run_id: ${{github.run_id}} | ||||
|       #     workflow_attempt: ${{github.run_attempt}} | ||||
|       - name: Upload utilization stats | ||||
|         if: ${{ always() && steps.test.conclusion && steps.test.conclusion != 'skipped' && !inputs.disable-monitor && inputs.build-environment != 'linux-s390x-binary-manywheel' }} | ||||
|         continue-on-error: true | ||||
|         uses: ./.github/actions/upload-utilization-stats | ||||
|         with: | ||||
|           job_id: ${{ steps.get-job-id.outputs.job-id }} | ||||
|           job_name: ${{ steps.get-job-id.outputs.job-name }} | ||||
|           workflow_name: ${{ github.workflow }} | ||||
|           workflow_run_id: ${{github.run_id}} | ||||
|           workflow_attempt: ${{github.run_attempt}} | ||||
|  | ||||
|       # - name: Teardown Linux | ||||
|       #   uses: pytorch/test-infra/.github/actions/teardown-linux@main | ||||
|       #   if: always() && steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'false' | ||||
|       - name: Teardown Linux | ||||
|         uses: pytorch/test-infra/.github/actions/teardown-linux@main | ||||
|         if: always() && steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'false' | ||||
|  | ||||
|       # # NB: We are currently having an intermittent GPU-related issue on G5 runners with | ||||
|       # # A10G GPU. Once this happens, trying to reset the GPU as done in setup-nvidia does | ||||
|       # # not seem to help. Here are some symptoms: | ||||
|       # #   * Calling nvidia-smi timeouts after 60 second | ||||
|       # #   * Fail to run nvidia-smi with an unable to determine the device handle for GPU | ||||
|       # #     unknown error | ||||
|       # #   * Test fails with a missing CUDA GPU error when initializing CUDA in PyTorch | ||||
|       # #   * Run docker --gpus all fails with error response from daemon | ||||
|       # # | ||||
|       # # As both the root cause and recovery path are unclear, let's take the runner out of | ||||
|       # # service so that it doesn't get any more jobs | ||||
|       # - name: Check NVIDIA driver installation step | ||||
|       #   if: failure() && steps.install-nvidia-driver.outcome && steps.install-nvidia-driver.outcome != 'skipped' | ||||
|       #   shell: bash | ||||
|       #   run: | | ||||
|       #     set +e | ||||
|       #     set -x | ||||
|       # NB: We are currently having an intermittent GPU-related issue on G5 runners with | ||||
|       # A10G GPU. Once this happens, trying to reset the GPU as done in setup-nvidia does | ||||
|       # not seem to help. Here are some symptoms: | ||||
|       #   * Calling nvidia-smi timeouts after 60 second | ||||
|       #   * Fail to run nvidia-smi with an unable to determine the device handle for GPU | ||||
|       #     unknown error | ||||
|       #   * Test fails with a missing CUDA GPU error when initializing CUDA in PyTorch | ||||
|       #   * Run docker --gpus all fails with error response from daemon | ||||
|       # | ||||
|       # As both the root cause and recovery path are unclear, let's take the runner out of | ||||
|       # service so that it doesn't get any more jobs | ||||
|       - name: Check NVIDIA driver installation step | ||||
|         if: failure() && steps.install-nvidia-driver.outcome && steps.install-nvidia-driver.outcome != 'skipped' | ||||
|         shell: bash | ||||
|         run: | | ||||
|           set +e | ||||
|           set -x | ||||
|  | ||||
|       #     nvidia-smi | ||||
|       #     # NB: Surprisingly, nvidia-smi command returns successfully with return code 0 even in | ||||
|       #     # the case where the driver has already crashed as it still can get the driver version | ||||
|       #     # and some basic information like the bus ID.  However, the rest of the information | ||||
|       #     # would be missing (ERR!), for example: | ||||
|       #     # | ||||
|       #     # +-----------------------------------------------------------------------------+ | ||||
|       #     # | NVIDIA-SMI 525.89.02    Driver Version: 525.89.02    CUDA Version: 12.0     | | ||||
|       #     # |-------------------------------+----------------------+----------------------+ | ||||
|       #     # | GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC | | ||||
|       #     # | Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. | | ||||
|       #     # |                               |                      |               MIG M. | | ||||
|       #     # |===============================+======================+======================| | ||||
|       #     # |   0  ERR!                Off  | 00000000:00:1E.0 Off |                 ERR! | | ||||
|       #     # |ERR!  ERR! ERR!    ERR! / ERR! |   4184MiB / 23028MiB |    ERR!      Default | | ||||
|       #     # |                               |                      |                 ERR! | | ||||
|       #     # +-------------------------------+----------------------+----------------------+ | ||||
|       #     # | ||||
|       #     # +-----------------------------------------------------------------------------+ | ||||
|       #     # | Processes:                                                                  | | ||||
|       #     # |  GPU   GI   CI        PID   Type   Process name                  GPU Memory | | ||||
|       #     # |        ID   ID                                                   Usage      | | ||||
|       #     # |=============================================================================| | ||||
|       #     # +-----------------------------------------------------------------------------+ | ||||
|       #     # | ||||
|       #     # This should be reported as a failure instead as it will guarantee to fail when | ||||
|       #     # Docker tries to run with --gpus all | ||||
|       #     # | ||||
|       #     # So, the correct check here is to query one of the missing piece of info like | ||||
|       #     # GPU name, so that the command can fail accordingly | ||||
|       #     nvidia-smi --query-gpu=gpu_name --format=csv,noheader --id=0 | ||||
|       #     NVIDIA_SMI_STATUS=$? | ||||
|           nvidia-smi | ||||
|           # NB: Surprisingly, nvidia-smi command returns successfully with return code 0 even in | ||||
|           # the case where the driver has already crashed as it still can get the driver version | ||||
|           # and some basic information like the bus ID.  However, the rest of the information | ||||
|           # would be missing (ERR!), for example: | ||||
|           # | ||||
|           # +-----------------------------------------------------------------------------+ | ||||
|           # | NVIDIA-SMI 525.89.02    Driver Version: 525.89.02    CUDA Version: 12.0     | | ||||
|           # |-------------------------------+----------------------+----------------------+ | ||||
|           # | GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC | | ||||
|           # | Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. | | ||||
|           # |                               |                      |               MIG M. | | ||||
|           # |===============================+======================+======================| | ||||
|           # |   0  ERR!                Off  | 00000000:00:1E.0 Off |                 ERR! | | ||||
|           # |ERR!  ERR! ERR!    ERR! / ERR! |   4184MiB / 23028MiB |    ERR!      Default | | ||||
|           # |                               |                      |                 ERR! | | ||||
|           # +-------------------------------+----------------------+----------------------+ | ||||
|           # | ||||
|           # +-----------------------------------------------------------------------------+ | ||||
|           # | Processes:                                                                  | | ||||
|           # |  GPU   GI   CI        PID   Type   Process name                  GPU Memory | | ||||
|           # |        ID   ID                                                   Usage      | | ||||
|           # |=============================================================================| | ||||
|           # +-----------------------------------------------------------------------------+ | ||||
|           # | ||||
|           # This should be reported as a failure instead as it will guarantee to fail when | ||||
|           # Docker tries to run with --gpus all | ||||
|           # | ||||
|           # So, the correct check here is to query one of the missing piece of info like | ||||
|           # GPU name, so that the command can fail accordingly | ||||
|           nvidia-smi --query-gpu=gpu_name --format=csv,noheader --id=0 | ||||
|           NVIDIA_SMI_STATUS=$? | ||||
|  | ||||
|       #     # These are acceptable return code from nvidia-smi as copied from setup-nvidia GitHub action | ||||
|       #     if [ "$NVIDIA_SMI_STATUS" -ne 0 ] && [ "$NVIDIA_SMI_STATUS" -ne 14 ]; then | ||||
|       #       echo "NVIDIA driver installation has failed, shutting down the runner..." | ||||
|       #       .github/scripts/stop_runner_service.sh | ||||
|       #     fi | ||||
|           # These are acceptable return code from nvidia-smi as copied from setup-nvidia GitHub action | ||||
|           if [ "$NVIDIA_SMI_STATUS" -ne 0 ] && [ "$NVIDIA_SMI_STATUS" -ne 14 ]; then | ||||
|             echo "NVIDIA driver installation has failed, shutting down the runner..." | ||||
|             .github/scripts/stop_runner_service.sh | ||||
|           fi | ||||
|  | ||||
|       #     # For runner with multiple GPUs, we also want to confirm that the number of GPUs are the | ||||
|       #     # power of 2, i.e. 1, 2, 4, or 8. This is to avoid flaky test issue when one GPU fails | ||||
|       #     # https://github.com/pytorch/test-infra/issues/4000 | ||||
|       #     GPU_COUNT=$(nvidia-smi --list-gpus | wc -l) | ||||
|       #     NVIDIA_SMI_STATUS=$? | ||||
|           # For runner with multiple GPUs, we also want to confirm that the number of GPUs are the | ||||
|           # power of 2, i.e. 1, 2, 4, or 8. This is to avoid flaky test issue when one GPU fails | ||||
|           # https://github.com/pytorch/test-infra/issues/4000 | ||||
|           GPU_COUNT=$(nvidia-smi --list-gpus | wc -l) | ||||
|           NVIDIA_SMI_STATUS=$? | ||||
|  | ||||
|       #     # These are acceptable return code from nvidia-smi as copied from setup-nvidia GitHub action | ||||
|       #     if [ "$NVIDIA_SMI_STATUS" -ne 0 ] && [ "$NVIDIA_SMI_STATUS" -ne 14 ]; then | ||||
|       #       echo "NVIDIA driver installation has failed, shutting down the runner..." | ||||
|       #       .github/scripts/stop_runner_service.sh | ||||
|       #     fi | ||||
|           # These are acceptable return code from nvidia-smi as copied from setup-nvidia GitHub action | ||||
|           if [ "$NVIDIA_SMI_STATUS" -ne 0 ] && [ "$NVIDIA_SMI_STATUS" -ne 14 ]; then | ||||
|             echo "NVIDIA driver installation has failed, shutting down the runner..." | ||||
|             .github/scripts/stop_runner_service.sh | ||||
|           fi | ||||
|  | ||||
|       #     # Check the GPU count to be a power of 2 | ||||
|       #     if [ "$GPU_COUNT" -le 8 ] && [ "$GPU_COUNT" -ne 1 ] && [ "$GPU_COUNT" -ne 2 ] && [ "$GPU_COUNT" -ne 4 ] && [ "$GPU_COUNT" -ne 8 ]; then | ||||
|       #       echo "NVIDIA driver detects $GPU_COUNT GPUs. The runner has a broken GPU, shutting it down..." | ||||
|       #       .github/scripts/stop_runner_service.sh | ||||
|       #     fi | ||||
|           # Check the GPU count to be a power of 2 | ||||
|           if [ "$GPU_COUNT" -le 8 ] && [ "$GPU_COUNT" -ne 1 ] && [ "$GPU_COUNT" -ne 2 ] && [ "$GPU_COUNT" -ne 4 ] && [ "$GPU_COUNT" -ne 8 ]; then | ||||
|             echo "NVIDIA driver detects $GPU_COUNT GPUs. The runner has a broken GPU, shutting it down..." | ||||
|             .github/scripts/stop_runner_service.sh | ||||
|           fi | ||||
|  | ||||
|       # - name: Cleanup docker | ||||
|       #   if: always() && inputs.build-environment == 'linux-s390x-binary-manywheel' | ||||
|       #   shell: bash | ||||
|       #   run: | | ||||
|       #     # on s390x stop the container for clean worker stop | ||||
|       #     docker stop -a || true | ||||
|       #     docker kill -a || true | ||||
|       - name: Cleanup docker | ||||
|         if: always() && inputs.build-environment == 'linux-s390x-binary-manywheel' | ||||
|         shell: bash | ||||
|         run: | | ||||
|           # on s390x stop the container for clean worker stop | ||||
|           docker stop -a || true | ||||
|           docker kill -a || true | ||||
|  | ||||
							
								
								
									
										13
									
								
								.github/workflows/_rocm-test.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										13
									
								
								.github/workflows/_rocm-test.yml
									
									
									
									
										vendored
									
									
								
							| @ -102,6 +102,19 @@ jobs: | ||||
|             exit 1 | ||||
|           fi | ||||
|  | ||||
|       - name: configure aws credentials | ||||
|         id: aws_creds | ||||
|         uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722 # v4.1.0 | ||||
|         with: | ||||
|           role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_s3_and_ecr_read_only | ||||
|           aws-region: us-east-1 | ||||
|           role-duration-seconds: 18000 | ||||
|  | ||||
|       - name: Login to Amazon ECR | ||||
|         id: login-ecr | ||||
|         continue-on-error: true | ||||
|         uses: aws-actions/amazon-ecr-login@062b18b96a7aff071d4dc91bc00c4c1a7945b076 # v2.0.1 | ||||
|  | ||||
|       - name: Calculate docker image | ||||
|         id: calculate-docker-image | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|  | ||||
							
								
								
									
										4
									
								
								.github/workflows/build-manywheel-images.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										4
									
								
								.github/workflows/build-manywheel-images.yml
									
									
									
									
										vendored
									
									
								
							| @ -46,12 +46,10 @@ jobs: | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         include: [ | ||||
|           { name: "manylinux2_28-builder",          tag: "cuda13.0",          runner: "linux.9xlarge.ephemeral" }, | ||||
|           { name: "manylinux2_28-builder",          tag: "cuda13.0",         runner: "linux.9xlarge.ephemeral" }, | ||||
|           { name: "manylinux2_28-builder",          tag: "cuda12.8",          runner: "linux.9xlarge.ephemeral" }, | ||||
|           { name: "manylinux2_28-builder",          tag: "cuda12.9",          runner: "linux.9xlarge.ephemeral" }, | ||||
|           { name: "manylinux2_28-builder",          tag: "cuda12.6",          runner: "linux.9xlarge.ephemeral" }, | ||||
|           { name: "manylinuxaarch64-builder",       tag: "cuda13.0",          runner: "linux.arm64.2xlarge.ephemeral" }, | ||||
|           { name: "manylinuxaarch64-builder",       tag: "cuda12.9",          runner: "linux.arm64.2xlarge.ephemeral" }, | ||||
|           { name: "manylinuxaarch64-builder",       tag: "cuda12.8",          runner: "linux.arm64.2xlarge.ephemeral" }, | ||||
|           { name: "manylinuxaarch64-builder",       tag: "cuda12.6",          runner: "linux.arm64.2xlarge.ephemeral" }, | ||||
|           { name: "manylinux2_28-builder",          tag: "rocm6.4",           runner: "linux.9xlarge.ephemeral" }, | ||||
|  | ||||
							
								
								
									
										322
									
								
								.github/workflows/generated-linux-aarch64-binary-manywheel-nightly.yml
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										322
									
								
								.github/workflows/generated-linux-aarch64-binary-manywheel-nightly.yml
									
									
									
										generated
									
									
										vendored
									
									
								
							| @ -204,52 +204,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_10-cuda-aarch64-12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.arm64.r7g.12xlarge.memory | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_10-cuda-aarch64-12_9 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_10-cuda-aarch64-12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_10-cuda-aarch64-12_9-build | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|       build_name: manywheel-py3_10-cuda-aarch64-12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_10-cuda-aarch64-13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -453,52 +407,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_11-cuda-aarch64-12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.11" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.arm64.r7g.12xlarge.memory | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_11-cuda-aarch64-12_9 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_11-cuda-aarch64-12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_11-cuda-aarch64-12_9-build | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.11" | ||||
|       build_name: manywheel-py3_11-cuda-aarch64-12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_11-cuda-aarch64-13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -702,52 +610,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_12-cuda-aarch64-12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.arm64.r7g.12xlarge.memory | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_12-cuda-aarch64-12_9 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_12-cuda-aarch64-12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_12-cuda-aarch64-12_9-build | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|       build_name: manywheel-py3_12-cuda-aarch64-12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_12-cuda-aarch64-13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -951,52 +813,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_13-cuda-aarch64-12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.13" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.arm64.r7g.12xlarge.memory | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_13-cuda-aarch64-12_9 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_13-cuda-aarch64-12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_13-cuda-aarch64-12_9-build | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.13" | ||||
|       build_name: manywheel-py3_13-cuda-aarch64-12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_13-cuda-aarch64-13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -1200,52 +1016,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_13t-cuda-aarch64-12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.13t" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.arm64.r7g.12xlarge.memory | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_13t-cuda-aarch64-12_9 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_13t-cuda-aarch64-12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_13t-cuda-aarch64-12_9-build | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.13t" | ||||
|       build_name: manywheel-py3_13t-cuda-aarch64-12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_13t-cuda-aarch64-13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -1449,52 +1219,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_14-cuda-aarch64-12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.14" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.arm64.r7g.12xlarge.memory | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_14-cuda-aarch64-12_9 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_14-cuda-aarch64-12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_14-cuda-aarch64-12_9-build | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.14" | ||||
|       build_name: manywheel-py3_14-cuda-aarch64-12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_14-cuda-aarch64-13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -1698,52 +1422,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_14t-cuda-aarch64-12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.14t" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.arm64.r7g.12xlarge.memory | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_14t-cuda-aarch64-12_9 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_14t-cuda-aarch64-12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_14t-cuda-aarch64-12_9-build | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.14t" | ||||
|       build_name: manywheel-py3_14t-cuda-aarch64-12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_14t-cuda-aarch64-13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|  | ||||
							
								
								
									
										74
									
								
								.github/workflows/generated-linux-binary-libtorch-nightly.yml
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										74
									
								
								.github/workflows/generated-linux-binary-libtorch-nightly.yml
									
									
									
										generated
									
									
										vendored
									
									
								
							| @ -248,74 +248,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   libtorch-cuda12_9-shared-with-deps-release-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: libtorch | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: libtorch-cxx11-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       LIBTORCH_CONFIG: release | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: libtorch-cuda12_9-shared-with-deps-release | ||||
|       build_environment: linux-binary-libtorch | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   libtorch-cuda12_9-shared-with-deps-release-test:  # Testing | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: | ||||
|       - libtorch-cuda12_9-shared-with-deps-release-build | ||||
|       - get-label-type | ||||
|     uses: ./.github/workflows/_binary-test-linux.yml | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: libtorch | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: libtorch-cxx11-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       LIBTORCH_CONFIG: release | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|       build_name: libtorch-cuda12_9-shared-with-deps-release | ||||
|       build_environment: linux-binary-libtorch | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   libtorch-cuda12_9-shared-with-deps-release-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: libtorch-cuda12_9-shared-with-deps-release-test | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: libtorch | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: libtorch-cxx11-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       LIBTORCH_CONFIG: release | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|       build_name: libtorch-cuda12_9-shared-with-deps-release | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   libtorch-cuda13_0-shared-with-deps-release-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -426,9 +358,6 @@ jobs: | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm6.4 | ||||
|       LIBTORCH_CONFIG: release | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -544,9 +473,6 @@ jobs: | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm7.0 | ||||
|       LIBTORCH_CONFIG: release | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
|  | ||||
							
								
								
									
										504
									
								
								.github/workflows/generated-linux-binary-manywheel-nightly.yml
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										504
									
								
								.github/workflows/generated-linux-binary-manywheel-nightly.yml
									
									
									
										generated
									
									
										vendored
									
									
								
							| @ -241,72 +241,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_10-cuda12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: manywheel-py3_10-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_10-cuda12_9-test:  # Testing | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: | ||||
|       - manywheel-py3_10-cuda12_9-build | ||||
|       - get-label-type | ||||
|     uses: ./.github/workflows/_binary-test-linux.yml | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|       build_name: manywheel-py3_10-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_10-cuda12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_10-cuda12_9-test | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|       build_name: manywheel-py3_10-cuda12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_10-cuda13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -413,9 +347,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm6.4 | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -528,9 +459,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm7.0 | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -907,72 +835,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_11-cuda12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.11" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: manywheel-py3_11-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_11-cuda12_9-test:  # Testing | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: | ||||
|       - manywheel-py3_11-cuda12_9-build | ||||
|       - get-label-type | ||||
|     uses: ./.github/workflows/_binary-test-linux.yml | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.11" | ||||
|       build_name: manywheel-py3_11-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_11-cuda12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_11-cuda12_9-test | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.11" | ||||
|       build_name: manywheel-py3_11-cuda12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_11-cuda13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -1079,9 +941,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm6.4 | ||||
|       DESIRED_PYTHON: "3.11" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -1194,9 +1053,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm7.0 | ||||
|       DESIRED_PYTHON: "3.11" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -1573,72 +1429,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_12-cuda12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: manywheel-py3_12-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_12-cuda12_9-test:  # Testing | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: | ||||
|       - manywheel-py3_12-cuda12_9-build | ||||
|       - get-label-type | ||||
|     uses: ./.github/workflows/_binary-test-linux.yml | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|       build_name: manywheel-py3_12-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_12-cuda12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_12-cuda12_9-test | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|       build_name: manywheel-py3_12-cuda12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_12-cuda13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -1745,9 +1535,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm6.4 | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -1860,9 +1647,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm7.0 | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -2239,72 +2023,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_13-cuda12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.13" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: manywheel-py3_13-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_13-cuda12_9-test:  # Testing | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: | ||||
|       - manywheel-py3_13-cuda12_9-build | ||||
|       - get-label-type | ||||
|     uses: ./.github/workflows/_binary-test-linux.yml | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.13" | ||||
|       build_name: manywheel-py3_13-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_13-cuda12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_13-cuda12_9-test | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.13" | ||||
|       build_name: manywheel-py3_13-cuda12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_13-cuda13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -2411,9 +2129,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm6.4 | ||||
|       DESIRED_PYTHON: "3.13" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -2526,9 +2241,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm7.0 | ||||
|       DESIRED_PYTHON: "3.13" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -2905,72 +2617,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_13t-cuda12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.13t" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: manywheel-py3_13t-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_13t-cuda12_9-test:  # Testing | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: | ||||
|       - manywheel-py3_13t-cuda12_9-build | ||||
|       - get-label-type | ||||
|     uses: ./.github/workflows/_binary-test-linux.yml | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.13t" | ||||
|       build_name: manywheel-py3_13t-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_13t-cuda12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_13t-cuda12_9-test | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.13t" | ||||
|       build_name: manywheel-py3_13t-cuda12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_13t-cuda13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -3077,9 +2723,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm6.4 | ||||
|       DESIRED_PYTHON: "3.13t" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -3192,9 +2835,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm7.0 | ||||
|       DESIRED_PYTHON: "3.13t" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -3571,72 +3211,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_14-cuda12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.14" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: manywheel-py3_14-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_14-cuda12_9-test:  # Testing | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: | ||||
|       - manywheel-py3_14-cuda12_9-build | ||||
|       - get-label-type | ||||
|     uses: ./.github/workflows/_binary-test-linux.yml | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.14" | ||||
|       build_name: manywheel-py3_14-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_14-cuda12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_14-cuda12_9-test | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.14" | ||||
|       build_name: manywheel-py3_14-cuda12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_14-cuda13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -3743,9 +3317,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm6.4 | ||||
|       DESIRED_PYTHON: "3.14" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -3858,9 +3429,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm7.0 | ||||
|       DESIRED_PYTHON: "3.14" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -4237,72 +3805,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_14t-cuda12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.14t" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: manywheel-py3_14t-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_14t-cuda12_9-test:  # Testing | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: | ||||
|       - manywheel-py3_14t-cuda12_9-build | ||||
|       - get-label-type | ||||
|     uses: ./.github/workflows/_binary-test-linux.yml | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.14t" | ||||
|       build_name: manywheel-py3_14t-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_14t-cuda12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_14t-cuda12_9-test | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.14t" | ||||
|       build_name: manywheel-py3_14t-cuda12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_14t-cuda13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -4409,9 +3911,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm6.4 | ||||
|       DESIRED_PYTHON: "3.14t" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -4524,9 +4023,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm7.0 | ||||
|       DESIRED_PYTHON: "3.14t" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
|  | ||||
							
								
								
									
										250
									
								
								.github/workflows/generated-windows-binary-libtorch-debug-nightly.yml
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										250
									
								
								.github/workflows/generated-windows-binary-libtorch-debug-nightly.yml
									
									
									
										generated
									
									
										vendored
									
									
								
							| @ -788,256 +788,6 @@ jobs: | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|   libtorch-cuda12_9-shared-with-deps-debug-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
|       PACKAGE_TYPE: libtorch | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       SKIP_ALL_TESTS: 1 | ||||
|       LIBTORCH_CONFIG: debug | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|       # This is a dummy value for libtorch to work correctly with our batch scripts | ||||
|       # without this value pip does not get installed for some reason | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|     steps: | ||||
|       # NOTE: These environment variables are put here so that they can be applied on every job equally | ||||
|       #       They are also here because setting them at a workflow level doesn't give us access to the | ||||
|       #       runner.temp variable, which we need. | ||||
|       - name: Populate binary env | ||||
|         shell: bash | ||||
|         run: | | ||||
|           echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}" | ||||
|           echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}" | ||||
|           echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}" | ||||
|       - name: Display EC2 information | ||||
|         shell: bash | ||||
|         run: | | ||||
|           set -euo pipefail | ||||
|           function get_ec2_metadata() { | ||||
|             # Pulled from instance metadata endpoint for EC2 | ||||
|             # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html | ||||
|             category=$1 | ||||
|             curl -H "X-aws-ec2-metadata-token: $(curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 30")" -fsSL "http://169.254.169.254/latest/meta-data/${category}" | ||||
|           } | ||||
|           echo "ami-id: $(get_ec2_metadata ami-id)" | ||||
|           echo "instance-id: $(get_ec2_metadata instance-id)" | ||||
|           echo "instance-type: $(get_ec2_metadata instance-type)" | ||||
|           echo "system info $(uname -a)" | ||||
|       - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)" | ||||
|         uses: pytorch/test-infra/.github/actions/setup-ssh@main | ||||
|         continue-on-error: true | ||||
|         with: | ||||
|           github-secret: ${{ secrets.GITHUB_TOKEN }} | ||||
|       - name: Enable git long paths and symlinks on Windows and disable fsmonitor daemon | ||||
|         shell: bash | ||||
|         run: | | ||||
|           git config --global core.longpaths true | ||||
|           git config --global core.symlinks true | ||||
|  | ||||
|           # https://git-scm.com/docs/git-fsmonitor--daemon.  The daemon could lock | ||||
|           # the directory on Windows and prevent GHA from checking out as reported | ||||
|           # in https://github.com/actions/checkout/issues/1018 | ||||
|           git config --global core.fsmonitor false | ||||
|       # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560 | ||||
|       - name: Enable long paths on Windows | ||||
|         shell: powershell | ||||
|         run: | | ||||
|           Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1 | ||||
|       # Since it's just a defensive command, the workflow should continue even the command fails. This step can be | ||||
|       # removed once Windows Defender is removed from the AMI | ||||
|       - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch | ||||
|         continue-on-error: true | ||||
|         shell: powershell | ||||
|         run: | | ||||
|           Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore | ||||
|           # Let's both exclude the path and disable Windows Defender completely just to be sure | ||||
|           # that it doesn't interfere | ||||
|           Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore | ||||
|       - name: Checkout PyTorch | ||||
|         uses: actions/checkout@v4 | ||||
|         with: | ||||
|           ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} | ||||
|           submodules: recursive | ||||
|           path: pytorch | ||||
|           show-progress: false | ||||
|       - name: Clean PyTorch checkout | ||||
|         run: | | ||||
|           # Remove any artifacts from the previous checkouts | ||||
|           git clean -fxd | ||||
|         working-directory: pytorch | ||||
|       - name: Populate binary env | ||||
|         shell: bash | ||||
|         run: | | ||||
|           "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh" | ||||
|       - name: Build PyTorch binary | ||||
|         shell: bash | ||||
|         run: | | ||||
|           "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh" | ||||
|       - uses: actions/upload-artifact@v4.4.0 | ||||
|         if: always() | ||||
|         with: | ||||
|           name: libtorch-cuda12_9-shared-with-deps-debug | ||||
|           retention-days: 14 | ||||
|           if-no-files-found: error | ||||
|           path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}" | ||||
|       - name: Wait until all sessions have drained | ||||
|         shell: powershell | ||||
|         working-directory: pytorch | ||||
|         if: always() | ||||
|         timeout-minutes: 120 | ||||
|         run: | | ||||
|           .github\scripts\wait_for_ssh_to_drain.ps1 | ||||
|       - name: Kill active ssh sessions if still around (Useful if workflow was cancelled) | ||||
|         shell: powershell | ||||
|         working-directory: pytorch | ||||
|         if: always() | ||||
|         run: | | ||||
|           .github\scripts\kill_active_ssh_sessions.ps1 | ||||
|  | ||||
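The "Display EC2 information" step above uses IMDSv2: it first PUTs to the token endpoint to get a short-lived session token, then presents that token on every metadata GET. The same two-step flow, isolated into a standalone step:

    - name: Query EC2 instance metadata (IMDSv2)
      shell: bash
      run: |
        # Step 1: request a session token, here valid for 30 seconds.
        token=$(curl -s -X PUT "http://169.254.169.254/latest/api/token" \
          -H "X-aws-ec2-metadata-token-ttl-seconds: 30")
        # Step 2: present the token when reading any metadata category.
        curl -fsSL -H "X-aws-ec2-metadata-token: ${token}" \
          "http://169.254.169.254/latest/meta-data/instance-type"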
|   libtorch-cuda12_9-shared-with-deps-debug-test:  # Testing | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: | ||||
|       - libtorch-cuda12_9-shared-with-deps-debug-build | ||||
|       - get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.g4dn.xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
|       PACKAGE_TYPE: libtorch | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       SKIP_ALL_TESTS: 1 | ||||
|       LIBTORCH_CONFIG: debug | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|       # This is a dummy value for libtorch to work correctly with our batch scripts | ||||
|       # without this value pip does not get installed for some reason | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|     steps: | ||||
|       - name: Display EC2 information | ||||
|         shell: bash | ||||
|         run: | | ||||
|           set -euo pipefail | ||||
|           function get_ec2_metadata() { | ||||
|             # Pulled from instance metadata endpoint for EC2 | ||||
|             # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html | ||||
|             category=$1 | ||||
|             curl -H "X-aws-ec2-metadata-token: $(curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 30")" -fsSL "http://169.254.169.254/latest/meta-data/${category}" | ||||
|           } | ||||
|           echo "ami-id: $(get_ec2_metadata ami-id)" | ||||
|           echo "instance-id: $(get_ec2_metadata instance-id)" | ||||
|           echo "instance-type: $(get_ec2_metadata instance-type)" | ||||
|           echo "system info $(uname -a)" | ||||
|       - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)" | ||||
|         uses: pytorch/test-infra/.github/actions/setup-ssh@main | ||||
|         continue-on-error: true | ||||
|         with: | ||||
|           github-secret: ${{ secrets.GITHUB_TOKEN }} | ||||
|       - name: Enable git long paths and symlinks on Windows and disable fsmonitor daemon | ||||
|         shell: bash | ||||
|         run: | | ||||
|           git config --global core.longpaths true | ||||
|           git config --global core.symlinks true | ||||
|  | ||||
|           # https://git-scm.com/docs/git-fsmonitor--daemon.  The daemon could lock | ||||
|           # the directory on Windows and prevent GHA from checking out as reported | ||||
|           # in https://github.com/actions/checkout/issues/1018 | ||||
|           git config --global core.fsmonitor false | ||||
|       # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560 | ||||
|       - name: Enable long paths on Windows | ||||
|         shell: powershell | ||||
|         run: | | ||||
|           Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1 | ||||
|       # Since it's just a defensive command, the workflow should continue even the command fails. This step can be | ||||
|       # removed once Windows Defender is removed from the AMI | ||||
|       - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch | ||||
|         continue-on-error: true | ||||
|         shell: powershell | ||||
|         run: | | ||||
|           Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore | ||||
|           # Let's both exclude the path and disable Windows Defender completely just to be sure | ||||
|           # that it doesn't interfere | ||||
|           Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore | ||||
|       - name: Checkout PyTorch | ||||
|         uses: actions/checkout@v4 | ||||
|         with: | ||||
|           ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} | ||||
|           submodules: recursive | ||||
|           path: pytorch | ||||
|           show-progress: false | ||||
|       - name: Clean PyTorch checkout | ||||
|         run: | | ||||
|           # Remove any artifacts from the previous checkouts | ||||
|           git clean -fxd | ||||
|         working-directory: pytorch | ||||
|       # NOTE: These environment variables are put here so that they can be applied on every job equally | ||||
|       #       They are also here because setting them at a workflow level doesn't give us access to the | ||||
|       #       runner.temp variable, which we need. | ||||
|       - name: Populate binary env | ||||
|         shell: bash | ||||
|         run: | | ||||
|           echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}" | ||||
|           echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}" | ||||
|           echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}" | ||||
|       - uses: actions/download-artifact@v4.1.7 | ||||
|         name: Download Build Artifacts | ||||
|         with: | ||||
|           name: libtorch-cuda12_9-shared-with-deps-debug | ||||
|           path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}" | ||||
|       - name: Populate binary env | ||||
|         shell: bash | ||||
|         run: | | ||||
|           "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh" | ||||
|       - name: Test PyTorch binary | ||||
|         shell: bash | ||||
|         run: | | ||||
|           "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh" | ||||
|       - name: Wait until all sessions have drained | ||||
|         shell: powershell | ||||
|         working-directory: pytorch | ||||
|         if: always() | ||||
|         timeout-minutes: 120 | ||||
|         run: | | ||||
|           .github\scripts\wait_for_ssh_to_drain.ps1 | ||||
|       - name: Kill active ssh sessions if still around (Useful if workflow was cancelled) | ||||
|         shell: powershell | ||||
|         working-directory: pytorch | ||||
|         if: always() | ||||
|         run: | | ||||
|           .github\scripts\kill_active_ssh_sessions.ps1 | ||||
|   libtorch-cuda12_9-shared-with-deps-debug-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: libtorch-cuda12_9-shared-with-deps-debug-test | ||||
|     with: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
|       PACKAGE_TYPE: libtorch | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       LIBTORCH_CONFIG: debug | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|       # This is a dummy value for libtorch to work correctly with our batch scripts | ||||
|       # without this value pip does not get installed for some reason | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|       build_name: libtorch-cuda12_9-shared-with-deps-debug | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
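Note how the build and test jobs above hand the binaries off through an artifact whose `name:` must match on both sides: `actions/upload-artifact` at the end of the build, `actions/download-artifact` at the start of the test. Reduced to its essentials:

    # Build job: publish the finished binaries under a well-known artifact name.
    - uses: actions/upload-artifact@v4.4.0
      if: always()
      with:
        name: libtorch-cuda12_9-shared-with-deps-debug   # key the test job looks up
        path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"

    # Test job: fetch the same artifact by name before running the binary tests.
    - uses: actions/download-artifact@v4.1.7
      with:
        name: libtorch-cuda12_9-shared-with-deps-debug
        path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"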
|   libtorch-cuda13_0-shared-with-deps-debug-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|  | ||||
.github/workflows/generated-windows-binary-libtorch-release-nightly.yml  (generated, vendored; 250 changed lines)
| @ -788,256 +788,6 @@ jobs: | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|   libtorch-cuda12_9-shared-with-deps-release-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
|       PACKAGE_TYPE: libtorch | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       SKIP_ALL_TESTS: 1 | ||||
|       LIBTORCH_CONFIG: release | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|       # This is a dummy value for libtorch to work correctly with our batch scripts | ||||
|       # without this value pip does not get installed for some reason | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|     steps: | ||||
|       # NOTE: These environment variables are put here so that they can be applied on every job equally | ||||
|       #       They are also here because setting them at a workflow level doesn't give us access to the | ||||
|       #       runner.temp variable, which we need. | ||||
|       - name: Populate binary env | ||||
|         shell: bash | ||||
|         run: | | ||||
|           echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}" | ||||
|           echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}" | ||||
|           echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}" | ||||
|       - name: Display EC2 information | ||||
|         shell: bash | ||||
|         run: | | ||||
|           set -euo pipefail | ||||
|           function get_ec2_metadata() { | ||||
|             # Pulled from instance metadata endpoint for EC2 | ||||
|             # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html | ||||
|             category=$1 | ||||
|             curl -H "X-aws-ec2-metadata-token: $(curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 30")" -fsSL "http://169.254.169.254/latest/meta-data/${category}" | ||||
|           } | ||||
|           echo "ami-id: $(get_ec2_metadata ami-id)" | ||||
|           echo "instance-id: $(get_ec2_metadata instance-id)" | ||||
|           echo "instance-type: $(get_ec2_metadata instance-type)" | ||||
|           echo "system info $(uname -a)" | ||||
|       - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)" | ||||
|         uses: pytorch/test-infra/.github/actions/setup-ssh@main | ||||
|         continue-on-error: true | ||||
|         with: | ||||
|           github-secret: ${{ secrets.GITHUB_TOKEN }} | ||||
|       - name: Enable git long paths and symlinks on Windows and disable fsmonitor daemon | ||||
|         shell: bash | ||||
|         run: | | ||||
|           git config --global core.longpaths true | ||||
|           git config --global core.symlinks true | ||||
|  | ||||
|           # https://git-scm.com/docs/git-fsmonitor--daemon.  The daemon could lock | ||||
|           # the directory on Windows and prevent GHA from checking out as reported | ||||
|           # in https://github.com/actions/checkout/issues/1018 | ||||
|           git config --global core.fsmonitor false | ||||
|       # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560 | ||||
|       - name: Enable long paths on Windows | ||||
|         shell: powershell | ||||
|         run: | | ||||
|           Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1 | ||||
|       # Since it's just a defensive command, the workflow should continue even the command fails. This step can be | ||||
|       # removed once Windows Defender is removed from the AMI | ||||
|       - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch | ||||
|         continue-on-error: true | ||||
|         shell: powershell | ||||
|         run: | | ||||
|           Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore | ||||
|           # Let's both exclude the path and disable Windows Defender completely just to be sure | ||||
|           # that it doesn't interfere | ||||
|           Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore | ||||
|       - name: Checkout PyTorch | ||||
|         uses: actions/checkout@v4 | ||||
|         with: | ||||
|           ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} | ||||
|           submodules: recursive | ||||
|           path: pytorch | ||||
|           show-progress: false | ||||
|       - name: Clean PyTorch checkout | ||||
|         run: | | ||||
|           # Remove any artifacts from the previous checkouts | ||||
|           git clean -fxd | ||||
|         working-directory: pytorch | ||||
|       - name: Populate binary env | ||||
|         shell: bash | ||||
|         run: | | ||||
|           "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh" | ||||
|       - name: Build PyTorch binary | ||||
|         shell: bash | ||||
|         run: | | ||||
|           "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh" | ||||
|       - uses: actions/upload-artifact@v4.4.0 | ||||
|         if: always() | ||||
|         with: | ||||
|           name: libtorch-cuda12_9-shared-with-deps-release | ||||
|           retention-days: 14 | ||||
|           if-no-files-found: error | ||||
|           path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}" | ||||
|       - name: Wait until all sessions have drained | ||||
|         shell: powershell | ||||
|         working-directory: pytorch | ||||
|         if: always() | ||||
|         timeout-minutes: 120 | ||||
|         run: | | ||||
|           .github\scripts\wait_for_ssh_to_drain.ps1 | ||||
|       - name: Kill active ssh sessions if still around (Useful if workflow was cancelled) | ||||
|         shell: powershell | ||||
|         working-directory: pytorch | ||||
|         if: always() | ||||
|         run: | | ||||
|           .github\scripts\kill_active_ssh_sessions.ps1 | ||||
|  | ||||
|   libtorch-cuda12_9-shared-with-deps-release-test:  # Testing | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: | ||||
|       - libtorch-cuda12_9-shared-with-deps-release-build | ||||
|       - get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.g4dn.xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
|       PACKAGE_TYPE: libtorch | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       SKIP_ALL_TESTS: 1 | ||||
|       LIBTORCH_CONFIG: release | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|       # This is a dummy value for libtorch to work correctly with our batch scripts | ||||
|       # without this value pip does not get installed for some reason | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|     steps: | ||||
|       - name: Display EC2 information | ||||
|         shell: bash | ||||
|         run: | | ||||
|           set -euo pipefail | ||||
|           function get_ec2_metadata() { | ||||
|             # Pulled from instance metadata endpoint for EC2 | ||||
|             # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html | ||||
|             category=$1 | ||||
|             curl -H "X-aws-ec2-metadata-token: $(curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 30")" -fsSL "http://169.254.169.254/latest/meta-data/${category}" | ||||
|           } | ||||
|           echo "ami-id: $(get_ec2_metadata ami-id)" | ||||
|           echo "instance-id: $(get_ec2_metadata instance-id)" | ||||
|           echo "instance-type: $(get_ec2_metadata instance-type)" | ||||
|           echo "system info $(uname -a)" | ||||
|       - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)" | ||||
|         uses: pytorch/test-infra/.github/actions/setup-ssh@main | ||||
|         continue-on-error: true | ||||
|         with: | ||||
|           github-secret: ${{ secrets.GITHUB_TOKEN }} | ||||
|       - name: Enable git long paths and symlinks on Windows and disable fsmonitor daemon | ||||
|         shell: bash | ||||
|         run: | | ||||
|           git config --global core.longpaths true | ||||
|           git config --global core.symlinks true | ||||
|  | ||||
|           # https://git-scm.com/docs/git-fsmonitor--daemon.  The daemon could lock | ||||
|           # the directory on Windows and prevent GHA from checking out as reported | ||||
|           # in https://github.com/actions/checkout/issues/1018 | ||||
|           git config --global core.fsmonitor false | ||||
|       # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560 | ||||
|       - name: Enable long paths on Windows | ||||
|         shell: powershell | ||||
|         run: | | ||||
|           Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1 | ||||
|       # Since it's just a defensive command, the workflow should continue even the command fails. This step can be | ||||
|       # removed once Windows Defender is removed from the AMI | ||||
|       - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch | ||||
|         continue-on-error: true | ||||
|         shell: powershell | ||||
|         run: | | ||||
|           Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore | ||||
|           # Let's both exclude the path and disable Windows Defender completely just to be sure | ||||
|           # that it doesn't interfere | ||||
|           Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore | ||||
|       - name: Checkout PyTorch | ||||
|         uses: actions/checkout@v4 | ||||
|         with: | ||||
|           ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} | ||||
|           submodules: recursive | ||||
|           path: pytorch | ||||
|           show-progress: false | ||||
|       - name: Clean PyTorch checkout | ||||
|         run: | | ||||
|           # Remove any artifacts from the previous checkouts | ||||
|           git clean -fxd | ||||
|         working-directory: pytorch | ||||
|       # NOTE: These environment variables are put here so that they can be applied on every job equally | ||||
|       #       They are also here because setting them at a workflow level doesn't give us access to the | ||||
|       #       runner.temp variable, which we need. | ||||
|       - name: Populate binary env | ||||
|         shell: bash | ||||
|         run: | | ||||
|           echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}" | ||||
|           echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}" | ||||
|           echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}" | ||||
|       - uses: actions/download-artifact@v4.1.7 | ||||
|         name: Download Build Artifacts | ||||
|         with: | ||||
|           name: libtorch-cuda12_9-shared-with-deps-release | ||||
|           path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}" | ||||
|       - name: Populate binary env | ||||
|         shell: bash | ||||
|         run: | | ||||
|           "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh" | ||||
|       - name: Test PyTorch binary | ||||
|         shell: bash | ||||
|         run: | | ||||
|           "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh" | ||||
|       - name: Wait until all sessions have drained | ||||
|         shell: powershell | ||||
|         working-directory: pytorch | ||||
|         if: always() | ||||
|         timeout-minutes: 120 | ||||
|         run: | | ||||
|           .github\scripts\wait_for_ssh_to_drain.ps1 | ||||
|       - name: Kill active ssh sessions if still around (Useful if workflow was cancelled) | ||||
|         shell: powershell | ||||
|         working-directory: pytorch | ||||
|         if: always() | ||||
|         run: | | ||||
|           .github\scripts\kill_active_ssh_sessions.ps1 | ||||
|   libtorch-cuda12_9-shared-with-deps-release-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: libtorch-cuda12_9-shared-with-deps-release-test | ||||
|     with: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
|       PACKAGE_TYPE: libtorch | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       LIBTORCH_CONFIG: release | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|       # This is a dummy value for libtorch to work correctly with our batch scripts | ||||
|       # without this value pip does not get installed for some reason | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|       build_name: libtorch-cuda12_9-shared-with-deps-release | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|   libtorch-cuda13_0-shared-with-deps-release-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|  | ||||
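The release jobs above mirror the debug jobs with only `LIBTORCH_CONFIG` flipped; the generator expands every (config, variant, CUDA) combination into its own job. If the same coverage were written by hand, a matrix could express it; a sketch under that assumption, not a description of how the generator actually works:

    jobs:
      libtorch-build:
        strategy:
          matrix:
            libtorch-config: [debug, release]
            libtorch-variant: [shared-with-deps]
            cuda: ["12.6", "12.8", "13.0"]
        runs-on: windows-2022          # illustrative runner label
        env:
          LIBTORCH_CONFIG: ${{ matrix.libtorch-config }}
          LIBTORCH_VARIANT: ${{ matrix.libtorch-variant }}
          GPU_ARCH_VERSION: ${{ matrix.cuda }}
        steps:
          - run: echo "would build libtorch ${{ matrix.libtorch-config }} for CUDA ${{ matrix.cuda }}"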
.github/workflows/generated-windows-binary-wheel-nightly.yml  (generated, vendored; 1666 changed lines)
(File diff suppressed because it is too large.)
							| @ -130,7 +130,7 @@ jobs: | ||||
|     name: test-periodically | ||||
|     uses: ./.github/workflows/_linux-test.yml | ||||
|     needs: build | ||||
|     if: github.event.schedule == '15 0 * * 1-6' | ||||
|     if: github.event.schedule == '15 0,12 * * 1-6' | ||||
|     with: | ||||
|       build-environment: linux-jammy-cuda12.8-py3.10-gcc9-sm90 | ||||
|       dashboard-tag: training-true-inference-true-default-true-dynamic-true-cudagraphs-true-cppwrapper-true-aotinductor-true-freezing_cudagraphs-true-cudagraphs_low_precision-true | ||||
|  | ||||
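A workflow may register several crons under `schedule:`, and `github.event.schedule` carries the literal cron string that fired, so a job-level `if:` like the one above pins a job to one of them ('15 0,12 * * 1-6' is 00:15 and 12:15 UTC, Monday through Saturday). A minimal sketch; the second cron is invented for contrast:

    on:
      schedule:
        - cron: 15 0,12 * * 1-6   # 00:15 and 12:15 UTC, Mon-Sat
        - cron: 0 7 * * 0         # 07:00 UTC on Sundays (illustrative)
    jobs:
      twice-daily-only:
        if: github.event.schedule == '15 0,12 * * 1-6'  # skip runs fired by the Sunday cron
        runs-on: ubuntu-latest
        steps:
          - run: echo "fired by the twice-daily schedule"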
| @ -1,132 +0,0 @@ | ||||
| name: inductor-perf-nightly-rocm-mi300 | ||||
|  | ||||
| on: | ||||
|   push: | ||||
|     tags: | ||||
|       - ciflow/inductor-perf-test-nightly-rocm-mi300/* | ||||
|   schedule: | ||||
|     - cron: 15 0 * * * | ||||
|   # NB: GitHub has an upper limit of 10 inputs here, so before we can sort it | ||||
|   # out, let try to run torchao cudagraphs_low_precision as part of cudagraphs | ||||
|   workflow_dispatch: | ||||
|     inputs: | ||||
|       training: | ||||
|         description: Run training (on by default)? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: true | ||||
|       inference: | ||||
|         description: Run inference (on by default)? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: true | ||||
|       default: | ||||
|         description: Run inductor_default? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: false | ||||
|       dynamic: | ||||
|         description: Run inductor_dynamic_shapes? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: false | ||||
|       cppwrapper: | ||||
|         description: Run inductor_cpp_wrapper? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: false | ||||
|       cudagraphs: | ||||
|         description: Run inductor_cudagraphs? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: true | ||||
|       freezing_cudagraphs: | ||||
|         description: Run inductor_cudagraphs with freezing for inference? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: false | ||||
|       aotinductor: | ||||
|         description: Run aot_inductor for inference? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: false | ||||
|       maxautotune: | ||||
|         description: Run inductor_max_autotune? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: false | ||||
|       benchmark_configs: | ||||
|         description: The list of configs used the benchmark | ||||
|         required: false | ||||
|         type: string | ||||
|         default: inductor_huggingface_perf_rocm_mi300,inductor_timm_perf_rocm_mi300,inductor_torchbench_perf_rocm_mi300 | ||||
|  | ||||
| concurrency: | ||||
|   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }} | ||||
|   cancel-in-progress: true | ||||
|  | ||||
| permissions: read-all | ||||
|  | ||||
| jobs: | ||||
|   get-label-type: | ||||
|     name: get-label-type | ||||
|     uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main | ||||
|     if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }} | ||||
|     with: | ||||
|       triggering_actor: ${{ github.triggering_actor }} | ||||
|       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }} | ||||
|       curr_branch: ${{ github.head_ref || github.ref_name }} | ||||
|       curr_ref_type: ${{ github.ref_type }} | ||||
|       opt_out_experiments: lf | ||||
|  | ||||
|   linux-jammy-rocm-py3_10-inductor-benchmark-build: | ||||
|     if: github.repository_owner == 'pytorch' | ||||
|     name: rocm-py3_10-inductor-benchmark-build | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     with: | ||||
|       build-environment: linux-jammy-rocm-py3_10 | ||||
|       docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3-benchmarks | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "inductor_huggingface_perf_rocm_mi300", shard: 1, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_huggingface_perf_rocm_mi300", shard: 2, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_huggingface_perf_rocm_mi300", shard: 3, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_huggingface_perf_rocm_mi300", shard: 4, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_huggingface_perf_rocm_mi300", shard: 5, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi300", shard: 1, num_shards: 7, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi300", shard: 2, num_shards: 7, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi300", shard: 3, num_shards: 7, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi300", shard: 4, num_shards: 7, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi300", shard: 5, num_shards: 7, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi300", shard: 6, num_shards: 7, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi300", shard: 7, num_shards: 7, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi300", shard: 1, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi300", shard: 2, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi300", shard: 3, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi300", shard: 4, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi300", shard: 5, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi300", shard: 6, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi300", shard: 7, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi300", shard: 8, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi300", shard: 9, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|         ]} | ||||
|     secrets: inherit | ||||
|  | ||||
|   linux-jammy-rocm-py3_10-inductor-benchmark-test: | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     name: rocm-py3_10-inductor-benchmark-test | ||||
|     uses: ./.github/workflows/_rocm-test.yml | ||||
|     needs: linux-jammy-rocm-py3_10-inductor-benchmark-build | ||||
|     with: | ||||
|       build-environment: linux-jammy-rocm-py3_10 | ||||
|       dashboard-tag: training-true-inference-true-default-true-dynamic-true-cudagraphs-true-cppwrapper-true-aotinductor-true-freezing_cudagraphs-true-cudagraphs_low_precision-true | ||||
|       docker-image: ${{ needs.linux-jammy-rocm-py3_10-inductor-benchmark-build.outputs.docker-image }} | ||||
|       test-matrix: ${{ needs.linux-jammy-rocm-py3_10-inductor-benchmark-build.outputs.test-matrix }} | ||||
|       timeout-minutes: 720 | ||||
|       # Disable monitor in perf tests for more investigation | ||||
|       disable-monitor: true | ||||
|       monitor-log-interval: 10 | ||||
|       monitor-data-collect-interval: 2 | ||||
|     secrets: inherit | ||||
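Each entry in the `test-matrix` include list above pins one shard of one benchmark config to a runner pool; the harness presumably slices the test set by `shard`/`num_shards`. The shape of a single sharded config, cut down to two shards:

    test-matrix: |
      { include: [
        # Two shards of one config, each landing on its own gfx942 (MI300-class) runner.
        { config: "inductor_huggingface_perf_rocm_mi300", shard: 1, num_shards: 2, runner: "linux.rocm.gpu.gfx942.1" },
        { config: "inductor_huggingface_perf_rocm_mi300", shard: 2, num_shards: 2, runner: "linux.rocm.gpu.gfx942.1" },
      ]}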
| @ -1,11 +1,11 @@ | ||||
| name: inductor-perf-nightly-rocm-mi355 | ||||
| name: inductor-perf-nightly-rocm | ||||
|  | ||||
| on: | ||||
|   push: | ||||
|     tags: | ||||
|       - ciflow/inductor-perf-test-nightly-rocm-mi355/* | ||||
|       - ciflow/inductor-perf-test-nightly-rocm/* | ||||
|   schedule: | ||||
|     - cron: 15 0 * * * | ||||
|     - cron: 0 7 * * 0,3 | ||||
|   # NB: GitHub has an upper limit of 10 inputs here, so before we can sort it | ||||
|   # out, let try to run torchao cudagraphs_low_precision as part of cudagraphs | ||||
|   workflow_dispatch: | ||||
| @ -59,7 +59,7 @@ on: | ||||
|         description: The list of configs used the benchmark | ||||
|         required: false | ||||
|         type: string | ||||
|         default: inductor_huggingface_perf_rocm_mi355,inductor_timm_perf_rocm_mi355,inductor_torchbench_perf_rocm_mi355 | ||||
|         default: inductor_huggingface_perf_rocm,inductor_timm_perf_rocm,inductor_torchbench_perf_rocm | ||||
|  | ||||
| concurrency: | ||||
|   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }} | ||||
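The `concurrency:` block above builds its group key from the workflow name plus the PR number or ref (and, for branch pushes, the SHA), so a newer run of the same workflow on the same PR evicts the older one while scheduled and dispatched runs land in separate groups. The core of the idiom:

    concurrency:
      # One slot per workflow per PR/branch; a newer push cancels the older run.
      group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}
      cancel-in-progress: true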
| @ -88,27 +88,23 @@ jobs: | ||||
|       docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3-benchmarks | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "inductor_huggingface_perf_rocm_mi355", shard: 1, num_shards: 5, runner: "linux.rocm.gpu.mi355.2" }, | ||||
|           { config: "inductor_huggingface_perf_rocm_mi355", shard: 2, num_shards: 5, runner: "linux.rocm.gpu.mi355.2" }, | ||||
|           { config: "inductor_huggingface_perf_rocm_mi355", shard: 3, num_shards: 5, runner: "linux.rocm.gpu.mi355.2" }, | ||||
|           { config: "inductor_huggingface_perf_rocm_mi355", shard: 4, num_shards: 5, runner: "linux.rocm.gpu.mi355.2" }, | ||||
|           { config: "inductor_huggingface_perf_rocm_mi355", shard: 5, num_shards: 5, runner: "linux.rocm.gpu.mi355.2" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi355", shard: 1, num_shards: 7, runner: "linux.rocm.gpu.mi355.2" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi355", shard: 2, num_shards: 7, runner: "linux.rocm.gpu.mi355.2" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi355", shard: 3, num_shards: 7, runner: "linux.rocm.gpu.mi355.2" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi355", shard: 4, num_shards: 7, runner: "linux.rocm.gpu.mi355.2" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi355", shard: 5, num_shards: 7, runner: "linux.rocm.gpu.mi355.2" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi355", shard: 6, num_shards: 7, runner: "linux.rocm.gpu.mi355.2" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi355", shard: 7, num_shards: 7, runner: "linux.rocm.gpu.mi355.2" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi355", shard: 1, num_shards: 9, runner: "linux.rocm.gpu.mi355.2" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi355", shard: 2, num_shards: 9, runner: "linux.rocm.gpu.mi355.2" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi355", shard: 3, num_shards: 9, runner: "linux.rocm.gpu.mi355.2" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi355", shard: 4, num_shards: 9, runner: "linux.rocm.gpu.mi355.2" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi355", shard: 5, num_shards: 9, runner: "linux.rocm.gpu.mi355.2" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi355", shard: 6, num_shards: 9, runner: "linux.rocm.gpu.mi355.2" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi355", shard: 7, num_shards: 9, runner: "linux.rocm.gpu.mi355.2" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi355", shard: 8, num_shards: 9, runner: "linux.rocm.gpu.mi355.2" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi355", shard: 9, num_shards: 9, runner: "linux.rocm.gpu.mi355.2" }, | ||||
|           { config: "inductor_huggingface_perf_rocm", shard: 1, num_shards: 4, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_huggingface_perf_rocm", shard: 2, num_shards: 4, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_huggingface_perf_rocm", shard: 3, num_shards: 4, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_huggingface_perf_rocm", shard: 4, num_shards: 4, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_timm_perf_rocm", shard: 1, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_timm_perf_rocm", shard: 2, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_timm_perf_rocm", shard: 3, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_timm_perf_rocm", shard: 4, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_timm_perf_rocm", shard: 5, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm", shard: 1, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm", shard: 2, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm", shard: 3, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm", shard: 4, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm", shard: 5, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm", shard: 6, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm", shard: 7, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm", shard: 8, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|         ]} | ||||
|     secrets: inherit | ||||
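`secrets: inherit` forwards the caller's entire secret set to the reusable workflow, in contrast to the explicit `github-token:` plumbing used by the binary workflows earlier in this diff. Side by side, as a sketch with workflow paths taken from this diff:

    jobs:
      explicit:
        uses: ./.github/workflows/_binary-upload.yml
        secrets:
          github-token: ${{ secrets.GITHUB_TOKEN }}   # only this one secret crosses the boundary

      forwarded:
        uses: ./.github/workflows/_rocm-test.yml
        secrets: inherit                              # every caller secret becomes visible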
|  | ||||
.github/workflows/lint.yml  (vendored; 7 changed lines)
							| @ -12,7 +12,6 @@ on: | ||||
|       - landchecks/* | ||||
|     tags: | ||||
|       - ciflow/pull/* | ||||
|       - ciflow/trunk/* | ||||
|   workflow_dispatch: | ||||
|  | ||||
| permissions: read-all | ||||
| @ -33,12 +32,10 @@ jobs: | ||||
|     name: Get changed files | ||||
|     uses: ./.github/workflows/_get-changed-files.yml | ||||
|     with: | ||||
|       all_files: ${{ contains(github.event.pull_request.labels.*.name, 'lint-all-files') || contains(github.event.pull_request.labels.*.name, 'Reverted') || github.event_name == 'push' }} | ||||
|       all_files: ${{ contains(github.event.pull_request.labels.*.name, 'lint-all-files') || contains(github.event.pull_request.labels.*.name, 'Reverted') }} | ||||
|  | ||||
|   lintrunner-clang: | ||||
|     uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main | ||||
|     # Needed to prevent deduping on HUD | ||||
|     name: lintrunner-clang-${{ needs.get-changed-files.outputs.changed-files == '*' && 'all' || 'partial' }} | ||||
|     needs: [get-label-type, get-changed-files] | ||||
|     # Only run if there are changed files relevant to clangtidy / clangformat | ||||
|     if: | | ||||
| @ -78,7 +75,6 @@ jobs: | ||||
|   #       fails to find types when it should | ||||
|   lintrunner-mypy: | ||||
|     uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main | ||||
|     name: lintrunner-mypy-${{ needs.get-changed-files.outputs.changed-files == '*' && 'all' || 'partial' }} | ||||
|     needs: [get-label-type, get-changed-files] | ||||
|     # Only run if there are changed files relevant to mypy | ||||
|     if: | | ||||
| @ -103,7 +99,6 @@ jobs: | ||||
|  | ||||
|   lintrunner-noclang: | ||||
|     uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main | ||||
|     name: lintrunner-noclang-${{ needs.get-changed-files.outputs.changed-files == '*' && 'all' || 'partial' }} | ||||
|     needs: [get-label-type, get-changed-files] | ||||
|     with: | ||||
|       timeout: 120 | ||||
|  | ||||
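The `name:` overrides touched in this hunk embed the changed-files state into the job name, so the HUD dashboard sees `lintrunner-clang-all` and `lintrunner-clang-partial` as distinct jobs rather than deduplicating them; the expression is GitHub Actions' ternary idiom spelled with `&&`/`||`. In isolation:

    lintrunner-clang:
      # changed-files is '*' when linting everything; anything else is a partial list.
      name: lintrunner-clang-${{ needs.get-changed-files.outputs.changed-files == '*' && 'all' || 'partial' }}
      needs: [get-label-type, get-changed-files]
      uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main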
.github/workflows/periodic.yml  (vendored; 10 changed lines)
| @ -182,11 +182,11 @@ jobs: | ||||
|       docker-image-name: ci-image:pytorch-linux-jammy-cuda13.0-cudnn9-py3-gcc11 | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "nogpu_AVX512", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" }, | ||||
|           { config: "nogpu_AVX512", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" }, | ||||
|           { config: "nogpu_AVX512", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" }, | ||||
|           { config: "nogpu_NO_AVX2", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" }, | ||||
|           { config: "nogpu_NO_AVX2", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" }, | ||||
|           { config: "nogpu_AVX512", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g4dn.4xlarge.nvidia.gpu" }, | ||||
|           { config: "nogpu_AVX512", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g4dn.4xlarge.nvidia.gpu" }, | ||||
|           { config: "nogpu_AVX512", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g4dn.4xlarge.nvidia.gpu" }, | ||||
|           { config: "nogpu_NO_AVX2", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g4dn.4xlarge.nvidia.gpu" }, | ||||
|           { config: "nogpu_NO_AVX2", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g4dn.4xlarge.nvidia.gpu" }, | ||||
|           { config: "jit_legacy", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g4dn.4xlarge.nvidia.gpu" }, | ||||
|         ]} | ||||
|     secrets: inherit | ||||
|  | ||||
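Throughout these matrices the runner label is concatenated from the `get-label-type` output and a base instance type, so one workflow can be steered onto a different fleet purely by prefix; judging by the `opt_out_experiments: lf` knob earlier in this diff, the prefix selects between the default fleet and an "lf."-prefixed one, though that mapping is an inference, not stated here. The moving parts in isolation:

    jobs:
      get-label-type:
        uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
        with:
          triggering_actor: ${{ github.triggering_actor }}
          issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
          curr_branch: ${{ github.head_ref || github.ref_name }}
          curr_ref_type: ${{ github.ref_type }}

      build:
        needs: get-label-type
        # Empty prefix -> "linux.g4dn.4xlarge.nvidia.gpu"; "lf." prefix -> "lf.linux.g4dn...".
        runs-on: "${{ needs.get-label-type.outputs.label-type }}linux.g4dn.4xlarge.nvidia.gpu"
        steps:
          - run: echo "runner label resolved when the job is queued"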
.github/workflows/pull.yml  (vendored; 1 changed line)
| @ -127,7 +127,6 @@ jobs: | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner: linux.2xlarge.memory | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build-environment: linux-jammy-py3.10-clang18-asan | ||||
|       docker-image-name: ci-image:pytorch-linux-jammy-py3-clang18-asan | ||||
|  | ||||
.github/workflows/slow.yml  (vendored; 1 changed line)
| @ -140,7 +140,6 @@ jobs: | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner: linux.2xlarge.memory | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build-environment: linux-jammy-py3.10-clang18-asan | ||||
|       docker-image-name: ci-image:pytorch-linux-jammy-py3-clang18-asan | ||||
|  | ||||
.github/workflows/trunk.yml  (vendored; 10 changed lines)
| @ -56,7 +56,7 @@ jobs: | ||||
|       docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc11 | ||||
|       build-generates-artifacts: false | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runner: "linux.c7i.4xlarge" | ||||
|       runner: "linux.4xlarge" | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 1 }, | ||||
| @ -180,13 +180,13 @@ jobs: | ||||
|       disable-monitor: false | ||||
|     secrets: inherit | ||||
|  | ||||
|   win-vs2022-cuda12_8-py3-build: | ||||
|     name: win-vs2022-cuda12.8-py3 | ||||
|   win-vs2022-cuda12_6-py3-build: | ||||
|     name: win-vs2022-cuda12.6-py3 | ||||
|     uses: ./.github/workflows/_win-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       build-environment: win-vs2022-cuda12.8-py3 | ||||
|       cuda-version: "12.8" | ||||
|       build-environment: win-vs2022-cuda12.6-py3 | ||||
|       cuda-version: "12.6" | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" | ||||
|     secrets: inherit | ||||
|  | ||||
|  | ||||
.github/workflows/vllm.yml  (vendored; 4 changed lines)
| @ -46,7 +46,7 @@ jobs: | ||||
|       runner: linux.24xlarge.memory | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "vllm_basic_correctness_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" }, | ||||
|           { config:  "vllm_basic_correctness_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" }, | ||||
|           { config: "vllm_basic_models_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" }, | ||||
|           { config: "vllm_entrypoints_test", shard: 1, num_shards: 1,runner: "linux.g6.4xlarge.experimental.nvidia.gpu" }, | ||||
|           { config: "vllm_regression_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" }, | ||||
| @ -54,7 +54,7 @@ jobs: | ||||
|           { config: "vllm_pytorch_compilation_unit_tests", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" }, | ||||
|           { config: "vllm_lora_28_failure_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" }, | ||||
|           { config: "vllm_multi_model_test_28_failure_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu"}, | ||||
|           { config: "vllm_language_model_test_extended_generation_28_failure_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu"}, | ||||
|           { config: "vllm_languagde_model_test_extended_generation_28_failure_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu"}, | ||||
|           { config: "vllm_distributed_test_2_gpu_28_failure_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" }, | ||||
|           { config: "vllm_lora_test", shard: 0, num_shards: 4, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" }, | ||||
|           { config: "vllm_lora_test", shard: 1, num_shards: 4, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" }, | ||||
|  | ||||
.gitignore  (vendored; 1 changed line)
| @ -395,4 +395,3 @@ android/pytorch_android_torchvision/.cxx | ||||
| CLAUDE.local.md | ||||
| /test_*.py | ||||
| /debug_*.py | ||||
| CLAUDE_CONTEXT/ | ||||
|  | ||||
| @ -256,7 +256,6 @@ endif() | ||||
| IF(USE_FBGEMM_GENAI) | ||||
|   set(FBGEMM_THIRD_PARTY ${PROJECT_SOURCE_DIR}/third_party/fbgemm/external/) | ||||
|   set(FBGEMM_GENAI_SRCS ${PROJECT_SOURCE_DIR}/third_party/fbgemm/fbgemm_gpu/experimental/gen_ai/src/quantize) | ||||
|  | ||||
|   if(USE_CUDA) | ||||
|     # To avoid increasing the build time/binary size unnecessarily, use an allow-list of kernels to build. | ||||
|     # If you want to integrate a kernel from FBGEMM into torch, you have to add it here. | ||||
| @ -293,64 +292,58 @@ IF(USE_FBGEMM_GENAI) | ||||
|       "${FBGEMM_GENAI_SRCS}/cutlass_extensions/mx8mx8bf16_grouped/" | ||||
|     ) | ||||
|  | ||||
|     target_include_directories(fbgemm_genai PRIVATE | ||||
|     target_include_directories(fbgemm_genai PUBLIC | ||||
|       ${FBGEMM_THIRD_PARTY}/cutlass/include | ||||
|       ${FBGEMM_THIRD_PARTY}/cutlass/tools/util/include | ||||
|       ${fbgemm_genai_mx8mx8bf16_grouped} | ||||
|       ${FBGEMM_GENAI_SRCS}/common/include/   # includes fbgemm_gpu/quantize/utils.h, fbgemm_gpu/quantize/tuning_cache.hpp | ||||
|       ${FBGEMM_GENAI_SRCS}/include/          # includes fbgemm_gpu/torch_ops.h | ||||
|     ) | ||||
|   else() | ||||
|     if(USE_ROCM) | ||||
|       # Only include the kernels we want to build to avoid increasing binary size. | ||||
|       file(GLOB_RECURSE fbgemm_genai_native_rocm_hip | ||||
|         "${FBGEMM_GENAI_SRCS}/ck_extensions/fp8_rowwise_grouped/kernels/fp8_rowwise_grouped*.hip" | ||||
|         "${FBGEMM_GENAI_SRCS}/ck_extensions/fp8_rowwise_grouped/fp8_rowwise_grouped_gemm.hip") | ||||
|       set_source_files_properties(${fbgemm_genai_native_rocm_hip} PROPERTIES HIP_SOURCE_PROPERTY_FORMAT 1) | ||||
|  | ||||
|     # Add FBGEMM_GENAI include directories for torch_ops.h | ||||
|     list(APPEND ATen_CUDA_INCLUDE ${PROJECT_SOURCE_DIR}/third_party/fbgemm/fbgemm_gpu/experimental/gen_ai/src/quantize/include) | ||||
|     list(APPEND ATen_CUDA_INCLUDE ${PROJECT_SOURCE_DIR}/third_party/fbgemm/fbgemm_gpu/experimental/gen_ai/src/quantize/common/include) | ||||
|   elseif(USE_ROCM) | ||||
|     # Only include the kernels we want to build to avoid increasing binary size. | ||||
|     file(GLOB_RECURSE fbgemm_genai_native_rocm_hip | ||||
|       "${FBGEMM_GENAI_SRCS}/ck_extensions/fp8_rowwise_grouped/kernels/fp8_rowwise_grouped*.hip" | ||||
|       "${FBGEMM_GENAI_SRCS}/ck_extensions/fp8_rowwise_grouped/fp8_rowwise_grouped_gemm.hip") | ||||
|     set_source_files_properties(${fbgemm_genai_native_rocm_hip} PROPERTIES HIP_SOURCE_PROPERTY_FORMAT 1) | ||||
|       # Add additional HIPCC compiler flags for performance | ||||
|       set(FBGEMM_GENAI_EXTRA_HIPCC_FLAGS | ||||
|         -mllvm | ||||
|         -amdgpu-coerce-illegal-types=1 | ||||
|         -mllvm | ||||
|         -enable-post-misched=0 | ||||
|         -mllvm | ||||
|         -greedy-reverse-local-assignment=1 | ||||
|         -fhip-new-launch-api) | ||||
|  | ||||
|     # Add additional HIPCC compiler flags for performance | ||||
|     set(FBGEMM_GENAI_EXTRA_HIPCC_FLAGS | ||||
|       -mllvm | ||||
|       -amdgpu-coerce-illegal-types=1 | ||||
|       -mllvm | ||||
|       -enable-post-misched=0 | ||||
|       -mllvm | ||||
|       -greedy-reverse-local-assignment=1 | ||||
|       -fhip-new-launch-api) | ||||
|       # Only compile for gfx942 for now. | ||||
|       # This is rather hacky, I could not figure out a clean solution :( | ||||
|       set(HIP_CLANG_FLAGS_ORIGINAL ${HIP_CLANG_FLAGS}) | ||||
|       string(REGEX REPLACE "--offload-arch=[^ ]*" "" FILTERED_HIP_CLANG_FLAGS "${HIP_CLANG_FLAGS}") | ||||
|       if("gfx942" IN_LIST PYTORCH_ROCM_ARCH) | ||||
|         list(APPEND FILTERED_HIP_CLANG_FLAGS --offload-arch=gfx942;) | ||||
|       endif() | ||||
|       set(HIP_CLANG_FLAGS ${FILTERED_HIP_CLANG_FLAGS}) | ||||
|  | ||||
|     # Only compile for gfx942 for now. | ||||
|     # This is rather hacky, I could not figure out a clean solution :( | ||||
|     set(HIP_CLANG_FLAGS_ORIGINAL ${HIP_CLANG_FLAGS}) | ||||
|     string(REGEX REPLACE "--offload-arch=[^ ]*" "" FILTERED_HIP_CLANG_FLAGS "${HIP_CLANG_FLAGS}") | ||||
|     if("gfx942" IN_LIST PYTORCH_ROCM_ARCH) | ||||
|       list(APPEND FILTERED_HIP_CLANG_FLAGS --offload-arch=gfx942;) | ||||
|       hip_add_library( | ||||
|         fbgemm_genai STATIC | ||||
|         ${fbgemm_genai_native_rocm_hip} | ||||
|         HIPCC_OPTIONS ${HIP_HCC_FLAGS} ${FBGEMM_GENAI_EXTRA_HIPCC_FLAGS}) | ||||
|       set(HIP_CLANG_FLAGS ${HIP_CLANG_FLAGS_ORIGINAL}) | ||||
|       set_target_properties(fbgemm_genai PROPERTIES POSITION_INDEPENDENT_CODE ON) | ||||
|       target_compile_definitions(fbgemm_genai PRIVATE FBGEMM_GENAI_NO_EXTENDED_SHAPES) | ||||
|  | ||||
|       target_include_directories(fbgemm_genai PUBLIC | ||||
|         # FBGEMM version of Composable Kernel is used due to some customizations | ||||
|         ${FBGEMM_THIRD_PARTY}/composable_kernel/include | ||||
|         ${FBGEMM_THIRD_PARTY}/composable_kernel/library/include | ||||
|         ${FBGEMM_THIRD_PARTY}/cutlass/include | ||||
|         ${FBGEMM_THIRD_PARTY}/cutlass/tools/util/include | ||||
|         ${FBGEMM_GENAI_SRCS}/common/include/   # includes fbgemm_gpu/quantize/utils.h, fbgemm_gpu/quantize/tuning_cache.hpp | ||||
|         ${FBGEMM_GENAI_SRCS}/include/          # includes fbgemm_gpu/torch_ops.h | ||||
|       ) | ||||
|     endif() | ||||
|     set(HIP_CLANG_FLAGS ${FILTERED_HIP_CLANG_FLAGS}) | ||||
|  | ||||
|     hip_add_library( | ||||
|       fbgemm_genai STATIC | ||||
|       ${fbgemm_genai_native_rocm_hip} | ||||
|       HIPCC_OPTIONS ${HIP_HCC_FLAGS} ${FBGEMM_GENAI_EXTRA_HIPCC_FLAGS}) | ||||
|     set(HIP_CLANG_FLAGS ${HIP_CLANG_FLAGS_ORIGINAL}) | ||||
|     set_target_properties(fbgemm_genai PROPERTIES POSITION_INDEPENDENT_CODE ON) | ||||
|     target_compile_definitions(fbgemm_genai PRIVATE FBGEMM_GENAI_NO_EXTENDED_SHAPES) | ||||
|  | ||||
|     target_include_directories(fbgemm_genai PRIVATE | ||||
|       # FBGEMM version of Composable Kernel is used due to some customizations | ||||
|       ${FBGEMM_THIRD_PARTY}/composable_kernel/include | ||||
|       ${FBGEMM_THIRD_PARTY}/composable_kernel/library/include | ||||
|       ${FBGEMM_THIRD_PARTY}/cutlass/include | ||||
|       ${FBGEMM_THIRD_PARTY}/cutlass/tools/util/include | ||||
|       ${FBGEMM_GENAI_SRCS}/common/include/   # includes fbgemm_gpu/quantize/utils.h, fbgemm_gpu/quantize/tuning_cache.hpp | ||||
|       ${FBGEMM_GENAI_SRCS}/include/          # includes fbgemm_gpu/torch_ops.h | ||||
|     ) | ||||
|  | ||||
|     # Add FBGEMM_GENAI include directories for torch_ops.h | ||||
|     list(APPEND ATen_HIP_INCLUDE ${PROJECT_SOURCE_DIR}/third_party/fbgemm/fbgemm_gpu/experimental/gen_ai/src/quantize/include) | ||||
|     list(APPEND ATen_HIP_INCLUDE ${PROJECT_SOURCE_DIR}/third_party/fbgemm/fbgemm_gpu/experimental/gen_ai/src/quantize/common/include) | ||||
|   endif() | ||||
| endif() | ||||
|  | ||||
| @ -699,6 +692,12 @@ if(USE_CUDA AND NOT USE_ROCM) | ||||
|   list(APPEND ATen_CUDA_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/../../../third_party/cutlass/include) | ||||
|   list(APPEND ATen_CUDA_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/../../../third_party/cutlass/tools/util/include) | ||||
|  | ||||
|   # Add FBGEMM_GENAI include directories for torch_ops.h | ||||
|   if(USE_FBGEMM_GENAI) | ||||
|     list(APPEND ATen_CUDA_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/../../../third_party/fbgemm/fbgemm_gpu/experimental/gen_ai/src/quantize/include) | ||||
|     list(APPEND ATen_CUDA_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/../../../third_party/fbgemm/fbgemm_gpu/experimental/gen_ai/src/quantize/common/include) | ||||
|   endif() | ||||
|  | ||||
|   if($ENV{ATEN_STATIC_CUDA}) | ||||
|     if(CUDA_VERSION VERSION_LESS_EQUAL 12.9) | ||||
|       list(APPEND ATen_CUDA_DEPENDENCY_LIBS | ||||
|  | ||||
| @ -389,16 +389,37 @@ void fillVersion<DLManagedTensorVersioned>( | ||||
| // constructed out of ATen tensor | ||||
| template <class T> | ||||
| T* toDLPackImpl(const Tensor& src) { | ||||
|   auto view = src; | ||||
|  | ||||
|   // Detect whether there is need to normalize the strides | ||||
|   // Background: gh-83069 | ||||
|   // | ||||
|   // However, normalizing strides can come at a high-cost | ||||
|   // to slow down toDLPack conversion 3x, so we | ||||
|   // only normalize if needed. | ||||
|   // | ||||
|   // The following code detects whether the src follows | ||||
|   // a continuous pattern. If the src follows such pattern (common-case) | ||||
|   // then we do not need to normalize the strides. | ||||
|   bool need_normalize_strides = src.dim() == 1 && src.size(0) == 1 && src.stride(0) != 1; | ||||
|   // less common case, try normalizing the strides | ||||
|   if (need_normalize_strides) { | ||||
|     // create a new tensor with possibly normalized strides | ||||
|     // gh-83069 | ||||
|     auto shape = src.sizes(); | ||||
|     view = src.as_strided(shape, {1}, src.storage_offset()); | ||||
|   } | ||||
|  | ||||
|   ATenDLMTensor<T>* atDLMTensor(new ATenDLMTensor<T>); | ||||
|   atDLMTensor->handle = src; | ||||
|   atDLMTensor->handle = view; | ||||
|   atDLMTensor->tensor.manager_ctx = atDLMTensor; | ||||
|   atDLMTensor->tensor.deleter = &deleter<T>; | ||||
|   atDLMTensor->tensor.dl_tensor.data = src.data_ptr(); | ||||
|   atDLMTensor->tensor.dl_tensor.data = view.data_ptr(); | ||||
|   atDLMTensor->tensor.dl_tensor.device = torchDeviceToDLDevice(src.device()); | ||||
|   atDLMTensor->tensor.dl_tensor.ndim = static_cast<int32_t>(src.dim()); | ||||
|   atDLMTensor->tensor.dl_tensor.dtype = getDLDataType(src); | ||||
|   atDLMTensor->tensor.dl_tensor.shape = const_cast<int64_t*>(src.sizes().data()); | ||||
|   atDLMTensor->tensor.dl_tensor.strides = const_cast<int64_t*>(src.strides().data()); | ||||
|   atDLMTensor->tensor.dl_tensor.shape = const_cast<int64_t*>(view.sizes().data()); | ||||
|   atDLMTensor->tensor.dl_tensor.strides = const_cast<int64_t*>(view.strides().data()); | ||||
|   atDLMTensor->tensor.dl_tensor.byte_offset = 0; | ||||
|   fillVersion(&atDLMTensor->tensor); | ||||
|  | ||||
|  | ||||
| @ -52,16 +52,16 @@ struct DLPackTraits {}; | ||||
|  | ||||
| template <> | ||||
| struct DLPackTraits<DLManagedTensor> { | ||||
|   inline static constexpr const char* capsule = "dltensor"; | ||||
|   inline static constexpr const char* used = "used_dltensor"; | ||||
|   inline static const char* capsule = "dltensor"; | ||||
|   inline static const char* used = "used_dltensor"; | ||||
|   inline static auto toDLPack = at::toDLPack; | ||||
|   inline static auto fromDLPack = at::fromDLPack; | ||||
| }; | ||||
|  | ||||
| template <> | ||||
| struct DLPackTraits<DLManagedTensorVersioned> { | ||||
|   inline static constexpr const char* capsule = "dltensor_versioned"; | ||||
|   inline static constexpr const char* used = "used_dltensor_versioned"; | ||||
|   inline static const char* capsule = "dltensor_versioned"; | ||||
|   inline static const char* used = "used_dltensor_versioned"; | ||||
|   inline static auto toDLPack = at::toDLPackVersioned; | ||||
|   inline static auto fromDLPack = at::fromDLPackVersioned; | ||||
| }; | ||||
|  | ||||
| @ -16,8 +16,8 @@ inline void check_size_nonnegative(ArrayRef<int64_t> size) { | ||||
|  | ||||
| inline void check_size_nonnegative(ArrayRef<c10::SymInt> size) { | ||||
|   for (const auto& x : size) { | ||||
|     TORCH_SYM_CHECK( | ||||
|         x.sym_ge(0), | ||||
|     TORCH_CHECK( | ||||
|         x.expect_size(__FILE__, __LINE__), | ||||
|         "Trying to create tensor with negative dimension ", | ||||
|         x, | ||||
|         ": ", | ||||
|  | ||||
| @ -4,7 +4,6 @@ | ||||
| #include <c10/core/ScalarType.h> | ||||
| #include <c10/core/SymIntArrayRef.h> | ||||
| #include <c10/util/DimVector.h> | ||||
| #include <c10/util/Exception.h> | ||||
| #include <optional> | ||||
| #include <sstream> | ||||
| #include <vector> | ||||
| @ -27,7 +26,9 @@ inline void infer_size_impl( | ||||
|   std::optional<int64_t> infer_dim; | ||||
|   for (int64_t dim = 0, ndim = shape.size(); dim != ndim; dim++) { | ||||
|     if (TORCH_GUARD_OR_FALSE(sym_eq(shape[dim], -1))) { | ||||
|       TORCH_CHECK(!infer_dim, "only one dimension can be inferred"); | ||||
|       if (infer_dim) { | ||||
|         throw std::runtime_error("only one dimension can be inferred"); | ||||
|       } | ||||
|       infer_dim = dim; | ||||
|     } else { | ||||
|       // in case of unbacked shape[dim] we assume it's not -1 and add a runtime | ||||
|  | ||||
| @ -58,7 +58,7 @@ namespace at { | ||||
| namespace{ | ||||
|  | ||||
| // PyTorch allows operations to specify dim 0 and dim -1 on a scalar tensor. | ||||
| bool is_allowed_dim_on_scalar_tensor(int64_t dim) { | ||||
| static bool is_allowed_dim_on_scalar_tensor(int64_t dim) { | ||||
|   return dim == 0 || dim == -1; | ||||
| } | ||||
|  | ||||
| @ -365,7 +365,7 @@ Tensor select_batching_rule(const Tensor& self, int64_t dim, int64_t index) { | ||||
|   return self_physical.getPhysicalToLogicalMap().apply(result); | ||||
| } | ||||
|  | ||||
| int64_t getGradInputPhysicalDim(int64_t dim, IntArrayRef input_sizes, int64_t num_batch_dims) { | ||||
| static int64_t getGradInputPhysicalDim(int64_t dim, IntArrayRef input_sizes, int64_t num_batch_dims) { | ||||
|   return maybe_wrap_dim(dim, static_cast<int64_t>(input_sizes.size())) + num_batch_dims; | ||||
| } | ||||
|  | ||||
| @ -488,7 +488,7 @@ Tensor view_as_complex_batching_rule(const Tensor& self) { | ||||
| // Checks that the smallest batch stride is greater than the largest example | ||||
| // stride. This is something we can support but we choose not to because it's | ||||
| // potentially error prone. | ||||
| void checkBatchDimsAtFrontInLayout(IntArrayRef physical_strides, int64_t num_batch_dims) { | ||||
| static void checkBatchDimsAtFrontInLayout(IntArrayRef physical_strides, int64_t num_batch_dims) { | ||||
|   auto smallest_batch_stride = std::min_element( | ||||
|       physical_strides.begin(), physical_strides.begin() + num_batch_dims); | ||||
|   auto largest_example_stride = std::max_element( | ||||
| @ -508,7 +508,7 @@ void checkBatchDimsAtFrontInLayout(IntArrayRef physical_strides, int64_t num_bat | ||||
| // given (sizes, strides, storage_offset) returns the maximum location that | ||||
| // can be indexed (or nullopt if such a location doesn't exist, e.g., tensors | ||||
| // with zero-size dims). | ||||
| std::optional<int64_t> maximum_indexable_location( | ||||
| static std::optional<int64_t> maximum_indexable_location( | ||||
|     IntArrayRef sizes, IntArrayRef strides, int64_t storage_offset) { | ||||
|   auto result = native::storage_size_for(sizes, strides); | ||||
|   if (result == 0) { | ||||
| @ -521,7 +521,7 @@ std::optional<int64_t> maximum_indexable_location( | ||||
| // This checks that the range of possible memory locations accessible by | ||||
| // x.as_strided(sizes, strides, maybe_storage_offset) | ||||
| // are within the bounds of possible memory locations accessible by x. | ||||
| void checkBasicAsStridedValidForSlice( | ||||
| static void checkBasicAsStridedValidForSlice( | ||||
|     const Tensor& physical_tensor, | ||||
|     int64_t num_batch_dims, | ||||
|     IntArrayRef sizes, | ||||
|  | ||||
| @ -42,14 +42,8 @@ const PythonTorchFunctionTLS& PythonTorchFunctionTLS::get_state() { | ||||
| } | ||||
|  | ||||
| bool torch_function_mode_enabled() { | ||||
|   // Manually flatten because gcc is refusing to inline here.  Note | ||||
|   // that we are still calling __tls_get_addr twice here with GCC, | ||||
|   // presumably because of | ||||
|   // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81501 (which says | ||||
|   // the fix ships in GCC 16), but forcing inlining still improves | ||||
|   // performance. | ||||
|   const auto& ptfs = pythonTorchFunctionState; | ||||
|   return ptfs.disabled_state_ != TorchFunctionDisabledState::ALL_DISABLED && !ptfs.stack_.empty(); | ||||
|   return PythonTorchFunctionTLS::get_disabled_state() != TorchFunctionDisabledState::ALL_DISABLED && | ||||
|          PythonTorchFunctionTLS::stack_len() > 0; | ||||
| } | ||||
|  | ||||
| // This is needed to disambiguate the ternary torch function disabled states | ||||
|  | ||||
| @ -27,7 +27,6 @@ struct TORCH_API PythonTorchFunctionTLS { | ||||
|   TorchFunctionDisabledState disabled_state_ = | ||||
|       TorchFunctionDisabledState::ENABLED; | ||||
|   std::vector<std::shared_ptr<c10::SafePyObject>> stack_; | ||||
|   friend TORCH_API bool torch_function_mode_enabled(); | ||||
| }; | ||||
|  | ||||
| TORCH_API bool torch_function_mode_enabled(); | ||||
|  | ||||
| @ -13,7 +13,7 @@ namespace { | ||||
|   // and left at true for the rest of the execution. | ||||
|   // It's an optimization so that users who never use default hooks don't need to | ||||
|   // read the thread_local variables pack_hook_ and unpack_hook_. | ||||
|   bool is_initialized(false); | ||||
|   static bool is_initialized(false); | ||||
| } | ||||
|  | ||||
| static void assertSavedTensorHooksNotDisabled() { | ||||
|  | ||||
| @ -56,7 +56,7 @@ inline void get_strides(int64_t* strides, ArrayRef<OperandInfo> operands, int64_ | ||||
|   } | ||||
| } | ||||
|  | ||||
| OptionalTensorRef make_otr(const TensorBase &tensor) { | ||||
| static OptionalTensorRef make_otr(const TensorBase &tensor) { | ||||
|   if (tensor.defined()) { | ||||
|     return OptionalTensorRef(tensor); | ||||
|   } else { | ||||
|  | ||||
| @ -103,7 +103,9 @@ std::string get_cpu_capability() { | ||||
| #elif defined(HAVE_ZVECTOR_CPU_DEFINITION) | ||||
|     case native::CPUCapability::ZVECTOR: | ||||
|       return "Z VECTOR"; | ||||
| #elif defined(HAVE_SVE256_CPU_DEFINITION) && defined(HAVE_ARM_BF16_CPU_DEFINITION) | ||||
| #elif defined(HAVE_SVE_CPU_DEFINITION) && defined(HAVE_ARM_BF16_CPU_DEFINITION) | ||||
|     case native::CPUCapability::SVE128: | ||||
|       return "SVE128"; | ||||
|     case native::CPUCapability::SVE256: | ||||
|       return "SVE256"; | ||||
| #else | ||||
|  | ||||
| @ -36,7 +36,7 @@ namespace { | ||||
| using weakref_type = c10::weak_intrusive_ptr<TensorImpl, UndefinedTensorImpl>; | ||||
| using val_type = std::tuple<weakref_type, Tensor>; | ||||
|  | ||||
| ska::flat_hash_map<TensorImpl*, val_type>& get_cached_casts() { | ||||
| static ska::flat_hash_map<TensorImpl*, val_type>& get_cached_casts() { | ||||
|   static ska::flat_hash_map<TensorImpl*, val_type> cached_casts; | ||||
|   return cached_casts; | ||||
| } | ||||
|  | ||||
| @ -6,9 +6,9 @@ namespace at { | ||||
|  | ||||
| namespace { | ||||
|  | ||||
| std::array<HostAllocator*, at::COMPILE_TIME_MAX_DEVICE_TYPES> | ||||
| static std::array<HostAllocator*, at::COMPILE_TIME_MAX_DEVICE_TYPES> | ||||
|     allocator_array{}; | ||||
| std::array<uint8_t, at::COMPILE_TIME_MAX_DEVICE_TYPES> | ||||
| static std::array<uint8_t, at::COMPILE_TIME_MAX_DEVICE_TYPES> | ||||
|     allocator_priority{}; | ||||
|  | ||||
| } // anonymous namespace | ||||
|  | ||||
| @ -624,14 +624,7 @@ struct TORCH_API IValue final { | ||||
|   IValue(const c10::SymBool& i) { | ||||
|     if (auto mi = i.maybe_as_bool()) { | ||||
|       tag = Tag::Bool; | ||||
| #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ | ||||
|       payload.u.as_int = *mi; | ||||
| #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ | ||||
|       /* due to byteorder if value assigned as_int, as_bool actually is not set correctly */ | ||||
|       payload.u.as_bool = *mi; | ||||
| #else | ||||
| #error Unexpected or undefined __BYTE_ORDER__ | ||||
| #endif | ||||
|     } else { | ||||
|       tag = Tag::SymBool; | ||||
|       payload.u.as_intrusive_ptr = i.toSymNodeImpl().release(); | ||||
|  | ||||
| @ -102,8 +102,31 @@ struct VecReduceAllSIMD<float, Op> { | ||||
| #endif // defined(__GNUC__) && (__GNUC__ > 5) && !defined(_MSC_VER) && | ||||
|        // !defined(C10_MOBILE) | ||||
|  | ||||
| #if defined(__aarch64__) && !defined(C10_MOBILE) && !defined(__CUDACC__) && \ | ||||
|     !defined(CPU_CAPABILITY_SVE) | ||||
| #if defined(__aarch64__) && !defined(C10_MOBILE) && !defined(__CUDACC__) | ||||
| #if defined(CPU_CAPABILITY_SVE256) | ||||
| template <typename Op> | ||||
| struct VecReduceAllSIMD<float, Op> { | ||||
|   static inline float apply( | ||||
|       const Op& vec_fun, | ||||
|       const Vectorized<float>& acc_vec) { | ||||
|     using Vec = Vectorized<float>; | ||||
|     Vec v = acc_vec; | ||||
|     // 128-bit shuffle | ||||
|     svuint32_t ind = svdupq_n_u32(4, 5, 6, 7); | ||||
|     Vec v1 = svtbl_f32(v, ind); | ||||
|     v = vec_fun(v, v1); | ||||
|     // 64-bit shuffle | ||||
|     ind = svdupq_n_u32(2, 3, 0, 1); | ||||
|     v1 = svtbl_f32(v, ind); | ||||
|     v = vec_fun(v, v1); | ||||
|     // 32-bit shuffle | ||||
|     ind = svdupq_n_u32(1, 0, 2, 3); | ||||
|     v1 = svtbl_f32(v, ind); | ||||
|     v = vec_fun(v, v1); | ||||
|     return svlasta(svpfalse(), v); | ||||
|   } | ||||
| }; | ||||
| #else | ||||
| template <typename Op> | ||||
| struct VecReduceAllSIMD<float, Op> { | ||||
|   static inline float apply( | ||||
| @ -140,35 +163,8 @@ struct VecReduceAllSIMD<float, std::plus<Vectorized<float>>> { | ||||
|     return vaddvq_f32(acc_vec); | ||||
|   } | ||||
| }; | ||||
| #endif // defined(CPU_CAPABILITY_SVE256) | ||||
| #endif // defined(__aarch64__) && !defined(C10_MOBILE) && !defined(__CUDACC__) | ||||
|        // && !defined(CPU_CAPABILITY_SVE) | ||||
|  | ||||
| #if defined(__aarch64__) && !defined(C10_MOBILE) && !defined(__CUDACC__) && \ | ||||
|     defined(CPU_CAPABILITY_SVE256) | ||||
| template <typename Op> | ||||
| struct VecReduceAllSIMD<float, Op> { | ||||
|   static inline float apply( | ||||
|       const Op& vec_fun, | ||||
|       const Vectorized<float>& acc_vec) { | ||||
|     using Vec = Vectorized<float>; | ||||
|     Vec v = acc_vec; | ||||
|     // 128-bit shuffle | ||||
|     svuint32_t ind = svdupq_n_u32(4, 5, 6, 7); | ||||
|     Vec v1 = svtbl_f32(v, ind); | ||||
|     v = vec_fun(v, v1); | ||||
|     // 64-bit shuffle | ||||
|     ind = svdupq_n_u32(2, 3, 0, 1); | ||||
|     v1 = svtbl_f32(v, ind); | ||||
|     v = vec_fun(v, v1); | ||||
|     // 32-bit shuffle | ||||
|     ind = svdupq_n_u32(1, 0, 2, 3); | ||||
|     v1 = svtbl_f32(v, ind); | ||||
|     v = vec_fun(v, v1); | ||||
|     return svlasta(svpfalse(), v); | ||||
|   } | ||||
| }; | ||||
| #endif // defined(__aarch64__) && !defined(C10_MOBILE) && !defined(__CUDACC__) | ||||
|        // && defined(CPU_CAPABILITY_SVE256) | ||||
|  | ||||
| template <typename scalar_t, typename Op> | ||||
| inline scalar_t vec_reduce_all( | ||||
|  | ||||
| @ -1,9 +1,21 @@ | ||||
| #pragma once | ||||
|  | ||||
| #include <ATen/cpu/vec/intrinsics.h> | ||||
| #include <c10/macros/Macros.h> | ||||
| #include <cstdint> | ||||
|  | ||||
| #include <ATen/cpu/vec/vec_base.h> | ||||
|  | ||||
| #if defined(__aarch64__) &&                     \ | ||||
|     (defined(AT_BUILD_ARM_VEC256_WITH_SLEEF) || \ | ||||
|      defined(AT_BUILD_ARM_VECSVE_WITH_SLEEF)) | ||||
| #define SLEEF_STATIC_LIBS | ||||
| #include <sleef.h> | ||||
| #define USE_SLEEF(sleef_code, non_sleef_code) sleef_code | ||||
| #else | ||||
| #define USE_SLEEF(sleef_code, non_sleef_code) non_sleef_code | ||||
| #endif | ||||
|  | ||||
| #if defined(CPU_CAPABILITY_SVE) | ||||
|  | ||||
| // Define the data type of VLS(vector-length specific). | ||||
|  | ||||
| @ -2,7 +2,6 @@ | ||||
|  | ||||
| #include <ATen/cpu/vec/intrinsics.h> | ||||
| #include <ATen/cpu/vec/sve/sve_helper.h> | ||||
| #include <ATen/cpu/vec/sve/vec_common_sve.h> | ||||
| #include <ATen/cpu/vec/sve/vec_float.h> | ||||
| #include <ATen/cpu/vec/vec_base.h> | ||||
| #include <c10/util/bit_cast.h> | ||||
|  | ||||
| @ -1,6 +1,8 @@ | ||||
| #pragma once | ||||
|  | ||||
| #if defined(CPU_CAPABILITY_AVX512) | ||||
| #if defined(__aarch64__) | ||||
| #include <ATen/cpu/vec/vec_common_aarch64.h> | ||||
| #elif defined(CPU_CAPABILITY_AVX512) | ||||
| #include <ATen/cpu/vec/vec512/vec512.h> | ||||
| #else | ||||
| #include <ATen/cpu/vec/vec128/vec128.h> | ||||
| @ -11,6 +13,34 @@ namespace at::vec { | ||||
| // See Note [CPU_CAPABILITY namespace] | ||||
| inline namespace CPU_CAPABILITY { | ||||
|  | ||||
| inline std::ostream& operator<<(std::ostream& stream, const c10::qint32& val) { | ||||
|   stream << val.val_; | ||||
|   return stream; | ||||
| } | ||||
| inline std::ostream& operator<<(std::ostream& stream, const c10::qint8& val) { | ||||
|   stream << static_cast<int>(val.val_); | ||||
|   return stream; | ||||
| } | ||||
| inline std::ostream& operator<<(std::ostream& stream, const c10::quint8& val) { | ||||
|   stream << static_cast<unsigned int>(val.val_); | ||||
|   return stream; | ||||
| } | ||||
|  | ||||
| template <typename T> | ||||
| std::ostream& operator<<(std::ostream& stream, const Vectorized<T>& vec) { | ||||
|   T buf[Vectorized<T>::size()]; | ||||
|   vec.store(buf); | ||||
|   stream << "vec["; | ||||
|   for (int i = 0; i != Vectorized<T>::size(); i++) { | ||||
|     if (i != 0) { | ||||
|       stream << ", "; | ||||
|     } | ||||
|     stream << buf[i]; | ||||
|   } | ||||
|   stream << "]"; | ||||
|   return stream; | ||||
| } | ||||
|  | ||||
| inline Vectorized<bool> convert_to_bool(Vectorized<int8_t> x) { | ||||
|   __at_align__ bool buffer[x.size()]; | ||||
|   x.ne(Vectorized<int8_t>(0)).store(buffer); | ||||
|  | ||||
| @ -2,6 +2,7 @@ | ||||
|  | ||||
| // DO NOT DEFINE STATIC DATA IN THIS HEADER! | ||||
| // See Note [Do not compile initializers with AVX] | ||||
| #include <ATen/cpu/vec/sve/sve_helper.h> | ||||
| #include <ATen/cpu/vec/vec128/vec128_float_neon.h> | ||||
| #include <ATen/cpu/vec/vec128/vec128_reduced_precision_common_neon.h> | ||||
| #include <ATen/cpu/vec/vec_base.h> | ||||
| @ -262,6 +263,13 @@ class Vectorized<c10::BFloat16> : public Vectorized16< | ||||
|             c10::bit_cast<at_bfloat16_t>(val6.x), | ||||
|             c10::bit_cast<at_bfloat16_t>(val7.x)}) {} | ||||
|  | ||||
| #ifdef CPU_CAPABILITY_SVE128 | ||||
|   Vectorized(svbfloat16_t v) : Vectorized16(svget_neonq(v)) {} | ||||
|   operator svbfloat16_t() const { | ||||
|     return svset_neonq(svundef_bf16(), values); | ||||
|   } | ||||
| #endif | ||||
|  | ||||
|   static Vectorized<c10::BFloat16> blendv( | ||||
|       const Vectorized<c10::BFloat16>& a, | ||||
|       const Vectorized<c10::BFloat16>& b, | ||||
| @ -374,6 +382,23 @@ class Vectorized<c10::BFloat16> : public Vectorized16< | ||||
|   Vectorized ge(const Vectorized& other) const; | ||||
|   Vectorized lt(const Vectorized& other) const; | ||||
|   Vectorized le(const Vectorized& other) const; | ||||
|  | ||||
| #ifdef CPU_CAPABILITY_SVE128 | ||||
|  | ||||
|   template <typename step_t> | ||||
|   static Vectorized<BFloat16> arange( | ||||
|       BFloat16 base = 0.f, | ||||
|       step_t step = static_cast<step_t>(1)) { | ||||
|     __at_align__ BFloat16 buffer[size()]; | ||||
|     for (int64_t i = 0; i < size(); i++) { | ||||
|       buffer[i] = base + i * step; | ||||
|     } | ||||
|     return svget_neonq( | ||||
|         svld1_bf16(ptrue, reinterpret_cast<bfloat16_t*>(buffer))); | ||||
|   } | ||||
|  | ||||
| #endif // CPU_CAPABILITY_SVE128 | ||||
|  | ||||
| }; // Vectorized<c10::BFloat16> | ||||
|  | ||||
| inline std::tuple<Vectorized<float>, Vectorized<float>> convert_bfloat16_float( | ||||
| @ -397,6 +422,24 @@ inline Vectorized<c10::BFloat16> convert_float_bfloat16( | ||||
|   return Vectorized<c10::BFloat16>(at_vcombine_bf16(x1, x2)); | ||||
| } | ||||
|  | ||||
| inline void load_fp32_from_bf16(const BFloat16* data, Vectorized<float>& out) { | ||||
|   __at_align__ float values[Vectorized<float>::size()]; | ||||
|   for (const auto k : c10::irange(Vectorized<float>::size())) { | ||||
|     values[k] = data[k]; | ||||
|   } | ||||
|   out = Vectorized<float>::loadu(values); | ||||
| } | ||||
|  | ||||
| inline void load_fp32_from_bf16( | ||||
|     const BFloat16* data, | ||||
|     Vectorized<float>& out1, | ||||
|     Vectorized<float>& out2) { | ||||
|   Vectorized<BFloat16> bf16_vec = Vectorized<BFloat16>::loadu(data); | ||||
|   auto floats = convert_bfloat16_float(bf16_vec); | ||||
|   out1 = std::get<0>(floats); | ||||
|   out2 = std::get<1>(floats); | ||||
| } | ||||
|  | ||||
| template <typename Op> | ||||
| Vectorized<c10::BFloat16> binary_operator_via_float( | ||||
|     Op op, | ||||
| @ -579,6 +622,12 @@ Vectorized<c10::BFloat16> inline fnmsub( | ||||
|   return -a * b - c; | ||||
| } | ||||
|  | ||||
| #else // | ||||
|  | ||||
| CONVERT_NON_VECTORIZED_INIT(BFloat16, bfloat16) | ||||
|  | ||||
| LOAD_FP32_NON_VECTORIZED_INIT(BFloat16, bf16) | ||||
|  | ||||
| #endif // !defined(C10_MOBILE) && defined(__aarch64__) | ||||
|  | ||||
| } // namespace CPU_CAPABILITY | ||||
|  | ||||
| @ -4,7 +4,7 @@ | ||||
|  | ||||
| namespace at::vec { | ||||
| inline namespace CPU_CAPABILITY { | ||||
| #if (defined(__aarch64__) && !defined(CPU_CAPABILITY_SVE256)) | ||||
| #if defined(__aarch64__) && !defined(CPU_CAPABILITY_SVE256) | ||||
| template <typename src_t> | ||||
| struct VecConvert< | ||||
|     float, | ||||
| @ -60,6 +60,7 @@ struct VecConvert<float, 1, BFloat16, 1> { | ||||
|   } | ||||
| }; | ||||
|  | ||||
| #endif // defined(__aarch64__) && !defined(CPU_CAPABILITY_SVE256) | ||||
| #endif // defined(__aarch64__) && (!defined(CPU_CAPABILITY_SVE) || | ||||
|        // defined(CPU_CAPABILITY_SVE128)) | ||||
| } // namespace CPU_CAPABILITY | ||||
| } // namespace at::vec | ||||
|  | ||||
| @ -4,13 +4,10 @@ | ||||
| // See Note [Do not compile initializers with AVX] | ||||
|  | ||||
| #include <ATen/cpu/vec/intrinsics.h> | ||||
| #include <ATen/cpu/vec/sve/sve_helper.h> | ||||
| #include <ATen/cpu/vec/vec_base.h> | ||||
| #include <c10/util/irange.h> | ||||
|  | ||||
| #if defined(__aarch64__) && defined(AT_BUILD_ARM_VEC256_WITH_SLEEF) | ||||
| #include <sleef.h> | ||||
| #endif | ||||
|  | ||||
| // Sleef offers vectorized versions of some transcedentals | ||||
| // such as sin, cos, tan etc.. | ||||
| // However for now opting for STL, since we are not building | ||||
| @ -35,12 +32,6 @@ inline namespace CPU_CAPABILITY { | ||||
| #error "Big endian is not supported." | ||||
| #endif | ||||
|  | ||||
| #if defined(AT_BUILD_ARM_VEC256_WITH_SLEEF) | ||||
| #define USE_SLEEF(sleef_code, non_sleef_code) sleef_code | ||||
| #else | ||||
| #define USE_SLEEF(sleef_code, non_sleef_code) non_sleef_code | ||||
| #endif | ||||
|  | ||||
| template <int index, bool mask_val> | ||||
| struct BlendRegs { | ||||
|   static float32x4_t impl( | ||||
| @ -94,6 +85,12 @@ class Vectorized<float> { | ||||
|   operator float32x4_t() const { | ||||
|     return values; | ||||
|   } | ||||
| #ifdef CPU_CAPABILITY_SVE128 | ||||
|   Vectorized(svfloat32_t v) : values(svget_neonq(v)) {} | ||||
|   operator svfloat32_t() const { | ||||
|     return svset_neonq(svundef_f32(), values); | ||||
|   } | ||||
| #endif | ||||
|   template <int64_t mask> | ||||
|   static Vectorized<float> blend( | ||||
|       const Vectorized<float>& a, | ||||
|  | ||||
| @ -4,7 +4,6 @@ | ||||
| // See Note [Do not compile initializers with AVX] | ||||
|  | ||||
| #include <ATen/cpu/vec/intrinsics.h> | ||||
| #include <ATen/cpu/vec/vec128/vec128_convert.h> | ||||
| #include <ATen/cpu/vec/vec128/vec128_float_neon.h> | ||||
| #include <ATen/cpu/vec/vec128/vec128_reduced_precision_common_neon.h> | ||||
| #include <ATen/cpu/vec/vec_base.h> | ||||
| @ -25,7 +24,6 @@ inline namespace CPU_CAPABILITY { | ||||
| //    https://bugs.llvm.org/show_bug.cgi?id=45824 | ||||
| // Most likely we will do aarch32 support with inline asm. | ||||
| #if !defined(C10_MOBILE) && defined(__aarch64__) | ||||
|  | ||||
| #ifdef __BIG_ENDIAN__ | ||||
| #error "Big endian is not supported." | ||||
| #endif | ||||
| @ -421,6 +419,24 @@ Vectorized<c10::Half> inline operator+( | ||||
| #endif | ||||
| } | ||||
|  | ||||
| inline void load_fp32_from_fp16(const c10::Half* data, Vectorized<float>& out) { | ||||
|   __at_align__ float values[Vectorized<float>::size()]; | ||||
|   for (const auto k : c10::irange(Vectorized<float>::size())) { | ||||
|     values[k] = data[k]; | ||||
|   } | ||||
|   out = Vectorized<float>::loadu(values); | ||||
| } | ||||
|  | ||||
| inline void load_fp32_from_fp16( | ||||
|     const c10::Half* data, | ||||
|     Vectorized<float>& out1, | ||||
|     Vectorized<float>& out2) { | ||||
|   Vectorized<c10::Half> f16_vec = Vectorized<c10::Half>::loadu(data); | ||||
|   auto floats = convert_half_float(f16_vec); | ||||
|   out1 = std::get<0>(floats); | ||||
|   out2 = std::get<1>(floats); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<c10::Half> inline operator-( | ||||
|     const Vectorized<c10::Half>& a, | ||||
| @ -656,6 +672,53 @@ Vectorized<c10::Half> inline fnmsub( | ||||
|   return -a * b - c; | ||||
| #endif | ||||
| } | ||||
|  | ||||
| #else | ||||
|  | ||||
| #define CONVERT_NON_VECTORIZED_INIT(type, name)                     \ | ||||
|   inline std::tuple<Vectorized<float>, Vectorized<float>>           \ | ||||
|       convert_##name##_float(const Vectorized<type>& a) {           \ | ||||
|     constexpr int64_t K = Vectorized<type>::size();                 \ | ||||
|     __at_align__ float arr[K];                                      \ | ||||
|     __at_align__ type arr2[K];                                      \ | ||||
|     a.store(arr2);                                                  \ | ||||
|     convert(arr2, arr, K);                                          \ | ||||
|     return std::make_tuple(                                         \ | ||||
|         Vectorized<float>::loadu(arr),                              \ | ||||
|         Vectorized<float>::loadu(arr + Vectorized<float>::size())); \ | ||||
|   }                                                                 \ | ||||
|   inline Vectorized<type> convert_float_##name(                     \ | ||||
|       const Vectorized<float>& a, const Vectorized<float>& b) {     \ | ||||
|     constexpr int64_t K = Vectorized<type>::size();                 \ | ||||
|     __at_align__ float arr[K];                                      \ | ||||
|     __at_align__ type arr2[K];                                      \ | ||||
|     a.store(arr);                                                   \ | ||||
|     b.store(arr + Vectorized<float>::size());                       \ | ||||
|     convert(arr, arr2, K);                                          \ | ||||
|     return Vectorized<type>::loadu(arr2);                           \ | ||||
|   } | ||||
|  | ||||
| #define LOAD_FP32_NON_VECTORIZED_INIT(type, name)                           \ | ||||
|   inline void load_fp32_from_##name(                                        \ | ||||
|       const type* data, Vectorized<float>& out) {                           \ | ||||
|     __at_align__ float values[Vectorized<float>::size()];                   \ | ||||
|     for (const auto k : c10::irange(Vectorized<float>::size())) {           \ | ||||
|       values[k] = data[k];                                                  \ | ||||
|     }                                                                       \ | ||||
|     out = Vectorized<float>::loadu(values);                                 \ | ||||
|   }                                                                         \ | ||||
|                                                                             \ | ||||
|   inline void load_fp32_from_##name(                                        \ | ||||
|       const type* data, Vectorized<float>& out1, Vectorized<float>& out2) { \ | ||||
|     load_fp32_from_##name(data, out1);                                      \ | ||||
|     data += Vectorized<float>::size();                                      \ | ||||
|     load_fp32_from_##name(data, out2);                                      \ | ||||
|   } | ||||
|  | ||||
| CONVERT_NON_VECTORIZED_INIT(Half, half) | ||||
|  | ||||
| LOAD_FP32_NON_VECTORIZED_INIT(Half, fp16) | ||||
|  | ||||
| #endif // !defined(C10_MOBILE) && defined(__aarch64__) | ||||
|  | ||||
| } // namespace CPU_CAPABILITY | ||||
|  | ||||
| @ -9,21 +9,16 @@ | ||||
| #if !(                                                 \ | ||||
|     defined(__VSX__) || defined(CPU_CAPABILITY_VSX) || \ | ||||
|     defined(CPU_CAPABILITY_ZVECTOR)) | ||||
| #if defined(CPU_CAPABILITY_SVE256) | ||||
| #include <ATen/cpu/vec/sve/vec_common_sve.h> | ||||
| #else | ||||
| // clang-format off | ||||
| #include <ATen/cpu/vec/vec256/vec256_float.h> | ||||
| #include <ATen/cpu/vec/vec256/vec256_double.h> | ||||
| #include <ATen/cpu/vec/vec256/vec256_float.h> | ||||
| #include <ATen/cpu/vec/vec256/vec256_int.h> | ||||
| #include <ATen/cpu/vec/vec256/vec256_qint.h> | ||||
| #endif | ||||
| #if !defined(CPU_CAPABILITY_SVE256) || !defined(__ARM_FEATURE_BF16) | ||||
| #include <ATen/cpu/vec/vec256/vec256_bfloat16.h> | ||||
| #endif | ||||
| #include <ATen/cpu/vec/vec256/vec256_half.h> | ||||
| #include <ATen/cpu/vec/vec256/vec256_complex_float.h> | ||||
| #include <ATen/cpu/vec/vec256/vec256_complex_double.h> | ||||
| #include <ATen/cpu/vec/vec256/vec256_complex_float.h> | ||||
| #include <ATen/cpu/vec/vec256/vec256_half.h> | ||||
| // clang-format on | ||||
| #elif defined(__VSX__) || defined(CPU_CAPABILITY_VSX) | ||||
| #include <ATen/cpu/vec/vec256/vsx/vec256_common_vsx.h> | ||||
| @ -56,34 +51,6 @@ namespace at::vec { | ||||
| // accessed as `at::vec`. | ||||
| inline namespace CPU_CAPABILITY { | ||||
|  | ||||
| inline std::ostream& operator<<(std::ostream& stream, const c10::qint32& val) { | ||||
|   stream << val.val_; | ||||
|   return stream; | ||||
| } | ||||
| inline std::ostream& operator<<(std::ostream& stream, const c10::qint8& val) { | ||||
|   stream << static_cast<int>(val.val_); | ||||
|   return stream; | ||||
| } | ||||
| inline std::ostream& operator<<(std::ostream& stream, const c10::quint8& val) { | ||||
|   stream << static_cast<unsigned int>(val.val_); | ||||
|   return stream; | ||||
| } | ||||
|  | ||||
| template <typename T> | ||||
| std::ostream& operator<<(std::ostream& stream, const Vectorized<T>& vec) { | ||||
|   T buf[Vectorized<T>::size()]; | ||||
|   vec.store(buf); | ||||
|   stream << "vec["; | ||||
|   for (int i = 0; i != Vectorized<T>::size(); i++) { | ||||
|     if (i != 0) { | ||||
|       stream << ", "; | ||||
|     } | ||||
|     stream << buf[i]; | ||||
|   } | ||||
|   stream << "]"; | ||||
|   return stream; | ||||
| } | ||||
|  | ||||
| #if defined(CPU_CAPABILITY_AVX2) | ||||
|  | ||||
| // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CAST (AVX2) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||||
|  | ||||
| @ -268,9 +268,7 @@ LOAD_FP32_VECTORIZED_INIT(BFloat16, bf16) | ||||
|  | ||||
| #else // defined(CPU_CAPABILITY_AVX2) | ||||
|  | ||||
| #if !(                                                                      \ | ||||
|     defined(__aarch64__) && !defined(C10_MOBILE) && !defined(__CUDACC__) && \ | ||||
|     !defined(CPU_CAPABILITY_SVE256)) | ||||
| #if !(defined(__aarch64__)) | ||||
| CONVERT_NON_VECTORIZED_INIT(BFloat16, bfloat16) | ||||
| #endif | ||||
|  | ||||
|  | ||||
| @ -268,9 +268,7 @@ LOAD_FP32_VECTORIZED_INIT(Half, fp16) | ||||
|  | ||||
| #else // defined(CPU_CAPABILITY_AVX2) | ||||
|  | ||||
| #if !(                                                                      \ | ||||
|     defined(__aarch64__) && !defined(C10_MOBILE) && !defined(__CUDACC__) && \ | ||||
|     !defined(CPU_CAPABILITY_SVE256)) | ||||
| #if !defined(__aarch64__) || defined(CPU_CAPABILITY_SVE256) | ||||
| CONVERT_NON_VECTORIZED_INIT(Half, half) | ||||
| #endif | ||||
|  | ||||
|  | ||||
| @ -5,6 +5,13 @@ | ||||
|  | ||||
| #include <ATen/cpu/vec/intrinsics.h> | ||||
| #include <ATen/cpu/vec/vec_base.h> | ||||
|  | ||||
| #ifdef __aarch64__ | ||||
| #if defined(CPU_CAPABILITY_SVE128) || !defined(CPU_CAPABILITY_SVE) | ||||
| #include <ATen/cpu/vec/vec128/vec128_float_neon.h> | ||||
| #endif | ||||
| #endif | ||||
|  | ||||
| #include <ATen/native/quantized/AffineQuantizerBase.h> | ||||
|  | ||||
| #include <c10/util/irange.h> | ||||
| @ -915,7 +922,7 @@ Vectorized<c10::quint8> inline maximum( | ||||
|   return a.maximum(b); | ||||
| } | ||||
|  | ||||
| #elif !defined(CPU_CAPABILITY_SVE256) | ||||
| #else | ||||
|  | ||||
| // NOTE: These are low-performance implementations that we fall back on | ||||
| // if we are not building with AVX2. This may not be an issue, because | ||||
| @ -1372,12 +1379,18 @@ Vectorized<c10::quint8> inline maximum( | ||||
|   return a.maximum(b); | ||||
| } | ||||
|  | ||||
| #endif // if defined(CPU_CAPABILITY_AVX2) | ||||
|  | ||||
| #if (defined(__aarch64__) && !defined(CPU_CAPABILITY_SVE256)) | ||||
| #if defined(__aarch64__) && \ | ||||
|     (defined(CPU_CAPABILITY_SVE128) || !defined(CPU_CAPABILITY_SVE)) | ||||
| std::pair<Vectorized<float>, Vectorized<float>> inline convert_int8_to_float( | ||||
|     at::vec::Vectorized<int8_t> src) { | ||||
|  | ||||
| #ifdef CPU_CAPABILITY_SVE | ||||
|   svint8_t x = src; | ||||
|   auto s8x8 = vget_low_s8(svget_neonq(x)); | ||||
| #else | ||||
|   auto s8x8 = vld1_s8(src.operator const int8_t*()); | ||||
| #endif | ||||
|  | ||||
|   auto s16x8 = vmovl_s8(s8x8); | ||||
|  | ||||
|   auto s32x4_hi = vmovl_s16(vget_high_s16(s16x8)); | ||||
| @ -1402,7 +1415,14 @@ std::pair<Vectorized<float>, Vectorized<float>> inline convert_int8_to_float( | ||||
|  | ||||
| Vectorized<float> inline convert_int8_half_register_to_float( | ||||
|     at::vec::Vectorized<int8_t> src) { | ||||
|  | ||||
| #ifdef CPU_CAPABILITY_SVE | ||||
|   svint8_t x = src; | ||||
|   auto s8x8 = vget_low_s8(svget_neonq(x)); | ||||
| #else | ||||
|   auto s8x8 = vld1_s8(src.operator const int8_t*()); | ||||
| #endif | ||||
|  | ||||
|   auto s16x8 = vmovl_s8(s8x8); | ||||
|  | ||||
|   auto s32x4_lo = vmovl_s16(vget_low_s16(s16x8)); | ||||
| @ -1420,5 +1440,8 @@ Vectorized<float> inline convert_int8_half_register_to_float( | ||||
| } | ||||
|  | ||||
| #endif | ||||
|  | ||||
| #endif // if defined(CPU_CAPABILITY_AVX2) | ||||
|  | ||||
| } // namespace CPU_CAPABILITY | ||||
| } // namespace at::vec | ||||
|  | ||||
| @ -31,34 +31,6 @@ namespace vec { | ||||
| // See Note [CPU_CAPABILITY namespace] | ||||
| inline namespace CPU_CAPABILITY { | ||||
|  | ||||
| inline std::ostream& operator<<(std::ostream& stream, const c10::qint32& val) { | ||||
|   stream << val.val_; | ||||
|   return stream; | ||||
| } | ||||
| inline std::ostream& operator<<(std::ostream& stream, const c10::qint8& val) { | ||||
|   stream << static_cast<int>(val.val_); | ||||
|   return stream; | ||||
| } | ||||
| inline std::ostream& operator<<(std::ostream& stream, const c10::quint8& val) { | ||||
|   stream << static_cast<unsigned int>(val.val_); | ||||
|   return stream; | ||||
| } | ||||
|  | ||||
| template <typename T> | ||||
| std::ostream& operator<<(std::ostream& stream, const Vectorized<T>& vec) { | ||||
|   T buf[Vectorized<T>::size()]; | ||||
|   vec.store(buf); | ||||
|   stream << "vec["; | ||||
|   for (int i = 0; i != Vectorized<T>::size(); i++) { | ||||
|     if (i != 0) { | ||||
|       stream << ", "; | ||||
|     } | ||||
|     stream << buf[i]; | ||||
|   } | ||||
|   stream << "]"; | ||||
|   return stream; | ||||
| } | ||||
|  | ||||
| #if defined(CPU_CAPABILITY_AVX512) | ||||
|  | ||||
| // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CAST (AVX512) | ||||
|  | ||||
| @ -67,18 +67,7 @@ Windows llvm will not have this definition. | ||||
| #endif | ||||
| #define VECTOR_WIDTH 64 | ||||
| #define int_vector __m512i | ||||
| #elif defined(__aarch64__) && \ | ||||
|     !defined(CPU_CAPABILITY_SVE) // CPU_CAPABILITY_AVX512 | ||||
| // SVE code expects 256-vectors; leave that set for SVE? | ||||
| #if defined(__GNUC__) | ||||
| #define __at_align__ __attribute__((aligned(16))) | ||||
| #elif defined(_WIN32) | ||||
| #define __at_align__ __declspec(align(16)) | ||||
| #else | ||||
| #define __at_align__ | ||||
| #endif | ||||
| #define VECTOR_WIDTH 16 | ||||
| #else // CPU_CAPABILITY_AVX512 | ||||
| #elif defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_SVE256) | ||||
| #if defined(__GNUC__) | ||||
| #define __at_align__ __attribute__((aligned(32))) | ||||
| #elif defined(_WIN32) | ||||
| @ -88,7 +77,27 @@ Windows llvm will not have this definition. | ||||
| #endif | ||||
| #define VECTOR_WIDTH 32 | ||||
| #define int_vector __m256i | ||||
| #endif // CPU_CAPABILITY_AVX512 | ||||
| #elif defined(__aarch64__) | ||||
| // Define alignment and vector width for SVE128/Default (e.g., NEON) | ||||
| #if defined(__GNUC__) | ||||
| #define __at_align__ __attribute__((aligned(16))) | ||||
| #elif defined(_WIN32) | ||||
| #define __at_align__ __declspec(align(16)) | ||||
| #else | ||||
| #define __at_align__ | ||||
| #endif | ||||
| #define VECTOR_WIDTH 16 | ||||
| #else | ||||
| // Fallback: define default alignment and vector width | ||||
| #if defined(__GNUC__) | ||||
| #define __at_align__ __attribute__((aligned(32))) | ||||
| #elif defined(_WIN32) | ||||
| #define __at_align__ __declspec(align(32)) | ||||
| #else | ||||
| #define __at_align__ | ||||
| #endif | ||||
| #define VECTOR_WIDTH 32 | ||||
| #endif | ||||
|  | ||||
| namespace at::vec { | ||||
| // See Note [CPU_CAPABILITY namespace] | ||||
|  | ||||
| @ -8,13 +8,48 @@ | ||||
| #include <ATen/cpu/vec/sve/sve_helper.h> | ||||
| #include <ATen/cpu/vec/vec_base.h> | ||||
| 
 | ||||
| #if defined(CPU_CAPABILITY_SVE) | ||||
| #include <ATen/cpu/vec/sve/vec_bfloat16.h> | ||||
| #include <ATen/cpu/vec/sve/vec_double.h> | ||||
| #include <ATen/cpu/vec/sve/vec_float.h> | ||||
| #include <ATen/cpu/vec/sve/vec_int.h> | ||||
| #ifdef CPU_CAPABILITY_SVE128 | ||||
| 
 | ||||
| #include <ATen/cpu/vec/vec128/vec128_float_neon.h> | ||||
| 
 | ||||
| #include <ATen/cpu/vec/vec128/vec128_bfloat16_neon.h> | ||||
| 
 | ||||
| #include <ATen/cpu/vec/vec128/vec128_half_neon.h> | ||||
| 
 | ||||
| #include <ATen/cpu/vec/vec128/vec128_convert.h> | ||||
| 
 | ||||
| #include <ATen/cpu/vec/sve/vec_qint.h> | ||||
| #endif | ||||
| 
 | ||||
| #elif defined(CPU_CAPABILITY_SVE) | ||||
| 
 | ||||
| #include <ATen/cpu/vec/sve/vec_float.h> | ||||
| 
 | ||||
| #include <ATen/cpu/vec/sve/vec_bfloat16.h> | ||||
| 
 | ||||
| #include <ATen/cpu/vec/sve/vec_double.h> | ||||
| #include <ATen/cpu/vec/sve/vec_int.h> | ||||
| 
 | ||||
| #include <ATen/cpu/vec/sve/vec_qint.h> | ||||
| 
 | ||||
| #include <ATen/cpu/vec/vec256/vec256_half.h> | ||||
| 
 | ||||
| #include <ATen/cpu/vec/vec256/vec256_convert.h> | ||||
| 
 | ||||
| #else // NEON
 | ||||
| 
 | ||||
| #include <ATen/cpu/vec/vec128/vec128_float_neon.h> | ||||
| 
 | ||||
| #include <ATen/cpu/vec/vec128/vec128_half_neon.h> | ||||
| 
 | ||||
| #include <ATen/cpu/vec/vec128/vec128_bfloat16_neon.h> | ||||
| 
 | ||||
| #include <ATen/cpu/vec/vec128/vec128_convert.h> | ||||
| 
 | ||||
| #include <ATen/cpu/vec/vec256/vec256_qint.h> | ||||
| 
 | ||||
| #endif // defined(CPU_CAPABILITY_SVE128)
 | ||||
| 
 | ||||
| #include <ATen/cpu/vec/functional.h> | ||||
| 
 | ||||
| namespace at::vec { | ||||
| // Note [CPU_CAPABILITY namespace]
 | ||||
| @ -48,12 +83,6 @@ DEFINE_SVE_CAST(int32_t, s32, float, f32) | ||||
| DEFINE_SVE_CAST(int16_t, s16, float, f32) | ||||
| DEFINE_SVE_CAST(float, f32, double, f64) | ||||
| 
 | ||||
| #ifdef __ARM_FEATURE_BF16 | ||||
| DEFINE_SVE_CAST(int64_t, s64, c10::BFloat16, bf16) | ||||
| DEFINE_SVE_CAST(int32_t, s32, c10::BFloat16, bf16) | ||||
| DEFINE_SVE_CAST(int16_t, s16, c10::BFloat16, bf16) | ||||
| #endif // __ARM_FEATURE_BF16
 | ||||
| 
 | ||||
| // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GATHER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 | ||||
| 
 | ||||
| template <int64_t scale = 1> | ||||
| @ -173,9 +202,11 @@ std::pair< | ||||
|   // group cols crossing lanes:
 | ||||
|   //   return {a0, b0, a1, b1, a2, b2, a3, b3}
 | ||||
|   //          {a4, b4, a5, b5, a6, b6, a7, b7}
 | ||||
|   return std::make_pair( | ||||
|       Vectorized<c10::BFloat16>(svzip1_bf16(a, b)), | ||||
|       Vectorized<c10::BFloat16>(svzip2_bf16(a, b))); | ||||
|   svbfloat16_t aReg = a; | ||||
|   svbfloat16_t bReg = b; | ||||
|   Vectorized<c10::BFloat16> c = svzip1_bf16(aReg, bReg); | ||||
|   Vectorized<c10::BFloat16> d = svzip2_bf16(aReg, bReg); | ||||
|   return std::make_pair(c, d); | ||||
| } | ||||
| #endif // __ARM_FEATURE_BF16
 | ||||
| 
 | ||||
| @ -224,12 +255,27 @@ std::pair< | ||||
|   // swap lanes:
 | ||||
|   //   return {a0, a1, a2, a3, a4, a5, a6, a7}
 | ||||
|   //          {b0, b1, b2, b3, b4, b5, b6, b7}
 | ||||
|   return std::make_pair( | ||||
|       Vectorized<c10::BFloat16>(svuzp1_bf16((svbfloat16_t)a, (svbfloat16_t)b)), | ||||
|       Vectorized<c10::BFloat16>(svuzp2_bf16((svbfloat16_t)a, (svbfloat16_t)b))); | ||||
|   svbfloat16_t aReg = a; | ||||
|   svbfloat16_t bReg = b; | ||||
|   Vectorized<c10::BFloat16> c = svuzp1_bf16(aReg, bReg); | ||||
|   Vectorized<c10::BFloat16> d = svuzp2_bf16(aReg, bReg); | ||||
|   return std::make_pair(c, d); | ||||
| } | ||||
| #endif // __ARM_FEATURE_BF16
 | ||||
| 
 | ||||
| // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FLIP ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 | ||||
| #define DEFINE_FLIP_FUNC(type, sve_func)                    \ | ||||
|   inline Vectorized<type> flip(const Vectorized<type>& v) { \ | ||||
|     return Vectorized<type>(sve_func(v));                   \ | ||||
|   } | ||||
| // Use the macro to define the flip functions
 | ||||
| DEFINE_FLIP_FUNC(float, svrev_f32) | ||||
| DEFINE_FLIP_FUNC(double, svrev_f64) | ||||
| DEFINE_FLIP_FUNC(int64_t, svrev_s64) | ||||
| DEFINE_FLIP_FUNC(int32_t, svrev_s32) | ||||
| DEFINE_FLIP_FUNC(int16_t, svrev_s16) | ||||
| DEFINE_FLIP_FUNC(int8_t, svrev_s8) | ||||
| 
 | ||||
| #endif // defined(CPU_CAPABILITY_SVE)
 | ||||
| 
 | ||||
| } // namespace CPU_CAPABILITY
 | ||||
| @ -108,7 +108,7 @@ static hipblasStatus_t rocBLASStatusToHIPStatus(rocblas_status error) | ||||
|  | ||||
| namespace { | ||||
|  | ||||
| cublasOperation_t _cublasOpFromChar(char op) { | ||||
| static cublasOperation_t _cublasOpFromChar(char op) { | ||||
|   // NOLINTNEXTLINE(bugprone-switch-missing-default-case) | ||||
|   switch (op) { | ||||
|     case 'n': | ||||
| @ -128,7 +128,7 @@ cublasOperation_t _cublasOpFromChar(char op) { | ||||
|       "_cublasOpFromChar input should be 't', 'n' or 'c' but got `", op, "`"); | ||||
| } | ||||
|  | ||||
| void _cublasAdjustLdLevel2(int64_t m, int64_t n, int64_t* lda) { | ||||
| static void _cublasAdjustLdLevel2(int64_t m, int64_t n, int64_t* lda) { | ||||
|   // Note: leading dimensions generally are checked that they are > 0 | ||||
|   // and at least as big the result requires (even if the value won't | ||||
|   // be used). | ||||
| @ -142,7 +142,7 @@ void _cublasAdjustLdLevel2(int64_t m, int64_t n, int64_t* lda) { | ||||
|     *lda = std::max<int64_t>(m, 1); | ||||
| } | ||||
|  | ||||
| void _cublasAdjustLdLevel3( | ||||
| static void _cublasAdjustLdLevel3( | ||||
|     char transa, | ||||
|     char transb, | ||||
|     int64_t m, | ||||
|  | ||||
| @ -15,19 +15,19 @@ namespace cuda::detail { | ||||
| namespace { | ||||
|  | ||||
| // Total number of gpus in the system. | ||||
| int64_t num_gpus; | ||||
| static int64_t num_gpus; | ||||
|  | ||||
| // Ensures default_gens_cuda is initialized once. | ||||
| std::deque<c10::once_flag> cuda_gens_init_flag; | ||||
| static std::deque<c10::once_flag> cuda_gens_init_flag; | ||||
|  | ||||
| // Default, global CUDA generators, one per GPU. | ||||
| std::vector<Generator> default_gens_cuda; | ||||
| static std::vector<Generator> default_gens_cuda; | ||||
|  | ||||
| /* | ||||
|  * Populates the global variables related to CUDA generators | ||||
|  * Warning: this function must only be called once! | ||||
|  */ | ||||
| void initCUDAGenVector() { | ||||
| static void initCUDAGenVector() { | ||||
|   // Ensures we only call cudaGetDeviceCount only once. | ||||
|   static bool num_gpu_init_flag [[maybe_unused]] = []() { | ||||
|     num_gpus = static_cast<int32_t>(c10::cuda::device_count()); | ||||
|  | ||||
| @ -2,8 +2,6 @@ | ||||
|  | ||||
| #include <ATen/ATen.h> | ||||
|  | ||||
| #include <c10/util/Exception.h> | ||||
|  | ||||
| namespace at::native { | ||||
|  | ||||
| cudnnDataType_t getCudnnDataTypeFromScalarType(const at::ScalarType dtype) { | ||||
| @ -22,10 +20,9 @@ cudnnDataType_t getCudnnDataTypeFromScalarType(const at::ScalarType dtype) { | ||||
|   } else if (dtype == at::kByte) { | ||||
|     return CUDNN_DATA_UINT8; | ||||
|   } | ||||
|   TORCH_CHECK(false, | ||||
|     "getCudnnDataTypeFromScalarType() not supported for ", | ||||
|     toString(dtype) | ||||
|   ); | ||||
|   std::string msg("getCudnnDataTypeFromScalarType() not supported for "); | ||||
|   msg += toString(dtype); | ||||
|   throw std::runtime_error(msg); | ||||
| } | ||||
|  | ||||
| cudnnDataType_t getCudnnDataType(const at::Tensor& tensor) { | ||||
|  | ||||
| @ -39,7 +39,7 @@ Tensor vdot_decomp(const Tensor& A, const Tensor& B) { | ||||
| // NB: I wrote this like this because we *might* want its for a future matmul | ||||
| // batch rule that isn't decomposed... | ||||
| // "tv" = tensor @ vector | ||||
| std::tuple<Tensor, std::optional<int64_t>> tv_batch_rule( | ||||
| static std::tuple<Tensor, std::optional<int64_t>> tv_batch_rule( | ||||
|     const Tensor& self, std::optional<int64_t> self_bdim, | ||||
|     const Tensor& other, std::optional<int64_t> other_bdim) { | ||||
|   if (self_bdim && other_bdim) { | ||||
| @ -66,7 +66,7 @@ std::tuple<Tensor, std::optional<int64_t>> tv_batch_rule( | ||||
|   TORCH_INTERNAL_ASSERT(false, "can't get here"); | ||||
| } | ||||
|  | ||||
| std::tuple<Tensor, std::optional<int64_t>> mv_batch_rule( | ||||
| static std::tuple<Tensor, std::optional<int64_t>> mv_batch_rule( | ||||
|     const Tensor& self, std::optional<int64_t> self_bdim, | ||||
|     const Tensor& other, std::optional<int64_t> other_bdim) { | ||||
|   auto self_logical_rank = rankWithoutBatchDim(self, self_bdim); | ||||
| @ -79,7 +79,7 @@ std::tuple<Tensor, std::optional<int64_t>> mv_batch_rule( | ||||
|   return tv_batch_rule(self, self_bdim, other, other_bdim); | ||||
| } | ||||
|  | ||||
| std::tuple<Tensor, std::optional<int64_t>> mm_batch_rule( | ||||
| static std::tuple<Tensor, std::optional<int64_t>> mm_batch_rule( | ||||
|     const Tensor& self, std::optional<int64_t> self_bdim, | ||||
|     const Tensor& other, std::optional<int64_t> other_bdim) { | ||||
|   auto self_logical_rank = rankWithoutBatchDim(self, self_bdim); | ||||
| @ -94,7 +94,7 @@ std::tuple<Tensor, std::optional<int64_t>> mm_batch_rule( | ||||
|   return std::make_tuple( at::matmul(self_, other_), 0 ); | ||||
| } | ||||
|  | ||||
| std::tuple<Tensor, std::optional<int64_t>> bmm_batch_rule( | ||||
| static std::tuple<Tensor, std::optional<int64_t>> bmm_batch_rule( | ||||
|     const Tensor& self, std::optional<int64_t> self_bdim, | ||||
|     const Tensor& other, std::optional<int64_t> other_bdim) { | ||||
|   auto self_logical_rank = rankWithoutBatchDim(self, self_bdim); | ||||
| @ -250,7 +250,7 @@ struct LinalgCheckMatrixBinaryRuleHelper<op_name, F, Func, typelist<A, B, T...>> | ||||
|   } | ||||
| }; | ||||
|  | ||||
| void expect_at_least_rank( | ||||
| static void expect_at_least_rank( | ||||
|     const Tensor& tensor, | ||||
|     std::optional<int64_t> tensor_bdim, | ||||
|     int64_t expected_rank, | ||||
| @ -472,7 +472,7 @@ atol_rtol_tensor_batch_rule( | ||||
|   return std::make_tuple(Func(input_, atol_, rtol_, hermitian), 0); | ||||
| } | ||||
|  | ||||
| std::tuple<Tensor, std::optional<int64_t>> | ||||
| static std::tuple<Tensor, std::optional<int64_t>> | ||||
| pinv_batch_rule( | ||||
|     const Tensor& input, std::optional<int64_t> input_bdim, const std::optional<Tensor>& atol, | ||||
|     const std::optional<int64_t> atol_bdim, const std::optional<Tensor>& rtol, | ||||
|  | ||||
| @ -213,22 +213,40 @@ static cudnn_grid_sample_backward_batch_rule( | ||||
|   return grid_sample_backward_helper_out(std::move(bw_out), 0, 0, bdim_size); | ||||
| } | ||||
|  | ||||
| // uses functional formulation for one_hot under vmap to be compatible with | ||||
| // fakeTensor/dynamic shapes and compiled functorch transforms. | ||||
| // mirrors the meta path in aten/src/ATen/native/Onehot.cpp, | ||||
| // but requires explicit positive num_classes under vmap to avoid | ||||
| // data-dependent output shapes. | ||||
| // TODO: replace with targetable functionalization | ||||
| static Tensor one_hot_decomposition_hack(const Tensor &self, int64_t num_classes) { | ||||
|     TORCH_CHECK(self.dtype() == kLong, "one_hot is only applicable to index tensor."); | ||||
|     auto shape = self.sym_sizes().vec(); | ||||
|  | ||||
|     // empty tensor could be converted to one hot representation, | ||||
|     // but shape inference is not possible. | ||||
|     if (self.sym_numel() == 0) { | ||||
|         if (num_classes <= 0) { | ||||
|             TORCH_CHECK(false, "Can not infer total number of classes from empty tensor."); | ||||
|         } else { | ||||
|             shape.emplace_back(num_classes); | ||||
|             return at::empty_symint(shape, self.options()); | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     // disallow implicit inference under vmap; this would be data-dependent | ||||
|     // and is intentionally guarded by Dynamo in torch/_dynamo/variables/torch.py. | ||||
|     TORCH_CHECK(num_classes > 0, "When vmap-ing torch.nn.functional.one_hot, please " | ||||
|         "provide an explicit positive num_classes argument."); | ||||
|  | ||||
|     const auto options = self.options(); | ||||
|     at::Tensor index = at::arange(num_classes, options); | ||||
|     return at::eq(self.unsqueeze(-1), index).to(at::kLong); | ||||
|     // Disabling all of the following checks. This is OK because scatter has checks too. | ||||
|     // Maybe one_hot should be a primitive wrt autograd so we don't have to deal with this. | ||||
|     // // non-empty tensor | ||||
|     // if (self.device().type() != at::kCUDA) { | ||||
|     //   //for cuda, rely on device assert thrown by scatter | ||||
|     //   TORCH_CHECK(self.min().item().toLong() >= 0, "Class values must be non-negative."); | ||||
|     // } | ||||
|     // if (self.device().type() != at::kCUDA) { | ||||
|     //   //rely on device asserts from scatter to avoid sync here | ||||
|     //   TORCH_CHECK(num_classes > self.max().item().toLong(), "Class values must be smaller than num_classes."); | ||||
|     // } | ||||
|  | ||||
|     shape.emplace_back(num_classes); | ||||
|     Tensor ret = at::zeros_symint(shape, self.options()); | ||||
|     return ret.scatter(-1, self.unsqueeze(-1), 1); | ||||
| } | ||||
|  | ||||
| template <typename A, A a, typename C> | ||||
|  | ||||
| @ -12,14 +12,13 @@ | ||||
| #include <ATen/native/IndexKernel.h> | ||||
| #include <ATen/native/IndexingUtils.h> | ||||
| #include <torch/library.h> | ||||
| #include <c10/util/Exception.h> | ||||
|  | ||||
|  | ||||
| // NOLINTBEGIN(bugprone-unchecked-optional-access) | ||||
| namespace at::functorch { | ||||
|  | ||||
| namespace { | ||||
| bool any_has_value(ArrayRef<std::optional<int64_t>> bdims) { | ||||
| static bool any_has_value(ArrayRef<std::optional<int64_t>> bdims) { | ||||
|   for (const auto& bdim : bdims) { | ||||
|     if (bdim.has_value()) { | ||||
|       return true; | ||||
| @ -28,7 +27,7 @@ bool any_has_value(ArrayRef<std::optional<int64_t>> bdims) { | ||||
|   return false; | ||||
| } | ||||
|  | ||||
| int64_t get_num_leading_nones(ArrayRef<std::optional<Tensor>> indices) { | ||||
| static int64_t get_num_leading_nones(ArrayRef<std::optional<Tensor>> indices) { | ||||
|   int64_t result = 0; | ||||
|   for (const auto& idx : indices) { | ||||
|     if (!idx.has_value() || !idx->defined()) { | ||||
| @ -40,7 +39,7 @@ int64_t get_num_leading_nones(ArrayRef<std::optional<Tensor>> indices) { | ||||
|   return result; | ||||
| } | ||||
|  | ||||
| int64_t get_max_index_logical_dim( | ||||
| static int64_t get_max_index_logical_dim( | ||||
|     ArrayRef<std::optional<Tensor>> indices, | ||||
|     ArrayRef<std::optional<int64_t>> indices_bdims) { | ||||
|   int64_t max_logical_dim = -1; | ||||
| @ -57,7 +56,7 @@ int64_t get_max_index_logical_dim( | ||||
|   return max_logical_dim; | ||||
| } | ||||
|  | ||||
| std::vector<std::optional<Tensor>> batchIndices( | ||||
| static std::vector<std::optional<Tensor>> batchIndices( | ||||
|   at::TensorOptions options, | ||||
|   ArrayRef<std::optional<Tensor>> indices, | ||||
|   ArrayRef<std::optional<int64_t>> indices_bdims, | ||||
| @ -95,10 +94,9 @@ std::vector<std::optional<Tensor>> batchIndices( | ||||
|     if (index.has_value() && index->sym_numel() != 0) { | ||||
|       const auto idx_bdim = indices_bdims[i]; | ||||
|       indices_.emplace_back(maybePadToLogicalRank(moveBatchDimToFront(index.value(), idx_bdim), idx_bdim, maxLogicalRank)); | ||||
|       TORCH_CHECK( | ||||
|         !(index.value().dtype() == kBool) || !indices_bdims[i].has_value(), | ||||
|         "vmap: We do not support batching operators that can support dynamic shape. Attempting to batch over indexing with a boolean mask." | ||||
|       ); | ||||
|       if (index.value().dtype() == kBool && indices_bdims[i].has_value()) { | ||||
|         throw std::runtime_error("vmap: We do not support batching operators that can support dynamic shape. Attempting to batch over indexing with a boolean mask."); | ||||
|       } | ||||
|     } else { | ||||
|       indices_.push_back(index); | ||||
|     } | ||||
| @ -126,7 +124,7 @@ std::vector<std::optional<Tensor>> batchIndices( | ||||
|  | ||||
| // Define an "advanced index" to be a selection object that is | ||||
| // a non-trivial Tensor (i.e. it does not represent :). | ||||
| bool is_advanced_index(const std::optional<Tensor>& idx) { | ||||
| static bool is_advanced_index(const std::optional<Tensor>& idx) { | ||||
|   if (!idx.has_value()) { | ||||
|     return false; | ||||
|   } | ||||
| @ -137,7 +135,7 @@ bool is_advanced_index(const std::optional<Tensor>& idx) { | ||||
| } | ||||
|  | ||||
| // See NOTE: [advanced indices adjacent] for definition | ||||
| bool are_advanced_indices_adjacent(ArrayRef<std::optional<Tensor>> indices) { | ||||
| static bool are_advanced_indices_adjacent(ArrayRef<std::optional<Tensor>> indices) { | ||||
|   int64_t num_advanced_indices_regions = 0; | ||||
|   bool in_advanced_indices_region = false; | ||||
|   for (const auto& idx : indices) { | ||||
| @ -165,7 +163,7 @@ bool are_advanced_indices_adjacent(ArrayRef<std::optional<Tensor>> indices) { | ||||
| // - result: Tensor[B, 4, 5, 6, 2, 3, 7, 8] | ||||
| //                     -------  ---- | ||||
| //                     region2  region1 | ||||
| Tensor swap_regions(const Tensor& tensor, int64_t first_region_size, int64_t second_region_size) { | ||||
| static Tensor swap_regions(const Tensor& tensor, int64_t first_region_size, int64_t second_region_size) { | ||||
|   VmapDimVector permutation(tensor.dim(), 0); | ||||
|   std::iota(permutation.begin(), permutation.end(), 0); | ||||
|   std::rotate( | ||||
| @ -553,7 +551,7 @@ Tensor &_index_put_impl__plumbing(Tensor &self, const List<std::optional<Tensor> | ||||
|   return self; | ||||
| } | ||||
|  | ||||
| Tensor maybe_permute_values( | ||||
| static Tensor maybe_permute_values( | ||||
|     const Tensor& values, | ||||
|     ArrayRef<std::optional<Tensor>> orig_indices, | ||||
|     ArrayRef<std::optional<int64_t>> orig_indices_bdims) { | ||||
| @ -1052,7 +1050,7 @@ std::tuple<Tensor, std::optional<int64_t>> index_add_batch_rule( | ||||
|                                    other, other_bdim, alpha, false); | ||||
| } | ||||
|  | ||||
| std::tuple<Tensor,Tensor> binary_pointwise_align( | ||||
| static std::tuple<Tensor,Tensor> binary_pointwise_align( | ||||
|     const Tensor & self, | ||||
|     std::optional<int64_t> self_bdim, | ||||
|     const Tensor & mask, | ||||
|  | ||||
| @ -346,7 +346,7 @@ std::tuple<Tensor, std::optional<int64_t>> slice_batch_rule( | ||||
|   return std::make_tuple(std::move(result), 0); | ||||
| } | ||||
|  | ||||
| bool is_allowed_dim_on_scalar_tensor(int64_t dim) { | ||||
| static bool is_allowed_dim_on_scalar_tensor(int64_t dim) { | ||||
|   return dim == 0 || dim == -1; | ||||
| } | ||||
|  | ||||
|  | ||||
| @ -3,7 +3,6 @@ | ||||
| #include <ATen/functorch/Macros.h> | ||||
| #include <ATen/core/dispatch/Dispatcher.h> | ||||
| #include <c10/core/impl/LocalDispatchKeySet.h> | ||||
| #include <c10/util/Exception.h> | ||||
| #include <optional> | ||||
| #include <bitset> | ||||
| #include <utility> | ||||
| @ -107,10 +106,9 @@ struct VmapInterpreterMeta { | ||||
|  | ||||
|   template <typename T> | ||||
|   friend void to_json(T& json_j, const VmapInterpreterMeta& json_t) { | ||||
|     TORCH_CHECK( | ||||
|       !json_t.batchSize_.is_heap_allocated(), | ||||
|       "Serialization for heap-allocated SymInt is not implemented yet" | ||||
|     ); | ||||
|     if (json_t.batchSize_.is_heap_allocated()) { | ||||
|       throw std::runtime_error("Serialization for heap-allocated SymInt is not implemented yet"); | ||||
|     } | ||||
|     json_j["batchSize"] = json_t.batchSize_.as_int_unchecked(); | ||||
|     json_j["randomness"] = static_cast<int64_t>(json_t.randomness_); | ||||
|   } | ||||
| @ -304,7 +302,7 @@ struct Interpreter { | ||||
|     } else if (meta.contains("Functionalize")) { | ||||
|       json_t.meta_.emplace<FunctionalizeInterpreterMeta>(meta["Functionalize"].template get<FunctionalizeInterpreterMeta>()); | ||||
|     } else { | ||||
|       TORCH_CHECK(false, "unknown interpreter metadata type"); | ||||
|       throw std::runtime_error("unknown interpreter metadata type"); | ||||
|     } | ||||
|   } | ||||
|  | ||||
|  | ||||
| @ -68,18 +68,18 @@ namespace at::functorch { | ||||
|  | ||||
| namespace{ | ||||
| // PyTorch allows operations to specify dim 0 and dim -1 on a scalar tensor. | ||||
| bool is_allowed_dim_on_scalar_tensor(int64_t dim) { | ||||
| static bool is_allowed_dim_on_scalar_tensor(int64_t dim) { | ||||
|   return dim == 0 || dim == -1; | ||||
| } | ||||
|  | ||||
| int64_t get_current_level() { | ||||
| static int64_t get_current_level() { | ||||
|   auto maybe_level = maybeCurrentDynamicLayer(); | ||||
|   TORCH_INTERNAL_ASSERT(maybe_level.has_value()); | ||||
|   return maybe_level->layerId(); | ||||
| } | ||||
|  | ||||
| // This check should probably go into the dispatcher... | ||||
| bool participatesInCurrentLevel(const Tensor& self) { | ||||
| static bool participatesInCurrentLevel(const Tensor& self) { | ||||
|   auto current_level = get_current_level(); | ||||
|   auto* maybe_batched_impl = maybeGetBatchedImpl(self); | ||||
|   if (!maybe_batched_impl) { | ||||
| @ -90,7 +90,7 @@ bool participatesInCurrentLevel(const Tensor& self) { | ||||
|   return self_level == current_level; | ||||
| } | ||||
|  | ||||
| bool participatesInCurrentLevel(ITensorListRef self) { | ||||
| static bool participatesInCurrentLevel(ITensorListRef self) { | ||||
|   for (const Tensor& tensor : self) { | ||||
|     if (participatesInCurrentLevel(tensor)) { | ||||
|       return true; | ||||
| @ -285,7 +285,7 @@ std::vector<Tensor> unbind_batching_rule(const Tensor& self, int64_t dim) { | ||||
| // given (sizes, strides, storage_offset) returns the maximum location that | ||||
| // can be indexed (or nullopt if such a location doesn't exist, e.g., tensors | ||||
| // with zero-size dims). | ||||
| std::optional<c10::SymInt> maximum_indexable_location( | ||||
| static std::optional<c10::SymInt> maximum_indexable_location( | ||||
|     c10::SymIntArrayRef sizes, c10::SymIntArrayRef strides, const c10::SymInt& storage_offset) { | ||||
|   auto result = native::storage_size_for(sizes, strides); | ||||
|   if (result == 0) { | ||||
| @ -298,7 +298,7 @@ std::optional<c10::SymInt> maximum_indexable_location( | ||||
| // This checks that the range of possible memory locations accessible by | ||||
| // x.as_strided(sizes, strides, maybe_storage_offset) | ||||
| // are within the bounds of possible memory locations accessible by x. | ||||
| void checkBasicAsStridedValidForSlice( | ||||
| static void checkBasicAsStridedValidForSlice( | ||||
|     const Tensor& physical_tensor, | ||||
|     int64_t num_batch_dims, | ||||
|     c10::SymIntArrayRef sizes, | ||||
|  | ||||
| @ -6,7 +6,6 @@ | ||||
| #include <ATen/functorch/BatchedTensorImpl.h> | ||||
| #include <ATen/Dispatch.h> | ||||
| #include <c10/util/irange.h> | ||||
| #include <c10/util/Exception.h> | ||||
| #include <ATen/NamedTensorUtils.h> | ||||
| #include <ATen/native/LinearAlgebraUtils.h> | ||||
| #include <ATen/native/xnnpack/Engine.h> | ||||
| @ -71,7 +70,7 @@ Tensor linear_hack(const Tensor& input, const Tensor& weight, const std::optiona | ||||
|   return output; | ||||
| } | ||||
|  | ||||
| inline at::Tensor apply_loss_reduction(const at::Tensor& unreduced, int64_t reduction) { | ||||
| static inline at::Tensor apply_loss_reduction(const at::Tensor& unreduced, int64_t reduction) { | ||||
|   if (reduction == at::Reduction::Mean) { | ||||
|     return unreduced.mean(); | ||||
|   } else if (reduction == at::Reduction::Sum) { | ||||
| @ -109,7 +108,9 @@ Tensor binary_cross_entropy_with_logits_hack( | ||||
| } | ||||
|  | ||||
| Tensor trace_backward_decomp(const Tensor& grad, IntArrayRef sizes) { | ||||
|   TORCH_CHECK(sizes.size() == 2, "expected matrix input"); | ||||
|   if (sizes.size() != 2) { | ||||
|     throw std::runtime_error("expected matrix input"); | ||||
|   } | ||||
|   auto grad_input = at::zeros(sizes[0] * sizes[1], grad.options()); | ||||
|   auto indices = at::arange(0, grad_input.numel(), sizes[1] + 1, grad.options().dtype(at::kLong)); | ||||
|   // Workaround using index_put instead of yet unsupported index_fill_ | ||||
| @ -127,7 +128,7 @@ namespace { | ||||
| template<bool inplace> | ||||
| using Ctype = std::conditional_t<inplace, Tensor&, Tensor>; | ||||
|  | ||||
| Tensor make_feature_noise(const Tensor& input) { | ||||
| static Tensor make_feature_noise(const Tensor& input) { | ||||
|   auto input_sizes = input.sizes(); | ||||
|   TORCH_CHECK(input.dim() >= 2, "Feature dropout requires at least 2 dimensions in the input"); | ||||
|   std::vector<int64_t> sizes; | ||||
| @ -141,7 +142,7 @@ Tensor make_feature_noise(const Tensor& input) { | ||||
|   return at::empty(sizes, input.options()); | ||||
| } | ||||
|  | ||||
| bool is_fused_kernel_acceptable(const Tensor& input, double p) { | ||||
| static bool is_fused_kernel_acceptable(const Tensor& input, double p) { | ||||
|   return (input.is_cuda() || input.is_xpu() || input.is_lazy() || input.is_privateuseone()) && p > 0 && p < 1 && input.numel() > 0; | ||||
| } | ||||
|  | ||||
| @ -210,7 +211,7 @@ ALIAS_SPECIALIZATION(_feature_dropout,       true,  false) | ||||
| ALIAS_SPECIALIZATION(_alpha_dropout,         false, true ) | ||||
| ALIAS_SPECIALIZATION(_feature_alpha_dropout, true,  true ) | ||||
|  | ||||
| Tensor dropout(const Tensor& input, double p, bool train) { | ||||
| static Tensor dropout(const Tensor& input, double p, bool train) { | ||||
|   auto result = [&]() { | ||||
|     NoNamesGuard guard; | ||||
|     if (train && is_fused_kernel_acceptable(input, p)) { | ||||
|  | ||||
| @ -24,7 +24,7 @@ namespace at::native { | ||||
| namespace { | ||||
|  | ||||
| template <typename scalar_t> | ||||
| void adaptive_avg_pool3d_out_frame( | ||||
| static void adaptive_avg_pool3d_out_frame( | ||||
|     const scalar_t* input_p, | ||||
|     scalar_t* output_p, | ||||
|     int64_t sizeD, | ||||
| @ -176,7 +176,7 @@ void adaptive_avg_pool3d_out_cpu_template( | ||||
| } | ||||
|  | ||||
| template <typename scalar_t> | ||||
| void adaptive_avg_pool3d_backward_out_frame( | ||||
| static void adaptive_avg_pool3d_backward_out_frame( | ||||
|     scalar_t* gradInput_p, | ||||
|     const scalar_t* gradOutput_p, | ||||
|     int64_t sizeD, | ||||
|  | ||||
| @ -93,7 +93,7 @@ namespace { | ||||
| // 5d tensor B x D x T x H x W | ||||
|  | ||||
| template <typename scalar_t> | ||||
| void adaptive_max_pool3d_single_out_frame( | ||||
| static void adaptive_max_pool3d_single_out_frame( | ||||
|           const scalar_t *input_p, | ||||
|           scalar_t *output_p, | ||||
|           int64_t *ind_p, | ||||
| @ -170,7 +170,7 @@ void adaptive_max_pool3d_single_out_frame( | ||||
| } | ||||
|  | ||||
| template <typename scalar_t> | ||||
| void adaptive_max_pool3d_out_frame( | ||||
| static void adaptive_max_pool3d_out_frame( | ||||
|           const scalar_t *input_data, | ||||
|           scalar_t *output_data, | ||||
|           int64_t *indices_data, | ||||
| @ -202,7 +202,7 @@ void adaptive_max_pool3d_out_frame( | ||||
| } | ||||
|  | ||||
| template <typename scalar_t> | ||||
| void adaptive_max_pool3d_backward_single_out_frame( | ||||
| static void adaptive_max_pool3d_backward_single_out_frame( | ||||
|           scalar_t *gradInput_p, | ||||
|           const scalar_t *gradOutput_p, | ||||
|           const int64_t *ind_p, | ||||
| @ -241,7 +241,7 @@ void adaptive_max_pool3d_backward_single_out_frame( | ||||
| } | ||||
|  | ||||
| template <typename scalar_t> | ||||
| void adaptive_max_pool3d_backward_out_frame( | ||||
| static void adaptive_max_pool3d_backward_out_frame( | ||||
|           scalar_t *gradInput_data, | ||||
|           const scalar_t *gradOutput_data, | ||||
|           const int64_t *indices_data, | ||||
|  | ||||
| @ -153,7 +153,7 @@ namespace at::native { | ||||
| namespace { | ||||
|  | ||||
| template <typename scalar_t> | ||||
| void avg_pool3d_out_frame( | ||||
| static void avg_pool3d_out_frame( | ||||
|           const scalar_t *input_p, | ||||
|           scalar_t *output_p, | ||||
|           int64_t nslices, | ||||
| @ -333,7 +333,7 @@ TORCH_IMPL_FUNC(avg_pool3d_out_cpu) ( | ||||
| namespace { | ||||
|  | ||||
| template <typename scalar_t> | ||||
| void avg_pool3d_backward_out_frame( | ||||
| static void avg_pool3d_backward_out_frame( | ||||
|           scalar_t *gradInput_p, | ||||
|           const scalar_t *gradOutput_p, | ||||
|           int64_t nslices, | ||||
|  | ||||
| @ -143,13 +143,13 @@ Tensor& cholesky_inverse_kernel_impl(Tensor& result, Tensor& infos, bool upper) | ||||
|  For more info see https://github.com/pytorch/pytorch/issues/145801#issuecomment-2631781776 | ||||
| */ | ||||
| template <typename T> | ||||
| inline | ||||
| static inline | ||||
| std::enable_if_t<std::is_floating_point_v<T>, int> lapack_work_to_int(const T val) { | ||||
|     const auto next_after = std::nextafter(val, std::numeric_limits<T>::infinity()); | ||||
|     return std::max<int>(1, std::ceil(next_after)); | ||||
| } | ||||
| template <typename T> | ||||
| inline | ||||
| static inline | ||||
| std::enable_if_t<c10::is_complex<T>::value, int> lapack_work_to_int(const T val) { | ||||
|     return lapack_work_to_int(val.real()); | ||||
| } | ||||
| @ -343,7 +343,7 @@ void linalg_eigh_kernel(const Tensor& eigenvalues, const Tensor& eigenvectors, c | ||||
|   For further details, please see the LAPACK documentation for GEQRF. | ||||
| */ | ||||
| template <typename scalar_t> | ||||
| void apply_geqrf(const Tensor& input, const Tensor& tau) { | ||||
| static void apply_geqrf(const Tensor& input, const Tensor& tau) { | ||||
| #if !AT_BUILD_WITH_LAPACK() | ||||
|   TORCH_CHECK( | ||||
|       false, | ||||
| @ -1039,7 +1039,7 @@ void lu_solve_kernel(const Tensor& LU, const Tensor& pivots, const Tensor& B, Tr | ||||
| } | ||||
|  | ||||
| template <typename scalar_t> | ||||
| void apply_svd(const Tensor& A, | ||||
| static void apply_svd(const Tensor& A, | ||||
|                       const bool full_matrices, | ||||
|                       const bool compute_uv, | ||||
|                       const Tensor& U, | ||||
| @ -1157,103 +1157,103 @@ REGISTER_AVX512_DISPATCH(cholesky_stub, &cholesky_kernel) | ||||
| REGISTER_AVX2_DISPATCH(cholesky_stub, &cholesky_kernel) | ||||
| REGISTER_VSX_DISPATCH(cholesky_stub, &cholesky_kernel) | ||||
| REGISTER_ZVECTOR_DISPATCH(cholesky_stub, &cholesky_kernel) | ||||
| REGISTER_SVE256_DISPATCH(cholesky_stub, &cholesky_kernel) | ||||
| REGISTER_SVE_DISPATCH(cholesky_stub, &cholesky_kernel) | ||||
|  | ||||
| REGISTER_ARCH_DISPATCH(cholesky_inverse_stub, DEFAULT, &cholesky_inverse_kernel_impl) | ||||
| REGISTER_AVX512_DISPATCH(cholesky_inverse_stub, &cholesky_inverse_kernel_impl) | ||||
| REGISTER_AVX2_DISPATCH(cholesky_inverse_stub, &cholesky_inverse_kernel_impl) | ||||
| REGISTER_VSX_DISPATCH(cholesky_inverse_stub, &cholesky_inverse_kernel_impl) | ||||
| REGISTER_ZVECTOR_DISPATCH(cholesky_inverse_stub, &cholesky_inverse_kernel_impl) | ||||
| REGISTER_SVE256_DISPATCH(cholesky_inverse_stub, &cholesky_inverse_kernel_impl) | ||||
| REGISTER_SVE_DISPATCH(cholesky_inverse_stub, &cholesky_inverse_kernel_impl) | ||||
|  | ||||
| REGISTER_ARCH_DISPATCH(linalg_eig_stub, DEFAULT, &linalg_eig_kernel) | ||||
| REGISTER_AVX512_DISPATCH(linalg_eig_stub, &linalg_eig_kernel) | ||||
| REGISTER_AVX2_DISPATCH(linalg_eig_stub, &linalg_eig_kernel) | ||||
| REGISTER_VSX_DISPATCH(linalg_eig_stub, &linalg_eig_kernel) | ||||
| REGISTER_ZVECTOR_DISPATCH(linalg_eig_stub, &linalg_eig_kernel) | ||||
| REGISTER_SVE256_DISPATCH(linalg_eig_stub, &linalg_eig_kernel) | ||||
| REGISTER_SVE_DISPATCH(linalg_eig_stub, &linalg_eig_kernel) | ||||
|  | ||||
| REGISTER_ARCH_DISPATCH(linalg_eigh_stub, DEFAULT, &linalg_eigh_kernel) | ||||
| REGISTER_AVX512_DISPATCH(linalg_eigh_stub, &linalg_eigh_kernel) | ||||
| REGISTER_AVX2_DISPATCH(linalg_eigh_stub, &linalg_eigh_kernel) | ||||
| REGISTER_VSX_DISPATCH(linalg_eigh_stub, &linalg_eigh_kernel) | ||||
| REGISTER_ZVECTOR_DISPATCH(linalg_eigh_stub, &linalg_eigh_kernel) | ||||
| REGISTER_SVE256_DISPATCH(linalg_eigh_stub, &linalg_eigh_kernel) | ||||
| REGISTER_SVE_DISPATCH(linalg_eigh_stub, &linalg_eigh_kernel) | ||||
|  | ||||
| REGISTER_ARCH_DISPATCH(geqrf_stub, DEFAULT, &geqrf_kernel) | ||||
| REGISTER_AVX512_DISPATCH(geqrf_stub, &geqrf_kernel) | ||||
| REGISTER_AVX2_DISPATCH(geqrf_stub, &geqrf_kernel) | ||||
| REGISTER_VSX_DISPATCH(geqrf_stub, &geqrf_kernel) | ||||
| REGISTER_ZVECTOR_DISPATCH(geqrf_stub, &geqrf_kernel) | ||||
| REGISTER_SVE256_DISPATCH(geqrf_stub, &geqrf_kernel) | ||||
| REGISTER_SVE_DISPATCH(geqrf_stub, &geqrf_kernel) | ||||
|  | ||||
| REGISTER_ARCH_DISPATCH(orgqr_stub, DEFAULT, &orgqr_kernel_impl) | ||||
| REGISTER_AVX512_DISPATCH(orgqr_stub, &orgqr_kernel_impl) | ||||
| REGISTER_AVX2_DISPATCH(orgqr_stub, &orgqr_kernel_impl) | ||||
| REGISTER_VSX_DISPATCH(orgqr_stub, &orgqr_kernel_impl) | ||||
| REGISTER_ZVECTOR_DISPATCH(orgqr_stub, &orgqr_kernel_impl) | ||||
| REGISTER_SVE256_DISPATCH(orgqr_stub, &orgqr_kernel_impl) | ||||
| REGISTER_SVE_DISPATCH(orgqr_stub, &orgqr_kernel_impl) | ||||
|  | ||||
| REGISTER_ARCH_DISPATCH(ormqr_stub, DEFAULT, &ormqr_kernel) | ||||
| REGISTER_AVX512_DISPATCH(ormqr_stub, &ormqr_kernel) | ||||
| REGISTER_AVX2_DISPATCH(ormqr_stub, &ormqr_kernel) | ||||
| REGISTER_VSX_DISPATCH(ormqr_stub, &ormqr_kernel) | ||||
| REGISTER_ZVECTOR_DISPATCH(ormqr_stub, &ormqr_kernel) | ||||
| REGISTER_SVE256_DISPATCH(ormqr_stub, &ormqr_kernel) | ||||
| REGISTER_SVE_DISPATCH(ormqr_stub, &ormqr_kernel) | ||||
|  | ||||
| REGISTER_ARCH_DISPATCH(lstsq_stub, DEFAULT, &lstsq_kernel) | ||||
| REGISTER_AVX512_DISPATCH(lstsq_stub, &lstsq_kernel) | ||||
| REGISTER_AVX2_DISPATCH(lstsq_stub, &lstsq_kernel) | ||||
| REGISTER_VSX_DISPATCH(lstsq_stub, &lstsq_kernel) | ||||
| REGISTER_ZVECTOR_DISPATCH(lstsq_stub, &lstsq_kernel) | ||||
| REGISTER_SVE256_DISPATCH(lstsq_stub, &lstsq_kernel) | ||||
| REGISTER_SVE_DISPATCH(lstsq_stub, &lstsq_kernel) | ||||
|  | ||||
| REGISTER_ARCH_DISPATCH(triangular_solve_stub, DEFAULT, &triangular_solve_kernel) | ||||
| REGISTER_AVX512_DISPATCH(triangular_solve_stub, &triangular_solve_kernel) | ||||
| REGISTER_AVX2_DISPATCH(triangular_solve_stub, &triangular_solve_kernel) | ||||
| REGISTER_VSX_DISPATCH(triangular_solve_stub, &triangular_solve_kernel) | ||||
| REGISTER_ZVECTOR_DISPATCH(triangular_solve_stub, &triangular_solve_kernel) | ||||
| REGISTER_SVE256_DISPATCH(triangular_solve_stub, &triangular_solve_kernel) | ||||
| REGISTER_SVE_DISPATCH(triangular_solve_stub, &triangular_solve_kernel) | ||||
|  | ||||
| REGISTER_ARCH_DISPATCH(lu_factor_stub, DEFAULT, &lu_factor_kernel) | ||||
| REGISTER_AVX512_DISPATCH(lu_factor_stub, &lu_factor_kernel) | ||||
| REGISTER_AVX2_DISPATCH(lu_factor_stub, &lu_factor_kernel) | ||||
| REGISTER_VSX_DISPATCH(lu_factor_stub, &lu_factor_kernel) | ||||
| REGISTER_ZVECTOR_DISPATCH(lu_factor_stub, &lu_factor_kernel) | ||||
| REGISTER_SVE256_DISPATCH(lu_factor_stub, &lu_factor_kernel) | ||||
| REGISTER_SVE_DISPATCH(lu_factor_stub, &lu_factor_kernel) | ||||
|  | ||||
| REGISTER_ARCH_DISPATCH(ldl_factor_stub, DEFAULT, &ldl_factor_kernel) | ||||
| REGISTER_AVX512_DISPATCH(ldl_factor_stub, &ldl_factor_kernel) | ||||
| REGISTER_AVX2_DISPATCH(ldl_factor_stub, &ldl_factor_kernel) | ||||
| REGISTER_VSX_DISPATCH(ldl_factor_stub, &ldl_factor_kernel) | ||||
| REGISTER_ZVECTOR_DISPATCH(ldl_factor_stub, &ldl_factor_kernel) | ||||
| REGISTER_SVE256_DISPATCH(ldl_factor_stub, &ldl_factor_kernel) | ||||
| REGISTER_SVE_DISPATCH(ldl_factor_stub, &ldl_factor_kernel) | ||||
|  | ||||
| REGISTER_ARCH_DISPATCH(ldl_solve_stub, DEFAULT, &ldl_solve_kernel) | ||||
| REGISTER_AVX512_DISPATCH(ldl_solve_stub, &ldl_solve_kernel) | ||||
| REGISTER_AVX2_DISPATCH(ldl_solve_stub, &ldl_solve_kernel) | ||||
| REGISTER_VSX_DISPATCH(ldl_solve_stub, &ldl_solve_kernel) | ||||
| REGISTER_ZVECTOR_DISPATCH(ldl_solve_stub, &ldl_solve_kernel) | ||||
| REGISTER_SVE256_DISPATCH(ldl_solve_stub, &ldl_solve_kernel) | ||||
| REGISTER_SVE_DISPATCH(ldl_solve_stub, &ldl_solve_kernel) | ||||
|  | ||||
| REGISTER_ARCH_DISPATCH(lu_solve_stub, DEFAULT, &lu_solve_kernel) | ||||
| REGISTER_AVX512_DISPATCH(lu_solve_stub, &lu_solve_kernel) | ||||
| REGISTER_AVX2_DISPATCH(lu_solve_stub, &lu_solve_kernel) | ||||
| REGISTER_VSX_DISPATCH(lu_solve_stub, &lu_solve_kernel) | ||||
| REGISTER_ZVECTOR_DISPATCH(lu_solve_stub, &lu_solve_kernel) | ||||
| REGISTER_SVE256_DISPATCH(lu_solve_stub, &lu_solve_kernel) | ||||
| REGISTER_SVE_DISPATCH(lu_solve_stub, &lu_solve_kernel) | ||||
|  | ||||
| REGISTER_ARCH_DISPATCH(svd_stub, DEFAULT, &svd_kernel) | ||||
| REGISTER_AVX512_DISPATCH(svd_stub, &svd_kernel) | ||||
| REGISTER_AVX2_DISPATCH(svd_stub, &svd_kernel) | ||||
| REGISTER_VSX_DISPATCH(svd_stub, &svd_kernel) | ||||
| REGISTER_ZVECTOR_DISPATCH(svd_stub, &svd_kernel) | ||||
| REGISTER_SVE256_DISPATCH(svd_stub, &svd_kernel) | ||||
| REGISTER_SVE_DISPATCH(svd_stub, &svd_kernel) | ||||
|  | ||||
| REGISTER_ARCH_DISPATCH(unpack_pivots_stub, DEFAULT, &unpack_pivots_cpu_kernel) | ||||
| REGISTER_AVX512_DISPATCH(unpack_pivots_stub, &unpack_pivots_cpu_kernel) | ||||
| REGISTER_AVX2_DISPATCH(unpack_pivots_stub, &unpack_pivots_cpu_kernel) | ||||
| REGISTER_VSX_DISPATCH(unpack_pivots_stub, &unpack_pivots_cpu_kernel) | ||||
| REGISTER_ZVECTOR_DISPATCH(unpack_pivots_stub, &unpack_pivots_cpu_kernel) | ||||
| REGISTER_SVE256_DISPATCH(unpack_pivots_stub, &unpack_pivots_cpu_kernel) | ||||
| REGISTER_SVE_DISPATCH(unpack_pivots_stub, &unpack_pivots_cpu_kernel) | ||||
| } // namespace at::native | ||||
|  | ||||
| @ -71,7 +71,7 @@ | ||||
| namespace at::native { | ||||
| namespace { | ||||
|  | ||||
| void col2im_out_cpu_template( | ||||
| static void col2im_out_cpu_template( | ||||
|     Tensor& output, | ||||
|     const Tensor& input_, | ||||
|     IntArrayRef output_size, | ||||
|  | ||||
| @ -25,7 +25,7 @@ namespace at::native { | ||||
|  | ||||
| namespace { | ||||
|  | ||||
| Tensor compute_columns2d( | ||||
| static Tensor compute_columns2d( | ||||
|     const Tensor& input, | ||||
|     IntArrayRef padding, | ||||
|     IntArrayRef stride, | ||||
| @ -93,7 +93,7 @@ Tensor compute_columns2d( | ||||
|   return columns.contiguous(); | ||||
| } | ||||
|  | ||||
| inline void slow_conv2d_shape_check( | ||||
| static inline void slow_conv2d_shape_check( | ||||
|     const Tensor& input, | ||||
|     const Tensor& grad_output, | ||||
|     const Tensor& weight, | ||||
| @ -205,7 +205,7 @@ inline void slow_conv2d_shape_check( | ||||
|   } | ||||
| } | ||||
|  | ||||
| inline Tensor view_weight_2d(const Tensor& weight_, | ||||
| static inline Tensor view_weight_2d(const Tensor& weight_, | ||||
|     at::MemoryFormat memory_format = at::MemoryFormat::Contiguous) { | ||||
|   Tensor weight = weight_.contiguous(memory_format); | ||||
|   if (weight.dim() == 4) { | ||||
| @ -220,7 +220,7 @@ inline Tensor view_weight_2d(const Tensor& weight_, | ||||
| } | ||||
|  | ||||
| template <typename scalar_t> | ||||
| void slow_conv2d_update_output_frame( | ||||
| static void slow_conv2d_update_output_frame( | ||||
|     TensorAccessor<const scalar_t, 3> input, | ||||
|     TensorAccessor<scalar_t, 3> output, | ||||
|     TensorAccessor<const scalar_t, 2> weight, | ||||
| @ -480,7 +480,7 @@ void slow_conv2d_backward_weight_frame( | ||||
|   } | ||||
| } | ||||
|  | ||||
| void slow_conv2d_backward_weight_out_cpu_template( | ||||
| static void slow_conv2d_backward_weight_out_cpu_template( | ||||
|     Tensor& grad_weight, | ||||
|     const Tensor& input, | ||||
|     const Tensor& grad_output_, | ||||
|  | ||||
| @ -28,7 +28,7 @@ namespace at::native { | ||||
|  | ||||
| namespace { | ||||
|  | ||||
| Tensor compute_columns3d( | ||||
| static Tensor compute_columns3d( | ||||
|     const Tensor& input_, | ||||
|     IntArrayRef stride, | ||||
|     IntArrayRef padding, | ||||
| @ -108,7 +108,7 @@ Tensor compute_columns3d( | ||||
|   return columns; | ||||
| } | ||||
|  | ||||
| inline void slow_conv3d_shape_check( | ||||
| static inline void slow_conv3d_shape_check( | ||||
|     const Tensor& input, | ||||
|     const Tensor& grad_output, | ||||
|     const Tensor& weight, | ||||
| @ -273,7 +273,7 @@ inline void slow_conv3d_shape_check( | ||||
|   } | ||||
| } | ||||
|  | ||||
| Tensor view_weight_2d(const Tensor& weight_) { | ||||
| static Tensor view_weight_2d(const Tensor& weight_) { | ||||
|   Tensor weight = weight_.contiguous(); | ||||
|   if (weight.dim() == 5) { | ||||
|     const int64_t s1 = weight.size(0); | ||||
| @ -286,7 +286,7 @@ Tensor view_weight_2d(const Tensor& weight_) { | ||||
| } | ||||
|  | ||||
| template <typename scalar_t> | ||||
| void slow_conv3d_update_output_frame( | ||||
| static void slow_conv3d_update_output_frame( | ||||
|     TensorAccessor<const scalar_t, 4> input, | ||||
|     TensorAccessor<scalar_t, 4> output, | ||||
|     TensorAccessor<const scalar_t, 2> weight, | ||||
| @ -515,7 +515,7 @@ void slow_conv3d_backward_weight_frame( | ||||
|       grad_weight.data(), ldc, grad_weight.stride(0) * n); | ||||
| } | ||||
|  | ||||
| void slow_conv3d_backward_parameters_out_cpu_template( | ||||
| static void slow_conv3d_backward_parameters_out_cpu_template( | ||||
|     Tensor& grad_weight, | ||||
|     const Tensor& input, | ||||
|     const Tensor& grad_output, | ||||
|  | ||||
| @ -39,19 +39,21 @@ static CPUCapability compute_cpu_capability() { | ||||
|     } | ||||
| #elif defined(HAVE_SVE_CPU_DEFINITION) | ||||
|     int sve_vl = cpuinfo_get_max_arm_sve_length(); //Returns maximum SVE VL supported by your HW. | ||||
| #ifdef HAVE_SVE256_CPU_DEFINITION | ||||
|     if (envar == "sve256") { | ||||
|     if (envar == "sve") { | ||||
|       // Select SVE capability based on the maximum SVE VL supported by the HW. | ||||
|       if (sve_vl == 256) { | ||||
| #ifdef HAVE_ARM_BF16_CPU_DEFINITION | ||||
|         if (cpuinfo_has_arm_bf16()) { | ||||
|           return CPUCapability::SVE256; | ||||
|         } | ||||
| #endif | ||||
|       } else if (sve_vl == 128) { | ||||
|         if (cpuinfo_has_arm_bf16()) { | ||||
|           return CPUCapability::SVE128; | ||||
|         } | ||||
|       } else { | ||||
|         TORCH_WARN("SVE capability not available on hardware. Falling back to DEFAULT"); | ||||
|         return CPUCapability::DEFAULT; | ||||
|       } | ||||
|       TORCH_WARN("SVE256 capability not available on hardware. Falling back to DEFAULT"); | ||||
|       return CPUCapability::DEFAULT; | ||||
|     } | ||||
| #endif | ||||
| #else | ||||
| #ifdef HAVE_AVX512_CPU_DEFINITION | ||||
|     if (envar == "avx512") { | ||||
| @ -113,6 +115,11 @@ static CPUCapability compute_cpu_capability() { | ||||
|         #endif | ||||
|         } | ||||
|     #endif | ||||
|     #ifdef HAVE_SVE128_CPU_DEFINITION | ||||
|         if (sve_vl == 128) { // Check for SVE128 | ||||
|             return CPUCapability::SVE128; | ||||
|         } | ||||
|     #endif | ||||
|     // Return the default CPU capability. | ||||
|     return CPUCapability::DEFAULT; | ||||
|   } | ||||
| @ -147,6 +154,9 @@ DispatchResult DispatchStubImpl::try_get_call_ptr( | ||||
| #ifdef HAVE_SVE256_CPU_DEFINITION | ||||
|   , void *SVE256 | ||||
| #endif | ||||
| #ifdef HAVE_SVE128_CPU_DEFINITION | ||||
|   , void *SVE128 | ||||
| #endif | ||||
| ) { | ||||
|   constexpr auto supported_devices = c10::array_of<c10::DeviceType>( | ||||
|         c10::DeviceType::CPU, | ||||
| @ -184,6 +194,9 @@ DispatchResult DispatchStubImpl::try_get_call_ptr( | ||||
| #endif | ||||
| #ifdef HAVE_SVE256_CPU_DEFINITION | ||||
|           , SVE256 | ||||
| #endif | ||||
| #ifdef HAVE_SVE128_CPU_DEFINITION | ||||
|           , SVE128 | ||||
| #endif | ||||
|         ); | ||||
|         if (!std::holds_alternative<ErrorType>(result)) { | ||||
| @ -242,6 +255,9 @@ void* DispatchStubImpl::get_call_ptr( | ||||
| #ifdef HAVE_SVE256_CPU_DEFINITION | ||||
|   , void *SVE256 | ||||
| #endif | ||||
| #ifdef HAVE_SVE128_CPU_DEFINITION | ||||
|   , void *SVE128 | ||||
| #endif | ||||
| ) { | ||||
|  | ||||
|   auto result = try_get_call_ptr( | ||||
| @ -266,6 +282,10 @@ void* DispatchStubImpl::get_call_ptr( | ||||
| #ifdef HAVE_SVE256_CPU_DEFINITION | ||||
|       , | ||||
|       SVE256 | ||||
| #endif | ||||
| #ifdef HAVE_SVE128_CPU_DEFINITION | ||||
|       , | ||||
|       SVE128 | ||||
| #endif | ||||
|   ); | ||||
|   if (std::holds_alternative<ErrorType>(result)) { | ||||
| @ -300,6 +320,9 @@ DispatchResult DispatchStubImpl::try_choose_cpu_impl( | ||||
| #endif | ||||
| #ifdef HAVE_SVE256_CPU_DEFINITION | ||||
|     , void *SVE256 | ||||
| #endif | ||||
| #ifdef HAVE_SVE128_CPU_DEFINITION | ||||
|     , void *SVE128 | ||||
| #endif | ||||
|   ){ | ||||
|  | ||||
| @ -342,6 +365,16 @@ DispatchResult DispatchStubImpl::try_choose_cpu_impl( | ||||
|       return DispatchResult(SVE256); | ||||
|     } | ||||
|   } | ||||
| #endif | ||||
| #ifdef HAVE_SVE128_CPU_DEFINITION | ||||
|   if (capability >= static_cast<int>(CPUCapability::SVE128)) { | ||||
|     if (C10_UNLIKELY(!SVE128)) { | ||||
|       // dispatch to DEFAULT, since the SVE kernel is missing | ||||
|       return DEFAULT != nullptr ? DispatchResult(DEFAULT) : ErrorType::MissingDeviceKernel; | ||||
|     } else { | ||||
|       return DispatchResult(SVE128); | ||||
|     } | ||||
|   } | ||||
| #endif | ||||
|   return DEFAULT != nullptr ? DispatchResult(DEFAULT) : ErrorType::MissingDeviceKernel; | ||||
| } | ||||
| @ -363,6 +396,9 @@ void* DispatchStubImpl::choose_cpu_impl( | ||||
| #ifdef HAVE_SVE256_CPU_DEFINITION | ||||
|   , void *SVE256 | ||||
| #endif | ||||
| #ifdef HAVE_SVE128_CPU_DEFINITION | ||||
|   , void *SVE128 | ||||
| #endif | ||||
| ) { | ||||
|   auto capability = static_cast<int>(get_cpu_capability()); | ||||
|   (void)capability; | ||||
| @ -408,6 +444,17 @@ void* DispatchStubImpl::choose_cpu_impl( | ||||
|       return SVE256; | ||||
|     } | ||||
|   } | ||||
| #endif | ||||
| #ifdef HAVE_SVE128_CPU_DEFINITION | ||||
|   if (capability >= static_cast<int>(CPUCapability::SVE128)) { | ||||
|     if (C10_UNLIKELY(!SVE128)) { | ||||
|       // dispatch to DEFAULT, since the SVE kernel is missing | ||||
|       TORCH_INTERNAL_ASSERT(DEFAULT, "DispatchStub: missing default kernel"); | ||||
|       return DEFAULT; | ||||
|     } else { | ||||
|       return SVE128; | ||||
|     } | ||||
|   } | ||||
| #endif | ||||
|   TORCH_INTERNAL_ASSERT(DEFAULT, "DispatchStub: missing default kernel"); | ||||
|   return DEFAULT; | ||||
|  | ||||
| @ -64,8 +64,9 @@ enum class CPUCapability { | ||||
|   VSX = 1, | ||||
| #elif defined(HAVE_ZVECTOR_CPU_DEFINITION) | ||||
|   ZVECTOR = 1, | ||||
| #elif defined(HAVE_SVE256_CPU_DEFINITION) && defined(HAVE_ARM_BF16_CPU_DEFINITION) | ||||
| #elif defined(HAVE_SVE_CPU_DEFINITION) && defined(HAVE_ARM_BF16_CPU_DEFINITION) | ||||
|   SVE256 = 1, | ||||
|   SVE128 = 2, | ||||
| #else | ||||
|   AVX2 = 1, | ||||
|   AVX512 = 2, | ||||
| @ -117,6 +118,9 @@ struct TORCH_API DispatchStubImpl { | ||||
| #endif | ||||
| #ifdef HAVE_SVE256_CPU_DEFINITION | ||||
|       , void *SVE256 | ||||
| #endif | ||||
| #ifdef HAVE_SVE128_CPU_DEFINITION | ||||
|       , void *SVE128 | ||||
| #endif | ||||
|   ); | ||||
|  | ||||
| @ -138,6 +142,9 @@ struct TORCH_API DispatchStubImpl { | ||||
| #endif | ||||
| #ifdef HAVE_SVE256_CPU_DEFINITION | ||||
|     , void *SVE256 | ||||
| #endif | ||||
| #ifdef HAVE_SVE128_CPU_DEFINITION | ||||
|     , void *SVE128 | ||||
| #endif | ||||
|   ); | ||||
|  | ||||
| @ -159,6 +166,9 @@ struct TORCH_API DispatchStubImpl { | ||||
| #endif | ||||
| #ifdef HAVE_SVE256_CPU_DEFINITION | ||||
|       , void *SVE256 | ||||
| #endif | ||||
| #ifdef HAVE_SVE128_CPU_DEFINITION | ||||
|       , void *SVE128 | ||||
| #endif | ||||
|   ); | ||||
|  | ||||
| @ -183,6 +193,9 @@ struct TORCH_API DispatchStubImpl { | ||||
| #endif | ||||
| #ifdef HAVE_SVE256_CPU_DEFINITION | ||||
|     , void *SVE256 | ||||
| #endif | ||||
| #ifdef HAVE_SVE128_CPU_DEFINITION | ||||
|     , void *SVE128 | ||||
| #endif | ||||
|   ); | ||||
|  | ||||
| @ -240,6 +253,9 @@ private: | ||||
| #endif | ||||
| #ifdef HAVE_SVE256_CPU_DEFINITION | ||||
|       , reinterpret_cast<void*>(SVE256) | ||||
| #endif | ||||
| #ifdef HAVE_SVE128_CPU_DEFINITION | ||||
|       , reinterpret_cast<void*>(SVE128) | ||||
| #endif | ||||
|       ) | ||||
|     ); | ||||
| @ -301,6 +317,9 @@ public: | ||||
| #endif | ||||
| #ifdef HAVE_SVE256_CPU_DEFINITION | ||||
|       , reinterpret_cast<void*>(SVE256) | ||||
| #endif | ||||
| #ifdef HAVE_SVE128_CPU_DEFINITION | ||||
|       , reinterpret_cast<void*>(SVE128) | ||||
| #endif | ||||
|       ); | ||||
|     if (std::holds_alternative<ErrorType>(result)){ | ||||
| @ -325,6 +344,9 @@ public: | ||||
| #ifdef HAVE_SVE256_CPU_DEFINITION | ||||
|   static TORCH_API FnPtr SVE256; | ||||
| #endif | ||||
| #ifdef HAVE_SVE128_CPU_DEFINITION | ||||
|   static TORCH_API FnPtr SVE128; | ||||
| #endif | ||||
| private: | ||||
|   DispatchStubImpl impl; | ||||
| }; | ||||
| @ -432,6 +454,12 @@ struct RegisterPRIVATEUSE1Dispatch { | ||||
| #define REGISTER_SVE256_DISPATCH(name, fn) | ||||
| #endif | ||||
|  | ||||
| #ifdef HAVE_SVE128_CPU_DEFINITION | ||||
| #define REGISTER_SVE128_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, SVE128, fn) | ||||
| #else | ||||
| #define REGISTER_SVE128_DISPATCH(name, fn) | ||||
| #endif | ||||
|  | ||||
| // Macro to register the same kernel for all CPU arch types. This is useful | ||||
| // if a kernel does not benefit from being recompiled across different arch types. | ||||
| #define REGISTER_ALL_CPU_DISPATCH(name, fn)                                    \ | ||||
| @ -440,6 +468,11 @@ struct RegisterPRIVATEUSE1Dispatch { | ||||
|   REGISTER_AVX2_DISPATCH(name, fn)                                             \ | ||||
|   REGISTER_VSX_DISPATCH(name, fn)                                              \ | ||||
|   REGISTER_ZVECTOR_DISPATCH(name, fn)                                          \ | ||||
|   REGISTER_SVE256_DISPATCH(name, fn)                                           \ | ||||
|   REGISTER_SVE128_DISPATCH(name, fn) | ||||
|  | ||||
| #define REGISTER_SVE_DISPATCH(name, fn)                                        \ | ||||
|   REGISTER_SVE128_DISPATCH(name, fn)                                           \ | ||||
|   REGISTER_SVE256_DISPATCH(name, fn) | ||||
|  | ||||
| #define REGISTER_NO_CPU_DISPATCH(name)                                         \ | ||||
| @ -482,6 +515,7 @@ struct RegisterPRIVATEUSE1Dispatch { | ||||
| // REGISTER_DISPATCH now dispatches an AVX512 kernel to nullptr but registers other dispatches. | ||||
| // ALSO_REGISTER_AVX512_DISPATCH should be used for ensuring AVX512 dispatch, among others. | ||||
| // ALSO_REGISTER_SVE256_DISPATCH should be used for ensuring SVE256 dispatch, among others. | ||||
| // ALSO_REGISTER_SVE128_DISPATCH should be used for ensuring SVE128 dispatch, among others. | ||||
| #ifdef CPU_CAPABILITY_AVX512 | ||||
| #define REGISTER_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, CPU_CAPABILITY, ((void*)(fn) ? nullptr : nullptr)) | ||||
| #else | ||||
| @ -489,6 +523,7 @@ struct RegisterPRIVATEUSE1Dispatch { | ||||
| #endif | ||||
| #define ALSO_REGISTER_AVX512_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, CPU_CAPABILITY, fn) | ||||
| #define ALSO_REGISTER_SVE256_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, CPU_CAPABILITY, fn) | ||||
| #define ALSO_REGISTER_SVE128_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, CPU_CAPABILITY, fn) | ||||
| #endif | ||||
| } // namespace at::native | ||||
|  | ||||
|  | ||||
| @ -108,7 +108,7 @@ bool is_fast_path(const Tensor& src, const std::optional<Tensor>& scale, Tensor& | ||||
| // index_add (using add_indices as the index), without creating an intermediary | ||||
| // tensor to hold the selected embeddings | ||||
| template <typename data_t, typename index_t> | ||||
| std::enable_if_t<std::is_same_v<data_t, double>, void> | ||||
| static std::enable_if_t<std::is_same_v<data_t, double>, void> | ||||
| index_select_add( | ||||
|     const Tensor& select_indices, | ||||
|     const Tensor& add_indices, | ||||
| @ -494,7 +494,7 @@ index_select_add(const Tensor &select_indices, | ||||
| // mul (scaling by per_sample_weights) | ||||
| // index_add (using add_indices as the index) | ||||
| template <typename data_t, typename index_t> | ||||
| std::enable_if_t<std::is_same_v<data_t, double>, void> | ||||
| static std::enable_if_t<std::is_same_v<data_t, double>, void> | ||||
| index_select_scale_add( | ||||
|     const Tensor& select_indices, | ||||
|     const Tensor& add_indices, | ||||
|  | ||||
| @ -130,7 +130,7 @@ namespace native { | ||||
| namespace { | ||||
|  | ||||
| template <typename scalar_t> | ||||
| void fractional_max_pool2d_out_single_batch_frame( | ||||
| static void fractional_max_pool2d_out_single_batch_frame( | ||||
|   const scalar_t* input, | ||||
|   scalar_t* output, | ||||
|   int64_t* indices, | ||||
| @ -188,7 +188,7 @@ void fractional_max_pool2d_out_single_batch_frame( | ||||
| } | ||||
|  | ||||
| template <typename scalar_t> | ||||
| void fractional_max_pool2d_out_frame( | ||||
| static void fractional_max_pool2d_out_frame( | ||||
|   const scalar_t* input, | ||||
|   scalar_t* output, | ||||
|   int64_t* indices, | ||||
| @ -220,7 +220,7 @@ void fractional_max_pool2d_out_frame( | ||||
|   } | ||||
|  | ||||
| template <typename scalar_t> | ||||
| void fractional_max_pool2d_backward_out_single_batch_frame( | ||||
| static void fractional_max_pool2d_backward_out_single_batch_frame( | ||||
|   scalar_t* gradInput, | ||||
|   const scalar_t* gradOutput, | ||||
|   const int64_t* indices, | ||||
| @ -247,7 +247,7 @@ void fractional_max_pool2d_backward_out_single_batch_frame( | ||||
| } | ||||
|  | ||||
| template <typename scalar_t> | ||||
| void fractional_max_pool2d_backward_out_frame( | ||||
| static void fractional_max_pool2d_backward_out_frame( | ||||
|   scalar_t* gradInput, | ||||
|   const scalar_t* gradOutput, | ||||
|   const int64_t* indices, | ||||
|  | ||||
| @ -99,7 +99,7 @@ namespace at::native { | ||||
| namespace { | ||||
|  | ||||
| template<typename scalar_t> | ||||
| void fractional_max_pool3d_out_single_batch_frame( | ||||
| static void fractional_max_pool3d_out_single_batch_frame( | ||||
|   const scalar_t* input, | ||||
|   scalar_t* output, | ||||
|   int64_t* indices, | ||||
| @ -169,7 +169,7 @@ void fractional_max_pool3d_out_single_batch_frame( | ||||
| } | ||||
|  | ||||
| template<typename scalar_t> | ||||
| void fractional_max_pool3d_out_frame( | ||||
| static void fractional_max_pool3d_out_frame( | ||||
|   const scalar_t* input, | ||||
|   scalar_t* output, | ||||
|   int64_t* indices, | ||||
| @ -257,7 +257,7 @@ TORCH_IMPL_FUNC(fractional_max_pool3d_out_cpu)( | ||||
| namespace { | ||||
|  | ||||
| template<typename scalar_t> | ||||
| void fractional_max_pool3d_backward_out_single_batch_frame( | ||||
| static void fractional_max_pool3d_backward_out_single_batch_frame( | ||||
|   scalar_t* gradInput, | ||||
|   const scalar_t* gradOutput, | ||||
|   const int64_t* indices, | ||||
| @ -287,7 +287,7 @@ void fractional_max_pool3d_backward_out_single_batch_frame( | ||||
| } | ||||
|  | ||||
| template<typename scalar_t> | ||||
| void fractional_max_pool3d_backward_out_frame( | ||||
| static void fractional_max_pool3d_backward_out_frame( | ||||
|   scalar_t* gradInput, | ||||
|   const scalar_t* gradOutput, | ||||
|   const int64_t* indices, | ||||
|  | ||||
| @ -19,7 +19,7 @@ | ||||
| namespace at::native { | ||||
| namespace { | ||||
|  | ||||
| void im2col_out_cpu_template( | ||||
| static void im2col_out_cpu_template( | ||||
|     Tensor& output, | ||||
|     const Tensor& input_, | ||||
|     IntArrayRef kernel_size, | ||||
|  | ||||
| @ -61,7 +61,7 @@ | ||||
| constexpr float EPSILON = 1e-12; | ||||
|  | ||||
| namespace { | ||||
|   inline at::Tensor apply_loss_reduction(const at::Tensor& unreduced, int64_t reduction) { | ||||
|   static inline at::Tensor apply_loss_reduction(const at::Tensor& unreduced, int64_t reduction) { | ||||
|     if (reduction == at::Reduction::Mean) { | ||||
|       return unreduced.mean(); | ||||
|     } else if (reduction == at::Reduction::Sum) { | ||||
|  | ||||
| @ -44,7 +44,7 @@ namespace { | ||||
|  | ||||
| // this ad-hoc converts from targets (l in [1]) to augmented targets (l' in [1]) note that no bound-checking is done | ||||
| template<typename target_t> | ||||
| inline int64_t get_target_prime(target_t* target, int64_t offset, int64_t stride, int64_t idx, int64_t BLANK) { | ||||
| static inline int64_t get_target_prime(target_t* target, int64_t offset, int64_t stride, int64_t idx, int64_t BLANK) { | ||||
|   if (idx % 2 == 0) { | ||||
|     return BLANK; | ||||
|   } else { | ||||
|  | ||||
| @ -58,7 +58,7 @@ inline scalar_t multilabel_margin_loss_forward_inner_sum_cpu( | ||||
| } | ||||
|  | ||||
| template <typename scalar_t> | ||||
| void multilabel_margin_loss_forward_out_frame( | ||||
| static void multilabel_margin_loss_forward_out_frame( | ||||
|     const Tensor& input_contiguous, | ||||
|     const Tensor& target_contiguous, | ||||
|     Tensor& output, | ||||
| @ -108,7 +108,7 @@ void multilabel_margin_loss_forward_out_frame( | ||||
|   } | ||||
| } | ||||
|  | ||||
| void multilabel_margin_loss_forward_out_cpu_template( | ||||
| static void multilabel_margin_loss_forward_out_cpu_template( | ||||
|     const Tensor& input, | ||||
|     const Tensor& target, | ||||
|     Tensor& output, | ||||
| @ -153,7 +153,7 @@ void multilabel_margin_loss_forward_out_cpu_template( | ||||
| } | ||||
|  | ||||
| template <typename scalar_t> | ||||
| void multilabel_margin_loss_backward_out_frame( | ||||
| static void multilabel_margin_loss_backward_out_frame( | ||||
|     Tensor& grad_input, | ||||
|     const Tensor& grad_output, | ||||
|     const Tensor& input_contiguous, | ||||
| @ -222,7 +222,7 @@ void multilabel_margin_loss_backward_out_frame( | ||||
|   } | ||||
| } | ||||
|  | ||||
| void multilabel_margin_loss_backward_out_cpu_template( | ||||
| static void multilabel_margin_loss_backward_out_cpu_template( | ||||
|     Tensor& grad_input, | ||||
|     const Tensor& grad_output, | ||||
|     const Tensor& input, | ||||
|  | ||||
| @ -57,7 +57,7 @@ inline int64_t target_index_checked( | ||||
| } | ||||
|  | ||||
| template <typename scalar_t> | ||||
| inline void multi_margin_loss_cpu_kernel( | ||||
| static inline void multi_margin_loss_cpu_kernel( | ||||
|     Tensor& output, | ||||
|     const scalar_t* input_data, | ||||
|     const int64_t* target_data, | ||||
| @ -148,7 +148,7 @@ void multi_margin_loss_out_cpu_template( | ||||
| } | ||||
|  | ||||
| template <typename scalar_t> | ||||
| void multi_margin_loss_backward_cpu_kernel( | ||||
| static void multi_margin_loss_backward_cpu_kernel( | ||||
|     scalar_t* grad_input_data, | ||||
|     const Tensor& grad_output, | ||||
|     const scalar_t* input_data, | ||||
|  | ||||
| @ -159,7 +159,7 @@ inline scalar_t* optional_data(const Tensor& source) { | ||||
| } | ||||
|  | ||||
| template <typename scalar_t, typename target_t> | ||||
| void nll_loss_out_frame( | ||||
| static void nll_loss_out_frame( | ||||
|     const Tensor& output, | ||||
|     const Tensor& total_weight, | ||||
|     const Tensor& input, | ||||
| @ -338,7 +338,7 @@ void nll_loss_forward_out_cpu_template( | ||||
| } | ||||
|  | ||||
| template <typename scalar_t, typename target_t> | ||||
| void nll_loss_backward_out_frame( | ||||
| static void nll_loss_backward_out_frame( | ||||
|     const Tensor& grad_input, | ||||
|     const Tensor& grad_output, | ||||
|     const Tensor& input, | ||||
|  | ||||
| @ -99,7 +99,7 @@ inline void check_gradout_shape_nll_loss2d( | ||||
|  | ||||
|  | ||||
| template <typename scalar_t> | ||||
| void nll_loss2d_forward_out_frame( | ||||
| static void nll_loss2d_forward_out_frame( | ||||
|     Tensor& output, | ||||
|     Tensor& total_weight, | ||||
|     const Tensor& input, | ||||
| @ -280,7 +280,7 @@ void nll_loss2d_forward_out_cpu_template( | ||||
| } | ||||
|  | ||||
| template <typename scalar_t> | ||||
| void nll_loss2d_backward_out_frame( | ||||
| static void nll_loss2d_backward_out_frame( | ||||
|     Tensor& grad_input, | ||||
|     const Tensor& grad_output, | ||||
|     const Tensor& input, | ||||
|  | ||||
Some files were not shown because too many files have changed in this diff Show More
		Reference in New Issue
	
	Block a user
	