Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-31 20:34:54 +08:00)

Compare commits: cpp-docs-d...gh/anshul- (1 commit)

Commit: 2896500455
| @ -8,8 +8,6 @@ if [[ "$GPU_ARCH_VERSION" == *"12.6"* ]]; then | ||||
|     export TORCH_CUDA_ARCH_LIST="8.0;9.0" | ||||
| elif [[ "$GPU_ARCH_VERSION" == *"12.8"* ]]; then | ||||
|     export TORCH_CUDA_ARCH_LIST="8.0;9.0;10.0;12.0" | ||||
| elif [[ "$GPU_ARCH_VERSION" == *"12.9"* ]]; then | ||||
|     export TORCH_CUDA_ARCH_LIST="8.0;9.0;10.0;12.0" | ||||
| elif [[ "$GPU_ARCH_VERSION" == *"13.0"* ]]; then | ||||
|     export TORCH_CUDA_ARCH_LIST="8.0;9.0;10.0;11.0;12.0+PTX" | ||||
| fi | ||||
|  | ||||
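For reference, the arch selection above is a substring lookup from CUDA version to `TORCH_CUDA_ARCH_LIST`, with the 12.9 branch dropped by this hunk. A minimal Python sketch of the same mapping (the helper name is illustrative; the values come from the hunk):

```python
# Map a CUDA version string to TORCH_CUDA_ARCH_LIST, mirroring the
# elif chain above (the 12.9 entry is gone after this change).
ARCH_LISTS = {
    "12.6": "8.0;9.0",
    "12.8": "8.0;9.0;10.0;12.0",
    "13.0": "8.0;9.0;10.0;11.0;12.0+PTX",
}

def torch_cuda_arch_list(gpu_arch_version: str) -> str | None:
    for key, arches in ARCH_LISTS.items():
        if key in gpu_arch_version:   # same substring match as [[ == *"..."* ]]
            return arches
    return None  # no export happens when nothing matches

assert torch_cuda_arch_list("cuda13.0") == "8.0;9.0;10.0;11.0;12.0+PTX"
```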
| @ -113,7 +113,6 @@ case "$tag" in | ||||
|     UCX_COMMIT=${_UCX_COMMIT} | ||||
|     UCC_COMMIT=${_UCC_COMMIT} | ||||
|     TRITON=yes | ||||
|     INSTALL_MINGW=yes | ||||
|     ;; | ||||
|   pytorch-linux-jammy-cuda13.0-cudnn9-py3-gcc11) | ||||
|     CUDA_VERSION=13.0.0 | ||||
| @ -362,7 +361,6 @@ docker build \ | ||||
|        --build-arg "OPENBLAS=${OPENBLAS:-}" \ | ||||
|        --build-arg "SKIP_SCCACHE_INSTALL=${SKIP_SCCACHE_INSTALL:-}" \ | ||||
|        --build-arg "SKIP_LLVM_SRC_BUILD_INSTALL=${SKIP_LLVM_SRC_BUILD_INSTALL:-}" \ | ||||
|        --build-arg "INSTALL_MINGW=${INSTALL_MINGW:-}" \ | ||||
|        -f $(dirname ${DOCKERFILE})/Dockerfile \ | ||||
|        -t "$tmp_tag" \ | ||||
|        "$@" \ | ||||
|  | ||||
| @ -83,6 +83,10 @@ function build_cpython { | ||||
|         py_suffix=${py_ver::-1} | ||||
|         py_folder=$py_suffix | ||||
|     fi | ||||
|     # Update to rc2 due to https://github.com/python/cpython/commit/c72699086fe4 | ||||
|     if [ "$py_suffix" == "3.14.0" ]; then | ||||
|         py_suffix="3.14.0rc2" | ||||
|     fi | ||||
|     wget -q $PYTHON_DOWNLOAD_URL/$py_folder/Python-$py_suffix.tgz -O Python-$py_ver.tgz | ||||
|     do_cpython_build $py_ver Python-$py_suffix | ||||
|  | ||||
|  | ||||
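The rc2 pin interacts with the download line that follows it: `py_folder` is fixed before the substitution, so the directory component keeps the plain `3.14.0` name while only the tarball name gains the `rc2` suffix. A simplified sketch (the base URL is an assumption, and the free-threaded suffix handling from earlier in the function is omitted):

```python
# Sketch of the URL assembly in build_cpython; only the tarball name
# picks up the rc2 suffix, the folder component does not.
PYTHON_DOWNLOAD_URL = "https://www.python.org/ftp/python"  # assumed value

def cpython_tarball_url(py_ver: str) -> str:
    py_suffix = py_ver
    py_folder = py_suffix            # folder is fixed before the rc2 swap
    if py_suffix == "3.14.0":
        py_suffix = "3.14.0rc2"
    return f"{PYTHON_DOWNLOAD_URL}/{py_folder}/Python-{py_suffix}.tgz"

assert cpython_tarball_url("3.14.0").endswith("/3.14.0/Python-3.14.0rc2.tgz")
```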
| @ -1,10 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| # Install MinGW-w64 for Windows cross-compilation | ||||
| apt-get update | ||||
| apt-get install -y g++-mingw-w64-x86-64-posix | ||||
|  | ||||
| echo "MinGW-w64 installed successfully" | ||||
| x86_64-w64-mingw32-g++ --version | ||||
| @ -20,7 +20,7 @@ pip_install \ | ||||
|  | ||||
| pip_install coloredlogs packaging | ||||
| pip_install onnxruntime==1.23.0 | ||||
| pip_install onnxscript==0.5.4 | ||||
| pip_install onnxscript==0.5.3 | ||||
|  | ||||
| # Cache the transformers model to be used later by ONNX tests. We need to run the transformers | ||||
| # package to download the model. By default, the model is cached at ~/.cache/huggingface/hub/ | ||||
|  | ||||
| @ -39,13 +39,9 @@ case ${DOCKER_TAG_PREFIX} in | ||||
|         DOCKER_GPU_BUILD_ARG="" | ||||
|         ;; | ||||
|     rocm*) | ||||
|         # we want the patch version of 7.0 instead | ||||
|         if [[ "$GPU_ARCH_VERSION" == *"7.0"* ]]; then | ||||
|             GPU_ARCH_VERSION="${GPU_ARCH_VERSION}.2" | ||||
|         fi | ||||
|         # we want the patch version of 6.4 instead | ||||
|         if [[ "$GPU_ARCH_VERSION" == *"6.4"* ]]; then | ||||
|             GPU_ARCH_VERSION="${GPU_ARCH_VERSION}.4" | ||||
|             GPU_ARCH_VERSION="${GPU_ARCH_VERSION}.2" | ||||
|         fi | ||||
|         BASE_TARGET=rocm | ||||
|         GPU_IMAGE=rocm/dev-ubuntu-22.04:${GPU_ARCH_VERSION}-complete | ||||
|  | ||||
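The net effect of this hunk (and the matching one in the next file) is that only 6.4-style ROCm versions are expanded to a patch release, with the suffix changing between `.4` and `.2`, while the 7.0 special case exists on only one side. A sketch of the mapping on the side that keeps just the 6.4 rule:

```python
# Version expansion with only the 6.4 rule active: append ".2" so the
# rocm/dev-ubuntu-22.04:<version>-complete image tag hits a patch release.
def expand_rocm_version(gpu_arch_version: str) -> str:
    if "6.4" in gpu_arch_version:
        return gpu_arch_version + ".2"   # "6.4" -> "6.4.2"
    return gpu_arch_version

assert expand_rocm_version("6.4") == "6.4.2"
assert expand_rocm_version("7.0") == "7.0"   # untouched on this side
```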
| @ -75,13 +75,9 @@ case ${image} in | ||||
|         DOCKERFILE_SUFFIX="_cuda_aarch64" | ||||
|         ;; | ||||
|     manylinux2_28-builder:rocm*) | ||||
|         # we want the patch version of 7.0 instead | ||||
|         if [[ "$GPU_ARCH_VERSION" == *"7.0"* ]]; then | ||||
|             GPU_ARCH_VERSION="${GPU_ARCH_VERSION}.2" | ||||
|         fi | ||||
|         # we want the patch version of 6.4 instead | ||||
|         if [[ "$GPU_ARCH_VERSION" == *"6.4"* ]]; then | ||||
|             GPU_ARCH_VERSION="${GPU_ARCH_VERSION}.4" | ||||
|             GPU_ARCH_VERSION="${GPU_ARCH_VERSION}.2" | ||||
|         fi | ||||
|         TARGET=rocm_final | ||||
|         MANY_LINUX_VERSION="2_28" | ||||
|  | ||||
| @ -1,11 +1,15 @@ | ||||
| sphinx==7.2.6 | ||||
| sphinx==5.3.0 | ||||
| #Description: This is used to generate PyTorch docs | ||||
| #Pinned versions: 7.2.6 | ||||
| #Pinned versions: 5.3.0 | ||||
|  | ||||
| pytorch_sphinx_theme2==0.1.0 | ||||
| #Description: This is needed to generate PyTorch docs | ||||
| #Pinned versions: 0.1.0 | ||||
| standard-imghdr==3.13.0; python_version >= "3.13" | ||||
| #Description: This is needed by Sphinx, so it needs to be added here. | ||||
| # The reasons are as follows: | ||||
| # 1) This module has been removed from the Python standard library since Python 3.13(https://peps.python.org/pep-0594/#imghdr); | ||||
| # 2) The current version of Sphinx (5.3.0) is not compatible with Python 3.13. | ||||
| # Once Sphinx is upgraded to a version compatible with Python 3.13 or later, we can remove this dependency. | ||||
|  | ||||
| -e git+https://github.com/pytorch/pytorch_sphinx_theme.git@71e55749be14ceb56e7f8211a9fb649866b87ad4#egg=pytorch_sphinx_theme2 | ||||
| # TODO: sphinxcontrib.katex 0.9.0 adds a local KaTeX server to speed up pre-rendering | ||||
| # but it doesn't seem to work and hangs around idly. The initial thought that it is probably | ||||
| # something related to Docker setup. We can investigate this later. | ||||
| @ -32,17 +36,17 @@ tensorboard==2.18.0 ; python_version >= "3.13" | ||||
| #Description: This is used to generate PyTorch docs | ||||
| #Pinned versions: 2.13.0 | ||||
|  | ||||
| breathe==4.36.0 | ||||
| breathe==4.34.0 | ||||
| #Description: This is used to generate PyTorch C++ docs | ||||
| #Pinned versions: 4.36.0 | ||||
| #Pinned versions: 4.34.0 | ||||
|  | ||||
| exhale==0.3.7 | ||||
| exhale==0.2.3 | ||||
| #Description: This is used to generate PyTorch C++ docs | ||||
| #Pinned versions: 0.3.7 | ||||
| #Pinned versions: 0.2.3 | ||||
|  | ||||
| docutils==0.20 | ||||
| docutils==0.16 | ||||
| #Description: This is used to generate PyTorch C++ docs | ||||
| #Pinned versions: 0.20 | ||||
| #Pinned versions: 0.16 | ||||
|  | ||||
| bs4==0.0.1 | ||||
| #Description: This is used to generate PyTorch C++ docs | ||||
| @ -52,13 +56,13 @@ IPython==8.12.0 | ||||
| #Description: This is used to generate PyTorch functorch docs | ||||
| #Pinned versions: 8.12.0 | ||||
|  | ||||
| myst-nb==1.3.0 | ||||
| myst-nb==0.17.2 | ||||
| #Description: This is used to generate PyTorch functorch and torch.compile docs. | ||||
| #Pinned versions: 1.3.0 | ||||
| #Pinned versions: 0.17.2 | ||||
|  | ||||
| # The following are required to build torch.distributed.elastic.rendezvous.etcd* docs | ||||
| python-etcd==0.4.5 | ||||
| sphinx-copybutton==0.5.0 | ||||
| sphinx-design==0.6.1 | ||||
| sphinx-design==0.4.0 | ||||
| sphinxcontrib-mermaid==1.0.0 | ||||
| myst-parser==4.0.1 | ||||
| myst-parser==0.18.1 | ||||
|  | ||||
| @ -103,11 +103,6 @@ COPY ci_commit_pins/torchbench.txt torchbench.txt | ||||
| RUN if [ -n "${INDUCTOR_BENCHMARKS}" ]; then bash ./install_inductor_benchmark_deps.sh; fi | ||||
| RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface-requirements.txt torchbench.txt | ||||
|  | ||||
| ARG INSTALL_MINGW | ||||
| COPY ./common/install_mingw.sh install_mingw.sh | ||||
| RUN if [ -n "${INSTALL_MINGW}" ]; then bash ./install_mingw.sh; fi | ||||
| RUN rm install_mingw.sh | ||||
|  | ||||
| ARG TRITON | ||||
| ARG TRITON_CPU | ||||
|  | ||||
|  | ||||
| @ -57,8 +57,8 @@ def clone_external_repo(target: str, repo: str, dst: str = "", update_submodules | ||||
|         logger.info("Successfully cloned %s", target) | ||||
|         return r, commit | ||||
|  | ||||
|     except GitCommandError: | ||||
|         logger.exception("Git operation failed") | ||||
|     except GitCommandError as e: | ||||
|         logger.error("Git operation failed: %s", e) | ||||
|         raise | ||||
|  | ||||
|  | ||||
|  | ||||
| @ -143,7 +143,7 @@ def sample_vllm_test_library(): | ||||
|                 "pytest -v -s compile/test_decorator.py", | ||||
|             ], | ||||
|         }, | ||||
|         "vllm_language_model_test_extended_generation_28_failure_test": { | ||||
|         "vllm_languagde_model_test_extended_generation_28_failure_test": { | ||||
|             "title": "Language Models Test (Extended Generation) 2.8 release failure", | ||||
|             "id": "vllm_languagde_model_test_extended_generation_28_failure_test", | ||||
|             "package_install": [ | ||||
|  | ||||
| @ -63,7 +63,7 @@ class VllmBuildParameters: | ||||
|     # DOCKERFILE_PATH: path to Dockerfile used when use_local_dockerfile is True" | ||||
|     use_local_dockerfile: bool = env_bool_field("USE_LOCAL_DOCKERFILE", True) | ||||
|     dockerfile_path: Path = env_path_field( | ||||
|         "DOCKERFILE_PATH", ".github/ci_configs/vllm/Dockerfile" | ||||
|         "DOCKERFILE_PATH", ".github/ci_configs/vllm/Dockerfile.tmp_vllm" | ||||
|     ) | ||||
|  | ||||
|     # the cleaning script to remove torch dependencies from pip | ||||
|  | ||||
| @ -187,22 +187,19 @@ if [[ $CUDA_VERSION == 12* || $CUDA_VERSION == 13* ]]; then | ||||
|             export USE_CUFILE=0 | ||||
|         else | ||||
|             DEPS_LIST+=( | ||||
|                 "/usr/local/cuda/lib64/libnvToolsExt.so.1" | ||||
|                 "/usr/local/cuda/lib64/libcublas.so.12" | ||||
|                 "/usr/local/cuda/lib64/libcublasLt.so.12" | ||||
|                 "/usr/local/cuda/lib64/libcudart.so.12" | ||||
|                 "/usr/local/cuda/lib64/libnvrtc.so.12" | ||||
|                 "/usr/local/cuda/extras/CUPTI/lib64/libcupti.so.12") | ||||
|             DEPS_SONAME+=( | ||||
|                 "libnvToolsExt.so.1" | ||||
|                 "libcublas.so.12" | ||||
|                 "libcublasLt.so.12" | ||||
|                 "libcudart.so.12" | ||||
|                 "libnvrtc.so.12" | ||||
|                 "libcupti.so.12") | ||||
|  | ||||
|             if [[ $CUDA_VERSION != 12.9* ]]; then | ||||
|                 DEPS_LIST+=("/usr/local/cuda/lib64/libnvToolsExt.so.1") | ||||
|                 DEPS_SONAME+=("libnvToolsExt.so.1") | ||||
|             fi | ||||
|         fi | ||||
|     else | ||||
|         echo "Using nvidia libs from pypi." | ||||
|  | ||||
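One side of this hunk lists `libnvToolsExt.so.1` unconditionally alongside the other CUDA runtime libraries, while the other guards it behind a `CUDA_VERSION != 12.9*` check; either way, every `DEPS_LIST` path is paired with its bare soname in `DEPS_SONAME`. A trimmed sketch of that pairing (list shortened for brevity):

```python
# Each bundled library path has a matching soname entry; the soname is
# just the basename of the path.
deps_list = [
    "/usr/local/cuda/lib64/libnvToolsExt.so.1",
    "/usr/local/cuda/lib64/libcublas.so.12",
    # ... remaining CUDA runtime libraries elided
]
deps_soname = [path.rsplit("/", 1)[-1] for path in deps_list]
assert deps_soname == ["libnvToolsExt.so.1", "libcublas.so.12"]
```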
| @ -102,18 +102,8 @@ if [ "$is_main_doc" = true ]; then | ||||
|     echo coverage output not found | ||||
|     exit 1 | ||||
|   elif [ $undocumented -gt 0 ]; then | ||||
|     echo "======================================" | ||||
|     echo "ERROR: $undocumented undocumented objects found!" | ||||
|     echo "======================================" | ||||
|     echo "" | ||||
|     echo "Full coverage report:" | ||||
|     echo undocumented objects found: | ||||
|     cat build/coverage/python.txt | ||||
|     echo "" | ||||
|     echo "======================================" | ||||
|     echo "Undocumented modules/objects (lines after TOTAL):" | ||||
|     tail -n +$((lines - undocumented + 1)) build/coverage/python.txt | ||||
|     echo "======================================" | ||||
|     echo "" | ||||
|     echo "Make sure you've updated relevant .rsts in docs/source!" | ||||
|     echo "You can reproduce locally by running 'cd docs && make coverage && cat build/coverage/python.txt'" | ||||
|     exit 1 | ||||
|  | ||||
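The removed block leans on a small `tail` identity: `tail -n +K` prints from line K onward, so `K = lines - undocumented + 1` yields exactly the last `undocumented` lines of the coverage report. A worked example:

```python
# tail -n +K is 1-indexed and prints from line K to the end, so
# K = lines - undocumented + 1 selects the trailing `undocumented` lines.
report = ["header", "TOTAL", "obj_a", "obj_b", "obj_c"]
lines, undocumented = len(report), 3
start = lines - undocumented + 1          # K = 3
assert report[start - 1:] == ["obj_a", "obj_b", "obj_c"]
```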
| @ -485,22 +485,6 @@ test_inductor_aoti() { | ||||
|   /usr/bin/env "${TEST_ENVS[@]}" python test/run_test.py --cpp --verbose -i cpp/test_aoti_abi_check cpp/test_aoti_inference cpp/test_vec_half_AVX2 -dist=loadfile | ||||
| } | ||||
|  | ||||
| test_inductor_aoti_cross_compile_for_windows() { | ||||
|  | ||||
|   TEST_REPORTS_DIR=$(pwd)/test/test-reports | ||||
|   mkdir -p "$TEST_REPORTS_DIR" | ||||
|  | ||||
|   # Set WINDOWS_CUDA_HOME environment variable | ||||
|   WINDOWS_CUDA_HOME="$(pwd)/win-torch-wheel-extracted" | ||||
|   export WINDOWS_CUDA_HOME | ||||
|  | ||||
|   echo "WINDOWS_CUDA_HOME is set to: $WINDOWS_CUDA_HOME" | ||||
|   echo "Contents:" | ||||
|   ls -lah "$(pwd)/win-torch-wheel-extracted/lib/x64/" || true | ||||
|  | ||||
|   python test/inductor/test_aoti_cross_compile_windows.py -k compile --package-dir "$TEST_REPORTS_DIR" --win-torch-lib-dir "$(pwd)/win-torch-wheel-extracted/torch/lib" | ||||
| } | ||||
|  | ||||
| test_inductor_cpp_wrapper_shard() { | ||||
|   if [[ -z "$NUM_TEST_SHARDS" ]]; then | ||||
|     echo "NUM_TEST_SHARDS must be defined to run a Python test shard" | ||||
| @ -916,7 +900,7 @@ test_inductor_set_cpu_affinity(){ | ||||
|   export LD_PRELOAD="$JEMALLOC_LIB":"$LD_PRELOAD" | ||||
|   export MALLOC_CONF="oversize_threshold:1,background_thread:true,metadata_thp:auto,dirty_decay_ms:-1,muzzy_decay_ms:-1" | ||||
|  | ||||
|   if [[ "$(uname -m)" != "aarch64" ]]; then | ||||
|   if [[ "${TEST_CONFIG}" != *aarch64* ]]; then | ||||
|     # Use Intel OpenMP for x86 | ||||
|     IOMP_LIB="$(dirname "$(which python)")/../lib/libiomp5.so" | ||||
|     export LD_PRELOAD="$IOMP_LIB":"$LD_PRELOAD" | ||||
| @ -930,7 +914,7 @@ test_inductor_set_cpu_affinity(){ | ||||
|   cores=$((cpus / thread_per_core)) | ||||
|  | ||||
|   # Set number of cores to 16 on aarch64 for performance runs | ||||
|   if [[ "$(uname -m)" == "aarch64" && $cores -gt 16 ]]; then | ||||
|   if [[ "${TEST_CONFIG}" == *aarch64* && $cores -gt 16 ]]; then | ||||
|     cores=16 | ||||
|   fi | ||||
|   export OMP_NUM_THREADS=$cores | ||||
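However aarch64 is detected (`uname -m` on one side of these hunks, a `TEST_CONFIG` substring on the other), the thread computation itself is plain integer math with a 16-core cap for aarch64 performance runs. A worked sketch:

```python
# cores = cpus / threads-per-core, capped at 16 on aarch64 perf runs.
def omp_num_threads(cpus: int, threads_per_core: int, is_aarch64: bool) -> int:
    cores = cpus // threads_per_core
    if is_aarch64 and cores > 16:
        cores = 16
    return cores

assert omp_num_threads(cpus=192, threads_per_core=1, is_aarch64=True) == 16
assert omp_num_threads(cpus=16, threads_per_core=2, is_aarch64=False) == 8
```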
| @ -1631,7 +1615,6 @@ test_operator_benchmark() { | ||||
|   TEST_REPORTS_DIR=$(pwd)/test/test-reports | ||||
|   mkdir -p "$TEST_REPORTS_DIR" | ||||
|   TEST_DIR=$(pwd) | ||||
|   ARCH=$(uname -m) | ||||
|  | ||||
|   test_inductor_set_cpu_affinity | ||||
|  | ||||
| @ -1646,7 +1629,7 @@ test_operator_benchmark() { | ||||
|   pip_install pandas | ||||
|   python check_perf_csv.py \ | ||||
|       --actual "${TEST_REPORTS_DIR}/operator_benchmark_eager_float32_cpu.csv" \ | ||||
|       --expected "${ARCH}_expected_ci_operator_benchmark_eager_float32_cpu.csv" | ||||
|       --expected "expected_ci_operator_benchmark_eager_float32_cpu.csv" | ||||
| } | ||||
|  | ||||
| test_operator_microbenchmark() { | ||||
| @ -1683,7 +1666,7 @@ if [[ "${TEST_CONFIG}" == *numpy_2* ]]; then | ||||
|     python -m pip install --pre numpy==2.0.2 scipy==1.13.1 numba==0.60.0 | ||||
|   fi | ||||
|   python test/run_test.py --include dynamo/test_functions.py dynamo/test_unspec.py test_binary_ufuncs.py test_fake_tensor.py test_linalg.py test_numpy_interop.py test_tensor_creation_ops.py test_torch.py torch_np/test_basic.py | ||||
| elif [[ "${BUILD_ENVIRONMENT}" == *aarch64* && "${TEST_CONFIG}" == 'default' ]]; then | ||||
| elif [[ "${BUILD_ENVIRONMENT}" == *aarch64* && "${TEST_CONFIG}" != *perf_cpu_aarch64* ]]; then | ||||
|   test_linux_aarch64 | ||||
| elif [[ "${TEST_CONFIG}" == *backward* ]]; then | ||||
|   test_forward_backward_compatibility | ||||
| @ -1734,8 +1717,6 @@ elif [[ "${TEST_CONFIG}" == *inductor-triton-cpu* ]]; then | ||||
|   test_inductor_triton_cpu | ||||
| elif [[ "${TEST_CONFIG}" == *inductor-micro-benchmark* ]]; then | ||||
|   test_inductor_micro_benchmark | ||||
| elif [[ "${TEST_CONFIG}" == *aoti_cross_compile_for_windows* ]]; then | ||||
|   test_inductor_aoti_cross_compile_for_windows | ||||
| elif [[ "${TEST_CONFIG}" == *huggingface* ]]; then | ||||
|   install_torchvision | ||||
|   id=$((SHARD_NUMBER-1)) | ||||
|  | ||||
| @ -163,13 +163,8 @@ if [[ "$(uname)" != Darwin ]]; then | ||||
|   MEMORY_LIMIT_MAX_JOBS=12 | ||||
|   NUM_CPUS=$(( $(nproc) - 2 )) | ||||
|  | ||||
|   if [[ "$(uname)" == Linux ]]; then | ||||
|     # Defaults here for **binary** linux builds so they can be changed in one place | ||||
|     export MAX_JOBS=${MAX_JOBS:-$(( ${NUM_CPUS} > ${MEMORY_LIMIT_MAX_JOBS} ? ${MEMORY_LIMIT_MAX_JOBS} : ${NUM_CPUS} ))} | ||||
|   else | ||||
|     # For other builds | ||||
|     export MAX_JOBS=${NUM_CPUS} | ||||
|   fi | ||||
|   # Defaults here for **binary** linux builds so they can be changed in one place | ||||
|   export MAX_JOBS=${MAX_JOBS:-$(( ${NUM_CPUS} > ${MEMORY_LIMIT_MAX_JOBS} ? ${MEMORY_LIMIT_MAX_JOBS} : ${NUM_CPUS} ))} | ||||
|  | ||||
|   cat >>"$envfile" <<EOL | ||||
|   export MAX_JOBS="${MAX_JOBS}" | ||||
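The surviving `MAX_JOBS` line combines two idioms: `${MAX_JOBS:-...}` keeps a caller-provided value, and the arithmetic ternary takes the smaller of `nproc - 2` and the memory-based cap. An equivalent sketch:

```python
# ${MAX_JOBS:-$(( NUM_CPUS > LIMIT ? LIMIT : NUM_CPUS ))} in Python terms.
def default_max_jobs(env_max_jobs: str | None, nproc: int,
                     memory_limit_max_jobs: int = 12) -> int:
    if env_max_jobs:                      # explicit value always wins
        return int(env_max_jobs)
    num_cpus = nproc - 2                  # leave two CPUs for the system
    return min(num_cpus, memory_limit_max_jobs)

assert default_max_jobs(None, nproc=64) == 12   # capped by memory limit
assert default_max_jobs(None, nproc=8) == 6     # capped by CPU count
assert default_max_jobs("4", nproc=64) == 4
```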
|  | ||||

.flake8 (6 changes)
							| @ -7,12 +7,16 @@ max-line-length = 120 | ||||
| # C408 ignored because we like the dict keyword argument syntax | ||||
| # E501 is not flexible enough, we're using B950 instead | ||||
| ignore = | ||||
|     E203,E305,E402,E501,E704,E741,F405,F841,F999,W503,W504,C408,E302,W291,E303,F824, | ||||
|     E203,E305,E402,E501,E704,E721,E741,F405,F841,F999,W503,W504,C408,E302,W291,E303,F824, | ||||
|     # shebang has extra meaning in fbcode lints, so I think it's not worth trying | ||||
|     # to line this up with executable bit | ||||
|     EXE001, | ||||
|     # these ignores are from flake8-bugbear; please fix! | ||||
|     B007,B008,B017,B019,B023,B028,B903,B905,B906,B907,B908,B910 | ||||
|     # these ignores are from flake8-comprehensions; please fix! | ||||
|     C407, | ||||
|     # these ignores are from flake8-logging-format; please fix! | ||||
|     G100,G101,G200 | ||||
|     # these ignores are from flake8-simplify. please fix or ignore with commented reason | ||||
|     SIM105,SIM108,SIM110,SIM111,SIM113,SIM114,SIM115,SIM116,SIM117,SIM118,SIM119,SIM12, | ||||
|     # SIM104 is already covered by pyupgrade ruff | ||||
|  | ||||

.github/ISSUE_TEMPLATE/ci-sev.md (1 change, vendored)
							| @ -8,7 +8,6 @@ assignees: '' | ||||
| --- | ||||
|  | ||||
| > NOTE: Remember to label this issue with "`ci: sev`" | ||||
| >       If you want autorevert to be disabled, keep the ci: disable-autorevert label | ||||
|  | ||||
|  <!-- Add the `merge blocking` label to this PR to prevent PRs from being merged while this issue is open --> | ||||
|  | ||||
|  | ||||

.github/ISSUE_TEMPLATE/disable-autorevert.md (4 changes, vendored)
							| @ -1,7 +1,7 @@ | ||||
| --- | ||||
| name: "D❌\U0001F519 ISABLE AUTOREVERT" | ||||
| name: DISABLE AUTOREVERT | ||||
| about: Disables autorevert when open | ||||
| title: "[DISABLE AUTOREVERT]" | ||||
| title: "❌\U0001F519 [DISABLE AUTOREVERT]" | ||||
| labels: 'ci: disable-autorevert' | ||||
| assignees: '' | ||||
|  | ||||
|  | ||||
| @ -65,7 +65,7 @@ runs: | ||||
|           cd .ci/lumen_cli | ||||
|           python3 -m pip install -e . | ||||
|         ) | ||||
|         MAX_JOBS="$(nproc --ignore=10)" | ||||
|         MAX_JOBS="$(nproc --ignore=6)" | ||||
|         export MAX_JOBS | ||||
|  | ||||
|         # Split the comma-separated list and build each target | ||||
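For reference, `nproc --ignore=N` reports the online CPU count minus N, floored at 1, so the two sides of this hunk differ only in how many cores are held back from the build. An equivalent sketch:

```python
# coreutils nproc --ignore=N: online CPUs minus N, never below 1.
import os

def nproc_ignore(n: int) -> int:
    return max(1, (os.cpu_count() or 1) - n)

# On a 32-CPU runner: --ignore=6 leaves 26 jobs, --ignore=10 leaves 22.
```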
|  | ||||

.github/actions/setup-rocm/action.yml (13 changes, vendored)
							| @ -111,16 +111,3 @@ runs: | ||||
|         # This video group ID maps to subgid 1 inside the docker image due to the /etc/subgid entries. | ||||
|         # The group name corresponding to group ID 1 can change depending on the OS, so both are necessary. | ||||
|         echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd $DEVICE_FLAG --group-add video --group-add $render_gid --group-add daemon --group-add bin --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --network=host" >> "${GITHUB_ENV}" | ||||
|  | ||||
|     - name: configure aws credentials | ||||
|       id: aws_creds | ||||
|       uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722 # v4.1.0 | ||||
|       with: | ||||
|         role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_s3_and_ecr_read_only | ||||
|         aws-region: us-east-1 | ||||
|         role-duration-seconds: 18000 | ||||
|  | ||||
|     - name: Login to Amazon ECR | ||||
|       id: login-ecr | ||||
|       continue-on-error: true | ||||
|       uses: aws-actions/amazon-ecr-login@062b18b96a7aff071d4dc91bc00c4c1a7945b076 # v2.0.1 | ||||
|  | ||||

.github/ci_commit_pins/audio.txt (2 changes, vendored)
							| @ -1 +1 @@ | ||||
| 69bbe7363897764f9e758d851cd0340147d27f94 | ||||
| 87ff22e49ed0e92576c4935ccb8c143daac4a3cd | ||||
|  | ||||

.github/ci_commit_pins/vision.txt (2 changes, vendored)
							| @ -1 +1 @@ | ||||
| faffd5cf673615583da6517275e361cb3dbc77e6 | ||||
| 966da7e46f65d6d49df3e31214470a4fe5cc8e66 | ||||
|  | ||||

.github/ci_commit_pins/vllm.txt (2 changes, vendored)
							| @ -1 +1 @@ | ||||
| e5192819208c4d68194844b7dfafbc00020d0dea | ||||
| 0ad9951c416d33c5da4f7a504fb162cbe62386f5 | ||||
|  | ||||

.github/ci_commit_pins/xla.txt (2 changes, vendored)
							| @ -1 +1 @@ | ||||
| 0fa6e3129e61143224663e1ec67980d12b7ec4eb | ||||
| 2a9138a26ee257fef05310ad3fecf7c55fe80d73 | ||||
|  | ||||
| @ -1,41 +1,59 @@ | ||||
| # TODO(elainwy): remove this file after the torch nightly dockerfile is in sync in vllm repo | ||||
| # The vLLM Dockerfile is used to construct vLLM image against torch nightly and torch main that can be directly used for testing | ||||
|  | ||||
| ARG CUDA_VERSION=12.8.1 | ||||
| ARG PYTHON_VERSION=3.12 | ||||
|  | ||||
| # BUILD_BASE_IMAGE: used to setup python build xformers, and vllm wheels, It can be replaced with a different base image from local machine, | ||||
| # by default, it uses the torch-nightly-base stage from this docker image | ||||
| ARG BUILD_BASE_IMAGE=torch-nightly-base | ||||
|  | ||||
| # FINAL_BASE_IMAGE: used to set up vllm-instaled environment and build flashinfer, | ||||
| # by default, it uses devel-ubuntu22.04 official image. | ||||
| ARG FINAL_BASE_IMAGE=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04 | ||||
|  | ||||
| # The logic is copied from https://github.com/vllm-project/vllm/blob/main/docker/Dockerfile | ||||
| ARG GET_PIP_URL="https://bootstrap.pypa.io/get-pip.py" | ||||
|  | ||||
|  | ||||
| #################### TORCH NIGHTLY BASE IMAGE #################### | ||||
| # A base image for building vLLM with devel ubuntu 22.04, this is mainly used to build vllm in vllm builtkite ci | ||||
| FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04 as torch-nightly-base | ||||
|  | ||||
| ARG CUDA_VERSION | ||||
| ARG PYTHON_VERSION | ||||
| ARG GET_PIP_URL | ||||
|  | ||||
| # Install system dependencies and uv, then create Python virtual environment | ||||
| # Install Python and other dependencies | ||||
| RUN apt-get update -y \ | ||||
|     && apt-get install -y ccache software-properties-common git curl sudo vim python3-pip \ | ||||
|     && curl -LsSf https://astral.sh/uv/install.sh | sh \ | ||||
|     && $HOME/.local/bin/uv venv /opt/venv --python ${PYTHON_VERSION} \ | ||||
|     && rm -f /usr/bin/python3 /usr/bin/python3-config /usr/bin/pip \ | ||||
|     && ln -s /opt/venv/bin/python3 /usr/bin/python3 \ | ||||
|     && ln -s /opt/venv/bin/python3-config /usr/bin/python3-config \ | ||||
|     && ln -s /opt/venv/bin/pip /usr/bin/pip \ | ||||
|     && apt-get install -y ccache software-properties-common git curl wget sudo vim \ | ||||
|     && add-apt-repository -y ppa:deadsnakes/ppa \ | ||||
|     && apt-get update -y \ | ||||
|     && apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv \ | ||||
|     && update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1 \ | ||||
|     && update-alternatives --set python3 /usr/bin/python${PYTHON_VERSION} \ | ||||
|     && ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config \ | ||||
|     && curl -sS ${GET_PIP_URL} | python${PYTHON_VERSION} \ | ||||
|     && python3 --version && python3 -m pip --version | ||||
|  | ||||
| # Upgrade to GCC 10 to avoid https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92519 | ||||
| # as it was causing spam when compiling the CUTLASS kernels | ||||
| RUN apt-get install -y gcc-10 g++-10 | ||||
| RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 110 --slave /usr/bin/g++ g++ /usr/bin/g++-10 | ||||
| RUN <<EOF | ||||
| gcc --version | ||||
| EOF | ||||
| # Ensure gcc >= 10 to avoid CUTLASS issues (bug 92519) | ||||
| RUN current_gcc_version=$(gcc -dumpversion | cut -f1 -d.) && \ | ||||
|     if command -v apt-get >/dev/null; then \ | ||||
|         if [ "$current_gcc_version" -lt 10 ]; then \ | ||||
|             echo "GCC version is $current_gcc_version, installing gcc-10..."; \ | ||||
|             apt-get update \ | ||||
|             && apt-get install -y gcc-10 g++-10 \ | ||||
|             && update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 100 \ | ||||
|             && update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-10 100; \ | ||||
|         else \ | ||||
|             echo "GCC version is $current_gcc_version, no need to install gcc-10."; \ | ||||
|         fi \ | ||||
|     fi \ | ||||
|     && gcc --version && g++ --version | ||||
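The version gate above keeps only the major component: `gcc -dumpversion | cut -f1 -d.` turns an output like `11.4.0` into `11`, which is then compared numerically against 10. An equivalent sketch:

```python
# Parse the GCC major version the same way the shell pipeline does.
import subprocess

def gcc_major() -> int:
    out = subprocess.run(["gcc", "-dumpversion"],
                         capture_output=True, text=True, check=True).stdout
    return int(out.strip().split(".")[0])   # "11.4.0" -> 11

needs_gcc10 = gcc_major() < 10   # triggers the gcc-10 install branch above
```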
|  | ||||
| # Install uv for faster pip installs | ||||
| # install uv for faster pip installs | ||||
| RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|     python3 -m pip install uv==0.8.4 | ||||
|  | ||||
| @ -43,32 +61,36 @@ ENV UV_HTTP_TIMEOUT=500 | ||||
| ENV UV_INDEX_STRATEGY="unsafe-best-match" | ||||
| # Use copy mode to avoid hardlink failures with Docker cache mounts | ||||
| ENV UV_LINK_MODE=copy | ||||
|  | ||||
| #################### TORCH NIGHTLY  BASE IMAGE #################### | ||||
|  | ||||
|  | ||||
| #################### BASE BUILD IMAGE #################### | ||||
| # A base image for building vLLM with torch nightly or torch wheels | ||||
| # prepare basic build environment | ||||
| FROM ${BUILD_BASE_IMAGE} AS base | ||||
| USER root | ||||
|  | ||||
| ARG CUDA_VERSION | ||||
| ARG PYTHON_VERSION | ||||
|  | ||||
| # Only work with PyTorch manylinux builder | ||||
| # TODO (huydhn): Only work with PyTorch manylinux builder | ||||
| ENV PATH="/opt/python/cp312-cp312/bin:${PATH}" | ||||
|  | ||||
| # Install some system dependencies and double check python version | ||||
| RUN if command -v apt-get >/dev/null; then \ | ||||
|         apt-get update -y \ | ||||
|         && apt-get install -y ccache software-properties-common git wget sudo vim; \ | ||||
|         && apt-get install -y ccache software-properties-common git curl wget sudo vim; \ | ||||
|     else \ | ||||
|         dnf install -y git wget sudo; \ | ||||
|         dnf install -y git curl wget sudo; \ | ||||
|     fi \ | ||||
|     && python3 --version && python3 -m pip --version | ||||
|  | ||||
| # Install uv for faster pip installs if not existed | ||||
| RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|     python3 -m pip install uv==0.8.4 | ||||
|  | ||||
|     if ! python3 -m uv --version >/dev/null 2>&1; then \ | ||||
|         python3 -m pip install uv==0.8.4; \ | ||||
|     fi | ||||
| ENV UV_HTTP_TIMEOUT=500 | ||||
| ENV UV_INDEX_STRATEGY="unsafe-best-match" | ||||
| # Use copy mode to avoid hardlink failures with Docker cache mounts | ||||
| @ -76,15 +98,15 @@ ENV UV_LINK_MODE=copy | ||||
|  | ||||
| WORKDIR /workspace | ||||
|  | ||||
| # Install build and runtime dependencies | ||||
| # install build and runtime dependencies | ||||
| COPY requirements/common.txt requirements/common.txt | ||||
| COPY use_existing_torch.py use_existing_torch.py | ||||
| COPY pyproject.toml pyproject.toml | ||||
|  | ||||
| # Install build and runtime dependencies without stable torch version | ||||
| # install build and runtime dependencies without stable torch version | ||||
| RUN python3 use_existing_torch.py | ||||
|  | ||||
| # Default mount file as placeholder, this just avoid the mount error | ||||
| # default mount file as placeholder, this just avoid the mount error | ||||
| # change to a different vllm folder if this does not exist anymore | ||||
| ARG TORCH_WHEELS_PATH="./requirements" | ||||
| ARG PINNED_TORCH_VERSION | ||||
| @ -116,36 +138,56 @@ RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
| RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|     uv pip install --system -r requirements/common.txt | ||||
|  | ||||
| # Must put before installing xformers, so it can install the correct version of xfomrers. | ||||
| ARG xformers_cuda_arch_list='7.5;8.0+PTX;9.0a' | ||||
| ENV TORCH_CUDA_ARCH_LIST=${xformers_cuda_arch_list} | ||||
|  | ||||
| ARG max_jobs=16 | ||||
| ENV MAX_JOBS=${max_jobs} | ||||
|  | ||||
| RUN --mount=type=cache,target=/root/.cache/uv bash - <<'BASH' | ||||
|     export TORCH_CUDA_ARCH_LIST='7.5 8.0+PTX 9.0a' | ||||
|     git clone https://github.com/facebookresearch/xformers.git | ||||
| RUN echo ${TORCH_CUDA_ARCH_LIST} | ||||
| RUN echo ${MAX_JOBS} | ||||
| RUN pip freeze | grep -E 'ninja' | ||||
|  | ||||
|     pushd xformers | ||||
|     git checkout v0.0.32.post2 | ||||
|     git submodule update --init --recursive | ||||
|     python3 setup.py bdist_wheel --dist-dir=../xformers-dist --verbose | ||||
|     popd | ||||
| # Build xformers with cuda and torch nightly/wheel | ||||
| # following official xformers guidance: https://github.com/facebookresearch/xformers#build | ||||
| # sha for https://github.com/facebookresearch/xformers/tree/v0.0.32.post2 | ||||
| ARG XFORMERS_COMMIT=5d4b92a5e5a9c6c6d4878283f47d82e17995b468 | ||||
| ENV CCACHE_DIR=/root/.cache/ccache | ||||
|  | ||||
|     rm -rf xformers | ||||
| BASH | ||||
| RUN --mount=type=cache,target=/root/.cache/ccache \ | ||||
|     --mount=type=cache,target=/root/.cache/uv \ | ||||
|     echo 'git clone xformers...' \ | ||||
|     && git clone https://github.com/facebookresearch/xformers.git --recursive \ | ||||
|     && cd xformers \ | ||||
|     && git checkout ${XFORMERS_COMMIT} \ | ||||
|     && git submodule update --init --recursive \ | ||||
|     && echo 'finish git clone xformers...' \ | ||||
|     && rm -rf build \ | ||||
|     && python3 setup.py bdist_wheel --dist-dir=../xformers-dist --verbose \ | ||||
|     && cd .. \ | ||||
|     && rm -rf xformers | ||||
|  | ||||
| RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|     uv pip install --system xformers-dist/*.whl | ||||
|     uv pip install --system xformers-dist/*.whl --verbose | ||||
|  | ||||
| # Build can take a long time, and the torch nightly version fetched from url can be different in next docker stage. | ||||
| # track the nightly torch version used in the build, when we set up runtime environment we can make sure the version is the same | ||||
| RUN uv pip freeze | grep -i '^torch\|^torchvision\|^torchaudio' > torch_build_versions.txt | ||||
|  | ||||
| RUN cat torch_build_versions.txt | ||||
| RUN pip freeze | grep -E 'torch|xformers|torchvision|torchaudio' | ||||
|  | ||||
| #################### BASE BUILD IMAGE #################### | ||||
|  | ||||
|  | ||||
| #################### WHEEL BUILD IMAGE #################### | ||||
| # Image used to build vllm wheel | ||||
| FROM base AS build | ||||
| ARG TARGETPLATFORM | ||||
|  | ||||
| COPY . . | ||||
|  | ||||
| RUN python3 use_existing_torch.py | ||||
|  | ||||
| RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
| @ -155,17 +197,20 @@ ARG GIT_REPO_CHECK=0 | ||||
| RUN --mount=type=bind,source=.git,target=.git \ | ||||
|     if [ "$GIT_REPO_CHECK" != "0" ]; then bash tools/check_repo.sh ; fi | ||||
|  | ||||
| # Max jobs used by Ninja to build extensions | ||||
| ARG max_jobs=16 | ||||
| ENV MAX_JOBS=${max_jobs} | ||||
| ARG nvcc_threads=8 | ||||
| ARG nvcc_threads=4 | ||||
| ENV NVCC_THREADS=$nvcc_threads | ||||
| ARG torch_cuda_arch_list='8.0 8.6 8.9 9.0' | ||||
| ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list} | ||||
|  | ||||
| ARG USE_SCCACHE | ||||
| ARG SCCACHE_BUCKET_NAME=vllm-build-sccache | ||||
| ARG SCCACHE_REGION_NAME=us-west-2 | ||||
| ARG SCCACHE_S3_NO_CREDENTIALS=0 | ||||
|  | ||||
| # Use sccache to speed up compilation | ||||
| # if USE_SCCACHE is set, use sccache to speed up compilation | ||||
| RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|     --mount=type=bind,source=.git,target=.git \ | ||||
|     if [ "$USE_SCCACHE" = "1" ]; then \ | ||||
| @ -190,9 +235,6 @@ RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|         && sccache --show-stats; \ | ||||
|     fi | ||||
|  | ||||
| ARG torch_cuda_arch_list='8.0 8.6 8.9 9.0' | ||||
| ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list} | ||||
|  | ||||
| ARG vllm_target_device="cuda" | ||||
| ENV VLLM_TARGET_DEVICE=${vllm_target_device} | ||||
| ENV CCACHE_DIR=/root/.cache/ccache | ||||
| @ -206,10 +248,17 @@ RUN --mount=type=cache,target=/root/.cache/ccache \ | ||||
|         export VLLM_DOCKER_BUILD_CONTEXT=1 && \ | ||||
|         python3 setup.py bdist_wheel --dist-dir=vllm-dist --py-limited-api=cp38; \ | ||||
|     fi | ||||
|  | ||||
| RUN echo "[INFO] Listing current directory:" && \ | ||||
|     ls -al && \ | ||||
|     echo "[INFO] Showing torch_build_versions.txt content:" && \ | ||||
|     cat torch_build_versions.txt | ||||
|  | ||||
| #################### WHEEL BUILD IMAGE #################### | ||||
|  | ||||
|  | ||||
| ################### VLLM INSTALLED IMAGE #################### | ||||
| # Setup clean environment for vLLM for test and api server using ubuntu22.04 with AOT flashinfer | ||||
| FROM ${FINAL_BASE_IMAGE} AS vllm-base | ||||
| USER root | ||||
|  | ||||
| @ -217,7 +266,7 @@ ARG CUDA_VERSION | ||||
| ARG PYTHON_VERSION | ||||
| ARG GET_PIP_URL | ||||
|  | ||||
| # Only work with PyTorch manylinux builder | ||||
| # TODO (huydhn): Only work with PyTorch manylinux builder | ||||
| ENV PATH="/opt/python/cp312-cp312/bin:${PATH}" | ||||
|  | ||||
| # prepare for environment starts | ||||
| @ -226,19 +275,20 @@ WORKDIR /workspace | ||||
| # Install Python and other dependencies | ||||
| RUN if command -v apt-get >/dev/null; then \ | ||||
|         apt-get update -y \ | ||||
|         && apt-get install -y ccache software-properties-common git sudo vim python3-pip; \ | ||||
|         && apt-get install -y ccache software-properties-common git curl wget sudo vim \ | ||||
|         && add-apt-repository -y ppa:deadsnakes/ppa \ | ||||
|         && apt-get update -y \ | ||||
|         && apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv \ | ||||
|         && update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1 \ | ||||
|         && update-alternatives --set python3 /usr/bin/python${PYTHON_VERSION} \ | ||||
|         && ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config \ | ||||
|         && curl -sS ${GET_PIP_URL} | python${PYTHON_VERSION}; \ | ||||
|     else \ | ||||
|         dnf install -y git wget sudo; \ | ||||
|         dnf install -y git curl wget sudo; \ | ||||
|     fi \ | ||||
|     && curl -LsSf https://astral.sh/uv/install.sh | sh \ | ||||
|     && $HOME/.local/bin/uv venv /opt/venv --python ${PYTHON_VERSION} \ | ||||
|     && rm -f /usr/bin/python3 /usr/bin/python3-config /usr/bin/pip \ | ||||
|     && ln -s /opt/venv/bin/python3 /usr/bin/python3 \ | ||||
|     && ln -s /opt/venv/bin/python3-config /usr/bin/python3-config \ | ||||
|     && ln -s /opt/venv/bin/pip /usr/bin/pip \ | ||||
|     && python3 --version && python3 -m pip --version | ||||
|  | ||||
| # Get the torch versions, and whls used in previous stage | ||||
| # Get the torch versions, and whls used in previous stagtes for consistency | ||||
| COPY --from=base /workspace/torch_build_versions.txt ./torch_build_versions.txt | ||||
| COPY --from=base /workspace/xformers-dist /wheels/xformers | ||||
| COPY --from=build /workspace/vllm-dist /wheels/vllm | ||||
| @ -247,29 +297,33 @@ RUN echo "[INFO] Listing current directory before torch install step:" && \ | ||||
|     echo "[INFO] Showing torch_build_versions.txt content:" && \ | ||||
|     cat torch_build_versions.txt | ||||
|  | ||||
| # Install uv for faster pip installs if not existed | ||||
| RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|     python3 -m pip install uv==0.8.4 | ||||
|  | ||||
| ENV UV_HTTP_TIMEOUT=500 | ||||
| ENV UV_INDEX_STRATEGY="unsafe-best-match" | ||||
| # Use copy mode to avoid hardlink failures with Docker cache mounts | ||||
| ENV UV_LINK_MODE=copy | ||||
|  | ||||
| # Install build and runtime dependencies, this is needed for flashinfer install | ||||
| COPY requirements/build.txt requirements/build.txt | ||||
| COPY use_existing_torch.py use_existing_torch.py | ||||
| RUN python3 use_existing_torch.py | ||||
| RUN cat requirements/build.txt | ||||
|  | ||||
| # Install uv for faster pip installs if not existed | ||||
| RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|     if ! python3 -m uv --version > /dev/null 2>&1; then \ | ||||
|         python3 -m pip install uv==0.8.4; \ | ||||
|     fi | ||||
|  | ||||
| ENV UV_HTTP_TIMEOUT=500 | ||||
| ENV UV_INDEX_STRATEGY="unsafe-best-match" | ||||
| # Use copy mode to avoid hardlink failures with Docker cache mounts | ||||
| ENV UV_LINK_MODE=copy | ||||
|  | ||||
|  | ||||
| RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|     uv pip install --system -r requirements/build.txt | ||||
|  | ||||
|  | ||||
| # Default mount file as placeholder, this just avoid the mount error | ||||
| ARG TORCH_WHEELS_PATH="./requirements" | ||||
| # Install torch, torchaudio and torchvision. If TORCH_WHEELS_PATH is default | ||||
| # to ./requirements, it will pull the nightly versions using pip. Otherwise, | ||||
| # it will use the local wheels from TORCH_WHEELS_PATH | ||||
| # Install torch, torchaudio and torchvision | ||||
| # if TORCH_WHEELS_PATH is default "./requirements", it will pull the nightly versions using pip using torch_build_versions.txt | ||||
| # otherwise, it will use the whls from TORCH_WHEELS_PATH from the host machine | ||||
| RUN --mount=type=bind,source=${TORCH_WHEELS_PATH},target=/dist \ | ||||
|     --mount=type=cache,target=/root/.cache/uv \ | ||||
|     if [ -n "$TORCH_WHEELS_PATH" ] && [ "$TORCH_WHEELS_PATH" != "./requirements" ] && [ -d "/dist" ] && ls /dist/torch*.whl >/dev/null 2>&1; then \ | ||||
| @ -290,14 +344,18 @@ RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
| # Install xformers wheel from previous stage | ||||
| RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|     uv pip install --system /wheels/xformers/*.whl --verbose | ||||
|  | ||||
| # Build FlashInfer from source | ||||
| # Build flashinfer from source. | ||||
| ARG torch_cuda_arch_list='8.0;8.9;9.0a;10.0a;12.0' | ||||
| # install package for build flashinfer | ||||
| # see issue: https://github.com/flashinfer-ai/flashinfer/issues/738 | ||||
|  | ||||
| RUN pip freeze | grep -E 'setuptools|packaging|build' | ||||
|  | ||||
| ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list} | ||||
|  | ||||
| # Build flashinfer for torch nightly from source around 10 mins | ||||
| ARG FLASHINFER_GIT_REPO="https://github.com/flashinfer-ai/flashinfer.git" | ||||
| # Keep this in sync with https://github.com/vllm-project/vllm/blob/main/requirements/cuda.txt | ||||
| ARG FLASHINFER_GIT_REF="v0.2.14.post1" | ||||
|  | ||||
| RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|     git clone --depth 1 --recursive --shallow-submodules \ | ||||
|         --branch ${FLASHINFER_GIT_REF} \ | ||||
| @ -309,7 +367,7 @@ RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|     && cd .. \ | ||||
|     && rm -rf flashinfer | ||||
|  | ||||
| # Install FlashInfer | ||||
| # install flashinfer python | ||||
| RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|     uv pip install --system wheels/flashinfer/*.whl --verbose | ||||
|  | ||||
| @ -319,6 +377,49 @@ RUN uv pip freeze | grep -i '^torch\|^torchvision\|^torchaudio\|^xformers\|^vllm | ||||
| ################### VLLM INSTALLED IMAGE #################### | ||||
|  | ||||
|  | ||||
| #################### UNITTEST IMAGE ############################# | ||||
| FROM vllm-base as test | ||||
|  | ||||
| ENV UV_HTTP_TIMEOUT=500 | ||||
| ENV UV_INDEX_STRATEGY="unsafe-best-match" | ||||
| # Use copy mode to avoid hardlink failures with Docker cache mounts | ||||
| ENV UV_LINK_MODE=copy | ||||
|  | ||||
| COPY tests/ tests/ | ||||
| COPY examples examples | ||||
| COPY benchmarks benchmarks | ||||
| COPY ./vllm/collect_env.py . | ||||
| COPY requirements/common.txt requirements/common.txt | ||||
| COPY use_existing_torch.py use_existing_torch.py | ||||
| COPY pyproject.toml pyproject.toml | ||||
| # Install build and runtime dependencies without stable torch version | ||||
| COPY requirements/nightly_torch_test.txt requirements/nightly_torch_test.txt | ||||
|  | ||||
| RUN python3 use_existing_torch.py | ||||
|  | ||||
| # install packages | ||||
| RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|     uv pip install --system -r requirements/common.txt | ||||
| # enable fast downloads from hf (for testing) | ||||
| RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|     uv pip install --system hf_transfer | ||||
| ENV HF_HUB_ENABLE_HF_TRANSFER 1 | ||||
|  | ||||
| # install development dependencies (for testing) | ||||
| RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|     uv pip install --system -e tests/vllm_test_utils | ||||
|  | ||||
| RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|     uv pip install --system -r requirements/nightly_torch_test.txt | ||||
|  | ||||
| # Logging to confirm the torch versions | ||||
| RUN pip freeze | grep -E 'torch|xformers|vllm|flashinfer' | ||||
|  | ||||
| # Logging to confirm all the packages are installed | ||||
| RUN pip freeze | ||||
|  | ||||
| #################### UNITTEST IMAGE ############################# | ||||
|  | ||||
| #################### EXPORT STAGE #################### | ||||
| FROM scratch as export-wheels | ||||
|  | ||||

.github/labeler.yml (29 changes, vendored)
							| @ -133,32 +133,3 @@ | ||||
|  | ||||
| "ciflow/vllm": | ||||
| - .github/ci_commit_pins/vllm.txt | ||||
|  | ||||
| "ciflow/b200": | ||||
| - test/test_matmul_cuda.py | ||||
| - test/test_scaled_matmul_cuda.py | ||||
| - test/inductor/test_fp8.py | ||||
| - aten/src/ATen/native/cuda/Blas.cpp | ||||
| - torch/**/*cublas* | ||||
| - torch/_inductor/kernel/mm.py | ||||
| - test/inductor/test_max_autotune.py | ||||
| - third_party/fbgemm | ||||
|  | ||||
| "ciflow/h100": | ||||
| - test/test_matmul_cuda.py | ||||
| - test/test_scaled_matmul_cuda.py | ||||
| - test/inductor/test_fp8.py | ||||
| - aten/src/ATen/native/cuda/Blas.cpp | ||||
| - torch/**/*cublas* | ||||
| - torch/_inductor/kernel/mm.py | ||||
| - test/inductor/test_max_autotune.py | ||||
| - third_party/fbgemm | ||||
|  | ||||
| "ciflow/rocm": | ||||
| - test/test_matmul_cuda.py | ||||
| - test/test_scaled_matmul_cuda.py | ||||
| - test/inductor/test_fp8.py | ||||
| - aten/src/ATen/native/cuda/Blas.cpp | ||||
| - torch/_inductor/kernel/mm.py | ||||
| - test/inductor/test_max_autotune.py | ||||
| - third_party/fbgemm | ||||
|  | ||||

.github/pytorch-probot.yml (5 changes, vendored)
							| @ -3,7 +3,6 @@ ciflow_tracking_issue: 64124 | ||||
| ciflow_push_tags: | ||||
| - ciflow/b200 | ||||
| - ciflow/b200-symm-mem | ||||
| - ciflow/b200-distributed | ||||
| - ciflow/binaries | ||||
| - ciflow/binaries_libtorch | ||||
| - ciflow/binaries_wheel | ||||
| @ -16,8 +15,7 @@ ciflow_push_tags: | ||||
| - ciflow/inductor-micro-benchmark | ||||
| - ciflow/inductor-micro-benchmark-cpu-x86 | ||||
| - ciflow/inductor-perf-compare | ||||
| - ciflow/inductor-perf-test-nightly-rocm-mi300 | ||||
| - ciflow/inductor-perf-test-nightly-rocm-mi355 | ||||
| - ciflow/inductor-perf-test-nightly-rocm | ||||
| - ciflow/inductor-perf-test-nightly-x86-zen | ||||
| - ciflow/inductor-periodic | ||||
| - ciflow/inductor-rocm | ||||
| @ -33,7 +31,6 @@ ciflow_push_tags: | ||||
| - ciflow/rocm | ||||
| - ciflow/rocm-mi300 | ||||
| - ciflow/rocm-mi355 | ||||
| - ciflow/rocm-navi31 | ||||
| - ciflow/s390 | ||||
| - ciflow/slow | ||||
| - ciflow/torchbench | ||||
|  | ||||

.github/scripts/filter_test_configs.py (2 changes, vendored)
							| @ -512,8 +512,6 @@ def perform_misc_tasks( | ||||
|         "keep-going", | ||||
|         branch == MAIN_BRANCH | ||||
|         or bool(tag and re.match(r"^trunk/[a-f0-9]{40}$", tag)) | ||||
|         # Pattern for tags created via manual run on HUD | ||||
|         or bool(tag and re.match(r"^ciflow/[^/]+/[a-f0-9]{40}$", tag)) | ||||
|         or check_for_setting(labels, pr_body, "keep-going"), | ||||
|     ) | ||||
|     set_output( | ||||
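Both tag patterns that force `keep-going` anchor on a full 40-character hex SHA; the lines removed here covered tags created via a manual run on HUD (`ciflow/<workflow>/<sha>`), which ordinary `ciflow/<workflow>` push tags do not match. A quick check:

```python
# The trunk pattern survives the hunk; the ciflow/<name>/<sha> pattern
# is the one being removed.
import re

TRUNK_TAG = re.compile(r"^trunk/[a-f0-9]{40}$")
CIFLOW_TAG = re.compile(r"^ciflow/[^/]+/[a-f0-9]{40}$")

sha = "a" * 40
assert TRUNK_TAG.match(f"trunk/{sha}")
assert CIFLOW_TAG.match(f"ciflow/inductor/{sha}")
assert not CIFLOW_TAG.match("ciflow/inductor")   # plain push tag, no SHA
```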
|  | ||||

.github/scripts/generate_binary_build_matrix.py (38 changes, vendored)
							| @ -16,18 +16,16 @@ from typing import Optional | ||||
|  | ||||
|  | ||||
| # NOTE: Please also update the CUDA sources in `PIP_SOURCES` in tools/nightly.py when changing this | ||||
| CUDA_ARCHES = ["12.6", "12.8", "12.9", "13.0"] | ||||
| CUDA_ARCHES = ["12.6", "12.8", "13.0"] | ||||
| CUDA_STABLE = "12.8" | ||||
| CUDA_ARCHES_FULL_VERSION = { | ||||
|     "12.6": "12.6.3", | ||||
|     "12.8": "12.8.1", | ||||
|     "12.9": "12.9.1", | ||||
|     "13.0": "13.0.0", | ||||
| } | ||||
| CUDA_ARCHES_CUDNN_VERSION = { | ||||
|     "12.6": "9", | ||||
|     "12.8": "9", | ||||
|     "12.9": "9", | ||||
|     "13.0": "9", | ||||
| } | ||||
|  | ||||
| @ -40,7 +38,7 @@ CPU_AARCH64_ARCH = ["cpu-aarch64"] | ||||
|  | ||||
| CPU_S390X_ARCH = ["cpu-s390x"] | ||||
|  | ||||
| CUDA_AARCH64_ARCHES = ["12.6-aarch64", "12.8-aarch64", "12.9-aarch64", "13.0-aarch64"] | ||||
| CUDA_AARCH64_ARCHES = ["12.6-aarch64", "12.8-aarch64", "13.0-aarch64"] | ||||
|  | ||||
|  | ||||
| PYTORCH_EXTRA_INSTALL_REQUIREMENTS = { | ||||
| @ -78,23 +76,6 @@ PYTORCH_EXTRA_INSTALL_REQUIREMENTS = { | ||||
|         "nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | " | ||||
|         "nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'" | ||||
|     ), | ||||
|     "12.9": ( | ||||
|         "nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | " | ||||
|         "nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | " | ||||
|         "nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | " | ||||
|         "nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | " | ||||
|         "nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | " | ||||
|         "nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | " | ||||
|         "nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | " | ||||
|         "nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | " | ||||
|         "nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | " | ||||
|         "nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | " | ||||
|         "nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | " | ||||
|         "nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | " | ||||
|         "nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | " | ||||
|         "nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | " | ||||
|         "nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux'" | ||||
|     ), | ||||
|     "13.0": ( | ||||
|         "nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | " | ||||
|         "nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | " | ||||
| @ -241,11 +222,7 @@ def generate_libtorch_matrix( | ||||
|             arches += CUDA_ARCHES | ||||
|             arches += ROCM_ARCHES | ||||
|         elif os == "windows": | ||||
|             # TODO (huydhn): Only build CUDA 12.9 for Linux. This logic is to be cleaned up | ||||
|             # in 2.10 | ||||
|             windows_cuda_arches = CUDA_ARCHES.copy() | ||||
|             windows_cuda_arches.remove("12.9") | ||||
|             arches += windows_cuda_arches | ||||
|             arches += CUDA_ARCHES | ||||
|     if libtorch_variants is None: | ||||
|         libtorch_variants = [ | ||||
|             "shared-with-deps", | ||||
| @ -309,11 +286,7 @@ def generate_wheels_matrix( | ||||
|         if os == "linux": | ||||
|             arches += CUDA_ARCHES + ROCM_ARCHES + XPU_ARCHES | ||||
|         elif os == "windows": | ||||
|             # TODO (huydhn): Only build CUDA 12.9 for Linux. This logic is to be cleaned up | ||||
|             # in 2.10 | ||||
|             windows_cuda_arches = CUDA_ARCHES.copy() | ||||
|             windows_cuda_arches.remove("12.9") | ||||
|             arches += windows_cuda_arches + XPU_ARCHES | ||||
|             arches += CUDA_ARCHES + XPU_ARCHES | ||||
|         elif os == "linux-aarch64": | ||||
|             # Separate new if as the CPU type is different and | ||||
|             # uses different build/test scripts | ||||
| @ -349,7 +322,7 @@ def generate_wheels_matrix( | ||||
|             # cuda linux wheels require PYTORCH_EXTRA_INSTALL_REQUIREMENTS to install | ||||
|  | ||||
|             if ( | ||||
|                 arch_version in ["13.0", "12.9", "12.8", "12.6"] | ||||
|                 arch_version in ["13.0", "12.8", "12.6"] | ||||
|                 and os == "linux" | ||||
|                 or arch_version in CUDA_AARCH64_ARCHES | ||||
|             ): | ||||
| @ -413,6 +386,5 @@ def generate_wheels_matrix( | ||||
|  | ||||
|  | ||||
| validate_nccl_dep_consistency("13.0") | ||||
| validate_nccl_dep_consistency("12.9") | ||||
| validate_nccl_dep_consistency("12.8") | ||||
| validate_nccl_dep_consistency("12.6") | ||||
|  | ||||

.github/scripts/trymerge.py (6 changes, vendored)
							| @ -1092,7 +1092,7 @@ class GitHubPR: | ||||
|         editor = node["editor"] | ||||
|         return GitHubComment( | ||||
|             body_text=node["bodyText"], | ||||
|             created_at=node.get("createdAt", ""), | ||||
|             created_at=node["createdAt"] if "createdAt" in node else "", | ||||
|             author_login=node["author"]["login"], | ||||
|             author_url=node["author"].get("url", None), | ||||
|             author_association=node["authorAssociation"], | ||||
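The two spellings of the `createdAt` lookup in this hunk are behaviorally identical for a plain dict; `.get` folds the membership test and fallback into one call:

```python
# dict.get(key, default) == (d[key] if key in d else default)
node = {"bodyText": "LGTM"}
assert (node["createdAt"] if "createdAt" in node else "") == node.get("createdAt", "")

node["createdAt"] = "2025-01-01T00:00:00Z"
assert node.get("createdAt", "") == "2025-01-01T00:00:00Z"
```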
| @ -2042,6 +2042,10 @@ def validate_revert( | ||||
|             f"[{', '.join(allowed_reverters)}], but instead is {author_association}." | ||||
|         ) | ||||
|  | ||||
|     # Raises exception if matching rule is not found, but ignores all status checks | ||||
|     find_matching_merge_rule( | ||||
|         pr, repo, skip_mandatory_checks=True, skip_internal_checks=True | ||||
|     ) | ||||
|     commit_sha = get_pr_commit_sha(repo, pr) | ||||
|     return (author_login, commit_sha) | ||||
|  | ||||
|  | ||||
| @ -177,9 +177,6 @@ jobs: | ||||
|     runs-on: linux.rocm.gpu.mi250 | ||||
|     timeout-minutes: !{{ common.timeout_minutes }} | ||||
|     !{{ upload.binary_env(config) }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
|  | ||||
| @ -26,8 +26,9 @@ name: !{{ build_environment }} | ||||
|       - name: Setup Python | ||||
|         uses: actions/setup-python@v6 | ||||
|         with: | ||||
|           # TODO: Removeme once 3.14 is out | ||||
|           # .4 version is min minor for 3.10, and also no-gil version of 3.13 needs at least 3.13.3 | ||||
|           python-version: "!{{ py_ver.strip('t') + ('.4' if '3.14' not in py_ver else '.0') }}" | ||||
|           python-version: "!{{ (py_ver.strip('t') + '.4') if '3.14' not in py_ver else '3.14.0-rc.2' }}" | ||||
|           freethreaded: !{{ "true" if py_ver.endswith('t') else "false" }} | ||||
| {%- endmacro %} | ||||
|  | ||||
|  | ||||
| @ -79,9 +79,9 @@ jobs: | ||||
|     runs-on: "windows-11-arm64-preview" | ||||
|     {%- else %} | ||||
|     {%- if branches == "nightly" %} | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     {%- else %} | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge.nonephemeral" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" | ||||
|     {%- endif %} | ||||
|     {%- endif %} | ||||
|     timeout-minutes: !{{ common.timeout_minutes_windows_binary }} | ||||
|  | ||||

.github/workflows/_docs.yml (2 changes, vendored)
							| @ -72,7 +72,7 @@ jobs: | ||||
|             # Let's try to figure out how this can be improved | ||||
|             timeout-minutes: 360 | ||||
|           - docs_type: python | ||||
|             runner: ${{ inputs.runner_prefix }}linux.c7i.2xlarge | ||||
|             runner: ${{ inputs.runner_prefix }}linux.2xlarge | ||||
|             # It takes less than 30m to finish python docs unless there are issues | ||||
|             timeout-minutes: 30 | ||||
|     # Set a fixed name for this job instead of using the current matrix-generated name, i.e. build-docs (cpp, linux.12xlarge, 180) | ||||
|  | ||||
.github/workflows/_linux-build.yml (2 changes, vendored)
							| @ -37,7 +37,7 @@ on: | ||||
|       runner: | ||||
|         required: false | ||||
|         type: string | ||||
|         default: "linux.c7i.2xlarge" | ||||
|         default: "linux.2xlarge" | ||||
|         description: | | ||||
|           Label of the runner this job should run on. | ||||
|       test-matrix: | ||||
|  | ||||
.github/workflows/_linux-test.yml (40 changes, vendored)
							| @ -224,46 +224,6 @@ jobs: | ||||
|         continue-on-error: true | ||||
|         uses: ./.github/actions/download-td-artifacts | ||||
|  | ||||
|       - name: Download Windows torch wheel for cross-compilation | ||||
|         if: matrix.win_torch_wheel_artifact != '' | ||||
|         uses: seemethere/download-artifact-s3@1da556a7aa0a088e3153970611f6c432d58e80e6 # v4.2.0 | ||||
|         with: | ||||
|           name: ${{ matrix.win_torch_wheel_artifact }} | ||||
|           path: win-torch-wheel | ||||
|  | ||||
|       - name: Extract Windows wheel and setup CUDA libraries | ||||
|         if: matrix.win_torch_wheel_artifact != '' | ||||
|         shell: bash | ||||
|         run: | | ||||
|           set -x | ||||
|  | ||||
|           # Find the wheel file | ||||
|           WHEEL_FILE=$(find win-torch-wheel -name "*.whl" -type f | head -n 1) | ||||
|           if [ -z "$WHEEL_FILE" ]; then | ||||
|             echo "Error: No wheel file found in win-torch-wheel directory" | ||||
|             exit 1 | ||||
|           fi | ||||
|           echo "Found wheel file: $WHEEL_FILE" | ||||
|  | ||||
|           # Unzip the wheel file | ||||
|           unzip -q "$WHEEL_FILE" -d win-torch-wheel-extracted | ||||
|           echo "Extracted wheel contents" | ||||
|  | ||||
|           # Setup CUDA libraries (cuda.lib and cudart.lib) directory | ||||
|           mkdir -p win-torch-wheel-extracted/lib/x64 | ||||
|           if [ -f "win-torch-wheel/cuda.lib" ]; then | ||||
|             mv win-torch-wheel/cuda.lib win-torch-wheel-extracted/lib/x64/ | ||||
|             echo "Moved cuda.lib to win-torch-wheel-extracted/lib/x64/" | ||||
|           fi | ||||
|           if [ -f "win-torch-wheel/cudart.lib" ]; then | ||||
|             mv win-torch-wheel/cudart.lib win-torch-wheel-extracted/lib/x64/ | ||||
|             echo "Moved cudart.lib to win-torch-wheel-extracted/lib/x64/" | ||||
|           fi | ||||
|  | ||||
|           # Verify CUDA libraries are present | ||||
|           echo "CUDA libraries:" | ||||
|           ls -la win-torch-wheel-extracted/lib/x64/ || echo "No CUDA libraries found" | ||||
|  | ||||
|       - name: Parse ref | ||||
|         id: parse-ref | ||||
|         run: .github/scripts/parse_ref.py | ||||
|  | ||||
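The removed step above locates the first .whl under win-torch-wheel, extracts it (a wheel is an ordinary zip archive), and stages cuda.lib/cudart.lib beside the extracted contents. A rough Python equivalent of that staging logic, assuming the same directory names as the bash step:

    import glob
    import shutil
    import zipfile
    from pathlib import Path

    # Mirrors `find win-torch-wheel -name "*.whl" -type f | head -n 1`.
    wheels = sorted(glob.glob('win-torch-wheel/**/*.whl', recursive=True))
    if not wheels:
        raise SystemExit('Error: No wheel file found in win-torch-wheel directory')

    # Wheels are zip archives, so zipfile can unpack them directly.
    zipfile.ZipFile(wheels[0]).extractall('win-torch-wheel-extracted')

    lib_dir = Path('win-torch-wheel-extracted/lib/x64')
    lib_dir.mkdir(parents=True, exist_ok=True)
    for lib in ('cuda.lib', 'cudart.lib'):
        src = Path('win-torch-wheel') / lib
        if src.exists():
            shutil.move(str(src), str(lib_dir / lib))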
.github/workflows/_rocm-test.yml (13 changes, vendored)
							| @ -102,6 +102,19 @@ jobs: | ||||
|             exit 1 | ||||
|           fi | ||||
|  | ||||
|       - name: configure aws credentials | ||||
|         id: aws_creds | ||||
|         uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722 # v4.1.0 | ||||
|         with: | ||||
|           role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_s3_and_ecr_read_only | ||||
|           aws-region: us-east-1 | ||||
|           role-duration-seconds: 18000 | ||||
|  | ||||
|       - name: Login to Amazon ECR | ||||
|         id: login-ecr | ||||
|         continue-on-error: true | ||||
|         uses: aws-actions/amazon-ecr-login@062b18b96a7aff071d4dc91bc00c4c1a7945b076 # v2.0.1 | ||||
|  | ||||
|       - name: Calculate docker image | ||||
|         id: calculate-docker-image | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|  | ||||
.github/workflows/_win-build.yml (25 changes, vendored)
							| @ -168,31 +168,6 @@ jobs: | ||||
|         run: | | ||||
|           .ci/pytorch/win-build.sh | ||||
|  | ||||
|       # Collect Windows torch libs and CUDA libs for cross-compilation | ||||
|       - name: Collect Windows CUDA libs for cross-compilation | ||||
|         if: steps.build.outcome != 'skipped' && inputs.cuda-version != 'cpu' | ||||
|         shell: bash | ||||
|         run: | | ||||
|           set -ex | ||||
|  | ||||
|           # Create directory structure if it does not exist | ||||
|           mkdir -p /c/${{ github.run_id }}/build-results | ||||
|  | ||||
|           # Copy CUDA libs | ||||
|           CUDA_PATH="/c/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${{ inputs.cuda-version }}" | ||||
|  | ||||
|           if [ -f "${CUDA_PATH}/lib/x64/cuda.lib" ]; then | ||||
|             cp "${CUDA_PATH}/lib/x64/cuda.lib" /c/${{ github.run_id }}/build-results/ | ||||
|           fi | ||||
|  | ||||
|           if [ -f "${CUDA_PATH}/lib/x64/cudart.lib" ]; then | ||||
|             cp "${CUDA_PATH}/lib/x64/cudart.lib" /c/${{ github.run_id }}/build-results/ | ||||
|           fi | ||||
|  | ||||
|           # List collected files | ||||
|           echo "Collected CUDA libs:" | ||||
|           ls -lah /c/${{ github.run_id }}/build-results/*.lib | ||||
|  | ||||
|       # Upload to github so that people can click and download artifacts | ||||
|       - name: Upload artifacts to s3 | ||||
|         if: steps.build.outcome != 'skipped' | ||||
|  | ||||
.github/workflows/b200-distributed.yml (62 changes, vendored)
							| @ -1,62 +0,0 @@ | ||||
| name: CI for distributed tests on B200 | ||||
|  | ||||
| on: | ||||
|   pull_request: | ||||
|     paths: | ||||
|       - .github/workflows/b200-distributed.yml | ||||
|   workflow_dispatch: | ||||
|   push: | ||||
|     tags: | ||||
|       - ciflow/b200-distributed/* | ||||
|   schedule: | ||||
|     - cron: 46 8 * * *  # about 1:46am PDT | ||||
|  | ||||
| concurrency: | ||||
|   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }} | ||||
|   cancel-in-progress: true | ||||
|  | ||||
| permissions: | ||||
|   id-token: write | ||||
|   contents: read | ||||
|  | ||||
| jobs: | ||||
|  | ||||
|   get-label-type: | ||||
|     if: github.repository_owner == 'pytorch' | ||||
|     name: get-label-type | ||||
|     uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main | ||||
|     with: | ||||
|       triggering_actor: ${{ github.triggering_actor }} | ||||
|       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }} | ||||
|       curr_branch: ${{ github.head_ref || github.ref_name }} | ||||
|       curr_ref_type: ${{ github.ref_type }} | ||||
|  | ||||
|   linux-jammy-cuda12_8-py3_10-gcc11-build-distributed-b200: | ||||
|     name: linux-jammy-cuda12.8-py3.10-gcc11-build-distributed-b200 | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runner: linux.12xlarge.memory | ||||
|       build-environment: linux-jammy-cuda12.8-py3.10-gcc11-distributed-b200 | ||||
|       docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc11 | ||||
|       cuda-arch-list: '10.0' | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "distributed", shard: 1, num_shards: 2, runner: "linux.dgx.b200.8" }, | ||||
|           { config: "distributed", shard: 2, num_shards: 2, runner: "linux.dgx.b200.8" }, | ||||
|         ]} | ||||
|     secrets: inherit | ||||
|  | ||||
|   linux-jammy-cuda12_8-py3_10-gcc11-test-distributed-b200: | ||||
|     name: linux-jammy-cuda12.8-py3.10-gcc11-test-b200 | ||||
|     uses: ./.github/workflows/_linux-test.yml | ||||
|     needs: | ||||
|       - linux-jammy-cuda12_8-py3_10-gcc11-build-distributed-b200 | ||||
|     with: | ||||
|       timeout-minutes: 1200 | ||||
|       build-environment: linux-jammy-cuda12.8-py3.10-gcc11-distributed-b200 | ||||
|       docker-image: ${{ needs.linux-jammy-cuda12_8-py3_10-gcc11-build-distributed-b200.outputs.docker-image }} | ||||
|       test-matrix: ${{ needs.linux-jammy-cuda12_8-py3_10-gcc11-build-distributed-b200.outputs.test-matrix }} | ||||
|       aws-role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_s3_and_ecr_read_only | ||||
|     secrets: inherit | ||||
.github/workflows/build-manywheel-images.yml (4 changes, vendored)
							| @ -46,12 +46,10 @@ jobs: | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         include: [ | ||||
|           { name: "manylinux2_28-builder",          tag: "cuda13.0",          runner: "linux.9xlarge.ephemeral" }, | ||||
|           { name: "manylinux2_28-builder",          tag: "cuda13.0",         runner: "linux.9xlarge.ephemeral" }, | ||||
|           { name: "manylinux2_28-builder",          tag: "cuda12.8",          runner: "linux.9xlarge.ephemeral" }, | ||||
|           { name: "manylinux2_28-builder",          tag: "cuda12.9",          runner: "linux.9xlarge.ephemeral" }, | ||||
|           { name: "manylinux2_28-builder",          tag: "cuda12.6",          runner: "linux.9xlarge.ephemeral" }, | ||||
|           { name: "manylinuxaarch64-builder",       tag: "cuda13.0",          runner: "linux.arm64.2xlarge.ephemeral" }, | ||||
|           { name: "manylinuxaarch64-builder",       tag: "cuda12.9",          runner: "linux.arm64.2xlarge.ephemeral" }, | ||||
|           { name: "manylinuxaarch64-builder",       tag: "cuda12.8",          runner: "linux.arm64.2xlarge.ephemeral" }, | ||||
|           { name: "manylinuxaarch64-builder",       tag: "cuda12.6",          runner: "linux.arm64.2xlarge.ephemeral" }, | ||||
|           { name: "manylinux2_28-builder",          tag: "rocm6.4",           runner: "linux.9xlarge.ephemeral" }, | ||||
|  | ||||
.github/workflows/build-vllm-wheel.yml (19 changes, vendored)
							| @ -27,8 +27,9 @@ jobs: | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         python-version: [ '3.12' ] | ||||
|         # TODO (huydhn): Add cu130 after https://github.com/vllm-project/vllm/issues/24464 is resolved | ||||
|         platform: [ 'manylinux_2_28_x86_64', 'manylinux_2_28_aarch64' ] | ||||
|         device: [ 'cu128', 'cu129', 'cu130' ] | ||||
|         device: [ 'cu128', 'cu129' ] | ||||
|         include: | ||||
|           - platform: manylinux_2_28_x86_64 | ||||
|             device: cu128 | ||||
| @ -38,10 +39,6 @@ jobs: | ||||
|             device: cu129 | ||||
|             manylinux-image: 'pytorch/manylinux2_28-builder:cuda12.9' | ||||
|             runner: linux.12xlarge.memory | ||||
|           - platform: manylinux_2_28_x86_64 | ||||
|             device: cu130 | ||||
|             manylinux-image: 'pytorch/manylinux2_28-builder:cuda13.0' | ||||
|             runner: linux.12xlarge.memory | ||||
|           - platform: manylinux_2_28_aarch64 | ||||
|             device: cu128 | ||||
|             manylinux-image: 'pytorch/manylinuxaarch64-builder:cuda12.8' | ||||
| @ -50,11 +47,6 @@ jobs: | ||||
|             device: cu129 | ||||
|             manylinux-image: 'pytorch/manylinuxaarch64-builder:cuda12.9' | ||||
|             runner: linux.arm64.r7g.12xlarge.memory | ||||
|         exclude: | ||||
|           # TODO (huydhn): Add cu130 aarch64 once PyTorch is on 2.9+ and | ||||
|           # xformers is updated to support 13.0 | ||||
|           - platform: manylinux_2_28_aarch64 | ||||
|             device: cu130 | ||||
|     name: "Build ${{ matrix.device }} vLLM wheel on ${{ matrix.platform }}" | ||||
|     runs-on: ${{ matrix.runner }} | ||||
|     timeout-minutes: 480 | ||||
| @ -177,12 +169,7 @@ jobs: | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         platform: [ 'manylinux_2_28_x86_64', 'manylinux_2_28_aarch64' ] | ||||
|         device: [ 'cu128', 'cu129', 'cu130' ] | ||||
|         exclude: | ||||
|           # TODO (huydhn): Add cu130 aarch64 once PyTorch is on 2.9+ and | ||||
|           # xformers is updated to support 13.0 | ||||
|           - platform: manylinux_2_28_aarch64 | ||||
|             device: cu130 | ||||
|         device: [ 'cu128', 'cu129' ] | ||||
|     env: | ||||
|       PLATFORM: ${{ matrix.platform }} | ||||
|       BUILD_DEVICE: ${{ matrix.device }} | ||||
|  | ||||
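For context on the matrix edits above: GitHub Actions expands a matrix as the cross product of its lists, drops exclude entries, and layers include entries onto matching combinations. A simplified Python model of the aarch64 cu130 exclusion shown in the hunk (not the Actions implementation, just the shape of the rule):

    from itertools import product

    platforms = ['manylinux_2_28_x86_64', 'manylinux_2_28_aarch64']
    devices = ['cu128', 'cu129', 'cu130']
    excluded = [{'platform': 'manylinux_2_28_aarch64', 'device': 'cu130'}]

    jobs = [
        {'platform': p, 'device': d}
        for p, d in product(platforms, devices)
        if {'platform': p, 'device': d} not in excluded
    ]
    assert len(jobs) == 5  # cu130 is built on x86_64 only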
.github/workflows/generated-linux-aarch64-binary-manywheel-nightly.yml (322 changes, generated, vendored)
							| @ -204,52 +204,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_10-cuda-aarch64-12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.arm64.r7g.12xlarge.memory | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_10-cuda-aarch64-12_9 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_10-cuda-aarch64-12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_10-cuda-aarch64-12_9-build | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|       build_name: manywheel-py3_10-cuda-aarch64-12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_10-cuda-aarch64-13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -453,52 +407,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_11-cuda-aarch64-12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.11" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.arm64.r7g.12xlarge.memory | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_11-cuda-aarch64-12_9 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_11-cuda-aarch64-12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_11-cuda-aarch64-12_9-build | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.11" | ||||
|       build_name: manywheel-py3_11-cuda-aarch64-12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_11-cuda-aarch64-13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -702,52 +610,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_12-cuda-aarch64-12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.arm64.r7g.12xlarge.memory | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_12-cuda-aarch64-12_9 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_12-cuda-aarch64-12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_12-cuda-aarch64-12_9-build | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|       build_name: manywheel-py3_12-cuda-aarch64-12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_12-cuda-aarch64-13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -951,52 +813,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_13-cuda-aarch64-12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.13" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.arm64.r7g.12xlarge.memory | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_13-cuda-aarch64-12_9 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_13-cuda-aarch64-12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_13-cuda-aarch64-12_9-build | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.13" | ||||
|       build_name: manywheel-py3_13-cuda-aarch64-12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_13-cuda-aarch64-13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -1200,52 +1016,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_13t-cuda-aarch64-12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.13t" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.arm64.r7g.12xlarge.memory | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_13t-cuda-aarch64-12_9 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_13t-cuda-aarch64-12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_13t-cuda-aarch64-12_9-build | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.13t" | ||||
|       build_name: manywheel-py3_13t-cuda-aarch64-12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_13t-cuda-aarch64-13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -1449,52 +1219,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_14-cuda-aarch64-12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.14" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.arm64.r7g.12xlarge.memory | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_14-cuda-aarch64-12_9 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_14-cuda-aarch64-12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_14-cuda-aarch64-12_9-build | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.14" | ||||
|       build_name: manywheel-py3_14-cuda-aarch64-12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_14-cuda-aarch64-13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -1698,52 +1422,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_14t-cuda-aarch64-12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.14t" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.arm64.r7g.12xlarge.memory | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_14t-cuda-aarch64-12_9 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_14t-cuda-aarch64-12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_14t-cuda-aarch64-12_9-build | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.14t" | ||||
|       build_name: manywheel-py3_14t-cuda-aarch64-12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_14t-cuda-aarch64-13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|  | ||||
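Each removed cu129 job above carries a PYTORCH_EXTRA_INSTALL_REQUIREMENTS value: a '|'-separated list of PEP 508 requirement strings with environment markers. A small sketch of splitting that format into per-package specs, using an abbreviated example string and assuming '|' is the delimiter, as the values above suggest:

    reqs = (
        "nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | "
        "nvidia-nccl-cu12==2.27.5; platform_system == 'Linux'"
    )
    for spec in (part.strip() for part in reqs.split('|')):
        name_version, _, marker = spec.partition(';')
        print(name_version.strip(), '->', marker.strip())
    # nvidia-cuda-nvrtc-cu12==12.9.86 -> platform_system == 'Linux'
    # nvidia-nccl-cu12==2.27.5 -> platform_system == 'Linux'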
.github/workflows/generated-linux-binary-libtorch-nightly.yml (74 changes, generated, vendored)
							| @ -248,74 +248,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   libtorch-cuda12_9-shared-with-deps-release-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: libtorch | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: libtorch-cxx11-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       LIBTORCH_CONFIG: release | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: libtorch-cuda12_9-shared-with-deps-release | ||||
|       build_environment: linux-binary-libtorch | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   libtorch-cuda12_9-shared-with-deps-release-test:  # Testing | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: | ||||
|       - libtorch-cuda12_9-shared-with-deps-release-build | ||||
|       - get-label-type | ||||
|     uses: ./.github/workflows/_binary-test-linux.yml | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: libtorch | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: libtorch-cxx11-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       LIBTORCH_CONFIG: release | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|       build_name: libtorch-cuda12_9-shared-with-deps-release | ||||
|       build_environment: linux-binary-libtorch | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   libtorch-cuda12_9-shared-with-deps-release-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: libtorch-cuda12_9-shared-with-deps-release-test | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: libtorch | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: libtorch-cxx11-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       LIBTORCH_CONFIG: release | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|       build_name: libtorch-cuda12_9-shared-with-deps-release | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   libtorch-cuda13_0-shared-with-deps-release-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -426,9 +358,6 @@ jobs: | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm6.4 | ||||
|       LIBTORCH_CONFIG: release | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -544,9 +473,6 @@ jobs: | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm7.0 | ||||
|       LIBTORCH_CONFIG: release | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
|  | ||||
.github/workflows/generated-linux-binary-manywheel-nightly.yml (504 changes, generated, vendored)
							| @ -241,72 +241,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_10-cuda12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: manywheel-py3_10-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_10-cuda12_9-test:  # Testing | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: | ||||
|       - manywheel-py3_10-cuda12_9-build | ||||
|       - get-label-type | ||||
|     uses: ./.github/workflows/_binary-test-linux.yml | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|       build_name: manywheel-py3_10-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_10-cuda12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_10-cuda12_9-test | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|       build_name: manywheel-py3_10-cuda12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_10-cuda13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -413,9 +347,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm6.4 | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -528,9 +459,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm7.0 | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -907,72 +835,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_11-cuda12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.11" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: manywheel-py3_11-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_11-cuda12_9-test:  # Testing | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: | ||||
|       - manywheel-py3_11-cuda12_9-build | ||||
|       - get-label-type | ||||
|     uses: ./.github/workflows/_binary-test-linux.yml | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.11" | ||||
|       build_name: manywheel-py3_11-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_11-cuda12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_11-cuda12_9-test | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.11" | ||||
|       build_name: manywheel-py3_11-cuda12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_11-cuda13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -1079,9 +941,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm6.4 | ||||
|       DESIRED_PYTHON: "3.11" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -1194,9 +1053,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm7.0 | ||||
|       DESIRED_PYTHON: "3.11" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -1573,72 +1429,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_12-cuda12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: manywheel-py3_12-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_12-cuda12_9-test:  # Testing | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: | ||||
|       - manywheel-py3_12-cuda12_9-build | ||||
|       - get-label-type | ||||
|     uses: ./.github/workflows/_binary-test-linux.yml | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|       build_name: manywheel-py3_12-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_12-cuda12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_12-cuda12_9-test | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|       build_name: manywheel-py3_12-cuda12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_12-cuda13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -1745,9 +1535,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm6.4 | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -1860,9 +1647,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm7.0 | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -2239,72 +2023,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_13-cuda12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.13" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: manywheel-py3_13-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_13-cuda12_9-test:  # Testing | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: | ||||
|       - manywheel-py3_13-cuda12_9-build | ||||
|       - get-label-type | ||||
|     uses: ./.github/workflows/_binary-test-linux.yml | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.13" | ||||
|       build_name: manywheel-py3_13-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_13-cuda12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_13-cuda12_9-test | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.13" | ||||
|       build_name: manywheel-py3_13-cuda12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_13-cuda13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -2411,9 +2129,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm6.4 | ||||
|       DESIRED_PYTHON: "3.13" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -2526,9 +2241,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm7.0 | ||||
|       DESIRED_PYTHON: "3.13" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -2905,72 +2617,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_13t-cuda12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.13t" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: manywheel-py3_13t-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_13t-cuda12_9-test:  # Testing | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: | ||||
|       - manywheel-py3_13t-cuda12_9-build | ||||
|       - get-label-type | ||||
|     uses: ./.github/workflows/_binary-test-linux.yml | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.13t" | ||||
|       build_name: manywheel-py3_13t-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_13t-cuda12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_13t-cuda12_9-test | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.13t" | ||||
|       build_name: manywheel-py3_13t-cuda12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_13t-cuda13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -3077,9 +2723,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm6.4 | ||||
|       DESIRED_PYTHON: "3.13t" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -3192,9 +2835,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm7.0 | ||||
|       DESIRED_PYTHON: "3.13t" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -3571,72 +3211,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_14-cuda12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.14" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: manywheel-py3_14-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_14-cuda12_9-test:  # Testing | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: | ||||
|       - manywheel-py3_14-cuda12_9-build | ||||
|       - get-label-type | ||||
|     uses: ./.github/workflows/_binary-test-linux.yml | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.14" | ||||
|       build_name: manywheel-py3_14-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_14-cuda12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_14-cuda12_9-test | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.14" | ||||
|       build_name: manywheel-py3_14-cuda12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_14-cuda13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -3743,9 +3317,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm6.4 | ||||
|       DESIRED_PYTHON: "3.14" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -3858,9 +3429,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm7.0 | ||||
|       DESIRED_PYTHON: "3.14" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -4237,72 +3805,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_14t-cuda12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.14t" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: manywheel-py3_14t-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_14t-cuda12_9-test:  # Testing | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: | ||||
|       - manywheel-py3_14t-cuda12_9-build | ||||
|       - get-label-type | ||||
|     uses: ./.github/workflows/_binary-test-linux.yml | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.14t" | ||||
|       build_name: manywheel-py3_14t-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_14t-cuda12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_14t-cuda12_9-test | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.14t" | ||||
|       build_name: manywheel-py3_14t-cuda12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_14t-cuda13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -4409,9 +3911,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm6.4 | ||||
|       DESIRED_PYTHON: "3.14t" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -4524,9 +4023,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm7.0 | ||||
|       DESIRED_PYTHON: "3.14t" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
|  | ||||
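Editor's note: the cu129 jobs removed above each pinned the full NVIDIA runtime stack through PYTORCH_EXTRA_INSTALL_REQUIREMENTS, a pipe-separated list of PEP 508 requirements with platform markers. As a minimal sketch of how such a value can be split and marker-filtered (the " | " splitting convention is inferred from the string's layout, not taken from this diff):

# Hedged sketch: split a pipe-separated PYTORCH_EXTRA_INSTALL_REQUIREMENTS
# value into individual PEP 508 requirements and keep only those whose
# environment marker matches the target platform.
from packaging.requirements import Requirement

extras = (
    "nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | "
    "nvidia-nccl-cu12==2.27.5; platform_system == 'Linux'"
)

for spec in extras.split(" | "):
    req = Requirement(spec)
    # A requirement with no marker applies everywhere; otherwise evaluate
    # the marker against the platform we are installing for.
    if req.marker is None or req.marker.evaluate({"platform_system": "Linux"}):
        print(req.name, req.specifier)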
.github/workflows/generated-macos-arm64-binary-libtorch-release-nightly.yml (1 change; generated, vendored)
| @ -63,6 +63,7 @@ jobs: | ||||
|       - name: Setup Python | ||||
|         uses: actions/setup-python@v6 | ||||
|         with: | ||||
|           # TODO: Removeme once 3.14 is out | ||||
|           # .4 version is min minor for 3.10, and also no-gil version of 3.13 needs at least 3.13.3 | ||||
|           python-version: "3.10.4" | ||||
|           freethreaded: false | ||||
|  | ||||
.github/workflows/generated-macos-arm64-binary-wheel-nightly.yml (11 changes; generated, vendored)
| @ -59,6 +59,7 @@ jobs: | ||||
|       - name: Setup Python | ||||
|         uses: actions/setup-python@v6 | ||||
|         with: | ||||
|           # TODO: Removeme once 3.14 is out | ||||
|           # .4 version is min minor for 3.10, and also no-gil version of 3.13 needs at least 3.13.3 | ||||
|           python-version: "3.10.4" | ||||
|           freethreaded: false | ||||
| @ -168,6 +169,7 @@ jobs: | ||||
|       - name: Setup Python | ||||
|         uses: actions/setup-python@v6 | ||||
|         with: | ||||
|           # TODO: Removeme once 3.14 is out | ||||
|           # .4 version is min minor for 3.10, and also no-gil version of 3.13 needs at least 3.13.3 | ||||
|           python-version: "3.11.4" | ||||
|           freethreaded: false | ||||
| @ -277,6 +279,7 @@ jobs: | ||||
|       - name: Setup Python | ||||
|         uses: actions/setup-python@v6 | ||||
|         with: | ||||
|           # TODO: Removeme once 3.14 is out | ||||
|           # .4 version is min minor for 3.10, and also no-gil version of 3.13 needs at least 3.13.3 | ||||
|           python-version: "3.12.4" | ||||
|           freethreaded: false | ||||
| @ -386,6 +389,7 @@ jobs: | ||||
|       - name: Setup Python | ||||
|         uses: actions/setup-python@v6 | ||||
|         with: | ||||
|           # TODO: Removeme once 3.14 is out | ||||
|           # .4 version is min minor for 3.10, and also no-gil version of 3.13 needs at least 3.13.3 | ||||
|           python-version: "3.13.4" | ||||
|           freethreaded: false | ||||
| @ -495,6 +499,7 @@ jobs: | ||||
|       - name: Setup Python | ||||
|         uses: actions/setup-python@v6 | ||||
|         with: | ||||
|           # TODO: Removeme once 3.14 is out | ||||
|           # .4 version is min minor for 3.10, and also no-gil version of 3.13 needs at least 3.13.3 | ||||
|           python-version: "3.13.4" | ||||
|           freethreaded: true | ||||
| @ -604,8 +609,9 @@ jobs: | ||||
|       - name: Setup Python | ||||
|         uses: actions/setup-python@v6 | ||||
|         with: | ||||
|           # TODO: Removeme once 3.14 is out | ||||
|           # .4 version is min minor for 3.10, and also no-gil version of 3.13 needs at least 3.13.3 | ||||
|           python-version: "3.14.0" | ||||
|           python-version: "3.14.0-rc.2" | ||||
|           freethreaded: false | ||||
|       - name: Checkout PyTorch | ||||
|         uses: actions/checkout@v4 | ||||
| @ -713,8 +719,9 @@ jobs: | ||||
|       - name: Setup Python | ||||
|         uses: actions/setup-python@v6 | ||||
|         with: | ||||
|           # TODO: Removeme once 3.14 is out | ||||
|           # .4 version is min minor for 3.10, and also no-gil version of 3.13 needs at least 3.13.3 | ||||
|           python-version: "3.14.0" | ||||
|           python-version: "3.14.0-rc.2" | ||||
|           freethreaded: true | ||||
|       - name: Checkout PyTorch | ||||
|         uses: actions/checkout@v4 | ||||
|  | ||||
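Editor's note: the macOS wheel workflow above must spell the interpreter as a prerelease because, under PEP 440 ordering, the 3.14.0rc2 candidate sorts before the final 3.14.0 release, so a plain "3.14.0" spec would not resolve until 3.14 ships. A small illustrative check (not code from this repo):

# PEP 440 orders release candidates before the final release, so an exact
# "3.14.0" request cannot match the rc that is currently available.
from packaging.version import Version

assert Version("3.14.0rc2") < Version("3.14.0")

# actions/setup-python takes the semver-style spelling "3.14.0-rc.2";
# packaging normalizes the equivalent PEP 440 form to "3.14.0rc2".
print(Version("3.14.0-rc.2"))  # -> 3.14.0rc2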
.github/workflows/generated-windows-binary-libtorch-debug-nightly.yml (8 changes; generated, vendored)
| @ -44,7 +44,7 @@ jobs: | ||||
|   libtorch-cpu-shared-with-deps-debug-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -291,7 +291,7 @@ jobs: | ||||
|   libtorch-cuda12_6-shared-with-deps-debug-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -541,7 +541,7 @@ jobs: | ||||
|   libtorch-cuda12_8-shared-with-deps-debug-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -791,7 +791,7 @@ jobs: | ||||
|   libtorch-cuda13_0-shared-with-deps-debug-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
|  | ||||
.github/workflows/generated-windows-binary-libtorch-release-nightly.yml (8 changes; generated, vendored)
| @ -44,7 +44,7 @@ jobs: | ||||
|   libtorch-cpu-shared-with-deps-release-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -291,7 +291,7 @@ jobs: | ||||
|   libtorch-cuda12_6-shared-with-deps-release-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -541,7 +541,7 @@ jobs: | ||||
|   libtorch-cuda12_8-shared-with-deps-release-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -791,7 +791,7 @@ jobs: | ||||
|   libtorch-cuda13_0-shared-with-deps-release-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
|  | ||||
.github/workflows/generated-windows-binary-wheel-nightly.yml (70 changes; generated, vendored)
| @ -44,7 +44,7 @@ jobs: | ||||
|   wheel-py3_10-cpu-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -279,7 +279,7 @@ jobs: | ||||
|   wheel-py3_10-cuda12_6-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -517,7 +517,7 @@ jobs: | ||||
|   wheel-py3_10-cuda12_8-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -755,7 +755,7 @@ jobs: | ||||
|   wheel-py3_10-cuda13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -993,7 +993,7 @@ jobs: | ||||
|   wheel-py3_10-xpu-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -1229,7 +1229,7 @@ jobs: | ||||
|   wheel-py3_11-cpu-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -1464,7 +1464,7 @@ jobs: | ||||
|   wheel-py3_11-cuda12_6-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -1702,7 +1702,7 @@ jobs: | ||||
|   wheel-py3_11-cuda12_8-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -1940,7 +1940,7 @@ jobs: | ||||
|   wheel-py3_11-cuda13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -2178,7 +2178,7 @@ jobs: | ||||
|   wheel-py3_11-xpu-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -2414,7 +2414,7 @@ jobs: | ||||
|   wheel-py3_12-cpu-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -2649,7 +2649,7 @@ jobs: | ||||
|   wheel-py3_12-cuda12_6-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -2887,7 +2887,7 @@ jobs: | ||||
|   wheel-py3_12-cuda12_8-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -3125,7 +3125,7 @@ jobs: | ||||
|   wheel-py3_12-cuda13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -3363,7 +3363,7 @@ jobs: | ||||
|   wheel-py3_12-xpu-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -3599,7 +3599,7 @@ jobs: | ||||
|   wheel-py3_13-cpu-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -3834,7 +3834,7 @@ jobs: | ||||
|   wheel-py3_13-cuda12_6-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -4072,7 +4072,7 @@ jobs: | ||||
|   wheel-py3_13-cuda12_8-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -4310,7 +4310,7 @@ jobs: | ||||
|   wheel-py3_13-cuda13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -4548,7 +4548,7 @@ jobs: | ||||
|   wheel-py3_13-xpu-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -4784,7 +4784,7 @@ jobs: | ||||
|   wheel-py3_13t-cpu-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -5019,7 +5019,7 @@ jobs: | ||||
|   wheel-py3_13t-cuda12_6-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -5257,7 +5257,7 @@ jobs: | ||||
|   wheel-py3_13t-cuda12_8-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -5495,7 +5495,7 @@ jobs: | ||||
|   wheel-py3_13t-cuda13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -5733,7 +5733,7 @@ jobs: | ||||
|   wheel-py3_13t-xpu-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -5969,7 +5969,7 @@ jobs: | ||||
|   wheel-py3_14-cpu-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -6204,7 +6204,7 @@ jobs: | ||||
|   wheel-py3_14-cuda12_6-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -6442,7 +6442,7 @@ jobs: | ||||
|   wheel-py3_14-cuda12_8-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -6680,7 +6680,7 @@ jobs: | ||||
|   wheel-py3_14-cuda13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -6918,7 +6918,7 @@ jobs: | ||||
|   wheel-py3_14-xpu-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -7154,7 +7154,7 @@ jobs: | ||||
|   wheel-py3_14t-cpu-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -7389,7 +7389,7 @@ jobs: | ||||
|   wheel-py3_14t-cuda12_6-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -7627,7 +7627,7 @@ jobs: | ||||
|   wheel-py3_14t-cuda12_8-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -7865,7 +7865,7 @@ jobs: | ||||
|   wheel-py3_14t-cuda13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -8103,7 +8103,7 @@ jobs: | ||||
|   wheel-py3_14t-xpu-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
|  | ||||
| @ -130,7 +130,7 @@ jobs: | ||||
|     name: test-periodically | ||||
|     uses: ./.github/workflows/_linux-test.yml | ||||
|     needs: build | ||||
|     if: github.event.schedule == '15 0 * * 1-6' | ||||
|     if: github.event.schedule == '15 0,12 * * 1-6' | ||||
|     with: | ||||
|       build-environment: linux-jammy-cuda12.8-py3.10-gcc9-sm90 | ||||
|       dashboard-tag: training-true-inference-true-default-true-dynamic-true-cudagraphs-true-cppwrapper-true-aotinductor-true-freezing_cudagraphs-true-cudagraphs_low_precision-true | ||||
|  | ||||
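Editor's note: the new schedule string above is standard five-field cron syntax (minute, hour, day-of-month, month, day-of-week, with 0 = Sunday in GitHub's convention), so "15 0,12 * * 1-6" fires at 00:15 and 12:15 UTC, Monday through Saturday. A tiny matcher, written only to illustrate the field semantics (not a full cron implementation):

from datetime import datetime

def field_matches(field: str, value: int) -> bool:
    # Supports "*", comma lists, and ranges; enough for the expression above.
    if field == "*":
        return True
    for part in field.split(","):
        if "-" in part:
            lo, hi = map(int, part.split("-"))
            if lo <= value <= hi:
                return True
        elif int(part) == value:
            return True
    return False

def cron_matches(expr: str, t: datetime) -> bool:
    minute, hour, dom, month, dow = expr.split()
    return (field_matches(minute, t.minute)
            and field_matches(hour, t.hour)
            and field_matches(dom, t.day)
            and field_matches(month, t.month)
            # Python's weekday() has Monday=0; cron uses Sunday=0.
            and field_matches(dow, (t.weekday() + 1) % 7))

assert cron_matches("15 0,12 * * 1-6", datetime(2025, 1, 6, 12, 15))      # Monday
assert not cron_matches("15 0,12 * * 1-6", datetime(2025, 1, 5, 12, 15))  # Sunday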
| @ -1,132 +0,0 @@ | ||||
| name: inductor-perf-nightly-rocm-mi300 | ||||
|  | ||||
| on: | ||||
|   push: | ||||
|     tags: | ||||
|       - ciflow/inductor-perf-test-nightly-rocm-mi300/* | ||||
|   schedule: | ||||
|     - cron: 15 0 * * * | ||||
|   # NB: GitHub has an upper limit of 10 inputs here, so before we can sort it | ||||
|   # out, let try to run torchao cudagraphs_low_precision as part of cudagraphs | ||||
|   workflow_dispatch: | ||||
|     inputs: | ||||
|       training: | ||||
|         description: Run training (on by default)? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: true | ||||
|       inference: | ||||
|         description: Run inference (on by default)? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: true | ||||
|       default: | ||||
|         description: Run inductor_default? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: false | ||||
|       dynamic: | ||||
|         description: Run inductor_dynamic_shapes? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: false | ||||
|       cppwrapper: | ||||
|         description: Run inductor_cpp_wrapper? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: false | ||||
|       cudagraphs: | ||||
|         description: Run inductor_cudagraphs? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: true | ||||
|       freezing_cudagraphs: | ||||
|         description: Run inductor_cudagraphs with freezing for inference? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: false | ||||
|       aotinductor: | ||||
|         description: Run aot_inductor for inference? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: false | ||||
|       maxautotune: | ||||
|         description: Run inductor_max_autotune? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: false | ||||
|       benchmark_configs: | ||||
|         description: The list of configs used the benchmark | ||||
|         required: false | ||||
|         type: string | ||||
|         default: inductor_huggingface_perf_rocm_mi300,inductor_timm_perf_rocm_mi300,inductor_torchbench_perf_rocm_mi300 | ||||
|  | ||||
| concurrency: | ||||
|   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }} | ||||
|   cancel-in-progress: true | ||||
|  | ||||
| permissions: read-all | ||||
|  | ||||
| jobs: | ||||
|   get-label-type: | ||||
|     name: get-label-type | ||||
|     uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main | ||||
|     if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }} | ||||
|     with: | ||||
|       triggering_actor: ${{ github.triggering_actor }} | ||||
|       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }} | ||||
|       curr_branch: ${{ github.head_ref || github.ref_name }} | ||||
|       curr_ref_type: ${{ github.ref_type }} | ||||
|       opt_out_experiments: lf | ||||
|  | ||||
|   linux-jammy-rocm-py3_10-inductor-benchmark-build: | ||||
|     if: github.repository_owner == 'pytorch' | ||||
|     name: rocm-py3_10-inductor-benchmark-build | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     with: | ||||
|       build-environment: linux-jammy-rocm-py3_10 | ||||
|       docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3-benchmarks | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "inductor_huggingface_perf_rocm_mi300", shard: 1, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_huggingface_perf_rocm_mi300", shard: 2, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_huggingface_perf_rocm_mi300", shard: 3, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_huggingface_perf_rocm_mi300", shard: 4, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_huggingface_perf_rocm_mi300", shard: 5, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi300", shard: 1, num_shards: 7, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi300", shard: 2, num_shards: 7, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi300", shard: 3, num_shards: 7, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi300", shard: 4, num_shards: 7, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi300", shard: 5, num_shards: 7, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi300", shard: 6, num_shards: 7, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi300", shard: 7, num_shards: 7, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi300", shard: 1, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi300", shard: 2, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi300", shard: 3, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi300", shard: 4, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi300", shard: 5, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi300", shard: 6, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi300", shard: 7, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi300", shard: 8, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi300", shard: 9, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|         ]} | ||||
|     secrets: inherit | ||||
|  | ||||
|   linux-jammy-rocm-py3_10-inductor-benchmark-test: | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     name: rocm-py3_10-inductor-benchmark-test | ||||
|     uses: ./.github/workflows/_rocm-test.yml | ||||
|     needs: linux-jammy-rocm-py3_10-inductor-benchmark-build | ||||
|     with: | ||||
|       build-environment: linux-jammy-rocm-py3_10 | ||||
|       dashboard-tag: training-true-inference-true-default-true-dynamic-true-cudagraphs-true-cppwrapper-true-aotinductor-true-freezing_cudagraphs-true-cudagraphs_low_precision-true | ||||
|       docker-image: ${{ needs.linux-jammy-rocm-py3_10-inductor-benchmark-build.outputs.docker-image }} | ||||
|       test-matrix: ${{ needs.linux-jammy-rocm-py3_10-inductor-benchmark-build.outputs.test-matrix }} | ||||
|       timeout-minutes: 720 | ||||
|       # Disable monitor in perf tests pending further investigation | ||||
|       disable-monitor: true | ||||
|       monitor-log-interval: 10 | ||||
|       monitor-data-collect-interval: 2 | ||||
|     secrets: inherit | ||||
| @ -1,11 +1,11 @@ | ||||
| name: inductor-perf-nightly-rocm-mi355 | ||||
| name: inductor-perf-nightly-rocm | ||||
|  | ||||
| on: | ||||
|   push: | ||||
|     tags: | ||||
|       - ciflow/inductor-perf-test-nightly-rocm-mi355/* | ||||
|       - ciflow/inductor-perf-test-nightly-rocm/* | ||||
|   schedule: | ||||
|     - cron: 15 0 * * * | ||||
|     - cron: 0 7 * * 0,3 | ||||
|   # NB: GitHub has an upper limit of 10 inputs here, so until we can sort that | ||||
|   # out, let's try to run torchao cudagraphs_low_precision as part of cudagraphs | ||||
|   workflow_dispatch: | ||||
| @ -59,7 +59,7 @@ on: | ||||
|         description: The list of configs used by the benchmark | ||||
|         required: false | ||||
|         type: string | ||||
|         default: inductor_huggingface_perf_rocm_mi355,inductor_timm_perf_rocm_mi355,inductor_torchbench_perf_rocm_mi355 | ||||
|         default: inductor_huggingface_perf_rocm,inductor_timm_perf_rocm,inductor_torchbench_perf_rocm | ||||
|  | ||||
| concurrency: | ||||
|   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }} | ||||
| @ -88,27 +88,23 @@ jobs: | ||||
|       docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3-benchmarks | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "inductor_huggingface_perf_rocm_mi355", shard: 1, num_shards: 5, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_huggingface_perf_rocm_mi355", shard: 2, num_shards: 5, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_huggingface_perf_rocm_mi355", shard: 3, num_shards: 5, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_huggingface_perf_rocm_mi355", shard: 4, num_shards: 5, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_huggingface_perf_rocm_mi355", shard: 5, num_shards: 5, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi355", shard: 1, num_shards: 7, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi355", shard: 2, num_shards: 7, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi355", shard: 3, num_shards: 7, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi355", shard: 4, num_shards: 7, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi355", shard: 5, num_shards: 7, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi355", shard: 6, num_shards: 7, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi355", shard: 7, num_shards: 7, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi355", shard: 1, num_shards: 9, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi355", shard: 2, num_shards: 9, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi355", shard: 3, num_shards: 9, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi355", shard: 4, num_shards: 9, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi355", shard: 5, num_shards: 9, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi355", shard: 6, num_shards: 9, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi355", shard: 7, num_shards: 9, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi355", shard: 8, num_shards: 9, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi355", shard: 9, num_shards: 9, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_huggingface_perf_rocm", shard: 1, num_shards: 4, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_huggingface_perf_rocm", shard: 2, num_shards: 4, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_huggingface_perf_rocm", shard: 3, num_shards: 4, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_huggingface_perf_rocm", shard: 4, num_shards: 4, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_timm_perf_rocm", shard: 1, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_timm_perf_rocm", shard: 2, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_timm_perf_rocm", shard: 3, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_timm_perf_rocm", shard: 4, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_timm_perf_rocm", shard: 5, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm", shard: 1, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm", shard: 2, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm", shard: 3, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm", shard: 4, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm", shard: 5, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm", shard: 6, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm", shard: 7, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm", shard: 8, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|         ]} | ||||
|     secrets: inherit | ||||
|  | ||||
.github/workflows/lint.yml (11 changes, vendored)
| @ -12,7 +12,6 @@ on: | ||||
|       - landchecks/* | ||||
|     tags: | ||||
|       - ciflow/pull/* | ||||
|       - ciflow/trunk/* | ||||
|   workflow_dispatch: | ||||
|  | ||||
| permissions: read-all | ||||
| @ -33,12 +32,10 @@ jobs: | ||||
|     name: Get changed files | ||||
|     uses: ./.github/workflows/_get-changed-files.yml | ||||
|     with: | ||||
|       all_files: ${{ contains(github.event.pull_request.labels.*.name, 'lint-all-files') || contains(github.event.pull_request.labels.*.name, 'Reverted') || github.event_name == 'push' }} | ||||
|       all_files: ${{ contains(github.event.pull_request.labels.*.name, 'lint-all-files') || contains(github.event.pull_request.labels.*.name, 'Reverted') }} | ||||
|  | ||||
|   lintrunner-clang: | ||||
|     uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main | ||||
|     # Needed to prevent deduping on HUD | ||||
|     name: lintrunner-clang-${{ needs.get-changed-files.outputs.changed-files == '*' && 'all' || 'partial' }} | ||||
|     needs: [get-label-type, get-changed-files] | ||||
|     # Only run if there are changed files relevant to clangtidy / clangformat | ||||
|     if: | | ||||
| @ -78,7 +75,6 @@ jobs: | ||||
|   #       fails to find types when it should | ||||
|   lintrunner-mypy: | ||||
|     uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main | ||||
|     name: lintrunner-mypy-${{ needs.get-changed-files.outputs.changed-files == '*' && 'all' || 'partial' }} | ||||
|     needs: [get-label-type, get-changed-files] | ||||
|     # Only run if there are changed files relevant to mypy | ||||
|     if: | | ||||
| @ -103,7 +99,6 @@ jobs: | ||||
|  | ||||
|   lintrunner-noclang: | ||||
|     uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main | ||||
|     name: lintrunner-noclang-${{ needs.get-changed-files.outputs.changed-files == '*' && 'all' || 'partial' }} | ||||
|     needs: [get-label-type, get-changed-files] | ||||
|     with: | ||||
|       timeout: 120 | ||||
| @ -118,9 +113,9 @@ jobs: | ||||
|         CHANGED_FILES="${{ needs.get-changed-files.outputs.changed-files }}" | ||||
|         echo "Running all other linters" | ||||
|         if [ "$CHANGED_FILES" = '*' ]; then | ||||
|           ADDITIONAL_LINTRUNNER_ARGS="--skip CLANGTIDY,CLANGFORMAT,MYPY,MYPYSTRICT,PYREFLY --all-files" .github/scripts/lintrunner.sh | ||||
|           ADDITIONAL_LINTRUNNER_ARGS="--skip CLANGTIDY,CLANGFORMAT,MYPY,MYPYSTRICT --all-files" .github/scripts/lintrunner.sh | ||||
|         else | ||||
|           ADDITIONAL_LINTRUNNER_ARGS="--skip CLANGTIDY,CLANGFORMAT,MYPY,MYPYSTRICT,PYREFLY ${CHANGED_FILES}" .github/scripts/lintrunner.sh | ||||
|           ADDITIONAL_LINTRUNNER_ARGS="--skip CLANGTIDY,CLANGFORMAT,MYPY,MYPYSTRICT ${CHANGED_FILES}" .github/scripts/lintrunner.sh | ||||
|         fi | ||||
|  | ||||
|   quick-checks: | ||||
|  | ||||
.github/workflows/operator_benchmark.yml (49 changes, vendored)
| @ -7,11 +7,9 @@ on: | ||||
|   workflow_dispatch: | ||||
|     inputs: | ||||
|       test_mode: | ||||
|         type: choice | ||||
|         options: | ||||
|           - 'short' | ||||
|           - 'long' | ||||
|           - 'all' | ||||
|         required: false | ||||
|         type: string | ||||
|         default: 'short' | ||||
|         description: tag filter for operator benchmarks; options are long, short, all | ||||
|   schedule: | ||||
|     # Run at 07:00 UTC every Sunday | ||||
| @ -30,49 +28,38 @@ permissions: | ||||
|   contents: read | ||||
|  | ||||
| jobs: | ||||
|   x86-opbenchmark-build: | ||||
|   opbenchmark-build: | ||||
|     if: github.repository_owner == 'pytorch' | ||||
|     name: x86-opbenchmark-build | ||||
|     name: opbenchmark-build | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     with: | ||||
|       build-environment: linux-jammy-py3.10-gcc11-build | ||||
|       docker-image-name: ci-image:pytorch-linux-jammy-py3-gcc11-inductor-benchmarks | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "cpu_operator_benchmark_${{ inputs.test_mode || 'short' }}", shard: 1, num_shards: 1, runner: "linux.12xlarge" }, | ||||
|           { config: "cpu_operator_benchmark_short", shard: 1, num_shards: 1, runner: "linux.12xlarge" }, | ||||
|         ]} | ||||
|     secrets: inherit | ||||
|  | ||||
|   x86-opbenchmark-test: | ||||
|     name: x86-opbenchmark-test | ||||
|     uses: ./.github/workflows/_linux-test.yml | ||||
|     needs: x86-opbenchmark-build | ||||
|     with: | ||||
|       build-environment: linux-jammy-py3.10-gcc11-build | ||||
|       docker-image: ${{ needs.x86-opbenchmark-build.outputs.docker-image }} | ||||
|       test-matrix: ${{ needs.x86-opbenchmark-build.outputs.test-matrix }} | ||||
|     secrets: inherit | ||||
|  | ||||
|   aarch64-opbenchmark-build: | ||||
|     if: github.repository_owner == 'pytorch' | ||||
|     name: aarch64-opbenchmark-build | ||||
|   opbenchmark-on-demand-build: | ||||
|     if: ${{ github.event_name == 'workflow_dispatch' && github.repository_owner == 'pytorch' }} | ||||
|     name: opbenchmark-on-demand-build | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     with: | ||||
|       build-environment: linux-jammy-aarch64-py3.10 | ||||
|       runner: linux.arm64.m7g.4xlarge | ||||
|       docker-image-name: ci-image:pytorch-linux-jammy-aarch64-py3.10-gcc11 | ||||
|       build-environment: linux-jammy-py3.10-gcc11-build | ||||
|       docker-image-name: ci-image:pytorch-linux-jammy-py3-gcc11-inductor-benchmarks | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "cpu_operator_benchmark_short", shard: 1, num_shards: 1, runner: "linux.arm64.m8g.4xlarge" }, | ||||
|           { config: "cpu_operator_benchmark_${{ inputs.test_mode }}", shard: 1, num_shards: 1, runner: "linux.12xlarge" }, | ||||
|         ]} | ||||
|     secrets: inherit | ||||
|  | ||||
|   aarch64-opbenchmark-test: | ||||
|     name: aarch64-opbenchmark-test | ||||
|   opbenchmark-test: | ||||
|     name: opbenchmark-test | ||||
|     uses: ./.github/workflows/_linux-test.yml | ||||
|     needs: aarch64-opbenchmark-build | ||||
|     needs: opbenchmark-build | ||||
|     with: | ||||
|       build-environment: linux-jammy-aarch64-py3.10 | ||||
|       docker-image: ${{ needs.aarch64-opbenchmark-build.outputs.docker-image }} | ||||
|       test-matrix: ${{ needs.aarch64-opbenchmark-build.outputs.test-matrix }} | ||||
|       build-environment: linux-jammy-py3.10-gcc11-build | ||||
|       docker-image: ${{ needs.opbenchmark-build.outputs.docker-image }} | ||||
|       test-matrix: ${{ needs.opbenchmark-build.outputs.test-matrix }} | ||||
|     secrets: inherit | ||||
|  | ||||
.github/workflows/periodic.yml (10 changes, vendored)
| @ -182,11 +182,11 @@ jobs: | ||||
|       docker-image-name: ci-image:pytorch-linux-jammy-cuda13.0-cudnn9-py3-gcc11 | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "nogpu_AVX512", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" }, | ||||
|           { config: "nogpu_AVX512", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" }, | ||||
|           { config: "nogpu_AVX512", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" }, | ||||
|           { config: "nogpu_NO_AVX2", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" }, | ||||
|           { config: "nogpu_NO_AVX2", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" }, | ||||
|           { config: "nogpu_AVX512", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g4dn.4xlarge.nvidia.gpu" }, | ||||
|           { config: "nogpu_AVX512", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g4dn.4xlarge.nvidia.gpu" }, | ||||
|           { config: "nogpu_AVX512", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g4dn.4xlarge.nvidia.gpu" }, | ||||
|           { config: "nogpu_NO_AVX2", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g4dn.4xlarge.nvidia.gpu" }, | ||||
|           { config: "nogpu_NO_AVX2", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g4dn.4xlarge.nvidia.gpu" }, | ||||
|           { config: "jit_legacy", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g4dn.4xlarge.nvidia.gpu" }, | ||||
|         ]} | ||||
|     secrets: inherit | ||||
|  | ||||
.github/workflows/rocm-mi355.yml (12 changes, vendored)
| @ -45,12 +45,12 @@ jobs: | ||||
|       sync-tag: rocm-build | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 6, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "default", shard: 2, num_shards: 6, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "default", shard: 3, num_shards: 6, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "default", shard: 4, num_shards: 6, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "default", shard: 5, num_shards: 6, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "default", shard: 6, num_shards: 6, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "default", shard: 1, num_shards: 6, runner: "linux.rocm.gpu.mi355.2" }, | ||||
|           { config: "default", shard: 2, num_shards: 6, runner: "linux.rocm.gpu.mi355.2" }, | ||||
|           { config: "default", shard: 3, num_shards: 6, runner: "linux.rocm.gpu.mi355.2" }, | ||||
|           { config: "default", shard: 4, num_shards: 6, runner: "linux.rocm.gpu.mi355.2" }, | ||||
|           { config: "default", shard: 5, num_shards: 6, runner: "linux.rocm.gpu.mi355.2" }, | ||||
|           { config: "default", shard: 6, num_shards: 6, runner: "linux.rocm.gpu.mi355.2" }, | ||||
|         ]} | ||||
|     secrets: inherit | ||||
|  | ||||
|  | ||||
.github/workflows/rocm-navi31.yml (63 changes, vendored)
| @ -1,63 +0,0 @@ | ||||
| name: rocm-navi31 | ||||
|  | ||||
| on: | ||||
|   push: | ||||
|     tags: | ||||
|       - ciflow/rocm-navi31/* | ||||
|   workflow_dispatch: | ||||
|   schedule: | ||||
|     # We have several schedules so jobs can check github.event.schedule to activate only for a fraction of the runs. | ||||
|     # Also run less frequently on weekends. | ||||
|     - cron: 45 */2 * * 1-5 | ||||
|     - cron: 45 4,12 * * 0,6 | ||||
|  | ||||
| concurrency: | ||||
|   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }} | ||||
|   cancel-in-progress: true | ||||
|  | ||||
| permissions: read-all | ||||
|  | ||||
| jobs: | ||||
|   target-determination: | ||||
|     if: github.repository_owner == 'pytorch' | ||||
|     name: before-test | ||||
|     uses: ./.github/workflows/target_determination.yml | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|  | ||||
|   linux-jammy-rocm-py3_10-build: | ||||
|     if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }} | ||||
|     name: linux-jammy-rocm-py3.10 | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     with: | ||||
|       build-environment: linux-jammy-rocm-py3.10 | ||||
|       docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3 | ||||
|       sync-tag: rocm-build | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 2, runner: "linux.rocm.gpu.gfx1100" }, | ||||
|           { config: "default", shard: 2, num_shards: 2, runner: "linux.rocm.gpu.gfx1100" }, | ||||
|         ]} | ||||
|     secrets: inherit | ||||
|  | ||||
|   linux-jammy-rocm-py3_10-test: | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     name: linux-jammy-rocm-py3_10 | ||||
|     uses: ./.github/workflows/_rocm-test.yml | ||||
|     needs: | ||||
|       - linux-jammy-rocm-py3_10-build | ||||
|       - target-determination | ||||
|     with: | ||||
|       build-environment: linux-jammy-rocm-py3.10 | ||||
|       docker-image: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.docker-image }} | ||||
|       test-matrix: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.test-matrix }} | ||||
|       tests-to-include: >- | ||||
|          ${{ github.event_name == 'schedule' && 'test_nn test_torch test_cuda test_ops test_unary_ufuncs test_binary_ufuncs | ||||
|          test_autograd inductor/test_torchinductor inductor/test_kernel_benchmark | ||||
|          inductor/test_pad_mm inductor/test_benchmark_fusion inductor/test_aot_inductor | ||||
|          inductor/test_torchinductor inductor/test_decompose_mem_bound_mm | ||||
|          inductor/test_flex_attention inductor/test_max_autotune' || '' }} | ||||
|     secrets: inherit | ||||
.github/workflows/rocm.yml (26 changes, vendored)
| @ -59,3 +59,29 @@ jobs: | ||||
|       docker-image: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.docker-image }} | ||||
|       test-matrix: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.test-matrix }} | ||||
|     secrets: inherit | ||||
|  | ||||
|   linux-jammy-rocm-py3_10-gfx1100-test: | ||||
|     if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     name: linux-jammy-rocm-py3_10-gfx1100 | ||||
|     uses: ./.github/workflows/_rocm-test.yml | ||||
|     needs: | ||||
|       - linux-jammy-rocm-py3_10-build | ||||
|       - target-determination | ||||
|     with: | ||||
|       build-environment: linux-jammy-rocm-py3.10 | ||||
|       docker-image: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.docker-image }} | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 2, runner: "linux.rocm.gpu.gfx1100" }, | ||||
|           { config: "default", shard: 2, num_shards: 2, runner: "linux.rocm.gpu.gfx1100" }, | ||||
|         ]} | ||||
|       tests-to-include: > | ||||
|          test_nn test_torch test_cuda test_ops test_unary_ufuncs test_binary_ufuncs | ||||
|          test_autograd inductor/test_torchinductor inductor/test_kernel_benchmark | ||||
|          inductor/test_pad_mm inductor/test_benchmark_fusion inductor/test_aot_inductor | ||||
|          inductor/test_torchinductor inductor/test_decompose_mem_bound_mm | ||||
|          inductor/test_flex_attention inductor/test_max_autotune | ||||
|     secrets: inherit | ||||
|  | ||||
.github/workflows/trunk.yml (59 changes, vendored)
| @ -180,50 +180,16 @@ jobs: | ||||
|       disable-monitor: false | ||||
|     secrets: inherit | ||||
|  | ||||
|   win-vs2022-cuda12_8-py3-build: | ||||
|     name: win-vs2022-cuda12.8-py3 | ||||
|   win-vs2022-cuda12_6-py3-build: | ||||
|     name: win-vs2022-cuda12.6-py3 | ||||
|     uses: ./.github/workflows/_win-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       build-environment: win-vs2022-cuda12.8-py3 | ||||
|       cuda-version: "12.8" | ||||
|       build-environment: win-vs2022-cuda12.6-py3 | ||||
|       cuda-version: "12.6" | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" | ||||
|     secrets: inherit | ||||
|  | ||||
|   linux-jammy-rocm-py3_10-build: | ||||
|     if: ${{ startsWith(github.event.ref, 'refs/tags/ciflow/trunk') }} | ||||
|     name: linux-jammy-rocm-py3.10 | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build-environment: linux-jammy-rocm-py3.10 | ||||
|       docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3 | ||||
|       sync-tag: rocm-build | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 2, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "default", shard: 2, num_shards: 2, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|         ]} | ||||
|     secrets: inherit | ||||
|  | ||||
|   linux-jammy-rocm-py3_10-test: | ||||
|     if: ${{ startsWith(github.event.ref, 'refs/tags/ciflow/trunk') }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     name: linux-jammy-rocm-py3.10 | ||||
|     uses: ./.github/workflows/_rocm-test.yml | ||||
|     needs: | ||||
|       - linux-jammy-rocm-py3_10-build | ||||
|       - target-determination | ||||
|     with: | ||||
|       build-environment: linux-jammy-rocm-py3.10 | ||||
|       docker-image: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.docker-image }} | ||||
|       test-matrix: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.test-matrix }} | ||||
|       tests-to-include: "test_nn test_torch test_cuda test_ops test_unary_ufuncs test_binary_ufuncs test_autograd inductor/test_torchinductor" | ||||
|     secrets: inherit | ||||
|  | ||||
|   inductor-build: | ||||
|     name: inductor-build | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
| @ -234,23 +200,6 @@ jobs: | ||||
|       cuda-arch-list: '8.0' | ||||
|     secrets: inherit | ||||
|  | ||||
|   # Test cross-compiled models with Windows libs extracted from wheel | ||||
|   cross-compile-linux-test: | ||||
|     name: cross-compile-linux-test | ||||
|     uses: ./.github/workflows/_linux-test.yml | ||||
|     needs: | ||||
|       - linux-jammy-cuda12_8-py3_10-gcc11-build | ||||
|       - get-label-type | ||||
|       - win-vs2022-cuda12_8-py3-build | ||||
|     with: | ||||
|       build-environment: linux-jammy-cuda12.8-py3.10-gcc11 | ||||
|       docker-image: ${{ needs.linux-jammy-cuda12_8-py3_10-gcc11-build.outputs.docker-image }} | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "aoti_cross_compile_for_windows", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g6.4xlarge.experimental.nvidia.gpu", win_torch_wheel_artifact: "win-vs2022-cuda12.8-py3" }, | ||||
|         ]} | ||||
|     secrets: inherit | ||||
|  | ||||
|   verify-cachebench-cpu-build: | ||||
|     name: verify-cachebench-cpu-build | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|  | ||||
.github/workflows/vllm.yml (4 changes, vendored)
| @ -46,7 +46,7 @@ jobs: | ||||
|       runner: linux.24xlarge.memory | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "vllm_basic_correctness_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" }, | ||||
|           { config:  "vllm_basic_correctness_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" }, | ||||
|           { config: "vllm_basic_models_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" }, | ||||
|           { config: "vllm_entrypoints_test", shard: 1, num_shards: 1,runner: "linux.g6.4xlarge.experimental.nvidia.gpu" }, | ||||
|           { config: "vllm_regression_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" }, | ||||
| @ -54,7 +54,7 @@ jobs: | ||||
|           { config: "vllm_pytorch_compilation_unit_tests", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" }, | ||||
|           { config: "vllm_lora_28_failure_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" }, | ||||
|           { config: "vllm_multi_model_test_28_failure_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu"}, | ||||
|           { config: "vllm_language_model_test_extended_generation_28_failure_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu"}, | ||||
|           { config: "vllm_languagde_model_test_extended_generation_28_failure_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu"}, | ||||
|           { config: "vllm_distributed_test_2_gpu_28_failure_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" }, | ||||
|           { config: "vllm_lora_test", shard: 0, num_shards: 4, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" }, | ||||
|           { config: "vllm_lora_test", shard: 1, num_shards: 4, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" }, | ||||
|  | ||||
.gitignore (2 changes, vendored)
| @ -374,7 +374,6 @@ third_party/ruy/ | ||||
| third_party/glog/ | ||||
|  | ||||
| # Virtualenv | ||||
| .venv/ | ||||
| venv/ | ||||
|  | ||||
| # Log files | ||||
| @ -396,4 +395,3 @@ android/pytorch_android_torchvision/.cxx | ||||
| CLAUDE.local.md | ||||
| /test_*.py | ||||
| /debug_*.py | ||||
| CLAUDE_CONTEXT/ | ||||
|  | ||||
| @ -209,46 +209,6 @@ command = [ | ||||
|     '@{{PATHSFILE}}' | ||||
| ] | ||||
|  | ||||
|  | ||||
| [[linter]] | ||||
| code = 'PYREFLY' | ||||
| include_patterns = [ | ||||
|     'torch/**/*.py', | ||||
|     'torch/**/*.pyi', | ||||
|     'torchgen/**/*.py', | ||||
|     'torchgen/**/*.pyi', | ||||
|     'functorch/**/*.py', | ||||
|     'functorch/**/*.pyi', | ||||
| ] | ||||
| exclude_patterns = [] | ||||
| command = [ | ||||
|     'python3', | ||||
|     'tools/linter/adapters/pyrefly_linter.py', | ||||
|     '--config=pyrefly.toml', | ||||
| ] | ||||
| init_command = [ | ||||
|     'python3', | ||||
|     'tools/linter/adapters/pip_init.py', | ||||
|     '--dry-run={{DRYRUN}}', | ||||
|     'numpy==2.1.0 ; python_version >= "3.12"', | ||||
|     'expecttest==0.3.0', | ||||
|     'pyrefly==0.36.2', | ||||
|     'sympy==1.13.3', | ||||
|     'types-requests==2.27.25', | ||||
|     'types-pyyaml==6.0.2', | ||||
|     'types-tabulate==0.8.8', | ||||
|     'types-protobuf==5.29.1.20250403', | ||||
|     'types-setuptools==79.0.0.20250422', | ||||
|     'types-jinja2==2.11.9', | ||||
|     'types-colorama==0.4.6', | ||||
|     'filelock==3.18.0', | ||||
|     'junitparser==2.1.1', | ||||
|     'rich==14.1.0', | ||||
|     'optree==0.17.0', | ||||
|     'types-openpyxl==3.1.5.20250919', | ||||
|     'types-python-dateutil==2.9.0.20251008' | ||||
| ] | ||||
|  | ||||
| [[linter]] | ||||
| code = 'CLANGTIDY' | ||||
| include_patterns = [ | ||||
|  | ||||
CODEOWNERS (14 changes)
| @ -201,17 +201,3 @@ torch/backends/cudnn/ @eqy @syed-ahmed @Aidyn-A | ||||
| /torch/csrc/stable/ @janeyx99 @mikaylagawarecki | ||||
| /torch/headeronly/ @janeyx99 | ||||
| /torch/header_only_apis.txt @janeyx99 | ||||
|  | ||||
| # FlexAttention | ||||
| /torch/nn/attention/flex_attention.py @drisspg | ||||
| /torch/_higher_order_ops/flex_attention.py @drisspg | ||||
| /torch/_inductor/kernel/flex/ @drisspg | ||||
| /torch/_inductor/codegen/cpp_flex_attention_template.py @drisspg | ||||
| /test/inductor/test_flex_attention.py @drisspg | ||||
| /test/inductor/test_flex_decoding.py @drisspg | ||||
|  | ||||
| # Low Precision GEMMs | ||||
| /aten/src/ATen/native/cuda/Blas.cpp @drisspg @slayton58 | ||||
| /aten/src/ATen/cuda/CUDABlas.cpp @drisspg @slayton58 | ||||
| /aten/src/ATen/cuda/CUDABlas.h @drisspg @slayton58 | ||||
| /test/test_scaled_matmul_cuda.py @drisspg @slayton58 | ||||
|  | ||||
| @ -256,7 +256,6 @@ endif() | ||||
| IF(USE_FBGEMM_GENAI) | ||||
|   set(FBGEMM_THIRD_PARTY ${PROJECT_SOURCE_DIR}/third_party/fbgemm/external/) | ||||
|   set(FBGEMM_GENAI_SRCS ${PROJECT_SOURCE_DIR}/third_party/fbgemm/fbgemm_gpu/experimental/gen_ai/src/quantize) | ||||
|  | ||||
|   if(USE_CUDA) | ||||
|     # To avoid increasing the build time/binary size unnecessarily, use an allow-list of kernels to build. | ||||
|     # If you want to integrate a kernel from FBGEMM into torch, you have to add it here. | ||||
| @ -293,65 +292,58 @@ IF(USE_FBGEMM_GENAI) | ||||
|       "${FBGEMM_GENAI_SRCS}/cutlass_extensions/mx8mx8bf16_grouped/" | ||||
|     ) | ||||
|  | ||||
|     target_include_directories(fbgemm_genai PRIVATE | ||||
|     target_include_directories(fbgemm_genai PUBLIC | ||||
|       ${FBGEMM_THIRD_PARTY}/cutlass/include | ||||
|       ${FBGEMM_THIRD_PARTY}/cutlass/tools/util/include | ||||
|       ${fbgemm_genai_mx8mx8bf16_grouped} | ||||
|       ${FBGEMM_GENAI_SRCS}/common/include/   # includes fbgemm_gpu/quantize/utils.h, fbgemm_gpu/quantize/tuning_cache.hpp | ||||
|       ${FBGEMM_GENAI_SRCS}/include/          # includes fbgemm_gpu/torch_ops.h | ||||
|     ) | ||||
|   else() | ||||
|     if(USE_ROCM) | ||||
|       # Only include the kernels we want to build to avoid increasing binary size. | ||||
|       file(GLOB_RECURSE fbgemm_genai_native_rocm_hip | ||||
|         "${FBGEMM_GENAI_SRCS}/ck_extensions/fp8_rowwise_grouped/kernels/fp8_rowwise_grouped*.hip" | ||||
|         "${FBGEMM_GENAI_SRCS}/ck_extensions/fp8_rowwise_grouped/fp8_rowwise_grouped_gemm.hip") | ||||
|       set_source_files_properties(${fbgemm_genai_native_rocm_hip} PROPERTIES HIP_SOURCE_PROPERTY_FORMAT 1) | ||||
|  | ||||
|     # Add FBGEMM_GENAI include directories for torch_ops.h | ||||
|     list(APPEND ATen_CUDA_INCLUDE ${PROJECT_SOURCE_DIR}/third_party/fbgemm/fbgemm_gpu/experimental/gen_ai/src/quantize/include) | ||||
|     list(APPEND ATen_CUDA_INCLUDE ${PROJECT_SOURCE_DIR}/third_party/fbgemm/fbgemm_gpu/experimental/gen_ai/src/quantize/common/include) | ||||
|   elseif(USE_ROCM) | ||||
|     # Only include the kernels we want to build to avoid increasing binary size. | ||||
|     file(GLOB_RECURSE fbgemm_genai_native_rocm_hip | ||||
|       "${FBGEMM_GENAI_SRCS}/ck_extensions/fp8_rowwise_grouped/kernels/fp8_rowwise_grouped*.hip" | ||||
|       "${FBGEMM_GENAI_SRCS}/ck_extensions/fp8_rowwise_grouped/fp8_rowwise_grouped_gemm.hip") | ||||
|     set_source_files_properties(${fbgemm_genai_native_rocm_hip} PROPERTIES HIP_SOURCE_PROPERTY_FORMAT 1) | ||||
|       # Add additional HIPCC compiler flags for performance | ||||
|       set(FBGEMM_GENAI_EXTRA_HIPCC_FLAGS | ||||
|         -mllvm | ||||
|         -amdgpu-coerce-illegal-types=1 | ||||
|         -mllvm | ||||
|         -enable-post-misched=0 | ||||
|         -mllvm | ||||
|         -greedy-reverse-local-assignment=1 | ||||
|         -fhip-new-launch-api) | ||||
|  | ||||
|     # Add additional HIPCC compiler flags for performance | ||||
|     set(FBGEMM_GENAI_EXTRA_HIPCC_FLAGS | ||||
|       -mllvm | ||||
|       -enable-post-misched=0 | ||||
|       -mllvm | ||||
|       -greedy-reverse-local-assignment=1 | ||||
|       -fhip-new-launch-api) | ||||
|     if(DEFINED ROCM_VERSION_DEV AND ROCM_VERSION_DEV VERSION_LESS "7.2.0") | ||||
|         list(PREPEND FBGEMM_GENAI_EXTRA_HIPCC_FLAGS -mllvm -amdgpu-coerce-illegal-types=1) | ||||
|       # Only compile for gfx942 for now. | ||||
|       # This is rather hacky, I could not figure out a clean solution :( | ||||
|       set(HIP_CLANG_FLAGS_ORIGINAL ${HIP_CLANG_FLAGS}) | ||||
|       string(REGEX REPLACE "--offload-arch=[^ ]*" "" FILTERED_HIP_CLANG_FLAGS "${HIP_CLANG_FLAGS}") | ||||
|       if("gfx942" IN_LIST PYTORCH_ROCM_ARCH) | ||||
|         list(APPEND FILTERED_HIP_CLANG_FLAGS --offload-arch=gfx942;) | ||||
|       endif() | ||||
|       set(HIP_CLANG_FLAGS ${FILTERED_HIP_CLANG_FLAGS}) | ||||
|  | ||||
|     # Only compile for gfx942 for now. | ||||
|     # This is rather hacky, I could not figure out a clean solution :( | ||||
|     set(HIP_CLANG_FLAGS_ORIGINAL ${HIP_CLANG_FLAGS}) | ||||
|     string(REGEX REPLACE "--offload-arch=[^ ]*" "" FILTERED_HIP_CLANG_FLAGS "${HIP_CLANG_FLAGS}") | ||||
|     if("gfx942" IN_LIST PYTORCH_ROCM_ARCH) | ||||
|       list(APPEND FILTERED_HIP_CLANG_FLAGS --offload-arch=gfx942;) | ||||
|       hip_add_library( | ||||
|         fbgemm_genai STATIC | ||||
|         ${fbgemm_genai_native_rocm_hip} | ||||
|         HIPCC_OPTIONS ${HIP_HCC_FLAGS} ${FBGEMM_GENAI_EXTRA_HIPCC_FLAGS}) | ||||
|       set(HIP_CLANG_FLAGS ${HIP_CLANG_FLAGS_ORIGINAL}) | ||||
|       set_target_properties(fbgemm_genai PROPERTIES POSITION_INDEPENDENT_CODE ON) | ||||
|       target_compile_definitions(fbgemm_genai PRIVATE FBGEMM_GENAI_NO_EXTENDED_SHAPES) | ||||
|  | ||||
|       target_include_directories(fbgemm_genai PUBLIC | ||||
|         # FBGEMM version of Composable Kernel is used due to some customizations | ||||
|         ${FBGEMM_THIRD_PARTY}/composable_kernel/include | ||||
|         ${FBGEMM_THIRD_PARTY}/composable_kernel/library/include | ||||
|         ${FBGEMM_THIRD_PARTY}/cutlass/include | ||||
|         ${FBGEMM_THIRD_PARTY}/cutlass/tools/util/include | ||||
|         ${FBGEMM_GENAI_SRCS}/common/include/   # includes fbgemm_gpu/quantize/utils.h, fbgemm_gpu/quantize/tuning_cache.hpp | ||||
|         ${FBGEMM_GENAI_SRCS}/include/          # includes fbgemm_gpu/torch_ops.h | ||||
|       ) | ||||
|     endif() | ||||
|     set(HIP_CLANG_FLAGS ${FILTERED_HIP_CLANG_FLAGS}) | ||||
|  | ||||
|     hip_add_library( | ||||
|       fbgemm_genai STATIC | ||||
|       ${fbgemm_genai_native_rocm_hip} | ||||
|       HIPCC_OPTIONS ${HIP_HCC_FLAGS} ${FBGEMM_GENAI_EXTRA_HIPCC_FLAGS}) | ||||
|     set(HIP_CLANG_FLAGS ${HIP_CLANG_FLAGS_ORIGINAL}) | ||||
|     set_target_properties(fbgemm_genai PROPERTIES POSITION_INDEPENDENT_CODE ON) | ||||
|     target_compile_definitions(fbgemm_genai PRIVATE FBGEMM_GENAI_NO_EXTENDED_SHAPES) | ||||
|  | ||||
|     target_include_directories(fbgemm_genai PRIVATE | ||||
|       # FBGEMM version of Composable Kernel is used due to some customizations | ||||
|       ${FBGEMM_THIRD_PARTY}/composable_kernel/include | ||||
|       ${FBGEMM_THIRD_PARTY}/composable_kernel/library/include | ||||
|       ${FBGEMM_THIRD_PARTY}/cutlass/include | ||||
|       ${FBGEMM_THIRD_PARTY}/cutlass/tools/util/include | ||||
|       ${FBGEMM_GENAI_SRCS}/common/include/   # includes fbgemm_gpu/quantize/utils.h, fbgemm_gpu/quantize/tuning_cache.hpp | ||||
|       ${FBGEMM_GENAI_SRCS}/include/          # includes fbgemm_gpu/torch_ops.h | ||||
|     ) | ||||
|  | ||||
|     # Add FBGEMM_GENAI include directories for torch_ops.h | ||||
|     list(APPEND ATen_HIP_INCLUDE ${PROJECT_SOURCE_DIR}/third_party/fbgemm/fbgemm_gpu/experimental/gen_ai/src/quantize/include) | ||||
|     list(APPEND ATen_HIP_INCLUDE ${PROJECT_SOURCE_DIR}/third_party/fbgemm/fbgemm_gpu/experimental/gen_ai/src/quantize/common/include) | ||||
|   endif() | ||||
| endif() | ||||
|  | ||||
| @ -700,6 +692,12 @@ if(USE_CUDA AND NOT USE_ROCM) | ||||
|   list(APPEND ATen_CUDA_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/../../../third_party/cutlass/include) | ||||
|   list(APPEND ATen_CUDA_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/../../../third_party/cutlass/tools/util/include) | ||||
|  | ||||
|   # Add FBGEMM_GENAI include directories for torch_ops.h | ||||
|   if(USE_FBGEMM_GENAI) | ||||
|     list(APPEND ATen_CUDA_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/../../../third_party/fbgemm/fbgemm_gpu/experimental/gen_ai/src/quantize/include) | ||||
|     list(APPEND ATen_CUDA_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/../../../third_party/fbgemm/fbgemm_gpu/experimental/gen_ai/src/quantize/common/include) | ||||
|   endif() | ||||
|  | ||||
|   if($ENV{ATEN_STATIC_CUDA}) | ||||
|     if(CUDA_VERSION VERSION_LESS_EQUAL 12.9) | ||||
|       list(APPEND ATen_CUDA_DEPENDENCY_LIBS | ||||
|  | ||||
| @ -389,16 +389,37 @@ void fillVersion<DLManagedTensorVersioned>( | ||||
| // constructed out of ATen tensor | ||||
| template <class T> | ||||
| T* toDLPackImpl(const Tensor& src) { | ||||
|   auto view = src; | ||||
|  | ||||
|   // Detect whether there is need to normalize the strides | ||||
|   // Background: gh-83069 | ||||
|   // | ||||
|   // However, normalizing strides is costly (it can slow the | ||||
|   // toDLPack conversion down by ~3x), so we | ||||
|   // only normalize when needed. | ||||
|   // | ||||
|   // The following code detects whether src follows | ||||
|   // a contiguous pattern. If it does (the common case), | ||||
|   // we do not need to normalize the strides. | ||||
|   bool need_normalize_strides = src.dim() == 1 && src.size(0) == 1 && src.stride(0) != 1; | ||||
|   // less common case, try normalizing the strides | ||||
|   if (need_normalize_strides) { | ||||
|     // create a new tensor with possibly normalized strides | ||||
|     // gh-83069 | ||||
|     auto shape = src.sizes(); | ||||
|     view = src.as_strided(shape, {1}, src.storage_offset()); | ||||
|   } | ||||
|  | ||||
|   ATenDLMTensor<T>* atDLMTensor(new ATenDLMTensor<T>); | ||||
|   atDLMTensor->handle = src; | ||||
|   atDLMTensor->handle = view; | ||||
|   atDLMTensor->tensor.manager_ctx = atDLMTensor; | ||||
|   atDLMTensor->tensor.deleter = &deleter<T>; | ||||
|   atDLMTensor->tensor.dl_tensor.data = src.data_ptr(); | ||||
|   atDLMTensor->tensor.dl_tensor.data = view.data_ptr(); | ||||
|   atDLMTensor->tensor.dl_tensor.device = torchDeviceToDLDevice(src.device()); | ||||
|   atDLMTensor->tensor.dl_tensor.ndim = static_cast<int32_t>(src.dim()); | ||||
|   atDLMTensor->tensor.dl_tensor.dtype = getDLDataType(src); | ||||
|   atDLMTensor->tensor.dl_tensor.shape = const_cast<int64_t*>(src.sizes().data()); | ||||
|   atDLMTensor->tensor.dl_tensor.strides = const_cast<int64_t*>(src.strides().data()); | ||||
|   atDLMTensor->tensor.dl_tensor.shape = const_cast<int64_t*>(view.sizes().data()); | ||||
|   atDLMTensor->tensor.dl_tensor.strides = const_cast<int64_t*>(view.strides().data()); | ||||
|   atDLMTensor->tensor.dl_tensor.byte_offset = 0; | ||||
|   fillVersion(&atDLMTensor->tensor); | ||||
|  | ||||
|  | ||||
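The hunk above re-strides only in the rare 1-D, single-element case; as a standalone sketch of the same rule (the helper name is illustrative, not part of the patch):

    // Re-stride a 1-D, single-element tensor to stride 1 so DLPack
    // consumers that assume contiguity are not confused (see gh-83069).
    at::Tensor normalize_for_dlpack(const at::Tensor& src) {
      if (src.dim() == 1 && src.size(0) == 1 && src.stride(0) != 1) {
        return src.as_strided(src.sizes(), {1}, src.storage_offset());
      }
      return src;  // common case: no copy, no re-striding
    }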
| @ -52,16 +52,16 @@ struct DLPackTraits {}; | ||||
|  | ||||
| template <> | ||||
| struct DLPackTraits<DLManagedTensor> { | ||||
|   inline static constexpr const char* capsule = "dltensor"; | ||||
|   inline static constexpr const char* used = "used_dltensor"; | ||||
|   inline static const char* capsule = "dltensor"; | ||||
|   inline static const char* used = "used_dltensor"; | ||||
|   inline static auto toDLPack = at::toDLPack; | ||||
|   inline static auto fromDLPack = at::fromDLPack; | ||||
| }; | ||||
|  | ||||
| template <> | ||||
| struct DLPackTraits<DLManagedTensorVersioned> { | ||||
|   inline static constexpr const char* capsule = "dltensor_versioned"; | ||||
|   inline static constexpr const char* used = "used_dltensor_versioned"; | ||||
|   inline static const char* capsule = "dltensor_versioned"; | ||||
|   inline static const char* used = "used_dltensor_versioned"; | ||||
|   inline static auto toDLPack = at::toDLPackVersioned; | ||||
|   inline static auto fromDLPack = at::fromDLPackVersioned; | ||||
| }; | ||||
|  | ||||
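These traits let one conversion template serve both DLPack ABI versions by swapping capsule names and entry points. A minimal sketch of a caller, assuming only what the specializations above declare (the helper itself is hypothetical):

    // Pick the Python-capsule name for a given managed-tensor type.
    template <class Managed>
    const char* capsule_name() {
      return DLPackTraits<Managed>::capsule;
    }
    // capsule_name<DLManagedTensor>()          == "dltensor"
    // capsule_name<DLManagedTensorVersioned>() == "dltensor_versioned"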
| @ -58,7 +58,7 @@ namespace at { | ||||
| namespace{ | ||||
|  | ||||
| // PyTorch allows operations to specify dim 0 and dim -1 on a scalar tensor. | ||||
| bool is_allowed_dim_on_scalar_tensor(int64_t dim) { | ||||
| static bool is_allowed_dim_on_scalar_tensor(int64_t dim) { | ||||
|   return dim == 0 || dim == -1; | ||||
| } | ||||
|  | ||||
| @ -365,7 +365,7 @@ Tensor select_batching_rule(const Tensor& self, int64_t dim, int64_t index) { | ||||
|   return self_physical.getPhysicalToLogicalMap().apply(result); | ||||
| } | ||||
|  | ||||
| int64_t getGradInputPhysicalDim(int64_t dim, IntArrayRef input_sizes, int64_t num_batch_dims) { | ||||
| static int64_t getGradInputPhysicalDim(int64_t dim, IntArrayRef input_sizes, int64_t num_batch_dims) { | ||||
|   return maybe_wrap_dim(dim, static_cast<int64_t>(input_sizes.size())) + num_batch_dims; | ||||
| } | ||||
|  | ||||
| @ -488,7 +488,7 @@ Tensor view_as_complex_batching_rule(const Tensor& self) { | ||||
| // Checks that the smallest batch stride is greater than the largest example | ||||
| // stride. This is something we can support but we choose not to because it's | ||||
| // potentially error prone. | ||||
| void checkBatchDimsAtFrontInLayout(IntArrayRef physical_strides, int64_t num_batch_dims) { | ||||
| static void checkBatchDimsAtFrontInLayout(IntArrayRef physical_strides, int64_t num_batch_dims) { | ||||
|   auto smallest_batch_stride = std::min_element( | ||||
|       physical_strides.begin(), physical_strides.begin() + num_batch_dims); | ||||
|   auto largest_example_stride = std::max_element( | ||||
| @ -508,7 +508,7 @@ void checkBatchDimsAtFrontInLayout(IntArrayRef physical_strides, int64_t num_bat | ||||
| // given (sizes, strides, storage_offset) returns the maximum location that | ||||
| // can be indexed (or nullopt if such a location doesn't exist, e.g., tensors | ||||
| // with zero-size dims). | ||||
| std::optional<int64_t> maximum_indexable_location( | ||||
| static std::optional<int64_t> maximum_indexable_location( | ||||
|     IntArrayRef sizes, IntArrayRef strides, int64_t storage_offset) { | ||||
|   auto result = native::storage_size_for(sizes, strides); | ||||
|   if (result == 0) { | ||||
| @ -521,7 +521,7 @@ std::optional<int64_t> maximum_indexable_location( | ||||
| // This checks that the range of possible memory locations accessible by | ||||
| // x.as_strided(sizes, strides, maybe_storage_offset) | ||||
| // are within the bounds of possible memory locations accessible by x. | ||||
| void checkBasicAsStridedValidForSlice( | ||||
| static void checkBasicAsStridedValidForSlice( | ||||
|     const Tensor& physical_tensor, | ||||
|     int64_t num_batch_dims, | ||||
|     IntArrayRef sizes, | ||||
|  | ||||
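These hunks only change how internal linkage is spelled. For helpers already inside an anonymous namespace (as in the first hunk), `static` is redundant, though some linters want it spelled out; at plain file scope it genuinely switches a function from external to internal linkage. A minimal illustration:

    namespace {
    // Internal linkage via the anonymous namespace alone.
    bool helper_a(int64_t dim) { return dim == 0 || dim == -1; }
    // Also internal linkage; `static` here is redundant but harmless.
    static bool helper_b(int64_t dim) { return dim == 0; }
    } // namespace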
| @ -42,14 +42,8 @@ const PythonTorchFunctionTLS& PythonTorchFunctionTLS::get_state() { | ||||
| } | ||||
|  | ||||
| bool torch_function_mode_enabled() { | ||||
|   // Manually flatten because gcc is refusing to inline here.  Note | ||||
|   // that we are still calling __tls_get_addr twice here with GCC, | ||||
|   // presumably because of | ||||
|   // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81501 (which says | ||||
|   // the fix ships in GCC 16), but forcing inlining still improves | ||||
|   // performance. | ||||
|   const auto& ptfs = pythonTorchFunctionState; | ||||
|   return ptfs.disabled_state_ != TorchFunctionDisabledState::ALL_DISABLED && !ptfs.stack_.empty(); | ||||
|   return PythonTorchFunctionTLS::get_disabled_state() != TorchFunctionDisabledState::ALL_DISABLED && | ||||
|          PythonTorchFunctionTLS::stack_len() > 0; | ||||
| } | ||||
|  | ||||
| // This is needed to disambiguate the ternary torch function disabled states | ||||
|  | ||||
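One side of this hunk hand-flattens the check and explains why: each non-inlined accessor call may re-resolve the thread-local's address through __tls_get_addr. The shape of the flattened body, using the names from the hunk:

    // Bind the thread_local object to a reference once, then test both
    // fields directly instead of calling two non-inlined accessors.
    const auto& tls = pythonTorchFunctionState;  // single TLS address lookup
    return tls.disabled_state_ != TorchFunctionDisabledState::ALL_DISABLED &&
        !tls.stack_.empty();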
| @ -27,7 +27,6 @@ struct TORCH_API PythonTorchFunctionTLS { | ||||
|   TorchFunctionDisabledState disabled_state_ = | ||||
|       TorchFunctionDisabledState::ENABLED; | ||||
|   std::vector<std::shared_ptr<c10::SafePyObject>> stack_; | ||||
|   friend TORCH_API bool torch_function_mode_enabled(); | ||||
| }; | ||||
|  | ||||
| TORCH_API bool torch_function_mode_enabled(); | ||||
|  | ||||
| @ -13,7 +13,7 @@ namespace { | ||||
|   // and left at true for the rest of the execution. | ||||
|   // It's an optimization so that users who never use default hooks don't need to | ||||
|   // read the thread_local variables pack_hook_ and unpack_hook_. | ||||
|   bool is_initialized(false); | ||||
|   static bool is_initialized(false); | ||||
| } | ||||
|  | ||||
| static void assertSavedTensorHooksNotDisabled() { | ||||
|  | ||||
| @ -56,7 +56,7 @@ inline void get_strides(int64_t* strides, ArrayRef<OperandInfo> operands, int64_ | ||||
|   } | ||||
| } | ||||
|  | ||||
| OptionalTensorRef make_otr(const TensorBase &tensor) { | ||||
| static OptionalTensorRef make_otr(const TensorBase &tensor) { | ||||
|   if (tensor.defined()) { | ||||
|     return OptionalTensorRef(tensor); | ||||
|   } else { | ||||
|  | ||||
| @ -36,7 +36,7 @@ namespace { | ||||
| using weakref_type = c10::weak_intrusive_ptr<TensorImpl, UndefinedTensorImpl>; | ||||
| using val_type = std::tuple<weakref_type, Tensor>; | ||||
|  | ||||
| ska::flat_hash_map<TensorImpl*, val_type>& get_cached_casts() { | ||||
| static ska::flat_hash_map<TensorImpl*, val_type>& get_cached_casts() { | ||||
|   static ska::flat_hash_map<TensorImpl*, val_type> cached_casts; | ||||
|   return cached_casts; | ||||
| } | ||||
|  | ||||
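get_cached_casts() above is the classic function-local-static pattern; a self-contained sketch of the same idiom, with generic types for illustration:

    #include <unordered_map>

    // A function-local static ("Meyers singleton") is initialized lazily,
    // exactly once, and thread-safely since C++11, avoiding
    // global-constructor ordering problems for the cache.
    std::unordered_map<int, int>& cache() {
      static std::unordered_map<int, int> instance;
      return instance;
    }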
| @ -6,9 +6,9 @@ namespace at { | ||||
|  | ||||
| namespace { | ||||
|  | ||||
| std::array<HostAllocator*, at::COMPILE_TIME_MAX_DEVICE_TYPES> | ||||
| static std::array<HostAllocator*, at::COMPILE_TIME_MAX_DEVICE_TYPES> | ||||
|     allocator_array{}; | ||||
| std::array<uint8_t, at::COMPILE_TIME_MAX_DEVICE_TYPES> | ||||
| static std::array<uint8_t, at::COMPILE_TIME_MAX_DEVICE_TYPES> | ||||
|     allocator_priority{}; | ||||
|  | ||||
| } // anonymous namespace | ||||
|  | ||||
| @ -39,7 +39,7 @@ struct HostBlock { | ||||
| }; | ||||
|  | ||||
| template <typename B> | ||||
| struct alignas(hardware_destructive_interference_size) FreeBlockList { | ||||
| struct alignas(64) FreeBlockList { | ||||
|   std::mutex mutex_; | ||||
|   std::deque<B*> list_; | ||||
| }; | ||||
| @ -122,7 +122,7 @@ struct TORCH_API HostStats { | ||||
| // Struct containing memory allocator summary statistics for host, as they | ||||
| // are staged for reporting. This is a temporary struct that is used to | ||||
| // avoid locking the allocator while collecting stats. | ||||
| struct alignas(hardware_destructive_interference_size) HostStatsStaged { | ||||
| struct alignas(64) HostStatsStaged { | ||||
|   std::mutex timing_mutex_; | ||||
|   // COUNT: total allocations (active + free) | ||||
|   // LOCK: access to this stat is protected by the allocator's blocks_mutex_ | ||||
| @ -669,7 +669,7 @@ struct CachingHostAllocatorImpl { | ||||
|     TORCH_CHECK_NOT_IMPLEMENTED(false, "Not implemented for query_event"); | ||||
|   } | ||||
|  | ||||
|   alignas(hardware_destructive_interference_size) std::mutex blocks_mutex_; | ||||
|   alignas(64) std::mutex blocks_mutex_; | ||||
|   ska::flat_hash_set<B*> blocks_; // block list | ||||
|   ska::flat_hash_map<void*, B*> ptr_to_block_; | ||||
|  | ||||
| @ -677,17 +677,17 @@ struct CachingHostAllocatorImpl { | ||||
|   // size. This allows us to quickly find a free block of the right size. | ||||
|   // We use deque to store per size free list and guard the list with its own | ||||
|   // mutex. | ||||
|   alignas(hardware_destructive_interference_size) std::vector<FreeBlockList<B>> free_list_ = | ||||
|   alignas(64) std::vector<FreeBlockList<B>> free_list_ = | ||||
|       std::vector<FreeBlockList<B>>(MAX_SIZE_INDEX); | ||||
|  | ||||
|   alignas(hardware_destructive_interference_size) std::mutex events_mutex_; | ||||
|   alignas(64) std::mutex events_mutex_; | ||||
|   std::deque<std::pair<E, B*>> events_; // event queue paired with block | ||||
|  | ||||
|   // Indicates whether the object is active. | ||||
|   // Set to false in the destructor to signal background threads to stop. | ||||
|   std::atomic<bool> active_{true}; | ||||
| protected: | ||||
|   alignas(hardware_destructive_interference_size) HostStatsStaged stats_; | ||||
|   alignas(64) HostStatsStaged stats_; | ||||
| }; | ||||
|  | ||||
| struct TORCH_API HostAllocator : public at::Allocator { | ||||
|  | ||||
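These hunks trade `alignas(hardware_destructive_interference_size)` (presumably a project-local constant, since std::hardware_destructive_interference_size from <new> is only implemented in newer toolchains) for a hard-coded `alignas(64)`, 64 bytes being the usual x86-64 cache-line size. Both spellings serve the same purpose, keeping independently contended members off a shared cache line:

    #include <mutex>
    #include <new>  // std::hardware_destructive_interference_size (C++17)

    // Keep two hot mutexes on separate cache lines so a thread locking
    // one does not invalidate the line holding the other (false sharing).
    struct Locks {
      alignas(std::hardware_destructive_interference_size) std::mutex a;
      alignas(std::hardware_destructive_interference_size) std::mutex b;
    };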
| @ -229,10 +229,10 @@ private: | ||||
|   } | ||||
|  | ||||
|  | ||||
|   static constexpr uint32_t kPhilox10A = 0x9E3779B9; | ||||
|   static constexpr uint32_t kPhilox10B = 0xBB67AE85; | ||||
|   static constexpr uint32_t kPhiloxSA = 0xD2511F53; | ||||
|   static constexpr uint32_t kPhiloxSB = 0xCD9E8D57; | ||||
|   static const uint32_t kPhilox10A = 0x9E3779B9; | ||||
|   static const uint32_t kPhilox10B = 0xBB67AE85; | ||||
|   static const uint32_t kPhiloxSA = 0xD2511F53; | ||||
|   static const uint32_t kPhiloxSB = 0xCD9E8D57; | ||||
| }; | ||||
|  | ||||
| typedef philox_engine Philox4_32; | ||||
|  | ||||
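The Philox constants hunk swaps `static constexpr` for `static const`. The practical difference, sketched:

    #include <cstdint>

    struct Consts {
      // `static constexpr` members are inline variables since C++17:
      // usable in constant expressions, with no out-of-class definition
      // needed even if the member is odr-used (e.g. its address taken).
      static constexpr uint32_t kA = 0x9E3779B9;
      // A `static const` integral member still has an in-class value,
      // but may require a separate definition in one TU when odr-used.
      static const uint32_t kB = 0xBB67AE85;
    };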
| @ -624,14 +624,7 @@ struct TORCH_API IValue final { | ||||
|   IValue(const c10::SymBool& i) { | ||||
|     if (auto mi = i.maybe_as_bool()) { | ||||
|       tag = Tag::Bool; | ||||
| #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ | ||||
|       payload.u.as_int = *mi; | ||||
| #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ | ||||
|       /* due to byteorder if value assigned as_int, as_bool actually is not set correctly */ | ||||
|       payload.u.as_bool = *mi; | ||||
| #else | ||||
| #error Unexpected or undefined __BYTE_ORDER__ | ||||
| #endif | ||||
|     } else { | ||||
|       tag = Tag::SymBool; | ||||
|       payload.u.as_intrusive_ptr = i.toSymNodeImpl().release(); | ||||
|  | ||||
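The byte-order guard exists because `as_bool` aliases only the first byte of the payload: writing `1` through `as_int` puts the 0x01 byte at the highest address on big-endian targets. A standalone sketch of the hazard, using memcpy to avoid union type-punning UB:

    #include <cstdint>
    #include <cstring>

    // Returns what a bool aliased to the lowest-addressed byte of `v`
    // would read back: true on little-endian for v == 1, but false on
    // big-endian, where the 0x01 byte sits at the highest address.
    bool first_byte_as_bool(int64_t v) {
      unsigned char b;
      std::memcpy(&b, &v, 1);
      return b != 0;
    }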
| @ -8,7 +8,6 @@ | ||||
| #include <ATen/cpu/vec/vec128/vec128_bfloat16_neon.h> | ||||
| #include <ATen/cpu/vec/vec128/vec128_float_neon.h> | ||||
| #include <ATen/cpu/vec/vec128/vec128_half_neon.h> | ||||
| #include <ATen/cpu/vec/vec128/vec128_int_aarch64.h> | ||||
| #endif | ||||
|  | ||||
| #include <ATen/cpu/vec/vec128/vec128_convert.h> | ||||
|  | ||||
| @ -1,794 +0,0 @@ | ||||
| #pragma once | ||||
|  | ||||
| #include <ATen/cpu/vec/intrinsics.h> | ||||
| #include <ATen/cpu/vec/vec_base.h> | ||||
| #include <c10/macros/Macros.h> | ||||
| #include <c10/util/irange.h> | ||||
|  | ||||
| namespace at::vec { | ||||
| // Note [CPU_CAPABILITY namespace] | ||||
| // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||||
| // This header, and all of its subheaders, will be compiled with | ||||
| // different architecture flags for each supported set of vector | ||||
| // intrinsics. So we need to make sure they aren't inadvertently | ||||
| // linked together. We do this by declaring objects in an `inline | ||||
| // namespace` which changes the name mangling, but can still be | ||||
| // accessed as `at::vec`. | ||||
| inline namespace CPU_CAPABILITY { | ||||
|  | ||||
| #define VEC_INT_NEON_TEMPLATE(vl, bit)                                        \ | ||||
|   template <>                                                                 \ | ||||
|   struct is_vec_specialized_for<int##bit##_t> : std::bool_constant<true> {};  \ | ||||
|                                                                               \ | ||||
|   template <>                                                                 \ | ||||
|   class Vectorized<int##bit##_t> {                                            \ | ||||
|     using neon_type = int##bit##x##vl##_t;                                    \ | ||||
|                                                                               \ | ||||
|    private:                                                                   \ | ||||
|     neon_type values;                                                         \ | ||||
|                                                                               \ | ||||
|    public:                                                                    \ | ||||
|     using value_type = int##bit##_t;                                          \ | ||||
|     using size_type = int;                                                    \ | ||||
|     static constexpr size_type size() {                                       \ | ||||
|       return vl;                                                              \ | ||||
|     }                                                                         \ | ||||
|     Vectorized() {                                                            \ | ||||
|       values = vdupq_n_s##bit(0);                                             \ | ||||
|     }                                                                         \ | ||||
|     Vectorized(neon_type v) : values(v) {}                                    \ | ||||
|     Vectorized(int##bit##_t val);                                             \ | ||||
|     template <                                                                \ | ||||
|         typename... Args,                                                     \ | ||||
|         typename = std::enable_if_t<(sizeof...(Args) == size())>>             \ | ||||
|     Vectorized(Args... vals) {                                                \ | ||||
|       __at_align__ int##bit##_t buffer[size()] = {vals...};                   \ | ||||
|       values = vld1q_s##bit(buffer);                                          \ | ||||
|     }                                                                         \ | ||||
|     operator neon_type() const {                                              \ | ||||
|       return values;                                                          \ | ||||
|     }                                                                         \ | ||||
|     static Vectorized<int##bit##_t> loadu(                                    \ | ||||
|         const void* ptr,                                                      \ | ||||
|         int64_t count = size());                                              \ | ||||
|     void store(void* ptr, int64_t count = size()) const;                      \ | ||||
|     template <int64_t mask>                                                   \ | ||||
|     static Vectorized<int##bit##_t> blend(                                    \ | ||||
|         const Vectorized<int##bit##_t>& a,                                    \ | ||||
|         const Vectorized<int##bit##_t>& b);                                   \ | ||||
|     static Vectorized<int##bit##_t> blendv(                                   \ | ||||
|         const Vectorized<int##bit##_t>& a,                                    \ | ||||
|         const Vectorized<int##bit##_t>& b,                                    \ | ||||
|         const Vectorized<int##bit##_t>& mask_) {                              \ | ||||
|       return vbslq_s##bit(vreinterpretq_u##bit##_s##bit(mask_.values), b, a); \ | ||||
|     }                                                                         \ | ||||
|     template <typename step_t>                                                \ | ||||
|     static Vectorized<int##bit##_t> arange(                                   \ | ||||
|         value_type base = 0,                                                  \ | ||||
|         step_t step = static_cast<step_t>(1));                                \ | ||||
|     static Vectorized<int##bit##_t> set(                                      \ | ||||
|         const Vectorized<int##bit##_t>& a,                                    \ | ||||
|         const Vectorized<int##bit##_t>& b,                                    \ | ||||
|         int64_t count = size());                                              \ | ||||
|     const int##bit##_t& operator[](int idx) const = delete;                   \ | ||||
|     int##bit##_t& operator[](int idx) = delete;                               \ | ||||
|     Vectorized<int##bit##_t> abs() const {                                    \ | ||||
|       return vabsq_s##bit(values);                                            \ | ||||
|     }                                                                         \ | ||||
|     Vectorized<int##bit##_t> real() const {                                   \ | ||||
|       return values;                                                          \ | ||||
|     }                                                                         \ | ||||
|     Vectorized<int##bit##_t> imag() const {                                   \ | ||||
|       return vdupq_n_s##bit(0);                                               \ | ||||
|     }                                                                         \ | ||||
|     Vectorized<int##bit##_t> conj() const {                                   \ | ||||
|       return values;                                                          \ | ||||
|     }                                                                         \ | ||||
|     Vectorized<int##bit##_t> neg() const {                                    \ | ||||
|       return vnegq_s##bit(values);                                            \ | ||||
|     }                                                                         \ | ||||
|     int##bit##_t reduce_add() const {                                         \ | ||||
|       return vaddvq_s##bit(values);                                           \ | ||||
|     }                                                                         \ | ||||
|     int##bit##_t reduce_max() const;                                          \ | ||||
|     Vectorized<int##bit##_t> operator==(                                      \ | ||||
|         const Vectorized<int##bit##_t>& other) const {                        \ | ||||
|       return Vectorized<value_type>(                                          \ | ||||
|           vreinterpretq_s##bit##_u##bit(vceqq_s##bit(values, other.values))); \ | ||||
|     }                                                                         \ | ||||
|     Vectorized<int##bit##_t> operator!=(                                      \ | ||||
|         const Vectorized<int##bit##_t>& other) const;                         \ | ||||
|     Vectorized<int##bit##_t> operator<(                                       \ | ||||
|         const Vectorized<int##bit##_t>& other) const {                        \ | ||||
|       return Vectorized<value_type>(                                          \ | ||||
|           vreinterpretq_s##bit##_u##bit(vcltq_s##bit(values, other.values))); \ | ||||
|     }                                                                         \ | ||||
|     Vectorized<int##bit##_t> operator<=(                                      \ | ||||
|         const Vectorized<int##bit##_t>& other) const {                        \ | ||||
|       return Vectorized<value_type>(                                          \ | ||||
|           vreinterpretq_s##bit##_u##bit(vcleq_s##bit(values, other.values))); \ | ||||
|     }                                                                         \ | ||||
|     Vectorized<int##bit##_t> operator>(                                       \ | ||||
|         const Vectorized<int##bit##_t>& other) const {                        \ | ||||
|       return Vectorized<value_type>(                                          \ | ||||
|           vreinterpretq_s##bit##_u##bit(vcgtq_s##bit(values, other.values))); \ | ||||
|     }                                                                         \ | ||||
|     Vectorized<int##bit##_t> operator>=(                                      \ | ||||
|         const Vectorized<int##bit##_t>& other) const {                        \ | ||||
|       return Vectorized<value_type>(                                          \ | ||||
|           vreinterpretq_s##bit##_u##bit(vcgeq_s##bit(values, other.values))); \ | ||||
|     }                                                                         \ | ||||
|     Vectorized<int##bit##_t> eq(const Vectorized<int##bit##_t>& other) const; \ | ||||
|     Vectorized<int##bit##_t> ne(const Vectorized<int##bit##_t>& other) const; \ | ||||
|     Vectorized<int##bit##_t> gt(const Vectorized<int##bit##_t>& other) const; \ | ||||
|     Vectorized<int##bit##_t> ge(const Vectorized<int##bit##_t>& other) const; \ | ||||
|     Vectorized<int##bit##_t> lt(const Vectorized<int##bit##_t>& other) const; \ | ||||
|     Vectorized<int##bit##_t> le(const Vectorized<int##bit##_t>& other) const; \ | ||||
|   };                                                                          \ | ||||
|   template <>                                                                 \ | ||||
|   Vectorized<int##bit##_t> inline operator+(                                  \ | ||||
|       const Vectorized<int##bit##_t>& a, const Vectorized<int##bit##_t>& b) { \ | ||||
|     return vaddq_s##bit(a, b);                                                \ | ||||
|   }                                                                           \ | ||||
|   template <>                                                                 \ | ||||
|   Vectorized<int##bit##_t> inline operator-(                                  \ | ||||
|       const Vectorized<int##bit##_t>& a, const Vectorized<int##bit##_t>& b) { \ | ||||
|     return vsubq_s##bit(a, b);                                                \ | ||||
|   }                                                                           \ | ||||
|   template <>                                                                 \ | ||||
|   Vectorized<int##bit##_t> inline operator&(                                  \ | ||||
|       const Vectorized<int##bit##_t>& a, const Vectorized<int##bit##_t>& b) { \ | ||||
|     return vandq_s##bit(a, b);                                                \ | ||||
|   }                                                                           \ | ||||
|   template <>                                                                 \ | ||||
|   Vectorized<int##bit##_t> inline operator|(                                  \ | ||||
|       const Vectorized<int##bit##_t>& a, const Vectorized<int##bit##_t>& b) { \ | ||||
|     return vorrq_s##bit(a, b);                                                \ | ||||
|   }                                                                           \ | ||||
|   template <>                                                                 \ | ||||
|   Vectorized<int##bit##_t> inline operator^(                                  \ | ||||
|       const Vectorized<int##bit##_t>& a, const Vectorized<int##bit##_t>& b) { \ | ||||
|     return veorq_s##bit(a, b);                                                \ | ||||
|   }                                                                           \ | ||||
|   Vectorized<int##bit##_t> inline Vectorized<int##bit##_t>::eq(               \ | ||||
|       const Vectorized<int##bit##_t>& other) const {                          \ | ||||
|     return (*this == other) & Vectorized<int##bit##_t>(1);                    \ | ||||
|   }                                                                           \ | ||||
|   Vectorized<int##bit##_t> inline Vectorized<int##bit##_t>::ne(               \ | ||||
|       const Vectorized<int##bit##_t>& other) const {                          \ | ||||
|     return (*this != other) & Vectorized<int##bit##_t>(1);                    \ | ||||
|   }                                                                           \ | ||||
|   Vectorized<int##bit##_t> inline Vectorized<int##bit##_t>::gt(               \ | ||||
|       const Vectorized<int##bit##_t>& other) const {                          \ | ||||
|     return (*this > other) & Vectorized<int##bit##_t>(1);                     \ | ||||
|   }                                                                           \ | ||||
|   Vectorized<int##bit##_t> inline Vectorized<int##bit##_t>::ge(               \ | ||||
|       const Vectorized<int##bit##_t>& other) const {                          \ | ||||
|     return (*this >= other) & Vectorized<int##bit##_t>(1);                    \ | ||||
|   }                                                                           \ | ||||
|   Vectorized<int##bit##_t> inline Vectorized<int##bit##_t>::lt(               \ | ||||
|       const Vectorized<int##bit##_t>& other) const {                          \ | ||||
|     return (*this < other) & Vectorized<int##bit##_t>(1);                     \ | ||||
|   }                                                                           \ | ||||
|   Vectorized<int##bit##_t> inline Vectorized<int##bit##_t>::le(               \ | ||||
|       const Vectorized<int##bit##_t>& other) const {                          \ | ||||
|     return (*this <= other) & Vectorized<int##bit##_t>(1);                    \ | ||||
|   } | ||||
|  | ||||
| VEC_INT_NEON_TEMPLATE(2, 64) | ||||
| VEC_INT_NEON_TEMPLATE(4, 32) | ||||
| VEC_INT_NEON_TEMPLATE(8, 16) | ||||
| VEC_INT_NEON_TEMPLATE(16, 8) | ||||
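The macro above stamps out one wrapper class per lane width. As a standalone sketch of the generated pattern (assuming an aarch64/NEON target; Vec4i is a made-up stand-in for the real Vectorized<int32_t>), note the all-ones/all-zeros comparison convention:

    #include <arm_neon.h>
    #include <cstdint>
    #include <cstdio>

    struct Vec4i {
      int32x4_t v;
      explicit Vec4i(int32_t x) : v(vdupq_n_s32(x)) {}
      Vec4i(int32x4_t x) : v(x) {}
      Vec4i operator==(const Vec4i& o) const {
        // Lanes become 0xFFFFFFFF (true) or 0 (false), as in the macro above.
        return Vec4i(vreinterpretq_s32_u32(vceqq_s32(v, o.v)));
      }
    };

    int main() {
      Vec4i a(3), b(3);
      Vec4i m = (a == b);
      int32_t out[4];
      vst1q_s32(out, m.v);
      std::printf("%d\n", out[0]);  // -1: all bits set in a matching lane
      return 0;
    }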
|  | ||||
| inline int32_t Vectorized<int32_t>::reduce_max() const { | ||||
|   return vmaxvq_s32(values); | ||||
| } | ||||
|  | ||||
| inline int16_t Vectorized<int16_t>::reduce_max() const { | ||||
|   return vmaxvq_s16(values); | ||||
| } | ||||
|  | ||||
| inline int8_t Vectorized<int8_t>::reduce_max() const { | ||||
|   return vmaxvq_s8(values); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<int32_t> inline operator*( | ||||
|     const Vectorized<int32_t>& a, | ||||
|     const Vectorized<int32_t>& b) { | ||||
|   return vmulq_s32(a, b); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<int16_t> inline operator*( | ||||
|     const Vectorized<int16_t>& a, | ||||
|     const Vectorized<int16_t>& b) { | ||||
|   return vmulq_s16(a, b); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<int8_t> inline operator*( | ||||
|     const Vectorized<int8_t>& a, | ||||
|     const Vectorized<int8_t>& b) { | ||||
|   return vmulq_s8(a, b); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| inline Vectorized<int64_t> operator~(const Vectorized<int64_t>& a) { | ||||
|   int64x2_t val = a; | ||||
|   return ~val; | ||||
| } | ||||
|  | ||||
| template <> | ||||
| inline Vectorized<int32_t> operator~(const Vectorized<int32_t>& a) { | ||||
|   return vmvnq_s32(a); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| inline Vectorized<int16_t> operator~(const Vectorized<int16_t>& a) { | ||||
|   return vmvnq_s16(a); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| inline Vectorized<int8_t> operator~(const Vectorized<int8_t>& a) { | ||||
|   return vmvnq_s8(a); | ||||
| } | ||||
|  | ||||
| inline Vectorized<int64_t> Vectorized<int64_t>::operator!=( | ||||
|     const Vectorized<int64_t>& other) const { | ||||
|   return ~(*this == other); | ||||
| } | ||||
|  | ||||
| inline Vectorized<int32_t> Vectorized<int32_t>::operator!=( | ||||
|     const Vectorized<int32_t>& other) const { | ||||
|   return ~(*this == other); | ||||
| } | ||||
|  | ||||
| inline Vectorized<int16_t> Vectorized<int16_t>::operator!=( | ||||
|     const Vectorized<int16_t>& other) const { | ||||
|   return ~(*this == other); | ||||
| } | ||||
|  | ||||
| inline Vectorized<int8_t> Vectorized<int8_t>::operator!=( | ||||
|     const Vectorized<int8_t>& other) const { | ||||
|   return ~(*this == other); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<int32_t> inline minimum( | ||||
|     const Vectorized<int32_t>& a, | ||||
|     const Vectorized<int32_t>& b) { | ||||
|   return vminq_s32(a, b); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<int16_t> inline minimum( | ||||
|     const Vectorized<int16_t>& a, | ||||
|     const Vectorized<int16_t>& b) { | ||||
|   return vminq_s16(a, b); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<int8_t> inline minimum( | ||||
|     const Vectorized<int8_t>& a, | ||||
|     const Vectorized<int8_t>& b) { | ||||
|   return vminq_s8(a, b); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<int32_t> inline maximum( | ||||
|     const Vectorized<int32_t>& a, | ||||
|     const Vectorized<int32_t>& b) { | ||||
|   return vmaxq_s32(a, b); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<int16_t> inline maximum( | ||||
|     const Vectorized<int16_t>& a, | ||||
|     const Vectorized<int16_t>& b) { | ||||
|   return vmaxq_s16(a, b); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<int8_t> inline maximum( | ||||
|     const Vectorized<int8_t>& a, | ||||
|     const Vectorized<int8_t>& b) { | ||||
|   return vmaxq_s8(a, b); | ||||
| } | ||||
|  | ||||
| template <int64_t mask> | ||||
| Vectorized<int64_t> Vectorized<int64_t>::blend( | ||||
|     const Vectorized<int64_t>& a, | ||||
|     const Vectorized<int64_t>& b) { | ||||
|   // Build a lane mask: element i is all ones if bit i of 'mask' is set, | ||||
|   // all zeros otherwise. | ||||
|   uint64x2_t maskArray = { | ||||
|       (mask & 1LL) ? 0xFFFFFFFFFFFFFFFF : 0, | ||||
|       (mask & 2LL) ? 0xFFFFFFFFFFFFFFFF : 0}; | ||||
|   // Use BSL to select elements from b where the mask is 1, else from a | ||||
|   return vbslq_s64(maskArray, b.values, a.values); | ||||
| } | ||||
|  | ||||
| template <int64_t mask> | ||||
| Vectorized<int32_t> Vectorized<int32_t>::blend( | ||||
|     const Vectorized<int32_t>& a, | ||||
|     const Vectorized<int32_t>& b) { | ||||
|   // Build a lane mask: element i is all ones if bit i of 'mask' is set, | ||||
|   // all zeros otherwise. | ||||
|   uint32x4_t maskArray = { | ||||
|       (mask & 1LL) ? 0xFFFFFFFF : 0, | ||||
|       (mask & 2LL) ? 0xFFFFFFFF : 0, | ||||
|       (mask & 4LL) ? 0xFFFFFFFF : 0, | ||||
|       (mask & 8LL) ? 0xFFFFFFFF : 0}; | ||||
|   // Use BSL to select elements from b where the mask is 1, else from a | ||||
|   return vbslq_s32(maskArray, b.values, a.values); | ||||
| } | ||||
|  | ||||
| template <int64_t mask> | ||||
| Vectorized<int16_t> Vectorized<int16_t>::blend( | ||||
|     const Vectorized<int16_t>& a, | ||||
|     const Vectorized<int16_t>& b) { | ||||
|   // Build a lane mask: element i is all ones if bit i of 'mask' is set, | ||||
|   // all zeros otherwise. | ||||
|   uint16x8_t maskArray = { | ||||
|       (mask & 1LL) ? 0xFFFF : 0, | ||||
|       (mask & 2LL) ? 0xFFFF : 0, | ||||
|       (mask & 4LL) ? 0xFFFF : 0, | ||||
|       (mask & 8LL) ? 0xFFFF : 0, | ||||
|       (mask & 16LL) ? 0xFFFF : 0, | ||||
|       (mask & 32LL) ? 0xFFFF : 0, | ||||
|       (mask & 64LL) ? 0xFFFF : 0, | ||||
|       (mask & 128LL) ? 0xFFFF : 0}; | ||||
|   // Use BSL to select elements from b where the mask is 1, else from a | ||||
|   return vbslq_s16(maskArray, b.values, a.values); | ||||
| } | ||||
|  | ||||
| template <int64_t mask> | ||||
| Vectorized<int8_t> Vectorized<int8_t>::blend( | ||||
|     const Vectorized<int8_t>& a, | ||||
|     const Vectorized<int8_t>& b) { | ||||
|   // Build a lane mask: element i is all ones if bit i of 'mask' is set, | ||||
|   // all zeros otherwise. | ||||
|   uint8x16_t maskArray = { | ||||
|       (mask & 1LL) ? 0xFF : 0, | ||||
|       (mask & 2LL) ? 0xFF : 0, | ||||
|       (mask & 4LL) ? 0xFF : 0, | ||||
|       (mask & 8LL) ? 0xFF : 0, | ||||
|       (mask & 16LL) ? 0xFF : 0, | ||||
|       (mask & 32LL) ? 0xFF : 0, | ||||
|       (mask & 64LL) ? 0xFF : 0, | ||||
|       (mask & 128LL) ? 0xFF : 0, | ||||
|       (mask & 256LL) ? 0xFF : 0, | ||||
|       (mask & 512LL) ? 0xFF : 0, | ||||
|       (mask & 1024LL) ? 0xFF : 0, | ||||
|       (mask & 2048LL) ? 0xFF : 0, | ||||
|       (mask & 4096LL) ? 0xFF : 0, | ||||
|       (mask & 8192LL) ? 0xFF : 0, | ||||
|       (mask & 16384LL) ? 0xFF : 0, | ||||
|       (mask & 32768LL) ? 0xFF : 0}; | ||||
|   // Use BSL to select elements from b where the mask is 1, else from a | ||||
|   return vbslq_s8(maskArray, b.values, a.values); | ||||
| } | ||||
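A hedged sketch of the blend<mask> contract implemented above (aarch64/NEON assumed; blend4 is a local name): lane i of the result is taken from b when bit i of the compile-time mask is set, otherwise from a, via a single BSL select.

    #include <arm_neon.h>
    #include <cstdint>
    #include <cstdio>

    template <int64_t mask>
    int32x4_t blend4(int32x4_t a, int32x4_t b) {
      const uint32x4_t m = {
          (mask & 1) ? 0xFFFFFFFFu : 0u,
          (mask & 2) ? 0xFFFFFFFFu : 0u,
          (mask & 4) ? 0xFFFFFFFFu : 0u,
          (mask & 8) ? 0xFFFFFFFFu : 0u};
      return vbslq_s32(m, b, a);  // per-bit select: m ? b : a
    }

    int main() {
      int32x4_t a = {0, 1, 2, 3};
      int32x4_t b = {10, 11, 12, 13};
      int32x4_t r = blend4<0b0101>(a, b);  // lanes 0 and 2 taken from b
      int32_t out[4];
      vst1q_s32(out, r);
      std::printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);  // 10 1 12 3
      return 0;
    }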
|  | ||||
| #define VEC_INT_NEON_OPS(vl, bit)                                             \ | ||||
|   inline Vectorized<int##bit##_t>::Vectorized(int##bit##_t val) {             \ | ||||
|     values = vdupq_n_s##bit(val);                                             \ | ||||
|   }                                                                           \ | ||||
|   inline Vectorized<int##bit##_t> Vectorized<int##bit##_t>::loadu(            \ | ||||
|       const void* ptr, int64_t count) {                                       \ | ||||
|     if (count == size()) {                                                    \ | ||||
|       return vld1q_s##bit(reinterpret_cast<const int##bit##_t*>(ptr));        \ | ||||
|     } else {                                                                  \ | ||||
|       __at_align__ int##bit##_t tmp_values[size()];                           \ | ||||
|       for (const auto i : c10::irange(size())) {                              \ | ||||
|         tmp_values[i] = 0;                                                    \ | ||||
|       }                                                                       \ | ||||
|       std::memcpy(                                                            \ | ||||
|           tmp_values,                                                         \ | ||||
|           reinterpret_cast<const int##bit##_t*>(ptr),                         \ | ||||
|           count * sizeof(int##bit##_t));                                      \ | ||||
|       return vld1q_s##bit(reinterpret_cast<const int##bit##_t*>(tmp_values)); \ | ||||
|     }                                                                         \ | ||||
|   }                                                                           \ | ||||
|   inline void Vectorized<int##bit##_t>::store(void* ptr, int64_t count)       \ | ||||
|       const {                                                                 \ | ||||
|     if (count == size()) {                                                    \ | ||||
|       vst1q_s##bit(reinterpret_cast<int##bit##_t*>(ptr), values);             \ | ||||
|     } else {                                                                  \ | ||||
|       int##bit##_t tmp_values[size()];                                        \ | ||||
|       vst1q_s##bit(reinterpret_cast<int##bit##_t*>(tmp_values), values);      \ | ||||
|       std::memcpy(ptr, tmp_values, count * sizeof(int##bit##_t));             \ | ||||
|     }                                                                         \ | ||||
|   } | ||||
|  | ||||
| VEC_INT_NEON_OPS(2, 64) | ||||
| VEC_INT_NEON_OPS(4, 32) | ||||
| VEC_INT_NEON_OPS(8, 16) | ||||
| VEC_INT_NEON_OPS(16, 8) | ||||
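The loadu/store pair above defines a partial-access contract: when count is short of a full register, the remaining lanes are zero-filled on load and left untouched on store. A minimal sketch of the load side (aarch64/NEON assumed; loadu_partial is a local name):

    #include <arm_neon.h>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int32x4_t loadu_partial(const int32_t* src, int count) {
      int32_t tmp[4] = {0, 0, 0, 0};  // zero-initialize the tail lanes
      std::memcpy(tmp, src, count * sizeof(int32_t));
      return vld1q_s32(tmp);
    }

    int main() {
      int32_t data[2] = {7, 8};
      int32x4_t v = loadu_partial(data, 2);
      int32_t out[4];
      vst1q_s32(out, v);
      std::printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);  // 7 8 0 0
      return 0;
    }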
|  | ||||
| template <> | ||||
| Vectorized<int64_t> inline operator*( | ||||
|     const Vectorized<int64_t>& a, | ||||
|     const Vectorized<int64_t>& b) { | ||||
|   int64x2_t x = a; | ||||
|   int64x2_t y = b; | ||||
|   return x * y; | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<int64_t> inline operator/( | ||||
|     const Vectorized<int64_t>& a, | ||||
|     const Vectorized<int64_t>& b) { | ||||
|   int64x2_t x = a; | ||||
|   int64x2_t y = b; | ||||
|   return x / y; | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<int32_t> inline operator/( | ||||
|     const Vectorized<int32_t>& a, | ||||
|     const Vectorized<int32_t>& b) { | ||||
|   int32x4_t x = a; | ||||
|   int32x4_t y = b; | ||||
|   return x / y; | ||||
| } | ||||
|  | ||||
| inline int64_t Vectorized<int64_t>::reduce_max() const { | ||||
|   return std::max(values[0], values[1]); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<int64_t> inline minimum( | ||||
|     const Vectorized<int64_t>& a, | ||||
|     const Vectorized<int64_t>& b) { | ||||
|   int64x2_t x = a; | ||||
|   int64x2_t y = b; | ||||
|   return {std::min(x[0], y[0]), std::min(x[1], y[1])}; | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<int64_t> inline maximum( | ||||
|     const Vectorized<int64_t>& a, | ||||
|     const Vectorized<int64_t>& b) { | ||||
|   int64x2_t x = a; | ||||
|   int64x2_t y = b; | ||||
|   return {std::max(x[0], y[0]), std::max(x[1], y[1])}; | ||||
| } | ||||
|  | ||||
| template <typename step_t> | ||||
| inline Vectorized<int64_t> Vectorized<int64_t>::arange( | ||||
|     int64_t base, | ||||
|     step_t step) { | ||||
|   const Vectorized<int64_t> base_vec(base); | ||||
|   const Vectorized<int64_t> step_vec(step); | ||||
|   const int64x2_t step_sizes = {0, 1}; | ||||
|   return base_vec.values + step_sizes * step_vec.values; | ||||
| } | ||||
|  | ||||
| template <typename step_t> | ||||
| inline Vectorized<int32_t> Vectorized<int32_t>::arange( | ||||
|     int32_t base, | ||||
|     step_t step) { | ||||
|   const Vectorized<int32_t> base_vec(base); | ||||
|   const Vectorized<int32_t> step_vec(step); | ||||
|   const int32x4_t step_sizes = {0, 1, 2, 3}; | ||||
|   return vmlaq_s32(base_vec, step_sizes, step_vec); | ||||
| } | ||||
|  | ||||
| template <typename step_t> | ||||
| inline Vectorized<int16_t> Vectorized<int16_t>::arange( | ||||
|     int16_t base, | ||||
|     step_t step) { | ||||
|   const Vectorized<int16_t> base_vec(base); | ||||
|   const Vectorized<int16_t> step_vec(step); | ||||
|   const int16x8_t step_sizes = {0, 1, 2, 3, 4, 5, 6, 7}; | ||||
|   return vmlaq_s16(base_vec, step_sizes, step_vec); | ||||
| } | ||||
|  | ||||
| template <typename step_t> | ||||
| inline Vectorized<int8_t> Vectorized<int8_t>::arange(int8_t base, step_t step) { | ||||
|   const Vectorized<int8_t> base_vec(base); | ||||
|   const Vectorized<int8_t> step_vec(step); | ||||
|   const int8x16_t step_sizes = { | ||||
|       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}; | ||||
|   return vmlaq_s8(base_vec, step_sizes, step_vec); | ||||
| } | ||||
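Each arange above materializes {base, base+step, base+2*step, ...} with one multiply-accumulate over a constant index vector. A small sketch of the same idiom (aarch64/NEON assumed):

    #include <arm_neon.h>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const int32x4_t idx = {0, 1, 2, 3};
      // 5 + i*3 per lane, fused into a single vmla.
      int32x4_t r = vmlaq_s32(vdupq_n_s32(5), idx, vdupq_n_s32(3));
      int32_t out[4];
      vst1q_s32(out, r);
      std::printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);  // 5 8 11 14
      return 0;
    }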
|  | ||||
| template <> | ||||
| Vectorized<int64_t> inline operator>>( | ||||
|     const Vectorized<int64_t>& a, | ||||
|     const Vectorized<int64_t>& b) { | ||||
|   int64x2_t x = a; | ||||
|   int64x2_t y = b; | ||||
|   uint64x2_t u = vreinterpretq_u64_s64(y); | ||||
|   uint64x2_t z = {std::min(u[0], (uint64_t)63), std::min(u[1], (uint64_t)63)}; | ||||
|   return x >> vreinterpretq_s64_u64(z); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<int32_t> inline operator>>( | ||||
|     const Vectorized<int32_t>& a, | ||||
|     const Vectorized<int32_t>& b) { | ||||
|   int32x4_t x = a; | ||||
|   int32x4_t y = b; | ||||
|   uint32x4_t bound = vdupq_n_u32(31); | ||||
|   uint32x4_t z = vminq_u32(vreinterpretq_u32_s32(y), bound); | ||||
|   return x >> vreinterpretq_s32_u32(z); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<int16_t> inline operator>>( | ||||
|     const Vectorized<int16_t>& a, | ||||
|     const Vectorized<int16_t>& b) { | ||||
|   int16x8_t x = a; | ||||
|   int16x8_t y = b; | ||||
|   uint16x8_t bound = vdupq_n_u16(15); | ||||
|   uint16x8_t z = vminq_u16(vreinterpretq_u16_s16(y), bound); | ||||
|   return x >> vreinterpretq_s16_u16(z); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<int8_t> inline operator>>( | ||||
|     const Vectorized<int8_t>& a, | ||||
|     const Vectorized<int8_t>& b) { | ||||
|   int8x16_t x = a; | ||||
|   int8x16_t y = b; | ||||
|   uint8x16_t bound = vdupq_n_u8(7); | ||||
|   int8x16_t z = vreinterpretq_s8_u8(vminq_u8(vreinterpretq_u8_s8(y), bound)); | ||||
|   return x >> z; | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<int64_t> inline operator<<( | ||||
|     const Vectorized<int64_t>& a, | ||||
|     const Vectorized<int64_t>& b) { | ||||
|   int64x2_t y = b; | ||||
|   uint64x2_t u = vreinterpretq_u64_s64(y); | ||||
|   uint64x2_t z = {std::min(u[0], (uint64_t)64), std::min(u[1], (uint64_t)64)}; | ||||
|   return vshlq_s64(a, vreinterpretq_s64_u64(z)); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<int32_t> inline operator<<( | ||||
|     const Vectorized<int32_t>& a, | ||||
|     const Vectorized<int32_t>& b) { | ||||
|   int32x4_t y = b; | ||||
|   uint32x4_t bound = vdupq_n_u32(32); | ||||
|   uint32x4_t z = vminq_u32(vreinterpretq_u32_s32(y), bound); | ||||
|   return vshlq_s32(a, vreinterpretq_s32_u32(z)); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<int16_t> inline operator<<( | ||||
|     const Vectorized<int16_t>& a, | ||||
|     const Vectorized<int16_t>& b) { | ||||
|   int16x8_t y = b; | ||||
|   uint16x8_t bound = vdupq_n_u16(16); | ||||
|   uint16x8_t z = vminq_u16(vreinterpretq_u16_s16(y), bound); | ||||
|   return vshlq_s16(a, vreinterpretq_s16_u16(z)); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<int8_t> inline operator<<( | ||||
|     const Vectorized<int8_t>& a, | ||||
|     const Vectorized<int8_t>& b) { | ||||
|   int8x16_t y = b; | ||||
|   uint8x16_t bound = vdupq_n_u8(8); | ||||
|   int8x16_t z = vreinterpretq_s8_u8(vminq_u8(vreinterpretq_u8_s8(y), bound)); | ||||
|   return vshlq_s8(a, z); | ||||
| } | ||||
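The shift operators above first clamp the per-lane shift count (reinterpreted as unsigned, so negative counts clamp too) to avoid undefined behavior; for >> the bound is bit_width-1, which preserves the sign for arbitrarily large counts. A sketch of that clamping, relying on the same GCC/Clang vector extensions the header itself uses:

    #include <arm_neon.h>
    #include <cstdint>
    #include <cstdio>

    int main() {
      int32x4_t x = vdupq_n_s32(-8);
      // A requested shift of 100 is clamped to 31 before the arithmetic shift.
      uint32x4_t n = vminq_u32(vdupq_n_u32(100), vdupq_n_u32(31));
      int32x4_t r = x >> vreinterpretq_s32_u32(n);
      int32_t out[4];
      vst1q_s32(out, r);
      std::printf("%d\n", out[0]);  // -1: the sign bit fills the lane
      return 0;
    }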
|  | ||||
| inline Vectorized<int64_t> Vectorized<int64_t>::set( | ||||
|     const Vectorized<int64_t>& a, | ||||
|     const Vectorized<int64_t>& b, | ||||
|     int64_t count) { | ||||
|   if (count == 0) { | ||||
|     return a; | ||||
|   } else if (count >= 2) { | ||||
|     return b; | ||||
|   } else { | ||||
|     int64x2_t c = {b.values[0], a.values[1]}; | ||||
|     return c; | ||||
|   } | ||||
| } | ||||
|  | ||||
| inline Vectorized<int32_t> Vectorized<int32_t>::set( | ||||
|     const Vectorized<int32_t>& a, | ||||
|     const Vectorized<int32_t>& b, | ||||
|     int64_t count) { | ||||
|   if (count == 0) { | ||||
|     return a; | ||||
|   } else if (count >= 4) { | ||||
|     return b; | ||||
|   } else { | ||||
|     // Build a lane mask: element i is all ones if i < count, all zeros | ||||
|     // otherwise. | ||||
|     uint32x4_t maskArray = { | ||||
|         (count >= 1LL) ? 0xFFFFFFFF : 0, | ||||
|         (count >= 2LL) ? 0xFFFFFFFF : 0, | ||||
|         (count >= 3LL) ? 0xFFFFFFFF : 0, | ||||
|         0}; | ||||
|     // Use BSL to select elements from b where the mask is 1, else from a | ||||
|     return vbslq_s32(maskArray, b.values, a.values); | ||||
|   } | ||||
| } | ||||
|  | ||||
| inline Vectorized<int16_t> Vectorized<int16_t>::set( | ||||
|     const Vectorized<int16_t>& a, | ||||
|     const Vectorized<int16_t>& b, | ||||
|     int64_t count) { | ||||
|   if (count == 0) { | ||||
|     return a; | ||||
|   } else if (count >= 8) { | ||||
|     return b; | ||||
|   } else { | ||||
|     // Build a lane mask: element i is all ones if i < count, all zeros | ||||
|     // otherwise. | ||||
|     uint16x8_t maskArray = { | ||||
|         static_cast<uint16_t>((count >= 1LL) ? 0xFFFF : 0), | ||||
|         static_cast<uint16_t>((count >= 2LL) ? 0xFFFF : 0), | ||||
|         static_cast<uint16_t>((count >= 3LL) ? 0xFFFF : 0), | ||||
|         static_cast<uint16_t>((count >= 4LL) ? 0xFFFF : 0), | ||||
|         static_cast<uint16_t>((count >= 5LL) ? 0xFFFF : 0), | ||||
|         static_cast<uint16_t>((count >= 6LL) ? 0xFFFF : 0), | ||||
|         static_cast<uint16_t>((count >= 7LL) ? 0xFFFF : 0), | ||||
|         0}; | ||||
|     // Use BSL to select elements from b where the mask is 1, else from a | ||||
|     return vbslq_s16(maskArray, b.values, a.values); | ||||
|   } | ||||
| } | ||||
|  | ||||
| inline Vectorized<int8_t> Vectorized<int8_t>::set( | ||||
|     const Vectorized<int8_t>& a, | ||||
|     const Vectorized<int8_t>& b, | ||||
|     int64_t count) { | ||||
|   if (count == 0) { | ||||
|     return a; | ||||
|   } else if (count >= 16) { | ||||
|     return b; | ||||
|   } else { | ||||
|     // Build a lane mask: element i is all ones if i < count, all zeros | ||||
|     // otherwise. | ||||
|     uint8x16_t maskArray = { | ||||
|         static_cast<uint8_t>((count >= 1LL) ? 0xFF : 0), | ||||
|         static_cast<uint8_t>((count >= 2LL) ? 0xFF : 0), | ||||
|         static_cast<uint8_t>((count >= 3LL) ? 0xFF : 0), | ||||
|         static_cast<uint8_t>((count >= 4LL) ? 0xFF : 0), | ||||
|         static_cast<uint8_t>((count >= 5LL) ? 0xFF : 0), | ||||
|         static_cast<uint8_t>((count >= 6LL) ? 0xFF : 0), | ||||
|         static_cast<uint8_t>((count >= 7LL) ? 0xFF : 0), | ||||
|         static_cast<uint8_t>((count >= 8LL) ? 0xFF : 0), | ||||
|         static_cast<uint8_t>((count >= 9LL) ? 0xFF : 0), | ||||
|         static_cast<uint8_t>((count >= 10LL) ? 0xFF : 0), | ||||
|         static_cast<uint8_t>((count >= 11LL) ? 0xFF : 0), | ||||
|         static_cast<uint8_t>((count >= 12LL) ? 0xFF : 0), | ||||
|         static_cast<uint8_t>((count >= 13LL) ? 0xFF : 0), | ||||
|         static_cast<uint8_t>((count >= 14LL) ? 0xFF : 0), | ||||
|         static_cast<uint8_t>((count >= 15LL) ? 0xFF : 0), | ||||
|         0}; | ||||
|  | ||||
|     // Use BSL to select elements from b where the mask is 1, else from a | ||||
|     return vbslq_s8(maskArray, b.values, a.values); | ||||
|   } | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<int16_t> inline operator/( | ||||
|     const Vectorized<int16_t>& a, | ||||
|     const Vectorized<int16_t>& b) { | ||||
|   Vectorized<int32_t> highBitsA = vmovl_high_s16(a); | ||||
|   Vectorized<int32_t> highBitsB = vmovl_high_s16(b); | ||||
|   Vectorized<int32_t> lowBitsA = vmovl_s16(vget_low_s16(a)); | ||||
|   Vectorized<int32_t> lowBitsB = vmovl_s16(vget_low_s16(b)); | ||||
|   int32x4_t highBitsResult = highBitsA / highBitsB; | ||||
|   int32x4_t lowBitsResult = lowBitsA / lowBitsB; | ||||
|   return vuzp1q_s16( | ||||
|       vreinterpretq_s16_s32(lowBitsResult), | ||||
|       vreinterpretq_s16_s32(highBitsResult)); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<int8_t> inline operator/( | ||||
|     const Vectorized<int8_t>& a, | ||||
|     const Vectorized<int8_t>& b) { | ||||
|   Vectorized<int16_t> highBitsA = vmovl_high_s8(a); | ||||
|   Vectorized<int16_t> highBitsB = vmovl_high_s8(b); | ||||
|   Vectorized<int16_t> lowBitsA = vmovl_s8(vget_low_s8(a)); | ||||
|   Vectorized<int16_t> lowBitsB = vmovl_s8(vget_low_s8(b)); | ||||
|   int16x8_t highBitsResult = highBitsA / highBitsB; | ||||
|   int16x8_t lowBitsResult = lowBitsA / lowBitsB; | ||||
|   return vuzp1q_s8( | ||||
|       vreinterpretq_s8_s16(lowBitsResult), | ||||
|       vreinterpretq_s8_s16(highBitsResult)); | ||||
| } | ||||
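NEON has no integer vector divide, so the int16/int8 specializations above widen each half, divide on the wider type with the compiler's element-wise vector division, and re-pack with vuzp1. A compact sketch of the int16 path (aarch64, little-endian, GCC/Clang vector extensions assumed):

    #include <arm_neon.h>
    #include <cstdint>
    #include <cstdio>

    int main() {
      int16x8_t a = {100, -100, 9, 7, 30, 31, 32, 33};
      int16x8_t b = vdupq_n_s16(3);
      int32x4_t lo = vmovl_s16(vget_low_s16(a)) / vmovl_s16(vget_low_s16(b));
      int32x4_t hi = vmovl_high_s16(a) / vmovl_high_s16(b);
      // Each quotient fits in 16 bits; vuzp1 keeps the low half of every lane.
      int16x8_t q = vuzp1q_s16(vreinterpretq_s16_s32(lo),
                               vreinterpretq_s16_s32(hi));
      int16_t out[8];
      vst1q_s16(out, q);
      std::printf("%d %d\n", out[0], out[1]);  // 33 -33: truncation toward zero
      return 0;
    }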
|  | ||||
| template <> | ||||
| Vectorized<int64_t> inline clamp( | ||||
|     const Vectorized<int64_t>& a, | ||||
|     const Vectorized<int64_t>& min, | ||||
|     const Vectorized<int64_t>& max) { | ||||
|   return minimum(max, maximum(min, a)); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<int32_t> inline clamp( | ||||
|     const Vectorized<int32_t>& a, | ||||
|     const Vectorized<int32_t>& min, | ||||
|     const Vectorized<int32_t>& max) { | ||||
|   return minimum(max, maximum(min, a)); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<int16_t> inline clamp( | ||||
|     const Vectorized<int16_t>& a, | ||||
|     const Vectorized<int16_t>& min, | ||||
|     const Vectorized<int16_t>& max) { | ||||
|   return minimum(max, maximum(min, a)); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<int8_t> inline clamp( | ||||
|     const Vectorized<int8_t>& a, | ||||
|     const Vectorized<int8_t>& min, | ||||
|     const Vectorized<int8_t>& max) { | ||||
|   return minimum(max, maximum(min, a)); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<int64_t> inline clamp_max( | ||||
|     const Vectorized<int64_t>& a, | ||||
|     const Vectorized<int64_t>& max) { | ||||
|   return minimum(max, a); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<int32_t> inline clamp_max( | ||||
|     const Vectorized<int32_t>& a, | ||||
|     const Vectorized<int32_t>& max) { | ||||
|   return minimum(max, a); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<int16_t> inline clamp_max( | ||||
|     const Vectorized<int16_t>& a, | ||||
|     const Vectorized<int16_t>& max) { | ||||
|   return minimum(max, a); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<int8_t> inline clamp_max( | ||||
|     const Vectorized<int8_t>& a, | ||||
|     const Vectorized<int8_t>& max) { | ||||
|   return minimum(max, a); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<int64_t> inline clamp_min( | ||||
|     const Vectorized<int64_t>& a, | ||||
|     const Vectorized<int64_t>& min) { | ||||
|   return maximum(min, a); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<int32_t> inline clamp_min( | ||||
|     const Vectorized<int32_t>& a, | ||||
|     const Vectorized<int32_t>& min) { | ||||
|   return maximum(min, a); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<int16_t> inline clamp_min( | ||||
|     const Vectorized<int16_t>& a, | ||||
|     const Vectorized<int16_t>& min) { | ||||
|   return maximum(min, a); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| Vectorized<int8_t> inline clamp_min( | ||||
|     const Vectorized<int8_t>& a, | ||||
|     const Vectorized<int8_t>& min) { | ||||
|   return maximum(min, a); | ||||
| } | ||||
|  | ||||
| } // namespace CPU_CAPABILITY | ||||
| } // namespace at::vec | ||||
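The clamp family in the deleted header composes the min/max specializations: clamp(a, lo, hi) == minimum(hi, maximum(lo, a)), with the outer minimum winning when lo > hi, matching at::clamp semantics. A minimal sketch (aarch64/NEON assumed):

    #include <arm_neon.h>
    #include <cstdint>
    #include <cstdio>

    int main() {
      int32x4_t a  = {-5, 0, 5, 50};
      int32x4_t lo = vdupq_n_s32(0);
      int32x4_t hi = vdupq_n_s32(10);
      int32x4_t r  = vminq_s32(hi, vmaxq_s32(lo, a));  // clamp to [0, 10]
      int32_t out[4];
      vst1q_s32(out, r);
      std::printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);  // 0 0 5 10
      return 0;
    }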
| @ -1377,7 +1377,7 @@ Vectorized<c10::quint8> inline maximum( | ||||
| #if (defined(__aarch64__) && !defined(CPU_CAPABILITY_SVE256)) | ||||
| std::pair<Vectorized<float>, Vectorized<float>> inline convert_int8_to_float( | ||||
|     at::vec::Vectorized<int8_t> src) { | ||||
|   auto s8x8 = vget_low_s8(src); | ||||
|   auto s8x8 = vld1_s8(src.operator const int8_t*()); | ||||
|   auto s16x8 = vmovl_s8(s8x8); | ||||
|  | ||||
|   auto s32x4_hi = vmovl_s16(vget_high_s16(s16x8)); | ||||
| @ -1402,7 +1402,7 @@ std::pair<Vectorized<float>, Vectorized<float>> inline convert_int8_to_float( | ||||
|  | ||||
| Vectorized<float> inline convert_int8_half_register_to_float( | ||||
|     at::vec::Vectorized<int8_t> src) { | ||||
|   auto s8x8 = vget_low_s8(src); | ||||
|   auto s8x8 = vld1_s8(src.operator const int8_t*()); | ||||
|   auto s16x8 = vmovl_s8(s8x8); | ||||
|  | ||||
|   auto s32x4_lo = vmovl_s16(vget_low_s16(s16x8)); | ||||
|  | ||||
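Both conversion helpers above use the same widening ladder: int8x8 -> int16x8 (vmovl_s8) -> two int32x4 (vmovl_s16) -> float32x4 (vcvtq_f32_s32). A standalone sketch of the low half (aarch64/NEON assumed):

    #include <arm_neon.h>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const int8_t src[8] = {-128, -1, 0, 1, 2, 3, 4, 127};
      int16x8_t s16 = vmovl_s8(vld1_s8(src));
      float32x4_t f_lo = vcvtq_f32_s32(vmovl_s16(vget_low_s16(s16)));
      float out[4];
      vst1q_f32(out, f_lo);
      std::printf("%.1f %.1f\n", out[0], out[3]);  // -128.0 1.0
      return 0;
    }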
| @ -16,8 +16,6 @@ | ||||
| #include <c10/util/irange.h> | ||||
| #include <c10/core/ScalarType.h> | ||||
|  | ||||
| #include <ATen/cuda/detail/BLASConstants.h> | ||||
|  | ||||
| #ifdef USE_ROCM | ||||
| #include <c10/cuda/CUDAStream.h> | ||||
| #include <hipblaslt/hipblaslt-ext.hpp> | ||||
| @ -110,7 +108,7 @@ static hipblasStatus_t rocBLASStatusToHIPStatus(rocblas_status error) | ||||
|  | ||||
| namespace { | ||||
|  | ||||
| cublasOperation_t _cublasOpFromChar(char op) { | ||||
| static cublasOperation_t _cublasOpFromChar(char op) { | ||||
|   // NOLINTNEXTLINE(bugprone-switch-missing-default-case) | ||||
|   switch (op) { | ||||
|     case 'n': | ||||
| @ -130,7 +128,7 @@ cublasOperation_t _cublasOpFromChar(char op) { | ||||
|       "_cublasOpFromChar input should be 't', 'n' or 'c' but got `", op, "`"); | ||||
| } | ||||
|  | ||||
| void _cublasAdjustLdLevel2(int64_t m, int64_t n, int64_t* lda) { | ||||
| static void _cublasAdjustLdLevel2(int64_t m, int64_t n, int64_t* lda) { | ||||
|   // Note: leading dimensions are generally checked to be > 0 and at | ||||
|   // least as big as the result requires (even if the value won't be | ||||
|   // used). | ||||
| @ -144,7 +142,7 @@ void _cublasAdjustLdLevel2(int64_t m, int64_t n, int64_t* lda) { | ||||
|     *lda = std::max<int64_t>(m, 1); | ||||
| } | ||||
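The adjustment above exists because cuBLAS requires lda >= max(1, m) for a column-major m x n operand even in degenerate cases. A simplified, hypothetical illustration of the clamp (adjust_ld_level2 is a local stand-in, not the full PyTorch logic):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    static void adjust_ld_level2(int64_t m, int64_t n, int64_t* lda) {
      if (n <= 1)
        *lda = std::max<int64_t>(m, 1);  // degenerate shape: force a legal ld
    }

    int main() {
      int64_t lda = 0;
      adjust_ld_level2(0, 1, &lda);
      std::printf("lda=%lld\n", static_cast<long long>(lda));  // lda=1
      return 0;
    }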
|  | ||||
| void _cublasAdjustLdLevel3( | ||||
| static void _cublasAdjustLdLevel3( | ||||
|     char transa, | ||||
|     char transb, | ||||
|     int64_t m, | ||||
| @ -1956,15 +1954,13 @@ void scaled_gemm( | ||||
|     const void *result_scale_ptr, | ||||
|     int64_t result_ld, | ||||
|     ScalarType result_dtype, | ||||
|     bool use_fast_accum, | ||||
|     const std::optional<Tensor>& alpha) { | ||||
|     bool use_fast_accum) { | ||||
|   // Note: see `cublasCommonArgs` for various non-intuitive manipulations | ||||
|   // of input arguments to this function. | ||||
|   const auto computeType = CUBLAS_COMPUTE_32F; | ||||
|   const auto scaleType = CUDA_R_32F; | ||||
|   // Note: alpha_val may change later depending on user-passed argument | ||||
|   float alpha_val = 1.0; | ||||
|   float beta_val = 0.0; | ||||
|   const float alpha_val = 1.0; | ||||
|   const float beta_val = 0.0; | ||||
|   CuBlasLtMatmulDescriptor computeDesc(computeType, scaleType); | ||||
|   computeDesc.setAttribute(CUBLASLT_MATMUL_DESC_TRANSA, _cublasOpFromChar(transa)); | ||||
|   computeDesc.setAttribute(CUBLASLT_MATMUL_DESC_TRANSB, _cublasOpFromChar(transb)); | ||||
| @ -2035,33 +2031,6 @@ void scaled_gemm( | ||||
|     computeDesc.setAttribute(CUBLASLT_MATMUL_DESC_EPILOGUE, CUBLASLT_EPILOGUE_BIAS); | ||||
|     computeDesc.setAttribute(CUBLASLT_MATMUL_DESC_BIAS_DATA_TYPE, ScalarTypeToCudaDataType(bias_dtype)); | ||||
|   } | ||||
|  | ||||
|   // Handle user-passed alpha | ||||
|   float *alpha_ptr = &alpha_val; | ||||
|   float *beta_ptr = &beta_val; | ||||
|  | ||||
|   if (alpha.has_value()) { | ||||
|     auto& a = alpha.value(); | ||||
|  | ||||
|     // if device-tensor | ||||
|     if (a.is_cuda()) { | ||||
|       // NOTE: there are lifetime requirements on device-side pointers for alpha/beta -- the value must be | ||||
|       //       valid & correct until the cublas call finishes (not merely until it is scheduled, as host-side values are). Thus | ||||
|       //       we need to use allocations for alpha/beta that have some guarantees on lifetime - a statically | ||||
|       //       managed 4B buffer for alpha that we'll copy the passed alpha value into, and constant memory | ||||
|       //       for beta respectively. | ||||
|       float *user_alpha_ptr = at::cuda::detail::get_user_alpha_ptr(); | ||||
|       at::Tensor user_alpha = at::from_blob(user_alpha_ptr, {1}, TensorOptions().device(kCUDA).dtype(kFloat)); | ||||
|       user_alpha.copy_(a); | ||||
|       // Tell cublasLt we're using device-side pointers for alpha/beta | ||||
|       auto pointer_mode = CUBLASLT_POINTER_MODE_DEVICE; | ||||
|       computeDesc.setAttribute(CUBLASLT_MATMUL_DESC_POINTER_MODE, pointer_mode); | ||||
|       alpha_ptr = user_alpha.data_ptr<float>(); | ||||
|       beta_ptr = at::cuda::detail::get_cublas_device_zero(); | ||||
|     } else { | ||||
|       alpha_val = a.item<float>(); | ||||
|     } | ||||
|   } | ||||
|     // For other data types, use the get_scale_mode function based on scaling type | ||||
|     // The SCALE_MODE attrs only exist in cuBLAS 12.8+/ROCm 7.0 or in recent hipblaslt, | ||||
|     // but we must invoke get_scale_mode anyways to trigger the version checks. | ||||
| @ -2079,7 +2048,6 @@ void scaled_gemm( | ||||
|   cublasLtMatmulHeuristicResult_t heuristicResult = {}; | ||||
|   int returnedResult = 0; | ||||
|   cublasLtHandle_t ltHandle = at::cuda::getCurrentCUDABlasLtHandle(); | ||||
|  | ||||
|   TORCH_CUDABLAS_CHECK(cublasLtMatmulAlgoGetHeuristic( | ||||
|       ltHandle, | ||||
|       computeDesc.descriptor(), | ||||
| @ -2120,10 +2088,10 @@ void scaled_gemm( | ||||
|         auto is_valid_status = hipblaslt_ext::matmulIsAlgoSupported( | ||||
|                 ltHandle, | ||||
|                 computeDesc.descriptor(), | ||||
|                 alpha_ptr, | ||||
|                 &alpha_val, | ||||
|                 Adesc.descriptor(), | ||||
|                 Bdesc.descriptor(), | ||||
|                 beta_ptr, | ||||
|                 &beta_val, | ||||
|                 Cdesc.descriptor(), | ||||
|                 Ddesc.descriptor(), | ||||
|                 all_algos[i].algo, | ||||
| @ -2142,14 +2110,17 @@ void scaled_gemm( | ||||
|   cublasStatus_t cublasStatus = cublasLtMatmul( | ||||
|       ltHandle, | ||||
|       computeDesc.descriptor(), | ||||
|       alpha_ptr, | ||||
|       &alpha_val, | ||||
|       mat1_ptr, | ||||
|       Adesc.descriptor(), | ||||
|       mat2_ptr, | ||||
|       Bdesc.descriptor(), | ||||
|       beta_ptr, | ||||
|       // NOTE: always use result_ptr here, because cuBLASLt w/device beta=0 can't handle nullptr either | ||||
|       &beta_val, | ||||
| #ifdef USE_ROCM | ||||
|       result_ptr, // unused, since beta_val is 0, but hipblaslt can't handle nullptr | ||||
| #else | ||||
|       nullptr, | ||||
| #endif // ifdef USE_ROCM | ||||
|       Cdesc.descriptor(), | ||||
|       result_ptr, | ||||
|       Ddesc.descriptor(), | ||||
|  | ||||
| @ -161,8 +161,7 @@ void scaled_gemm( | ||||
|     const void* result_scale_ptr, | ||||
|     int64_t result_ld, | ||||
|     ScalarType result_dtype, | ||||
|     bool use_fast_accum, | ||||
|     const std::optional<Tensor>& alpha); | ||||
|     bool use_fast_accum); | ||||
|  | ||||
| #define CUDABLAS_BGEMM_ARGTYPES(Dtype)  CUDABLAS_BGEMM_ARGTYPES_AND_C_DTYPE(Dtype, Dtype) | ||||
|  | ||||
|  | ||||
| @ -15,19 +15,19 @@ namespace cuda::detail { | ||||
| namespace { | ||||
|  | ||||
| // Total number of gpus in the system. | ||||
| int64_t num_gpus; | ||||
| static int64_t num_gpus; | ||||
|  | ||||
| // Ensures default_gens_cuda is initialized once. | ||||
| std::deque<c10::once_flag> cuda_gens_init_flag; | ||||
| static std::deque<c10::once_flag> cuda_gens_init_flag; | ||||
|  | ||||
| // Default, global CUDA generators, one per GPU. | ||||
| std::vector<Generator> default_gens_cuda; | ||||
| static std::vector<Generator> default_gens_cuda; | ||||
|  | ||||
| /* | ||||
|  * Populates the global variables related to CUDA generators | ||||
|  * Warning: this function must only be called once! | ||||
|  */ | ||||
| void initCUDAGenVector() { | ||||
| static void initCUDAGenVector() { | ||||
|   // Ensures we call cudaGetDeviceCount only once. | ||||
|   static bool num_gpu_init_flag [[maybe_unused]] = []() { | ||||
|     num_gpus = static_cast<int32_t>(c10::cuda::device_count()); | ||||
| @ -325,9 +325,9 @@ uint64_t CUDAGeneratorImpl::seed() { | ||||
|  */ | ||||
| c10::intrusive_ptr<c10::TensorImpl> CUDAGeneratorImpl::get_state() const { | ||||
|   // The RNG state comprises the seed, and an offset used for Philox. | ||||
|   constexpr size_t seed_size = sizeof(uint64_t); | ||||
|   constexpr size_t offset_size = sizeof(int64_t); | ||||
|   constexpr size_t total_size = seed_size + offset_size; | ||||
|   static const size_t seed_size = sizeof(uint64_t); | ||||
|   static const size_t offset_size = sizeof(int64_t); | ||||
|   static const size_t total_size = seed_size + offset_size; | ||||
|  | ||||
|   auto state_tensor = at::detail::empty_cpu({(int64_t)total_size}, ScalarType::Byte, std::nullopt, std::nullopt, std::nullopt, std::nullopt); | ||||
|   auto rng_state = state_tensor.data_ptr<uint8_t>(); | ||||
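get_state() serializes the generator into a byte tensor laid out as the 8-byte seed followed by the 8-byte Philox offset; set_state() expects the same layout back. A host-only sketch of that packing (plain buffers stand in for the CPU byte tensor):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      constexpr size_t seed_size = sizeof(uint64_t);   // 8 bytes
      constexpr size_t offset_size = sizeof(int64_t);  // 8 bytes
      constexpr size_t total_size = seed_size + offset_size;
      uint8_t state[total_size];
      uint64_t seed = 42;
      int64_t philox_offset = 16;
      std::memcpy(state, &seed, seed_size);
      std::memcpy(state + seed_size, &philox_offset, offset_size);
      std::printf("state is %zu bytes\n", total_size);  // 16 bytes
      return 0;
    }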
| @ -346,9 +346,9 @@ c10::intrusive_ptr<c10::TensorImpl> CUDAGeneratorImpl::get_state() const { | ||||
|  * and size of the internal state. | ||||
|  */ | ||||
| void CUDAGeneratorImpl::set_state(const c10::TensorImpl& new_state) { | ||||
|   constexpr size_t seed_size = sizeof(uint64_t); | ||||
|   constexpr size_t offset_size = sizeof(int64_t); | ||||
|   constexpr size_t total_size = seed_size + offset_size; | ||||
|   static const size_t seed_size = sizeof(uint64_t); | ||||
|   static const size_t offset_size = sizeof(int64_t); | ||||
|   static const size_t total_size = seed_size + offset_size; | ||||
|  | ||||
|   detail::check_rng_state(new_state); | ||||
|  | ||||
|  | ||||
| @ -183,6 +183,11 @@ struct CUDACachingHostAllocatorImpl | ||||
|     return true; | ||||
|   } | ||||
|  | ||||
|   bool pinned_use_background_threads() override { | ||||
|     return c10::cuda::CUDACachingAllocator::CUDAAllocatorConfig:: | ||||
|         pinned_use_background_threads(); | ||||
|   } | ||||
|  | ||||
|   EventPool::Event create_event_internal(DeviceIndex idx) { | ||||
|     // Leak the event pool to avoid shutdown issue. | ||||
|     static auto* event_pool = new EventPool(); | ||||
|  | ||||
| @ -177,6 +177,7 @@ inline void segmented_sort_pairs( | ||||
|   } | ||||
| } | ||||
|  | ||||
| #if CUB_SUPPORTS_UNIQUE_BY_KEY() | ||||
| template <typename KeysInputIteratorT, typename ValuesInputIteratorT, typename ValuesOutputIteratorT, typename NumSelectedIteratorT> | ||||
| inline void unique_by_key( | ||||
|   KeysInputIteratorT keys_in, ValuesInputIteratorT values_in, | ||||
| @ -192,6 +193,7 @@ inline void unique_by_key( | ||||
|   CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSelect::UniqueByKey, | ||||
|     keys_in, values_in, keys_out_, values_out, num_selected, num_input_items, c10::cuda::getCurrentCUDAStream()); | ||||
| } | ||||
| #endif | ||||
|  | ||||
| namespace impl { | ||||
|  | ||||
| @ -577,6 +579,7 @@ inline void exclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT | ||||
| #endif | ||||
| } | ||||
|  | ||||
| #if CUB_SUPPORTS_SCAN_BY_KEY() | ||||
|  | ||||
| template <typename KeysInputIteratorT, typename ValuesInputIteratorT, typename ValuesOutputIteratorT> | ||||
| inline void inclusive_sum_by_key(KeysInputIteratorT keys, ValuesInputIteratorT input, ValuesOutputIteratorT output, int64_t num_items) { | ||||
| @ -604,6 +607,7 @@ inline void inclusive_scan_by_key(KeysInputIteratorT keys, ValuesInputIteratorT | ||||
| #endif | ||||
| } | ||||
|  | ||||
| #endif | ||||
|  | ||||
| template <typename InputIteratorT, typename OutputIteratorT, typename NumSelectedIteratorT> | ||||
| void unique(InputIteratorT input, OutputIteratorT output, | ||||
|  | ||||
| @ -28,6 +28,22 @@ | ||||
| #define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() false | ||||
| #endif | ||||
|  | ||||
| // cub support for UniqueByKey was added in cub 1.16: | ||||
| // https://github.com/NVIDIA/cub/pull/405 | ||||
| #if CUB_VERSION >= 101600 | ||||
| #define CUB_SUPPORTS_UNIQUE_BY_KEY() true | ||||
| #else | ||||
| #define CUB_SUPPORTS_UNIQUE_BY_KEY() false | ||||
| #endif | ||||
|  | ||||
| // cub support for scan by key was added in cub 1.15: | ||||
| // https://github.com/NVIDIA/cub/pull/376 | ||||
| #if CUB_VERSION >= 101500 | ||||
| #define CUB_SUPPORTS_SCAN_BY_KEY() 1 | ||||
| #else | ||||
| #define CUB_SUPPORTS_SCAN_BY_KEY() 0 | ||||
| #endif | ||||
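CUB_VERSION encodes major*100000 + minor*100 + patch, so 1.15.0 is 101500 and 1.16.0 is 101600; each feature gate is a simple >= compare against the version the feature landed in. A self-contained sketch with a hypothetical version constant (FAKE_CUB_VERSION is invented for the example):

    #include <cstdio>

    #define FAKE_CUB_VERSION 101600  // pretend we built against cub 1.16.0

    #if FAKE_CUB_VERSION >= 101600
    #define SUPPORTS_UNIQUE_BY_KEY() true
    #else
    #define SUPPORTS_UNIQUE_BY_KEY() false
    #endif

    int main() {
      std::printf("unique_by_key supported: %d\n", SUPPORTS_UNIQUE_BY_KEY());  // 1
      return 0;
    }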
|  | ||||
| // cub support for cub::FutureValue was added in cub 1.15: | ||||
| // https://github.com/NVIDIA/cub/pull/305 | ||||
| #if CUB_VERSION >= 101500 | ||||
|  | ||||
| @ -1,54 +0,0 @@ | ||||
| #include <ATen/Functions.h> | ||||
| #include <ATen/Tensor.h> | ||||
| #include <ATen/cuda/Exceptions.h> | ||||
|  | ||||
| #include <mutex> | ||||
|  | ||||
| namespace at { | ||||
| namespace cuda { | ||||
| namespace detail { | ||||
|  | ||||
| __device__ __constant__ float cublas_one_device; | ||||
| __device__ __constant__ float cublas_zero_device; | ||||
|  | ||||
| float *get_cublas_device_one() { | ||||
|   static c10::once_flag init_flag; | ||||
|  | ||||
|   c10::call_once(init_flag, []() { | ||||
|     const float one = 1.f; | ||||
|     AT_CUDA_CHECK(cudaMemcpyToSymbol(cublas_one_device, &one, sizeof(float))); | ||||
|   }); | ||||
|  | ||||
|   float *ptr; | ||||
|   AT_CUDA_CHECK(cudaGetSymbolAddress(reinterpret_cast<void**>(&ptr), cublas_one_device)); | ||||
|   return ptr; | ||||
| } | ||||
|  | ||||
| float *get_cublas_device_zero() { | ||||
|   static c10::once_flag init_flag; | ||||
|  | ||||
|   c10::call_once(init_flag, []() { | ||||
|     const float zero = 0.f; | ||||
|     AT_CUDA_CHECK(cudaMemcpyToSymbol(cublas_zero_device, &zero, sizeof(float))); | ||||
|   }); | ||||
|  | ||||
|   float *ptr; | ||||
|   AT_CUDA_CHECK(cudaGetSymbolAddress(reinterpret_cast<void**>(&ptr), cublas_zero_device)); | ||||
|   return ptr; | ||||
| } | ||||
|  | ||||
| float *get_user_alpha_ptr() { | ||||
|   static float *alpha_ptr; | ||||
|  | ||||
|   static c10::once_flag init_flag; | ||||
|  | ||||
|   c10::call_once(init_flag, []() { | ||||
|     AT_CUDA_CHECK(cudaMalloc(&alpha_ptr, sizeof(float))); | ||||
|   }); | ||||
|  | ||||
|   return alpha_ptr; | ||||
| } | ||||
|  | ||||
| } // namespace detail | ||||
| } // namespace cuda | ||||
| } // namespace at | ||||
| @ -1,11 +0,0 @@ | ||||
| #pragma once | ||||
|  | ||||
| #include <ATen/core/TensorBase.h> | ||||
|  | ||||
| namespace at::cuda::detail { | ||||
|  | ||||
| float *get_cublas_device_one(); | ||||
| float *get_cublas_device_zero(); | ||||
| float *get_user_alpha_ptr(); | ||||
|  | ||||
| } // namespace at::cuda::detail | ||||
| @ -13,7 +13,6 @@ | ||||
| #include <c10/core/ScalarType.h> | ||||
|  | ||||
| #include <ATen/cuda/tunable/TunableOp.h> | ||||
| #include <ATen/cuda/tunable/Tunable.h> | ||||
| #include <ATen/cuda/CUDABlas.h> | ||||
| #include <ATen/cuda/Exceptions.h> | ||||
| #include <c10/util/StringUtil.h> | ||||
| @ -151,7 +150,6 @@ inline std::string ScalarTypeToBLASType(c10::ScalarType scalar_type) { | ||||
|       BLASType = "unknown"; | ||||
|   } | ||||
|   return BLASType; | ||||
|  | ||||
| } | ||||
|  | ||||
| // Similar to Compute Type in GemmRocblas.h | ||||
| @ -246,25 +244,33 @@ inline std::string to_string_epilogue(const at::cuda::blas::GEMMAndBiasActivatio | ||||
|  | ||||
| namespace detail { | ||||
|  | ||||
| static bool NumericalCheck(ScalarType dtype, void* c, void* other_c, int64_t size, const NumericalCheckConfig& config) { | ||||
|  | ||||
|   if (!config.enabled) { | ||||
|     return true; // skip when disabled | ||||
|   } | ||||
|  | ||||
| static bool NumericalCheck(ScalarType dtype, void* c, void* other_c, int64_t size) { | ||||
|   auto options = at::TensorOptions().dtype(dtype).device(at::kCUDA); | ||||
|   // comparison done as 1D tensor | ||||
|   at::Tensor ref = at::from_blob(c,       {size}, options); | ||||
|   at::Tensor oth = at::from_blob(other_c, {size}, options); | ||||
|   at::Tensor ref_float = ref.to(at::kFloat); | ||||
|   at::Tensor oth_float = oth.to(at::kFloat); | ||||
|  | ||||
|   const bool ok = at::allclose(ref_float, oth_float, config.rtol, config.atol); | ||||
|   if (ok) { | ||||
|     TUNABLE_LOG3("├──verify numerics: PASSED with atol=", config.atol, ", rtol=", config.rtol); | ||||
|   } else { | ||||
|     TUNABLE_LOG3("├──verify numerics: FAILED with atol=", config.atol, ", rtol=", config.rtol); | ||||
|   std::vector<double> atols{1e-1, 1e-2, 1e-3, 1e-4, 1e-5}; | ||||
|   std::vector<double> rtols{1e-1, 1e-2, 1e-3, 1e-4, 1e-5}; | ||||
|   double last_succeed_atol = 1; | ||||
|   double last_succeed_rtol = 1; | ||||
|   for (auto& atol : atols) { | ||||
|     for (auto& rtol : rtols) { | ||||
|       if (at::allclose(ref_float, oth_float, rtol, atol)) { | ||||
|         last_succeed_atol = atol; | ||||
|         last_succeed_rtol = rtol; | ||||
|       } | ||||
|     } | ||||
|   } | ||||
|   return ok; | ||||
|   if (last_succeed_atol == 1) { | ||||
|     return false; | ||||
|   } else { | ||||
|     TUNABLE_LOG3("├──verify numerics: atol=", last_succeed_atol, ", rtol=", last_succeed_rtol); | ||||
|   } | ||||
|  | ||||
|   return true; | ||||
| } | ||||
|  | ||||
| } | ||||
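The sweep above probes a fixed atol/rtol grid and logs the last combination, in loop order, that still passes allclose; the sentinel value 1 means nothing passed. A scalar sketch of the same scan-order semantics (allclose1 is a local one-element stand-in for at::allclose):

    #include <cmath>
    #include <cstdio>
    #include <initializer_list>

    static bool allclose1(double a, double b, double rtol, double atol) {
      return std::fabs(a - b) <= atol + rtol * std::fabs(b);
    }

    int main() {
      double ref = 1.0, oth = 1.0 + 3e-4;
      double ok_atol = 1, ok_rtol = 1;  // sentinel: 1 means "never passed"
      for (double atol : {1e-1, 1e-2, 1e-3, 1e-4, 1e-5})
        for (double rtol : {1e-1, 1e-2, 1e-3, 1e-4, 1e-5})
          if (allclose1(ref, oth, rtol, atol)) {
            ok_atol = atol;  // records the last pass in scan order,
            ok_rtol = rtol;  // not necessarily the tightest pair
          }
      std::printf("atol=%g rtol=%g\n", ok_atol, ok_rtol);  // atol=1e-05 rtol=0.001
      return 0;
    }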
| @ -349,10 +355,8 @@ struct GemmParams : OpParams { | ||||
|   } | ||||
|  | ||||
|   TuningStatus NumericalCheck(GemmParams<T> *other) { | ||||
|     auto* ctx = getTuningContext(); | ||||
|     auto cfg = ctx->GetNumericalCheckConfig(); | ||||
|     auto c_dtype = c10::CppTypeToScalarType<T>::value; | ||||
|     return detail::NumericalCheck(c_dtype, c, other->c, GetSizeC()/sizeof(T), cfg) ? OK : FAIL; | ||||
|     return detail::NumericalCheck(c_dtype, c, other->c, GetSizeC()/sizeof(T)) ? OK : FAIL; | ||||
|   } | ||||
|  | ||||
|   char transa{}; | ||||
| @ -445,10 +449,8 @@ struct GemmAndBiasParams : OpParams { | ||||
|   } | ||||
|  | ||||
|   TuningStatus NumericalCheck(GemmAndBiasParams<T> *other) { | ||||
|     auto* ctx = getTuningContext(); | ||||
|     auto cfg = ctx->GetNumericalCheckConfig(); | ||||
|     auto c_dtype = c10::CppTypeToScalarType<T>::value; | ||||
|     return detail::NumericalCheck(c_dtype, c, other->c, GetSizeC()/sizeof(T), cfg) ? OK : FAIL; | ||||
|     return detail::NumericalCheck(c_dtype, c, other->c, GetSizeC()/sizeof(T)) ? OK : FAIL; | ||||
|   } | ||||
|  | ||||
|   char transa{}; | ||||
| @ -544,10 +546,8 @@ struct GemmStridedBatchedParams : OpParams { | ||||
|   } | ||||
|  | ||||
|   TuningStatus NumericalCheck(GemmStridedBatchedParams<T> *other) { | ||||
|     auto* ctx = getTuningContext(); | ||||
|     auto cfg = ctx->GetNumericalCheckConfig(); | ||||
|     auto c_dtype = c10::CppTypeToScalarType<C_Dtype>::value; | ||||
|     return detail::NumericalCheck(c_dtype, c, other->c, GetSizeC()/sizeof(T), cfg) ? OK : FAIL; | ||||
|     return detail::NumericalCheck(c_dtype, c, other->c, GetSizeC()/sizeof(T)) ? OK : FAIL; | ||||
|   } | ||||
|  | ||||
|   char transa{}; | ||||
| @ -663,9 +663,7 @@ struct ScaledGemmParams : OpParams { | ||||
|   } | ||||
|  | ||||
|   TuningStatus NumericalCheck(ScaledGemmParams<T> *other) { | ||||
|     auto* ctx = getTuningContext(); | ||||
|     auto cfg = ctx->GetNumericalCheckConfig(); | ||||
|     return detail::NumericalCheck(c_dtype, c, other->c, GetSizeC()/sizeof(T), cfg) ? OK : FAIL; | ||||
|     return detail::NumericalCheck(c_dtype, c, other->c, GetSizeC()/sizeof(T)) ? OK : FAIL; | ||||
|   } | ||||
|  | ||||
|   char transa{}; | ||||
|  | ||||
| @ -145,7 +145,7 @@ programmatically since the settings become fixed. Use the C++ or Python APIs ins | ||||
| | PYTORCH_TUNABLEOP_VERBOSE | Default is 0. Set to 1 to enable basic logging. 2 for basic tuning status. 3 for full trace. | | ||||
| | PYTORCH_TUNABLEOP_VERBOSE_FILENAME | Default is "err" for stderr. Set to "out" for stdout or a filename for capturing verbose logging. | | ||||
| | PYTORCH_TUNABLEOP_FILENAME | Default is 'tunableop_results.csv'. | | ||||
| | PYTORCH_TUNABLEOP_NUMERICAL_CHECK | Default is off. Set 'atol_rtol' to enable, for example "1e-5_1e-5". | | ||||
| | PYTORCH_TUNABLEOP_NUMERICAL_CHECK | Default is 0. Set to 1 to enable. | | ||||
| | PYTORCH_TUNABLEOP_ROCBLAS_ENABLED | Default is 1. Set to 0 to disable rocblas being considered during tuning. | | ||||
| | PYTORCH_TUNABLEOP_HIPBLASLT_ENABLED | Default is 1. Set to 0 to disable hipblaslt being considered during tuning. | | ||||
| | PYTORCH_TUNABLEOP_MAX_TUNING_DURATION_MS | Default is 30. Unit is milliseconds. | | ||||
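Editor's sketch of configuring the variables above from Python; `PYTORCH_TUNABLEOP_ENABLED` is the top-level switch from the rows of this table that fall outside this hunk. Because the settings become fixed once read, set them before the first tuned op runs:

```python
import os

os.environ["PYTORCH_TUNABLEOP_ENABLED"] = "1"
os.environ["PYTORCH_TUNABLEOP_VERBOSE"] = "2"          # basic tuning status
os.environ["PYTORCH_TUNABLEOP_NUMERICAL_CHECK"] = "1"  # per the restored row above

import torch  # import after the environment is set

a = torch.randn(512, 512, device="cuda", dtype=torch.float16)
b = torch.randn(512, 512, device="cuda", dtype=torch.float16)
c = a @ b  # the first GEMM of a given shape triggers tuning
```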
| @ -173,9 +173,10 @@ All python APIs exist in the `torch.cuda.tunable` module. | ||||
| | get_max_tuning_iterations() -> int | | | ||||
| | set_filename(filename: str, insert_device_ordinal: bool = False) -> None | | | ||||
| | get_filename() -> str | | | ||||
| set_numerical_check_tolerances(enable: bool, atol: float, rtol: float) -> None | Enable or disable numerical checking; atol and rtol default to 1e-5. |
| | get_results() -> Tuple[str, str, str, float] | | | ||||
| | get_validators() -> Tuple[str, str] | | | ||||
| | write_file_on_exit(val: bool) -> None | Default is True. | | ||||
| | write_file(filename: Optional[str] = None) -> None | If filename not given, it will call get_filename(). | | ||||
| | read_file(filename: Optional[str] = None) -> None | If filename not given, it will call get_filename(). | | ||||
| | tune_gemm_in_file(filename: str) -> None | read an untuned file and tune GEMMs in it. | | ||||
| mgpu_tune_gemm_in_file(filename_pattern: str, num_gpus: int) -> None | read one or more untuned files and tune all unique GEMMs on one or more GPUs. |
|  | ||||
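A hedged usage sketch of the `torch.cuda.tunable` rows above (`enable` comes from the part of this table outside the hunk; the file name is illustrative):

```python
import torch
import torch.cuda.tunable as tunable

tunable.enable(True)
tunable.set_filename("my_tunableop_results.csv", insert_device_ordinal=True)

a = torch.randn(1024, 1024, device="cuda")
b = torch.randn(1024, 1024, device="cuda")
c = a @ b             # first call of this GEMM shape gets tuned

tunable.write_file()  # optional; write_file_on_exit(True) is the default
print(tunable.get_validators())
```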
| @ -107,30 +107,14 @@ void TuningResultsManager::AddImpl(const std::string& op_signature, | ||||
| } | ||||
|  | ||||
// Removed by this diff — realtime-append variant that wrote each new result
// to disk as soon as it was inserted:
void TuningResultsManager::Add(const std::string& op_signature, const std::string& params_signature, ResultEntry best) {
  bool is_new = false;
  ResultEntry inserted = ResultEntry::Null();

  // ---- mutate maps under results lock ----
  {
    std::scoped_lock l{lock_};
    auto& km = results_[op_signature];  // creates if missing
    is_new = (km.find(params_signature) == km.end());
    AddImpl(op_signature, params_signature, std::move(best), km);
    if (is_new) {
      inserted = km.at(params_signature);  // snapshot for I/O after unlocking
    }
  }
  if (!is_new) return;  // only write once per unique (op, params)

  TuningContext* ctx = getTuningContext();
  if (ctx->IsTuningEnabled() && !ctx->IsRecordUntunedEnabled()) {
    InitRealtimeAppend(ctx->GetFilename(), ctx->GetTuningResultsValidator().GetAllValidators());

    if (is_new && realtime_out_ && realtime_out_->good()) {
      AppendResultLine(op_signature, params_signature, inserted);
    }
  }
}

// Restored by this diff — insert under the results lock only; writing is
// deferred to WriteFile()/write-on-exit:
void TuningResultsManager::Add(const std::string& op_signature, const std::string& params_signature, ResultEntry best) {
  std::scoped_lock l{lock_};

  auto it = results_.find(op_signature);
  if (it == results_.end()) {
    it = results_.insert({op_signature, {}}).first;
  }

  AddImpl(op_signature, params_signature, std::move(best), it->second);
}
|  | ||||
| void TuningResultsManager::RecordUntuned( std::ofstream& untuned_file, const std::string& op_signature, | ||||
| @ -166,77 +150,6 @@ void TuningResultsManager::RecordUntuned( std::ofstream& untuned_file, const std | ||||
|   } | ||||
| } | ||||
|  | ||||
| void TuningResultsManager::InitRealtimeAppend(const std::string& filename, const std::unordered_map<std::string, std::string>& validators) { | ||||
|   std::scoped_lock fl{realtime_file_mutex_}; | ||||
|  | ||||
|   if (realtime_out_ && realtime_out_->good() && realtime_filename_ == filename) { | ||||
|     return; | ||||
|   } | ||||
|  | ||||
|   if (realtime_out_ && realtime_filename_ != filename) { | ||||
|     realtime_out_->flush(); | ||||
|     realtime_out_->close(); | ||||
|     realtime_out_.reset(); | ||||
|     validators_written_ = false; | ||||
|   } | ||||
|  | ||||
|   bool file_exists = false; | ||||
|   bool file_empty = true; | ||||
|  | ||||
|   { | ||||
|     std::ifstream check_file(filename); | ||||
|     if (check_file.good()) { | ||||
|       file_exists = true; | ||||
|       file_empty = (check_file.peek() == std::ifstream::traits_type::eof()); | ||||
|     } | ||||
|   } | ||||
|  | ||||
|   realtime_out_ = std::make_unique<std::ofstream>(filename, std::ios::out | std::ios::app); | ||||
|  | ||||
|   if (!realtime_out_->good()) { | ||||
|     TORCH_WARN("TunableOp realtime append: failed to open '", filename,"'"); | ||||
|     realtime_out_.reset(); | ||||
|     return; | ||||
|   } | ||||
|  | ||||
|   if(!file_exists || file_empty) { | ||||
|     for(const auto& [key, val] : validators) { | ||||
|       (*realtime_out_) << "Validator," << key << "," << val << std::endl; | ||||
|       realtime_out_->flush(); | ||||
|     } | ||||
|     validators_written_ = true; | ||||
|  | ||||
|     TUNABLE_LOG2("Wrote validators to realtime output file"); | ||||
|   } | ||||
|  | ||||
|   realtime_filename_ = filename; | ||||
| } | ||||
|  | ||||
| void TuningResultsManager::AppendResultLine(const std::string& op_sig, const std::string& param_sig, const ResultEntry& result) { | ||||
|   std::scoped_lock fl{realtime_file_mutex_}; | ||||
|  | ||||
|   if(!realtime_out_ || !realtime_out_->good()) { | ||||
|     return; | ||||
|   } | ||||
|  | ||||
|   (*realtime_out_) << op_sig << "," << param_sig << "," << result << std::endl; | ||||
|   realtime_out_->flush(); //ensure immediate write to disk | ||||
|  | ||||
|   TUNABLE_LOG3("Realtime append: ", op_sig, "(", param_sig, ") -> ", result); | ||||
| } | ||||
|  | ||||
| void TuningResultsManager::CloseRealtimeAppend() { | ||||
  std::scoped_lock fl{realtime_file_mutex_};

  if (realtime_out_) {
|     realtime_out_->flush(); | ||||
|     realtime_out_->close(); | ||||
|     realtime_out_.reset(); | ||||
|     TUNABLE_LOG2("Closed realtime output file"); | ||||
|   } | ||||
| } | ||||
|  | ||||
| void TuningResultsManager::Delete(const std::string& op_signature, const std::string& params_signature) { | ||||
|   std::scoped_lock l{lock_}; | ||||
|  | ||||
| @ -483,6 +396,7 @@ TuningContext::TuningContext() : | ||||
|     tuning_enable_{true}, | ||||
|     record_untuned_enable_{false}, | ||||
|     manager_initialized_{false}, | ||||
|     write_file_on_exit_{true}, | ||||
|     numerics_check_enable_{false}, | ||||
|     max_tuning_duration_ms_{30}, | ||||
|     max_tuning_iterations_{100}, | ||||
| @ -503,8 +417,20 @@ TuningContext::~TuningContext() { | ||||
|     // but doesn't do any computation itself. | ||||
|     return; | ||||
|   } | ||||
  TUNABLE_LOG1("Closing File");
  GetTuningResultsManager().CloseRealtimeAppend(); // since we do instant logging by default now
|   auto filename = GetFilename(); | ||||
|   if (IsTunableOpEnabled() && IsTuningEnabled() && !filename.empty() && write_file_on_exit_) { | ||||
|     if (results_count_from_input_file_ < GetTuningResultsManager().GetSize()) { | ||||
|       if (results_count_from_input_file_ > 0) { | ||||
|         TUNABLE_LOG1("additional tuning results available, rewriting file ", filename); | ||||
|       } | ||||
|       else { | ||||
|         TUNABLE_LOG1("writing file ", filename); | ||||
|       } | ||||
|       if (!WriteFile(filename)) { | ||||
|         TUNABLE_LOG1("failed to write file ", filename); | ||||
|       } | ||||
|     } | ||||
|   } | ||||
|  | ||||
|   if (untuned_file_.good()) { | ||||
|     untuned_file_.close(); | ||||
| @ -585,54 +511,20 @@ std::ofstream& TuningContext::GetUntunedFile(){ | ||||
|   return untuned_file_; | ||||
| } | ||||
|  | ||||
| void TuningContext::WriteFileOnExit(bool value) { | ||||
|   write_file_on_exit_ = value; | ||||
| } | ||||
|  | ||||
| void TuningContext::EnableNumericsCheck(bool value) { | ||||
|   numerics_check_enable_ = value; | ||||
| } | ||||
|  | ||||
| NumericalCheckConfig TuningContext::GetNumericalCheckConfig() const { | ||||
|   const auto env_opt = c10::utils::get_env("PYTORCH_TUNABLEOP_NUMERICAL_CHECK"); | ||||
|  | ||||
|   if (!env_opt.has_value()) { | ||||
|     return numerics_cfg_; | ||||
|   } | ||||
|  | ||||
|   const std::string& env = env_opt.value(); | ||||
|  | ||||
|   if (env == "0") { | ||||
|     return NumericalCheckConfig(false, 1e-5, 1e-5); | ||||
|   } | ||||
|  | ||||
|   const size_t underscore = env.find('_'); | ||||
|  | ||||
|   TORCH_CHECK( | ||||
|       underscore != std::string::npos, | ||||
|       "Invalid PYTORCH_TUNABLEOP_NUMERICAL_CHECK format. " | ||||
|       "Expected 'atol_rtol', got: ", | ||||
|       env); | ||||
|  | ||||
|   double atol = 0.0; | ||||
|   double rtol = 0.0; | ||||
|  | ||||
|   try { | ||||
|     atol = std::stod(env.substr(0, underscore)); | ||||
|     rtol = std::stod(env.substr(underscore + 1)); | ||||
|   } catch (const std::exception& e) { | ||||
|     TORCH_CHECK(false, "Failed to parse PYTORCH_TUNABLEOP_NUMERICAL_CHECK: ", e.what()); | ||||
|   } | ||||
|  | ||||
|   TORCH_CHECK( atol > 0.0 && rtol > 0.0, "Tolerance values must be positive. atol=", atol, ", rtol=", rtol); | ||||
|   return NumericalCheckConfig(true, atol, rtol); | ||||
| } | ||||
|  | ||||
| void TuningContext::SetNumericalCheckConfig(bool enabled, double atol, double rtol) { | ||||
|   TORCH_CHECK(atol > 0.0 && rtol > 0.0, "Numerical check tolerances must be positive"); | ||||
|   numerics_cfg_ = {enabled, atol, rtol}; | ||||
| } | ||||
|  | ||||
bool TuningContext::IsNumericsCheckEnabled() const {
  // Removed by this diff — consult the parsed tolerance config:
  //   const auto cfg = GetNumericalCheckConfig();
  //   return cfg.enabled || numerics_check_enable_;
  // Restored by this diff — check the environment flag directly:
  const auto env = c10::utils::get_env("PYTORCH_TUNABLEOP_NUMERICAL_CHECK");
  if (env == "1") {
    return true;
  }
  return numerics_check_enable_;
}
|  | ||||
| void TuningContext::SetMaxTuningDurationMs(int max_duration_ms) { | ||||
| @ -742,6 +634,11 @@ TuningResultsManager& TuningContext::GetTuningResultsManager() { | ||||
|     auto filename = GetFilename(); | ||||
|     if (!filename.empty() && !IsRecordUntunedEnabled()) { | ||||
|       ReadFile(filename); | ||||
|       // attempt immediately to open file for writing to catch errors early | ||||
|       std::ofstream file(filename, std::ios::out | std::ios::app); | ||||
|       if (!file.good()) { | ||||
|         TORCH_WARN("failed to open file '", filename, "' for writing; your tuning results will not be saved"); | ||||
|       } | ||||
|     } | ||||
|   }); | ||||
|   return manager_; | ||||
| @ -847,6 +744,27 @@ bool TuningContext::ReadFile(const std::string& filename_) { | ||||
|   return true; | ||||
| } | ||||
|  | ||||
| bool TuningContext::WriteFile(const std::string& filename_) { | ||||
|   std::string filename = filename_.empty() ? GetFilename() : filename_; | ||||
|   std::ofstream file(filename, std::ios::out | std::ios::trunc); | ||||
|   if (!file.good()) { | ||||
|     TUNABLE_LOG1("error opening tuning results file for writing ", filename); | ||||
|     return false; | ||||
|   } | ||||
|   auto validators = GetTuningResultsValidator().GetAllValidators(); | ||||
|   for (const auto& [key, val] : validators) { | ||||
|     file << "Validator," << key << "," << val << std::endl; | ||||
|   } | ||||
|   auto results = GetTuningResultsManager().Dump(); | ||||
|   for (const auto& [op_sig, kernelmap] : results) { | ||||
|     for (const auto& [param_sig, result] : kernelmap) { | ||||
|       file << op_sig << "," << param_sig << "," << result << std::endl; | ||||
|     } | ||||
|   } | ||||
|   file.close(); | ||||
|   return true; | ||||
| } | ||||
|  | ||||
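Editor's sketch of consuming the file WriteFile produces: validator rows first ("Validator,<key>,<val>"), then one "<op_sig>,<params_sig>,<result>" row per tuned kernel. The file name used here is the documented default:

```python
import csv

validators, results = {}, {}
with open("tunableop_results.csv") as f:
    for row in csv.reader(f):
        if not row:
            continue
        if row[0] == "Validator":
            validators[row[1]] = row[2]
        else:
            # keep any extra comma-separated fields of the result together
            results.setdefault(row[0], {})[row[1]] = ",".join(row[2:])

print(len(validators), "validators,", sum(map(len, results.values())), "results")
```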
| namespace { | ||||
|  | ||||
| struct MaybeDelete { | ||||
|  | ||||
| @ -103,24 +103,10 @@ class TORCH_CUDA_CPP_API TuningResultsManager { | ||||
|  | ||||
|     void RecordUntuned( std::ofstream& untuned_file, const std::string& op_signature, | ||||
|       const std::string& params_signature, const std::string& blas_signature); | ||||
|  | ||||
|     void InitRealtimeAppend( | ||||
|         const std::string& filename, | ||||
|         const std::unordered_map<std::string, std::string>& validators); | ||||
|  | ||||
|     void AppendResultLine(const std::string& op_sig, | ||||
|                          const std::string& param_sig, | ||||
|                          const ResultEntry& result); | ||||
|  | ||||
|     void CloseRealtimeAppend();  // For clean shutdown | ||||
|   private: | ||||
|     std::mutex lock_; | ||||
|     std::mutex realtime_file_mutex_; | ||||
|     std::unique_ptr<std::ofstream> realtime_out_; | ||||
|     std::string realtime_filename_; | ||||
|     ResultsMap results_; | ||||
|     UntunedMap untuned_results_; | ||||
|     bool validators_written_ = false; | ||||
|  | ||||
| }; | ||||
|  | ||||
| @ -148,16 +134,6 @@ class TORCH_CUDA_CPP_API TuningResultsValidator { | ||||
|     GetValidateFuncs validators_; | ||||
| }; | ||||
|  | ||||
| struct NumericalCheckConfig { | ||||
|   bool   enabled{false}; | ||||
|   double atol{1e-5}; | ||||
|   double rtol{1e-5}; | ||||
|  | ||||
|   NumericalCheckConfig() = default; | ||||
|   NumericalCheckConfig(bool e, double a, double r) : enabled(e), atol(a), rtol(r) {} | ||||
| }; | ||||
|  | ||||
|  | ||||
| class TORCH_CUDA_CPP_API TuningContext { | ||||
|   public: | ||||
|     TuningContext(); | ||||
| @ -179,8 +155,6 @@ class TORCH_CUDA_CPP_API TuningContext { | ||||
|  | ||||
|     void EnableNumericsCheck(bool value); | ||||
|     bool IsNumericsCheckEnabled() const; | ||||
|     void SetNumericalCheckConfig(bool enabled, double atol, double rtol); | ||||
|     NumericalCheckConfig GetNumericalCheckConfig() const; | ||||
|  | ||||
|     void SetMaxTuningDurationMs(int max_duration_ms); | ||||
|     int GetMaxTuningDurationMs() const; | ||||
| @ -211,7 +185,10 @@ class TORCH_CUDA_CPP_API TuningContext { | ||||
|     void SetFilename(const std::string& filename, bool insert_device_ordinal=false); | ||||
|     std::string GetFilename() const; | ||||
|  | ||||
|     void WriteFileOnExit(bool value); | ||||
|  | ||||
|     bool ReadFile(const std::string& filename={}); | ||||
|     bool WriteFile(const std::string& filename={}); | ||||
|  | ||||
|     template<class... Types> | ||||
|     void Log(int level, Types... args) { | ||||
| @ -230,6 +207,7 @@ class TORCH_CUDA_CPP_API TuningContext { | ||||
|     bool tuning_enable_; | ||||
|     bool record_untuned_enable_; | ||||
|     bool manager_initialized_; | ||||
|     bool write_file_on_exit_; | ||||
|     bool numerics_check_enable_; | ||||
|     int max_tuning_duration_ms_; | ||||
|     int max_tuning_iterations_; | ||||
| @ -244,8 +222,6 @@ class TORCH_CUDA_CPP_API TuningContext { | ||||
|     std::ofstream untuned_file_; | ||||
|     size_t results_count_from_input_file_; | ||||
|     bool is_shutting_down_; | ||||
|  | ||||
|     NumericalCheckConfig numerics_cfg_{}; | ||||
| }; | ||||
|  | ||||
| TORCH_CUDA_CPP_API TuningContext* getTuningContext(); | ||||
|  | ||||
| @ -109,8 +109,7 @@ class DefaultScaledGemmOp : public Callable<ScaledGemmParams<T>> { | ||||
|           params->c_scale_ptr, | ||||
|           params->ldc, | ||||
|           params->c_dtype, | ||||
|           params->use_fast_accum, | ||||
|           std::nullopt /* alpha */); | ||||
|           params->use_fast_accum); | ||||
|       return OK; | ||||
|     } | ||||
| }; | ||||
|  | ||||
| @ -267,10 +267,27 @@ class TunableOp { | ||||
|       for (size_t i = 0; i < op_names_.size(); i++) { | ||||
|         auto* candidate = ops_[op_names_[i]].get(); // borrow pointer | ||||
|  | ||||
|         auto status = candidate->Call(reusable_params[0]); | ||||
|         if (status != OK) { | ||||
|           TUNABLE_LOG3("├──unsupported id=", i, ", ", op_sig, '(', params_sig, ") ", op_names_[i]); | ||||
|           continue; | ||||
|         if (do_numerics_check) { | ||||
|           ParamsT* numerical_params = params->DeepCopy(false); | ||||
|           auto status = candidate->Call(numerical_params); | ||||
|           if (status != OK) { | ||||
|             numerical_params->Delete(); | ||||
|             TUNABLE_LOG3("├──unsupported id=", i, ", ", op_sig, '(', params_sig, ") ", op_names_[i]); | ||||
|             continue; | ||||
|           } | ||||
|           status = reference_params->NumericalCheck(numerical_params); | ||||
|           numerical_params->Delete(); | ||||
|           if (status != OK) { | ||||
|             TUNABLE_LOG3("├──numerics check failed for id=", i, ", ", op_sig, '(', params_sig, ") ", op_names_[i]); | ||||
|             continue; | ||||
|           } | ||||
|         } | ||||
|         else { | ||||
|           auto status = candidate->Call(reusable_params[0]); | ||||
|           if (status != OK) { | ||||
|             TUNABLE_LOG3("├──unsupported id=", i, ", ", op_sig, '(', params_sig, ") ", op_names_[i]); | ||||
|             continue; | ||||
|           } | ||||
|         } | ||||
|  | ||||
|         // collect a small profile | ||||
| @ -293,22 +310,6 @@ class TunableOp { | ||||
|           continue; | ||||
|         } | ||||
|  | ||||
|         if (do_numerics_check) { | ||||
|           ParamsT* numerical_params = params->DeepCopy(false); | ||||
|           auto status = candidate->Call(numerical_params); | ||||
|           if (status != OK) { | ||||
|             numerical_params->Delete(); | ||||
|             TUNABLE_LOG3("├──unsupported id=", i, ", ", op_sig, '(', params_sig, ") ", op_names_[i]); | ||||
|             continue; | ||||
|           } | ||||
|           status = reference_params->NumericalCheck(numerical_params); | ||||
|           numerical_params->Delete(); | ||||
|           if (status != OK) { | ||||
|             TUNABLE_LOG3("├──numerics check failed for id=", i, ", ", op_sig, '(', params_sig, ") ", op_names_[i]); | ||||
|             continue; | ||||
|           } | ||||
|         } | ||||
|  | ||||
|         // for warmup does user set max duration, max iters, or both? | ||||
|         // warmup is skipped by default, i.e. warmup_iter = 0 | ||||
|         // warmup will be set to the non-zero value of max_warmup_duration | ||||
|  | ||||
| @ -39,7 +39,7 @@ Tensor vdot_decomp(const Tensor& A, const Tensor& B) { | ||||
| // NB: I wrote this like this because we *might* want its for a future matmul | ||||
| // batch rule that isn't decomposed... | ||||
| // "tv" = tensor @ vector | ||||
| std::tuple<Tensor, std::optional<int64_t>> tv_batch_rule( | ||||
| static std::tuple<Tensor, std::optional<int64_t>> tv_batch_rule( | ||||
|     const Tensor& self, std::optional<int64_t> self_bdim, | ||||
|     const Tensor& other, std::optional<int64_t> other_bdim) { | ||||
|   if (self_bdim && other_bdim) { | ||||
| @ -66,7 +66,7 @@ std::tuple<Tensor, std::optional<int64_t>> tv_batch_rule( | ||||
|   TORCH_INTERNAL_ASSERT(false, "can't get here"); | ||||
| } | ||||
|  | ||||
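For orientation while reading these batch rules (the diff only marks them static), the tv/mv cases correspond to vmap calls like the following; shapes are illustrative:

```python
import torch
from torch.func import vmap

A = torch.randn(8, 3, 4)   # batch of matrices
x = torch.randn(4)         # shared vector
print(vmap(torch.mv, in_dims=(0, None))(A, x).shape)  # torch.Size([8, 3])

xs = torch.randn(8, 4)     # batch of vectors
print(vmap(torch.mv, in_dims=(0, 0))(A, xs).shape)    # torch.Size([8, 3])
```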
| std::tuple<Tensor, std::optional<int64_t>> mv_batch_rule( | ||||
| static std::tuple<Tensor, std::optional<int64_t>> mv_batch_rule( | ||||
|     const Tensor& self, std::optional<int64_t> self_bdim, | ||||
|     const Tensor& other, std::optional<int64_t> other_bdim) { | ||||
|   auto self_logical_rank = rankWithoutBatchDim(self, self_bdim); | ||||
| @ -79,7 +79,7 @@ std::tuple<Tensor, std::optional<int64_t>> mv_batch_rule( | ||||
|   return tv_batch_rule(self, self_bdim, other, other_bdim); | ||||
| } | ||||
|  | ||||
| std::tuple<Tensor, std::optional<int64_t>> mm_batch_rule( | ||||
| static std::tuple<Tensor, std::optional<int64_t>> mm_batch_rule( | ||||
|     const Tensor& self, std::optional<int64_t> self_bdim, | ||||
|     const Tensor& other, std::optional<int64_t> other_bdim) { | ||||
|   auto self_logical_rank = rankWithoutBatchDim(self, self_bdim); | ||||
| @ -94,7 +94,7 @@ std::tuple<Tensor, std::optional<int64_t>> mm_batch_rule( | ||||
|   return std::make_tuple( at::matmul(self_, other_), 0 ); | ||||
| } | ||||
|  | ||||
| std::tuple<Tensor, std::optional<int64_t>> bmm_batch_rule( | ||||
| static std::tuple<Tensor, std::optional<int64_t>> bmm_batch_rule( | ||||
|     const Tensor& self, std::optional<int64_t> self_bdim, | ||||
|     const Tensor& other, std::optional<int64_t> other_bdim) { | ||||
|   auto self_logical_rank = rankWithoutBatchDim(self, self_bdim); | ||||
| @ -250,7 +250,7 @@ struct LinalgCheckMatrixBinaryRuleHelper<op_name, F, Func, typelist<A, B, T...>> | ||||
|   } | ||||
| }; | ||||
|  | ||||
| void expect_at_least_rank( | ||||
| static void expect_at_least_rank( | ||||
|     const Tensor& tensor, | ||||
|     std::optional<int64_t> tensor_bdim, | ||||
|     int64_t expected_rank, | ||||
| @ -472,7 +472,7 @@ atol_rtol_tensor_batch_rule( | ||||
|   return std::make_tuple(Func(input_, atol_, rtol_, hermitian), 0); | ||||
| } | ||||
|  | ||||
| std::tuple<Tensor, std::optional<int64_t>> | ||||
| static std::tuple<Tensor, std::optional<int64_t>> | ||||
| pinv_batch_rule( | ||||
|     const Tensor& input, std::optional<int64_t> input_bdim, const std::optional<Tensor>& atol, | ||||
|     const std::optional<int64_t> atol_bdim, const std::optional<Tensor>& rtol, | ||||
|  | ||||
| @ -213,22 +213,40 @@ static cudnn_grid_sample_backward_batch_rule( | ||||
|   return grid_sample_backward_helper_out(std::move(bw_out), 0, 0, bdim_size); | ||||
| } | ||||
|  | ||||
| // uses functional formulation for one_hot under vmap to be compatible with | ||||
// FakeTensor/dynamic shapes and compiled functorch transforms.
| // mirrors the meta path in aten/src/ATen/native/Onehot.cpp, | ||||
| // but requires explicit positive num_classes under vmap to avoid | ||||
| // data-dependent output shapes. | ||||
| // TODO: replace with targetable functionalization | ||||
static Tensor one_hot_decomposition_hack(const Tensor &self, int64_t num_classes) {
    TORCH_CHECK(self.dtype() == kLong, "one_hot is only applicable to index tensor.");
    auto shape = self.sym_sizes().vec();

    // empty tensor could be converted to one hot representation,
    // but shape inference is not possible.
    if (self.sym_numel() == 0) {
        if (num_classes <= 0) {
            TORCH_CHECK(false, "Can not infer total number of classes from empty tensor.");
        } else {
            shape.emplace_back(num_classes);
            return at::empty_symint(shape, self.options());
        }
    }

    // disallow implicit inference under vmap; this would be data-dependent
    // and is intentionally guarded by Dynamo in torch/_dynamo/variables/torch.py.
    TORCH_CHECK(num_classes > 0, "When vmap-ing torch.nn.functional.one_hot, please "
        "provide an explicit positive num_classes argument.");

    // Removed by this diff — arange/eq formulation:
    //   const auto options = self.options();
    //   at::Tensor index = at::arange(num_classes, options);
    //   return at::eq(self.unsqueeze(-1), index).to(at::kLong);

    // Restored by this diff — zeros/scatter formulation.
    // Disabling all of the following checks. This is OK because scatter has checks too.
    // Maybe one_hot should be a primitive wrt autograd so we don't have to deal with this.
    // // non-empty tensor
    // if (self.device().type() != at::kCUDA) {
    //   //for cuda, rely on device assert thrown by scatter
    //   TORCH_CHECK(self.min().item().toLong() >= 0, "Class values must be non-negative.");
    // }
    // if (self.device().type() != at::kCUDA) {
    //   //rely on device asserts from scatter to avoid sync here
    //   TORCH_CHECK(num_classes > self.max().item().toLong(), "Class values must be smaller than num_classes.");
    // }

    shape.emplace_back(num_classes);
    Tensor ret = at::zeros_symint(shape, self.options());
    return ret.scatter(-1, self.unsqueeze(-1), 1);
}
|  | ||||
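The explicit num_classes requirement enforced above is visible from Python; a small sketch (values illustrative):

```python
import torch
import torch.nn.functional as F
from torch.func import vmap

x = torch.tensor([[0, 2], [1, 3]])

# explicit positive num_classes works under vmap
print(vmap(lambda t: F.one_hot(t, num_classes=4))(x).shape)  # torch.Size([2, 2, 4])

# implicit inference would make the output shape data-dependent, so it is rejected
try:
    vmap(lambda t: F.one_hot(t))(x)
except RuntimeError as err:
    print("expected:", err)
```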
| template <typename A, A a, typename C> | ||||
|  | ||||
| @ -19,7 +19,7 @@ | ||||
| namespace at::functorch { | ||||
|  | ||||
| namespace { | ||||
| bool any_has_value(ArrayRef<std::optional<int64_t>> bdims) { | ||||
| static bool any_has_value(ArrayRef<std::optional<int64_t>> bdims) { | ||||
|   for (const auto& bdim : bdims) { | ||||
|     if (bdim.has_value()) { | ||||
|       return true; | ||||
| @ -28,7 +28,7 @@ bool any_has_value(ArrayRef<std::optional<int64_t>> bdims) { | ||||
|   return false; | ||||
| } | ||||
|  | ||||
| int64_t get_num_leading_nones(ArrayRef<std::optional<Tensor>> indices) { | ||||
| static int64_t get_num_leading_nones(ArrayRef<std::optional<Tensor>> indices) { | ||||
|   int64_t result = 0; | ||||
|   for (const auto& idx : indices) { | ||||
|     if (!idx.has_value() || !idx->defined()) { | ||||
| @ -40,7 +40,7 @@ int64_t get_num_leading_nones(ArrayRef<std::optional<Tensor>> indices) { | ||||
|   return result; | ||||
| } | ||||
|  | ||||
| int64_t get_max_index_logical_dim( | ||||
| static int64_t get_max_index_logical_dim( | ||||
|     ArrayRef<std::optional<Tensor>> indices, | ||||
|     ArrayRef<std::optional<int64_t>> indices_bdims) { | ||||
|   int64_t max_logical_dim = -1; | ||||
| @ -57,7 +57,7 @@ int64_t get_max_index_logical_dim( | ||||
|   return max_logical_dim; | ||||
| } | ||||
|  | ||||
| std::vector<std::optional<Tensor>> batchIndices( | ||||
| static std::vector<std::optional<Tensor>> batchIndices( | ||||
|   at::TensorOptions options, | ||||
|   ArrayRef<std::optional<Tensor>> indices, | ||||
|   ArrayRef<std::optional<int64_t>> indices_bdims, | ||||
| @ -126,7 +126,7 @@ std::vector<std::optional<Tensor>> batchIndices( | ||||
|  | ||||
| // Define an "advanced index" to be a selection object that is | ||||
| // a non-trivial Tensor (i.e. it does not represent :). | ||||
| bool is_advanced_index(const std::optional<Tensor>& idx) { | ||||
| static bool is_advanced_index(const std::optional<Tensor>& idx) { | ||||
|   if (!idx.has_value()) { | ||||
|     return false; | ||||
|   } | ||||
| @ -137,7 +137,7 @@ bool is_advanced_index(const std::optional<Tensor>& idx) { | ||||
| } | ||||
|  | ||||
| // See NOTE: [advanced indices adjacent] for definition | ||||
| bool are_advanced_indices_adjacent(ArrayRef<std::optional<Tensor>> indices) { | ||||
| static bool are_advanced_indices_adjacent(ArrayRef<std::optional<Tensor>> indices) { | ||||
|   int64_t num_advanced_indices_regions = 0; | ||||
|   bool in_advanced_indices_region = false; | ||||
|   for (const auto& idx : indices) { | ||||
| @ -165,7 +165,7 @@ bool are_advanced_indices_adjacent(ArrayRef<std::optional<Tensor>> indices) { | ||||
| // - result: Tensor[B, 4, 5, 6, 2, 3, 7, 8] | ||||
| //                     -------  ---- | ||||
| //                     region2  region1 | ||||
| Tensor swap_regions(const Tensor& tensor, int64_t first_region_size, int64_t second_region_size) { | ||||
| static Tensor swap_regions(const Tensor& tensor, int64_t first_region_size, int64_t second_region_size) { | ||||
|   VmapDimVector permutation(tensor.dim(), 0); | ||||
|   std::iota(permutation.begin(), permutation.end(), 0); | ||||
|   std::rotate( | ||||
| @ -553,7 +553,7 @@ Tensor &_index_put_impl__plumbing(Tensor &self, const List<std::optional<Tensor> | ||||
|   return self; | ||||
| } | ||||
|  | ||||
| Tensor maybe_permute_values( | ||||
| static Tensor maybe_permute_values( | ||||
|     const Tensor& values, | ||||
|     ArrayRef<std::optional<Tensor>> orig_indices, | ||||
|     ArrayRef<std::optional<int64_t>> orig_indices_bdims) { | ||||
| @ -1052,7 +1052,7 @@ std::tuple<Tensor, std::optional<int64_t>> index_add_batch_rule( | ||||
|                                    other, other_bdim, alpha, false); | ||||
| } | ||||
|  | ||||
| std::tuple<Tensor,Tensor> binary_pointwise_align( | ||||
| static std::tuple<Tensor,Tensor> binary_pointwise_align( | ||||
|     const Tensor & self, | ||||
|     std::optional<int64_t> self_bdim, | ||||
|     const Tensor & mask, | ||||
|  | ||||
| @ -346,7 +346,7 @@ std::tuple<Tensor, std::optional<int64_t>> slice_batch_rule( | ||||
|   return std::make_tuple(std::move(result), 0); | ||||
| } | ||||
|  | ||||
| bool is_allowed_dim_on_scalar_tensor(int64_t dim) { | ||||
| static bool is_allowed_dim_on_scalar_tensor(int64_t dim) { | ||||
|   return dim == 0 || dim == -1; | ||||
| } | ||||
|  | ||||
|  | ||||
| @ -160,10 +160,6 @@ constexpr DispatchKeySet kKeysToPropagateToWrapper({ | ||||
|   DispatchKey::CUDA, | ||||
|   DispatchKey::CPU, | ||||
|   DispatchKey::PrivateUse1, | ||||
|   DispatchKey::SparseCPU, | ||||
|   DispatchKey::SparseCUDA, | ||||
|   DispatchKey::SparseCsrCPU, | ||||
|   DispatchKey::SparseCsrCUDA, | ||||
| }); | ||||
|  | ||||
| inline DispatchKeySet getKeysToPropagateToWrapper(const Tensor& tensor, DispatchKeySet to_propagate=kKeysToPropagateToWrapper) { | ||||
|  | ||||
| @ -68,18 +68,18 @@ namespace at::functorch { | ||||
|  | ||||
| namespace{ | ||||
| // PyTorch allows operations to specify dim 0 and dim -1 on a scalar tensor. | ||||
| bool is_allowed_dim_on_scalar_tensor(int64_t dim) { | ||||
| static bool is_allowed_dim_on_scalar_tensor(int64_t dim) { | ||||
|   return dim == 0 || dim == -1; | ||||
| } | ||||
|  | ||||
| int64_t get_current_level() { | ||||
| static int64_t get_current_level() { | ||||
|   auto maybe_level = maybeCurrentDynamicLayer(); | ||||
|   TORCH_INTERNAL_ASSERT(maybe_level.has_value()); | ||||
|   return maybe_level->layerId(); | ||||
| } | ||||
|  | ||||
| // This check should probably go into the dispatcher... | ||||
| bool participatesInCurrentLevel(const Tensor& self) { | ||||
| static bool participatesInCurrentLevel(const Tensor& self) { | ||||
|   auto current_level = get_current_level(); | ||||
|   auto* maybe_batched_impl = maybeGetBatchedImpl(self); | ||||
|   if (!maybe_batched_impl) { | ||||
| @ -90,7 +90,7 @@ bool participatesInCurrentLevel(const Tensor& self) { | ||||
|   return self_level == current_level; | ||||
| } | ||||
|  | ||||
| bool participatesInCurrentLevel(ITensorListRef self) { | ||||
| static bool participatesInCurrentLevel(ITensorListRef self) { | ||||
|   for (const Tensor& tensor : self) { | ||||
|     if (participatesInCurrentLevel(tensor)) { | ||||
|       return true; | ||||
| @ -285,7 +285,7 @@ std::vector<Tensor> unbind_batching_rule(const Tensor& self, int64_t dim) { | ||||
| // given (sizes, strides, storage_offset) returns the maximum location that | ||||
| // can be indexed (or nullopt if such a location doesn't exist, e.g., tensors | ||||
| // with zero-size dims). | ||||
| std::optional<c10::SymInt> maximum_indexable_location( | ||||
| static std::optional<c10::SymInt> maximum_indexable_location( | ||||
|     c10::SymIntArrayRef sizes, c10::SymIntArrayRef strides, const c10::SymInt& storage_offset) { | ||||
|   auto result = native::storage_size_for(sizes, strides); | ||||
|   if (result == 0) { | ||||
| @ -298,7 +298,7 @@ std::optional<c10::SymInt> maximum_indexable_location( | ||||
| // This checks that the range of possible memory locations accessible by | ||||
| // x.as_strided(sizes, strides, maybe_storage_offset) | ||||
| // are within the bounds of possible memory locations accessible by x. | ||||
| void checkBasicAsStridedValidForSlice( | ||||
| static void checkBasicAsStridedValidForSlice( | ||||
|     const Tensor& physical_tensor, | ||||
|     int64_t num_batch_dims, | ||||
|     c10::SymIntArrayRef sizes, | ||||
|  | ||||
Some files were not shown because too many files have changed in this diff.