Mirror of https://github.com/pytorch/pytorch.git (synced 2025-11-01 04:54:55 +08:00)

Compare commits: csl/remove...fixbugh100 (1 commit)

Commit: 56b58b1b6e
@@ -8,8 +8,6 @@ if [[ "$GPU_ARCH_VERSION" == *"12.6"* ]]; then
    export TORCH_CUDA_ARCH_LIST="8.0;9.0"
elif [[ "$GPU_ARCH_VERSION" == *"12.8"* ]]; then
    export TORCH_CUDA_ARCH_LIST="8.0;9.0;10.0;12.0"
elif [[ "$GPU_ARCH_VERSION" == *"12.9"* ]]; then
    export TORCH_CUDA_ARCH_LIST="8.0;9.0;10.0;12.0"
elif [[ "$GPU_ARCH_VERSION" == *"13.0"* ]]; then
    export TORCH_CUDA_ARCH_LIST="8.0;9.0;10.0;11.0;12.0+PTX"
fi

@@ -113,7 +113,6 @@ case "$tag" in
    UCX_COMMIT=${_UCX_COMMIT}
    UCC_COMMIT=${_UCC_COMMIT}
    TRITON=yes
    INSTALL_MINGW=yes
    ;;
  pytorch-linux-jammy-cuda13.0-cudnn9-py3-gcc11)
    CUDA_VERSION=13.0.0

@@ -362,7 +361,6 @@ docker build \
       --build-arg "OPENBLAS=${OPENBLAS:-}" \
       --build-arg "SKIP_SCCACHE_INSTALL=${SKIP_SCCACHE_INSTALL:-}" \
       --build-arg "SKIP_LLVM_SRC_BUILD_INSTALL=${SKIP_LLVM_SRC_BUILD_INSTALL:-}" \
       --build-arg "INSTALL_MINGW=${INSTALL_MINGW:-}" \
       -f $(dirname ${DOCKERFILE})/Dockerfile \
       -t "$tmp_tag" \
       "$@" \

@@ -83,6 +83,10 @@ function build_cpython {
        py_suffix=${py_ver::-1}
        py_folder=$py_suffix
    fi
    # Update to rc2 due to https://github.com/python/cpython/commit/c72699086fe4
    if [ "$py_suffix" == "3.14.0" ]; then
        py_suffix="3.14.0rc2"
    fi
    wget -q $PYTHON_DOWNLOAD_URL/$py_folder/Python-$py_suffix.tgz -O Python-$py_ver.tgz
    do_cpython_build $py_ver Python-$py_suffix


@@ -150,7 +150,7 @@ function install_130 {
  CUDNN_VERSION=9.13.0.50
  echo "Installing CUDA 13.0 and cuDNN ${CUDNN_VERSION} and NVSHMEM and NCCL and cuSparseLt-0.7.1"
  # install CUDA 13.0 in the same container
  install_cuda 13.0.2 cuda_13.0.2_580.95.05_linux
  install_cuda 13.0.0 cuda_13.0.0_580.65.06_linux

  # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
  install_cudnn 13 $CUDNN_VERSION

@@ -1,10 +0,0 @@
#!/bin/bash

set -ex

# Install MinGW-w64 for Windows cross-compilation
apt-get update
apt-get install -y g++-mingw-w64-x86-64-posix

echo "MinGW-w64 installed successfully"
x86_64-w64-mingw32-g++ --version

@@ -19,8 +19,8 @@ pip_install \
  transformers==4.36.2

pip_install coloredlogs packaging
pip_install onnxruntime==1.23.1
pip_install onnxscript==0.5.4
pip_install onnxruntime==1.23.0
pip_install onnxscript==0.5.3

# Cache the transformers model to be used later by ONNX tests. We need to run the transformers
# package to download the model. By default, the model is cached at ~/.cache/huggingface/hub/
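# (Illustrative aside, not part of this diff: "warming" the cache just means
#  loading the model once, roughly along the lines of the hypothetical command
#  below; the actual model name is whatever the ONNX tests pin.)
#    python -c "from transformers import AutoModel; AutoModel.from_pretrained('<model-name>')"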

@@ -39,13 +39,9 @@ case ${DOCKER_TAG_PREFIX} in
        DOCKER_GPU_BUILD_ARG=""
        ;;
    rocm*)
        # we want the patch version of 7.0 instead
        if [[ "$GPU_ARCH_VERSION" == *"7.0"* ]]; then
            GPU_ARCH_VERSION="${GPU_ARCH_VERSION}.2"
        fi
        # we want the patch version of 6.4 instead
        if [[ "$GPU_ARCH_VERSION" == *"6.4"* ]]; then
            GPU_ARCH_VERSION="${GPU_ARCH_VERSION}.4"
            GPU_ARCH_VERSION="${GPU_ARCH_VERSION}.2"
        fi
        BASE_TARGET=rocm
        GPU_IMAGE=rocm/dev-ubuntu-22.04:${GPU_ARCH_VERSION}-complete

@@ -75,13 +75,9 @@ case ${image} in
        DOCKERFILE_SUFFIX="_cuda_aarch64"
        ;;
    manylinux2_28-builder:rocm*)
        # we want the patch version of 7.0 instead
        if [[ "$GPU_ARCH_VERSION" == *"7.0"* ]]; then
            GPU_ARCH_VERSION="${GPU_ARCH_VERSION}.2"
        fi
        # we want the patch version of 6.4 instead
        if [[ "$GPU_ARCH_VERSION" == *"6.4"* ]]; then
            GPU_ARCH_VERSION="${GPU_ARCH_VERSION}.4"
            GPU_ARCH_VERSION="${GPU_ARCH_VERSION}.2"
        fi
        TARGET=rocm_final
        MANY_LINUX_VERSION="2_28"

@@ -334,12 +334,12 @@ sympy==1.13.3
#Pinned versions:
#test that import:

onnx==1.19.1
onnx==1.18.0
#Description: Required by onnx tests, and mypy and test_public_bindings.py when checking torch.onnx._internal
#Pinned versions:
#test that import:

onnxscript==0.5.4
onnxscript==0.5.3
#Description: Required by mypy and test_public_bindings.py when checking torch.onnx._internal
#Pinned versions:
#test that import:

@@ -100,16 +100,9 @@ COPY ./common/common_utils.sh common_utils.sh
COPY ci_commit_pins/huggingface-requirements.txt huggingface-requirements.txt
COPY ci_commit_pins/timm.txt timm.txt
COPY ci_commit_pins/torchbench.txt torchbench.txt
# Only build aoti cpp tests when INDUCTOR_BENCHMARKS is set to True
ENV BUILD_AOT_INDUCTOR_TEST ${INDUCTOR_BENCHMARKS}
RUN if [ -n "${INDUCTOR_BENCHMARKS}" ]; then bash ./install_inductor_benchmark_deps.sh; fi
RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface-requirements.txt torchbench.txt

ARG INSTALL_MINGW
COPY ./common/install_mingw.sh install_mingw.sh
RUN if [ -n "${INSTALL_MINGW}" ]; then bash ./install_mingw.sh; fi
RUN rm install_mingw.sh

ARG TRITON
ARG TRITON_CPU


@@ -57,8 +57,8 @@ def clone_external_repo(target: str, repo: str, dst: str = "", update_submodules
        logger.info("Successfully cloned %s", target)
        return r, commit

    except GitCommandError:
        logger.exception("Git operation failed")
    except GitCommandError as e:
        logger.error("Git operation failed: %s", e)
        raise



@@ -143,7 +143,7 @@ def sample_vllm_test_library():
                "pytest -v -s compile/test_decorator.py",
            ],
        },
        "vllm_language_model_test_extended_generation_28_failure_test": {
        "vllm_languagde_model_test_extended_generation_28_failure_test": {
            "title": "Language Models Test (Extended Generation) 2.8 release failure",
            "id": "vllm_languagde_model_test_extended_generation_28_failure_test",
            "package_install": [

@@ -63,7 +63,7 @@ class VllmBuildParameters:
    # DOCKERFILE_PATH: path to Dockerfile used when use_local_dockerfile is True"
    use_local_dockerfile: bool = env_bool_field("USE_LOCAL_DOCKERFILE", True)
    dockerfile_path: Path = env_path_field(
        "DOCKERFILE_PATH", ".github/ci_configs/vllm/Dockerfile"
        "DOCKERFILE_PATH", ".github/ci_configs/vllm/Dockerfile.tmp_vllm"
    )

    # the cleaning script to remove torch dependencies from pip

@@ -6,7 +6,7 @@ dependencies = [
    "GitPython==3.1.45",
    "docker==7.1.0",
    "pytest==7.3.2",
    "uv==0.9.5"
    "uv==0.8.6"
]

[tool.setuptools]

@@ -187,22 +187,19 @@ if [[ $CUDA_VERSION == 12* || $CUDA_VERSION == 13* ]]; then
            export USE_CUFILE=0
        else
            DEPS_LIST+=(
                "/usr/local/cuda/lib64/libnvToolsExt.so.1"
                "/usr/local/cuda/lib64/libcublas.so.12"
                "/usr/local/cuda/lib64/libcublasLt.so.12"
                "/usr/local/cuda/lib64/libcudart.so.12"
                "/usr/local/cuda/lib64/libnvrtc.so.12"
                "/usr/local/cuda/extras/CUPTI/lib64/libcupti.so.12")
            DEPS_SONAME+=(
                "libnvToolsExt.so.1"
                "libcublas.so.12"
                "libcublasLt.so.12"
                "libcudart.so.12"
                "libnvrtc.so.12"
                "libcupti.so.12")

            if [[ $CUDA_VERSION != 12.9* ]]; then
                DEPS_LIST+=("/usr/local/cuda/lib64/libnvToolsExt.so.1")
                DEPS_SONAME+=("libnvToolsExt.so.1")
            fi
        fi
    else
        echo "Using nvidia libs from pypi."

@@ -460,37 +460,31 @@ test_inductor_shard() {
    --verbose
}

test_inductor_aoti_cpp() {
test_inductor_aoti() {
  # docker build uses bdist_wheel which does not work with test_aot_inductor
  # TODO: need a faster way to build
  if [[ "$BUILD_ENVIRONMENT" == *rocm* ]]; then
    # We need to hipify before building again
    python3 tools/amd_build/build_amd.py
  fi
  if [[ "$BUILD_ENVIRONMENT" == *sm86* ]]; then
    BUILD_COMMAND=(TORCH_CUDA_ARCH_LIST=8.6 USE_FLASH_ATTENTION=OFF python -m pip install --no-build-isolation -v -e .)
    # TODO: Replace me completely, as one should not use conda libstdc++, nor need special path to TORCH_LIB
    TEST_ENVS=(CPP_TESTS_DIR="${BUILD_BIN_DIR}" LD_LIBRARY_PATH="/opt/conda/envs/py_3.10/lib:${TORCH_LIB_DIR}:${LD_LIBRARY_PATH}")
  else
    BUILD_COMMAND=(python -m pip install --no-build-isolation -v -e .)
    TEST_ENVS=(CPP_TESTS_DIR="${BUILD_BIN_DIR}" LD_LIBRARY_PATH="${TORCH_LIB_DIR}")
  fi

  # aoti cmake custom command requires `torch` to be installed
  # initialize the cmake build cache and install torch
  /usr/bin/env "${BUILD_COMMAND[@]}"
  # rebuild with the build cache with `BUILD_AOT_INDUCTOR_TEST` enabled
  /usr/bin/env CMAKE_FRESH=1 BUILD_AOT_INDUCTOR_TEST=1 "${BUILD_COMMAND[@]}"

  /usr/bin/env "${TEST_ENVS[@]}" python test/run_test.py --cpp --verbose -i cpp/test_aoti_abi_check cpp/test_aoti_inference cpp/test_vec_half_AVX2 -dist=loadfile
}

test_inductor_aoti_cross_compile_for_windows() {

  TEST_REPORTS_DIR=$(pwd)/test/test-reports
  mkdir -p "$TEST_REPORTS_DIR"

  # Set WINDOWS_CUDA_HOME environment variable
  WINDOWS_CUDA_HOME="$(pwd)/win-torch-wheel-extracted"
  export WINDOWS_CUDA_HOME

  echo "WINDOWS_CUDA_HOME is set to: $WINDOWS_CUDA_HOME"
  echo "Contents:"
  ls -lah "$(pwd)/win-torch-wheel-extracted/lib/x64/" || true

  python test/inductor/test_aoti_cross_compile_windows.py -k compile --package-dir "$TEST_REPORTS_DIR" --win-torch-lib-dir "$(pwd)/win-torch-wheel-extracted/torch/lib"
}

test_inductor_cpp_wrapper_shard() {
  if [[ -z "$NUM_TEST_SHARDS" ]]; then
    echo "NUM_TEST_SHARDS must be defined to run a Python test shard"
@@ -906,7 +900,7 @@ test_inductor_set_cpu_affinity(){
  export LD_PRELOAD="$JEMALLOC_LIB":"$LD_PRELOAD"
  export MALLOC_CONF="oversize_threshold:1,background_thread:true,metadata_thp:auto,dirty_decay_ms:-1,muzzy_decay_ms:-1"

  if [[ "$(uname -m)" != "aarch64" ]]; then
  if [[ "${TEST_CONFIG}" != *aarch64* ]]; then
    # Use Intel OpenMP for x86
    IOMP_LIB="$(dirname "$(which python)")/../lib/libiomp5.so"
    export LD_PRELOAD="$IOMP_LIB":"$LD_PRELOAD"
@@ -920,7 +914,7 @@ test_inductor_set_cpu_affinity(){
  cores=$((cpus / thread_per_core))

  # Set number of cores to 16 on aarch64 for performance runs
  if [[ "$(uname -m)" == "aarch64" && $cores -gt 16 ]]; then
  if [[ "${TEST_CONFIG}" == *aarch64* && $cores -gt 16 ]]; then
    cores=16
  fi
  export OMP_NUM_THREADS=$cores
@@ -1621,7 +1615,6 @@ test_operator_benchmark() {
  TEST_REPORTS_DIR=$(pwd)/test/test-reports
  mkdir -p "$TEST_REPORTS_DIR"
  TEST_DIR=$(pwd)
  ARCH=$(uname -m)

  test_inductor_set_cpu_affinity

@@ -1636,7 +1629,7 @@ test_operator_benchmark() {
  pip_install pandas
  python check_perf_csv.py \
      --actual "${TEST_REPORTS_DIR}/operator_benchmark_eager_float32_cpu.csv" \
      --expected "${ARCH}_expected_ci_operator_benchmark_eager_float32_cpu.csv"
      --expected "expected_ci_operator_benchmark_eager_float32_cpu.csv"
}

test_operator_microbenchmark() {
@@ -1673,7 +1666,7 @@ if [[ "${TEST_CONFIG}" == *numpy_2* ]]; then
    python -m pip install --pre numpy==2.0.2 scipy==1.13.1 numba==0.60.0
  fi
  python test/run_test.py --include dynamo/test_functions.py dynamo/test_unspec.py test_binary_ufuncs.py test_fake_tensor.py test_linalg.py test_numpy_interop.py test_tensor_creation_ops.py test_torch.py torch_np/test_basic.py
elif [[ "${BUILD_ENVIRONMENT}" == *aarch64* && "${TEST_CONFIG}" == 'default' ]]; then
elif [[ "${BUILD_ENVIRONMENT}" == *aarch64* && "${TEST_CONFIG}" != *perf_cpu_aarch64* ]]; then
  test_linux_aarch64
elif [[ "${TEST_CONFIG}" == *backward* ]]; then
  test_forward_backward_compatibility
@@ -1724,8 +1717,6 @@ elif [[ "${TEST_CONFIG}" == *inductor-triton-cpu* ]]; then
  test_inductor_triton_cpu
elif [[ "${TEST_CONFIG}" == *inductor-micro-benchmark* ]]; then
  test_inductor_micro_benchmark
elif [[ "${TEST_CONFIG}" == *aoti_cross_compile_for_windows* ]]; then
  test_inductor_aoti_cross_compile_for_windows
elif [[ "${TEST_CONFIG}" == *huggingface* ]]; then
  install_torchvision
  id=$((SHARD_NUMBER-1))
@@ -1766,7 +1757,7 @@ elif [[ "${TEST_CONFIG}" == *inductor_cpp_wrapper* ]]; then
  install_torchvision
  PYTHONPATH=/torchbench test_inductor_cpp_wrapper_shard "$SHARD_NUMBER"
  if [[ "$SHARD_NUMBER" -eq "1" ]]; then
    test_inductor_aoti_cpp
    test_inductor_aoti
  fi
elif [[ "${TEST_CONFIG}" == *inductor* ]]; then
  install_torchvision

@@ -7,9 +7,12 @@ if "%DESIRED_PYTHON%" == "3.13t" (
    set "PYTHON_INSTALLER_URL=https://www.python.org/ftp/python/3.13.0/python-3.13.0-amd64.exe"
    set ADDITIONAL_OPTIONS="Include_freethreaded=1"
    set PYTHON_EXEC="python3.13t"
) else if "%DESIRED_PYTHON%"=="3.14" (
    echo Python version is set to 3.14 or 3.14t
    set "PYTHON_INSTALLER_URL=https://www.python.org/ftp/python/3.14.0/python-3.14.0rc1-amd64.exe"
) else if "%DESIRED_PYTHON%"=="3.14t" (
    echo Python version is set to 3.14 or 3.14t
    set "PYTHON_INSTALLER_URL=https://www.python.org/ftp/python/3.14.0/python-3.14.0-amd64.exe"
    set "PYTHON_INSTALLER_URL=https://www.python.org/ftp/python/3.14.0/python-3.14.0rc1-amd64.exe"
    set ADDITIONAL_OPTIONS="Include_freethreaded=1"
    set PYTHON_EXEC="python3.14t"
) else (

@@ -163,13 +163,8 @@ if [[ "$(uname)" != Darwin ]]; then
  MEMORY_LIMIT_MAX_JOBS=12
  NUM_CPUS=$(( $(nproc) - 2 ))

  if [[ "$(uname)" == Linux ]]; then
    # Defaults here for **binary** linux builds so they can be changed in one place
    export MAX_JOBS=${MAX_JOBS:-$(( ${NUM_CPUS} > ${MEMORY_LIMIT_MAX_JOBS} ? ${MEMORY_LIMIT_MAX_JOBS} : ${NUM_CPUS} ))}
  else
    # For other builds
    export MAX_JOBS=${NUM_CPUS}
  fi
  # Defaults here for **binary** linux builds so they can be changed in one place
  export MAX_JOBS=${MAX_JOBS:-$(( ${NUM_CPUS} > ${MEMORY_LIMIT_MAX_JOBS} ? ${MEMORY_LIMIT_MAX_JOBS} : ${NUM_CPUS} ))}

  cat >>"$envfile" <<EOL
  export MAX_JOBS="${MAX_JOBS}"

@@ -1,359 +0,0 @@
---
name: docstring
description: Write docstrings for PyTorch functions and methods following PyTorch conventions. Use when writing or updating docstrings in PyTorch code.
---

# PyTorch Docstring Writing Guide

This skill describes how to write docstrings for functions and methods in the PyTorch project, following the conventions in `torch/_tensor_docs.py` and `torch/nn/functional.py`.

## General Principles

- Use **raw strings** (`r"""..."""`) for all docstrings to avoid issues with LaTeX/math backslashes
- Follow **Sphinx/reStructuredText** (reST) format for documentation
- Be **concise but complete** - include all essential information
- Always include **examples** when possible
- Use **cross-references** to related functions/classes

## Docstring Structure

### 1. Function Signature (First Line)

Start with the function signature showing all parameters:

```python
r"""function_name(param1, param2, *, kwarg1=default1, kwarg2=default2) -> ReturnType
```

**Notes:**
- Include the function name
- Show positional and keyword-only arguments (use `*` separator)
- Include default values
- Show return type annotation
- This line should NOT end with a period

### 2. Brief Description

Provide a one-line description of what the function does:

```python
r"""conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor

Applies a 2D convolution over an input image composed of several input
planes.
```

### 3. Mathematical Formulas (if applicable)

Use Sphinx math directives for mathematical expressions:

```python
.. math::
    \text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}
```

Or inline math: `:math:\`x^2\``

### 4. Cross-References

Link to related classes and functions using Sphinx roles:

- `:class:\`~torch.nn.ModuleName\`` - Link to a class
- `:func:\`torch.function_name\`` - Link to a function
- `:meth:\`~Tensor.method_name\`` - Link to a method
- `:attr:\`attribute_name\`` - Reference an attribute
- The `~` prefix shows only the last component (e.g., `Conv2d` instead of `torch.nn.Conv2d`)

**Example:**
```python
See :class:`~torch.nn.Conv2d` for details and output shape.
```

### 5. Notes and Warnings

Use admonitions for important information:

```python
.. note::
    This function doesn't work directly with NLLLoss,
    which expects the Log to be computed between the Softmax and itself.
    Use log_softmax instead (it's faster and has better numerical properties).

.. warning::
    :func:`new_tensor` always copies :attr:`data`. If you have a Tensor
    ``data`` and want to avoid a copy, use :func:`torch.Tensor.requires_grad_`
    or :func:`torch.Tensor.detach`.
```

### 6. Args Section

Document all parameters with type annotations and descriptions:

```python
Args:
    input (Tensor): input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)`
    weight (Tensor): filters of shape :math:`(\text{out\_channels} , kH , kW)`
    bias (Tensor, optional): optional bias tensor of shape :math:`(\text{out\_channels})`. Default: ``None``
    stride (int or tuple): the stride of the convolving kernel. Can be a single number or a
      tuple `(sH, sW)`. Default: 1
```

**Formatting rules:**
- Parameter name in **lowercase**
- Type in parentheses: `(Type)`, `(Type, optional)` for optional parameters
- Description follows the type
- For optional parameters, include "Default: ``value``" at the end
- Use double backticks for inline code: ``` ``None`` ```
- Indent continuation lines by 2 spaces

### 7. Keyword Args Section (if applicable)

Sometimes keyword arguments are documented separately:

```python
Keyword args:
    dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
        Default: if None, same :class:`torch.dtype` as this tensor.
    device (:class:`torch.device`, optional): the desired device of returned tensor.
        Default: if None, same :class:`torch.device` as this tensor.
    requires_grad (bool, optional): If autograd should record operations on the
        returned tensor. Default: ``False``.
```

### 8. Returns Section (if needed)

Document the return value:

```python
Returns:
    Tensor: Sampled tensor of same shape as `logits` from the Gumbel-Softmax distribution.
        If ``hard=True``, the returned samples will be one-hot, otherwise they will
        be probability distributions that sum to 1 across `dim`.
```

Or simply include it in the function signature line if obvious from context.

### 9. Examples Section

Always include examples when possible:

```python
Examples::

    >>> inputs = torch.randn(33, 16, 30)
    >>> filters = torch.randn(20, 16, 5)
    >>> F.conv1d(inputs, filters)

    >>> # With square kernels and equal stride
    >>> filters = torch.randn(8, 4, 3, 3)
    >>> inputs = torch.randn(1, 4, 5, 5)
    >>> F.conv2d(inputs, filters, padding=1)
```

**Formatting rules:**
- Use `Examples::` with double colon
- Use `>>>` prompt for Python code
- Include comments with `#` when helpful
- Show actual output when it helps understanding (indent without `>>>`); see the example below
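
For instance, output lines appear directly beneath the prompt lines (a minimal illustration; the tensor value shown is simply what these calls produce):

```python
Examples::

    >>> a = torch.ones(2, 3)
    >>> a.sum()
    tensor(6.)
```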

### 10. External References

Link to papers or external documentation:

```python
.. _Link Name:
    https://arxiv.org/abs/1611.00712
```

Reference them in text: ```See `Link Name`_```

## Method Types

### Native Python Functions

For regular Python functions, use a standard docstring:

```python
def relu(input: Tensor, inplace: bool = False) -> Tensor:
    r"""relu(input, inplace=False) -> Tensor

    Applies the rectified linear unit function element-wise. See
    :class:`~torch.nn.ReLU` for more details.
    """
    # implementation
```

### C-Bound Functions (using add_docstr)

For C-bound functions, use `_add_docstr`:

```python
conv1d = _add_docstr(
    torch.conv1d,
    r"""
conv1d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor

Applies a 1D convolution over an input signal composed of several input
planes.

See :class:`~torch.nn.Conv1d` for details and output shape.

Args:
    input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iW)`
    weight: filters of shape :math:`(\text{out\_channels} , kW)`
    ...
""",
)
```

### In-Place Variants

For in-place operations (ending with `_`), reference the original:

```python
add_docstr_all(
    "abs_",
    r"""
abs_() -> Tensor

In-place version of :meth:`~Tensor.abs`
""",
)
```

### Alias Functions

For aliases, simply reference the original:

```python
add_docstr_all(
    "absolute",
    r"""
absolute() -> Tensor

Alias for :func:`abs`
""",
)
```

## Common Patterns

### Shape Documentation

Use LaTeX math notation for tensor shapes:

```python
:math:`(\text{minibatch} , \text{in\_channels} , iH , iW)`
```

### Reusable Argument Definitions

For commonly used arguments, define them once and reuse:

```python
common_args = parse_kwargs(
    """
    dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
        Default: if None, same as this tensor.
"""
)

# Then use with .format():
r"""
...

Keyword args:
    {dtype}
    {device}
""".format(**common_args)
```

### Template Insertion

Insert reproducibility notes or other common text:

```python
r"""
{tf32_note}

{cudnn_reproducibility_note}
""".format(**reproducibility_notes, **tf32_notes)
```

## Complete Example

Here's a complete example showing all elements:

```python
def gumbel_softmax(
    logits: Tensor,
    tau: float = 1,
    hard: bool = False,
    eps: float = 1e-10,
    dim: int = -1,
) -> Tensor:
    r"""
    Sample from the Gumbel-Softmax distribution and optionally discretize.

    Args:
        logits (Tensor): `[..., num_features]` unnormalized log probabilities
        tau (float): non-negative scalar temperature
        hard (bool): if ``True``, the returned samples will be discretized as one-hot vectors,
              but will be differentiated as if it is the soft sample in autograd. Default: ``False``
        dim (int): A dimension along which softmax will be computed. Default: -1

    Returns:
        Tensor: Sampled tensor of same shape as `logits` from the Gumbel-Softmax distribution.
            If ``hard=True``, the returned samples will be one-hot, otherwise they will
            be probability distributions that sum to 1 across `dim`.

    .. note::
        This function is here for legacy reasons, may be removed from nn.Functional in the future.

    Examples::
        >>> logits = torch.randn(20, 32)
        >>> # Sample soft categorical using reparametrization trick:
        >>> F.gumbel_softmax(logits, tau=1, hard=False)
        >>> # Sample hard categorical using "Straight-through" trick:
        >>> F.gumbel_softmax(logits, tau=1, hard=True)

    .. _Link 1:
        https://arxiv.org/abs/1611.00712
    """
    # implementation
```

## Quick Checklist

When writing a PyTorch docstring, ensure:

- [ ] Use raw string (`r"""`)
- [ ] Include function signature on first line
- [ ] Provide brief description
- [ ] Document all parameters in Args section with types
- [ ] Include default values for optional parameters
- [ ] Use Sphinx cross-references (`:func:`, `:class:`, `:meth:`)
- [ ] Add mathematical formulas if applicable
- [ ] Include at least one example in Examples section
- [ ] Add warnings/notes for important caveats
- [ ] Link to related module class with `:class:`
- [ ] Use proper math notation for tensor shapes
- [ ] Follow consistent formatting and indentation

## Common Sphinx Roles Reference

- `:class:\`~torch.nn.Module\`` - Class reference
- `:func:\`torch.function\`` - Function reference
- `:meth:\`~Tensor.method\`` - Method reference
- `:attr:\`attribute\`` - Attribute reference
- `:math:\`equation\`` - Inline math
- `:ref:\`label\`` - Internal reference
- ``` ``code`` ``` - Inline code (use double backticks)

## Additional Notes

- **Indentation**: Use 4 spaces for code, 2 spaces for continuation of parameter descriptions
- **Line length**: Try to keep lines under 100 characters when possible
- **Periods**: End sentences with periods, but not the signature line
- **Backticks**: Use double backticks for code: ``` ``True`` ``None`` ``False`` ```
- **Types**: Common types are `Tensor`, `int`, `float`, `bool`, `str`, `tuple`, `list`, etc.
@@ -1,385 +0,0 @@
---
name: skill-writer
description: Guide users through creating Agent Skills for Claude Code. Use when the user wants to create, write, author, or design a new Skill, or needs help with SKILL.md files, frontmatter, or skill structure.
---

# Skill Writer

This Skill helps you create well-structured Agent Skills for Claude Code that follow best practices and validation requirements.

## When to use this Skill

Use this Skill when:
- Creating a new Agent Skill
- Writing or updating SKILL.md files
- Designing skill structure and frontmatter
- Troubleshooting skill discovery issues
- Converting existing prompts or workflows into Skills

## Instructions

### Step 1: Determine Skill scope

First, understand what the Skill should do:

1. **Ask clarifying questions**:
   - What specific capability should this Skill provide?
   - When should Claude use this Skill?
   - What tools or resources does it need?
   - Is this for personal use or team sharing?

2. **Keep it focused**: One Skill = one capability
   - Good: "PDF form filling", "Excel data analysis"
   - Too broad: "Document processing", "Data tools"

### Step 2: Choose Skill location

Determine where to create the Skill:

**Personal Skills** (`~/.claude/skills/`):
- Individual workflows and preferences
- Experimental Skills
- Personal productivity tools

**Project Skills** (`.claude/skills/`):
- Team workflows and conventions
- Project-specific expertise
- Shared utilities (committed to git)

### Step 3: Create Skill structure

Create the directory and files:

```bash
# Personal
mkdir -p ~/.claude/skills/skill-name

# Project
mkdir -p .claude/skills/skill-name
```

For multi-file Skills:
```
skill-name/
├── SKILL.md (required)
├── reference.md (optional)
├── examples.md (optional)
├── scripts/
│   └── helper.py (optional)
└── templates/
    └── template.txt (optional)
```

### Step 4: Write SKILL.md frontmatter

Create YAML frontmatter with required fields:

```yaml
---
name: skill-name
description: Brief description of what this does and when to use it
---
```

**Field requirements**:

- **name**:
  - Lowercase letters, numbers, hyphens only
  - Max 64 characters
  - Must match directory name
  - Good: `pdf-processor`, `git-commit-helper`
  - Bad: `PDF_Processor`, `Git Commits!`

- **description**:
  - Max 1024 characters
  - Include BOTH what it does AND when to use it
  - Use specific trigger words users would say
  - Mention file types, operations, and context

**Optional frontmatter fields**:

- **allowed-tools**: Restrict tool access (comma-separated list)
  ```yaml
  allowed-tools: Read, Grep, Glob
  ```
  Use for:
  - Read-only Skills
  - Security-sensitive workflows
  - Limited-scope operations

### Step 5: Write effective descriptions

The description is critical for Claude to discover your Skill.

**Formula**: `[What it does] + [When to use it] + [Key triggers]`

**Examples**:

✅ **Good**:
```yaml
description: Extract text and tables from PDF files, fill forms, merge documents. Use when working with PDF files or when the user mentions PDFs, forms, or document extraction.
```

✅ **Good**:
```yaml
description: Analyze Excel spreadsheets, create pivot tables, and generate charts. Use when working with Excel files, spreadsheets, or analyzing tabular data in .xlsx format.
```

❌ **Too vague**:
```yaml
description: Helps with documents
description: For data analysis
```

**Tips**:
- Include specific file extensions (.pdf, .xlsx, .json)
- Mention common user phrases ("analyze", "extract", "generate")
- List concrete operations (not generic verbs)
- Add context clues ("Use when...", "For...")

### Step 6: Structure the Skill content

Use clear Markdown sections:

```markdown
# Skill Name

Brief overview of what this Skill does.

## Quick start

Provide a simple example to get started immediately.

## Instructions

Step-by-step guidance for Claude:
1. First step with clear action
2. Second step with expected outcome
3. Handle edge cases

## Examples

Show concrete usage examples with code or commands.

## Best practices

- Key conventions to follow
- Common pitfalls to avoid
- When to use vs. not use

## Requirements

List any dependencies or prerequisites:
```bash
pip install package-name
```

## Advanced usage

For complex scenarios, see [reference.md](reference.md).
```

### Step 7: Add supporting files (optional)

Create additional files for progressive disclosure:

**reference.md**: Detailed API docs, advanced options
**examples.md**: Extended examples and use cases
**scripts/**: Helper scripts and utilities
**templates/**: File templates or boilerplate

Reference them from SKILL.md:
```markdown
For advanced usage, see [reference.md](reference.md).

Run the helper script:
\`\`\`bash
python scripts/helper.py input.txt
\`\`\`
```

### Step 8: Validate the Skill

Check these requirements (a rough automated check is sketched after this list):

✅ **File structure**:
- [ ] SKILL.md exists in correct location
- [ ] Directory name matches frontmatter `name`

✅ **YAML frontmatter**:
- [ ] Opening `---` on line 1
- [ ] Closing `---` before content
- [ ] Valid YAML (no tabs, correct indentation)
- [ ] `name` follows naming rules
- [ ] `description` is specific and < 1024 chars

✅ **Content quality**:
- [ ] Clear instructions for Claude
- [ ] Concrete examples provided
- [ ] Edge cases handled
- [ ] Dependencies listed (if any)

✅ **Testing**:
- [ ] Description matches user questions
- [ ] Skill activates on relevant queries
- [ ] Instructions are clear and actionable

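For a quick automated pass over these rules, a small script along the following lines can help. This is an illustrative sketch (a hypothetical helper, not something Claude Code ships), and it covers only the frontmatter checks above:

```python
# validate_skill.py -- rough SKILL.md frontmatter checker (illustrative sketch)
import re
import sys

def validate_skill(path: str) -> list[str]:
    text = open(path, encoding="utf-8").read()
    # Frontmatter must open with --- on line 1 and close with --- before content
    match = re.match(r"^---\n(.*?)\n---\n", text, re.DOTALL)
    if not match:
        return ["missing or unterminated YAML frontmatter"]
    fields = {}
    for line in match.group(1).splitlines():
        key, sep, value = line.partition(":")
        if sep:
            fields[key.strip()] = value.strip()
    errors = []
    # name: lowercase letters, numbers, hyphens only; max 64 chars
    if not re.fullmatch(r"[a-z0-9-]{1,64}", fields.get("name", "")):
        errors.append("name must use lowercase letters, numbers, hyphens; max 64 chars")
    # description: required, max 1024 chars
    description = fields.get("description", "")
    if not description or len(description) > 1024:
        errors.append("description missing or over 1024 characters")
    return errors

if __name__ == "__main__":
    problems = validate_skill(sys.argv[1])
    print("\n".join(problems) if problems else "SKILL.md frontmatter looks valid")
```
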
### Step 9: Test the Skill

1. **Restart Claude Code** (if running) to load the Skill

2. **Ask relevant questions** that match the description:
   ```
   Can you help me extract text from this PDF?
   ```

3. **Verify activation**: Claude should use the Skill automatically

4. **Check behavior**: Confirm Claude follows the instructions correctly

### Step 10: Debug if needed

If Claude doesn't use the Skill:

1. **Make description more specific**:
   - Add trigger words
   - Include file types
   - Mention common user phrases

2. **Check file location**:
   ```bash
   ls ~/.claude/skills/skill-name/SKILL.md
   ls .claude/skills/skill-name/SKILL.md
   ```

3. **Validate YAML**:
   ```bash
   cat SKILL.md | head -n 10
   ```

4. **Run debug mode**:
   ```bash
   claude --debug
   ```

## Common patterns

### Read-only Skill

```yaml
---
name: code-reader
description: Read and analyze code without making changes. Use for code review, understanding codebases, or documentation.
allowed-tools: Read, Grep, Glob
---
```

### Script-based Skill

```yaml
---
name: data-processor
description: Process CSV and JSON data files with Python scripts. Use when analyzing data files or transforming datasets.
---

# Data Processor

## Instructions

1. Use the processing script:
\`\`\`bash
python scripts/process.py input.csv --output results.json
\`\`\`

2. Validate output with:
\`\`\`bash
python scripts/validate.py results.json
\`\`\`
```

### Multi-file Skill with progressive disclosure

```yaml
---
name: api-designer
description: Design REST APIs following best practices. Use when creating API endpoints, designing routes, or planning API architecture.
---

# API Designer

Quick start: See [examples.md](examples.md)

Detailed reference: See [reference.md](reference.md)

## Instructions

1. Gather requirements
2. Design endpoints (see examples.md)
3. Document with OpenAPI spec
4. Review against best practices (see reference.md)
```

## Best practices for Skill authors

1. **One Skill, one purpose**: Don't create mega-Skills
2. **Specific descriptions**: Include trigger words users will say
3. **Clear instructions**: Write for Claude, not humans
4. **Concrete examples**: Show real code, not pseudocode
5. **List dependencies**: Mention required packages in description
6. **Test with teammates**: Verify activation and clarity
7. **Version your Skills**: Document changes in content
8. **Use progressive disclosure**: Put advanced details in separate files

## Validation checklist

Before finalizing a Skill, verify:

- [ ] Name is lowercase, hyphens only, max 64 chars
- [ ] Description is specific and < 1024 chars
- [ ] Description includes "what" and "when"
- [ ] YAML frontmatter is valid
- [ ] Instructions are step-by-step
- [ ] Examples are concrete and realistic
- [ ] Dependencies are documented
- [ ] File paths use forward slashes
- [ ] Skill activates on relevant queries
- [ ] Claude follows instructions correctly

## Troubleshooting

**Skill doesn't activate**:
- Make description more specific with trigger words
- Include file types and operations in description
- Add "Use when..." clause with user phrases

**Multiple Skills conflict**:
- Make descriptions more distinct
- Use different trigger words
- Narrow the scope of each Skill

**Skill has errors**:
- Check YAML syntax (no tabs, proper indentation)
- Verify file paths (use forward slashes)
- Ensure scripts have execute permissions
- List all dependencies

## Examples

See the documentation for complete examples:
- Simple single-file Skill (commit-helper)
- Skill with tool permissions (code-reviewer)
- Multi-file Skill (pdf-processing)

## Output format

When creating a Skill, I will:

1. Ask clarifying questions about scope and requirements
2. Suggest a Skill name and location
3. Create the SKILL.md file with proper frontmatter
4. Include clear instructions and examples
5. Add supporting files if needed
6. Provide testing instructions
7. Validate against all requirements

The result will be a complete, working Skill that follows all best practices and validation rules.

.flake8 (6 changed lines)

@@ -7,12 +7,16 @@ max-line-length = 120
# C408 ignored because we like the dict keyword argument syntax
# E501 is not flexible enough, we're using B950 instead
ignore =
    E203,E305,E402,E501,E704,E741,F405,F841,F999,W503,W504,C408,E302,W291,E303,F824,
    E203,E305,E402,E501,E704,E721,E741,F405,F841,F999,W503,W504,C408,E302,W291,E303,F824,
    # shebang has extra meaning in fbcode lints, so I think it's not worth trying
    # to line this up with executable bit
    EXE001,
    # these ignores are from flake8-bugbear; please fix!
    B007,B008,B017,B019,B023,B028,B903,B905,B906,B907,B908,B910
    # these ignores are from flake8-comprehensions; please fix!
    C407,
    # these ignores are from flake8-logging-format; please fix!
    G100,G101,G200
    # these ignores are from flake8-simplify. please fix or ignore with commented reason
    SIM105,SIM108,SIM110,SIM111,SIM113,SIM114,SIM115,SIM116,SIM117,SIM118,SIM119,SIM12,
    # SIM104 is already covered by pyupgrade ruff

.github/ISSUE_TEMPLATE/ci-sev.md (1 changed line)

@@ -8,7 +8,6 @@ assignees: ''
---

> NOTE: Remember to label this issue with "`ci: sev`"
>       If you want autorevert to be disabled, keep the ci: disable-autorevert label

 <!-- Add the `merge blocking` label to this PR to prevent PRs from being merged while this issue is open -->


.github/ISSUE_TEMPLATE/disable-autorevert.md (4 changed lines)

@@ -1,7 +1,7 @@
---
name: "D❌\U0001F519 ISABLE AUTOREVERT"
name: DISABLE AUTOREVERT
about: Disables autorevert when open
title: "[DISABLE AUTOREVERT]"
title: "❌\U0001F519 [DISABLE AUTOREVERT]"
labels: 'ci: disable-autorevert'
assignees: ''


@@ -65,7 +65,7 @@ runs:
          cd .ci/lumen_cli
          python3 -m pip install -e .
        )
        MAX_JOBS="$(nproc --ignore=10)"
        MAX_JOBS="$(nproc --ignore=6)"
        export MAX_JOBS

        # Split the comma-separated list and build each target

.github/actions/setup-rocm/action.yml (20 changed lines)

@@ -111,23 +111,3 @@ runs:
        # This video group ID maps to subgid 1 inside the docker image due to the /etc/subgid entries.
        # The group name corresponding to group ID 1 can change depending on the OS, so both are necessary.
        echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd $DEVICE_FLAG --group-add video --group-add $render_gid --group-add daemon --group-add bin --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --network=host" >> "${GITHUB_ENV}"

    - name: configure aws credentials
      id: aws_creds
      uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722 # v4.1.0
      with:
        role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_s3_and_ecr_read_only
        aws-region: us-east-1
        role-duration-seconds: 18000

    - name: Login to Amazon ECR
      id: login-ecr
      continue-on-error: true
      uses: aws-actions/amazon-ecr-login@062b18b96a7aff071d4dc91bc00c4c1a7945b076 # v2.0.1

    - name: Preserve github env variables for use in docker
      shell: bash
      run: |
        env | grep '^GITHUB' >> "${RUNNER_TEMP}/github_env_${GITHUB_RUN_ID}"
        env | grep '^CI' >> "${RUNNER_TEMP}/github_env_${GITHUB_RUN_ID}"
        env | grep '^RUNNER' >> "${RUNNER_TEMP}/github_env_${GITHUB_RUN_ID}"

.github/ci_commit_pins/audio.txt (2 changed lines)

@@ -1 +1 @@
69bbe7363897764f9e758d851cd0340147d27f94
87ff22e49ed0e92576c4935ccb8c143daac4a3cd

.github/ci_commit_pins/vision.txt (2 changed lines)

@@ -1 +1 @@
1752fe6809b74921644866275ab80244b96e80bc
966da7e46f65d6d49df3e31214470a4fe5cc8e66

.github/ci_commit_pins/vllm.txt (2 changed lines)

@@ -1 +1 @@
e5192819208c4d68194844b7dfafbc00020d0dea
0ad9951c416d33c5da4f7a504fb162cbe62386f5

.github/ci_commit_pins/xla.txt (2 changed lines)

@@ -1 +1 @@
df6798dfb931ce7c7fe5bed2447cd1092a5981af
2a9138a26ee257fef05310ad3fecf7c55fe80d73

| @ -1,41 +1,59 @@ | ||||
| # TODO(elainwy): remove this file after the torch nightly dockerfile is in sync in vllm repo | ||||
| # The vLLM Dockerfile is used to construct vLLM image against torch nightly and torch main that can be directly used for testing | ||||
| 
 | ||||
| ARG CUDA_VERSION=12.8.1 | ||||
| ARG PYTHON_VERSION=3.12 | ||||
| 
 | ||||
| # BUILD_BASE_IMAGE: used to setup python build xformers, and vllm wheels, It can be replaced with a different base image from local machine, | ||||
| # by default, it uses the torch-nightly-base stage from this docker image | ||||
| ARG BUILD_BASE_IMAGE=torch-nightly-base | ||||
| 
 | ||||
| # FINAL_BASE_IMAGE: used to set up vllm-instaled environment and build flashinfer, | ||||
| # by default, it uses devel-ubuntu22.04 official image. | ||||
| ARG FINAL_BASE_IMAGE=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04 | ||||
| 
 | ||||
| # The logic is copied from https://github.com/vllm-project/vllm/blob/main/docker/Dockerfile | ||||
| ARG GET_PIP_URL="https://bootstrap.pypa.io/get-pip.py" | ||||
| 
 | ||||
| 
 | ||||
| #################### TORCH NIGHTLY BASE IMAGE #################### | ||||
| # A base image for building vLLM with devel ubuntu 22.04, this is mainly used to build vllm in vllm builtkite ci | ||||
| FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04 as torch-nightly-base | ||||
| 
 | ||||
| ARG CUDA_VERSION | ||||
| ARG PYTHON_VERSION | ||||
| ARG GET_PIP_URL | ||||
| 
 | ||||
| # Install system dependencies and uv, then create Python virtual environment | ||||
| # Install Python and other dependencies | ||||
| RUN apt-get update -y \ | ||||
|     && apt-get install -y ccache software-properties-common git curl sudo vim python3-pip \ | ||||
|     && curl -LsSf https://astral.sh/uv/install.sh | sh \ | ||||
|     && $HOME/.local/bin/uv venv /opt/venv --python ${PYTHON_VERSION} \ | ||||
|     && rm -f /usr/bin/python3 /usr/bin/python3-config /usr/bin/pip \ | ||||
|     && ln -s /opt/venv/bin/python3 /usr/bin/python3 \ | ||||
|     && ln -s /opt/venv/bin/python3-config /usr/bin/python3-config \ | ||||
|     && ln -s /opt/venv/bin/pip /usr/bin/pip \ | ||||
|     && apt-get install -y ccache software-properties-common git curl wget sudo vim \ | ||||
|     && add-apt-repository -y ppa:deadsnakes/ppa \ | ||||
|     && apt-get update -y \ | ||||
|     && apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv \ | ||||
|     && update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1 \ | ||||
|     && update-alternatives --set python3 /usr/bin/python${PYTHON_VERSION} \ | ||||
|     && ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config \ | ||||
|     && curl -sS ${GET_PIP_URL} | python${PYTHON_VERSION} \ | ||||
|     && python3 --version && python3 -m pip --version | ||||
| 
 | ||||
| # Upgrade to GCC 10 to avoid https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92519 | ||||
| # as it was causing spam when compiling the CUTLASS kernels | ||||
| RUN apt-get install -y gcc-10 g++-10 | ||||
| RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 110 --slave /usr/bin/g++ g++ /usr/bin/g++-10 | ||||
| RUN <<EOF | ||||
| gcc --version | ||||
| EOF | ||||
| # Ensure gcc >= 10 to avoid CUTLASS issues (bug 92519) | ||||
| RUN current_gcc_version=$(gcc -dumpversion | cut -f1 -d.) && \ | ||||
|     if command -v apt-get >/dev/null; then \ | ||||
|         if [ "$current_gcc_version" -lt 10 ]; then \ | ||||
|             echo "GCC version is $current_gcc_version, installing gcc-10..."; \ | ||||
|             apt-get update \ | ||||
|             && apt-get install -y gcc-10 g++-10 \ | ||||
|             && update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 100 \ | ||||
|             && update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-10 100; \ | ||||
|         else \ | ||||
|             echo "GCC version is $current_gcc_version, no need to install gcc-10."; \ | ||||
|         fi \ | ||||
|     fi \ | ||||
|     && gcc --version && g++ --version | ||||
| 
 | ||||
| # Install uv for faster pip installs | ||||
| # install uv for faster pip installs | ||||
| RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|     python3 -m pip install uv==0.8.4 | ||||
| 
 | ||||
| @ -43,32 +61,36 @@ ENV UV_HTTP_TIMEOUT=500 | ||||
| ENV UV_INDEX_STRATEGY="unsafe-best-match" | ||||
| # Use copy mode to avoid hardlink failures with Docker cache mounts | ||||
| ENV UV_LINK_MODE=copy | ||||
| 
 | ||||
| #################### TORCH NIGHTLY  BASE IMAGE #################### | ||||
| 
 | ||||
| 
 | ||||
| #################### BASE BUILD IMAGE #################### | ||||
| # A base image for building vLLM with torch nightly or torch wheels | ||||
| # prepare basic build environment | ||||
| FROM ${BUILD_BASE_IMAGE} AS base | ||||
| USER root | ||||
| 
 | ||||
| ARG CUDA_VERSION | ||||
| ARG PYTHON_VERSION | ||||
| 
 | ||||
| # Only work with PyTorch manylinux builder | ||||
| # TODO (huydhn): Only work with PyTorch manylinux builder | ||||
| ENV PATH="/opt/python/cp312-cp312/bin:${PATH}" | ||||
| 
 | ||||
| # Install some system dependencies and double check python version | ||||
| RUN if command -v apt-get >/dev/null; then \ | ||||
|         apt-get update -y \ | ||||
|         && apt-get install -y ccache software-properties-common git wget sudo vim; \ | ||||
|         && apt-get install -y ccache software-properties-common git curl wget sudo vim; \ | ||||
|     else \ | ||||
|         dnf install -y git wget sudo; \ | ||||
|         dnf install -y git curl wget sudo; \ | ||||
|     fi \ | ||||
|     && python3 --version && python3 -m pip --version | ||||
| 
 | ||||
| # Install uv for faster pip installs if not existed | ||||
| RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|     python3 -m pip install uv==0.8.4 | ||||
| 
 | ||||
|     if ! python3 -m uv --version >/dev/null 2>&1; then \ | ||||
|         python3 -m pip install uv==0.8.4; \ | ||||
|     fi | ||||
| ENV UV_HTTP_TIMEOUT=500 | ||||
| ENV UV_INDEX_STRATEGY="unsafe-best-match" | ||||
| # Use copy mode to avoid hardlink failures with Docker cache mounts | ||||
| @ -76,15 +98,15 @@ ENV UV_LINK_MODE=copy | ||||
| 
 | ||||
| WORKDIR /workspace | ||||
| 
 | ||||
| # Install build and runtime dependencies | ||||
| # install build and runtime dependencies | ||||
| COPY requirements/common.txt requirements/common.txt | ||||
| COPY use_existing_torch.py use_existing_torch.py | ||||
| COPY pyproject.toml pyproject.toml | ||||
| 
 | ||||
| # Install build and runtime dependencies without stable torch version | ||||
| # install build and runtime dependencies without stable torch version | ||||
| RUN python3 use_existing_torch.py | ||||
| 
 | ||||
| # Default mount file as a placeholder; this just avoids the mount error | ||||
| # default mount file as a placeholder; this just avoids the mount error | ||||
| # change to a different vllm folder if this one no longer exists | ||||
| ARG TORCH_WHEELS_PATH="./requirements" | ||||
| ARG PINNED_TORCH_VERSION | ||||
| @ -116,36 +138,56 @@ RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
| RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|     uv pip install --system -r requirements/common.txt | ||||
| 
 | ||||
| # Must be set before installing xformers, so that the correct version of xformers is built. | ||||
| ARG xformers_cuda_arch_list='7.5;8.0+PTX;9.0a' | ||||
| ENV TORCH_CUDA_ARCH_LIST=${xformers_cuda_arch_list} | ||||
| 
 | ||||
| ARG max_jobs=16 | ||||
| ENV MAX_JOBS=${max_jobs} | ||||
| 
 | ||||
| RUN --mount=type=cache,target=/root/.cache/uv bash - <<'BASH' | ||||
|     export TORCH_CUDA_ARCH_LIST='7.5 8.0+PTX 9.0a' | ||||
|     git clone https://github.com/facebookresearch/xformers.git | ||||
| RUN echo ${TORCH_CUDA_ARCH_LIST} | ||||
| RUN echo ${MAX_JOBS} | ||||
| RUN pip freeze | grep -E 'ninja' | ||||
| 
 | ||||
|     pushd xformers | ||||
|     git checkout v0.0.32.post2 | ||||
|     git submodule update --init --recursive | ||||
|     python3 setup.py bdist_wheel --dist-dir=../xformers-dist --verbose | ||||
|     popd | ||||
| # Build xformers with CUDA against the torch nightly/wheel | ||||
| # following official xformers guidance: https://github.com/facebookresearch/xformers#build | ||||
| # sha for https://github.com/facebookresearch/xformers/tree/v0.0.32.post2 | ||||
| ARG XFORMERS_COMMIT=5d4b92a5e5a9c6c6d4878283f47d82e17995b468 | ||||
| ENV CCACHE_DIR=/root/.cache/ccache | ||||
| 
 | ||||
|     rm -rf xformers | ||||
| BASH | ||||
| RUN --mount=type=cache,target=/root/.cache/ccache \ | ||||
|     --mount=type=cache,target=/root/.cache/uv \ | ||||
|     echo 'git clone xformers...' \ | ||||
|     && git clone https://github.com/facebookresearch/xformers.git --recursive \ | ||||
|     && cd xformers \ | ||||
|     && git checkout ${XFORMERS_COMMIT} \ | ||||
|     && git submodule update --init --recursive \ | ||||
|     && echo 'finish git clone xformers...' \ | ||||
|     && rm -rf build \ | ||||
|     && python3 setup.py bdist_wheel --dist-dir=../xformers-dist --verbose \ | ||||
|     && cd .. \ | ||||
|     && rm -rf xformers | ||||
| 
 | ||||
| RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|     uv pip install --system xformers-dist/*.whl | ||||
|     uv pip install --system xformers-dist/*.whl --verbose | ||||
| 
 | ||||
| # The build can take a long time, and the torch nightly version fetched from the URL can differ in the next Docker stage. | ||||
| # Track the nightly torch version used in the build so that the runtime environment can install the exact same version. | ||||
| RUN uv pip freeze | grep -i '^torch\|^torchvision\|^torchaudio' > torch_build_versions.txt | ||||
| 
 | ||||
| RUN cat torch_build_versions.txt | ||||
| RUN pip freeze | grep -E 'torch|xformers|torchvision|torchaudio' | ||||
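For illustration, what the grep pipelines above leave in torch_build_versions.txt: the exact pinned nightly builds, which the runtime stage later reinstalls. A small Python sketch that approximates the filtering (the version strings are made-up placeholders, not real pins):

    freeze = [
        "torch==2.9.0.dev20250101+cu130",         # placeholder version
        "torchvision==0.24.0.dev20250101+cu130",  # placeholder version
        "numpy==2.1.0",
    ]
    # approximates grep -i '^torch\|^torchvision\|^torchaudio'
    pins = [line for line in freeze if line.startswith(("torch", "torchvision", "torchaudio"))]
    print("\n".join(pins))  # roughly what torch_build_versions.txt would contain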
| 
 | ||||
| #################### BASE BUILD IMAGE #################### | ||||
| 
 | ||||
| 
 | ||||
| #################### WHEEL BUILD IMAGE #################### | ||||
| # Image used to build vllm wheel | ||||
| FROM base AS build | ||||
| ARG TARGETPLATFORM | ||||
| 
 | ||||
| COPY . . | ||||
| 
 | ||||
| RUN python3 use_existing_torch.py | ||||
| 
 | ||||
| RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
| @ -155,17 +197,20 @@ ARG GIT_REPO_CHECK=0 | ||||
| RUN --mount=type=bind,source=.git,target=.git \ | ||||
|     if [ "$GIT_REPO_CHECK" != "0" ]; then bash tools/check_repo.sh ; fi | ||||
| 
 | ||||
| # Max jobs used by Ninja to build extensions | ||||
| ARG max_jobs=16 | ||||
| ENV MAX_JOBS=${max_jobs} | ||||
| ARG nvcc_threads=8 | ||||
| ARG nvcc_threads=4 | ||||
| ENV NVCC_THREADS=$nvcc_threads | ||||
| ARG torch_cuda_arch_list='8.0 8.6 8.9 9.0' | ||||
| ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list} | ||||
| 
 | ||||
| ARG USE_SCCACHE | ||||
| ARG SCCACHE_BUCKET_NAME=vllm-build-sccache | ||||
| ARG SCCACHE_REGION_NAME=us-west-2 | ||||
| ARG SCCACHE_S3_NO_CREDENTIALS=0 | ||||
| 
 | ||||
| # Use sccache to speed up compilation | ||||
| # if USE_SCCACHE is set, use sccache to speed up compilation | ||||
| RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|     --mount=type=bind,source=.git,target=.git \ | ||||
|     if [ "$USE_SCCACHE" = "1" ]; then \ | ||||
| @ -190,9 +235,6 @@ RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|         && sccache --show-stats; \ | ||||
|     fi | ||||
| 
 | ||||
| ARG torch_cuda_arch_list='8.0 8.6 8.9 9.0' | ||||
| ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list} | ||||
| 
 | ||||
| ARG vllm_target_device="cuda" | ||||
| ENV VLLM_TARGET_DEVICE=${vllm_target_device} | ||||
| ENV CCACHE_DIR=/root/.cache/ccache | ||||
| @ -206,10 +248,17 @@ RUN --mount=type=cache,target=/root/.cache/ccache \ | ||||
|         export VLLM_DOCKER_BUILD_CONTEXT=1 && \ | ||||
|         python3 setup.py bdist_wheel --dist-dir=vllm-dist --py-limited-api=cp38; \ | ||||
|     fi | ||||
| 
 | ||||
| RUN echo "[INFO] Listing current directory:" && \ | ||||
|     ls -al && \ | ||||
|     echo "[INFO] Showing torch_build_versions.txt content:" && \ | ||||
|     cat torch_build_versions.txt | ||||
| 
 | ||||
| #################### WHEEL BUILD IMAGE #################### | ||||
| 
 | ||||
| 
 | ||||
| ################### VLLM INSTALLED IMAGE #################### | ||||
| # Set up a clean environment for vLLM tests and the API server, using Ubuntu 22.04 with AOT flashinfer | ||||
| FROM ${FINAL_BASE_IMAGE} AS vllm-base | ||||
| USER root | ||||
| 
 | ||||
| @ -217,7 +266,7 @@ ARG CUDA_VERSION | ||||
| ARG PYTHON_VERSION | ||||
| ARG GET_PIP_URL | ||||
| 
 | ||||
| # Only works with the PyTorch manylinux builder | ||||
| # TODO (huydhn): Only works with the PyTorch manylinux builder | ||||
| ENV PATH="/opt/python/cp312-cp312/bin:${PATH}" | ||||
| 
 | ||||
| # environment preparation starts here | ||||
| @ -226,19 +275,20 @@ WORKDIR /workspace | ||||
| # Install Python and other dependencies | ||||
| RUN if command -v apt-get >/dev/null; then \ | ||||
|         apt-get update -y \ | ||||
|         && apt-get install -y ccache software-properties-common git sudo vim python3-pip; \ | ||||
|         && apt-get install -y ccache software-properties-common git curl wget sudo vim \ | ||||
|         && add-apt-repository -y ppa:deadsnakes/ppa \ | ||||
|         && apt-get update -y \ | ||||
|         && apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv \ | ||||
|         && update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1 \ | ||||
|         && update-alternatives --set python3 /usr/bin/python${PYTHON_VERSION} \ | ||||
|         && ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config \ | ||||
|         && curl -sS ${GET_PIP_URL} | python${PYTHON_VERSION}; \ | ||||
|     else \ | ||||
|         dnf install -y git wget sudo; \ | ||||
|         dnf install -y git curl wget sudo; \ | ||||
|     fi \ | ||||
|     && curl -LsSf https://astral.sh/uv/install.sh | sh \ | ||||
|     && $HOME/.local/bin/uv venv /opt/venv --python ${PYTHON_VERSION} \ | ||||
|     && rm -f /usr/bin/python3 /usr/bin/python3-config /usr/bin/pip \ | ||||
|     && ln -s /opt/venv/bin/python3 /usr/bin/python3 \ | ||||
|     && ln -s /opt/venv/bin/python3-config /usr/bin/python3-config \ | ||||
|     && ln -s /opt/venv/bin/pip /usr/bin/pip \ | ||||
|     && python3 --version && python3 -m pip --version | ||||
| 
 | ||||
| # Get the torch versions and wheels used in the previous stage | ||||
| # Get the torch versions and wheels used in previous stages for consistency | ||||
| COPY --from=base /workspace/torch_build_versions.txt ./torch_build_versions.txt | ||||
| COPY --from=base /workspace/xformers-dist /wheels/xformers | ||||
| COPY --from=build /workspace/vllm-dist /wheels/vllm | ||||
| @ -247,29 +297,33 @@ RUN echo "[INFO] Listing current directory before torch install step:" && \ | ||||
|     echo "[INFO] Showing torch_build_versions.txt content:" && \ | ||||
|     cat torch_build_versions.txt | ||||
| 
 | ||||
| # Install uv for faster pip installs if not already present | ||||
| RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|     python3 -m pip install uv==0.8.4 | ||||
| 
 | ||||
| ENV UV_HTTP_TIMEOUT=500 | ||||
| ENV UV_INDEX_STRATEGY="unsafe-best-match" | ||||
| # Use copy mode to avoid hardlink failures with Docker cache mounts | ||||
| ENV UV_LINK_MODE=copy | ||||
| 
 | ||||
| # Install build and runtime dependencies; these are needed for the flashinfer install | ||||
| COPY requirements/build.txt requirements/build.txt | ||||
| COPY use_existing_torch.py use_existing_torch.py | ||||
| RUN python3 use_existing_torch.py | ||||
| RUN cat requirements/build.txt | ||||
| 
 | ||||
| # Install uv for faster pip installs if not already present | ||||
| RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|     if ! python3 -m uv --version > /dev/null 2>&1; then \ | ||||
|         python3 -m pip install uv==0.8.4; \ | ||||
|     fi | ||||
| 
 | ||||
| ENV UV_HTTP_TIMEOUT=500 | ||||
| ENV UV_INDEX_STRATEGY="unsafe-best-match" | ||||
| # Use copy mode to avoid hardlink failures with Docker cache mounts | ||||
| ENV UV_LINK_MODE=copy | ||||
| 
 | ||||
| 
 | ||||
| RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|     uv pip install --system -r requirements/build.txt | ||||
| 
 | ||||
| 
 | ||||
| # Default mount file as placeholder, this just avoid the mount error | ||||
| ARG TORCH_WHEELS_PATH="./requirements" | ||||
| # Install torch, torchaudio and torchvision. If TORCH_WHEELS_PATH is left at the | ||||
| # default ./requirements, it will pull the nightly versions using pip. Otherwise, | ||||
| # it will use the local wheels from TORCH_WHEELS_PATH | ||||
| # Install torch, torchaudio and torchvision | ||||
| # if TORCH_WHEELS_PATH is the default "./requirements", it will pull the nightly versions pinned in torch_build_versions.txt using pip | ||||
| # otherwise, it will use the wheels from TORCH_WHEELS_PATH on the host machine | ||||
| RUN --mount=type=bind,source=${TORCH_WHEELS_PATH},target=/dist \ | ||||
|     --mount=type=cache,target=/root/.cache/uv \ | ||||
|     if [ -n "$TORCH_WHEELS_PATH" ] && [ "$TORCH_WHEELS_PATH" != "./requirements" ] && [ -d "/dist" ] && ls /dist/torch*.whl >/dev/null 2>&1; then \ | ||||
| @ -283,9 +337,6 @@ RUN --mount=type=bind,source=${TORCH_WHEELS_PATH},target=/dist \ | ||||
|         uv pip install --system $(cat torch_build_versions.txt | xargs) --index-url https://download.pytorch.org/whl/nightly/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.'); \ | ||||
|     fi | ||||
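The fallback branch derives the nightly wheel index from CUDA_VERSION: the pipeline "cut -d. -f1,2 | tr -d '.'" turns 13.0.0 into cu130. The same mapping in Python, as a sketch:

    def nightly_index_url(cuda_version: str) -> str:
        # "13.0.0" -> "cu130", "12.8.1" -> "cu128"
        major, minor = cuda_version.split(".")[:2]
        return f"https://download.pytorch.org/whl/nightly/cu{major}{minor}"

    assert nightly_index_url("13.0.0").endswith("cu130")
    assert nightly_index_url("12.8.1").endswith("cu128")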
| 
 | ||||
| RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|     uv pip install --system --pre apache-tvm-ffi==0.1.0b15 | ||||
| 
 | ||||
| # Install the vllm wheel from previous stage | ||||
| RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|     uv pip install --system /wheels/vllm/*.whl --verbose | ||||
| @ -293,16 +344,18 @@ RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
| # Install xformers wheel from previous stage | ||||
| RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|     uv pip install --system /wheels/xformers/*.whl --verbose | ||||
| 
 | ||||
| # Build FlashInfer from source | ||||
| # Build flashinfer from source. | ||||
| ARG torch_cuda_arch_list='8.0;8.9;9.0a;10.0a;12.0' | ||||
| # install packages needed to build flashinfer | ||||
| # see issue: https://github.com/flashinfer-ai/flashinfer/issues/738 | ||||
| 
 | ||||
| RUN pip freeze | grep -E 'setuptools|packaging|build' | ||||
| 
 | ||||
| ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list} | ||||
| 
 | ||||
| # TODO(elainewy): remove this once the vllm commit is updated, and install flashinfer from pip | ||||
| # see https://github.com/pytorch/pytorch/pull/165274#issuecomment-3408531784 | ||||
| # Building flashinfer from source for torch nightly takes around 10 mins | ||||
| ARG FLASHINFER_GIT_REPO="https://github.com/flashinfer-ai/flashinfer.git" | ||||
| # Keep this in sync with https://github.com/vllm-project/vllm/blob/main/requirements/cuda.txt | ||||
| ARG FLASHINFER_GIT_REF="v0.2.14.post1" | ||||
| 
 | ||||
| RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|     git clone --depth 1 --recursive --shallow-submodules \ | ||||
|         --branch ${FLASHINFER_GIT_REF} \ | ||||
| @ -314,7 +367,7 @@ RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|     && cd .. \ | ||||
|     && rm -rf flashinfer | ||||
| 
 | ||||
| # Install FlashInfer | ||||
| # install the flashinfer Python wheel | ||||
| RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|     uv pip install --system wheels/flashinfer/*.whl --verbose | ||||
| 
 | ||||
| @ -324,6 +377,49 @@ RUN uv pip freeze | grep -i '^torch\|^torchvision\|^torchaudio\|^xformers\|^vllm | ||||
| ################### VLLM INSTALLED IMAGE #################### | ||||
| 
 | ||||
| 
 | ||||
| #################### UNITTEST IMAGE ############################# | ||||
| FROM vllm-base AS test | ||||
| 
 | ||||
| ENV UV_HTTP_TIMEOUT=500 | ||||
| ENV UV_INDEX_STRATEGY="unsafe-best-match" | ||||
| # Use copy mode to avoid hardlink failures with Docker cache mounts | ||||
| ENV UV_LINK_MODE=copy | ||||
| 
 | ||||
| COPY tests/ tests/ | ||||
| COPY examples examples | ||||
| COPY benchmarks benchmarks | ||||
| COPY ./vllm/collect_env.py . | ||||
| COPY requirements/common.txt requirements/common.txt | ||||
| COPY use_existing_torch.py use_existing_torch.py | ||||
| COPY pyproject.toml pyproject.toml | ||||
| # Install build and runtime dependencies without stable torch version | ||||
| COPY requirements/nightly_torch_test.txt requirements/nightly_torch_test.txt | ||||
| 
 | ||||
| RUN python3 use_existing_torch.py | ||||
| 
 | ||||
| # install packages | ||||
| RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|     uv pip install --system -r requirements/common.txt | ||||
| # enable fast downloads from hf (for testing) | ||||
| RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|     uv pip install --system hf_transfer | ||||
| ENV HF_HUB_ENABLE_HF_TRANSFER=1 | ||||
| 
 | ||||
| # install development dependencies (for testing) | ||||
| RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|     uv pip install --system -e tests/vllm_test_utils | ||||
| 
 | ||||
| RUN --mount=type=cache,target=/root/.cache/uv \ | ||||
|     uv pip install --system -r requirements/nightly_torch_test.txt | ||||
| 
 | ||||
| # Logging to confirm the torch versions | ||||
| RUN pip freeze | grep -E 'torch|xformers|vllm|flashinfer' | ||||
| 
 | ||||
| # Logging to confirm all the packages are installed | ||||
| RUN pip freeze | ||||
| 
 | ||||
| #################### UNITTEST IMAGE ############################# | ||||
| 
 | ||||
| #################### EXPORT STAGE #################### | ||||
| FROM scratch AS export-wheels | ||||
| 
 | ||||
							
								
								
									
.github/label_to_label.yml (vendored): 9 lines changed
							| @ -15,11 +15,6 @@ | ||||
|   - "module: reinplacing" | ||||
|   then: | ||||
|   - "module: pt2-dispatcher" | ||||
| - any: | ||||
|   - "vllm-compile" | ||||
|   then: | ||||
|   - "module: vllm" | ||||
|   - "oncall: pt2" | ||||
| - any: | ||||
|   - "module: vmap" | ||||
|   then: | ||||
| @ -32,6 +27,10 @@ | ||||
|   - "module: pt2 optimizer" | ||||
|   then: | ||||
|   - "module: dynamo" | ||||
| - any: | ||||
|   - "module: flex attention" | ||||
|   then: | ||||
|   - "module: higher order operators" | ||||
| - any: | ||||
|   - "module: aotinductor" | ||||
|   then: | ||||
|  | ||||
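Each rule in label_to_label.yml adds the labels under "then" whenever any label under "any" is present on the issue or PR. A minimal sketch of that evaluation (two rules abridged from the hunk above; a single pass, whereas the real bot may re-apply rules until no new labels appear):

    rules = [
        {"any": ["module: flex attention"], "then": ["module: higher order operators"]},
        {"any": ["module: pt2 optimizer"], "then": ["module: dynamo"]},
    ]

    def expand(labels):
        out = set(labels)
        for rule in rules:
            if any(l in out for l in rule["any"]):
                out.update(rule["then"])
        return out

    print(expand({"module: flex attention"}))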
							
								
								
									
.github/labeler.yml (vendored): 29 lines changed
							| @ -133,32 +133,3 @@ | ||||
|  | ||||
| "ciflow/vllm": | ||||
| - .github/ci_commit_pins/vllm.txt | ||||
|  | ||||
| "ciflow/b200": | ||||
| - test/test_matmul_cuda.py | ||||
| - test/test_scaled_matmul_cuda.py | ||||
| - test/inductor/test_fp8.py | ||||
| - aten/src/ATen/native/cuda/Blas.cpp | ||||
| - torch/**/*cublas* | ||||
| - torch/_inductor/kernel/mm.py | ||||
| - test/inductor/test_max_autotune.py | ||||
| - third_party/fbgemm | ||||
|  | ||||
| "ciflow/h100": | ||||
| - test/test_matmul_cuda.py | ||||
| - test/test_scaled_matmul_cuda.py | ||||
| - test/inductor/test_fp8.py | ||||
| - aten/src/ATen/native/cuda/Blas.cpp | ||||
| - torch/**/*cublas* | ||||
| - torch/_inductor/kernel/mm.py | ||||
| - test/inductor/test_max_autotune.py | ||||
| - third_party/fbgemm | ||||
|  | ||||
| "ciflow/rocm": | ||||
| - test/test_matmul_cuda.py | ||||
| - test/test_scaled_matmul_cuda.py | ||||
| - test/inductor/test_fp8.py | ||||
| - aten/src/ATen/native/cuda/Blas.cpp | ||||
| - torch/_inductor/kernel/mm.py | ||||
| - test/inductor/test_max_autotune.py | ||||
| - third_party/fbgemm | ||||
|  | ||||
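labeler.yml maps changed file paths to labels through glob patterns. A rough Python sketch using fnmatch (whose globbing only approximates the labeler's minimatch semantics, e.g. for "**"); patterns taken from the block above:

    from fnmatch import fnmatch

    mapping = {
        "ciflow/vllm": [".github/ci_commit_pins/vllm.txt"],
        "ciflow/h100": ["test/test_matmul_cuda.py", "torch/**/*cublas*"],
    }

    def labels_for(changed_files):
        return {
            label
            for label, patterns in mapping.items()
            if any(fnmatch(f, p) for f in changed_files for p in patterns)
        }

    print(labels_for(["torch/csrc/cuda/cublas_handle.cpp"]))  # {'ciflow/h100'}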
							
								
								
									
.github/pytorch-probot.yml (vendored): 5 lines changed
							| @ -3,7 +3,6 @@ ciflow_tracking_issue: 64124 | ||||
| ciflow_push_tags: | ||||
| - ciflow/b200 | ||||
| - ciflow/b200-symm-mem | ||||
| - ciflow/b200-distributed | ||||
| - ciflow/binaries | ||||
| - ciflow/binaries_libtorch | ||||
| - ciflow/binaries_wheel | ||||
| @ -16,8 +15,7 @@ ciflow_push_tags: | ||||
| - ciflow/inductor-micro-benchmark | ||||
| - ciflow/inductor-micro-benchmark-cpu-x86 | ||||
| - ciflow/inductor-perf-compare | ||||
| - ciflow/inductor-perf-test-nightly-rocm-mi300 | ||||
| - ciflow/inductor-perf-test-nightly-rocm-mi355 | ||||
| - ciflow/inductor-perf-test-nightly-rocm | ||||
| - ciflow/inductor-perf-test-nightly-x86-zen | ||||
| - ciflow/inductor-periodic | ||||
| - ciflow/inductor-rocm | ||||
| @ -33,7 +31,6 @@ ciflow_push_tags: | ||||
| - ciflow/rocm | ||||
| - ciflow/rocm-mi300 | ||||
| - ciflow/rocm-mi355 | ||||
| - ciflow/rocm-navi31 | ||||
| - ciflow/s390 | ||||
| - ciflow/slow | ||||
| - ciflow/torchbench | ||||
|  | ||||
							
								
								
									
.github/scripts/filter_test_configs.py (vendored): 2 lines changed
							| @ -512,8 +512,6 @@ def perform_misc_tasks( | ||||
|         "keep-going", | ||||
|         branch == MAIN_BRANCH | ||||
|         or bool(tag and re.match(r"^trunk/[a-f0-9]{40}$", tag)) | ||||
|         # Pattern for tags created via manual run on HUD | ||||
|         or bool(tag and re.match(r"^ciflow/[^/]+/[a-f0-9]{40}$", tag)) | ||||
|         or check_for_setting(labels, pr_body, "keep-going"), | ||||
|     ) | ||||
|     set_output( | ||||
|  | ||||
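The deleted clause widened "keep-going" to tags that HUD creates for manual runs, shaped like ciflow/<workflow>/<40-hex sha>. A sketch of the check before and after (MAIN_BRANCH assumed to be "main"):

    import re

    def keep_going(branch, tag):
        return (
            branch == "main"
            or bool(tag and re.match(r"^trunk/[a-f0-9]{40}$", tag))
            # removed: or bool(tag and re.match(r"^ciflow/[^/]+/[a-f0-9]{40}$", tag))
        )

    print(keep_going("main", ""))                             # True
    print(keep_going("release", "trunk/" + "a" * 40))         # True
    print(keep_going("release", "ciflow/trunk/" + "a" * 40))  # now False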
							
								
								
									
.github/scripts/generate_binary_build_matrix.py (vendored): 60 lines changed
							| @ -16,18 +16,16 @@ from typing import Optional | ||||
|  | ||||
|  | ||||
| # NOTE: Please also update the CUDA sources in `PIP_SOURCES` in tools/nightly.py when changing this | ||||
| CUDA_ARCHES = ["12.6", "12.8", "12.9", "13.0"] | ||||
| CUDA_ARCHES = ["12.6", "12.8", "13.0"] | ||||
| CUDA_STABLE = "12.8" | ||||
| CUDA_ARCHES_FULL_VERSION = { | ||||
|     "12.6": "12.6.3", | ||||
|     "12.8": "12.8.1", | ||||
|     "12.9": "12.9.1", | ||||
|     "13.0": "13.0.2", | ||||
|     "13.0": "13.0.0", | ||||
| } | ||||
| CUDA_ARCHES_CUDNN_VERSION = { | ||||
|     "12.6": "9", | ||||
|     "12.8": "9", | ||||
|     "12.9": "9", | ||||
|     "13.0": "9", | ||||
| } | ||||
|  | ||||
| @ -40,7 +38,7 @@ CPU_AARCH64_ARCH = ["cpu-aarch64"] | ||||
|  | ||||
| CPU_S390X_ARCH = ["cpu-s390x"] | ||||
|  | ||||
| CUDA_AARCH64_ARCHES = ["12.6-aarch64", "12.8-aarch64", "12.9-aarch64", "13.0-aarch64"] | ||||
| CUDA_AARCH64_ARCHES = ["12.6-aarch64", "12.8-aarch64", "13.0-aarch64"] | ||||
|  | ||||
|  | ||||
| PYTORCH_EXTRA_INSTALL_REQUIREMENTS = { | ||||
| @ -78,39 +76,22 @@ PYTORCH_EXTRA_INSTALL_REQUIREMENTS = { | ||||
|         "nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | " | ||||
|         "nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'" | ||||
|     ), | ||||
|     "12.9": ( | ||||
|         "nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | " | ||||
|         "nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | " | ||||
|         "nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | " | ||||
|         "nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | " | ||||
|         "nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | " | ||||
|         "nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | " | ||||
|         "nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | " | ||||
|         "nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | " | ||||
|         "nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | " | ||||
|         "nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | " | ||||
|         "nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | " | ||||
|         "nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | " | ||||
|         "nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | " | ||||
|         "nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | " | ||||
|         "nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux'" | ||||
|     ), | ||||
|     "13.0": ( | ||||
|         "nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | " | ||||
|         "nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | " | ||||
|         "nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | " | ||||
|         "nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | " | ||||
|         "nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | " | ||||
|         "nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | " | ||||
|         "nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | " | ||||
|         "nvidia-cublas==13.1.0.3; platform_system == 'Linux' | " | ||||
|         "nvidia-cufft==12.0.0.61; platform_system == 'Linux' | " | ||||
|         "nvidia-cublas==13.0.0.19; platform_system == 'Linux' | " | ||||
|         "nvidia-cufft==12.0.0.15; platform_system == 'Linux' | " | ||||
|         "nvidia-curand==10.4.0.35; platform_system == 'Linux' | " | ||||
|         "nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | " | ||||
|         "nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | " | ||||
|         "nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | " | ||||
|         "nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | " | ||||
|         "nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | " | ||||
|         "nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | " | ||||
|         "nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | " | ||||
|         "nvidia-nvtx==13.0.85; platform_system == 'Linux' | " | ||||
|         "nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | " | ||||
|         "nvidia-cufile==1.15.1.6; platform_system == 'Linux'" | ||||
|         "nvidia-nvtx==13.0.39; platform_system == 'Linux' | " | ||||
|         "nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | " | ||||
|         "nvidia-cufile==1.15.0.42; platform_system == 'Linux'" | ||||
|     ), | ||||
|     "xpu": ( | ||||
|         "intel-cmplr-lib-rt==2025.2.1 | " | ||||
| @ -241,11 +222,7 @@ def generate_libtorch_matrix( | ||||
|             arches += CUDA_ARCHES | ||||
|             arches += ROCM_ARCHES | ||||
|         elif os == "windows": | ||||
|             # TODO (huydhn): Only build CUDA 12.9 for Linux. This logic is to be cleaned up | ||||
|             # in 2.10 | ||||
|             windows_cuda_arches = CUDA_ARCHES.copy() | ||||
|             windows_cuda_arches.remove("12.9") | ||||
|             arches += windows_cuda_arches | ||||
|             arches += CUDA_ARCHES | ||||
|     if libtorch_variants is None: | ||||
|         libtorch_variants = [ | ||||
|             "shared-with-deps", | ||||
| @ -309,11 +286,7 @@ def generate_wheels_matrix( | ||||
|         if os == "linux": | ||||
|             arches += CUDA_ARCHES + ROCM_ARCHES + XPU_ARCHES | ||||
|         elif os == "windows": | ||||
|             # TODO (huydhn): Only build CUDA 12.9 for Linux. This logic is to be cleaned up | ||||
|             # in 2.10 | ||||
|             windows_cuda_arches = CUDA_ARCHES.copy() | ||||
|             windows_cuda_arches.remove("12.9") | ||||
|             arches += windows_cuda_arches + XPU_ARCHES | ||||
|             arches += CUDA_ARCHES + XPU_ARCHES | ||||
|         elif os == "linux-aarch64": | ||||
|             # Separate new if as the CPU type is different and | ||||
|             # uses different build/test scripts | ||||
| @ -349,7 +322,7 @@ def generate_wheels_matrix( | ||||
|             # cuda linux wheels require PYTORCH_EXTRA_INSTALL_REQUIREMENTS to install | ||||
|  | ||||
|             if ( | ||||
|                 arch_version in ["13.0", "12.9", "12.8", "12.6"] | ||||
|                 arch_version in ["13.0", "12.8", "12.6"] | ||||
|                 and os == "linux" | ||||
|                 or arch_version in CUDA_AARCH64_ARCHES | ||||
|             ): | ||||
| @ -413,6 +386,5 @@ def generate_wheels_matrix( | ||||
|  | ||||
|  | ||||
| validate_nccl_dep_consistency("13.0") | ||||
| validate_nccl_dep_consistency("12.9") | ||||
| validate_nccl_dep_consistency("12.8") | ||||
| validate_nccl_dep_consistency("12.6") | ||||
|  | ||||
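With "12.9" gone from CUDA_ARCHES, the Windows copy-and-remove carve-out becomes dead code and is deleted outright. A condensed sketch of the resulting per-OS arch selection (ROCM_ARCHES and XPU_ARCHES are stand-ins for the real lists defined elsewhere in the file):

    CUDA_ARCHES = ["12.6", "12.8", "13.0"]
    ROCM_ARCHES = ["6.4"]   # stand-in
    XPU_ARCHES = ["xpu"]    # stand-in

    def wheel_arches(os_name):
        arches = ["cpu"]
        if os_name == "linux":
            arches += CUDA_ARCHES + ROCM_ARCHES + XPU_ARCHES
        elif os_name == "windows":
            arches += CUDA_ARCHES + XPU_ARCHES  # no 12.9 exclusion needed anymore
        return arches

    print(wheel_arches("windows"))  # ['cpu', '12.6', '12.8', '13.0', 'xpu']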
							
								
								
									
.github/scripts/trymerge.py (vendored): 6 lines changed
							| @ -1092,7 +1092,7 @@ class GitHubPR: | ||||
|         editor = node["editor"] | ||||
|         return GitHubComment( | ||||
|             body_text=node["bodyText"], | ||||
|             created_at=node.get("createdAt", ""), | ||||
|             created_at=node["createdAt"] if "createdAt" in node else "", | ||||
|             author_login=node["author"]["login"], | ||||
|             author_url=node["author"].get("url", None), | ||||
|             author_association=node["authorAssociation"], | ||||
| @ -2042,6 +2042,10 @@ def validate_revert( | ||||
|             f"[{', '.join(allowed_reverters)}], but instead is {author_association}." | ||||
|         ) | ||||
|  | ||||
|     # Raises exception if matching rule is not found, but ignores all status checks | ||||
|     find_matching_merge_rule( | ||||
|         pr, repo, skip_mandatory_checks=True, skip_internal_checks=True | ||||
|     ) | ||||
|     commit_sha = get_pr_commit_sha(repo, pr) | ||||
|     return (author_login, commit_sha) | ||||
|  | ||||
|  | ||||
| @ -177,9 +177,6 @@ jobs: | ||||
|     runs-on: linux.rocm.gpu.mi250 | ||||
|     timeout-minutes: !{{ common.timeout_minutes }} | ||||
|     !{{ upload.binary_env(config) }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
|  | ||||
| @ -26,8 +26,9 @@ name: !{{ build_environment }} | ||||
|       - name: Setup Python | ||||
|         uses: actions/setup-python@v6 | ||||
|         with: | ||||
|           # TODO: Remove me once 3.14 is out | ||||
|           # .4 is the minimum patch version for 3.10, and the no-gil build of 3.13 needs at least 3.13.3 | ||||
|           python-version: "!{{ py_ver.strip('t') + ('.4' if '3.14' not in py_ver else '.0') }}" | ||||
|           python-version: "!{{ (py_ver.strip('t') + '.4') if '3.14' not in py_ver else '3.14.0-rc.2' }}" | ||||
|           freethreaded: !{{ "true" if py_ver.endswith('t') else "false" }} | ||||
| {%- endmacro %} | ||||
|  | ||||
|  | ||||
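The templated python-version picks a concrete installer build per interpreter: strip the free-threading "t" suffix, then append ".4", except for 3.14, which maps to the 3.14.0-rc.2 pre-release. The updated expression rendered in Python, for illustration:

    def setup_python_inputs(py_ver):
        base = py_ver.strip("t")
        version = base + ".4" if "3.14" not in py_ver else "3.14.0-rc.2"
        return {"python-version": version,
                "freethreaded": "true" if py_ver.endswith("t") else "false"}

    print(setup_python_inputs("3.13t"))  # {'python-version': '3.13.4', 'freethreaded': 'true'}
    print(setup_python_inputs("3.14"))   # {'python-version': '3.14.0-rc.2', 'freethreaded': 'false'}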
| @ -79,9 +79,9 @@ jobs: | ||||
|     runs-on: "windows-11-arm64-preview" | ||||
|     {%- else %} | ||||
|     {%- if branches == "nightly" %} | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     {%- else %} | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge.nonephemeral" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" | ||||
|     {%- endif %} | ||||
|     {%- endif %} | ||||
|     timeout-minutes: !{{ common.timeout_minutes_windows_binary }} | ||||
|  | ||||
							
								
								
									
.github/workflows/_docs.yml (vendored): 2 lines changed
							| @ -72,7 +72,7 @@ jobs: | ||||
|             # Let's try to figure out how this can be improved | ||||
|             timeout-minutes: 360 | ||||
|           - docs_type: python | ||||
|             runner: ${{ inputs.runner_prefix }}linux.c7i.2xlarge | ||||
|             runner: ${{ inputs.runner_prefix }}linux.2xlarge | ||||
|             # It takes less than 30m to finish python docs unless there are issues | ||||
|             timeout-minutes: 30 | ||||
|     # Set a fixed name for this job instead of using the current matrix-generated name, i.e. build-docs (cpp, linux.12xlarge, 180) | ||||
|  | ||||
							
								
								
									
.github/workflows/_linux-build.yml (vendored): 2 lines changed
							| @ -37,7 +37,7 @@ on: | ||||
|       runner: | ||||
|         required: false | ||||
|         type: string | ||||
|         default: "linux.c7i.2xlarge" | ||||
|         default: "linux.2xlarge" | ||||
|         description: | | ||||
|           Label of the runner this job should run on. | ||||
|       test-matrix: | ||||
|  | ||||
							
								
								
									
.github/workflows/_linux-test.yml (vendored): 40 lines changed
							| @ -224,46 +224,6 @@ jobs: | ||||
|         continue-on-error: true | ||||
|         uses: ./.github/actions/download-td-artifacts | ||||
|  | ||||
|       - name: Download Windows torch wheel for cross-compilation | ||||
|         if: matrix.win_torch_wheel_artifact != '' | ||||
|         uses: seemethere/download-artifact-s3@1da556a7aa0a088e3153970611f6c432d58e80e6 # v4.2.0 | ||||
|         with: | ||||
|           name: ${{ matrix.win_torch_wheel_artifact }} | ||||
|           path: win-torch-wheel | ||||
|  | ||||
|       - name: Extract Windows wheel and setup CUDA libraries | ||||
|         if: matrix.win_torch_wheel_artifact != '' | ||||
|         shell: bash | ||||
|         run: | | ||||
|           set -x | ||||
|  | ||||
|           # Find the wheel file | ||||
|           WHEEL_FILE=$(find win-torch-wheel -name "*.whl" -type f | head -n 1) | ||||
|           if [ -z "$WHEEL_FILE" ]; then | ||||
|             echo "Error: No wheel file found in win-torch-wheel directory" | ||||
|             exit 1 | ||||
|           fi | ||||
|           echo "Found wheel file: $WHEEL_FILE" | ||||
|  | ||||
|           # Unzip the wheel file | ||||
|           unzip -q "$WHEEL_FILE" -d win-torch-wheel-extracted | ||||
|           echo "Extracted wheel contents" | ||||
|  | ||||
|           # Setup CUDA libraries (cuda.lib and cudart.lib) directory | ||||
|           mkdir -p win-torch-wheel-extracted/lib/x64 | ||||
|           if [ -f "win-torch-wheel/cuda.lib" ]; then | ||||
|             mv win-torch-wheel/cuda.lib win-torch-wheel-extracted/lib/x64/ | ||||
|             echo "Moved cuda.lib to win-torch-wheel-extracted/lib/x64/" | ||||
|           fi | ||||
|           if [ -f "win-torch-wheel/cudart.lib" ]; then | ||||
|             mv win-torch-wheel/cudart.lib win-torch-wheel-extracted/lib/x64/ | ||||
|             echo "Moved cudart.lib to win-torch-wheel-extracted/lib/x64/" | ||||
|           fi | ||||
|  | ||||
|           # Verify CUDA libraries are present | ||||
|           echo "CUDA libraries:" | ||||
|           ls -la win-torch-wheel-extracted/lib/x64/ || echo "No CUDA libraries found" | ||||
|  | ||||
|       - name: Parse ref | ||||
|         id: parse-ref | ||||
|         run: .github/scripts/parse_ref.py | ||||
|  | ||||
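The removed step above found the Windows torch wheel, unzipped it, and staged cuda.lib and cudart.lib under lib/x64 for the cross-compilation toolchain. A compact Python sketch of the same flow (paths as in the workflow; behavior approximated):

    import shutil, zipfile
    from pathlib import Path

    src, dst = Path("win-torch-wheel"), Path("win-torch-wheel-extracted")
    wheel = next(src.glob("*.whl"), None)
    if wheel is None:
        raise SystemExit("Error: No wheel file found in win-torch-wheel directory")

    with zipfile.ZipFile(wheel) as zf:  # a wheel is just a zip archive
        zf.extractall(dst)

    libdir = dst / "lib" / "x64"
    libdir.mkdir(parents=True, exist_ok=True)
    for lib in ("cuda.lib", "cudart.lib"):
        if (src / lib).exists():
            shutil.move(str(src / lib), str(libdir / lib))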
							
								
								
									
.github/workflows/_rocm-test.yml (vendored): 13 lines changed
							| @ -102,6 +102,19 @@ jobs: | ||||
|             exit 1 | ||||
|           fi | ||||
|  | ||||
|       - name: configure aws credentials | ||||
|         id: aws_creds | ||||
|         uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722 # v4.1.0 | ||||
|         with: | ||||
|           role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_s3_and_ecr_read_only | ||||
|           aws-region: us-east-1 | ||||
|           role-duration-seconds: 18000 | ||||
|  | ||||
|       - name: Login to Amazon ECR | ||||
|         id: login-ecr | ||||
|         continue-on-error: true | ||||
|         uses: aws-actions/amazon-ecr-login@062b18b96a7aff071d4dc91bc00c4c1a7945b076 # v2.0.1 | ||||
|  | ||||
|       - name: Calculate docker image | ||||
|         id: calculate-docker-image | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|  | ||||
							
								
								
									
.github/workflows/_win-build.yml (vendored): 25 lines changed
							| @ -168,31 +168,6 @@ jobs: | ||||
|         run: | | ||||
|           .ci/pytorch/win-build.sh | ||||
|  | ||||
|       # Collect Windows torch libs and CUDA libs for cross-compilation | ||||
|       - name: Collect Windows CUDA libs for cross-compilation | ||||
|         if: steps.build.outcome != 'skipped' && inputs.cuda-version != 'cpu' | ||||
|         shell: bash | ||||
|         run: | | ||||
|           set -ex | ||||
|  | ||||
|           # Create directory structure if does not exist | ||||
|           mkdir -p /c/${{ github.run_id }}/build-results | ||||
|  | ||||
|           # Copy CUDA libs | ||||
|           CUDA_PATH="/c/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${{ inputs.cuda-version }}" | ||||
|  | ||||
|           if [ -f "${CUDA_PATH}/lib/x64/cuda.lib" ]; then | ||||
|             cp "${CUDA_PATH}/lib/x64/cuda.lib" /c/${{ github.run_id }}/build-results/ | ||||
|           fi | ||||
|  | ||||
|           if [ -f "${CUDA_PATH}/lib/x64/cudart.lib" ]; then | ||||
|             cp "${CUDA_PATH}/lib/x64/cudart.lib" /c/${{ github.run_id }}/build-results/ | ||||
|           fi | ||||
|  | ||||
|           # List collected files | ||||
|           echo "Collected CUDA libs:" | ||||
|           ls -lah /c/${{ github.run_id }}/build-results/*.lib | ||||
|  | ||||
|       # Upload to github so that people can click and download artifacts | ||||
|       - name: Upload artifacts to s3 | ||||
|         if: steps.build.outcome != 'skipped' | ||||
|  | ||||
							
								
								
									
.github/workflows/b200-distributed.yml (vendored): 62 lines changed
							| @ -1,62 +0,0 @@ | ||||
| name: CI for distributed tests on B200 | ||||
|  | ||||
| on: | ||||
|   pull_request: | ||||
|     paths: | ||||
|       - .github/workflows/b200-distributed.yml | ||||
|   workflow_dispatch: | ||||
|   push: | ||||
|     tags: | ||||
|       - ciflow/b200-distributed/* | ||||
|   schedule: | ||||
|     - cron: 46 8 * * *  # about 1:46am PDT | ||||
|  | ||||
| concurrency: | ||||
|   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }} | ||||
|   cancel-in-progress: true | ||||
|  | ||||
| permissions: | ||||
|   id-token: write | ||||
|   contents: read | ||||
|  | ||||
| jobs: | ||||
|  | ||||
|   get-label-type: | ||||
|     if: github.repository_owner == 'pytorch' | ||||
|     name: get-label-type | ||||
|     uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main | ||||
|     with: | ||||
|       triggering_actor: ${{ github.triggering_actor }} | ||||
|       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }} | ||||
|       curr_branch: ${{ github.head_ref || github.ref_name }} | ||||
|       curr_ref_type: ${{ github.ref_type }} | ||||
|  | ||||
|   linux-jammy-cuda12_8-py3_10-gcc11-build-distributed-b200: | ||||
|     name: linux-jammy-cuda12.8-py3.10-gcc11-build-distributed-b200 | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runner: linux.12xlarge.memory | ||||
|       build-environment: linux-jammy-cuda12.8-py3.10-gcc11-distributed-b200 | ||||
|       docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc11 | ||||
|       cuda-arch-list: '10.0' | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "distributed", shard: 1, num_shards: 2, runner: "linux.dgx.b200.8" }, | ||||
|           { config: "distributed", shard: 2, num_shards: 2, runner: "linux.dgx.b200.8" }, | ||||
|         ]} | ||||
|     secrets: inherit | ||||
|  | ||||
|   linux-jammy-cuda12_8-py3_10-gcc11-test-distributed-b200: | ||||
|     name: linux-jammy-cuda12.8-py3.10-gcc11-test-b200 | ||||
|     uses: ./.github/workflows/_linux-test.yml | ||||
|     needs: | ||||
|       - linux-jammy-cuda12_8-py3_10-gcc11-build-distributed-b200 | ||||
|     with: | ||||
|       timeout-minutes: 1200 | ||||
|       build-environment: linux-jammy-cuda12.8-py3.10-gcc11-distributed-b200 | ||||
|       docker-image: ${{ needs.linux-jammy-cuda12_8-py3_10-gcc11-build-distributed-b200.outputs.docker-image }} | ||||
|       test-matrix: ${{ needs.linux-jammy-cuda12_8-py3_10-gcc11-build-distributed-b200.outputs.test-matrix }} | ||||
|       aws-role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_s3_and_ecr_read_only | ||||
|     secrets: inherit | ||||
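The deleted workflow's cron "46 8 * * *" fires at 08:46 UTC; its "about 1:46am PDT" comment is just the UTC-7 offset. A quick stdlib check:

    from datetime import datetime, timedelta, timezone

    pdt = timezone(timedelta(hours=-7))  # PDT is UTC-7
    fire = datetime(2025, 6, 1, 8, 46, tzinfo=timezone.utc)
    print(fire.astimezone(pdt).strftime("%H:%M"))  # 01:46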
							
								
								
									
.github/workflows/build-manywheel-images.yml (vendored): 4 lines changed
							| @ -46,12 +46,10 @@ jobs: | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         include: [ | ||||
|           { name: "manylinux2_28-builder",          tag: "cuda13.0",          runner: "linux.9xlarge.ephemeral" }, | ||||
|           { name: "manylinux2_28-builder",          tag: "cuda13.0",         runner: "linux.9xlarge.ephemeral" }, | ||||
|           { name: "manylinux2_28-builder",          tag: "cuda12.8",          runner: "linux.9xlarge.ephemeral" }, | ||||
|           { name: "manylinux2_28-builder",          tag: "cuda12.9",          runner: "linux.9xlarge.ephemeral" }, | ||||
|           { name: "manylinux2_28-builder",          tag: "cuda12.6",          runner: "linux.9xlarge.ephemeral" }, | ||||
|           { name: "manylinuxaarch64-builder",       tag: "cuda13.0",          runner: "linux.arm64.2xlarge.ephemeral" }, | ||||
|           { name: "manylinuxaarch64-builder",       tag: "cuda12.9",          runner: "linux.arm64.2xlarge.ephemeral" }, | ||||
|           { name: "manylinuxaarch64-builder",       tag: "cuda12.8",          runner: "linux.arm64.2xlarge.ephemeral" }, | ||||
|           { name: "manylinuxaarch64-builder",       tag: "cuda12.6",          runner: "linux.arm64.2xlarge.ephemeral" }, | ||||
|           { name: "manylinux2_28-builder",          tag: "rocm6.4",           runner: "linux.9xlarge.ephemeral" }, | ||||
|  | ||||
							
								
								
									
.github/workflows/build-vllm-wheel.yml (vendored): 19 lines changed
							| @ -27,8 +27,9 @@ jobs: | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         python-version: [ '3.12' ] | ||||
|         # TODO (huydhn): Add cu130 after https://github.com/vllm-project/vllm/issues/24464 is resolved | ||||
|         platform: [ 'manylinux_2_28_x86_64', 'manylinux_2_28_aarch64' ] | ||||
|         device: [ 'cu128', 'cu129', 'cu130' ] | ||||
|         device: [ 'cu128', 'cu129' ] | ||||
|         include: | ||||
|           - platform: manylinux_2_28_x86_64 | ||||
|             device: cu128 | ||||
| @ -38,10 +39,6 @@ jobs: | ||||
|             device: cu129 | ||||
|             manylinux-image: 'pytorch/manylinux2_28-builder:cuda12.9' | ||||
|             runner: linux.12xlarge.memory | ||||
|           - platform: manylinux_2_28_x86_64 | ||||
|             device: cu130 | ||||
|             manylinux-image: 'pytorch/manylinux2_28-builder:cuda13.0' | ||||
|             runner: linux.12xlarge.memory | ||||
|           - platform: manylinux_2_28_aarch64 | ||||
|             device: cu128 | ||||
|             manylinux-image: 'pytorch/manylinuxaarch64-builder:cuda12.8' | ||||
| @ -50,11 +47,6 @@ jobs: | ||||
|             device: cu129 | ||||
|             manylinux-image: 'pytorch/manylinuxaarch64-builder:cuda12.9' | ||||
|             runner: linux.arm64.r7g.12xlarge.memory | ||||
|         exclude: | ||||
|           # TODO (huydhn): Add cu130 aarch64 once PyTorch is on 2.9+ and | ||||
|           # xformers is updated to support 13.0 | ||||
|           - platform: manylinux_2_28_aarch64 | ||||
|             device: cu130 | ||||
|     name: "Build ${{ matrix.device }} vLLM wheel on ${{ matrix.platform }}" | ||||
|     runs-on: ${{ matrix.runner }} | ||||
|     timeout-minutes: 480 | ||||
| @ -177,12 +169,7 @@ jobs: | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         platform: [ 'manylinux_2_28_x86_64', 'manylinux_2_28_aarch64' ] | ||||
|         device: [ 'cu128', 'cu129', 'cu130' ] | ||||
|         exclude: | ||||
|           # TODO (huydhn): Add cu130 aarch64 once PyTorch is on 2.9+ and | ||||
|           # xformers is updated to support 13.0 | ||||
|           - platform: manylinux_2_28_aarch64 | ||||
|             device: cu130 | ||||
|         device: [ 'cu128', 'cu129' ] | ||||
|     env: | ||||
|       PLATFORM: ${{ matrix.platform }} | ||||
|       BUILD_DEVICE: ${{ matrix.device }} | ||||
|  | ||||
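GitHub Actions expands the matrix as the cross-product of the axes and then drops any combination listed under exclude; the removed exclude was what kept cu130 off aarch64. A sketch of that expansion:

    from itertools import product

    platforms = ["manylinux_2_28_x86_64", "manylinux_2_28_aarch64"]
    devices = ["cu128", "cu129", "cu130"]
    exclude = [{"platform": "manylinux_2_28_aarch64", "device": "cu130"}]

    jobs = [
        {"platform": p, "device": d}
        for p, d in product(platforms, devices)
        if {"platform": p, "device": d} not in exclude
    ]
    print(len(jobs))  # 5: cu130 builds only on x86_64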
							
								
								
									
.github/workflows/generated-linux-aarch64-binary-manywheel-nightly.yml (generated, vendored): 336 lines changed
							| @ -204,52 +204,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_10-cuda-aarch64-12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.arm64.r7g.12xlarge.memory | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_10-cuda-aarch64-12_9 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_10-cuda-aarch64-12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_10-cuda-aarch64-12_9-build | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|       build_name: manywheel-py3_10-cuda-aarch64-12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_10-cuda-aarch64-13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -270,7 +224,7 @@ jobs: | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_10-cuda-aarch64-13_0 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.1.0.3; platform_system == 'Linux' | nvidia-cufft==12.0.0.61; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.85; platform_system == 'Linux' | nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | nvidia-cufile==1.15.1.6; platform_system == 'Linux' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
| @ -453,52 +407,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_11-cuda-aarch64-12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.11" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.arm64.r7g.12xlarge.memory | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_11-cuda-aarch64-12_9 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_11-cuda-aarch64-12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_11-cuda-aarch64-12_9-build | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.11" | ||||
|       build_name: manywheel-py3_11-cuda-aarch64-12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_11-cuda-aarch64-13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -519,7 +427,7 @@ jobs: | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_11-cuda-aarch64-13_0 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.1.0.3; platform_system == 'Linux' | nvidia-cufft==12.0.0.61; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.85; platform_system == 'Linux' | nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | nvidia-cufile==1.15.1.6; platform_system == 'Linux' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
| @ -702,52 +610,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_12-cuda-aarch64-12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.arm64.r7g.12xlarge.memory | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_12-cuda-aarch64-12_9 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_12-cuda-aarch64-12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_12-cuda-aarch64-12_9-build | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|       build_name: manywheel-py3_12-cuda-aarch64-12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_12-cuda-aarch64-13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -768,7 +630,7 @@ jobs: | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_12-cuda-aarch64-13_0 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.1.0.3; platform_system == 'Linux' | nvidia-cufft==12.0.0.61; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.85; platform_system == 'Linux' | nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | nvidia-cufile==1.15.1.6; platform_system == 'Linux' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
| @ -951,52 +813,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_13-cuda-aarch64-12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.13" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.arm64.r7g.12xlarge.memory | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_13-cuda-aarch64-12_9 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_13-cuda-aarch64-12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_13-cuda-aarch64-12_9-build | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.13" | ||||
|       build_name: manywheel-py3_13-cuda-aarch64-12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_13-cuda-aarch64-13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -1017,7 +833,7 @@ jobs: | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_13-cuda-aarch64-13_0 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.1.0.3; platform_system == 'Linux' | nvidia-cufft==12.0.0.61; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.85; platform_system == 'Linux' | nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | nvidia-cufile==1.15.1.6; platform_system == 'Linux' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
| @ -1200,52 +1016,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_13t-cuda-aarch64-12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.13t" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.arm64.r7g.12xlarge.memory | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_13t-cuda-aarch64-12_9 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_13t-cuda-aarch64-12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_13t-cuda-aarch64-12_9-build | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.13t" | ||||
|       build_name: manywheel-py3_13t-cuda-aarch64-12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_13t-cuda-aarch64-13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -1266,7 +1036,7 @@ jobs: | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_13t-cuda-aarch64-13_0 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.1.0.3; platform_system == 'Linux' | nvidia-cufft==12.0.0.61; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.85; platform_system == 'Linux' | nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | nvidia-cufile==1.15.1.6; platform_system == 'Linux' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
| @ -1449,52 +1219,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_14-cuda-aarch64-12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.14" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.arm64.r7g.12xlarge.memory | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_14-cuda-aarch64-12_9 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_14-cuda-aarch64-12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_14-cuda-aarch64-12_9-build | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.14" | ||||
|       build_name: manywheel-py3_14-cuda-aarch64-12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_14-cuda-aarch64-13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -1515,7 +1239,7 @@ jobs: | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_14-cuda-aarch64-13_0 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.1.0.3; platform_system == 'Linux' | nvidia-cufft==12.0.0.61; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.85; platform_system == 'Linux' | nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | nvidia-cufile==1.15.1.6; platform_system == 'Linux' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
| @ -1698,52 +1422,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_14t-cuda-aarch64-12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.14t" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.arm64.r7g.12xlarge.memory | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_14t-cuda-aarch64-12_9 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_14t-cuda-aarch64-12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_14t-cuda-aarch64-12_9-build | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9-aarch64" | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: manylinuxaarch64-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.14t" | ||||
|       build_name: manywheel-py3_14t-cuda-aarch64-12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_14t-cuda-aarch64-13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -1764,7 +1442,7 @@ jobs: | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_14t-cuda-aarch64-13_0 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.1.0.3; platform_system == 'Linux' | nvidia-cufft==12.0.0.61; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.85; platform_system == 'Linux' | nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | nvidia-cufile==1.15.1.6; platform_system == 'Linux' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|  | ||||
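Note on the PYTORCH_EXTRA_INSTALL_REQUIREMENTS values above: each is a single pipe-separated string of PEP 508 requirement specifiers, with every entry carrying a platform_system == 'Linux' environment marker. As a minimal sketch of how such a string could be expanded (the step name and the splitting logic here are assumptions for illustration, not the reusable workflows' actual consumer; pip evaluates the marker itself):

    # Hypothetical job step: split the pipe-separated PEP 508 string and
    # install each specifier; pip skips entries whose marker is false.
    - name: Install extra requirements (sketch)
      env:
        PYTORCH_EXTRA_INSTALL_REQUIREMENTS: "nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvtx==13.0.85; platform_system == 'Linux'"
      run: |
        IFS='|' read -ra specs <<< "$PYTORCH_EXTRA_INSTALL_REQUIREMENTS"
        for spec in "${specs[@]}"; do
          spec="${spec#"${spec%%[![:space:]]*}"}"   # trim leading whitespace
          spec="${spec%"${spec##*[![:space:]]}"}"   # trim trailing whitespace
          pip install "$spec"                       # marker evaluated by pip
        done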
							
								
								
									
74  .github/workflows/generated-linux-binary-libtorch-nightly.yml  (generated, vendored)
| @ -248,74 +248,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   libtorch-cuda12_9-shared-with-deps-release-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: libtorch | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: libtorch-cxx11-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       LIBTORCH_CONFIG: release | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: libtorch-cuda12_9-shared-with-deps-release | ||||
|       build_environment: linux-binary-libtorch | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   libtorch-cuda12_9-shared-with-deps-release-test:  # Testing | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: | ||||
|       - libtorch-cuda12_9-shared-with-deps-release-build | ||||
|       - get-label-type | ||||
|     uses: ./.github/workflows/_binary-test-linux.yml | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: libtorch | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: libtorch-cxx11-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       LIBTORCH_CONFIG: release | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|       build_name: libtorch-cuda12_9-shared-with-deps-release | ||||
|       build_environment: linux-binary-libtorch | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   libtorch-cuda12_9-shared-with-deps-release-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: libtorch-cuda12_9-shared-with-deps-release-test | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: libtorch | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: libtorch-cxx11-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       LIBTORCH_CONFIG: release | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|       build_name: libtorch-cuda12_9-shared-with-deps-release | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   libtorch-cuda13_0-shared-with-deps-release-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -426,9 +358,6 @@ jobs: | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm6.4 | ||||
|       LIBTORCH_CONFIG: release | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -544,9 +473,6 @@ jobs: | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm7.0 | ||||
|       LIBTORCH_CONFIG: release | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
|  | ||||
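The libtorch jobs above illustrate the three-stage chain these generated files repeat for every configuration: a -build job calls the _binary-build-linux.yml reusable workflow, a -test job needs the build and calls _binary-test-linux.yml, and an -upload job needs the test, grants id-token/contents permissions, and calls _binary-upload.yml (the aarch64 wheel jobs earlier skip the test stage and chain build directly to upload). A stripped-down skeleton of that pattern, with inputs reduced to a representative subset and placeholder job names:

    # Illustrative skeleton only; the generated jobs pass many more inputs.
    jobs:
      example-build:
        if: ${{ github.repository_owner == 'pytorch' }}
        uses: ./.github/workflows/_binary-build-linux.yml
        with:
          PACKAGE_TYPE: libtorch
          build_name: example
        secrets:
          github-token: ${{ secrets.GITHUB_TOKEN }}
      example-test:
        needs: example-build
        uses: ./.github/workflows/_binary-test-linux.yml
        with:
          PACKAGE_TYPE: libtorch
          build_name: example
        secrets:
          github-token: ${{ secrets.GITHUB_TOKEN }}
      example-upload:
        needs: example-test
        permissions:
          id-token: write
          contents: read
        uses: ./.github/workflows/_binary-upload.yml
        with:
          PACKAGE_TYPE: libtorch
          build_name: example
        secrets:
          github-token: ${{ secrets.GITHUB_TOKEN }}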
							
								
								
									
518  .github/workflows/generated-linux-binary-manywheel-nightly.yml  (generated, vendored)
| @ -241,72 +241,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_10-cuda12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: manywheel-py3_10-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_10-cuda12_9-test:  # Testing | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: | ||||
|       - manywheel-py3_10-cuda12_9-build | ||||
|       - get-label-type | ||||
|     uses: ./.github/workflows/_binary-test-linux.yml | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|       build_name: manywheel-py3_10-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_10-cuda12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_10-cuda12_9-test | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|       build_name: manywheel-py3_10-cuda12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_10-cuda13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -325,7 +259,7 @@ jobs: | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: manywheel-py3_10-cuda13_0 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.1.0.3; platform_system == 'Linux' | nvidia-cufft==12.0.0.61; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.85; platform_system == 'Linux' | nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | nvidia-cufile==1.15.1.6; platform_system == 'Linux' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_10-cuda13_0-test:  # Testing | ||||
| @ -413,9 +347,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm6.4 | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -528,9 +459,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm7.0 | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -907,72 +835,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_11-cuda12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.11" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: manywheel-py3_11-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_11-cuda12_9-test:  # Testing | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: | ||||
|       - manywheel-py3_11-cuda12_9-build | ||||
|       - get-label-type | ||||
|     uses: ./.github/workflows/_binary-test-linux.yml | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.11" | ||||
|       build_name: manywheel-py3_11-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_11-cuda12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_11-cuda12_9-test | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.11" | ||||
|       build_name: manywheel-py3_11-cuda12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_11-cuda13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -991,7 +853,7 @@ jobs: | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: manywheel-py3_11-cuda13_0 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.1.0.3; platform_system == 'Linux' | nvidia-cufft==12.0.0.61; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.85; platform_system == 'Linux' | nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | nvidia-cufile==1.15.1.6; platform_system == 'Linux' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_11-cuda13_0-test:  # Testing | ||||
| @ -1079,9 +941,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm6.4 | ||||
|       DESIRED_PYTHON: "3.11" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -1194,9 +1053,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm7.0 | ||||
|       DESIRED_PYTHON: "3.11" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -1573,72 +1429,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_12-cuda12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: manywheel-py3_12-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_12-cuda12_9-test:  # Testing | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: | ||||
|       - manywheel-py3_12-cuda12_9-build | ||||
|       - get-label-type | ||||
|     uses: ./.github/workflows/_binary-test-linux.yml | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|       build_name: manywheel-py3_12-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_12-cuda12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_12-cuda12_9-test | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|       build_name: manywheel-py3_12-cuda12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_12-cuda13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -1657,7 +1447,7 @@ jobs: | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: manywheel-py3_12-cuda13_0 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.1.0.3; platform_system == 'Linux' | nvidia-cufft==12.0.0.61; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.85; platform_system == 'Linux' | nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | nvidia-cufile==1.15.1.6; platform_system == 'Linux' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_12-cuda13_0-test:  # Testing | ||||
| @ -1745,9 +1535,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm6.4 | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -1860,9 +1647,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm7.0 | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -2239,72 +2023,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_13-cuda12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.13" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: manywheel-py3_13-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_13-cuda12_9-test:  # Testing | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: | ||||
|       - manywheel-py3_13-cuda12_9-build | ||||
|       - get-label-type | ||||
|     uses: ./.github/workflows/_binary-test-linux.yml | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.13" | ||||
|       build_name: manywheel-py3_13-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_13-cuda12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_13-cuda12_9-test | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.13" | ||||
|       build_name: manywheel-py3_13-cuda12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_13-cuda13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -2323,7 +2041,7 @@ jobs: | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: manywheel-py3_13-cuda13_0 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.1.0.3; platform_system == 'Linux' | nvidia-cufft==12.0.0.61; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.85; platform_system == 'Linux' | nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | nvidia-cufile==1.15.1.6; platform_system == 'Linux' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_13-cuda13_0-test:  # Testing | ||||
| @ -2411,9 +2129,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm6.4 | ||||
|       DESIRED_PYTHON: "3.13" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -2526,9 +2241,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm7.0 | ||||
|       DESIRED_PYTHON: "3.13" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -2905,72 +2617,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_13t-cuda12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.13t" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: manywheel-py3_13t-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_13t-cuda12_9-test:  # Testing | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: | ||||
|       - manywheel-py3_13t-cuda12_9-build | ||||
|       - get-label-type | ||||
|     uses: ./.github/workflows/_binary-test-linux.yml | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.13t" | ||||
|       build_name: manywheel-py3_13t-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_13t-cuda12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_13t-cuda12_9-test | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.13t" | ||||
|       build_name: manywheel-py3_13t-cuda12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_13t-cuda13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -2989,7 +2635,7 @@ jobs: | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: manywheel-py3_13t-cuda13_0 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.1.0.3; platform_system == 'Linux' | nvidia-cufft==12.0.0.61; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.85; platform_system == 'Linux' | nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | nvidia-cufile==1.15.1.6; platform_system == 'Linux' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_13t-cuda13_0-test:  # Testing | ||||
| @ -3077,9 +2723,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm6.4 | ||||
|       DESIRED_PYTHON: "3.13t" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -3192,9 +2835,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm7.0 | ||||
|       DESIRED_PYTHON: "3.13t" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -3571,72 +3211,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_14-cuda12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.14" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: manywheel-py3_14-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_14-cuda12_9-test:  # Testing | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: | ||||
|       - manywheel-py3_14-cuda12_9-build | ||||
|       - get-label-type | ||||
|     uses: ./.github/workflows/_binary-test-linux.yml | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.14" | ||||
|       build_name: manywheel-py3_14-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_14-cuda12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_14-cuda12_9-test | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.14" | ||||
|       build_name: manywheel-py3_14-cuda12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_14-cuda13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -3655,7 +3229,7 @@ jobs: | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: manywheel-py3_14-cuda13_0 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.1.0.3; platform_system == 'Linux' | nvidia-cufft==12.0.0.61; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.85; platform_system == 'Linux' | nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | nvidia-cufile==1.15.1.6; platform_system == 'Linux' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_14-cuda13_0-test:  # Testing | ||||
| @ -3743,9 +3317,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm6.4 | ||||
|       DESIRED_PYTHON: "3.14" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -3858,9 +3429,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm7.0 | ||||
|       DESIRED_PYTHON: "3.14" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -4237,72 +3805,6 @@ jobs: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_14t-cuda12_9-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.14t" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: manywheel-py3_14t-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_14t-cuda12_9-test:  # Testing | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: | ||||
|       - manywheel-py3_14t-cuda12_9-build | ||||
|       - get-label-type | ||||
|     uses: ./.github/workflows/_binary-test-linux.yml | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.14t" | ||||
|       build_name: manywheel-py3_14t-cuda12_9 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_14t-cuda12_9-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_14t-cuda12_9-test | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cu129 | ||||
|       GPU_ARCH_VERSION: "12.9" | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: cuda12.9 | ||||
|       DESIRED_PYTHON: "3.14t" | ||||
|       build_name: manywheel-py3_14t-cuda12_9 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_14t-cuda13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
| @ -4321,7 +3823,7 @@ jobs: | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: manywheel-py3_14t-cuda13_0 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.88; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.96; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.85; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.1.0.3; platform_system == 'Linux' | nvidia-cufft==12.0.0.61; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.4.66; platform_system == 'Linux' | nvidia-cusparse==12.6.3.3; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.85; platform_system == 'Linux' | nvidia-nvjitlink==13.0.88; platform_system == 'Linux' | nvidia-cufile==1.15.1.6; platform_system == 'Linux' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_14t-cuda13_0-test:  # Testing | ||||
| @ -4409,9 +3911,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm6.4 | ||||
|       DESIRED_PYTHON: "3.14t" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
| @ -4524,9 +4023,6 @@ jobs: | ||||
|       DOCKER_IMAGE: manylinux2_28-builder | ||||
|       DOCKER_IMAGE_TAG_PREFIX: rocm7.0 | ||||
|       DESIRED_PYTHON: "3.14t" | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
|  | ||||
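The PYTORCH_EXTRA_INSTALL_REQUIREMENTS values above are pipe-separated PEP 508 requirement strings, each guarded by a "platform_system == 'Linux'" environment marker so that the nvidia-* wheels are only resolved on Linux. A minimal sketch of how one such value parses and evaluates, assuming the third-party "packaging" library (the workflows consume this value elsewhere, so this is illustrative only):

# Split the pipe-separated value into individual PEP 508 requirements and
# evaluate each environment marker against the current interpreter/platform.
from packaging.requirements import Requirement

extra = (
    "nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | "
    "nvidia-nccl-cu13==2.27.7; platform_system == 'Linux'"
)

for raw in extra.split(" | "):
    req = Requirement(raw)
    # Marker.evaluate() checks the marker against the running platform, so
    # these pins are skipped entirely on macOS and Windows installs.
    applies = req.marker.evaluate() if req.marker else True
    print(f"{req.name}{req.specifier}: install={applies}")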
							
								
								
									
.github/workflows/generated-macos-arm64-binary-libtorch-release-nightly.yml (1 change; generated, vendored)
							| @ -63,6 +63,7 @@ jobs: | ||||
|       - name: Setup Python | ||||
|         uses: actions/setup-python@v6 | ||||
|         with: | ||||
|           # TODO: Removeme once 3.14 is out | ||||
|           # .4 version is min minor for 3.10, and also no-gil version of 3.13 needs at least 3.13.3 | ||||
|           python-version: "3.10.4" | ||||
|           freethreaded: false | ||||
|  | ||||
							
								
								
									
.github/workflows/generated-macos-arm64-binary-wheel-nightly.yml (11 changes; generated, vendored)
							| @ -59,6 +59,7 @@ jobs: | ||||
|       - name: Setup Python | ||||
|         uses: actions/setup-python@v6 | ||||
|         with: | ||||
|           # TODO: Removeme once 3.14 is out | ||||
|           # .4 version is min minor for 3.10, and also no-gil version of 3.13 needs at least 3.13.3 | ||||
|           python-version: "3.10.4" | ||||
|           freethreaded: false | ||||
| @ -168,6 +169,7 @@ jobs: | ||||
|       - name: Setup Python | ||||
|         uses: actions/setup-python@v6 | ||||
|         with: | ||||
|           # TODO: Removeme once 3.14 is out | ||||
|           # .4 version is min minor for 3.10, and also no-gil version of 3.13 needs at least 3.13.3 | ||||
|           python-version: "3.11.4" | ||||
|           freethreaded: false | ||||
| @ -277,6 +279,7 @@ jobs: | ||||
|       - name: Setup Python | ||||
|         uses: actions/setup-python@v6 | ||||
|         with: | ||||
|           # TODO: Removeme once 3.14 is out | ||||
|           # .4 version is min minor for 3.10, and also no-gil version of 3.13 needs at least 3.13.3 | ||||
|           python-version: "3.12.4" | ||||
|           freethreaded: false | ||||
| @ -386,6 +389,7 @@ jobs: | ||||
|       - name: Setup Python | ||||
|         uses: actions/setup-python@v6 | ||||
|         with: | ||||
|           # TODO: Removeme once 3.14 is out | ||||
|           # .4 version is min minor for 3.10, and also no-gil version of 3.13 needs at least 3.13.3 | ||||
|           python-version: "3.13.4" | ||||
|           freethreaded: false | ||||
| @ -495,6 +499,7 @@ jobs: | ||||
|       - name: Setup Python | ||||
|         uses: actions/setup-python@v6 | ||||
|         with: | ||||
|           # TODO: Removeme once 3.14 is out | ||||
|           # .4 version is min minor for 3.10, and also no-gil version of 3.13 needs at least 3.13.3 | ||||
|           python-version: "3.13.4" | ||||
|           freethreaded: true | ||||
| @ -604,8 +609,9 @@ jobs: | ||||
|       - name: Setup Python | ||||
|         uses: actions/setup-python@v6 | ||||
|         with: | ||||
|           # TODO: Removeme once 3.14 is out | ||||
|           # .4 version is min minor for 3.10, and also no-gil version of 3.13 needs at least 3.13.3 | ||||
|           python-version: "3.14.0" | ||||
|           python-version: "3.14.0-rc.2" | ||||
|           freethreaded: false | ||||
|       - name: Checkout PyTorch | ||||
|         uses: actions/checkout@v4 | ||||
| @ -713,8 +719,9 @@ jobs: | ||||
|       - name: Setup Python | ||||
|         uses: actions/setup-python@v6 | ||||
|         with: | ||||
|           # TODO: Removeme once 3.14 is out | ||||
|           # .4 version is min minor for 3.10, and also no-gil version of 3.13 needs at least 3.13.3 | ||||
|           python-version: "3.14.0" | ||||
|           python-version: "3.14.0-rc.2" | ||||
|           freethreaded: true | ||||
|       - name: Checkout PyTorch | ||||
|         uses: actions/checkout@v4 | ||||
|  | ||||
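The macOS wheel jobs above pin python-version to "3.14.0-rc.2" until CPython 3.14.0 final ships. A minimal sketch, assuming the "packaging" library, showing that PEP 440 normalizes the dashed release-candidate spelling to the compact "3.14.0rc2" form used by CPython release artifacts, and that it sorts before the final release:

# PEP 440 treats "3.14.0-rc.2" and "3.14.0rc2" as the same pre-release.
from packaging.version import Version

pinned = Version("3.14.0-rc.2")
assert pinned == Version("3.14.0rc2")  # normalized spelling
assert pinned.is_prerelease            # release candidates are pre-releases
assert pinned < Version("3.14.0")      # and sort before the final release
print(pinned)  # -> 3.14.0rc2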
							
								
								
									
.github/workflows/generated-windows-binary-libtorch-debug-nightly.yml (8 changes; generated, vendored)
							| @ -44,7 +44,7 @@ jobs: | ||||
|   libtorch-cpu-shared-with-deps-debug-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -291,7 +291,7 @@ jobs: | ||||
|   libtorch-cuda12_6-shared-with-deps-debug-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -541,7 +541,7 @@ jobs: | ||||
|   libtorch-cuda12_8-shared-with-deps-debug-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -791,7 +791,7 @@ jobs: | ||||
|   libtorch-cuda13_0-shared-with-deps-debug-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
|  | ||||
							
								
								
									
.github/workflows/generated-windows-binary-libtorch-release-nightly.yml (8 changes; generated, vendored)
							| @ -44,7 +44,7 @@ jobs: | ||||
|   libtorch-cpu-shared-with-deps-release-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -291,7 +291,7 @@ jobs: | ||||
|   libtorch-cuda12_6-shared-with-deps-release-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -541,7 +541,7 @@ jobs: | ||||
|   libtorch-cuda12_8-shared-with-deps-release-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -791,7 +791,7 @@ jobs: | ||||
|   libtorch-cuda13_0-shared-with-deps-release-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
|  | ||||
							
								
								
									
.github/workflows/generated-windows-binary-wheel-nightly.yml (70 changes; generated, vendored)
							| @ -44,7 +44,7 @@ jobs: | ||||
|   wheel-py3_10-cpu-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -279,7 +279,7 @@ jobs: | ||||
|   wheel-py3_10-cuda12_6-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -517,7 +517,7 @@ jobs: | ||||
|   wheel-py3_10-cuda12_8-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -755,7 +755,7 @@ jobs: | ||||
|   wheel-py3_10-cuda13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -993,7 +993,7 @@ jobs: | ||||
|   wheel-py3_10-xpu-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -1229,7 +1229,7 @@ jobs: | ||||
|   wheel-py3_11-cpu-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -1464,7 +1464,7 @@ jobs: | ||||
|   wheel-py3_11-cuda12_6-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -1702,7 +1702,7 @@ jobs: | ||||
|   wheel-py3_11-cuda12_8-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -1940,7 +1940,7 @@ jobs: | ||||
|   wheel-py3_11-cuda13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -2178,7 +2178,7 @@ jobs: | ||||
|   wheel-py3_11-xpu-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -2414,7 +2414,7 @@ jobs: | ||||
|   wheel-py3_12-cpu-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -2649,7 +2649,7 @@ jobs: | ||||
|   wheel-py3_12-cuda12_6-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -2887,7 +2887,7 @@ jobs: | ||||
|   wheel-py3_12-cuda12_8-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -3125,7 +3125,7 @@ jobs: | ||||
|   wheel-py3_12-cuda13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -3363,7 +3363,7 @@ jobs: | ||||
|   wheel-py3_12-xpu-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -3599,7 +3599,7 @@ jobs: | ||||
|   wheel-py3_13-cpu-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -3834,7 +3834,7 @@ jobs: | ||||
|   wheel-py3_13-cuda12_6-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -4072,7 +4072,7 @@ jobs: | ||||
|   wheel-py3_13-cuda12_8-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -4310,7 +4310,7 @@ jobs: | ||||
|   wheel-py3_13-cuda13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -4548,7 +4548,7 @@ jobs: | ||||
|   wheel-py3_13-xpu-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -4784,7 +4784,7 @@ jobs: | ||||
|   wheel-py3_13t-cpu-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -5019,7 +5019,7 @@ jobs: | ||||
|   wheel-py3_13t-cuda12_6-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -5257,7 +5257,7 @@ jobs: | ||||
|   wheel-py3_13t-cuda12_8-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -5495,7 +5495,7 @@ jobs: | ||||
|   wheel-py3_13t-cuda13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -5733,7 +5733,7 @@ jobs: | ||||
|   wheel-py3_13t-xpu-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -5969,7 +5969,7 @@ jobs: | ||||
|   wheel-py3_14-cpu-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -6204,7 +6204,7 @@ jobs: | ||||
|   wheel-py3_14-cuda12_6-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -6442,7 +6442,7 @@ jobs: | ||||
|   wheel-py3_14-cuda12_8-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -6680,7 +6680,7 @@ jobs: | ||||
|   wheel-py3_14-cuda13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -6918,7 +6918,7 @@ jobs: | ||||
|   wheel-py3_14-xpu-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -7154,7 +7154,7 @@ jobs: | ||||
|   wheel-py3_14t-cpu-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -7389,7 +7389,7 @@ jobs: | ||||
|   wheel-py3_14t-cuda12_6-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -7627,7 +7627,7 @@ jobs: | ||||
|   wheel-py3_14t-cuda12_8-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -7865,7 +7865,7 @@ jobs: | ||||
|   wheel-py3_14t-cuda13_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
| @ -8103,7 +8103,7 @@ jobs: | ||||
|   wheel-py3_14t-xpu-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: get-label-type | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge" | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge" | ||||
|     timeout-minutes: 360 | ||||
|     env: | ||||
|       PYTORCH_ROOT: ${{ github.workspace }}/pytorch | ||||
|  | ||||
| @ -1,132 +0,0 @@ | ||||
| name: inductor-perf-nightly-rocm-mi300 | ||||
|  | ||||
| on: | ||||
|   push: | ||||
|     tags: | ||||
|       - ciflow/inductor-perf-test-nightly-rocm-mi300/* | ||||
|   schedule: | ||||
|     - cron: 15 0 * * * | ||||
|   # NB: GitHub has an upper limit of 10 inputs here, so before we can sort it | ||||
|   # out, let's try to run torchao cudagraphs_low_precision as part of cudagraphs | ||||
|   workflow_dispatch: | ||||
|     inputs: | ||||
|       training: | ||||
|         description: Run training (on by default)? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: true | ||||
|       inference: | ||||
|         description: Run inference (on by default)? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: true | ||||
|       default: | ||||
|         description: Run inductor_default? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: false | ||||
|       dynamic: | ||||
|         description: Run inductor_dynamic_shapes? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: false | ||||
|       cppwrapper: | ||||
|         description: Run inductor_cpp_wrapper? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: false | ||||
|       cudagraphs: | ||||
|         description: Run inductor_cudagraphs? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: true | ||||
|       freezing_cudagraphs: | ||||
|         description: Run inductor_cudagraphs with freezing for inference? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: false | ||||
|       aotinductor: | ||||
|         description: Run aot_inductor for inference? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: false | ||||
|       maxautotune: | ||||
|         description: Run inductor_max_autotune? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: false | ||||
|       benchmark_configs: | ||||
|         description: The list of configs used by the benchmark | ||||
|         required: false | ||||
|         type: string | ||||
|         default: inductor_huggingface_perf_rocm_mi300,inductor_timm_perf_rocm_mi300,inductor_torchbench_perf_rocm_mi300 | ||||
|  | ||||
| concurrency: | ||||
|   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }} | ||||
|   cancel-in-progress: true | ||||
|  | ||||
| permissions: read-all | ||||
|  | ||||
| jobs: | ||||
|   get-label-type: | ||||
|     name: get-label-type | ||||
|     uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main | ||||
|     if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }} | ||||
|     with: | ||||
|       triggering_actor: ${{ github.triggering_actor }} | ||||
|       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }} | ||||
|       curr_branch: ${{ github.head_ref || github.ref_name }} | ||||
|       curr_ref_type: ${{ github.ref_type }} | ||||
|       opt_out_experiments: lf | ||||
|  | ||||
|   linux-jammy-rocm-py3_10-inductor-benchmark-build: | ||||
|     if: github.repository_owner == 'pytorch' | ||||
|     name: rocm-py3_10-inductor-benchmark-build | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     with: | ||||
|       build-environment: linux-jammy-rocm-py3_10 | ||||
|       docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3-benchmarks | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "inductor_huggingface_perf_rocm_mi300", shard: 1, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_huggingface_perf_rocm_mi300", shard: 2, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_huggingface_perf_rocm_mi300", shard: 3, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_huggingface_perf_rocm_mi300", shard: 4, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_huggingface_perf_rocm_mi300", shard: 5, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi300", shard: 1, num_shards: 7, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi300", shard: 2, num_shards: 7, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi300", shard: 3, num_shards: 7, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi300", shard: 4, num_shards: 7, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi300", shard: 5, num_shards: 7, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi300", shard: 6, num_shards: 7, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi300", shard: 7, num_shards: 7, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi300", shard: 1, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi300", shard: 2, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi300", shard: 3, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi300", shard: 4, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi300", shard: 5, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi300", shard: 6, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi300", shard: 7, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi300", shard: 8, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi300", shard: 9, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|         ]} | ||||
|     secrets: inherit | ||||
|  | ||||
|   linux-jammy-rocm-py3_10-inductor-benchmark-test: | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     name: rocm-py3_10-inductor-benchmark-test | ||||
|     uses: ./.github/workflows/_rocm-test.yml | ||||
|     needs: linux-jammy-rocm-py3_10-inductor-benchmark-build | ||||
|     with: | ||||
|       build-environment: linux-jammy-rocm-py3_10 | ||||
|       dashboard-tag: training-true-inference-true-default-true-dynamic-true-cudagraphs-true-cppwrapper-true-aotinductor-true-freezing_cudagraphs-true-cudagraphs_low_precision-true | ||||
|       docker-image: ${{ needs.linux-jammy-rocm-py3_10-inductor-benchmark-build.outputs.docker-image }} | ||||
|       test-matrix: ${{ needs.linux-jammy-rocm-py3_10-inductor-benchmark-build.outputs.test-matrix }} | ||||
|       timeout-minutes: 720 | ||||
|       # Disable monitor in perf tests pending further investigation | ||||
|       disable-monitor: true | ||||
|       monitor-log-interval: 10 | ||||
|       monitor-data-collect-interval: 2 | ||||
|     secrets: inherit | ||||
| @ -1,11 +1,11 @@ | ||||
| name: inductor-perf-nightly-rocm-mi355 | ||||
| name: inductor-perf-nightly-rocm | ||||
|  | ||||
| on: | ||||
|   push: | ||||
|     tags: | ||||
|       - ciflow/inductor-perf-test-nightly-rocm-mi355/* | ||||
|       - ciflow/inductor-perf-test-nightly-rocm/* | ||||
|   schedule: | ||||
|     - cron: 15 0 * * * | ||||
|     - cron: 0 7 * * 0,3 | ||||
|   # NB: GitHub has an upper limit of 10 inputs here, so before we can sort it | ||||
|   # out, let's try to run torchao cudagraphs_low_precision as part of cudagraphs | ||||
|   workflow_dispatch: | ||||
| @ -59,7 +59,7 @@ on: | ||||
|         description: The list of configs used by the benchmark | ||||
|         required: false | ||||
|         type: string | ||||
|         default: inductor_huggingface_perf_rocm_mi355,inductor_timm_perf_rocm_mi355,inductor_torchbench_perf_rocm_mi355 | ||||
|         default: inductor_huggingface_perf_rocm,inductor_timm_perf_rocm,inductor_torchbench_perf_rocm | ||||
|  | ||||
| concurrency: | ||||
|   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }} | ||||
| @ -88,27 +88,23 @@ jobs: | ||||
|       docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3-benchmarks | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "inductor_huggingface_perf_rocm_mi355", shard: 1, num_shards: 5, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_huggingface_perf_rocm_mi355", shard: 2, num_shards: 5, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_huggingface_perf_rocm_mi355", shard: 3, num_shards: 5, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_huggingface_perf_rocm_mi355", shard: 4, num_shards: 5, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_huggingface_perf_rocm_mi355", shard: 5, num_shards: 5, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi355", shard: 1, num_shards: 7, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi355", shard: 2, num_shards: 7, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi355", shard: 3, num_shards: 7, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi355", shard: 4, num_shards: 7, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi355", shard: 5, num_shards: 7, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi355", shard: 6, num_shards: 7, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_timm_perf_rocm_mi355", shard: 7, num_shards: 7, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi355", shard: 1, num_shards: 9, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi355", shard: 2, num_shards: 9, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi355", shard: 3, num_shards: 9, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi355", shard: 4, num_shards: 9, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi355", shard: 5, num_shards: 9, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi355", shard: 6, num_shards: 9, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi355", shard: 7, num_shards: 9, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi355", shard: 8, num_shards: 9, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm_mi355", shard: 9, num_shards: 9, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "inductor_huggingface_perf_rocm", shard: 1, num_shards: 4, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_huggingface_perf_rocm", shard: 2, num_shards: 4, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_huggingface_perf_rocm", shard: 3, num_shards: 4, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_huggingface_perf_rocm", shard: 4, num_shards: 4, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_timm_perf_rocm", shard: 1, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_timm_perf_rocm", shard: 2, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_timm_perf_rocm", shard: 3, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_timm_perf_rocm", shard: 4, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_timm_perf_rocm", shard: 5, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm", shard: 1, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm", shard: 2, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm", shard: 3, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm", shard: 4, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm", shard: 5, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm", shard: 6, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm", shard: 7, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "inductor_torchbench_perf_rocm", shard: 8, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|         ]} | ||||
|     secrets: inherit | ||||
|  | ||||
.github/workflows/inductor-periodic.yml (1 change, vendored)
| @ -88,6 +88,7 @@ jobs: | ||||
|     with: | ||||
|       build-environment: linux-jammy-rocm-py3_10 | ||||
|       docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3-benchmarks | ||||
|       sync-tag: rocm-build | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "dynamo_eager_torchbench", shard: 1, num_shards: 2, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|  | ||||
.github/workflows/lint.yml (4 changes, vendored)
| @ -118,9 +118,9 @@ jobs: | ||||
|         CHANGED_FILES="${{ needs.get-changed-files.outputs.changed-files }}" | ||||
|         echo "Running all other linters" | ||||
|         if [ "$CHANGED_FILES" = '*' ]; then | ||||
|           ADDITIONAL_LINTRUNNER_ARGS="--skip CLANGTIDY,CLANGFORMAT,MYPY,MYPYSTRICT,PYREFLY --all-files" .github/scripts/lintrunner.sh | ||||
|           ADDITIONAL_LINTRUNNER_ARGS="--skip CLANGTIDY,CLANGFORMAT,MYPY,MYPYSTRICT --all-files" .github/scripts/lintrunner.sh | ||||
|         else | ||||
|           ADDITIONAL_LINTRUNNER_ARGS="--skip CLANGTIDY,CLANGFORMAT,MYPY,MYPYSTRICT,PYREFLY ${CHANGED_FILES}" .github/scripts/lintrunner.sh | ||||
|           ADDITIONAL_LINTRUNNER_ARGS="--skip CLANGTIDY,CLANGFORMAT,MYPY,MYPYSTRICT ${CHANGED_FILES}" .github/scripts/lintrunner.sh | ||||
|         fi | ||||
|  | ||||
|   quick-checks: | ||||
|  | ||||
.github/workflows/operator_benchmark.yml (49 changes, vendored)
| @ -7,11 +7,9 @@ on: | ||||
|   workflow_dispatch: | ||||
|     inputs: | ||||
|       test_mode: | ||||
|         type: choice | ||||
|         options: | ||||
|           - 'short' | ||||
|           - 'long' | ||||
|           - 'all' | ||||
|         required: false | ||||
|         type: string | ||||
|         default: 'short' | ||||
|         description: tag filter for operator benchmarks; options are long, short, all | ||||
|   schedule: | ||||
|     # Run at 07:00 UTC every Sunday | ||||
| @ -30,49 +28,38 @@ permissions: | ||||
|   contents: read | ||||
|  | ||||
| jobs: | ||||
|   x86-opbenchmark-build: | ||||
|   opbenchmark-build: | ||||
|     if: github.repository_owner == 'pytorch' | ||||
|     name: x86-opbenchmark-build | ||||
|     name: opbenchmark-build | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     with: | ||||
|       build-environment: linux-jammy-py3.10-gcc11-build | ||||
|       docker-image-name: ci-image:pytorch-linux-jammy-py3-gcc11-inductor-benchmarks | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "cpu_operator_benchmark_${{ inputs.test_mode || 'short' }}", shard: 1, num_shards: 1, runner: "linux.12xlarge" }, | ||||
|           { config: "cpu_operator_benchmark_short", shard: 1, num_shards: 1, runner: "linux.12xlarge" }, | ||||
|         ]} | ||||
|     secrets: inherit | ||||
|  | ||||
|   x86-opbenchmark-test: | ||||
|     name: x86-opbenchmark-test | ||||
|     uses: ./.github/workflows/_linux-test.yml | ||||
|     needs: x86-opbenchmark-build | ||||
|     with: | ||||
|       build-environment: linux-jammy-py3.10-gcc11-build | ||||
|       docker-image: ${{ needs.x86-opbenchmark-build.outputs.docker-image }} | ||||
|       test-matrix: ${{ needs.x86-opbenchmark-build.outputs.test-matrix }} | ||||
|     secrets: inherit | ||||
|  | ||||
|   aarch64-opbenchmark-build: | ||||
|     if: github.repository_owner == 'pytorch' | ||||
|     name: aarch64-opbenchmark-build | ||||
|   opbenchmark-on-demand-build: | ||||
|     if: ${{ github.event_name == 'workflow_dispatch' && github.repository_owner == 'pytorch' }} | ||||
|     name: opbenchmark-on-demand-build | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     with: | ||||
|       build-environment: linux-jammy-aarch64-py3.10 | ||||
|       runner: linux.arm64.m7g.4xlarge | ||||
|       docker-image-name: ci-image:pytorch-linux-jammy-aarch64-py3.10-gcc11 | ||||
|       build-environment: linux-jammy-py3.10-gcc11-build | ||||
|       docker-image-name: ci-image:pytorch-linux-jammy-py3-gcc11-inductor-benchmarks | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "cpu_operator_benchmark_short", shard: 1, num_shards: 1, runner: "linux.arm64.m8g.4xlarge" }, | ||||
|           { config: "cpu_operator_benchmark_${{ inputs.test_mode }}", shard: 1, num_shards: 1, runner: "linux.12xlarge" }, | ||||
|         ]} | ||||
|     secrets: inherit | ||||
|  | ||||
|   aarch64-opbenchmark-test: | ||||
|     name: aarch64-opbenchmark-test | ||||
|   opbenchmark-test: | ||||
|     name: opbenchmark-test | ||||
|     uses: ./.github/workflows/_linux-test.yml | ||||
|     needs: aarch64-opbenchmark-build | ||||
|     needs: opbenchmark-build | ||||
|     with: | ||||
|       build-environment: linux-jammy-aarch64-py3.10 | ||||
|       docker-image: ${{ needs.aarch64-opbenchmark-build.outputs.docker-image }} | ||||
|       test-matrix: ${{ needs.aarch64-opbenchmark-build.outputs.test-matrix }} | ||||
|       build-environment: linux-jammy-py3.10-gcc11-build | ||||
|       docker-image: ${{ needs.opbenchmark-build.outputs.docker-image }} | ||||
|       test-matrix: ${{ needs.opbenchmark-build.outputs.test-matrix }} | ||||
|     secrets: inherit | ||||
|  | ||||
.github/workflows/periodic.yml (15 changes, vendored)
| @ -147,16 +147,15 @@ jobs: | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build-environment: linux-jammy-cuda12.8-py3.10-gcc9-debug | ||||
|       docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc9 | ||||
|       cuda-arch-list: 8.9 | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g6.4xlarge.experimental.nvidia.gpu", owners: ["oncall:debug-build"] }, | ||||
|           { config: "default", shard: 2, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g6.4xlarge.experimental.nvidia.gpu", owners: ["oncall:debug-build"] }, | ||||
|           { config: "default", shard: 3, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g6.4xlarge.experimental.nvidia.gpu", owners: ["oncall:debug-build"] }, | ||||
|           { config: "default", shard: 4, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g6.4xlarge.experimental.nvidia.gpu", owners: ["oncall:debug-build"] }, | ||||
|           { config: "default", shard: 5, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g6.4xlarge.experimental.nvidia.gpu", owners: ["oncall:debug-build"] }, | ||||
|           { config: "default", shard: 6, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g6.4xlarge.experimental.nvidia.gpu", owners: ["oncall:debug-build"] }, | ||||
|           { config: "default", shard: 7, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g6.4xlarge.experimental.nvidia.gpu", owners: ["oncall:debug-build"] }, | ||||
|           { config: "default", shard: 1, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu", owners: ["oncall:debug-build"] }, | ||||
|           { config: "default", shard: 2, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu", owners: ["oncall:debug-build"] }, | ||||
|           { config: "default", shard: 3, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu", owners: ["oncall:debug-build"] }, | ||||
|           { config: "default", shard: 4, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu", owners: ["oncall:debug-build"] }, | ||||
|           { config: "default", shard: 5, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu", owners: ["oncall:debug-build"] }, | ||||
|           { config: "default", shard: 6, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu", owners: ["oncall:debug-build"] }, | ||||
|           { config: "default", shard: 7, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu", owners: ["oncall:debug-build"] }, | ||||
|         ]} | ||||
|     secrets: inherit | ||||
|  | ||||
|  | ||||
.github/workflows/pull.yml (3 changes, vendored)
| @ -347,8 +347,7 @@ jobs: | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       # This should sync with the build in xpu.yml but xpu uses a larger runner | ||||
|       # sync-tag: linux-xpu-n-build | ||||
|       sync-tag: linux-xpu-n-build | ||||
|       runner_prefix: ${{ needs.get-label-type.outputs.label-type }} | ||||
|       build-environment: linux-jammy-xpu-n-py3.10 | ||||
|       docker-image-name: ci-image:pytorch-linux-jammy-xpu-n-py3 | ||||
|  | ||||
.github/workflows/rocm-mi300.yml (1 change, vendored)
| @ -45,6 +45,7 @@ jobs: | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build-environment: linux-noble-rocm-py3.12-mi300 | ||||
|       docker-image-name: ci-image:pytorch-linux-noble-rocm-n-py3 | ||||
|       sync-tag: rocm-build | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 6, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|  | ||||
.github/workflows/rocm-mi355.yml (13 changes, vendored)
| @ -42,14 +42,15 @@ jobs: | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build-environment: linux-noble-rocm-py3.12-mi355 | ||||
|       docker-image-name: ci-image:pytorch-linux-noble-rocm-n-py3 | ||||
|       sync-tag: rocm-build | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 6, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "default", shard: 2, num_shards: 6, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "default", shard: 3, num_shards: 6, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "default", shard: 4, num_shards: 6, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "default", shard: 5, num_shards: 6, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "default", shard: 6, num_shards: 6, runner: "linux.rocm.gpu.mi355.1" }, | ||||
|           { config: "default", shard: 1, num_shards: 6, runner: "linux.rocm.gpu.mi355.2" }, | ||||
|           { config: "default", shard: 2, num_shards: 6, runner: "linux.rocm.gpu.mi355.2" }, | ||||
|           { config: "default", shard: 3, num_shards: 6, runner: "linux.rocm.gpu.mi355.2" }, | ||||
|           { config: "default", shard: 4, num_shards: 6, runner: "linux.rocm.gpu.mi355.2" }, | ||||
|           { config: "default", shard: 5, num_shards: 6, runner: "linux.rocm.gpu.mi355.2" }, | ||||
|           { config: "default", shard: 6, num_shards: 6, runner: "linux.rocm.gpu.mi355.2" }, | ||||
|         ]} | ||||
|     secrets: inherit | ||||
|  | ||||
|  | ||||
.github/workflows/rocm-navi31.yml (75 changes, vendored)
| @ -1,75 +0,0 @@ | ||||
| name: rocm-navi31 | ||||
|  | ||||
| on: | ||||
|   push: | ||||
|     tags: | ||||
|       - ciflow/rocm-navi31/* | ||||
|   workflow_dispatch: | ||||
|   schedule: | ||||
|     # We have several schedules so jobs can check github.event.schedule to activate only for a fraction of the runs. | ||||
|     # Also run less frequently on weekends. | ||||
|     - cron: 45 */2 * * 1-5 | ||||
|     - cron: 45 4,12 * * 0,6 | ||||
|  | ||||
| concurrency: | ||||
|   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }} | ||||
|   cancel-in-progress: true | ||||
|  | ||||
| permissions: read-all | ||||
|  | ||||
| jobs: | ||||
|   target-determination: | ||||
|     if: github.repository_owner == 'pytorch' | ||||
|     name: before-test | ||||
|     uses: ./.github/workflows/target_determination.yml | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|  | ||||
|   get-label-type: | ||||
|     name: get-label-type | ||||
|     uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main | ||||
|     if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }} | ||||
|     with: | ||||
|       triggering_actor: ${{ github.triggering_actor }} | ||||
|       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }} | ||||
|       curr_branch: ${{ github.head_ref || github.ref_name }} | ||||
|       curr_ref_type: ${{ github.ref_type }} | ||||
|  | ||||
|   linux-jammy-rocm-py3_10-build: | ||||
|     if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }} | ||||
|     name: linux-jammy-rocm-py3.10 | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build-environment: linux-jammy-rocm-py3.10 | ||||
|       docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3 | ||||
|       sync-tag: rocm-build | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 2, runner: "linux.rocm.gpu.gfx1100" }, | ||||
|           { config: "default", shard: 2, num_shards: 2, runner: "linux.rocm.gpu.gfx1100" }, | ||||
|         ]} | ||||
|     secrets: inherit | ||||
|  | ||||
|   linux-jammy-rocm-py3_10-test: | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     name: linux-jammy-rocm-py3_10 | ||||
|     uses: ./.github/workflows/_rocm-test.yml | ||||
|     needs: | ||||
|       - linux-jammy-rocm-py3_10-build | ||||
|       - target-determination | ||||
|     with: | ||||
|       build-environment: linux-jammy-rocm-py3.10 | ||||
|       docker-image: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.docker-image }} | ||||
|       test-matrix: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.test-matrix }} | ||||
|       tests-to-include: >- | ||||
|          ${{ github.event_name == 'schedule' && 'test_nn test_torch test_cuda test_ops test_unary_ufuncs test_binary_ufuncs | ||||
|          test_autograd inductor/test_torchinductor inductor/test_kernel_benchmark | ||||
|          inductor/test_pad_mm inductor/test_benchmark_fusion inductor/test_aot_inductor | ||||
|          inductor/test_torchinductor inductor/test_decompose_mem_bound_mm | ||||
|          inductor/test_flex_attention inductor/test_max_autotune' || '' }} | ||||
|     secrets: inherit | ||||
.github/workflows/rocm.yml (38 changes, vendored)
| @ -26,23 +26,11 @@ jobs: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|  | ||||
|   get-label-type: | ||||
|     name: get-label-type | ||||
|     uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main | ||||
|     if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }} | ||||
|     with: | ||||
|       triggering_actor: ${{ github.triggering_actor }} | ||||
|       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }} | ||||
|       curr_branch: ${{ github.head_ref || github.ref_name }} | ||||
|       curr_ref_type: ${{ github.ref_type }} | ||||
|  | ||||
|   linux-jammy-rocm-py3_10-build: | ||||
|     if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }} | ||||
|     name: linux-jammy-rocm-py3.10 | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build-environment: linux-jammy-rocm-py3.10 | ||||
|       docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3 | ||||
|       sync-tag: rocm-build | ||||
| @ -71,3 +59,29 @@ jobs: | ||||
|       docker-image: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.docker-image }} | ||||
|       test-matrix: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.test-matrix }} | ||||
|     secrets: inherit | ||||
|  | ||||
|   linux-jammy-rocm-py3_10-gfx1100-test: | ||||
|     if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     name: linux-jammy-rocm-py3_10-gfx1100 | ||||
|     uses: ./.github/workflows/_rocm-test.yml | ||||
|     needs: | ||||
|       - linux-jammy-rocm-py3_10-build | ||||
|       - target-determination | ||||
|     with: | ||||
|       build-environment: linux-jammy-rocm-py3.10 | ||||
|       docker-image: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.docker-image }} | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 2, runner: "linux.rocm.gpu.gfx1100" }, | ||||
|           { config: "default", shard: 2, num_shards: 2, runner: "linux.rocm.gpu.gfx1100" }, | ||||
|         ]} | ||||
|       tests-to-include: > | ||||
|          test_nn test_torch test_cuda test_ops test_unary_ufuncs test_binary_ufuncs | ||||
|          test_autograd inductor/test_torchinductor inductor/test_kernel_benchmark | ||||
|          inductor/test_pad_mm inductor/test_benchmark_fusion inductor/test_aot_inductor | ||||
|          inductor/test_torchinductor inductor/test_decompose_mem_bound_mm | ||||
|          inductor/test_flex_attention inductor/test_max_autotune | ||||
|     secrets: inherit | ||||
|  | ||||
.github/workflows/trunk-tagging.yml (149 changes, vendored)
| @ -58,10 +58,8 @@ jobs: | ||||
|           else | ||||
|             COMMIT_SHA="${{ github.sha }}" | ||||
|           fi | ||||
|           { | ||||
|             echo "sha=${COMMIT_SHA}" | ||||
|             echo "tag_name=trunk/${COMMIT_SHA}" | ||||
|           } >> "${GITHUB_OUTPUT}" | ||||
|           echo "sha=${COMMIT_SHA}" >> "${GITHUB_OUTPUT}" | ||||
|           echo "tag_name=trunk/${COMMIT_SHA}" >> "${GITHUB_OUTPUT}" | ||||
|  | ||||
|       - name: Validate commit SHA | ||||
|         run: | | ||||
| @ -89,7 +87,7 @@ jobs: | ||||
|             echo "✅ Commit ${COMMIT_SHA} is valid (automatic push trigger)" | ||||
|           fi | ||||
|  | ||||
|       - name: Create and push tag(s) with retry | ||||
|       - name: Create and push tag with retry | ||||
|         id: check_tag | ||||
|         env: | ||||
|           TAG_NAME: ${{ steps.commit.outputs.tag_name }} | ||||
| @ -114,23 +112,14 @@ jobs: | ||||
|             return 1 | ||||
|           } | ||||
|  | ||||
|           # Counters for summary reporting | ||||
|           created_count=0 | ||||
|           skipped_count=0 | ||||
|           failed_count=0 | ||||
|           # Exit early if tag already exists | ||||
|           if check_tag_exists; then | ||||
|             echo "✅ Tag already exists - no action needed" | ||||
|             echo "exists=true" >> "${GITHUB_OUTPUT}" | ||||
|             exit 0 | ||||
|           fi | ||||
|  | ||||
|           # Always write outputs once on exit | ||||
|           finish() { | ||||
|             set +e | ||||
|             if [ -n "${GITHUB_OUTPUT:-}" ]; then | ||||
|               { | ||||
|                 echo "created_count=${created_count}" | ||||
|                 echo "skipped_count=${skipped_count}" | ||||
|                 echo "failed_count=${failed_count}" | ||||
|               } >> "${GITHUB_OUTPUT}" | ||||
|             fi | ||||
|           } | ||||
|           trap finish EXIT | ||||
|           echo "Tag ${TAG_NAME} does not exist, proceeding with creation" | ||||
|  | ||||
|           # Retry configuration | ||||
|           MAX_RETRIES=5 | ||||
| @ -205,111 +194,31 @@ jobs: | ||||
|             } | ||||
|           } | ||||
|  | ||||
|           # New behavior for push events: enumerate commits in the push and tag each one. | ||||
|           # For workflow_dispatch, retain existing single-SHA behavior. | ||||
|  | ||||
|           # Always fetch tags once up front to improve idempotency in loops | ||||
|           git fetch origin --tags --quiet || true | ||||
|  | ||||
|           if [ "${{ github.event_name }}" = "push" ]; then | ||||
|             BEFORE_SHA="${{ github.event.before }}" | ||||
|             AFTER_SHA="${{ github.sha }}"  # same as event.after | ||||
|  | ||||
|             # List commits introduced by this push (old..new), oldest first for stable ordering | ||||
|             commits_file="$(mktemp)" | ||||
|             git rev-list --reverse "${BEFORE_SHA}..${AFTER_SHA}" > "${commits_file}" | ||||
|  | ||||
|             if [ ! -s "${commits_file}" ]; then | ||||
|               echo "No new commits found between ${BEFORE_SHA}..${AFTER_SHA}; nothing to tag." | ||||
|               rm -f "${commits_file}" | ||||
|               exit 0 | ||||
|             fi | ||||
|  | ||||
|             commit_count="$(wc -l < "${commits_file}" | tr -d ' ')" | ||||
|             echo "Found ${commit_count} commit(s) to tag for push:" | ||||
|             while IFS= read -r sha; do | ||||
|               printf '  %s\n' "${sha}" | ||||
|             done < "${commits_file}" | ||||
|  | ||||
|             while IFS= read -r sha; do | ||||
|               TAG_NAME="trunk/${sha}" | ||||
|               COMMIT_SHA="${sha}" | ||||
|  | ||||
|               # If tag already exists locally or remotely, skip (idempotent) | ||||
|               if check_tag_exists; then | ||||
|                 echo "✅ Tag ${TAG_NAME} already exists - skipping" | ||||
|                 skipped_count=$((skipped_count + 1)) | ||||
|                 continue | ||||
|               fi | ||||
|  | ||||
|               echo "Tag ${TAG_NAME} does not exist, proceeding with creation" | ||||
|  | ||||
|               if retry_with_backoff "tag_with_retry" "Creating tag ${TAG_NAME} for commit ${COMMIT_SHA}"; then | ||||
|                 created_count=$((created_count + 1)) | ||||
|               else | ||||
|                 echo "Tag creation failed after all retry attempts for ${TAG_NAME}" | ||||
|                 failed_count=$((failed_count + 1)) | ||||
|               fi | ||||
|             done < "${commits_file}" | ||||
|  | ||||
|             rm -f "${commits_file}" | ||||
|  | ||||
|             if [ "${failed_count}" -gt 0 ]; then | ||||
|               exit 1 | ||||
|             fi | ||||
|           # Execute with retry | ||||
|           if retry_with_backoff "tag_with_retry" "Creating tag ${TAG_NAME} for commit ${COMMIT_SHA}"; then | ||||
|             echo "exists=false" >> "${GITHUB_OUTPUT}" | ||||
|             exit 0 | ||||
|           else | ||||
|             # workflow_dispatch path (single SHA tagging preserved) | ||||
|  | ||||
|             # Exit early if tag already exists | ||||
|             if check_tag_exists; then | ||||
|               echo "✅ Tag already exists - no action needed" | ||||
|               skipped_count=1 | ||||
|               exit 0 | ||||
|             fi | ||||
|  | ||||
|             echo "Tag ${TAG_NAME} does not exist, proceeding with creation" | ||||
|  | ||||
|             if retry_with_backoff "tag_with_retry" "Creating tag ${TAG_NAME} for commit ${COMMIT_SHA}"; then | ||||
|               created_count=1 | ||||
|               exit 0 | ||||
|             else | ||||
|               echo "Tag creation failed after all retry attempts" | ||||
|               failed_count=1 | ||||
|               exit 1 | ||||
|             fi | ||||
|             echo "Tag creation failed after all retry attempts" | ||||
|             exit 1 | ||||
|           fi | ||||
|  | ||||
|       - name: Tag creation summary | ||||
|         if: always() | ||||
|         run: | | ||||
|           if [ "${{ github.event_name }}" = "push" ]; then | ||||
|             echo "Trigger: push on main" | ||||
|             echo "Created: ${{ steps.check_tag.outputs.created_count }}" | ||||
|             echo "Skipped (already existed): ${{ steps.check_tag.outputs.skipped_count }}" | ||||
|             echo "Failed: ${{ steps.check_tag.outputs.failed_count }}" | ||||
|             if [ "${{ steps.check_tag.outputs.failed_count }}" = "0" ]; then | ||||
|               echo "✅ Completed tagging for push range ${{ github.event.before }}..${{ github.sha }}" | ||||
|             else | ||||
|               echo "❌ Some tags failed to create for push range ${{ github.event.before }}..${{ github.sha }}" | ||||
|             fi | ||||
|           if [ "${{ steps.check_tag.outputs.exists }}" = "true" ]; then | ||||
|             echo "✅ Tag ${{ steps.commit.outputs.tag_name }} already existed - no action needed" | ||||
|           elif [ "${{ job.status }}" = "success" ]; then | ||||
|             echo "✅ Successfully created tag ${{ steps.commit.outputs.tag_name }} for commit ${{ steps.commit.outputs.sha }}" | ||||
|           else | ||||
|             if [ "${{ steps.check_tag.outputs.failed_count }}" = "0" ]; then | ||||
|               if [ "${{ steps.check_tag.outputs.created_count }}" = "0" ]; then | ||||
|                 echo "✅ Tag ${{ steps.commit.outputs.tag_name }} already existed - no action needed" | ||||
|               else | ||||
|                 echo "✅ Successfully created tag ${{ steps.commit.outputs.tag_name }} for commit ${{ steps.commit.outputs.sha }}" | ||||
|               fi | ||||
|             else | ||||
|               echo "❌ Failed to create tag ${{ steps.commit.outputs.tag_name }} for commit ${{ steps.commit.outputs.sha }}" | ||||
|             fi | ||||
|  | ||||
|             echo "" | ||||
|             echo "Tag details:" | ||||
|             echo "  Name: ${{ steps.commit.outputs.tag_name }}" | ||||
|             echo "  Commit: ${{ steps.commit.outputs.sha }}" | ||||
|             echo "  Trigger: ${{ github.event_name }}" | ||||
|             if [ -n "${{ github.event.inputs.commit_sha }}" ]; then | ||||
|               echo "  Manual commit: ${{ github.event.inputs.commit_sha }}" | ||||
|             fi | ||||
|             echo "❌ Failed to create tag ${{ steps.commit.outputs.tag_name }} for commit ${{ steps.commit.outputs.sha }}" | ||||
|           fi | ||||
|  | ||||
|           echo "" | ||||
|           echo "Tag details:" | ||||
|           echo "  Name: ${{ steps.commit.outputs.tag_name }}" | ||||
|           echo "  Commit: ${{ steps.commit.outputs.sha }}" | ||||
|           echo "  Trigger: ${{ github.event_name }}" | ||||
|           if [ -n "${{ github.event.inputs.commit_sha }}" ]; then | ||||
|             echo "  Manual commit: ${{ github.event.inputs.commit_sha }}" | ||||
|           fi | ||||
|  | ||||
.github/workflows/trunk.yml (59 changes, vendored)
| @ -180,50 +180,16 @@ jobs: | ||||
|       disable-monitor: false | ||||
|     secrets: inherit | ||||
|  | ||||
|   win-vs2022-cuda12_8-py3-build: | ||||
|     name: win-vs2022-cuda12.8-py3 | ||||
|   win-vs2022-cuda12_6-py3-build: | ||||
|     name: win-vs2022-cuda12.6-py3 | ||||
|     uses: ./.github/workflows/_win-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       build-environment: win-vs2022-cuda12.8-py3 | ||||
|       cuda-version: "12.8" | ||||
|       build-environment: win-vs2022-cuda12.6-py3 | ||||
|       cuda-version: "12.6" | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" | ||||
|     secrets: inherit | ||||
|  | ||||
|   linux-jammy-rocm-py3_10-build: | ||||
|     if: ${{ startsWith(github.event.ref, 'refs/tags/ciflow/trunk') }} | ||||
|     name: linux-jammy-rocm-py3.10 | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build-environment: linux-jammy-rocm-py3.10 | ||||
|       docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3 | ||||
|       sync-tag: rocm-build | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 2, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|           { config: "default", shard: 2, num_shards: 2, runner: "linux.rocm.gpu.gfx942.1" }, | ||||
|         ]} | ||||
|     secrets: inherit | ||||
|  | ||||
|   linux-jammy-rocm-py3_10-test: | ||||
|     if: ${{ startsWith(github.event.ref, 'refs/tags/ciflow/trunk') }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     name: linux-jammy-rocm-py3.10 | ||||
|     uses: ./.github/workflows/_rocm-test.yml | ||||
|     needs: | ||||
|       - linux-jammy-rocm-py3_10-build | ||||
|       - target-determination | ||||
|     with: | ||||
|       build-environment: linux-jammy-rocm-py3.10 | ||||
|       docker-image: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.docker-image }} | ||||
|       test-matrix: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.test-matrix }} | ||||
|       tests-to-include: "test_nn test_torch test_cuda test_ops test_unary_ufuncs test_binary_ufuncs test_autograd inductor/test_torchinductor" | ||||
|     secrets: inherit | ||||
|  | ||||
|   inductor-build: | ||||
|     name: inductor-build | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
| @ -234,23 +200,6 @@ jobs: | ||||
|       cuda-arch-list: '8.0' | ||||
|     secrets: inherit | ||||
|  | ||||
|   # Test cross-compiled models with Windows libs extracted from wheel | ||||
|   cross-compile-linux-test: | ||||
|     name: cross-compile-linux-test | ||||
|     uses: ./.github/workflows/_linux-test.yml | ||||
|     needs: | ||||
|       - linux-jammy-cuda12_8-py3_10-gcc11-build | ||||
|       - get-label-type | ||||
|       - win-vs2022-cuda12_8-py3-build | ||||
|     with: | ||||
|       build-environment: linux-jammy-cuda12.8-py3.10-gcc11 | ||||
|       docker-image: ${{ needs.linux-jammy-cuda12_8-py3_10-gcc11-build.outputs.docker-image }} | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "aoti_cross_compile_for_windows", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g6.4xlarge.experimental.nvidia.gpu", win_torch_wheel_artifact: "win-vs2022-cuda12.8-py3" }, | ||||
|         ]} | ||||
|     secrets: inherit | ||||
|  | ||||
|   verify-cachebench-cpu-build: | ||||
|     name: verify-cachebench-cpu-build | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|  | ||||
.github/workflows/vllm.yml (4 changes, vendored)
| @ -46,7 +46,7 @@ jobs: | ||||
|       runner: linux.24xlarge.memory | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "vllm_basic_correctness_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" }, | ||||
|           { config:  "vllm_basic_correctness_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" }, | ||||
|           { config: "vllm_basic_models_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" }, | ||||
|           { config: "vllm_entrypoints_test", shard: 1, num_shards: 1,runner: "linux.g6.4xlarge.experimental.nvidia.gpu" }, | ||||
|           { config: "vllm_regression_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" }, | ||||
| @ -54,7 +54,7 @@ jobs: | ||||
|           { config: "vllm_pytorch_compilation_unit_tests", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" }, | ||||
|           { config: "vllm_lora_28_failure_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" }, | ||||
|           { config: "vllm_multi_model_test_28_failure_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu"}, | ||||
|           { config: "vllm_language_model_test_extended_generation_28_failure_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu"}, | ||||
|           { config: "vllm_languagde_model_test_extended_generation_28_failure_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu"}, | ||||
|           { config: "vllm_distributed_test_2_gpu_28_failure_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" }, | ||||
|           { config: "vllm_lora_test", shard: 0, num_shards: 4, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" }, | ||||
|           { config: "vllm_lora_test", shard: 1, num_shards: 4, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" }, | ||||
|  | ||||
.gitignore (2 changes, vendored)
| @ -374,7 +374,6 @@ third_party/ruy/ | ||||
| third_party/glog/ | ||||
|  | ||||
| # Virtualenv | ||||
| .venv/ | ||||
| venv/ | ||||
|  | ||||
| # Log files | ||||
| @ -396,4 +395,3 @@ android/pytorch_android_torchvision/.cxx | ||||
| CLAUDE.local.md | ||||
| /test_*.py | ||||
| /debug_*.py | ||||
| CLAUDE_CONTEXT/ | ||||
|  | ||||
| @ -209,46 +209,6 @@ command = [ | ||||
|     '@{{PATHSFILE}}' | ||||
| ] | ||||
|  | ||||
|  | ||||
| [[linter]] | ||||
| code = 'PYREFLY' | ||||
| include_patterns = [ | ||||
|     'torch/**/*.py', | ||||
|     'torch/**/*.pyi', | ||||
|     'torchgen/**/*.py', | ||||
|     'torchgen/**/*.pyi', | ||||
|     'functorch/**/*.py', | ||||
|     'functorch/**/*.pyi', | ||||
| ] | ||||
| exclude_patterns = [] | ||||
| command = [ | ||||
|     'python3', | ||||
|     'tools/linter/adapters/pyrefly_linter.py', | ||||
|     '--config=pyrefly.toml', | ||||
| ] | ||||
| init_command = [ | ||||
|     'python3', | ||||
|     'tools/linter/adapters/pip_init.py', | ||||
|     '--dry-run={{DRYRUN}}', | ||||
|     'numpy==2.1.0 ; python_version >= "3.12"', | ||||
|     'expecttest==0.3.0', | ||||
|     'pyrefly==0.36.2', | ||||
|     'sympy==1.13.3', | ||||
|     'types-requests==2.27.25', | ||||
|     'types-pyyaml==6.0.2', | ||||
|     'types-tabulate==0.8.8', | ||||
|     'types-protobuf==5.29.1.20250403', | ||||
|     'types-setuptools==79.0.0.20250422', | ||||
|     'types-jinja2==2.11.9', | ||||
|     'types-colorama==0.4.6', | ||||
|     'filelock==3.18.0', | ||||
|     'junitparser==2.1.1', | ||||
|     'rich==14.1.0', | ||||
|     'optree==0.17.0', | ||||
|     'types-openpyxl==3.1.5.20250919', | ||||
|     'types-python-dateutil==2.9.0.20251008' | ||||
| ] | ||||
|  | ||||
| [[linter]] | ||||
| code = 'CLANGTIDY' | ||||
| include_patterns = [ | ||||
| @ -833,7 +793,8 @@ exclude_patterns = [ | ||||
| command = [ | ||||
|     'python3', | ||||
|     'tools/linter/adapters/grep_linter.py', | ||||
|     '--pattern=(cudaSetDevice|cudaGetDevice)\\(', | ||||
|     '--pattern=cudaSetDevice(', | ||||
|     '--pattern=cudaGetDevice(', | ||||
|     '--linter-name=RAWCUDADEVICE', | ||||
|     '--error-name=raw CUDA API usage', | ||||
|     """--error-description=\ | ||||
| @ -1137,8 +1098,11 @@ command = [ | ||||
| [[linter]] | ||||
| code = 'WORKFLOWSYNC' | ||||
| include_patterns = [ | ||||
|     '.github/workflows/*.yml', | ||||
|     '.github/workflows/*.yaml', | ||||
|     '.github/workflows/pull.yml', | ||||
|     '.github/workflows/trunk.yml', | ||||
|     '.github/workflows/periodic.yml', | ||||
|     '.github/workflows/mac-mps.yml', | ||||
|     '.github/workflows/slow.yml', | ||||
| ] | ||||
| command = [ | ||||
|     'python3', | ||||
|  | ||||
CODEOWNERS (14 changes)
| @ -201,17 +201,3 @@ torch/backends/cudnn/ @eqy @syed-ahmed @Aidyn-A | ||||
| /torch/csrc/stable/ @janeyx99 @mikaylagawarecki | ||||
| /torch/headeronly/ @janeyx99 | ||||
| /torch/header_only_apis.txt @janeyx99 | ||||
|  | ||||
| # FlexAttention | ||||
| /torch/nn/attention/flex_attention.py @drisspg | ||||
| /torch/_higher_order_ops/flex_attention.py @drisspg | ||||
| /torch/_inductor/kernel/flex/ @drisspg | ||||
| /torch/_inductor/codegen/cpp_flex_attention_template.py @drisspg | ||||
| /test/inductor/test_flex_attention.py @drisspg | ||||
| /test/inductor/test_flex_decoding.py @drisspg | ||||
|  | ||||
| # Low Precision GEMMs | ||||
| /aten/src/ATen/native/cuda/Blas.cpp @drisspg @slayton58 | ||||
| /aten/src/ATen/cuda/CUDABlas.cpp @drisspg @slayton58 | ||||
| /aten/src/ATen/cuda/CUDABlas.h @drisspg @slayton58 | ||||
| /test/test_scaled_matmul_cuda.py @drisspg @slayton58 | ||||
|  | ||||
| @ -1,4 +1,4 @@ | ||||
|  | ||||
|  | ||||
|  | ||||
| -------------------------------------------------------------------------------- | ||||
|  | ||||
| @ -72,7 +72,7 @@ Elaborating Further: | ||||
|  | ||||
| If you use NumPy, then you have used Tensors (a.k.a. ndarray). | ||||
|  | ||||
|  | ||||
|  | ||||
|  | ||||
| PyTorch provides Tensors that can live either on the CPU or the GPU and accelerates the | ||||
| computation by a huge amount. | ||||
| @ -99,7 +99,7 @@ from several research papers on this topic, as well as current and past work suc | ||||
| While this technique is not unique to PyTorch, it's one of the fastest implementations of it to date. | ||||
| You get the best of speed and flexibility for your crazy research. | ||||
|  | ||||
|  | ||||
|  | ||||
|  | ||||
| ### Python First | ||||
|  | ||||
|  | ||||
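The tensor and autograd claims in the README excerpt above are concrete enough to show in a few lines. A minimal sketch using only standard torch APIs (the shapes and the device check are arbitrary choices for this example, not part of the diff):

    import torch

    # Tensors live on the CPU by default and can be moved to the GPU.
    x = torch.rand(5, 3)
    if torch.cuda.is_available():
        x = x.cuda()  # same data, now on the GPU

    # Tape-based autograd: ops on requires_grad tensors are recorded, and
    # backward() replays the tape to compute gradients.
    w = torch.randn(3, 2, requires_grad=True)
    loss = (x.cpu() @ w).pow(2).sum()
    loss.backward()
    print(w.grad.shape)  # torch.Size([3, 2])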
| @ -31,9 +31,9 @@ Be careful when running untrusted models. This classification includes models cr | ||||
|  | ||||
| **Prefer to execute untrusted models within a secure, isolated environment such as a sandbox** (e.g., containers, virtual machines). This helps protect your system from potentially malicious code. You can find further details and instructions in [this page](https://developers.google.com/code-sandboxing). | ||||
|  | ||||
| **Be mindful of risky model formats**. Give preference to sharing and loading weights in the appropriate format for your use case. [safetensors](https://huggingface.co/docs/safetensors/en/index) gives the most safety but is the most restricted in what it supports. [`torch.load`](https://pytorch.org/docs/stable/generated/torch.load.html#torch.load) has a significantly larger attack surface but is more flexible in what it can serialize. See the documentation for more details. | ||||
| **Be mindful of risky model formats**. Give preference to sharing and loading weights in the appropriate format for your use case. [safetensors](https://huggingface.co/docs/safetensors/en/index) gives the most safety but is the most restricted in what it supports. [`torch.load`](https://pytorch.org/docs/stable/generated/torch.load.html#torch.load) with `weights_only=True` is also secure to our knowledge, even though it offers a significantly larger attack surface. Loading an untrusted checkpoint with `weights_only=False` MUST never be done. | ||||
|  | ||||
|  | ||||
| Even for more secure serialization formats, unexpected inputs to the downstream system can cause diverse security threats (e.g. denial of service, out-of-bounds reads/writes), so we recommend extensive validation of any untrusted inputs. | ||||
|  | ||||
| Important Note: The trustworthiness of a model is not binary. You must always determine the proper level of caution depending on the specific model and how it matches your use case and risk tolerance. | ||||
|  | ||||
|  | ||||
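The `weights_only` distinction drawn in the SECURITY.md excerpt above is easy to see in code. A minimal sketch using the standard `torch.load` and safetensors APIs (the file names are placeholders):

    import torch

    # Safer: restrict unpickling to tensor data and a small allowlist of
    # types instead of arbitrary Python objects.
    state_dict = torch.load("model.pt", weights_only=True, map_location="cpu")

    # MUST never be done on an untrusted checkpoint: full unpickling can
    # execute arbitrary code embedded in the file.
    # state_dict = torch.load("model.pt", weights_only=False)

    # Most restrictive option: safetensors stores raw tensors only.
    # from safetensors.torch import load_file
    # state_dict = load_file("model.safetensors")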
| @ -38,7 +38,7 @@ set_bool(AT_HIPSPARSELT_ENABLED CAFFE2_USE_HIPSPARSELT) | ||||
|  | ||||
| configure_file(Config.h.in "${CMAKE_CURRENT_SOURCE_DIR}/Config.h") | ||||
| # TODO: Do not generate CUDAConfig.h for ROCm BUILDS | ||||
| # At the moment, `jit_macros.h` include CUDAConfig.h for both CUDA and HIP builds | ||||
| # At the moment, `jit_macors.h` include CUDAConfig.h for both CUDA and HIP builds | ||||
| if(USE_CUDA OR USE_ROCM) | ||||
|   configure_file(cuda/CUDAConfig.h.in "${CMAKE_CURRENT_SOURCE_DIR}/cuda/CUDAConfig.h") | ||||
| endif() | ||||
| @ -256,11 +256,10 @@ endif() | ||||
| IF(USE_FBGEMM_GENAI) | ||||
|   set(FBGEMM_THIRD_PARTY ${PROJECT_SOURCE_DIR}/third_party/fbgemm/external/) | ||||
|   set(FBGEMM_GENAI_SRCS ${PROJECT_SOURCE_DIR}/third_party/fbgemm/fbgemm_gpu/experimental/gen_ai/src/quantize) | ||||
|  | ||||
|   if(USE_CUDA) | ||||
|     # To avoid increasing the build time/binary size unnecessarily, use an allow-list of kernels to build. | ||||
|     # If you want to integrate a kernel from FBGEMM into torch, you have to add it here. | ||||
|     set(FBGEMM_CUTLASS_KERNELS_REGEX ".*(mx8mx8bf16_grouped|f4f4bf16_grouped).*") | ||||
|     set(FBGEMM_CUTLASS_KERNELS_REGEX ".*mx8mx8bf16_grouped.*") | ||||
|     file(GLOB_RECURSE fbgemm_genai_native_cuda_cu | ||||
|       "${FBGEMM_GENAI_SRCS}/cutlass_extensions/*.cu" | ||||
|       "${FBGEMM_GENAI_SRCS}/cutlass_extensions/**/*.cu") | ||||
| @ -289,71 +288,62 @@ IF(USE_FBGEMM_GENAI) | ||||
|  | ||||
|     set_target_properties(fbgemm_genai PROPERTIES POSITION_INDEPENDENT_CODE ON) | ||||
|  | ||||
|     set(fbgemm_genai_cuh | ||||
|     set(fbgemm_genai_mx8mx8bf16_grouped | ||||
|       "${FBGEMM_GENAI_SRCS}/cutlass_extensions/mx8mx8bf16_grouped/" | ||||
|       "${FBGEMM_GENAI_SRCS}/cutlass_extensions/f4f4bf16_grouped/" | ||||
|       "${FBGEMM_GENAI_SRCS}/" | ||||
|     ) | ||||
|  | ||||
|     target_include_directories(fbgemm_genai PRIVATE | ||||
|     target_include_directories(fbgemm_genai PUBLIC | ||||
|       ${FBGEMM_THIRD_PARTY}/cutlass/include | ||||
|       ${FBGEMM_THIRD_PARTY}/cutlass/tools/util/include | ||||
|       ${fbgemm_genai_cuh} | ||||
|       ${fbgemm_genai_mx8mx8bf16_grouped} | ||||
|       ${FBGEMM_GENAI_SRCS}/common/include/   # includes fbgemm_gpu/quantize/utils.h, fbgemm_gpu/quantize/tuning_cache.hpp | ||||
|       ${FBGEMM_GENAI_SRCS}/include/          # includes fbgemm_gpu/torch_ops.h | ||||
|     ) | ||||
|   else() | ||||
|     if(USE_ROCM) | ||||
|       # Only include the kernels we want to build to avoid increasing binary size. | ||||
|       file(GLOB_RECURSE fbgemm_genai_native_rocm_hip | ||||
|         "${FBGEMM_GENAI_SRCS}/ck_extensions/fp8_rowwise_grouped/kernels/fp8_rowwise_grouped*.hip" | ||||
|         "${FBGEMM_GENAI_SRCS}/ck_extensions/fp8_rowwise_grouped/fp8_rowwise_grouped_gemm.hip") | ||||
|       set_source_files_properties(${fbgemm_genai_native_rocm_hip} PROPERTIES HIP_SOURCE_PROPERTY_FORMAT 1) | ||||
|  | ||||
|     # Add FBGEMM_GENAI include directories for torch_ops.h | ||||
|     list(APPEND ATen_CUDA_INCLUDE ${PROJECT_SOURCE_DIR}/third_party/fbgemm/fbgemm_gpu/experimental/gen_ai/src/quantize/include) | ||||
|     list(APPEND ATen_CUDA_INCLUDE ${PROJECT_SOURCE_DIR}/third_party/fbgemm/fbgemm_gpu/experimental/gen_ai/src/quantize/common/include) | ||||
|   elseif(USE_ROCM) | ||||
|     # Only include the kernels we want to build to avoid increasing binary size. | ||||
|     file(GLOB_RECURSE fbgemm_genai_native_rocm_hip | ||||
|       "${FBGEMM_GENAI_SRCS}/ck_extensions/fp8_rowwise_grouped/kernels/fp8_rowwise_grouped*.hip" | ||||
|       "${FBGEMM_GENAI_SRCS}/ck_extensions/fp8_rowwise_grouped/fp8_rowwise_grouped_gemm.hip") | ||||
|     set_source_files_properties(${fbgemm_genai_native_rocm_hip} PROPERTIES HIP_SOURCE_PROPERTY_FORMAT 1) | ||||
|       # Add additional HIPCC compiler flags for performance | ||||
|       set(FBGEMM_GENAI_EXTRA_HIPCC_FLAGS | ||||
|         -mllvm | ||||
|         -amdgpu-coerce-illegal-types=1 | ||||
|         -mllvm | ||||
|         -enable-post-misched=0 | ||||
|         -mllvm | ||||
|         -greedy-reverse-local-assignment=1 | ||||
|         -fhip-new-launch-api) | ||||
|  | ||||
|     # Add additional HIPCC compiler flags for performance | ||||
|     set(FBGEMM_GENAI_EXTRA_HIPCC_FLAGS | ||||
|       -mllvm | ||||
|       -enable-post-misched=0 | ||||
|       -mllvm | ||||
|       -greedy-reverse-local-assignment=1 | ||||
|       -fhip-new-launch-api) | ||||
|     if(DEFINED ROCM_VERSION_DEV AND ROCM_VERSION_DEV VERSION_LESS "7.2.0") | ||||
|         list(PREPEND FBGEMM_GENAI_EXTRA_HIPCC_FLAGS -mllvm -amdgpu-coerce-illegal-types=1) | ||||
|       # Only compile for gfx942 for now. | ||||
|       # This is rather hacky, I could not figure out a clean solution :( | ||||
|       set(HIP_CLANG_FLAGS_ORIGINAL ${HIP_CLANG_FLAGS}) | ||||
|       string(REGEX REPLACE "--offload-arch=[^ ]*" "" FILTERED_HIP_CLANG_FLAGS "${HIP_CLANG_FLAGS}") | ||||
|       if("gfx942" IN_LIST PYTORCH_ROCM_ARCH) | ||||
|         list(APPEND FILTERED_HIP_CLANG_FLAGS --offload-arch=gfx942;) | ||||
|       endif() | ||||
|       set(HIP_CLANG_FLAGS ${FILTERED_HIP_CLANG_FLAGS}) | ||||
|  | ||||
|     # Only compile for gfx942 for now. | ||||
|     # This is rather hacky, I could not figure out a clean solution :( | ||||
|     set(HIP_CLANG_FLAGS_ORIGINAL ${HIP_CLANG_FLAGS}) | ||||
|     string(REGEX REPLACE "--offload-arch=[^ ]*" "" FILTERED_HIP_CLANG_FLAGS "${HIP_CLANG_FLAGS}") | ||||
|     if("gfx942" IN_LIST PYTORCH_ROCM_ARCH) | ||||
|       list(APPEND FILTERED_HIP_CLANG_FLAGS --offload-arch=gfx942;) | ||||
|       hip_add_library( | ||||
|         fbgemm_genai STATIC | ||||
|         ${fbgemm_genai_native_rocm_hip} | ||||
|         HIPCC_OPTIONS ${HIP_HCC_FLAGS} ${FBGEMM_GENAI_EXTRA_HIPCC_FLAGS}) | ||||
|       set(HIP_CLANG_FLAGS ${HIP_CLANG_FLAGS_ORIGINAL}) | ||||
|       set_target_properties(fbgemm_genai PROPERTIES POSITION_INDEPENDENT_CODE ON) | ||||
|       target_compile_definitions(fbgemm_genai PRIVATE FBGEMM_GENAI_NO_EXTENDED_SHAPES) | ||||
|  | ||||
|       target_include_directories(fbgemm_genai PUBLIC | ||||
|         # FBGEMM version of Composable Kernel is used due to some customizations | ||||
|         ${FBGEMM_THIRD_PARTY}/composable_kernel/include | ||||
|         ${FBGEMM_THIRD_PARTY}/composable_kernel/library/include | ||||
|         ${FBGEMM_THIRD_PARTY}/cutlass/include | ||||
|         ${FBGEMM_THIRD_PARTY}/cutlass/tools/util/include | ||||
|         ${FBGEMM_GENAI_SRCS}/common/include/   # includes fbgemm_gpu/quantize/utils.h, fbgemm_gpu/quantize/tuning_cache.hpp | ||||
|         ${FBGEMM_GENAI_SRCS}/include/          # includes fbgemm_gpu/torch_ops.h | ||||
|       ) | ||||
|     endif() | ||||
|     set(HIP_CLANG_FLAGS ${FILTERED_HIP_CLANG_FLAGS}) | ||||
|  | ||||
|     hip_add_library( | ||||
|       fbgemm_genai STATIC | ||||
|       ${fbgemm_genai_native_rocm_hip} | ||||
|       HIPCC_OPTIONS ${HIP_HCC_FLAGS} ${FBGEMM_GENAI_EXTRA_HIPCC_FLAGS}) | ||||
|     set(HIP_CLANG_FLAGS ${HIP_CLANG_FLAGS_ORIGINAL}) | ||||
|     set_target_properties(fbgemm_genai PROPERTIES POSITION_INDEPENDENT_CODE ON) | ||||
|     target_compile_definitions(fbgemm_genai PRIVATE FBGEMM_GENAI_NO_EXTENDED_SHAPES) | ||||
|  | ||||
|     target_include_directories(fbgemm_genai PRIVATE | ||||
|       # FBGEMM version of Composable Kernel is used due to some customizations | ||||
|       ${FBGEMM_THIRD_PARTY}/composable_kernel/include | ||||
|       ${FBGEMM_THIRD_PARTY}/composable_kernel/library/include | ||||
|       ${FBGEMM_THIRD_PARTY}/cutlass/include | ||||
|       ${FBGEMM_THIRD_PARTY}/cutlass/tools/util/include | ||||
|       ${FBGEMM_GENAI_SRCS}/common/include/   # includes fbgemm_gpu/quantize/utils.h, fbgemm_gpu/quantize/tuning_cache.hpp | ||||
|       ${FBGEMM_GENAI_SRCS}/include/          # includes fbgemm_gpu/torch_ops.h | ||||
|     ) | ||||
|  | ||||
|     # Add FBGEMM_GENAI include directories for torch_ops.h | ||||
|     list(APPEND ATen_HIP_INCLUDE ${PROJECT_SOURCE_DIR}/third_party/fbgemm/fbgemm_gpu/experimental/gen_ai/src/quantize/include) | ||||
|     list(APPEND ATen_HIP_INCLUDE ${PROJECT_SOURCE_DIR}/third_party/fbgemm/fbgemm_gpu/experimental/gen_ai/src/quantize/common/include) | ||||
|   endif() | ||||
| endif() | ||||
|  | ||||
| @ -702,6 +692,12 @@ if(USE_CUDA AND NOT USE_ROCM) | ||||
|   list(APPEND ATen_CUDA_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/../../../third_party/cutlass/include) | ||||
|   list(APPEND ATen_CUDA_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/../../../third_party/cutlass/tools/util/include) | ||||
|  | ||||
|   # Add FBGEMM_GENAI include directories for torch_ops.h | ||||
|   if(USE_FBGEMM_GENAI) | ||||
|     list(APPEND ATen_CUDA_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/../../../third_party/fbgemm/fbgemm_gpu/experimental/gen_ai/src/quantize/include) | ||||
|     list(APPEND ATen_CUDA_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/../../../third_party/fbgemm/fbgemm_gpu/experimental/gen_ai/src/quantize/common/include) | ||||
|   endif() | ||||
|  | ||||
|   if($ENV{ATEN_STATIC_CUDA}) | ||||
|     if(CUDA_VERSION VERSION_LESS_EQUAL 12.9) | ||||
|       list(APPEND ATen_CUDA_DEPENDENCY_LIBS | ||||
|  | ||||
| @ -19,7 +19,6 @@ | ||||
| #include <ATen/detail/MPSHooksInterface.h> | ||||
| #include <ATen/detail/MTIAHooksInterface.h> | ||||
| #include <ATen/detail/PrivateUse1HooksInterface.h> | ||||
| #include <ATen/detail/XLAHooksInterface.h> | ||||
| #include <ATen/detail/XPUHooksInterface.h> | ||||
| #include <c10/core/QEngine.h> | ||||
| #include <c10/core/impl/DeviceGuardImplInterface.h> | ||||
| @ -89,8 +88,6 @@ class TORCH_API Context { | ||||
|       return at::detail::getHIPHooks(); | ||||
|     } else if (opt_device_type == at::kHPU) { | ||||
|       return at::detail::getHPUHooks(); | ||||
|     } else if (opt_device_type == at::kXLA) { | ||||
|       return at::detail::getXLAHooks(); | ||||
|     } else { | ||||
|       TORCH_CHECK( | ||||
|           false, | ||||
| @ -199,7 +196,7 @@ class TORCH_API Context { | ||||
|     return c10::impl::hasDeviceGuardImpl(c10::DeviceType::IPU); | ||||
|   } | ||||
|   static bool hasXLA() { | ||||
|     return detail::getXLAHooks().hasXLA(); | ||||
|     return c10::impl::hasDeviceGuardImpl(c10::DeviceType::XLA); | ||||
|   } | ||||
|   static bool hasXPU() { | ||||
|     return detail::getXPUHooks().hasXPU(); | ||||
|  | ||||
| @ -389,16 +389,37 @@ void fillVersion<DLManagedTensorVersioned>( | ||||
| // constructed out of ATen tensor | ||||
| template <class T> | ||||
| T* toDLPackImpl(const Tensor& src) { | ||||
|   auto view = src; | ||||
|  | ||||
|   // Detect whether the strides need to be normalized | ||||
|   // Background: gh-83069 | ||||
|   // | ||||
|   // Normalizing strides can be costly, slowing the | ||||
|   // toDLPack conversion down by about 3x, so we only | ||||
|   // normalize when needed. | ||||
|   // | ||||
|   // The following code detects whether src follows a | ||||
|   // contiguous pattern. If it does (the common case), | ||||
|   // we do not need to normalize the strides. | ||||
|   bool need_normalize_strides = src.dim() == 1 && src.size(0) == 1 && src.stride(0) != 1; | ||||
|   // less common case, try normalizing the strides | ||||
|   if (need_normalize_strides) { | ||||
|     // create a new tensor with possibly normalized strides | ||||
|     // gh-83069 | ||||
|     auto shape = src.sizes(); | ||||
|     view = src.as_strided(shape, {1}, src.storage_offset()); | ||||
|   } | ||||
|  | ||||
|   ATenDLMTensor<T>* atDLMTensor(new ATenDLMTensor<T>); | ||||
|   atDLMTensor->handle = src; | ||||
|   atDLMTensor->handle = view; | ||||
|   atDLMTensor->tensor.manager_ctx = atDLMTensor; | ||||
|   atDLMTensor->tensor.deleter = &deleter<T>; | ||||
|   atDLMTensor->tensor.dl_tensor.data = src.data_ptr(); | ||||
|   atDLMTensor->tensor.dl_tensor.data = view.data_ptr(); | ||||
|   atDLMTensor->tensor.dl_tensor.device = torchDeviceToDLDevice(src.device()); | ||||
|   atDLMTensor->tensor.dl_tensor.ndim = static_cast<int32_t>(src.dim()); | ||||
|   atDLMTensor->tensor.dl_tensor.dtype = getDLDataType(src); | ||||
|   atDLMTensor->tensor.dl_tensor.shape = const_cast<int64_t*>(src.sizes().data()); | ||||
|   atDLMTensor->tensor.dl_tensor.strides = const_cast<int64_t*>(src.strides().data()); | ||||
|   atDLMTensor->tensor.dl_tensor.shape = const_cast<int64_t*>(view.sizes().data()); | ||||
|   atDLMTensor->tensor.dl_tensor.strides = const_cast<int64_t*>(view.strides().data()); | ||||
|   atDLMTensor->tensor.dl_tensor.byte_offset = 0; | ||||
|   fillVersion(&atDLMTensor->tensor); | ||||
|  | ||||
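A minimal sketch of the edge case this normalization targets (assuming a libtorch translation unit; see gh-83069): a one-element view can carry a stride other than 1, which some DLPack consumers reject, so the exporter re-strides it to {1}.

```cpp
#include <ATen/ATen.h>
#include <ATen/DLConvertor.h>

void dlpack_size_one_view() {
  // A 1-d, size-1 view whose stride is 2 rather than 1.
  auto base = at::arange(4);
  auto x = base.slice(/*dim=*/0, /*start=*/0, /*end=*/4, /*step=*/2)
               .narrow(/*dim=*/0, /*start=*/0, /*length=*/1);
  // The exporter detects dim() == 1 && size(0) == 1 && stride(0) != 1 and
  // hands DLPack a re-strided view, so the exported strides come out as {1}.
  DLManagedTensor* dlm = at::toDLPack(x);
  at::Tensor y = at::fromDLPack(dlm);  // consumes dlm; y.stride(0) == 1
}
```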
|  | ||||
| @ -52,16 +52,16 @@ struct DLPackTraits {}; | ||||
|  | ||||
| template <> | ||||
| struct DLPackTraits<DLManagedTensor> { | ||||
|   inline static constexpr const char* capsule = "dltensor"; | ||||
|   inline static constexpr const char* used = "used_dltensor"; | ||||
|   inline static const char* capsule = "dltensor"; | ||||
|   inline static const char* used = "used_dltensor"; | ||||
|   inline static auto toDLPack = at::toDLPack; | ||||
|   inline static auto fromDLPack = at::fromDLPack; | ||||
| }; | ||||
|  | ||||
| template <> | ||||
| struct DLPackTraits<DLManagedTensorVersioned> { | ||||
|   inline static constexpr const char* capsule = "dltensor_versioned"; | ||||
|   inline static constexpr const char* used = "used_dltensor_versioned"; | ||||
|   inline static const char* capsule = "dltensor_versioned"; | ||||
|   inline static const char* used = "used_dltensor_versioned"; | ||||
|   inline static auto toDLPack = at::toDLPackVersioned; | ||||
|   inline static auto fromDLPack = at::fromDLPackVersioned; | ||||
| }; | ||||
|  | ||||
| @ -122,7 +122,7 @@ void FunctionalTensorWrapper::freeze_storage() const { | ||||
| //          |   have their own storages, but backends like functorch      | | ||||
| //         \/   are allowed to re-alias underneath the pass               \/ | ||||
| // . - - - - - - - - - - - - - .                             . - - - - - - - - - - - - - - - . | ||||
| // |    underlying_storage     |                             |      underlying_storage       | | ||||
| // |    underlying_storage     |                             |      underlying_storage       | | ||||
| // . - - - - - - - - - - - - - .                             . - - - - - - - - - - - - - - - . | ||||
| // | ||||
| // This constructor is only used by view ops. | ||||
|  | ||||
| @ -58,7 +58,7 @@ namespace at { | ||||
| namespace{ | ||||
|  | ||||
| // PyTorch allows operations to specify dim 0 and dim -1 on a scalar tensor. | ||||
| bool is_allowed_dim_on_scalar_tensor(int64_t dim) { | ||||
| static bool is_allowed_dim_on_scalar_tensor(int64_t dim) { | ||||
|   return dim == 0 || dim == -1; | ||||
| } | ||||
|  | ||||
| @ -365,7 +365,7 @@ Tensor select_batching_rule(const Tensor& self, int64_t dim, int64_t index) { | ||||
|   return self_physical.getPhysicalToLogicalMap().apply(result); | ||||
| } | ||||
|  | ||||
| int64_t getGradInputPhysicalDim(int64_t dim, IntArrayRef input_sizes, int64_t num_batch_dims) { | ||||
| static int64_t getGradInputPhysicalDim(int64_t dim, IntArrayRef input_sizes, int64_t num_batch_dims) { | ||||
|   return maybe_wrap_dim(dim, static_cast<int64_t>(input_sizes.size())) + num_batch_dims; | ||||
| } | ||||
|  | ||||
| @ -488,7 +488,7 @@ Tensor view_as_complex_batching_rule(const Tensor& self) { | ||||
| // Checks that the smallest batch stride is greater than the largest example | ||||
| // stride. This is something we can support but we choose not to because it's | ||||
| // potentially error prone. | ||||
| void checkBatchDimsAtFrontInLayout(IntArrayRef physical_strides, int64_t num_batch_dims) { | ||||
| static void checkBatchDimsAtFrontInLayout(IntArrayRef physical_strides, int64_t num_batch_dims) { | ||||
|   auto smallest_batch_stride = std::min_element( | ||||
|       physical_strides.begin(), physical_strides.begin() + num_batch_dims); | ||||
|   auto largest_example_stride = std::max_element( | ||||
| @ -508,7 +508,7 @@ void checkBatchDimsAtFrontInLayout(IntArrayRef physical_strides, int64_t num_bat | ||||
| // given (sizes, strides, storage_offset) returns the maximum location that | ||||
| // can be indexed (or nullopt if such a location doesn't exist, e.g., tensors | ||||
| // with zero-size dims). | ||||
| std::optional<int64_t> maximum_indexable_location( | ||||
| static std::optional<int64_t> maximum_indexable_location( | ||||
|     IntArrayRef sizes, IntArrayRef strides, int64_t storage_offset) { | ||||
|   auto result = native::storage_size_for(sizes, strides); | ||||
|   if (result == 0) { | ||||
| @ -521,7 +521,7 @@ std::optional<int64_t> maximum_indexable_location( | ||||
| // This checks that the range of possible memory locations accessible by | ||||
| // x.as_strided(sizes, strides, maybe_storage_offset) | ||||
| // are within the bounds of possible memory locations accessible by x. | ||||
| void checkBasicAsStridedValidForSlice( | ||||
| static void checkBasicAsStridedValidForSlice( | ||||
|     const Tensor& physical_tensor, | ||||
|     int64_t num_batch_dims, | ||||
|     IntArrayRef sizes, | ||||
|  | ||||
| @ -42,14 +42,8 @@ const PythonTorchFunctionTLS& PythonTorchFunctionTLS::get_state() { | ||||
| } | ||||
|  | ||||
| bool torch_function_mode_enabled() { | ||||
|   // Manually flatten because gcc is refusing to inline here.  Note | ||||
|   // that we are still calling __tls_get_addr twice here with GCC, | ||||
|   // presumably because of | ||||
|   // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81501 (which says | ||||
|   // the fix ships in GCC 16), but forcing inlining still improves | ||||
|   // performance. | ||||
|   const auto& ptfs = pythonTorchFunctionState; | ||||
|   return ptfs.disabled_state_ != TorchFunctionDisabledState::ALL_DISABLED && !ptfs.stack_.empty(); | ||||
|   return PythonTorchFunctionTLS::get_disabled_state() != TorchFunctionDisabledState::ALL_DISABLED && | ||||
|          PythonTorchFunctionTLS::stack_len() > 0; | ||||
| } | ||||
|  | ||||
| // This is needed to disambiguate the ternary torch function disabled states | ||||
|  | ||||
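The manual flattening described in the removed comment is a general TLS pattern; here is a self-contained sketch with illustrative types (not the real PythonTorchFunctionTLS):

```cpp
#include <memory>
#include <vector>

struct ModeState {
  bool all_disabled = false;
  std::vector<std::shared_ptr<int>> stack;
};

thread_local ModeState g_mode_state;

// Take one reference to the thread_local and read both fields through it:
// the compiler computes the TLS address once instead of once per accessor
// call, which is the effect the hand-inlined version above is after.
bool mode_enabled() {
  const auto& s = g_mode_state;
  return !s.all_disabled && !s.stack.empty();
}
```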
| @ -27,7 +27,6 @@ struct TORCH_API PythonTorchFunctionTLS { | ||||
|   TorchFunctionDisabledState disabled_state_ = | ||||
|       TorchFunctionDisabledState::ENABLED; | ||||
|   std::vector<std::shared_ptr<c10::SafePyObject>> stack_; | ||||
|   friend TORCH_API bool torch_function_mode_enabled(); | ||||
| }; | ||||
|  | ||||
| TORCH_API bool torch_function_mode_enabled(); | ||||
|  | ||||
| @ -13,7 +13,7 @@ namespace { | ||||
|   // and left at true for the rest of the execution. | ||||
|   // It's an optimization so that users who never use default hooks don't need to | ||||
|   // read the thread_local variables pack_hook_ and unpack_hook_. | ||||
|   bool is_initialized(false); | ||||
|   static bool is_initialized(false); | ||||
| } | ||||
|  | ||||
| static void assertSavedTensorHooksNotDisabled() { | ||||
|  | ||||
| @ -56,7 +56,7 @@ inline void get_strides(int64_t* strides, ArrayRef<OperandInfo> operands, int64_ | ||||
|   } | ||||
| } | ||||
|  | ||||
| OptionalTensorRef make_otr(const TensorBase &tensor) { | ||||
| static OptionalTensorRef make_otr(const TensorBase &tensor) { | ||||
|   if (tensor.defined()) { | ||||
|     return OptionalTensorRef(tensor); | ||||
|   } else { | ||||
| @ -1534,7 +1534,7 @@ void TensorIteratorBase::build(TensorIteratorConfig& config) { | ||||
|  | ||||
|   // XLA and lazy tensors don't have storage, so they don't have an underlying data pointer. | ||||
|   // Nothing beyond this point is important for meta functions, so it's fine to exit early here. | ||||
|   // Extend the condition to MAIA tensors as MAIA tensors also don't have storage. | ||||
|   // Extend the condition to MAIA tensors as MAIA tensors also don't have storage. | ||||
|   if (privateuse1_without_storage  || | ||||
|       common_device_.type() == DeviceType::XLA  || | ||||
|       common_device_.type() == DeviceType::IPU  || | ||||
|  | ||||
| @ -36,7 +36,7 @@ namespace { | ||||
| using weakref_type = c10::weak_intrusive_ptr<TensorImpl, UndefinedTensorImpl>; | ||||
| using val_type = std::tuple<weakref_type, Tensor>; | ||||
|  | ||||
| ska::flat_hash_map<TensorImpl*, val_type>& get_cached_casts() { | ||||
| static ska::flat_hash_map<TensorImpl*, val_type>& get_cached_casts() { | ||||
|   static ska::flat_hash_map<TensorImpl*, val_type> cached_casts; | ||||
|   return cached_casts; | ||||
| } | ||||
|  | ||||
| @ -6,9 +6,9 @@ namespace at { | ||||
|  | ||||
| namespace { | ||||
|  | ||||
| std::array<HostAllocator*, at::COMPILE_TIME_MAX_DEVICE_TYPES> | ||||
| static std::array<HostAllocator*, at::COMPILE_TIME_MAX_DEVICE_TYPES> | ||||
|     allocator_array{}; | ||||
| std::array<uint8_t, at::COMPILE_TIME_MAX_DEVICE_TYPES> | ||||
| static std::array<uint8_t, at::COMPILE_TIME_MAX_DEVICE_TYPES> | ||||
|     allocator_priority{}; | ||||
|  | ||||
| } // anonymous namespace | ||||
|  | ||||
| @ -39,7 +39,7 @@ struct HostBlock { | ||||
| }; | ||||
|  | ||||
| template <typename B> | ||||
| struct alignas(hardware_destructive_interference_size) FreeBlockList { | ||||
| struct alignas(64) FreeBlockList { | ||||
|   std::mutex mutex_; | ||||
|   std::deque<B*> list_; | ||||
| }; | ||||
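Both spellings serve the same purpose: keeping each mutex-guarded shard on its own cache line so unrelated threads do not false-share. A minimal sketch, assuming a 64-byte cache line (the bet the hard-coded alignas(64) makes; std::hardware_destructive_interference_size is the portable alternative):

```cpp
#include <deque>
#include <mutex>

// Pad each shard to a cache line so that locking shard 0 never bounces the
// line that holds shard 1's mutex between cores.
template <typename B>
struct alignas(64) PaddedFreeList {
  std::mutex mutex_;
  std::deque<B*> list_;
};

// Usage sketch: one shard per size class, each touched by different threads.
PaddedFreeList<int> shards[8];
```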
| @ -94,11 +94,11 @@ struct PinnedReserveSegment { | ||||
| struct TORCH_API HostStats { | ||||
|   // COUNT: total allocations (active) | ||||
|   Stat active_requests; | ||||
|   // SUM: bytes allocated/reserved by this memory allocator. (active) | ||||
|   // SUM: bytes allocated/reserved by this memory allocator. (active) | ||||
|   Stat active_bytes; | ||||
|   // COUNT: total allocations (active + free) | ||||
|   Stat allocations; | ||||
|   // SUM: bytes allocated/reserved by this memory allocator. This accounts | ||||
|   // SUM: bytes allocated/reserved by this memory allocator. This accounts | ||||
|   // for both free and in-use blocks. | ||||
|   Stat allocated_bytes; | ||||
|  | ||||
| @ -122,12 +122,12 @@ struct TORCH_API HostStats { | ||||
| // Struct containing memory allocator summary statistics for host, as they | ||||
| // are staged for reporting. This is a temporary struct that is used to | ||||
| // avoid locking the allocator while collecting stats. | ||||
| struct alignas(hardware_destructive_interference_size) HostStatsStaged { | ||||
| struct alignas(64) HostStatsStaged { | ||||
|   std::mutex timing_mutex_; | ||||
|   // COUNT: total allocations (active + free) | ||||
|   // LOCK: access to this stat is protected by the allocator's blocks_mutex_ | ||||
|   Stat allocations; | ||||
|   // SUM: bytes allocated/reserved by this memory allocator. This accounts | ||||
|   // SUM: bytes allocated/reserved by this memory alocator. This accounts | ||||
|   // for both free and in-use blocks. | ||||
|   Stat allocated_bytes; | ||||
|   // COUNT: number of allocations per bucket (active) | ||||
| @ -455,7 +455,7 @@ struct CachingHostAllocatorImpl { | ||||
|   } | ||||
|  | ||||
|   void resetAccumulatedStats() { | ||||
|     // Resetting accumulated memory stats requires concurrently holding both the | ||||
|     // Resetting accumulated memory stats requires concurrently holding both the | ||||
|     // free list mutexes and the blocks mutex. Previously, this was only done in | ||||
|     // empty_cache function. | ||||
|     for (size_t i = 0; i < free_list_.size(); ++i) { | ||||
| @ -482,7 +482,7 @@ struct CachingHostAllocatorImpl { | ||||
|   } | ||||
|  | ||||
|   void resetPeakStats() { | ||||
|     // Resetting peak memory stats requires concurrently holding both the | ||||
|     // Resetting peak memory stats requires concurrently holding both the | ||||
|     // free list mutexes and the blocks mutex. Previously, this was only done in | ||||
|     // empty_cache function. | ||||
|     for (size_t i = 0; i < free_list_.size(); ++i) { | ||||
| @ -669,7 +669,7 @@ struct CachingHostAllocatorImpl { | ||||
|     TORCH_CHECK_NOT_IMPLEMENTED(false, "Not implemented for query_event"); | ||||
|   } | ||||
|  | ||||
|   alignas(hardware_destructive_interference_size) std::mutex blocks_mutex_; | ||||
|   alignas(64) std::mutex blocks_mutex_; | ||||
|   ska::flat_hash_set<B*> blocks_; // block list | ||||
|   ska::flat_hash_map<void*, B*> ptr_to_block_; | ||||
|  | ||||
| @ -677,17 +677,17 @@ struct CachingHostAllocatorImpl { | ||||
|   // size. This allows us to quickly find a free block of the right size. | ||||
|   // We use a deque to store each per-size free list and guard it with its own | ||||
|   // mutex. | ||||
|   alignas(hardware_destructive_interference_size) std::vector<FreeBlockList<B>> free_list_ = | ||||
|   alignas(64) std::vector<FreeBlockList<B>> free_list_ = | ||||
|       std::vector<FreeBlockList<B>>(MAX_SIZE_INDEX); | ||||
|  | ||||
|   alignas(hardware_destructive_interference_size) std::mutex events_mutex_; | ||||
|   alignas(64) std::mutex events_mutex_; | ||||
|   std::deque<std::pair<E, B*>> events_; // event queue paired with block | ||||
|  | ||||
|   // Indicates whether the object is active. | ||||
|   // Set to false in the destructor to signal background threads to stop. | ||||
|   std::atomic<bool> active_{true}; | ||||
| protected: | ||||
|   alignas(hardware_destructive_interference_size) HostStatsStaged stats_; | ||||
|   alignas(64) HostStatsStaged stats_; | ||||
| }; | ||||
|  | ||||
| struct TORCH_API HostAllocator : public at::Allocator { | ||||
|  | ||||
| @ -59,7 +59,9 @@ struct TORCH_API Generator { | ||||
|  | ||||
|   explicit Generator(c10::intrusive_ptr<c10::GeneratorImpl> gen_impl) | ||||
|    : impl_(std::move(gen_impl)) { | ||||
|     TORCH_CHECK(impl_.get(), "GeneratorImpl with nullptr is not supported"); | ||||
|     if (impl_.get() == nullptr) { | ||||
|       throw std::runtime_error("GeneratorImpl with nullptr is not supported"); | ||||
|     } | ||||
|   } | ||||
|  | ||||
|   bool operator==(const Generator& rhs) const { | ||||
|  | ||||
| @ -229,10 +229,10 @@ private: | ||||
|   } | ||||
|  | ||||
|  | ||||
|   static constexpr uint32_t kPhilox10A = 0x9E3779B9; | ||||
|   static constexpr uint32_t kPhilox10B = 0xBB67AE85; | ||||
|   static constexpr uint32_t kPhiloxSA = 0xD2511F53; | ||||
|   static constexpr uint32_t kPhiloxSB = 0xCD9E8D57; | ||||
|   static const uint32_t kPhilox10A = 0x9E3779B9; | ||||
|   static const uint32_t kPhilox10B = 0xBB67AE85; | ||||
|   static const uint32_t kPhiloxSA = 0xD2511F53; | ||||
|   static const uint32_t kPhiloxSB = 0xCD9E8D57; | ||||
| }; | ||||
|  | ||||
| typedef philox_engine Philox4_32; | ||||
|  | ||||
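The constexpr/const distinction here is not cosmetic; a short sketch of the difference (illustrative names):

```cpp
#include <cstdint>

struct PhiloxLikeConstants {
  // Implicitly inline since C++17: usable in constant expressions and
  // ODR-usable (e.g. bound to a const reference) without a .cpp definition.
  static constexpr uint32_t kA = 0x9E3779B9;

  // Also in-class initialized, but ODR-using it (e.g. taking its address)
  // may still require an out-of-line `const uint32_t ...::kB;` pre-C++17.
  static const uint32_t kB = 0xBB67AE85;
};

static_assert(PhiloxLikeConstants::kA != PhiloxLikeConstants::kB,
              "both forms are usable at compile time");
```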
| @ -111,7 +111,9 @@ class TORCH_API TensorBase { | ||||
|   explicit TensorBase( | ||||
|       c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> tensor_impl) | ||||
|       : impl_(std::move(tensor_impl)) { | ||||
|     TORCH_CHECK(impl_.get(), "TensorImpl with nullptr is not supported"); | ||||
|     if (impl_.get() == nullptr) { | ||||
|       throw std::runtime_error("TensorImpl with nullptr is not supported"); | ||||
|     } | ||||
|   } | ||||
|   TensorBase(const TensorBase&) = default; | ||||
|   TensorBase(TensorBase&&) noexcept = default; | ||||
|  | ||||
| @ -109,10 +109,6 @@ TORCH_LIBRARY_IMPL(_, AutogradHPU, m) { | ||||
|   m.fallback(AUTOGRAD_FALLBACK); | ||||
| } | ||||
|  | ||||
| TORCH_LIBRARY_IMPL(_, AutogradPrivateUse1, m) { | ||||
|   m.fallback(AUTOGRAD_FALLBACK); | ||||
| } | ||||
|  | ||||
| #undef AUTOGRAD_FALLBACK | ||||
|  | ||||
| } // namespace | ||||
|  | ||||
| @ -148,7 +148,7 @@ struct TORCH_API ClassType : public NamedType { | ||||
|  | ||||
|   void checkNotExist(const std::string& name, const std::string& what) const; | ||||
|  | ||||
|   // Attributes are stored in a specific slot at runtime for efficiency. | ||||
|   // Attributes are stored in a specific slot at runtime for efficiency. | ||||
|   // When emitting instructions we specify the slot so that attribute access is | ||||
|   // a constant lookup | ||||
|   std::optional<size_t> findAttributeSlot(const std::string& name) const { | ||||
| @ -412,7 +412,7 @@ struct TORCH_API ClassType : public NamedType { | ||||
|   // Holds method attributes | ||||
|   std::weak_ptr<CompilationUnit> compilation_unit_; | ||||
|  | ||||
|   // Holds all attributes, attribute details are found on ClassAttribute | ||||
|   // Holds all attributes, attribute details are found on ClassAttribute | ||||
|   std::vector<ClassAttribute> attributes_; | ||||
|   // Construct mirroring attributes_, only around due to the fact that `containedTypes()` method returns an ArrayRef. | ||||
|   // Never fill this without using the appropriate provideNewClassAttribute method | ||||
|  | ||||
| @ -442,17 +442,11 @@ RegistrationHandleRAII Dispatcher::registerFallback(DispatchKey dispatchKey, Ker | ||||
|  | ||||
|   auto idx = getDispatchTableIndexForDispatchKey(dispatchKey); | ||||
|   TORCH_CHECK(idx >= 0 && static_cast<uint64_t>(idx) < backendFallbackKernels_.size(), "idx=", idx); | ||||
|   // NB: Preserve BC for registering a fallback for AutogradPrivateUse1 multiple times; | ||||
|   // refer to https://github.com/pytorch/pytorch/issues/163979 for more information. | ||||
|   TORCH_CHECK( | ||||
|       dispatchKey == DispatchKey::AutogradPrivateUse1 || | ||||
|           !backendFallbackKernels_[idx].kernel.isValid(), | ||||
|       "Tried to register multiple backend fallbacks for the same dispatch key ", | ||||
|       dispatchKey, | ||||
|       "; previous registration ", | ||||
|       backendFallbackKernels_[idx].debug, | ||||
|       ", new registration ", | ||||
|       debug); | ||||
|     !backendFallbackKernels_[idx].kernel.isValid(), | ||||
|     "Tried to register multiple backend fallbacks for the same dispatch key ", dispatchKey, "; previous registration ", | ||||
|     backendFallbackKernels_[idx].debug, ", new registration ", debug | ||||
|   ); | ||||
|   // NB: inferred function schema is always nullptr for fallbacks, as fallbacks | ||||
|   // cannot be unboxed | ||||
|   backendFallbackKernels_[idx] = impl::AnnotatedKernel(std::move(kernel), nullptr, std::move(debug)); | ||||
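For context, a backend fallback of the kind this check guards against double-registering is installed roughly like this (a sketch using the public macro; the fallthrough kernel is just one possible choice):

```cpp
#include <torch/library.h>

// Registering a boxed fallback for a dispatch key. Doing this twice for the
// same key trips the TORCH_CHECK above (modulo the AutogradPrivateUse1
// carve-out discussed in pytorch/pytorch#163979).
TORCH_LIBRARY_IMPL(_, AutogradPrivateUse1, m) {
  m.fallback(torch::CppFunction::makeFallthrough());
}
```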
| @ -537,7 +531,7 @@ int64_t Dispatcher::sequenceNumberForRunningRecordFunction(DispatchKey dispatchK | ||||
|  | ||||
|   // Note: this records a sequence number for both Autograd keys, and for | ||||
|   // non-Autograd keys where the dispatchKeySet still contains an autograd key. | ||||
|   // This means that we might collect the same sequence number for two different | ||||
|   // This means that we might collect the same sequence number for two different | ||||
|   // events if they all occurred above Autograd and still had the Autograd | ||||
|   // dispatch key in the dispatch key set. | ||||
|   // However, this usually doesn't happen: normally the first call will | ||||
|  | ||||
| @ -585,7 +585,7 @@ class TORCH_API OperatorHandle { | ||||
|  | ||||
|   // We need to store this iterator in order to make | ||||
|   // Dispatcher::cleanup() fast -- it runs a lot on program | ||||
|   // termination (and presumably library unloading). | ||||
|   // termination (and presumably library unloading). | ||||
|   std::list<Dispatcher::OperatorDef>::iterator operatorIterator_; | ||||
| }; | ||||
|  | ||||
|  | ||||
| @ -365,7 +365,7 @@ std::pair<const AnnotatedKernel&, const char*> OperatorEntry::computeDispatchTab | ||||
|   //          For autograd keys, we only use kernel from CompositeImplicitAutograd when there's no direct registration | ||||
|   //          to its corresponding backend key or CompositeExplicitAutograd. See Note [CompositeExplicitAutograd and CompositeImplicitAutograd]. | ||||
|   //          For AutogradOther, we eagerly return ambiguousAutogradOtherKernel() if there's registration to any of | ||||
|   //          its backends and ask backend extender to request a dedicated Autograd key for the backend. | ||||
|   //          its backends and ask backend extender to request a dedicated Autograd key for the backend. | ||||
|   //          See Note [Ambiguity in AutogradOther kernel] for more details. | ||||
|   //          A CompositeExplicitAutograd kernel prevents CompositeImplicitAutograd kernel being used for Autograd keys, but it doesn't | ||||
|   //          cause confusion for AutogradOther. It's pretty straightforward to use Autograd (if available) | ||||
|  | ||||
| @ -261,7 +261,7 @@ std::ostream& operator<<(std::ostream& out, const FunctionSchema& schema) { | ||||
|     // | ||||
|     // There are 2 cases | ||||
|     // 1. something like 'aten::items.str(Dict(str, t) self) -> ((str, t)[])'. | ||||
|     // without the extra parentheses, the C++ schema parser cannot parse it. | ||||
|     // without the extra parentheses, the C++ schema parser cannot parse it. | ||||
|     // 2. something like '-> ((str, str))'. Need extra parenthesis so the return | ||||
|     // type is a single tuple rather than two strings. | ||||
|     // PR (https://github.com/pytorch/pytorch/pull/23204) has more context about | ||||
|  | ||||
| @ -68,7 +68,11 @@ Symbol InternedStrings::_symbol(const std::string& s) { | ||||
|     return it->second; | ||||
|  | ||||
|   auto pos = s.find("::"); | ||||
|   TORCH_CHECK(pos != std::string::npos, "all symbols must have a namespace, <namespace>::<string>, but found: ", s); | ||||
|   if (pos == std::string::npos) { | ||||
|     std::stringstream ss; | ||||
|     ss << "all symbols must have a namespace, <namespace>::<string>, but found: " << s; | ||||
|     throw std::runtime_error(ss.str()); | ||||
|   } | ||||
|   Symbol ns = _symbol("namespaces::" + s.substr(0, pos)); | ||||
|  | ||||
|   Symbol sym(sym_to_info_.size()); | ||||
| @ -117,7 +121,12 @@ std::string Symbol::domainString() const { | ||||
| } | ||||
|  | ||||
| Symbol Symbol::fromDomainAndUnqualString(const std::string & d, const std::string & s) { | ||||
|   TORCH_CHECK(d.compare(0, domain_prefix().size(), domain_prefix()) == 0, "Symbol: domain string is expected to be prefixed with '", domain_prefix(), "', e.g. 'org.pytorch.aten'"); | ||||
|   if (d.compare(0, domain_prefix().size(), domain_prefix()) != 0) { | ||||
|     std::ostringstream ss; | ||||
|     ss << "Symbol: domain string is expected to be prefixed with '" | ||||
|        << domain_prefix() << "', e.g. 'org.pytorch.aten'"; | ||||
|     throw std::runtime_error(ss.str()); | ||||
|   } | ||||
|   std::string qualString = d.substr(domain_prefix().size()) + "::" + s; | ||||
|   return fromQualString(qualString); | ||||
| } | ||||
|  | ||||
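A short sketch of the invariant enforced here: every Symbol is namespaced, so qualified strings parse and bare ones throw.

```cpp
#include <ATen/core/symbol.h>

void symbol_namespaces() {
  // Fine: "<namespace>::<string>", as the check above requires.
  auto add = c10::Symbol::fromQualString("aten::add");
  (void)add;

  // Would throw: no "::" separator, so there is no namespace to intern.
  // auto bad = c10::Symbol::fromQualString("add");
}
```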
| @ -7,7 +7,6 @@ | ||||
| #include <ATen/core/jit_type.h> | ||||
| #include <ATen/core/stack.h> | ||||
| #include <ATen/core/type_factory.h> | ||||
| #include <c10/util/Exception.h> | ||||
| #include <c10/util/StringUtil.h> | ||||
| #include <c10/util/hash.h> | ||||
| #include <c10/util/irange.h> | ||||
| @ -413,7 +412,7 @@ size_t IValue::hash(const IValue& v) { | ||||
|     case Tag::Enum: | ||||
|     case Tag::Stream: | ||||
|     case Tag::Uninitialized: | ||||
|       TORCH_CHECK(false, | ||||
|       throw std::runtime_error( | ||||
|           "unhashable type: '" + v.type()->repr_str() + "'"); | ||||
|   } | ||||
|   // the above switch should be exhaustive | ||||
|  | ||||
| @ -624,14 +624,7 @@ struct TORCH_API IValue final { | ||||
|   IValue(const c10::SymBool& i) { | ||||
|     if (auto mi = i.maybe_as_bool()) { | ||||
|       tag = Tag::Bool; | ||||
| #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ | ||||
|       payload.u.as_int = *mi; | ||||
| #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ | ||||
|       /* due to byte order, assigning the value via as_int would not set as_bool correctly */ | ||||
|       payload.u.as_bool = *mi; | ||||
| #else | ||||
| #error Unexpected or undefined __BYTE_ORDER__ | ||||
| #endif | ||||
|     } else { | ||||
|       tag = Tag::SymBool; | ||||
|       payload.u.as_intrusive_ptr = i.toSymNodeImpl().release(); | ||||
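The removed #if branch works around a real union-aliasing pitfall; a self-contained sketch of why (illustrative union, not the real IValue payload; strictly, reading an inactive union member is implementation-defined in C++, which is exactly why the right member must be written per byte order):

```cpp
#include <cstdint>
#include <cstdio>

union Payload {
  int64_t as_int;
  bool as_bool;  // overlays the lowest-addressed byte of the union
};

int main() {
  Payload p;
  p.as_int = 1;
  // Little-endian: the 0x01 byte sits at the lowest address, so as_bool
  // reads 1. Big-endian: the lowest address holds the most significant
  // (zero) byte, so as_bool reads 0 -- hence the dedicated assignment.
  std::printf("%d\n", static_cast<int>(p.as_bool));
  return 0;
}
```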
| @ -1176,7 +1169,7 @@ struct TORCH_API IValue final { | ||||
|   using HashIdentityIValueMap = | ||||
|       std::unordered_map<IValue, IValue, HashIdentityIValue, CompIdentityIValues>; | ||||
|  | ||||
|   // Checks if this and rhs have subvalues in common. | ||||
|   // Checks if this and rhs have subvalues in common. | ||||
|   // [t1,t2] and [t2, t3] returns true. | ||||
|   bool overlaps(const IValue& rhs) const; | ||||
|  | ||||
|  | ||||
| @ -1501,7 +1501,7 @@ struct C10_EXPORT ivalue::Object final : c10::intrusive_ptr_target { | ||||
|   // However, the CompilationUnit holds ownership of the type's graphs, so | ||||
|   // inserting a constant object into a Graph would create a reference cycle if | ||||
|   // that constant object held a shared_ptr to its CU. For these objects we | ||||
|   // instantiate them with non-owning references to its CU | ||||
|   // instantiate them with non-owning references to its CU | ||||
|   Object(WeakOrStrongTypePtr type, size_t numSlots) : type_(std::move(type)) { | ||||
|     slots_.resize(numSlots); | ||||
|   } | ||||
|  | ||||
Some files were not shown because too many files have changed in this diff.