Mirror of https://github.com/pytorch/pytorch.git (synced 2025-11-01 04:54:55 +08:00)

Compare commits: csl/multis ... v2.7.1-rc5 (112 commits)
| SHA1 |
|---|
| e2d141dbde | |||
| 121419899b | |||
| 790cc2f02c | |||
| 62ea99a947 | |||
| 941732c829 | |||
| 769d5da702 | |||
| 306ba122bd | |||
| 1ae9953280 | |||
| 4a815ed15a | |||
| 4c7314e78c | |||
| ff4dddf9ef | |||
| e8f8a352f8 | |||
| bdec1570e0 | |||
| a159920494 | |||
| 6f2f41c85b | |||
| 0073e33899 | |||
| 92d3286c2a | |||
| 1d1c7e08af | |||
| 70518b720c | |||
| ab54c4737c | |||
| 0d98f9d446 | |||
| b8d9208ca3 | |||
| 8af995f207 | |||
| 5beafa5faa | |||
| 9ebc62ec6b | |||
| 378a55ccdb | |||
| 800aa04bac | |||
| c14233ded1 | |||
| 3bfe0711d2 | |||
| fa98236357 | |||
| f77213d3da | |||
| 1a3161ae5a | |||
| 27e9ca5d36 | |||
| dab8130f4f | |||
| 20d62a8d25 | |||
| cd885e7c9a | |||
| 99847860ea | |||
| 24b0c4abfc | |||
| 2dc4b15cf3 | |||
| cd6037ed4b | |||
| 1341794745 | |||
| 073912749d | |||
| 0c236f3c72 | |||
| c7ff78dfc0 | |||
| 894909a613 | |||
| ef2b1390ed | |||
| 3f236f1903 | |||
| 35f1e76212 | |||
| a6321d6227 | |||
| 1cc51c640a | |||
| 28ca4dd77d | |||
| 06c6a81a98 | |||
| 3b61d5d4e3 | |||
| 8b6bc59e95 | |||
| c2ccaa3c21 | |||
| 6569576c4e | |||
| 5416dff2b2 | |||
| 791265114e | |||
| 7ad8bc7e8b | |||
| f2ee3f4847 | |||
| dfd39fe14f | |||
| b766c0200a | |||
| a3cd7b0cc4 | |||
| 8522972133 | |||
| c4b98c8364 | |||
| d10ffd76db | |||
| 53a13e553d | |||
| 5745d6a770 | |||
| 60ddcd803e | |||
| f2b3b5c453 | |||
| 71fa7def26 | |||
| 1a6c192dc4 | |||
| e691e92297 | |||
| 2b73f403c7 | |||
| 697cd9bbb1 | |||
| 64ca70f83c | |||
| 1b84fd1503 | |||
| 6b27e11a5b | |||
| 18a926f547 | |||
| ecd434bea9 | |||
| 5bed3fafc7 | |||
| 9b4f085526 | |||
| d29e4c81d9 | |||
| 8d2186cd79 | |||
| b04d8358d9 | |||
| d80afc07f0 | |||
| 84210a82ef | |||
| 4268b2f40a | |||
| 12a6d2a0b8 | |||
| 464432ec47 | |||
| 1f612dafb5 | |||
| f63def6ac7 | |||
| 3a8e623a9b | |||
| bf727425a0 | |||
| 8c7dbc939f | |||
| 644fdbad95 | |||
| fb027c5692 | |||
| 3b87bd8b82 | |||
| 89b098a677 | |||
| 4cc4302b32 | |||
| c632e4fdb8 | |||
| b23bfae9f7 | |||
| 1b8f496f87 | |||
| c236b602ff | |||
| 6926f30654 | |||
| 483980d7f3 | |||
| 7173a73cf4 | |||
| 7bab7354df | |||
| b1940b5867 | |||
| abebbd5113 | |||
| cdd7a2c72b | |||
| d94ea2647c | 
| @ -55,9 +55,22 @@ def build_ArmComputeLibrary() -> None: | ||||
|         shutil.copytree(f"{acl_checkout_dir}/{d}", f"{acl_install_dir}/{d}") | ||||
|  | ||||
|  | ||||
| def update_wheel(wheel_path, desired_cuda) -> None: | ||||
| def replace_tag(filename) -> None: | ||||
|     with open(filename) as f: | ||||
|         lines = f.readlines() | ||||
|     for i, line in enumerate(lines): | ||||
|         if line.startswith("Tag:"): | ||||
|             lines[i] = line.replace("-linux_", "-manylinux_2_28_") | ||||
|             print(f"Updated tag from {line} to {lines[i]}") | ||||
|             break | ||||
|  | ||||
|     with open(filename, "w") as f: | ||||
|         f.writelines(lines) | ||||
|  | ||||
|  | ||||
| def package_cuda_wheel(wheel_path, desired_cuda) -> None: | ||||
|     """ | ||||
|     Update the cuda wheel libraries | ||||
|     Package the cuda wheel libraries | ||||
|     """ | ||||
|     folder = os.path.dirname(wheel_path) | ||||
|     wheelname = os.path.basename(wheel_path) | ||||
| @ -88,30 +101,19 @@ def update_wheel(wheel_path, desired_cuda) -> None: | ||||
|         "/usr/lib64/libgfortran.so.5", | ||||
|         "/acl/build/libarm_compute.so", | ||||
|         "/acl/build/libarm_compute_graph.so", | ||||
|         "/usr/local/lib/libnvpl_lapack_lp64_gomp.so.0", | ||||
|         "/usr/local/lib/libnvpl_blas_lp64_gomp.so.0", | ||||
|         "/usr/local/lib/libnvpl_lapack_core.so.0", | ||||
|         "/usr/local/lib/libnvpl_blas_core.so.0", | ||||
|     ] | ||||
|     if enable_cuda: | ||||
|  | ||||
|     if "128" in desired_cuda: | ||||
|         libs_to_copy += [ | ||||
|             "/usr/local/lib/libnvpl_lapack_lp64_gomp.so.0", | ||||
|             "/usr/local/lib/libnvpl_blas_lp64_gomp.so.0", | ||||
|             "/usr/local/lib/libnvpl_lapack_core.so.0", | ||||
|             "/usr/local/lib/libnvpl_blas_core.so.0", | ||||
|         ] | ||||
|         if "126" in desired_cuda: | ||||
|             libs_to_copy += [ | ||||
|                 "/usr/local/cuda/lib64/libnvrtc-builtins.so.12.6", | ||||
|                 "/usr/local/cuda/lib64/libcufile.so.0", | ||||
|                 "/usr/local/cuda/lib64/libcufile_rdma.so.1", | ||||
|             ] | ||||
|         elif "128" in desired_cuda: | ||||
|             libs_to_copy += [ | ||||
|                 "/usr/local/cuda/lib64/libnvrtc-builtins.so.12.8", | ||||
|                 "/usr/local/cuda/lib64/libcufile.so.0", | ||||
|                 "/usr/local/cuda/lib64/libcufile_rdma.so.1", | ||||
|             ] | ||||
|     else: | ||||
|         libs_to_copy += [ | ||||
|             "/opt/OpenBLAS/lib/libopenblas.so.0", | ||||
|             "/usr/local/cuda/lib64/libnvrtc-builtins.so.12.8", | ||||
|             "/usr/local/cuda/lib64/libcufile.so.0", | ||||
|             "/usr/local/cuda/lib64/libcufile_rdma.so.1", | ||||
|         ] | ||||
|  | ||||
|     # Copy libraries to unzipped_folder/a/lib | ||||
|     for lib_path in libs_to_copy: | ||||
|         lib_name = os.path.basename(lib_path) | ||||
| @ -120,6 +122,13 @@ def update_wheel(wheel_path, desired_cuda) -> None: | ||||
|             f"cd {folder}/tmp/torch/lib/; " | ||||
|             f"patchelf --set-rpath '$ORIGIN' --force-rpath {folder}/tmp/torch/lib/{lib_name}" | ||||
|         ) | ||||
|  | ||||
|     # Make sure the wheel is tagged with manylinux_2_28 | ||||
|     for f in os.scandir(f"{folder}/tmp/"): | ||||
|         if f.is_dir() and f.name.endswith(".dist-info"): | ||||
|             replace_tag(f"{f.path}/WHEEL") | ||||
|             break | ||||
|  | ||||
|     os.mkdir(f"{folder}/cuda_wheel") | ||||
|     os.system(f"cd {folder}/tmp/; zip -r {folder}/cuda_wheel/{wheelname} *") | ||||
|     shutil.move( | ||||
| @ -136,6 +145,9 @@ def complete_wheel(folder: str) -> str: | ||||
|     """ | ||||
|     wheel_name = list_dir(f"/{folder}/dist")[0] | ||||
|  | ||||
|     # Please note for cuda we don't run auditwheel since we use custom script to package | ||||
|     # the cuda dependencies to the wheel file using update_wheel() method. | ||||
|     # However we need to make sure filename reflects the correct Manylinux platform. | ||||
|     if "pytorch" in folder and not enable_cuda: | ||||
|         print("Repairing Wheel with AuditWheel") | ||||
|         check_call(["auditwheel", "repair", f"dist/{wheel_name}"], cwd=folder) | ||||
| @ -147,7 +159,14 @@ def complete_wheel(folder: str) -> str: | ||||
|             f"/{folder}/dist/{repaired_wheel_name}", | ||||
|         ) | ||||
|     else: | ||||
|         repaired_wheel_name = wheel_name | ||||
|         repaired_wheel_name = wheel_name.replace( | ||||
|             "linux_aarch64", "manylinux_2_28_aarch64" | ||||
|         ) | ||||
|         print(f"Renaming {wheel_name} wheel to {repaired_wheel_name}") | ||||
|         os.rename( | ||||
|             f"/{folder}/dist/{wheel_name}", | ||||
|             f"/{folder}/dist/{repaired_wheel_name}", | ||||
|         ) | ||||
|  | ||||
|     print(f"Copying {repaired_wheel_name} to artifacts") | ||||
|     shutil.copy2( | ||||
| @ -232,6 +251,6 @@ if __name__ == "__main__": | ||||
|         print("Updating Cuda Dependency") | ||||
|         filename = os.listdir("/pytorch/dist/") | ||||
|         wheel_path = f"/pytorch/dist/{filename[0]}" | ||||
|         update_wheel(wheel_path, desired_cuda) | ||||
|         package_cuda_wheel(wheel_path, desired_cuda) | ||||
|     pytorch_wheel_name = complete_wheel("/pytorch/") | ||||
|     print(f"Build Complete. Created {pytorch_wheel_name}..") | ||||
|  | ||||
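The hunk above renames `update_wheel()` to `package_cuda_wheel()` and adds a manylinux_2_28 retagging step: because auditwheel is skipped for the CUDA build, both the wheel filename and the `Tag:` line in the wheel's `*.dist-info/WHEEL` metadata have `linux_aarch64` rewritten to `manylinux_2_28_aarch64`. A minimal standalone sketch of that retag; the wheel path and tag value below are illustrative, not taken from the actual build output:

```python
# Sketch of the manylinux retag added above; file names are hypothetical.
def retag_wheel_metadata(wheel_metadata_path: str) -> None:
    """Rewrite 'Tag: ...-linux_...' to 'Tag: ...-manylinux_2_28_...' in a WHEEL file."""
    with open(wheel_metadata_path) as f:
        lines = f.readlines()
    for i, line in enumerate(lines):
        if line.startswith("Tag:"):
            lines[i] = line.replace("-linux_", "-manylinux_2_28_")
            break
    with open(wheel_metadata_path, "w") as f:
        f.writelines(lines)

# The filename gets the matching rename, mirroring complete_wheel():
wheel_name = "torch-2.7.0-cp310-cp310-linux_aarch64.whl"  # hypothetical name
repaired = wheel_name.replace("linux_aarch64", "manylinux_2_28_aarch64")
# -> "torch-2.7.0-cp310-cp310-manylinux_2_28_aarch64.whl"
```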
| @ -19,11 +19,13 @@ import boto3 | ||||
|  | ||||
| # AMI images for us-east-1, change the following based on your ~/.aws/config | ||||
| os_amis = { | ||||
|     "ubuntu18_04": "ami-078eece1d8119409f",  # login_name: ubuntu | ||||
|     "ubuntu20_04": "ami-052eac90edaa9d08f",  # login_name: ubuntu | ||||
|     "ubuntu22_04": "ami-0c6c29c5125214c77",  # login_name: ubuntu | ||||
|     "redhat8": "ami-0698b90665a2ddcf1",  # login_name: ec2-user | ||||
| } | ||||
|  | ||||
| ubuntu18_04_ami = os_amis["ubuntu18_04"] | ||||
| ubuntu20_04_ami = os_amis["ubuntu20_04"] | ||||
|  | ||||
|  | ||||
| @ -657,6 +659,18 @@ def configure_system( | ||||
|             "sudo apt-get install -y python3-dev python3-yaml python3-setuptools python3-wheel python3-pip" | ||||
|         ) | ||||
|     host.run_cmd("pip3 install dataclasses typing-extensions") | ||||
|     # Install and switch to gcc-8 on Ubuntu-18.04 | ||||
|     if not host.using_docker() and host.ami == ubuntu18_04_ami and compiler == "gcc-8": | ||||
|         host.run_cmd("sudo apt-get install -y g++-8 gfortran-8") | ||||
|         host.run_cmd( | ||||
|             "sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-8 100" | ||||
|         ) | ||||
|         host.run_cmd( | ||||
|             "sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-8 100" | ||||
|         ) | ||||
|         host.run_cmd( | ||||
|             "sudo update-alternatives --install /usr/bin/gfortran gfortran /usr/bin/gfortran-8 100" | ||||
|         ) | ||||
|     if not use_conda: | ||||
|         print("Installing Cython + numpy from PyPy") | ||||
|         host.run_cmd("sudo pip3 install Cython") | ||||
|  | ||||
| @ -105,6 +105,7 @@ case "$image" in | ||||
|     ANACONDA_PYTHON_VERSION=3.10 | ||||
|     GCC_VERSION=11 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     KATEX=yes | ||||
|     UCX_COMMIT=${_UCX_COMMIT} | ||||
| @ -118,6 +119,7 @@ case "$image" in | ||||
|     ANACONDA_PYTHON_VERSION=3.10 | ||||
|     GCC_VERSION=9 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     KATEX=yes | ||||
|     UCX_COMMIT=${_UCX_COMMIT} | ||||
| @ -132,6 +134,7 @@ case "$image" in | ||||
|     ANACONDA_PYTHON_VERSION=3.12 | ||||
|     GCC_VERSION=9 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     KATEX=yes | ||||
|     UCX_COMMIT=${_UCX_COMMIT} | ||||
| @ -146,6 +149,7 @@ case "$image" in | ||||
|     ANACONDA_PYTHON_VERSION=3.13 | ||||
|     GCC_VERSION=9 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     KATEX=yes | ||||
|     UCX_COMMIT=${_UCX_COMMIT} | ||||
| @ -160,6 +164,7 @@ case "$image" in | ||||
|     ANACONDA_PYTHON_VERSION=3.10 | ||||
|     GCC_VERSION=9 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     KATEX=yes | ||||
|     UCX_COMMIT=${_UCX_COMMIT} | ||||
| @ -173,6 +178,7 @@ case "$image" in | ||||
|     ANACONDA_PYTHON_VERSION=3.10 | ||||
|     GCC_VERSION=9 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     KATEX=yes | ||||
|     UCX_COMMIT=${_UCX_COMMIT} | ||||
| @ -187,6 +193,7 @@ case "$image" in | ||||
|     ANACONDA_PYTHON_VERSION=3.12 | ||||
|     GCC_VERSION=9 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     KATEX=yes | ||||
|     UCX_COMMIT=${_UCX_COMMIT} | ||||
| @ -201,6 +208,7 @@ case "$image" in | ||||
|     ANACONDA_PYTHON_VERSION=3.13 | ||||
|     GCC_VERSION=9 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     KATEX=yes | ||||
|     UCX_COMMIT=${_UCX_COMMIT} | ||||
| @ -215,6 +223,7 @@ case "$image" in | ||||
|     ANACONDA_PYTHON_VERSION=3.10 | ||||
|     GCC_VERSION=9 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     KATEX=yes | ||||
|     UCX_COMMIT=${_UCX_COMMIT} | ||||
| @ -226,6 +235,7 @@ case "$image" in | ||||
|     ANACONDA_PYTHON_VERSION=3.9 | ||||
|     CLANG_VERSION=10 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     CONDA_CMAKE=yes | ||||
|     ONNX=yes | ||||
| @ -234,7 +244,10 @@ case "$image" in | ||||
|     ANACONDA_PYTHON_VERSION=3.9 | ||||
|     CLANG_VERSION=10 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     VULKAN_SDK_VERSION=1.2.162.1 | ||||
|     SWIFTSHADER=yes | ||||
|     CONDA_CMAKE=yes | ||||
|     TRITON=yes | ||||
|     ;; | ||||
| @ -242,7 +255,10 @@ case "$image" in | ||||
|     ANACONDA_PYTHON_VERSION=3.11 | ||||
|     CLANG_VERSION=10 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     VULKAN_SDK_VERSION=1.2.162.1 | ||||
|     SWIFTSHADER=yes | ||||
|     CONDA_CMAKE=yes | ||||
|     TRITON=yes | ||||
|     ;; | ||||
| @ -250,6 +266,7 @@ case "$image" in | ||||
|     ANACONDA_PYTHON_VERSION=3.9 | ||||
|     GCC_VERSION=9 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     CONDA_CMAKE=yes | ||||
|     TRITON=yes | ||||
| @ -258,6 +275,7 @@ case "$image" in | ||||
|     ANACONDA_PYTHON_VERSION=3.10 | ||||
|     GCC_VERSION=11 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     ROCM_VERSION=6.2.4 | ||||
|     NINJA_VERSION=1.9.0 | ||||
| @ -272,6 +290,7 @@ case "$image" in | ||||
|     ANACONDA_PYTHON_VERSION=3.10 | ||||
|     GCC_VERSION=11 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     ROCM_VERSION=6.3 | ||||
|     NINJA_VERSION=1.9.0 | ||||
| @ -286,6 +305,7 @@ case "$image" in | ||||
|     ANACONDA_PYTHON_VERSION=3.9 | ||||
|     GCC_VERSION=11 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     XPU_VERSION=0.5 | ||||
|     NINJA_VERSION=1.9.0 | ||||
| @ -296,6 +316,7 @@ case "$image" in | ||||
|     ANACONDA_PYTHON_VERSION=3.9 | ||||
|     GCC_VERSION=11 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     XPU_VERSION=2025.0 | ||||
|     NINJA_VERSION=1.9.0 | ||||
| @ -306,6 +327,7 @@ case "$image" in | ||||
|     ANACONDA_PYTHON_VERSION=3.9 | ||||
|     GCC_VERSION=11 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     KATEX=yes | ||||
|     CONDA_CMAKE=yes | ||||
| @ -319,6 +341,7 @@ case "$image" in | ||||
|     CUDNN_VERSION=9 | ||||
|     CLANG_VERSION=12 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     TRITON=yes | ||||
|     ;; | ||||
| @ -326,6 +349,7 @@ case "$image" in | ||||
|     ANACONDA_PYTHON_VERSION=3.9 | ||||
|     CLANG_VERSION=12 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     CONDA_CMAKE=yes | ||||
|     TRITON=yes | ||||
| @ -346,6 +370,7 @@ case "$image" in | ||||
|     ANACONDA_PYTHON_VERSION=3.9 | ||||
|     GCC_VERSION=11 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     KATEX=yes | ||||
|     CONDA_CMAKE=yes | ||||
| @ -378,19 +403,20 @@ case "$image" in | ||||
|     # TODO: Use 3.9 here because of this issue https://github.com/python/mypy/issues/13627. | ||||
|     # We will need to update mypy version eventually, but that's for another day. The task | ||||
|     # would be to upgrade mypy to 1.0.0 with Python 3.11 | ||||
|     PYTHON_VERSION=3.9 | ||||
|     PIP_CMAKE=yes | ||||
|     ANACONDA_PYTHON_VERSION=3.9 | ||||
|     CONDA_CMAKE=yes | ||||
|     ;; | ||||
|   pytorch-linux-jammy-cuda11.8-cudnn9-py3.9-linter) | ||||
|     PYTHON_VERSION=3.9 | ||||
|     ANACONDA_PYTHON_VERSION=3.9 | ||||
|     CUDA_VERSION=11.8 | ||||
|     PIP_CMAKE=yes | ||||
|     CONDA_CMAKE=yes | ||||
|     ;; | ||||
|   pytorch-linux-jammy-aarch64-py3.10-gcc11) | ||||
|     ANACONDA_PYTHON_VERSION=3.10 | ||||
|     GCC_VERSION=11 | ||||
|     ACL=yes | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     CONDA_CMAKE=yes | ||||
|     # snadampal: skipping llvm src build install because the current version | ||||
| @ -402,6 +428,7 @@ case "$image" in | ||||
|     GCC_VERSION=11 | ||||
|     ACL=yes | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     CONDA_CMAKE=yes | ||||
|     # snadampal: skipping llvm src build install because the current version | ||||
| @ -412,6 +439,7 @@ case "$image" in | ||||
|   *) | ||||
|     # Catch-all for builds that are not hardcoded. | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     echo "image '$image' did not match an existing build configuration" | ||||
|     if [[ "$image" == *py* ]]; then | ||||
| @ -467,6 +495,7 @@ docker build \ | ||||
|        --build-arg "BUILD_ENVIRONMENT=${image}" \ | ||||
|        --build-arg "PROTOBUF=${PROTOBUF:-}" \ | ||||
|        --build-arg "LLVMDEV=${LLVMDEV:-}" \ | ||||
|        --build-arg "DB=${DB:-}" \ | ||||
|        --build-arg "VISION=${VISION:-}" \ | ||||
|        --build-arg "UBUNTU_VERSION=${UBUNTU_VERSION}" \ | ||||
|        --build-arg "CENTOS_VERSION=${CENTOS_VERSION}" \ | ||||
| @ -474,12 +503,13 @@ docker build \ | ||||
|        --build-arg "GLIBC_VERSION=${GLIBC_VERSION}" \ | ||||
|        --build-arg "CLANG_VERSION=${CLANG_VERSION}" \ | ||||
|        --build-arg "ANACONDA_PYTHON_VERSION=${ANACONDA_PYTHON_VERSION}" \ | ||||
|        --build-arg "PYTHON_VERSION=${PYTHON_VERSION}" \ | ||||
|        --build-arg "GCC_VERSION=${GCC_VERSION}" \ | ||||
|        --build-arg "CUDA_VERSION=${CUDA_VERSION}" \ | ||||
|        --build-arg "CUDNN_VERSION=${CUDNN_VERSION}" \ | ||||
|        --build-arg "TENSORRT_VERSION=${TENSORRT_VERSION}" \ | ||||
|        --build-arg "GRADLE_VERSION=${GRADLE_VERSION}" \ | ||||
|        --build-arg "VULKAN_SDK_VERSION=${VULKAN_SDK_VERSION}" \ | ||||
|        --build-arg "SWIFTSHADER=${SWIFTSHADER}" \ | ||||
|        --build-arg "CMAKE_VERSION=${CMAKE_VERSION:-}" \ | ||||
|        --build-arg "NINJA_VERSION=${NINJA_VERSION:-}" \ | ||||
|        --build-arg "KATEX=${KATEX:-}" \ | ||||
| @ -489,7 +519,6 @@ docker build \ | ||||
|        --build-arg "UCX_COMMIT=${UCX_COMMIT}" \ | ||||
|        --build-arg "UCC_COMMIT=${UCC_COMMIT}" \ | ||||
|        --build-arg "CONDA_CMAKE=${CONDA_CMAKE}" \ | ||||
|        --build-arg "PIP_CMAKE=${PIP_CMAKE}" \ | ||||
|        --build-arg "TRITON=${TRITON}" \ | ||||
|        --build-arg "TRITON_CPU=${TRITON_CPU}" \ | ||||
|        --build-arg "ONNX=${ONNX}" \ | ||||
|  | ||||
| @ -55,6 +55,13 @@ RUN if [ -n "${PROTOBUF}" ]; then bash ./install_protobuf.sh; fi | ||||
| RUN rm install_protobuf.sh | ||||
| ENV INSTALLED_PROTOBUF ${PROTOBUF} | ||||
|  | ||||
| # (optional) Install database packages like LMDB and LevelDB | ||||
| ARG DB | ||||
| COPY ./common/install_db.sh install_db.sh | ||||
| RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi | ||||
| RUN rm install_db.sh | ||||
| ENV INSTALLED_DB ${DB} | ||||
|  | ||||
| # (optional) Install vision packages like OpenCV | ||||
| ARG VISION | ||||
| COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./ | ||||
|  | ||||
| @ -1 +1 @@ | ||||
| 01a22b6f16d117454b7d21ebdc691b0785b84a7f | ||||
| ebe8522378c3f9944aaaef44868f5ececdd845fc | ||||
|  | ||||
| @ -37,7 +37,7 @@ install_ubuntu() { | ||||
|   if [[ "$UBUNTU_VERSION" == "20.04"* && "$CUDA_VERSION" == "11.8"* ]]; then | ||||
|     maybe_libnccl_dev="libnccl2=2.15.5-1+cuda11.8 libnccl-dev=2.15.5-1+cuda11.8 --allow-downgrades --allow-change-held-packages" | ||||
|   elif [[ "$UBUNTU_VERSION" == "20.04"* && "$CUDA_VERSION" == "12.4"* ]]; then | ||||
|     maybe_libnccl_dev="libnccl2=2.25.1-1+cuda12.4 libnccl-dev=2.25.1-1+cuda12.4 --allow-downgrades --allow-change-held-packages" | ||||
|     maybe_libnccl_dev="libnccl2=2.26.2-1+cuda12.4 libnccl-dev=2.26.2-1+cuda12.4 --allow-downgrades --allow-change-held-packages" | ||||
|   else | ||||
|     maybe_libnccl_dev="" | ||||
|   fi | ||||
|  | ||||
| @ -2,45 +2,29 @@ | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| SCCACHE_VERSION="0.9.1" | ||||
|  | ||||
| CARGO_FLAGS="" | ||||
|  | ||||
| install_prereqs_ubuntu() { | ||||
| install_ubuntu() { | ||||
|   echo "Preparing to build sccache from source" | ||||
|   apt-get update | ||||
|   # libssl-dev will not work as it is upgraded to libssl3 in Ubuntu-22.04. | ||||
|   # Instead use lib and headers from OpenSSL1.1 installed in `install_openssl.sh`` | ||||
|   apt-get install -y cargo | ||||
|  | ||||
|   # cleanup after ourselves | ||||
|   trap 'cleanup_ubuntu' EXIT | ||||
|   echo "Checking out sccache repo" | ||||
|   git clone https://github.com/mozilla/sccache -b v0.9.1 | ||||
|   cd sccache | ||||
|   echo "Building sccache" | ||||
|   cargo build --release | ||||
|   cp target/release/sccache /opt/cache/bin | ||||
|   echo "Cleaning up" | ||||
|   cd .. | ||||
|   rm -rf sccache | ||||
|   apt-get remove -y cargo rustc | ||||
|   apt-get autoclean && apt-get clean | ||||
|  | ||||
|   echo "Downloading old sccache binary from S3 repo for PCH builds" | ||||
|   curl --retry 3 https://s3.amazonaws.com/ossci-linux/sccache -o /opt/cache/bin/sccache-0.2.14a | ||||
|   chmod 755 /opt/cache/bin/sccache-0.2.14a | ||||
| } | ||||
|  | ||||
| cleanup_ubuntu() { | ||||
|   rm -rf sccache | ||||
|   apt-get remove -y cargo rustc | ||||
|   apt-get autoclean && apt-get clean | ||||
| } | ||||
|  | ||||
| install_prereqs_almalinux() { | ||||
|   dnf install -y cargo | ||||
|   # use vendored openssl, we're not going to use the dist-server anyways | ||||
|   CARGO_FEATURES="--bin sccache --features openssl/vendored" | ||||
| } | ||||
|  | ||||
| build_and_install_sccache() { | ||||
|   # modern version of git don't like openssl1.1 | ||||
|   wget -q -O sccache.tar.gz "https://github.com/mozilla/sccache/archive/refs/tags/v${SCCACHE_VERSION}.tar.gz" | ||||
|   tar xzf sccache.tar.gz | ||||
|   pushd "sccache-${SCCACHE_VERSION}" | ||||
|   cargo build --release ${CARGO_FEATURES} | ||||
|   cp target/release/sccache /opt/cache/bin | ||||
| } | ||||
|  | ||||
| install_binary() { | ||||
|   echo "Downloading sccache binary from S3 repo" | ||||
|   curl --retry 3 https://s3.amazonaws.com/ossci-linux/sccache -o /opt/cache/bin/sccache | ||||
| @ -51,21 +35,8 @@ mkdir -p /opt/cache/lib | ||||
| sed -e 's|PATH="\(.*\)"|PATH="/opt/cache/bin:\1"|g' -i /etc/environment | ||||
| export PATH="/opt/cache/bin:$PATH" | ||||
|  | ||||
| echo "Preparing to build sccache from source" | ||||
| DIST_ID=$(. /etc/os-release && echo "$ID") | ||||
| case ${DIST_ID} in | ||||
|   ubuntu) | ||||
|     install_prereqs_ubuntu | ||||
|     ;; | ||||
|   almalinux) | ||||
|     install_prereqs_almalinux | ||||
|     ;; | ||||
|   *) | ||||
|     echo "ERROR: Unknown distribution ${DIST_ID}" | ||||
|     exit 1 | ||||
|     ;; | ||||
| esac | ||||
| build_and_install_sccache | ||||
| # Setup compiler cache | ||||
| install_ubuntu | ||||
| chmod a+x /opt/cache/bin/sccache | ||||
|  | ||||
| function write_sccache_stub() { | ||||
|  | ||||
| @ -4,10 +4,16 @@ set -ex | ||||
|  | ||||
| if [ -n "$CLANG_VERSION" ]; then | ||||
|  | ||||
|   if [[ $UBUNTU_VERSION == 22.04 ]]; then | ||||
|   if [[ $CLANG_VERSION == 9 && $UBUNTU_VERSION == 18.04 ]]; then | ||||
|     sudo apt-get update | ||||
|     # gpg-agent is not available by default on 18.04 | ||||
|     sudo apt-get install  -y --no-install-recommends gpg-agent | ||||
|     wget --no-check-certificate -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add  - | ||||
|     apt-add-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-${CLANG_VERSION} main" | ||||
|   elif [[ $UBUNTU_VERSION == 22.04 ]]; then | ||||
|     # work around ubuntu apt-get conflicts | ||||
|     sudo apt-get -y -f install | ||||
|     wget --no-check-certificate -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add - | ||||
|     wget --no-check-certificate -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add  - | ||||
|     if [[ $CLANG_VERSION == 18 ]]; then | ||||
|       apt-add-repository "deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-18 main" | ||||
|     fi | ||||
| @ -35,7 +41,7 @@ if [ -n "$CLANG_VERSION" ]; then | ||||
|   # clang's packaging is a little messed up (the runtime libs aren't | ||||
|   # added into the linker path), so give it a little help | ||||
|   clang_lib=("/usr/lib/llvm-$CLANG_VERSION/lib/clang/"*"/lib/linux") | ||||
|   echo "$clang_lib" >/etc/ld.so.conf.d/clang.conf | ||||
|   echo "$clang_lib" > /etc/ld.so.conf.d/clang.conf | ||||
|   ldconfig | ||||
|  | ||||
|   # Cleanup package manager | ||||
|  | ||||
| @ -2,7 +2,7 @@ | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| NCCL_VERSION=v2.25.1-1 | ||||
| NCCL_VERSION=v2.26.2-1 | ||||
| CUDNN_VERSION=9.5.1.17 | ||||
|  | ||||
| function install_cusparselt_040 { | ||||
| @ -240,7 +240,7 @@ function prune_126 { | ||||
| } | ||||
|  | ||||
| function install_128 { | ||||
|   CUDNN_VERSION=9.8.0.87 | ||||
|   CUDNN_VERSION=9.7.1.26 | ||||
|   echo "Installing CUDA 12.8.0 and cuDNN ${CUDNN_VERSION} and NCCL ${NCCL_VERSION} and cuSparseLt-0.6.3" | ||||
|   rm -rf /usr/local/cuda-12.8 /usr/local/cuda | ||||
|   # install CUDA 12.8.0 in the same container | ||||
|  | ||||
| @ -5,7 +5,7 @@ if [[ -n "${CUDNN_VERSION}" ]]; then | ||||
|     mkdir tmp_cudnn | ||||
|     pushd tmp_cudnn | ||||
|     if [[ ${CUDA_VERSION:0:4} == "12.8" ]]; then | ||||
|         CUDNN_NAME="cudnn-linux-x86_64-9.8.0.87_cuda12-archive" | ||||
|         CUDNN_NAME="cudnn-linux-x86_64-9.7.1.26_cuda12-archive" | ||||
|     elif [[ ${CUDA_VERSION:0:4} == "12.6" ]]; then | ||||
|         CUDNN_NAME="cudnn-linux-x86_64-9.5.1.17_cuda12-archive" | ||||
|     elif [[ ${CUDA_VERSION:0:2} == "12" ]]; then | ||||
|  | ||||

.ci/docker/common/install_db.sh (new executable file, 38 lines)
							| @ -0,0 +1,38 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| install_ubuntu() { | ||||
|   apt-get update | ||||
|  | ||||
|   # Cleanup | ||||
|   apt-get autoclean && apt-get clean | ||||
|   rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* | ||||
| } | ||||
|  | ||||
| install_centos() { | ||||
|   # Need EPEL for many packages we depend on. | ||||
|   # See http://fedoraproject.org/wiki/EPEL | ||||
|   yum --enablerepo=extras install -y epel-release | ||||
|  | ||||
|   # Cleanup | ||||
|   yum clean all | ||||
|   rm -rf /var/cache/yum | ||||
|   rm -rf /var/lib/yum/yumdb | ||||
|   rm -rf /var/lib/yum/history | ||||
| } | ||||
|  | ||||
| # Install base packages depending on the base OS | ||||
| ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"') | ||||
| case "$ID" in | ||||
|   ubuntu) | ||||
|     install_ubuntu | ||||
|     ;; | ||||
|   centos) | ||||
|     install_centos | ||||
|     ;; | ||||
|   *) | ||||
|     echo "Unable to determine OS..." | ||||
|     exit 1 | ||||
|     ;; | ||||
| esac | ||||
| @ -50,8 +50,7 @@ setup_executorch() { | ||||
|   pushd executorch | ||||
|  | ||||
|   export PYTHON_EXECUTABLE=python | ||||
|   export EXECUTORCH_BUILD_PYBIND=ON | ||||
|   export CMAKE_ARGS="-DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON" | ||||
|   export CMAKE_ARGS="-DEXECUTORCH_BUILD_PYBIND=ON -DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON" | ||||
|  | ||||
|   as_jenkins .ci/scripts/setup-linux.sh --build-tool cmake || true | ||||
|   popd | ||||
|  | ||||
| @ -35,7 +35,9 @@ git clone https://github.com/halide/Halide.git | ||||
| pushd Halide | ||||
| git checkout ${COMMIT} && git submodule update --init --recursive | ||||
| pip_install -r requirements.txt | ||||
| cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -S . -B build | ||||
| # NOTE: pybind has a requirement for cmake > 3.5 so set the minimum cmake version here with a flag | ||||
| #       Context: https://github.com/pytorch/pytorch/issues/150420 | ||||
| cmake -G Ninja -DCMAKE_POLICY_VERSION_MINIMUM=3.5 -DCMAKE_BUILD_TYPE=Release -S . -B build | ||||
| cmake --build build | ||||
| test -e ${CONDA_PREFIX}/lib/python3 || ln -s python${ANACONDA_PYTHON_VERSION} ${CONDA_PREFIX}/lib/python3 | ||||
| cmake --install build --prefix ${CONDA_PREFIX} | ||||
|  | ||||
| @ -14,13 +14,6 @@ function install_timm() { | ||||
|   local commit | ||||
|   commit=$(get_pinned_commit timm) | ||||
|  | ||||
|   # TODO (huydhn): There is no torchvision release on 3.13 when I write this, so | ||||
|   # I'm using nightly here instead. We just need to package to be able to install | ||||
|   # TIMM. Removing this once vision has a release on 3.13 | ||||
|   if [[ "${ANACONDA_PYTHON_VERSION}" == "3.13" ]]; then | ||||
|     pip_install --pre torch torchvision --index-url https://download.pytorch.org/whl/nightly/cu124 | ||||
|   fi | ||||
|  | ||||
|   pip_install "git+https://github.com/huggingface/pytorch-image-models@${commit}" | ||||
|   # Clean up | ||||
|   conda_run pip uninstall -y cmake torch torchvision triton | ||||
|  | ||||
| @ -2,6 +2,8 @@ | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh" | ||||
|  | ||||
| if [ -n "${UBUNTU_VERSION}" ]; then | ||||
|   apt update | ||||
|   apt-get install -y clang doxygen git graphviz nodejs npm libtinfo5 | ||||
| @ -13,8 +15,8 @@ chown -R jenkins pytorch | ||||
|  | ||||
| pushd pytorch | ||||
| # Install all linter dependencies | ||||
| pip install -r requirements.txt | ||||
| lintrunner init | ||||
| pip_install -r requirements.txt | ||||
| conda_run lintrunner init | ||||
|  | ||||
| # Cache .lintbin directory as part of the Docker image | ||||
| cp -r .lintbin /tmp | ||||
|  | ||||
| @ -1,18 +0,0 @@ | ||||
| #!/bin/bash | ||||
| set -ex | ||||
|  | ||||
| apt-get update | ||||
| # Use deadsnakes in case we need an older python version | ||||
| sudo add-apt-repository ppa:deadsnakes/ppa | ||||
| apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python3-pip python${PYTHON_VERSION}-venv | ||||
|  | ||||
| # Use a venv because uv and some other package managers don't support --user install | ||||
| ln -s /usr/bin/python${PYTHON_VERSION} /usr/bin/python | ||||
| python -m venv /var/lib/jenkins/ci_env | ||||
| source /var/lib/jenkins/ci_env/bin/activate | ||||
|  | ||||
| python -mpip install --upgrade pip | ||||
| python -mpip install -r /opt/requirements-ci.txt | ||||
| if [ -n "${PIP_CMAKE}" ]; then | ||||
|   python -mpip install cmake==3.31.6 | ||||
| fi | ||||
| @ -8,6 +8,10 @@ ver() { | ||||
|  | ||||
| install_ubuntu() { | ||||
|     apt-get update | ||||
|     if [[ $UBUNTU_VERSION == 18.04 ]]; then | ||||
|       # gpg-agent is not available by default on 18.04 | ||||
|       apt-get install -y --no-install-recommends gpg-agent | ||||
|     fi | ||||
|     if [[ $UBUNTU_VERSION == 20.04 ]]; then | ||||
|       # gpg-agent is not available by default on 20.04 | ||||
|       apt-get install -y --no-install-recommends gpg-agent | ||||
|  | ||||
| @ -25,9 +25,7 @@ python3 -m pip install meson ninja | ||||
| ########################### | ||||
| ### clone repo | ||||
| ########################### | ||||
| # TEMPORARY FIX: https://gitlab.freedesktop.org/mesa/drm.git is down until 2025/03/22 | ||||
| # GIT_SSL_NO_VERIFY=true git clone https://gitlab.freedesktop.org/mesa/drm.git | ||||
| GIT_SSL_NO_VERIFY=true git clone git://anongit.freedesktop.org/mesa/drm | ||||
| GIT_SSL_NO_VERIFY=true git clone https://gitlab.freedesktop.org/mesa/drm.git | ||||
| pushd drm | ||||
|  | ||||
| ########################### | ||||
|  | ||||

.ci/docker/common/install_swiftshader.sh (new executable file, 24 lines)
							| @ -0,0 +1,24 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| [ -n "${SWIFTSHADER}" ] | ||||
|  | ||||
| retry () { | ||||
|     $*  || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*) | ||||
| } | ||||
|  | ||||
| _https_amazon_aws=https://ossci-android.s3.amazonaws.com | ||||
|  | ||||
| # SwiftShader | ||||
| _swiftshader_dir=/var/lib/jenkins/swiftshader | ||||
| _swiftshader_file_targz=swiftshader-abe07b943-prebuilt.tar.gz | ||||
| mkdir -p $_swiftshader_dir | ||||
| _tmp_swiftshader_targz="/tmp/${_swiftshader_file_targz}" | ||||
|  | ||||
| curl --silent --show-error --location --fail --retry 3 \ | ||||
|   --output "${_tmp_swiftshader_targz}" "$_https_amazon_aws/${_swiftshader_file_targz}" | ||||
|  | ||||
| tar -C "${_swiftshader_dir}" -xzf "${_tmp_swiftshader_targz}" | ||||
|  | ||||
| export VK_ICD_FILENAMES="${_swiftshader_dir}/build/Linux/vk_swiftshader_icd.json" | ||||

.ci/docker/common/install_vulkan_sdk.sh (new executable file, 24 lines)
							| @ -0,0 +1,24 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| [ -n "${VULKAN_SDK_VERSION}" ] | ||||
|  | ||||
| retry () { | ||||
|     $*  || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*) | ||||
| } | ||||
|  | ||||
| _vulkansdk_dir=/var/lib/jenkins/vulkansdk | ||||
| _tmp_vulkansdk_targz=/tmp/vulkansdk.tar.gz | ||||
|  | ||||
| curl \ | ||||
|   --silent \ | ||||
|   --show-error \ | ||||
|   --location \ | ||||
|   --fail \ | ||||
|   --retry 3 \ | ||||
|   --output "${_tmp_vulkansdk_targz}" "https://ossci-android.s3.amazonaws.com/vulkansdk-linux-x86_64-${VULKAN_SDK_VERSION}.tar.gz" | ||||
|  | ||||
| mkdir -p "${_vulkansdk_dir}" | ||||
| tar -C "${_vulkansdk_dir}" -xzf "${_tmp_vulkansdk_targz}" --strip-components 1 | ||||
| rm -rf "${_tmp_vulkansdk_targz}" | ||||
| @ -47,6 +47,9 @@ function install_ubuntu() { | ||||
|     # Development Packages | ||||
|     apt-get install -y libigc-dev intel-igc-cm libigdfcl-dev libigfxcmrt-dev level-zero-dev | ||||
|     # Install Intel Support Packages | ||||
|     if [[ "$XPU_VERSION" == "2025.0" ]]; then | ||||
|         XPU_PACKAGES="${XPU_PACKAGES} intel-oneapi-dnnl=2025.0.1-6" | ||||
|     fi | ||||
|     apt-get install -y ${XPU_PACKAGES} | ||||
|  | ||||
|     # Cleanup | ||||
| @ -82,6 +85,9 @@ gpgkey=https://yum.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS. | ||||
| EOF | ||||
|  | ||||
|     # Install Intel Support Packages | ||||
|     if [[ "$XPU_VERSION" == "2025.0" ]]; then | ||||
|         XPU_PACKAGES="${XPU_PACKAGES} intel-oneapi-dnnl-2025.0.1-6" | ||||
|     fi | ||||
|     yum install -y ${XPU_PACKAGES} | ||||
|     # The xpu-smi packages | ||||
|     dnf install -y xpu-smi | ||||
|  | ||||
| @ -18,14 +18,15 @@ COPY ./common/install_user.sh install_user.sh | ||||
| RUN bash ./install_user.sh && rm install_user.sh | ||||
|  | ||||
| # Install conda and other packages (e.g., numpy, pytest) | ||||
| ARG PYTHON_VERSION | ||||
| ARG PIP_CMAKE | ||||
| # Put venv into the env vars so users don't need to activate it | ||||
| ENV PATH /var/lib/jenkins/ci_env/bin:$PATH | ||||
| ENV VIRTUAL_ENV /var/lib/jenkins/ci_env | ||||
| COPY requirements-ci.txt /opt/requirements-ci.txt | ||||
| COPY ./common/install_python.sh install_python.sh | ||||
| RUN bash ./install_python.sh && rm install_python.sh /opt/requirements-ci.txt | ||||
| ARG ANACONDA_PYTHON_VERSION | ||||
| ARG CONDA_CMAKE | ||||
| ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION | ||||
| ENV PATH /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/bin:/opt/conda/bin:$PATH | ||||
| COPY requirements-ci.txt /opt/conda/requirements-ci.txt | ||||
| COPY ./common/install_conda.sh install_conda.sh | ||||
| COPY ./common/common_utils.sh common_utils.sh | ||||
| COPY ./common/install_magma_conda.sh install_magma_conda.sh | ||||
| RUN bash ./install_conda.sh && rm install_conda.sh install_magma_conda.sh common_utils.sh /opt/conda/requirements-ci.txt | ||||
|  | ||||
| # Install cuda and cudnn | ||||
| ARG CUDA_VERSION | ||||
| @ -36,10 +37,9 @@ ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:$PATH | ||||
|  | ||||
| # Note that Docker build forbids copying file outside the build context | ||||
| COPY ./common/install_linter.sh install_linter.sh | ||||
| COPY ./common/common_utils.sh common_utils.sh | ||||
| RUN bash ./install_linter.sh | ||||
| RUN rm install_linter.sh | ||||
|  | ||||
| RUN chown -R jenkins:jenkins /var/lib/jenkins/ci_env | ||||
| RUN rm install_linter.sh common_utils.sh | ||||
|  | ||||
| USER jenkins | ||||
| CMD ["bash"] | ||||
|  | ||||
| @ -15,18 +15,20 @@ COPY ./common/install_user.sh install_user.sh | ||||
| RUN bash ./install_user.sh && rm install_user.sh | ||||
|  | ||||
| # Install conda and other packages (e.g., numpy, pytest) | ||||
| ARG PYTHON_VERSION | ||||
| ARG PIP_CMAKE | ||||
| ENV PATH /var/lib/jenkins/ci_env/bin:$PATH | ||||
| ENV VIRTUAL_ENV /var/lib/jenkins/ci_env | ||||
| COPY requirements-ci.txt /opt/requirements-ci.txt | ||||
| COPY ./common/install_python.sh install_python.sh | ||||
| RUN bash ./install_python.sh && rm install_python.sh /opt/requirements-ci.txt | ||||
| ARG ANACONDA_PYTHON_VERSION | ||||
| ARG CONDA_CMAKE | ||||
| ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION | ||||
| ENV PATH /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/bin:/opt/conda/bin:$PATH | ||||
| COPY requirements-ci.txt /opt/conda/requirements-ci.txt | ||||
| COPY ./common/install_conda.sh install_conda.sh | ||||
| COPY ./common/common_utils.sh common_utils.sh | ||||
| RUN bash ./install_conda.sh && rm install_conda.sh common_utils.sh /opt/conda/requirements-ci.txt | ||||
|  | ||||
| # Note that Docker build forbids copying file outside the build context | ||||
| COPY ./common/install_linter.sh install_linter.sh | ||||
| COPY ./common/common_utils.sh common_utils.sh | ||||
| RUN bash ./install_linter.sh | ||||
| RUN rm install_linter.sh | ||||
| RUN rm install_linter.sh common_utils.sh | ||||
|  | ||||
| USER jenkins | ||||
| CMD ["bash"] | ||||
|  | ||||
| @ -60,11 +60,6 @@ FROM base as libpng | ||||
| ADD ./common/install_libpng.sh install_libpng.sh | ||||
| RUN bash ./install_libpng.sh && rm install_libpng.sh | ||||
|  | ||||
| FROM base as cache | ||||
| # Install sccache | ||||
| ADD ./common/install_cache.sh install_cache.sh | ||||
| RUN bash ./install_cache.sh && rm install_cache.sh | ||||
|  | ||||
| FROM ${GPU_IMAGE} as common | ||||
| ARG DEVTOOLSET_VERSION=11 | ||||
| ENV LC_ALL en_US.UTF-8 | ||||
| @ -117,10 +112,6 @@ COPY --from=libpng             /usr/local/include/libpng*            /usr/local/ | ||||
| COPY --from=libpng             /usr/local/lib/libpng*                /usr/local/lib/ | ||||
| COPY --from=libpng             /usr/local/lib/pkgconfig              /usr/local/lib/pkgconfig | ||||
| COPY --from=jni                /usr/local/include/jni.h              /usr/local/include/jni.h | ||||
| COPY --from=cache              /opt/cache                            /opt/cache | ||||
|  | ||||
| # Ensure sccache is included in the path | ||||
| ENV PATH /opt/cache/bin:$PATH | ||||
|  | ||||
| FROM common as cpu_final | ||||
| ARG BASE_CUDA_VERSION=11.8 | ||||
|  | ||||
| @ -59,11 +59,6 @@ FROM base as openblas | ||||
| ADD ./common/install_openblas.sh install_openblas.sh | ||||
| RUN bash ./install_openblas.sh && rm install_openblas.sh | ||||
|  | ||||
| FROM base as cache | ||||
| # Install sccache | ||||
| ADD ./common/install_cache.sh install_cache.sh | ||||
| RUN bash ./install_cache.sh && rm install_cache.sh | ||||
|  | ||||
| FROM base as final | ||||
|  | ||||
| # remove unncessary python versions | ||||
| @ -72,8 +67,4 @@ RUN rm -rf /opt/python/cp26-cp26mu /opt/_internal/cpython-2.6.9-ucs4 | ||||
| RUN rm -rf /opt/python/cp33-cp33m /opt/_internal/cpython-3.3.6 | ||||
| RUN rm -rf /opt/python/cp34-cp34m /opt/_internal/cpython-3.4.6 | ||||
| COPY --from=openblas     /opt/OpenBLAS/  /opt/OpenBLAS/ | ||||
| COPY --from=cache              /opt/cache                            /opt/cache | ||||
|  | ||||
| ENV LD_LIBRARY_PATH=/opt/OpenBLAS/lib:$LD_LIBRARY_PATH | ||||
| # Ensure sccache is included in the path | ||||
| ENV PATH /opt/cache/bin:$PATH | ||||
|  | ||||
| @ -42,7 +42,6 @@ RUN yum install -y \ | ||||
|   llvm-devel \ | ||||
|   libzstd-devel \ | ||||
|   python3.12-devel \ | ||||
|   python3.12-test \ | ||||
|   python3.12-setuptools \ | ||||
|   python3.12-pip \ | ||||
|   python3-virtualenv \ | ||||
| @ -86,20 +85,11 @@ ADD ./common/install_cpython.sh /build_scripts/install_cpython.sh | ||||
| ENV SSL_CERT_FILE= | ||||
| RUN bash build_scripts/build.sh && rm -r build_scripts | ||||
|  | ||||
| FROM base as cache | ||||
| # Install sccache | ||||
| ADD ./common/install_cache.sh install_cache.sh | ||||
| RUN bash ./install_cache.sh && rm install_cache.sh | ||||
|  | ||||
| FROM base as final | ||||
| COPY --from=python             /opt/python                           /opt/python | ||||
| COPY --from=python             /opt/_internal                        /opt/_internal | ||||
| COPY --from=python             /opt/python/cp39-cp39/bin/auditwheel  /usr/local/bin/auditwheel | ||||
| COPY --from=patchelf           /usr/local/bin/patchelf               /usr/local/bin/patchelf | ||||
| COPY --from=cache              /opt/cache                            /opt/cache | ||||
|  | ||||
| # Ensure sccache is included in the path | ||||
| ENV PATH /opt/cache/bin:$PATH | ||||
|  | ||||
| RUN alternatives --set python /usr/bin/python3.12 | ||||
| RUN alternatives --set python3 /usr/bin/python3.12 | ||||
| @ -111,33 +101,24 @@ CMD ["/bin/bash"] | ||||
|  | ||||
| # install test dependencies: | ||||
| # - grpcio requires system openssl, bundled crypto fails to build | ||||
| # - ml_dtypes 0.4.0 requires some fixes provided in later commits to build | ||||
| RUN dnf install -y \ | ||||
|   protobuf-devel \ | ||||
|   protobuf-c-devel \ | ||||
|   protobuf-lite-devel \ | ||||
|   hdf5-devel \ | ||||
|   python3-h5py \ | ||||
|   git | ||||
|   wget \ | ||||
|   patch | ||||
|  | ||||
| RUN env GRPC_PYTHON_BUILD_SYSTEM_OPENSSL=True pip3 install grpcio | ||||
|  | ||||
| # cmake-3.28.0 from pip for onnxruntime | ||||
| RUN python3 -mpip install cmake==3.28.0 | ||||
|  | ||||
| # build onnxruntime 1.21.0 from sources. | ||||
| # it is not possible to build it from sources using pip, | ||||
| # so just build it from upstream repository. | ||||
| # h5py is dependency of onnxruntime_training. | ||||
| # h5py==3.11.0 builds with hdf5-devel 1.10.5 from repository. | ||||
| # install newest flatbuffers version first: | ||||
| # for some reason old version is getting pulled in otherwise. | ||||
| # packaging package is required for onnxruntime wheel build. | ||||
| RUN pip3 install flatbuffers && \ | ||||
|   pip3 install h5py==3.11.0 && \ | ||||
|   pip3 install packaging && \ | ||||
|   git clone https://github.com/microsoft/onnxruntime && \ | ||||
|   cd onnxruntime && git checkout v1.21.0 && \ | ||||
| RUN env GRPC_PYTHON_BUILD_SYSTEM_OPENSSL=True pip3 install grpcio==1.65.4 | ||||
| RUN cd ~ && \ | ||||
|   git clone https://github.com/jax-ml/ml_dtypes && \ | ||||
|   cd ml_dtypes && \ | ||||
|   git checkout v0.4.0 && \ | ||||
|   git submodule update --init --recursive && \ | ||||
|   ./build.sh --config Release --parallel 0 --enable_pybind --build_wheel --enable_training --enable_training_apis --enable_training_ops --skip_tests --allow_running_as_root && \ | ||||
|   pip3 install ./build/Linux/Release/dist/onnxruntime_training-*.whl && \ | ||||
|   cd .. && /bin/rm -rf ./onnxruntime | ||||
|   wget https://github.com/jax-ml/ml_dtypes/commit/b969f76914d6b30676721bc92bf0f6021a0d1321.patch && \ | ||||
|   wget https://github.com/jax-ml/ml_dtypes/commit/d4e6d035ecda073eab8bcf60f4eef572ee7087e6.patch && \ | ||||
|   patch -p1 < b969f76914d6b30676721bc92bf0f6021a0d1321.patch && \ | ||||
|   patch -p1 < d4e6d035ecda073eab8bcf60f4eef572ee7087e6.patch && \ | ||||
|   python3 setup.py bdist_wheel && \ | ||||
|   pip3 install dist/*.whl && \ | ||||
|   rm -rf ml_dtypes | ||||
|  | ||||
| @ -41,14 +41,11 @@ fbscribelogger==0.1.7 | ||||
| #Pinned versions: 0.1.6 | ||||
| #test that import: | ||||
|  | ||||
| flatbuffers==2.0 ; platform_machine != "s390x" | ||||
| flatbuffers==2.0 | ||||
| #Description: cross platform serialization library | ||||
| #Pinned versions: 2.0 | ||||
| #test that import: | ||||
|  | ||||
| flatbuffers ; platform_machine == "s390x" | ||||
| #Description: cross platform serialization library; Newer version is required on s390x for new python version | ||||
|  | ||||
| hypothesis==5.35.1 | ||||
| # Pin hypothesis to avoid flakiness: https://github.com/pytorch/pytorch/issues/31136 | ||||
| #Description: advanced library for generating parametrized tests | ||||
| @ -105,10 +102,10 @@ networkx==2.8.8 | ||||
| #Pinned versions: 2.8.8 | ||||
| #test that import: functorch | ||||
|  | ||||
| ninja==1.11.1.3 | ||||
| #Description: build system. Used in some tests. Used in build to generate build | ||||
| #time tracing information | ||||
| #Pinned versions: 1.11.1.3 | ||||
| #ninja | ||||
| #Description: build system.  Note that it install from | ||||
| #here breaks things so it is commented out | ||||
| #Pinned versions: 1.10.0.post1 | ||||
| #test that import: run_test.py, test_cpp_extensions_aot.py,test_determination.py | ||||
|  | ||||
| numba==0.49.0 ; python_version < "3.9" | ||||
| @ -368,6 +365,7 @@ PyYAML | ||||
| pyzstd | ||||
| setuptools | ||||
|  | ||||
| ninja==1.11.1 ; platform_machine == "aarch64" | ||||
| scons==4.5.2 ; platform_machine == "aarch64" | ||||
|  | ||||
| pulp==2.9.0 ; python_version >= "3.8" | ||||
|  | ||||
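The requirements-ci.txt hunk above relies on PEP 508 environment markers (e.g. `platform_machine == "aarch64"`, `python_version >= "3.8"`) to gate which pins apply on a given builder. A small sketch, assuming the `packaging` library is available, of how such a marker evaluates for different platforms:

```python
# Illustrative only: evaluates the kind of environment markers used in
# requirements-ci.txt against explicit (hypothetical) environments.
from packaging.markers import Marker

marker = Marker('platform_machine == "aarch64"')
print(marker.evaluate())                                  # current interpreter's platform
print(marker.evaluate({"platform_machine": "aarch64"}))   # True
print(marker.evaluate({"platform_machine": "x86_64"}))    # False
```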
| @ -1 +1 @@ | ||||
| 3.3.0 | ||||
| 3.3.1 | ||||
|  | ||||
| @ -50,6 +50,13 @@ RUN if [ -n "${PROTOBUF}" ]; then bash ./install_protobuf.sh; fi | ||||
| RUN rm install_protobuf.sh | ||||
| ENV INSTALLED_PROTOBUF ${PROTOBUF} | ||||
|  | ||||
| # (optional) Install database packages like LMDB and LevelDB | ||||
| ARG DB | ||||
| COPY ./common/install_db.sh install_db.sh | ||||
| RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi | ||||
| RUN rm install_db.sh | ||||
| ENV INSTALLED_DB ${DB} | ||||
|  | ||||
| # (optional) Install vision packages like OpenCV | ||||
| ARG VISION | ||||
| COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./ | ||||
|  | ||||
| @ -50,6 +50,13 @@ RUN if [ -n "${PROTOBUF}" ]; then bash ./install_protobuf.sh; fi | ||||
| RUN rm install_protobuf.sh | ||||
| ENV INSTALLED_PROTOBUF ${PROTOBUF} | ||||
|  | ||||
| # (optional) Install database packages like LMDB and LevelDB | ||||
| ARG DB | ||||
| COPY ./common/install_db.sh install_db.sh | ||||
| RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi | ||||
| RUN rm install_db.sh | ||||
| ENV INSTALLED_DB ${DB} | ||||
|  | ||||
| # (optional) Install vision packages like OpenCV | ||||
| ARG VISION | ||||
| COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./ | ||||
|  | ||||
| @ -77,6 +77,13 @@ COPY triton_version.txt triton_version.txt | ||||
| RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi | ||||
| RUN rm install_triton.sh common_utils.sh triton-xpu.txt triton_version.txt | ||||
|  | ||||
| # (optional) Install database packages like LMDB and LevelDB | ||||
| ARG DB | ||||
| COPY ./common/install_db.sh install_db.sh | ||||
| RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi | ||||
| RUN rm install_db.sh | ||||
| ENV INSTALLED_DB ${DB} | ||||
|  | ||||
| # (optional) Install vision packages like OpenCV | ||||
| ARG VISION | ||||
| COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./ | ||||
|  | ||||
| @ -1,12 +1,11 @@ | ||||
| ARG UBUNTU_VERSION | ||||
|  | ||||
| # ======================================== | ||||
| # Builder stage | ||||
| # ======================================== | ||||
| FROM ubuntu:${UBUNTU_VERSION} AS builder | ||||
| FROM ubuntu:${UBUNTU_VERSION} | ||||
|  | ||||
| ARG UBUNTU_VERSION | ||||
|  | ||||
| ENV DEBIAN_FRONTEND noninteractive | ||||
|  | ||||
| ARG CLANG_VERSION | ||||
|  | ||||
| # Install common dependencies (so that this step can be cached separately) | ||||
| @ -48,7 +47,7 @@ RUN bash ./install_gcc.sh && rm install_gcc.sh | ||||
|  | ||||
| # Install lcov for C++ code coverage | ||||
| COPY ./common/install_lcov.sh install_lcov.sh | ||||
| RUN bash ./install_lcov.sh && rm install_lcov.sh | ||||
| RUN  bash ./install_lcov.sh && rm install_lcov.sh | ||||
|  | ||||
| # Install cuda and cudnn | ||||
| ARG CUDA_VERSION | ||||
| @ -75,6 +74,13 @@ RUN if [ -n "${PROTOBUF}" ]; then bash ./install_protobuf.sh; fi | ||||
| RUN rm install_protobuf.sh | ||||
| ENV INSTALLED_PROTOBUF ${PROTOBUF} | ||||
|  | ||||
| # (optional) Install database packages like LMDB and LevelDB | ||||
| ARG DB | ||||
| COPY ./common/install_db.sh install_db.sh | ||||
| RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi | ||||
| RUN rm install_db.sh | ||||
| ENV INSTALLED_DB ${DB} | ||||
|  | ||||
| # (optional) Install vision packages like OpenCV | ||||
| ARG VISION | ||||
| COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./ | ||||
| @ -82,6 +88,18 @@ RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi | ||||
| RUN rm install_vision.sh cache_vision_models.sh common_utils.sh | ||||
| ENV INSTALLED_VISION ${VISION} | ||||
|  | ||||
| # (optional) Install Vulkan SDK | ||||
| ARG VULKAN_SDK_VERSION | ||||
| COPY ./common/install_vulkan_sdk.sh install_vulkan_sdk.sh | ||||
| RUN if [ -n "${VULKAN_SDK_VERSION}" ]; then bash ./install_vulkan_sdk.sh; fi | ||||
| RUN rm install_vulkan_sdk.sh | ||||
|  | ||||
| # (optional) Install swiftshader | ||||
| ARG SWIFTSHADER | ||||
| COPY ./common/install_swiftshader.sh install_swiftshader.sh | ||||
| RUN if [ -n "${SWIFTSHADER}" ]; then bash ./install_swiftshader.sh; fi | ||||
| RUN rm install_swiftshader.sh | ||||
|  | ||||
| # (optional) Install non-default CMake version | ||||
| ARG CMAKE_VERSION | ||||
| COPY ./common/install_cmake.sh install_cmake.sh | ||||
| @ -170,76 +188,19 @@ COPY ./common/install_openmpi.sh install_openmpi.sh | ||||
| RUN if [ -n "${CUDA_VERSION}" ]; then bash install_openmpi.sh; fi | ||||
| RUN rm install_openmpi.sh | ||||
|  | ||||
| # Include BUILD_ENVIRONMENT environment variable in image | ||||
| ARG BUILD_ENVIRONMENT | ||||
| ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT} | ||||
|  | ||||
| # Install LLVM dev version (Defined in the pytorch/builder github repository) | ||||
| ARG SKIP_LLVM_SRC_BUILD_INSTALL | ||||
| COPY --from=pytorch/llvm:9.0.1 /opt/llvm /opt/llvm | ||||
| RUN if [ -n "${SKIP_LLVM_SRC_BUILD_INSTALL}" ]; then set -eu; rm -rf /opt/llvm; fi | ||||
|  | ||||
| # ======================================== | ||||
| # Final stage | ||||
| # ======================================== | ||||
| FROM ubuntu:${UBUNTU_VERSION} | ||||
|  | ||||
| # Pass all arguments from the command line to this stage | ||||
| ARG UBUNTU_VERSION | ||||
| ARG CLANG_VERSION | ||||
| ARG LLVMDEV | ||||
| ARG KATEX | ||||
| ARG ANACONDA_PYTHON_VERSION | ||||
| ARG CONDA_CMAKE | ||||
| ARG DOCS | ||||
| ARG GCC_VERSION | ||||
| ARG CUDA_VERSION | ||||
| ARG UCX_COMMIT | ||||
| ARG UCC_COMMIT | ||||
| ARG PROTOBUF | ||||
| ARG VISION | ||||
| ARG CMAKE_VERSION | ||||
| ARG NINJA_VERSION | ||||
| ARG INDUCTOR_BENCHMARKS | ||||
| ARG TRITON | ||||
| ARG TRITON_CPU | ||||
| ARG EXECUTORCH | ||||
| ARG HALIDE | ||||
| ARG ONNX | ||||
| ARG ACL | ||||
| ARG BUILD_ENVIRONMENT | ||||
|  | ||||
| ENV DEBIAN_FRONTEND noninteractive | ||||
| ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION | ||||
| ENV PATH /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/bin:/opt/conda/bin:$PATH | ||||
| ENV DOCS=$DOCS | ||||
| ENV DESIRED_CUDA ${CUDA_VERSION} | ||||
| ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:/opt/cache/bin:$PATH | ||||
| ENV UCX_COMMIT $UCX_COMMIT | ||||
| ENV UCC_COMMIT $UCC_COMMIT | ||||
| ENV UCX_HOME /usr | ||||
| ENV UCC_HOME /usr | ||||
| ENV INSTALLED_PROTOBUF ${PROTOBUF} | ||||
| ENV INSTALLED_VISION ${VISION} | ||||
| ENV OPENSSL_ROOT_DIR /opt/openssl | ||||
| ENV OPENSSL_DIR /opt/openssl | ||||
| ENV INSTALLED_ACL ${ACL} | ||||
| ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT} | ||||
| # AWS specific CUDA build guidance | ||||
| ENV TORCH_CUDA_ARCH_LIST Maxwell | ||||
| ENV TORCH_NVCC_FLAGS "-Xfatbin -compress-all" | ||||
| ENV CUDA_PATH /usr/local/cuda | ||||
|  | ||||
| # Copy essential system directories and files from builder stage | ||||
| COPY --from=builder /etc/apt /etc/apt | ||||
| COPY --from=builder /usr/local /usr/local | ||||
| COPY --from=builder /opt /opt | ||||
| COPY --from=builder /usr/include /usr/include | ||||
| COPY --from=builder /usr/lib /usr/lib | ||||
| COPY --from=builder /usr/bin /usr/bin | ||||
| COPY --from=builder /usr/share /usr/share | ||||
|  | ||||
| # Copy the user setup | ||||
| COPY --from=builder /etc/passwd /etc/passwd | ||||
| COPY --from=builder /etc/group /etc/group | ||||
| COPY --from=builder /etc/shadow /etc/shadow | ||||
| COPY --from=builder /etc/sudoers.d /etc/sudoers.d | ||||
|  | ||||
| # Create and switch to jenkins user | ||||
| USER jenkins | ||||
| CMD ["bash"] | ||||
| CMD ["bash"] | ||||
|  | ||||
| @ -111,6 +111,12 @@ case ${DESIRED_PYTHON} in | ||||
|     ;; | ||||
| esac | ||||
|  | ||||
| if [[ "$DESIRED_DEVTOOLSET" == *"cxx11-abi"* ]]; then | ||||
|     export _GLIBCXX_USE_CXX11_ABI=1 | ||||
| else | ||||
|     export _GLIBCXX_USE_CXX11_ABI=0 | ||||
| fi | ||||
|  | ||||
| if [[ "$DESIRED_CUDA" == *"rocm"* ]]; then | ||||
|     echo "Calling build_amd.py at $(date)" | ||||
|     python tools/amd_build/build_amd.py | ||||
| @ -203,6 +209,12 @@ if [[ -n "$BUILD_PYTHONLESS" ]]; then | ||||
|  | ||||
|     mkdir -p /tmp/$LIBTORCH_HOUSE_DIR | ||||
|  | ||||
|     if [[ "$DESIRED_DEVTOOLSET" == *"cxx11-abi"* ]]; then | ||||
|         LIBTORCH_ABI="cxx11-abi-" | ||||
|     else | ||||
|         LIBTORCH_ABI= | ||||
|     fi | ||||
|  | ||||
|     zip -rq /tmp/$LIBTORCH_HOUSE_DIR/libtorch-$LIBTORCH_ABI$LIBTORCH_VARIANT-$PYTORCH_BUILD_VERSION.zip libtorch | ||||
|     cp /tmp/$LIBTORCH_HOUSE_DIR/libtorch-$LIBTORCH_ABI$LIBTORCH_VARIANT-$PYTORCH_BUILD_VERSION.zip \ | ||||
|        /tmp/$LIBTORCH_HOUSE_DIR/libtorch-$LIBTORCH_ABI$LIBTORCH_VARIANT-latest.zip | ||||
| @ -321,8 +333,8 @@ for pkg in /$WHEELHOUSE_DIR/torch_no_python*.whl /$WHEELHOUSE_DIR/torch*linux*.w | ||||
|             # ROCm workaround for roctracer dlopens | ||||
|             if [[ "$DESIRED_CUDA" == *"rocm"* ]]; then | ||||
|                 patchedpath=$(fname_without_so_number $destpath) | ||||
|             # Keep the so number for XPU dependencies | ||||
|             elif [[ "$DESIRED_CUDA" == *"xpu"* ]]; then | ||||
|             # Keep the so number for XPU dependencies and libgomp.so.1 to avoid twice load | ||||
|             elif [[ "$DESIRED_CUDA" == *"xpu"* || "$filename" == "libgomp.so.1" ]]; then | ||||
|                 patchedpath=$destpath | ||||
|             else | ||||
|                 patchedpath=$(fname_with_sha256 $destpath) | ||||
|  | ||||
| @ -95,6 +95,12 @@ python setup.py clean | ||||
| retry pip install -qr requirements.txt | ||||
| retry pip install -q numpy==2.0.1 | ||||
|  | ||||
| if [[ "$DESIRED_DEVTOOLSET" == *"cxx11-abi"* ]]; then | ||||
|     export _GLIBCXX_USE_CXX11_ABI=1 | ||||
| else | ||||
|     export _GLIBCXX_USE_CXX11_ABI=0 | ||||
| fi | ||||
|  | ||||
| if [[ "$DESIRED_CUDA" == *"rocm"* ]]; then | ||||
|     echo "Calling build_amd.py at $(date)" | ||||
|     python tools/amd_build/build_amd.py | ||||
| @ -163,6 +169,12 @@ fi | ||||
|  | ||||
| ) | ||||
|  | ||||
| if [[ "$DESIRED_DEVTOOLSET" == *"cxx11-abi"* ]]; then | ||||
|     LIBTORCH_ABI="cxx11-abi-" | ||||
| else | ||||
|     LIBTORCH_ABI= | ||||
| fi | ||||
|  | ||||
| ( | ||||
|     set -x | ||||
|  | ||||
|  | ||||
| @ -59,28 +59,30 @@ else | ||||
|   export install_root="$(dirname $(which python))/../lib/python${py_dot}/site-packages/torch/" | ||||
| fi | ||||
|  | ||||
| ############################################################################### | ||||
| # Setup XPU ENV | ||||
| ############################################################################### | ||||
| if [[ "$DESIRED_CUDA" == 'xpu' ]]; then | ||||
|   set +u | ||||
|   # Refer https://www.intel.com/content/www/us/en/developer/articles/tool/pytorch-prerequisites-for-intel-gpus.html | ||||
|   source /opt/intel/oneapi/compiler/latest/env/vars.sh | ||||
|   source /opt/intel/oneapi/pti/latest/env/vars.sh | ||||
| fi | ||||
|  | ||||
| ############################################################################### | ||||
| # Check GCC ABI | ||||
| ############################################################################### | ||||
|  | ||||
| # NOTE: As of https://github.com/pytorch/pytorch/issues/126551 we only produce | ||||
| #       wheels with cxx11-abi | ||||
| # NOTE [ Building libtorch with old vs. new gcc ABI ] | ||||
| # | ||||
| # Packages built with one version of ABI could not be linked against by client | ||||
| # C++ libraries that were compiled using the other version of ABI. Since both | ||||
| # gcc ABIs are still common in the wild, we need to support both ABIs. Currently: | ||||
| # | ||||
| # - All the nightlies built on CentOS 7 + devtoolset7 use the old gcc ABI. | ||||
| # - All the nightlies built on Ubuntu 16.04 + gcc 5.4 use the new gcc ABI. | ||||
|  | ||||
| echo "Checking that the gcc ABI is what we expect" | ||||
| if [[ "$(uname)" != 'Darwin' ]]; then | ||||
|   function is_expected() { | ||||
|     if [[ "$1" -gt 0 || "$1" == "ON " ]]; then | ||||
|       echo 1 | ||||
|     if [[ "$DESIRED_DEVTOOLSET" == *"cxx11-abi"* || "$DESIRED_CUDA" == *"rocm"* ]]; then | ||||
|       if [[ "$1" -gt 0 || "$1" == "ON " ]]; then | ||||
|         echo 1 | ||||
|       fi | ||||
|     else | ||||
|       if [[ -z "$1" || "$1" == 0 || "$1" == "OFF" ]]; then | ||||
|         echo 1 | ||||
|       fi | ||||
|     fi | ||||
|   } | ||||
|  | ||||
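As a side note (not part of the script), a minimal sketch of how to check which ABI a given libtorch build uses, assuming install_root is set as above; mangled names from the new ABI contain the "__cxx11" inline namespace:

# Hedged probe for the gcc dual-ABI note above (illustrative only).
if nm -D "${install_root}/lib/libtorch_cpu.so" | grep -q '__cxx11'; then
  echo "libtorch_cpu.so exposes cxx11 (new) ABI symbols"
else
  echo "libtorch_cpu.so looks like a pre-cxx11 (old) ABI build"
fi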
| @ -196,11 +198,35 @@ setup_link_flags () { | ||||
|  | ||||
| TEST_CODE_DIR="$(dirname $(realpath ${BASH_SOURCE[0]}))/test_example_code" | ||||
| build_and_run_example_cpp () { | ||||
|   if [[ "$DESIRED_DEVTOOLSET" == *"cxx11-abi"* ]]; then | ||||
|     GLIBCXX_USE_CXX11_ABI=1 | ||||
|   else | ||||
|     GLIBCXX_USE_CXX11_ABI=0 | ||||
|   fi | ||||
|   setup_link_flags | ||||
|   g++ ${TEST_CODE_DIR}/$1.cpp -I${install_root}/include -I${install_root}/include/torch/csrc/api/include -std=gnu++17 -L${install_root}/lib ${REF_LIB} ${ADDITIONAL_LINKER_FLAGS} -ltorch $TORCH_CPU_LINK_FLAGS $TORCH_CUDA_LINK_FLAGS $C10_LINK_FLAGS -o $1 | ||||
|   g++ ${TEST_CODE_DIR}/$1.cpp -I${install_root}/include -I${install_root}/include/torch/csrc/api/include -D_GLIBCXX_USE_CXX11_ABI=$GLIBCXX_USE_CXX11_ABI -std=gnu++17 -L${install_root}/lib ${REF_LIB} ${ADDITIONAL_LINKER_FLAGS} -ltorch $TORCH_CPU_LINK_FLAGS $TORCH_CUDA_LINK_FLAGS $C10_LINK_FLAGS -o $1 | ||||
|   ./$1 | ||||
| } | ||||
|  | ||||
| build_example_cpp_with_incorrect_abi () { | ||||
|   if [[ "$DESIRED_DEVTOOLSET" == *"cxx11-abi"* ]]; then | ||||
|     GLIBCXX_USE_CXX11_ABI=0 | ||||
|   else | ||||
|     GLIBCXX_USE_CXX11_ABI=1 | ||||
|   fi | ||||
|   set +e | ||||
|   setup_link_flags | ||||
|   g++ ${TEST_CODE_DIR}/$1.cpp -I${install_root}/include -I${install_root}/include/torch/csrc/api/include -D_GLIBCXX_USE_CXX11_ABI=$GLIBCXX_USE_CXX11_ABI -std=gnu++17 -L${install_root}/lib ${REF_LIB} ${ADDITIONAL_LINKER_FLAGS} -ltorch $TORCH_CPU_LINK_FLAGS $TORCH_CUDA_LINK_FLAGS $C10_LINK_FLAGS -o $1 | ||||
|   ERRCODE=$? | ||||
|   set -e | ||||
|   if [ "$ERRCODE" -eq "0" ]; then | ||||
|     echo "Building example with incorrect ABI didn't throw error. Aborting." | ||||
|     exit 1 | ||||
|   else | ||||
|     echo "Building example with incorrect ABI throws expected error. Proceeding." | ||||
|   fi | ||||
| } | ||||
|  | ||||
| ############################################################################### | ||||
| # Check simple Python/C++ calls | ||||
| ############################################################################### | ||||
| @ -210,6 +236,11 @@ if [[ "$PACKAGE_TYPE" == 'libtorch' ]]; then | ||||
|     export LD_LIBRARY_PATH=/usr/local/cuda/lib64 | ||||
|   fi | ||||
|   build_and_run_example_cpp simple-torch-test | ||||
|   # `_GLIBCXX_USE_CXX11_ABI` is always ignored by gcc in devtoolset7, so we test | ||||
|   # the expected failure case for Ubuntu 16.04 + gcc 5.4 only. | ||||
|   if [[ "$DESIRED_DEVTOOLSET" == *"cxx11-abi"* ]]; then | ||||
|     build_example_cpp_with_incorrect_abi simple-torch-test | ||||
|   fi | ||||
| else | ||||
|   pushd /tmp | ||||
|   python -c 'import torch' | ||||
|  | ||||
| @ -202,7 +202,7 @@ function install_torchrec_and_fbgemm() { | ||||
|  | ||||
| function clone_pytorch_xla() { | ||||
|   if [[ ! -d ./xla ]]; then | ||||
|     git clone --recursive --quiet https://github.com/pytorch/xla.git | ||||
|     git clone --recursive -b r2.7 https://github.com/pytorch/xla.git | ||||
|     pushd xla | ||||
|     # pin the xla hash so that we don't get broken by changes to xla | ||||
|     git checkout "$(cat ../.github/ci_commit_pins/xla.txt)" | ||||
|  | ||||
| @ -1,31 +1,50 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| # Script for installing sccache on the xla build job, which uses xla's docker | ||||
| # image and doesn't have sccache installed on it.  This is mostly copied from | ||||
| # .ci/docker/install_cache.sh.  Changes are: removing checks that will always | ||||
| # return the same thing, ex checks for for rocm, CUDA, and changing the path | ||||
| # where sccache is installed, and not changing /etc/environment. | ||||
| # image, which has sccache installed but doesn't write the stubs.  This is | ||||
| # mostly copied from .ci/docker/install_cache.sh.  Changes are: removing checks | ||||
| # that will always return the same thing, e.g. checks for rocm, CUDA, changing | ||||
| # the path where sccache is installed, not changing /etc/environment, and not | ||||
| # installing/downloading sccache as it is already in the docker image. | ||||
|  | ||||
| set -ex -o pipefail | ||||
|  | ||||
| install_binary() { | ||||
|   echo "Downloading sccache binary from S3 repo" | ||||
|   curl --retry 3 https://s3.amazonaws.com/ossci-linux/sccache -o /tmp/cache/bin/sccache | ||||
| } | ||||
|  | ||||
| mkdir -p /tmp/cache/bin | ||||
| mkdir -p /tmp/cache/lib | ||||
| export PATH="/tmp/cache/bin:$PATH" | ||||
|  | ||||
| install_binary | ||||
| chmod a+x /tmp/cache/bin/sccache | ||||
|  | ||||
| function write_sccache_stub() { | ||||
|   # Unset LD_PRELOAD for ps because of asan + ps issues | ||||
|   # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90589 | ||||
|   # shellcheck disable=SC2086 | ||||
|   # shellcheck disable=SC2059 | ||||
|   printf "#!/bin/sh\nif [ \$(env -u LD_PRELOAD ps -p \$PPID -o comm=) != sccache ]; then\n  exec sccache $(which $1) \"\$@\"\nelse\n  exec $(which $1) \"\$@\"\nfi" > "/tmp/cache/bin/$1" | ||||
|   if [ "$1" == "gcc" ]; then | ||||
|     # Do not call sccache recursively when dumping preprocessor argument | ||||
|     # For some reason it's very important for the first cached nvcc invocation | ||||
|     cat >"/tmp/cache/bin/$1" <<EOF | ||||
| #!/bin/sh | ||||
|  | ||||
| # sccache does not support -E flag, so we need to call the original compiler directly in order to avoid calling this wrapper recursively | ||||
| for arg in "\$@"; do | ||||
|   if [ "\$arg" = "-E" ]; then | ||||
|     exec $(which "$1") "\$@" | ||||
|   fi | ||||
| done | ||||
|  | ||||
| if [ \$(env -u LD_PRELOAD ps -p \$PPID -o comm=) != sccache ]; then | ||||
|   exec sccache $(which "$1") "\$@" | ||||
| else | ||||
|   exec $(which "$1") "\$@" | ||||
| fi | ||||
| EOF | ||||
|   else | ||||
|     cat >"/tmp/cache/bin/$1" <<EOF | ||||
| #!/bin/sh | ||||
|  | ||||
| if [ \$(env -u LD_PRELOAD ps -p \$PPID -o comm=) != sccache ]; then | ||||
|   exec sccache $(which "$1") "\$@" | ||||
| else | ||||
|   exec $(which "$1") "\$@" | ||||
| fi | ||||
| EOF | ||||
|   fi | ||||
|   chmod a+x "/tmp/cache/bin/$1" | ||||
| } | ||||
|  | ||||
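A hedged usage sketch for write_sccache_stub; the compiler list below is an assumption (the rest of the script, not shown here, decides which tools to wrap):

# Hypothetical invocation: wrap whichever compilers exist on PATH so builds
# that call them transparently go through sccache.
for compiler in cc c++ gcc g++ clang clang++; do
  if command -v "$compiler" >/dev/null 2>&1; then
    write_sccache_stub "$compiler"
  fi
done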
|  | ||||
| @ -121,9 +121,9 @@ def main() -> None: | ||||
|         else: | ||||
|             install_root = Path(distutils.sysconfig.get_python_lib()) / "torch" | ||||
|  | ||||
|     libtorch_cpu_path = str(install_root / "lib" / "libtorch_cpu.so") | ||||
|     # NOTE: All binaries are built with cxx11abi now | ||||
|     check_lib_symbols_for_abi_correctness(libtorch_cpu_path, False) | ||||
|     libtorch_cpu_path = install_root / "lib" / "libtorch_cpu.so" | ||||
|     pre_cxx11_abi = "cxx11-abi" not in os.getenv("DESIRED_DEVTOOLSET", "") | ||||
|     check_lib_symbols_for_abi_correctness(libtorch_cpu_path, pre_cxx11_abi) | ||||
|  | ||||
|  | ||||
| if __name__ == "__main__": | ||||
|  | ||||
.ci/pytorch/smoke_test/check_gomp.py (new file, 74 lines)
							| @ -0,0 +1,74 @@ | ||||
| import ctypes | ||||
| import os | ||||
| import sys | ||||
| from pathlib import Path | ||||
|  | ||||
|  | ||||
| def get_gomp_thread(): | ||||
|     """ | ||||
|     Retrieves the maximum number of OpenMP threads after loading the `libgomp.so.1` library | ||||
|     and the `libtorch_cpu.so` library. It then queries the | ||||
|     maximum number of threads available for OpenMP parallel regions using the | ||||
|     `omp_get_max_threads` function. | ||||
|  | ||||
|     Returns: | ||||
|         int: The maximum number of OpenMP threads available. | ||||
|  | ||||
|     Notes: | ||||
|         - The function assumes the default path for `libgomp.so.1` on AlmaLinux OS. | ||||
|         - The path to `libtorch_cpu.so` is constructed based on the Python executable's | ||||
|           installation directory. | ||||
|         - This function is specific to environments where PyTorch and OpenMP are used | ||||
|           together and may require adjustments for other setups. | ||||
|     """ | ||||
|     python_path = Path(sys.executable).resolve() | ||||
|     python_prefix = ( | ||||
|         python_path.parent.parent | ||||
|     )  # Typically goes to the Python installation root | ||||
|  | ||||
|     # Get the additional ABI flags (if any); it may be an empty string. | ||||
|     abiflags = getattr(sys, "abiflags", "") | ||||
|  | ||||
|     # Construct the Python directory name correctly (e.g., "python3.13t"). | ||||
|     python_version = ( | ||||
|         f"python{sys.version_info.major}.{sys.version_info.minor}{abiflags}" | ||||
|     ) | ||||
|  | ||||
|     libtorch_cpu_path = ( | ||||
|         python_prefix | ||||
|         / "lib" | ||||
|         / python_version | ||||
|         / "site-packages" | ||||
|         / "torch" | ||||
|         / "lib" | ||||
|         / "libtorch_cpu.so" | ||||
|     ) | ||||
|  | ||||
|     # use the default gomp path of AlmaLinux OS | ||||
|     libgomp_path = "/usr/lib64/libgomp.so.1" | ||||
|  | ||||
|     os.environ["GOMP_CPU_AFFINITY"] = "0-3" | ||||
|  | ||||
|     libgomp = ctypes.CDLL(libgomp_path) | ||||
|     # Rebinding the same name is intentional: omp_get_max_threads is looked up | ||||
|     # through the libtorch_cpu.so handle and resolved from whichever OpenMP | ||||
|     # runtime libtorch_cpu ends up using. | ||||
|     libgomp = ctypes.CDLL(libtorch_cpu_path) | ||||
|  | ||||
|     libgomp.omp_get_max_threads.restype = ctypes.c_int | ||||
|     libgomp.omp_get_max_threads.argtypes = [] | ||||
|  | ||||
|     omp_max_threads = libgomp.omp_get_max_threads() | ||||
|     return omp_max_threads | ||||
|  | ||||
|  | ||||
| def main(): | ||||
|     omp_max_threads = get_gomp_thread() | ||||
|     print( | ||||
|         f"omp_max_threads after loading libgomp.so and libtorch_cpu.so: {omp_max_threads}" | ||||
|     ) | ||||
|     if omp_max_threads == 1: | ||||
|         raise RuntimeError( | ||||
|             "omp_max_threads is 1. Check whether libgomp.so is loaded twice." | ||||
|         ) | ||||
|  | ||||
|  | ||||
| if __name__ == "__main__": | ||||
|     main() | ||||
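A hedged example of how this check behaves when run against an installed torch wheel (output values are illustrative):

# GOMP_CPU_AFFINITY is pinned to "0-3" inside the script, so a healthy build
# reports more than one thread.
python .ci/pytorch/smoke_test/check_gomp.py
# omp_max_threads after loading libgomp.so and libtorch_cpu.so: 4
# A value of 1 suggests libgomp was loaded twice and the script raises RuntimeError.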
| @ -1173,8 +1173,9 @@ build_xla() { | ||||
|   apply_patches | ||||
|   SITE_PACKAGES="$(python -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())')" | ||||
|   # These functions are defined in .circleci/common.sh in pytorch/xla repo | ||||
|   retry install_deps_pytorch_xla $XLA_DIR $USE_CACHE | ||||
|   retry install_pre_deps_pytorch_xla $XLA_DIR $USE_CACHE | ||||
|   CMAKE_PREFIX_PATH="${SITE_PACKAGES}/torch:${CMAKE_PREFIX_PATH}" XLA_SANDBOX_BUILD=1 build_torch_xla $XLA_DIR | ||||
|   retry install_post_deps_pytorch_xla | ||||
|   assert_git_not_dirty | ||||
| } | ||||
|  | ||||
| @ -1474,8 +1475,7 @@ test_executorch() { | ||||
|   pushd /executorch | ||||
|  | ||||
|   export PYTHON_EXECUTABLE=python | ||||
|   export EXECUTORCH_BUILD_PYBIND=ON | ||||
|   export CMAKE_ARGS="-DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON" | ||||
|   export CMAKE_ARGS="-DEXECUTORCH_BUILD_PYBIND=ON -DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON" | ||||
|  | ||||
|   # For llama3 | ||||
|   bash examples/models/llama3_2_vision/install_requirements.sh | ||||
| @ -1619,7 +1619,6 @@ elif [[ "${TEST_CONFIG}" == *inductor_cpp_wrapper* ]]; then | ||||
|   install_torchvision | ||||
|   checkout_install_torchbench hf_T5 llama moco | ||||
|   PYTHONPATH=$(pwd)/torchbench test_inductor_cpp_wrapper_shard "$SHARD_NUMBER" | ||||
|   test_inductor_aoti | ||||
| elif [[ "${TEST_CONFIG}" == *inductor* ]]; then | ||||
|   install_torchvision | ||||
|   test_inductor_shard "${SHARD_NUMBER}" | ||||
|  | ||||
| @ -128,6 +128,7 @@ goto end | ||||
| :libtorch | ||||
| echo "install and test libtorch" | ||||
|  | ||||
| if "%VC_YEAR%" == "2019" powershell internal\vs2019_install.ps1 | ||||
| if "%VC_YEAR%" == "2022" powershell internal\vs2022_install.ps1 | ||||
|  | ||||
| if ERRORLEVEL 1 exit /b 1 | ||||
| @ -139,6 +140,10 @@ pushd tmp\libtorch | ||||
|  | ||||
| set VC_VERSION_LOWER=17 | ||||
| set VC_VERSION_UPPER=18 | ||||
| IF "%VC_YEAR%" == "2019" ( | ||||
|     set VC_VERSION_LOWER=16 | ||||
|     set VC_VERSION_UPPER=17 | ||||
| ) | ||||
|  | ||||
| for /f "usebackq tokens=*" %%i in (`"%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" -legacy -products * -version [%VC_VERSION_LOWER%^,%VC_VERSION_UPPER%^) -property installationPath`) do ( | ||||
|     if exist "%%i" if exist "%%i\VC\Auxiliary\Build\vcvarsall.bat" ( | ||||
|  | ||||
| @ -70,6 +70,7 @@ echo "install and test libtorch" | ||||
| pip install cmake | ||||
| echo "installing cmake" | ||||
|  | ||||
| if "%VC_YEAR%" == "2019" powershell internal\vs2019_install.ps1 | ||||
| if "%VC_YEAR%" == "2022" powershell internal\vs2022_install.ps1 | ||||
|  | ||||
| if ERRORLEVEL 1 exit /b 1 | ||||
| @ -82,6 +83,10 @@ pushd tmp\libtorch | ||||
|  | ||||
| set VC_VERSION_LOWER=17 | ||||
| set VC_VERSION_UPPER=18 | ||||
| IF "%VC_YEAR%" == "2019" ( | ||||
|     set VC_VERSION_LOWER=16 | ||||
|     set VC_VERSION_UPPER=17 | ||||
| ) | ||||
|  | ||||
| for /f "usebackq tokens=*" %%i in (`"%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" -legacy -products * -version [%VC_VERSION_LOWER%^,%VC_VERSION_UPPER%^) -property installationPath`) do ( | ||||
|     if exist "%%i" if exist "%%i\VC\Auxiliary\Build\vcvarsall.bat" ( | ||||
|  | ||||
| @ -1,8 +1,12 @@ | ||||
| if "%VC_YEAR%" == "2019" powershell windows/internal/vs2019_install.ps1 | ||||
| if "%VC_YEAR%" == "2022" powershell windows/internal/vs2022_install.ps1 | ||||
|  | ||||
| set VC_VERSION_LOWER=17 | ||||
| set VC_VERSION_UPPER=18 | ||||
|  | ||||
| if "%VC_YEAR%" == "2019" ( | ||||
|     set VC_VERSION_LOWER=16 | ||||
|     set VC_VERSION_UPPER=17 | ||||
| ) | ||||
|  | ||||
| for /f "usebackq tokens=*" %%i in (`"%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe"  -products Microsoft.VisualStudio.Product.BuildTools -version [%VC_VERSION_LOWER%^,%VC_VERSION_UPPER%^) -property installationPath`) do ( | ||||
|     if exist "%%i" if exist "%%i\VC\Auxiliary\Build\vcvarsall.bat" ( | ||||
|  | ||||
.ci/pytorch/windows/internal/vs2019_install.ps1 (new file, 48 lines)
							| @ -0,0 +1,48 @@ | ||||
| # https://developercommunity.visualstudio.com/t/install-specific-version-of-vs-component/1142479 | ||||
| # https://docs.microsoft.com/en-us/visualstudio/releases/2019/history#release-dates-and-build-numbers | ||||
|  | ||||
| # 16.8.6 BuildTools | ||||
| $VS_DOWNLOAD_LINK = "https://ossci-windows.s3.us-east-1.amazonaws.com/vs16.8.6_BuildTools.exe" | ||||
| $COLLECT_DOWNLOAD_LINK = "https://aka.ms/vscollect.exe" | ||||
| $VS_INSTALL_ARGS = @("--nocache","--quiet","--wait", "--add Microsoft.VisualStudio.Workload.VCTools", | ||||
|                                                      "--add Microsoft.Component.MSBuild", | ||||
|                                                      "--add Microsoft.VisualStudio.Component.Roslyn.Compiler", | ||||
|                                                      "--add Microsoft.VisualStudio.Component.TextTemplating", | ||||
|                                                      "--add Microsoft.VisualStudio.Component.VC.CoreIde", | ||||
|                                                      "--add Microsoft.VisualStudio.Component.VC.Redist.14.Latest", | ||||
|                                                      "--add Microsoft.VisualStudio.ComponentGroup.NativeDesktop.Core", | ||||
|                                                      "--add Microsoft.VisualStudio.Component.VC.Tools.x86.x64", | ||||
|                                                      "--add Microsoft.VisualStudio.ComponentGroup.NativeDesktop.Win81") | ||||
|  | ||||
| curl.exe --retry 3 -kL $VS_DOWNLOAD_LINK --output vs_installer.exe | ||||
| if ($LASTEXITCODE -ne 0) { | ||||
|     echo "Download of the VS 2019 Version 16.8.5 installer failed" | ||||
|     exit 1 | ||||
| } | ||||
|  | ||||
| if (Test-Path "${env:ProgramFiles(x86)}\Microsoft Visual Studio\Installer\vswhere.exe") { | ||||
|     $existingPath = & "${env:ProgramFiles(x86)}\Microsoft Visual Studio\Installer\vswhere.exe" -products "Microsoft.VisualStudio.Product.BuildTools" -version "[16, 17)" -property installationPath | ||||
|     if ($existingPath -ne $null) { | ||||
|         if (!${env:CIRCLECI}) { | ||||
|             echo "Found correctly versioned existing BuildTools installation in $existingPath" | ||||
|             exit 0 | ||||
|         } | ||||
|         echo "Found existing BuildTools installation in $existingPath, keeping it" | ||||
|     } | ||||
| } | ||||
|  | ||||
| $process = Start-Process "${PWD}\vs_installer.exe" -ArgumentList $VS_INSTALL_ARGS -NoNewWindow -Wait -PassThru | ||||
| Remove-Item -Path vs_installer.exe -Force | ||||
| $exitCode = $process.ExitCode | ||||
| if (($exitCode -ne 0) -and ($exitCode -ne 3010)) { | ||||
|     echo "VS 2019 installer exited with code $exitCode, which should be one of [0, 3010]." | ||||
|     curl.exe --retry 3 -kL $COLLECT_DOWNLOAD_LINK --output Collect.exe | ||||
|     if ($LASTEXITCODE -ne 0) { | ||||
|         echo "Download of the VS Collect tool failed." | ||||
|         exit 1 | ||||
|     } | ||||
|     Start-Process "${PWD}\Collect.exe" -NoNewWindow -Wait -PassThru | ||||
|     New-Item -Path "C:\w\build-results" -ItemType "directory" -Force | ||||
|     Copy-Item -Path "C:\Users\${env:USERNAME}\AppData\Local\Temp\vslogs.zip" -Destination "C:\w\build-results\" | ||||
|     exit 1 | ||||
| } | ||||
| @ -92,6 +92,11 @@ fi | ||||
| if [[ "\$GPU_ARCH_TYPE" != *s390x* && "\$GPU_ARCH_TYPE" != *xpu* && "\$GPU_ARCH_TYPE" != *rocm*  && "$PACKAGE_TYPE" != libtorch ]]; then | ||||
|   # Exclude s390, xpu, rocm and libtorch builds from smoke testing | ||||
|   python /pytorch/.ci/pytorch/smoke_test/smoke_test.py --package=torchonly --torch-compile-check disabled | ||||
|  | ||||
|   if [[ "\$GPU_ARCH_TYPE" != *cpu-aarch64* ]]; then | ||||
|     # test for issue https://github.com/pytorch/pytorch/issues/149422 | ||||
|     python /pytorch/.ci/pytorch/smoke_test/check_gomp.py | ||||
|   fi | ||||
| fi | ||||
|  | ||||
| # Clean temp files | ||||
|  | ||||
| @ -55,16 +55,12 @@ s3_upload() { | ||||
|     s3_upload_dir="${s3_root_dir}/${UPLOAD_SUBFOLDER}/" | ||||
|   fi | ||||
|   ( | ||||
|     cache_control_flag="" | ||||
|     if [[ "${UPLOAD_CHANNEL}" = "test" ]]; then | ||||
|       cache_control_flag="--cache-control='no-cache,no-store,must-revalidate'" | ||||
|     fi | ||||
|     for pkg in ${PKG_DIR}/*.${extension}; do | ||||
|       ( | ||||
|         set -x | ||||
|         shm_id=$(sha256sum "${pkg}" | awk '{print $1}') | ||||
|         ${AWS_S3_CP} --no-progress --acl public-read "${pkg}" "${s3_upload_dir}" \ | ||||
|           --metadata "checksum-sha256=${shm_id}" ${cache_control_flag} | ||||
|           --metadata "checksum-sha256=${shm_id}" | ||||
|       ) | ||||
|     done | ||||
|   ) | ||||
|  | ||||
| @ -8,9 +8,10 @@ export CUDA_VERSION="${DESIRED_CUDA/cu/}" | ||||
| export USE_SCCACHE=1 | ||||
| export SCCACHE_BUCKET=ossci-compiler-cache | ||||
| export SCCACHE_IGNORE_SERVER_IO_ERROR=1 | ||||
| export VC_YEAR=2022 | ||||
| export VC_YEAR=2019 | ||||
|  | ||||
| if [[ "$DESIRED_CUDA" == 'xpu' ]]; then | ||||
|     export VC_YEAR=2022 | ||||
|     export USE_SCCACHE=0 | ||||
|     export XPU_VERSION=2025.0 | ||||
|     export XPU_ENABLE_KINETO=1 | ||||
|  | ||||
| @ -4,9 +4,10 @@ set -eux -o pipefail | ||||
| source "${BINARY_ENV_FILE:-/c/w/env}" | ||||
|  | ||||
| export CUDA_VERSION="${DESIRED_CUDA/cu/}" | ||||
| export VC_YEAR=2022 | ||||
| export VC_YEAR=2019 | ||||
|  | ||||
| if [[ "$DESIRED_CUDA" == 'xpu' ]]; then | ||||
|     export VC_YEAR=2022 | ||||
|     export XPU_VERSION=2025.0 | ||||
| fi | ||||
|  | ||||
|  | ||||
| @ -48,6 +48,7 @@ misc-*, | ||||
| -misc-no-recursion, | ||||
| -misc-non-private-member-variables-in-classes, | ||||
| -misc-unused-using-decls, | ||||
| -misc-use-internal-linkage, | ||||
| modernize-*, | ||||
| -modernize-macro-to-enum, | ||||
| -modernize-return-braced-init-list, | ||||
|  | ||||
.github/ISSUE_TEMPLATE/disable-ci-jobs.md (vendored, 2 lines changed)
							| @ -5,7 +5,7 @@ title: "DISABLED [WORKFLOW_NAME] / [PLATFORM_NAME] / [JOB_NAME]" | ||||
| labels: "module: ci" | ||||
| --- | ||||
|  | ||||
| > For example, DISABLED pull / win-vs2022-cpu-py3 / test (default). Once | ||||
| > For example, DISABLED pull / win-vs2019-cpu-py3 / test (default). Once | ||||
| > created, the job will be disabled within 15 minutes. You can check the | ||||
| > list of disabled jobs at https://ossci-metrics.s3.amazonaws.com/disabled-jobs.json | ||||
|  | ||||
|  | ||||
.github/actionlint.yaml (vendored, 4 lines changed)
							| @ -3,9 +3,6 @@ self-hosted-runner: | ||||
|     # GitHub hosted runner that actionlint doesn't recognize because actionlint version (1.6.21) is too old | ||||
|     - ubuntu-24.04 | ||||
|     # GitHub hosted x86 Linux runners | ||||
|     # TODO: Cleanup mentions of linux.20_04 when upgrade to linux.24_04 is complete | ||||
|     - linux.20_04.4x | ||||
|     - linux.20_04.16x | ||||
|     - linux.24_04.4x | ||||
|     - linux.24_04.16x | ||||
|     # Organization-wide AWS Linux Runners | ||||
| @ -52,7 +49,6 @@ self-hosted-runner: | ||||
|     - linux.rocm.gpu | ||||
|     - linux.rocm.gpu.2 | ||||
|     - linux.rocm.gpu.4 | ||||
|     - rocm-docker | ||||
|     # Repo-specific Apple hosted  runners | ||||
|     - macos-m1-ultra | ||||
|     - macos-m2-14 | ||||
|  | ||||
.github/actions/checkout-pytorch/action.yml (vendored, 46 lines changed)
							| @ -23,44 +23,9 @@ runs: | ||||
|       id: check_container_runner | ||||
|       run: echo "IN_CONTAINER_RUNNER=$(if [ -f /.inarc ] || [ -f /.incontainer ]; then echo true ; else echo false; fi)" >> "$GITHUB_OUTPUT" | ||||
|  | ||||
|     - name: Set up parallel fetch and clean workspace | ||||
|       id: first-clean | ||||
|       continue-on-error: true | ||||
|     - name: Clean workspace | ||||
|       shell: bash | ||||
|       if: ${{ steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'false' }} | ||||
|       env: | ||||
|         NO_SUDO: ${{ inputs.no-sudo }} | ||||
|       run: | | ||||
|         # Use all available CPUs for fetching | ||||
|         cd "${GITHUB_WORKSPACE}" | ||||
|         git config --global fetch.parallel 0 | ||||
|         git config --global submodule.fetchJobs 0 | ||||
|  | ||||
|         # Clean workspace. The default checkout action should also do this, but | ||||
|         # do it here as well just in case | ||||
|         if [[ -d .git ]]; then | ||||
|           if [ -z "${NO_SUDO}" ]; then | ||||
|             sudo git clean -ffdx | ||||
|           else | ||||
|             git clean -ffdx | ||||
|           fi | ||||
|         fi | ||||
|  | ||||
|     - name: Checkout PyTorch | ||||
|       id: first-checkout-attempt | ||||
|       continue-on-error: true | ||||
|       uses: actions/checkout@v4 | ||||
|       with: | ||||
|         ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} | ||||
|         # --depth=1 for speed, manually fetch history and other refs as necessary | ||||
|         fetch-depth: ${{ inputs.fetch-depth }} | ||||
|         submodules: ${{ inputs.submodules }} | ||||
|         show-progress: false | ||||
|  | ||||
|     - name: Clean workspace (try again) | ||||
|       if: ${{ steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'false' && | ||||
|         (steps.first-clean.outcome != 'success' || steps.first-checkout-attempt.outcome != 'success') }} | ||||
|       shell: bash | ||||
|       env: | ||||
|         NO_SUDO: ${{ inputs.no-sudo }} | ||||
|       run: | | ||||
| @ -75,11 +40,16 @@ runs: | ||||
|         fi | ||||
|         mkdir "${GITHUB_WORKSPACE}" | ||||
|  | ||||
|     - name: Checkout PyTorch (try again) | ||||
|         # Use all available CPUs for fetching | ||||
|         cd "${GITHUB_WORKSPACE}" | ||||
|         git config --global fetch.parallel 0 | ||||
|         git config --global submodule.fetchJobs 0 | ||||
|  | ||||
|     - name: Checkout PyTorch | ||||
|       uses: actions/checkout@v4 | ||||
|       if: ${{ steps.first-clean.outcome != 'success' || steps.first-checkout-attempt.outcome != 'success' }} | ||||
|       with: | ||||
|         ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} | ||||
|         # --depth=1 for speed, manually fetch history and other refs as necessary | ||||
|         fetch-depth: ${{ inputs.fetch-depth }} | ||||
|         submodules: ${{ inputs.submodules }} | ||||
|         show-progress: false | ||||
|  | ||||
| @ -15,6 +15,7 @@ runs: | ||||
|           -e BINARY_ENV_FILE \ | ||||
|           -e BUILD_ENVIRONMENT \ | ||||
|           -e DESIRED_CUDA \ | ||||
|           -e DESIRED_DEVTOOLSET \ | ||||
|           -e DESIRED_PYTHON \ | ||||
|           -e GITHUB_ACTIONS \ | ||||
|           -e GPU_ARCH_TYPE \ | ||||
|  | ||||
.github/ci_commit_pins/audio.txt (vendored, 2 lines changed)
							| @ -1 +1 @@ | ||||
| 318bace01aebc1f82ae13d0d133fcf9fede73383 | ||||
| c670ad81fda266b6598aeeef434583eb98197ae8 | ||||
|  | ||||
.github/ci_commit_pins/xla.txt (vendored, 2 lines changed)
							| @ -1 +1 @@ | ||||
| b2b890e962f5fb6f481e5da2eb4a43bb990d0f1b | ||||
| r2.7 | ||||
|  | ||||
.github/scripts/filter_test_configs.py (vendored, 4 lines changed)
							| @ -39,9 +39,9 @@ SUPPORTED_PERIODICAL_MODES: dict[str, Callable[[Optional[str]], bool]] = { | ||||
| } | ||||
|  | ||||
| # The link to the published list of disabled jobs | ||||
| DISABLED_JOBS_URL = "https://ossci-metrics.s3.amazonaws.com/disabled-jobs.json" | ||||
| DISABLED_JOBS_URL = "https://ossci-metrics.s3.amazonaws.com/disabled-jobs.json?versionId=n.FT07XR3dLMwOLBwmRNquyYSeGk8Het" | ||||
| # and unstable jobs | ||||
| UNSTABLE_JOBS_URL = "https://ossci-metrics.s3.amazonaws.com/unstable-jobs.json" | ||||
| UNSTABLE_JOBS_URL = "https://ossci-metrics.s3.amazonaws.com/unstable-jobs.json?versionId=.Ox7WAXa21I1PVqadHyPfhMRPhl0aCnD" | ||||
|  | ||||
| # Some constants used to handle disabled and unstable jobs | ||||
| JOB_NAME_SEP = "/" | ||||
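For context, a hedged illustration of what pinning a versionId buys: the job-filter data is frozen to one known snapshot of the S3 object rather than whatever is latest. The jq filter is only an example and assumes the JSON is an object keyed by job name:

# Illustrative only; requires network access.
curl -fsSL "https://ossci-metrics.s3.amazonaws.com/disabled-jobs.json?versionId=n.FT07XR3dLMwOLBwmRNquyYSeGk8Het" \
  | jq 'keys | length'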
|  | ||||
.github/scripts/generate_binary_build_matrix.py (vendored, 52 lines changed)
							| @ -34,6 +34,8 @@ ROCM_ARCHES = ["6.2.4", "6.3"] | ||||
|  | ||||
| XPU_ARCHES = ["xpu"] | ||||
|  | ||||
| CPU_CXX11_ABI_ARCH = ["cpu-cxx11-abi"] | ||||
|  | ||||
| CPU_AARCH64_ARCH = ["cpu-aarch64"] | ||||
|  | ||||
| CPU_S390X_ARCH = ["cpu-s390x"] | ||||
| @ -75,7 +77,7 @@ PYTORCH_EXTRA_INSTALL_REQUIREMENTS = { | ||||
|         "nvidia-cuda-nvrtc-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | " | ||||
|         "nvidia-cuda-runtime-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | " | ||||
|         "nvidia-cuda-cupti-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | " | ||||
|         "nvidia-cudnn-cu12==9.8.0.87; platform_system == 'Linux' and platform_machine == 'x86_64' | " | ||||
|         "nvidia-cudnn-cu12==9.7.1.26; platform_system == 'Linux' and platform_machine == 'x86_64' | " | ||||
|         "nvidia-cublas-cu12==12.8.3.14; platform_system == 'Linux' and platform_machine == 'x86_64' | " | ||||
|         "nvidia-cufft-cu12==11.3.3.41; platform_system == 'Linux' and platform_machine == 'x86_64' | " | ||||
|         "nvidia-curand-cu12==10.3.9.55; platform_system == 'Linux' and platform_machine == 'x86_64' | " | ||||
| @ -144,6 +146,8 @@ def arch_type(arch_version: str) -> str: | ||||
|         return "rocm" | ||||
|     elif arch_version in XPU_ARCHES: | ||||
|         return "xpu" | ||||
|     elif arch_version in CPU_CXX11_ABI_ARCH: | ||||
|         return "cpu-cxx11-abi" | ||||
|     elif arch_version in CPU_AARCH64_ARCH: | ||||
|         return "cpu-aarch64" | ||||
|     elif arch_version in CPU_S390X_ARCH: | ||||
| @ -172,23 +176,31 @@ WHEEL_CONTAINER_IMAGES = { | ||||
|     }, | ||||
|     "xpu": f"pytorch/manylinux2_28-builder:xpu-{DEFAULT_TAG}", | ||||
|     "cpu": f"pytorch/manylinux2_28-builder:cpu-{DEFAULT_TAG}", | ||||
|     "cpu-cxx11-abi": f"pytorch/manylinuxcxx11-abi-builder:cpu-cxx11-abi-{DEFAULT_TAG}", | ||||
|     "cpu-aarch64": f"pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-{DEFAULT_TAG}", | ||||
|     "cpu-s390x": f"pytorch/manylinuxs390x-builder:cpu-s390x-{DEFAULT_TAG}", | ||||
| } | ||||
|  | ||||
| CXX11_ABI = "cxx11-abi" | ||||
| RELEASE = "release" | ||||
| DEBUG = "debug" | ||||
|  | ||||
| LIBTORCH_CONTAINER_IMAGES: dict[str, str] = { | ||||
| LIBTORCH_CONTAINER_IMAGES: dict[tuple[str, str], str] = { | ||||
|     **{ | ||||
|         gpu_arch: f"pytorch/libtorch-cxx11-builder:cuda{gpu_arch}-{DEFAULT_TAG}" | ||||
|         ( | ||||
|             gpu_arch, | ||||
|             CXX11_ABI, | ||||
|         ): f"pytorch/libtorch-cxx11-builder:cuda{gpu_arch}-{DEFAULT_TAG}" | ||||
|         for gpu_arch in CUDA_ARCHES | ||||
|     }, | ||||
|     **{ | ||||
|         gpu_arch: f"pytorch/libtorch-cxx11-builder:rocm{gpu_arch}-{DEFAULT_TAG}" | ||||
|         ( | ||||
|             gpu_arch, | ||||
|             CXX11_ABI, | ||||
|         ): f"pytorch/libtorch-cxx11-builder:rocm{gpu_arch}-{DEFAULT_TAG}" | ||||
|         for gpu_arch in ROCM_ARCHES | ||||
|     }, | ||||
|     "cpu": f"pytorch/libtorch-cxx11-builder:cpu-{DEFAULT_TAG}", | ||||
|     ("cpu", CXX11_ABI): f"pytorch/libtorch-cxx11-builder:cpu-{DEFAULT_TAG}", | ||||
| } | ||||
|  | ||||
| FULL_PYTHON_VERSIONS = ["3.9", "3.10", "3.11", "3.12", "3.13", "3.13t"] | ||||
| @ -198,6 +210,7 @@ def translate_desired_cuda(gpu_arch_type: str, gpu_arch_version: str) -> str: | ||||
|     return { | ||||
|         "cpu": "cpu", | ||||
|         "cpu-aarch64": "cpu", | ||||
|         "cpu-cxx11-abi": "cpu-cxx11-abi", | ||||
|         "cpu-s390x": "cpu", | ||||
|         "cuda": f"cu{gpu_arch_version.replace('.', '')}", | ||||
|         "cuda-aarch64": f"cu{gpu_arch_version.replace('-aarch64', '').replace('.', '')}", | ||||
| @ -212,7 +225,7 @@ def list_without(in_list: list[str], without: list[str]) -> list[str]: | ||||
|  | ||||
| def generate_libtorch_matrix( | ||||
|     os: str, | ||||
|     release_type: str, | ||||
|     abi_version: str, | ||||
|     arches: Optional[list[str]] = None, | ||||
|     libtorch_variants: Optional[list[str]] = None, | ||||
| ) -> list[dict[str, str]]: | ||||
| @ -234,6 +247,9 @@ def generate_libtorch_matrix( | ||||
|     ret: list[dict[str, str]] = [] | ||||
|     for arch_version in arches: | ||||
|         for libtorch_variant in libtorch_variants: | ||||
|             # one of the values in the following list must be exactly | ||||
|             # CXX11_ABI, but the precise value of the other one doesn't | ||||
|             # matter | ||||
|             gpu_arch_type = arch_type(arch_version) | ||||
|             gpu_arch_version = "" if arch_version == "cpu" else arch_version | ||||
|             # ROCm builds without-deps failed even in ROCm runners; skip for now | ||||
| @ -246,15 +262,20 @@ def generate_libtorch_matrix( | ||||
|                     "desired_cuda": translate_desired_cuda( | ||||
|                         gpu_arch_type, gpu_arch_version | ||||
|                     ), | ||||
|                     "libtorch_config": release_type, | ||||
|                     "libtorch_variant": libtorch_variant, | ||||
|                     "libtorch_config": abi_version | ||||
|                     if os in ("windows", "windows-arm64") | ||||
|                     else "", | ||||
|                     "devtoolset": abi_version | ||||
|                     if os not in ("windows", "windows-arm64") | ||||
|                     else "", | ||||
|                     "container_image": ( | ||||
|                         LIBTORCH_CONTAINER_IMAGES[arch_version] | ||||
|                         LIBTORCH_CONTAINER_IMAGES[(arch_version, abi_version)] | ||||
|                         if os not in ("windows", "windows-arm64") | ||||
|                         else "" | ||||
|                     ), | ||||
|                     "package_type": "libtorch", | ||||
|                     "build_name": f"libtorch-{gpu_arch_type}{gpu_arch_version}-{libtorch_variant}-{release_type}".replace( | ||||
|                     "build_name": f"libtorch-{gpu_arch_type}{gpu_arch_version}-{libtorch_variant}-{abi_version}".replace( | ||||
|                         ".", "_" | ||||
|                     ), | ||||
|                 } | ||||
| @ -280,7 +301,7 @@ def generate_wheels_matrix( | ||||
|         # Define default compute architectures | ||||
|         arches = ["cpu"] | ||||
|         if os == "linux": | ||||
|             arches += CUDA_ARCHES + ROCM_ARCHES + XPU_ARCHES | ||||
|             arches += CPU_CXX11_ABI_ARCH + CUDA_ARCHES + ROCM_ARCHES + XPU_ARCHES | ||||
|         elif os == "windows": | ||||
|             arches += CUDA_ARCHES + XPU_ARCHES | ||||
|         elif os == "linux-aarch64": | ||||
| @ -299,6 +320,7 @@ def generate_wheels_matrix( | ||||
|             gpu_arch_version = ( | ||||
|                 "" | ||||
|                 if arch_version == "cpu" | ||||
|                 or arch_version == "cpu-cxx11-abi" | ||||
|                 or arch_version == "cpu-aarch64" | ||||
|                 or arch_version == "cpu-s390x" | ||||
|                 or arch_version == "xpu" | ||||
| @ -333,6 +355,7 @@ def generate_wheels_matrix( | ||||
|                         "gpu_arch_version": gpu_arch_version, | ||||
|                         "desired_cuda": desired_cuda, | ||||
|                         "use_split_build": "True" if use_split_build else "False", | ||||
|                         "devtoolset": "cxx11-abi", | ||||
|                         "container_image": WHEEL_CONTAINER_IMAGES[arch_version], | ||||
|                         "package_type": package_type, | ||||
|                         "pytorch_extra_install_requirements": ( | ||||
| @ -361,6 +384,7 @@ def generate_wheels_matrix( | ||||
|                                 gpu_arch_type, gpu_arch_version | ||||
|                             ), | ||||
|                             "use_split_build": "True" if use_split_build else "False", | ||||
|                             "devtoolset": "", | ||||
|                             "container_image": WHEEL_CONTAINER_IMAGES[arch_version], | ||||
|                             "package_type": package_type, | ||||
|                             "pytorch_extra_install_requirements": "", | ||||
| @ -379,6 +403,12 @@ def generate_wheels_matrix( | ||||
|                             gpu_arch_type, gpu_arch_version | ||||
|                         ), | ||||
|                         "use_split_build": "True" if use_split_build else "False", | ||||
|                         "devtoolset": ( | ||||
|                             "cxx11-abi" | ||||
|                             if (arch_version in ["cpu-cxx11-abi", "cpu-aarch64"]) | ||||
|                             or os == "linux" | ||||
|                             else "" | ||||
|                         ), | ||||
|                         "container_image": WHEEL_CONTAINER_IMAGES[arch_version], | ||||
|                         "package_type": package_type, | ||||
|                         "build_name": f"{package_type}-py{python_version}-{gpu_arch_type}{gpu_arch_version}".replace( | ||||
| @ -387,8 +417,6 @@ def generate_wheels_matrix( | ||||
|                         "pytorch_extra_install_requirements": ( | ||||
|                             PYTORCH_EXTRA_INSTALL_REQUIREMENTS["xpu"] | ||||
|                             if gpu_arch_type == "xpu" | ||||
|                             else PYTORCH_EXTRA_INSTALL_REQUIREMENTS[CUDA_STABLE] | ||||
|                             if os != "linux" | ||||
|                             else "" | ||||
|                         ), | ||||
|                     } | ||||
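To make the new fields concrete, a hedged sketch of the kind of linux entry the updated generator emits for the cpu-cxx11-abi arch; fields are abridged and values inferred from the code above, not verbatim generator output:

# Abridged, illustrative matrix entry:
#   {
#     "python_version": "3.9",
#     "gpu_arch_type": "cpu-cxx11-abi",
#     "desired_cuda": "cpu-cxx11-abi",
#     "devtoolset": "cxx11-abi",
#     "container_image": "pytorch/manylinuxcxx11-abi-builder:cpu-cxx11-abi-<DEFAULT_TAG>",
#     "package_type": "manywheel",
#     "build_name": "manywheel-py3_9-cpu-cxx11-abi"
#   }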
|  | ||||
.github/scripts/generate_ci_workflows.py (vendored, 23 lines changed)
							| @ -54,6 +54,7 @@ class BinaryBuildWorkflow: | ||||
|  | ||||
|     # Optional fields | ||||
|     build_environment: str = "" | ||||
|     abi_version: str = "" | ||||
|     ciflow_config: CIFlowConfig = field(default_factory=CIFlowConfig) | ||||
|     is_scheduled: str = "" | ||||
|     branches: str = "nightly" | ||||
| @ -63,7 +64,12 @@ class BinaryBuildWorkflow: | ||||
|     use_split_build: bool = False | ||||
|  | ||||
|     def __post_init__(self) -> None: | ||||
|         self.build_environment = f"{self.os}-binary-{self.package_type}" | ||||
|         if self.abi_version: | ||||
|             self.build_environment = ( | ||||
|                 f"{self.os}-binary-{self.package_type}-{self.abi_version}" | ||||
|             ) | ||||
|         else: | ||||
|             self.build_environment = f"{self.os}-binary-{self.package_type}" | ||||
|         if self.use_split_build: | ||||
|             # added to distinguish concurrency groups | ||||
|             self.build_environment += "-split" | ||||
| @ -127,9 +133,10 @@ LINUX_BINARY_BUILD_WORFKLOWS = [ | ||||
|     BinaryBuildWorkflow( | ||||
|         os=OperatingSystem.LINUX, | ||||
|         package_type="libtorch", | ||||
|         abi_version=generate_binary_build_matrix.CXX11_ABI, | ||||
|         build_configs=generate_binary_build_matrix.generate_libtorch_matrix( | ||||
|             OperatingSystem.LINUX, | ||||
|             generate_binary_build_matrix.RELEASE, | ||||
|             generate_binary_build_matrix.CXX11_ABI, | ||||
|             libtorch_variants=["shared-with-deps"], | ||||
|         ), | ||||
|         ciflow_config=CIFlowConfig( | ||||
| @ -169,9 +176,10 @@ LINUX_BINARY_SMOKE_WORKFLOWS = [ | ||||
|     BinaryBuildWorkflow( | ||||
|         os=OperatingSystem.LINUX, | ||||
|         package_type="libtorch", | ||||
|         abi_version=generate_binary_build_matrix.CXX11_ABI, | ||||
|         build_configs=generate_binary_build_matrix.generate_libtorch_matrix( | ||||
|             OperatingSystem.LINUX, | ||||
|             generate_binary_build_matrix.RELEASE, | ||||
|             generate_binary_build_matrix.CXX11_ABI, | ||||
|             arches=["cpu"], | ||||
|             libtorch_variants=["shared-with-deps"], | ||||
|         ), | ||||
| @ -194,6 +202,7 @@ WINDOWS_BINARY_BUILD_WORKFLOWS = [ | ||||
|     BinaryBuildWorkflow( | ||||
|         os=OperatingSystem.WINDOWS, | ||||
|         package_type="libtorch", | ||||
|         abi_version=generate_binary_build_matrix.RELEASE, | ||||
|         build_configs=generate_binary_build_matrix.generate_libtorch_matrix( | ||||
|             OperatingSystem.WINDOWS, | ||||
|             generate_binary_build_matrix.RELEASE, | ||||
| @ -207,6 +216,7 @@ WINDOWS_BINARY_BUILD_WORKFLOWS = [ | ||||
|     BinaryBuildWorkflow( | ||||
|         os=OperatingSystem.WINDOWS, | ||||
|         package_type="libtorch", | ||||
|         abi_version=generate_binary_build_matrix.DEBUG, | ||||
|         build_configs=generate_binary_build_matrix.generate_libtorch_matrix( | ||||
|             OperatingSystem.WINDOWS, | ||||
|             generate_binary_build_matrix.DEBUG, | ||||
| @ -223,6 +233,7 @@ WINDOWS_BINARY_SMOKE_WORKFLOWS = [ | ||||
|     BinaryBuildWorkflow( | ||||
|         os=OperatingSystem.WINDOWS, | ||||
|         package_type="libtorch", | ||||
|         abi_version=generate_binary_build_matrix.RELEASE, | ||||
|         build_configs=generate_binary_build_matrix.generate_libtorch_matrix( | ||||
|             OperatingSystem.WINDOWS, | ||||
|             generate_binary_build_matrix.RELEASE, | ||||
| @ -237,6 +248,7 @@ WINDOWS_BINARY_SMOKE_WORKFLOWS = [ | ||||
|     BinaryBuildWorkflow( | ||||
|         os=OperatingSystem.WINDOWS, | ||||
|         package_type="libtorch", | ||||
|         abi_version=generate_binary_build_matrix.DEBUG, | ||||
|         build_configs=generate_binary_build_matrix.generate_libtorch_matrix( | ||||
|             OperatingSystem.WINDOWS, | ||||
|             generate_binary_build_matrix.DEBUG, | ||||
| @ -267,6 +279,7 @@ WINDOWS_ARM64_BINARY_BUILD_WORKFLOWS = [ | ||||
|     BinaryBuildWorkflow( | ||||
|         os=OperatingSystem.WINDOWS_ARM64, | ||||
|         package_type="libtorch", | ||||
|         abi_version=generate_binary_build_matrix.RELEASE, | ||||
|         build_configs=generate_binary_build_matrix.generate_libtorch_matrix( | ||||
|             OperatingSystem.WINDOWS_ARM64, | ||||
|             generate_binary_build_matrix.RELEASE, | ||||
| @ -281,6 +294,7 @@ WINDOWS_ARM64_BINARY_BUILD_WORKFLOWS = [ | ||||
|     BinaryBuildWorkflow( | ||||
|         os=OperatingSystem.WINDOWS_ARM64, | ||||
|         package_type="libtorch", | ||||
|         abi_version=generate_binary_build_matrix.DEBUG, | ||||
|         build_configs=generate_binary_build_matrix.generate_libtorch_matrix( | ||||
|             OperatingSystem.WINDOWS_ARM64, | ||||
|             generate_binary_build_matrix.DEBUG, | ||||
| @ -298,9 +312,10 @@ MACOS_BINARY_BUILD_WORKFLOWS = [ | ||||
|     BinaryBuildWorkflow( | ||||
|         os=OperatingSystem.MACOS_ARM64, | ||||
|         package_type="libtorch", | ||||
|         abi_version=generate_binary_build_matrix.CXX11_ABI, | ||||
|         build_configs=generate_binary_build_matrix.generate_libtorch_matrix( | ||||
|             OperatingSystem.MACOS, | ||||
|             generate_binary_build_matrix.RELEASE, | ||||
|             generate_binary_build_matrix.CXX11_ABI, | ||||
|             libtorch_variants=["shared-with-deps"], | ||||
|         ), | ||||
|         cross_compile_arm64=False, | ||||
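As a hedged illustration of the naming effect of the new abi_version field, the build_environment strings produced by __post_init__ would look like the following (illustrative, derived from the f-string above):

# Illustrative build_environment values:
#   linux-binary-libtorch-cxx11-abi     # LINUX libtorch with abi_version=CXX11_ABI
#   windows-binary-libtorch-release     # WINDOWS libtorch with abi_version=RELEASE
#   windows-binary-libtorch-debug       # WINDOWS libtorch with abi_version=DEBUG
#   linux-binary-manywheel              # unchanged when abi_version is empty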
|  | ||||
.github/scripts/lintrunner.sh (vendored, 5 lines changed)
							| @ -1,6 +1,11 @@ | ||||
| #!/usr/bin/env bash | ||||
| set -ex | ||||
|  | ||||
| # The generic Linux job chooses to use base env, not the one setup by the image | ||||
| CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]") | ||||
| eval "$(command conda 'shell.bash' 'hook' 2> /dev/null)" | ||||
| conda activate "${CONDA_ENV}" | ||||
|  | ||||
| # Use uv to speed up lintrunner init | ||||
| python3 -m pip install uv==0.1.45 | ||||
|  | ||||
|  | ||||
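A hedged note on what the added conda activation does; the paths shown are only an example of typical image layouts:

# `conda env list --json` emits something like {"envs": ["/opt/conda", "/opt/conda/envs/py_3.10"]},
# so taking the last element picks the image-provided environment rather than base.
conda env list --json | jq -r '.envs | .[-1]'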
| @ -5,50 +5,6 @@ FROM --platform=linux/amd64 docker.io/ubuntu:24.04 as ld-prefix | ||||
| ENV DEBIAN_FRONTEND=noninteractive | ||||
| RUN apt-get update && apt-get -y install ca-certificates libicu74 libssl3 | ||||
|  | ||||
| # Patched podman | ||||
| FROM --platform=linux/s390x docker.io/ubuntu:24.04 as podman | ||||
| ENV DEBIAN_FRONTEND=noninteractive | ||||
| RUN sed -i 's/^Types: deb$/Types: deb deb-src/' /etc/apt/sources.list.d/ubuntu.sources | ||||
| RUN apt-get update && \ | ||||
|     apt-get install -y \ | ||||
|         cmake \ | ||||
|         curl \ | ||||
|         devscripts \ | ||||
|         dpkg-dev \ | ||||
|         gdb \ | ||||
|         less \ | ||||
|         make \ | ||||
|         python3 \ | ||||
|         python3-pip \ | ||||
|         quilt \ | ||||
|         rsync \ | ||||
|         software-properties-common \ | ||||
|         stress-ng \ | ||||
|         vim \ | ||||
|         nano \ | ||||
|         wget && \ | ||||
|     apt-get build-dep -y podman && \ | ||||
|     apt-get source podman | ||||
|  | ||||
| COPY podman-patches/podman-25245.patch /tmp/podman-25245.patch | ||||
| COPY podman-patches/podman-25102-backport.patch /tmp/podman-25102-backport.patch | ||||
|  | ||||
| # import and apply patches | ||||
| # patches: | ||||
| # https://github.com/containers/podman/pull/25102 | ||||
| # https://github.com/containers/podman/pull/25245 | ||||
| RUN cd /libpod-* && \ | ||||
|     quilt import /tmp/podman-25245.patch && quilt push && \ | ||||
|     quilt import /tmp/podman-25102-backport.patch && quilt push && \ | ||||
|     dch -i "Fix podman deadlock and add option to clean up build leftovers" && \ | ||||
|     /bin/rm /tmp/podman-25245.patch /tmp/podman-25102-backport.patch | ||||
|  | ||||
| # build patched podman | ||||
| RUN cd /libpod-* && \ | ||||
|     debuild -i -us -uc -b && \ | ||||
|     /bin/rm /podman-remote_*.deb && \ | ||||
|     mkdir /tmp/podman && cp -v /podman*.deb /tmp/podman | ||||
|  | ||||
| # Main image. | ||||
| FROM --platform=linux/s390x docker.io/ubuntu:24.04 | ||||
|  | ||||
| @ -89,11 +45,7 @@ COPY fs/ / | ||||
| RUN chmod +x /usr/bin/actions-runner /usr/bin/entrypoint | ||||
|  | ||||
| # install podman | ||||
| # RUN apt-get update && apt -y install podman podman-docker | ||||
|  | ||||
| # install patched podman | ||||
| COPY --from=podman /tmp/podman /tmp/podman | ||||
| RUN apt-get update && apt -y install /tmp/podman/*.deb && /bin/rm -rfv /tmp/podman | ||||
| RUN apt -y install podman podman-docker | ||||
|  | ||||
| # amd64 Github Actions Runner. | ||||
| RUN useradd -m actions-runner | ||||
| @ -113,7 +65,7 @@ RUN virtualenv --system-site-packages venv | ||||
| # | ||||
| COPY --chown=actions-runner:actions-runner manywheel-s390x.tar /home/actions-runner/manywheel-s390x.tar | ||||
|  | ||||
| RUN curl -L https://github.com/actions/runner/releases/download/v2.322.0/actions-runner-linux-x64-2.322.0.tar.gz | tar -xz | ||||
| RUN curl -L https://github.com/actions/runner/releases/download/v2.317.0/actions-runner-linux-x64-2.317.0.tar.gz | tar -xz | ||||
|  | ||||
| ENTRYPOINT ["/usr/bin/entrypoint"] | ||||
| CMD ["/usr/bin/actions-runner"] | ||||
|  | ||||
| @ -1,358 +0,0 @@ | ||||
| diff --git a/cmd/podman/system/prune.go b/cmd/podman/system/prune.go | ||||
| index f7cf7b551..739f87cde 100644 | ||||
| --- a/cmd/podman/system/prune.go | ||||
| +++ b/cmd/podman/system/prune.go | ||||
| @@ -48,6 +48,7 @@ func init() { | ||||
|  	flags.BoolVarP(&force, "force", "f", false, "Do not prompt for confirmation.  The default is false") | ||||
|  	flags.BoolVarP(&pruneOptions.All, "all", "a", false, "Remove all unused data") | ||||
|  	flags.BoolVar(&pruneOptions.External, "external", false, "Remove container data in storage not controlled by podman") | ||||
| +	flags.BoolVar(&pruneOptions.Build, "build", false, "Remove build containers") | ||||
|  	flags.BoolVar(&pruneOptions.Volume, "volumes", false, "Prune volumes") | ||||
|  	filterFlagName := "filter" | ||||
|  	flags.StringArrayVar(&filters, filterFlagName, []string{}, "Provide filter values (e.g. 'label=<key>=<value>')") | ||||
| @@ -64,8 +65,12 @@ func prune(cmd *cobra.Command, args []string) error { | ||||
|  			volumeString = ` | ||||
|  	- all volumes not used by at least one container` | ||||
|  		} | ||||
| - | ||||
| -		fmt.Printf(createPruneWarningMessage(pruneOptions), volumeString, "Are you sure you want to continue? [y/N] ") | ||||
| +		buildString := "" | ||||
| +		if pruneOptions.Build { | ||||
| +			buildString = ` | ||||
| +	- all build containers` | ||||
| +		} | ||||
| +		fmt.Printf(createPruneWarningMessage(pruneOptions), volumeString, buildString, "Are you sure you want to continue? [y/N] ") | ||||
|   | ||||
|  		answer, err := reader.ReadString('\n') | ||||
|  		if err != nil { | ||||
| @@ -124,7 +129,7 @@ func createPruneWarningMessage(pruneOpts entities.SystemPruneOptions) string { | ||||
|  	if pruneOpts.All { | ||||
|  		return `WARNING! This command removes: | ||||
|  	- all stopped containers | ||||
| -	- all networks not used by at least one container%s | ||||
| +	- all networks not used by at least one container%s%s | ||||
|  	- all images without at least one container associated with them | ||||
|  	- all build cache | ||||
|   | ||||
| @@ -132,7 +137,7 @@ func createPruneWarningMessage(pruneOpts entities.SystemPruneOptions) string { | ||||
|  	} | ||||
|  	return `WARNING! This command removes: | ||||
|  	- all stopped containers | ||||
| -	- all networks not used by at least one container%s | ||||
| +	- all networks not used by at least one container%s%s | ||||
|  	- all dangling images | ||||
|  	- all dangling build cache | ||||
|   | ||||
| diff --git a/docs/source/markdown/podman-system-prune.1.md b/docs/source/markdown/podman-system-prune.1.md | ||||
| index 52f9ec1c7..95099d018 100644 | ||||
| --- a/docs/source/markdown/podman-system-prune.1.md | ||||
| +++ b/docs/source/markdown/podman-system-prune.1.md | ||||
| @@ -7,20 +7,28 @@ podman\-system\-prune - Remove all unused pods, containers, images, networks, an | ||||
|  **podman system prune** [*options*] | ||||
|   | ||||
|  ## DESCRIPTION | ||||
| -**podman system prune** removes all unused containers (both dangling and unreferenced), pods, networks, and optionally, volumes from local storage. | ||||
| +**podman system prune** removes all unused containers (both dangling and unreferenced), build containers, pods, networks, and optionally, volumes from local storage. | ||||
|   | ||||
|  Use the **--all** option to delete all unused images.  Unused images are dangling images as well as any image that does not have any containers based on it. | ||||
|   | ||||
|  By default, volumes are not removed to prevent important data from being deleted if there is currently no container using the volume. Use the **--volumes** flag when running the command to prune volumes as well. | ||||
|   | ||||
| +By default, build containers are not removed to prevent interference with builds in progress. Use the **--build** flag when running the command to remove build containers as well. | ||||
| + | ||||
|  ## OPTIONS | ||||
|  #### **--all**, **-a** | ||||
|   | ||||
|  Recursively remove all unused pods, containers, images, networks, and volume data. (Maximum 50 iterations.) | ||||
|   | ||||
| +#### **--build** | ||||
| + | ||||
| +Removes any build containers that were created during the build, but were not removed because the build was unexpectedly terminated. | ||||
| + | ||||
| +Note: **This is not safe operation and should be executed only when no builds are in progress. It can interfere with builds in progress.** | ||||
| + | ||||
|  #### **--external** | ||||
|   | ||||
| -Removes all leftover container storage files from local storage not managed by Podman. In normal circumstances, no such data exists, but in case of an unclean shutdown, the Podman database may be corrupted and cause this. | ||||
| +Tries to clean up remainders of previous containers or layers that are not references in the storage json files. These can happen in the case of unclean shutdowns or regular restarts in transient storage mode. | ||||
|   | ||||
|  However, when using transient storage mode, the Podman database does not persist. This means containers leave the writable layers on disk after a reboot. When using a transient store, it is recommended that the **podman system prune --external** command is run during boot. | ||||
|   | ||||
| diff --git a/libpod/runtime.go b/libpod/runtime.go | ||||
| index 986e40f60..609fbba57 100644 | ||||
| --- a/libpod/runtime.go | ||||
| +++ b/libpod/runtime.go | ||||
| @@ -33,6 +33,7 @@ import ( | ||||
|  	"github.com/containers/podman/v4/libpod/lock" | ||||
|  	"github.com/containers/podman/v4/libpod/plugin" | ||||
|  	"github.com/containers/podman/v4/libpod/shutdown" | ||||
| +	"github.com/containers/podman/v4/pkg/domain/entities/reports" | ||||
|  	"github.com/containers/podman/v4/pkg/rootless" | ||||
|  	"github.com/containers/podman/v4/pkg/systemd" | ||||
|  	"github.com/containers/podman/v4/pkg/util" | ||||
| @@ -1250,3 +1251,52 @@ func (r *Runtime) LockConflicts() (map[uint32][]string, []uint32, error) { | ||||
|   | ||||
|  	return toReturn, locksHeld, nil | ||||
|  } | ||||
| + | ||||
| +// Exists checks whether a file or directory exists at the given path. | ||||
| +// If the path is a symlink, the symlink is followed. | ||||
| +func Exists(path string) error { | ||||
| +	// It uses unix.Faccessat which is a faster operation compared to os.Stat for | ||||
| +	// simply checking the existence of a file. | ||||
| +	err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, 0) | ||||
| +	if err != nil { | ||||
| +		return &os.PathError{Op: "faccessat", Path: path, Err: err} | ||||
| +	} | ||||
| +	return nil | ||||
| +} | ||||
| + | ||||
| +// PruneBuildContainers removes any build containers that were created during the build, | ||||
| +// but were not removed because the build was unexpectedly terminated. | ||||
| +// | ||||
| +// Note: This is not a safe operation; execute it only when no builds are in progress, as it can interfere with builds that are still running. | ||||
| +func (r *Runtime) PruneBuildContainers() ([]*reports.PruneReport, error) { | ||||
| +	stageContainersPruneReports := []*reports.PruneReport{} | ||||
| + | ||||
| +	containers, err := r.store.Containers() | ||||
| +	if err != nil { | ||||
| +		return stageContainersPruneReports, err | ||||
| +	} | ||||
| +	for _, container := range containers { | ||||
| +		path, err := r.store.ContainerDirectory(container.ID) | ||||
| +		if err != nil { | ||||
| +			return stageContainersPruneReports, err | ||||
| +		} | ||||
| +		if err := Exists(filepath.Join(path, "buildah.json")); err != nil { | ||||
| +			continue | ||||
| +		} | ||||
| + | ||||
| +		report := &reports.PruneReport{ | ||||
| +			Id: container.ID, | ||||
| +		} | ||||
| +		size, err := r.store.ContainerSize(container.ID) | ||||
| +		if err != nil { | ||||
| +			report.Err = err | ||||
| +		} | ||||
| +		report.Size = uint64(size) | ||||
| + | ||||
| +		if err := r.store.DeleteContainer(container.ID); err != nil { | ||||
| +			report.Err = errors.Join(report.Err, err) | ||||
| +		} | ||||
| +		stageContainersPruneReports = append(stageContainersPruneReports, report) | ||||
| +	} | ||||
| +	return stageContainersPruneReports, nil | ||||
| +} | ||||
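To make the report plumbing above concrete, a hypothetical caller might consume the per-container reports like this. This is a sketch only, assuming it sits in the libpod package next to the function above with "fmt" imported; the helper name is illustrative and not part of the patch:

// pruneAndSummarize is a hypothetical helper, not part of this patch. It shows
// how the reports returned by PruneBuildContainers might be consumed.
func pruneAndSummarize(rt *Runtime) error {
	buildReports, err := rt.PruneBuildContainers()
	if err != nil {
		return err
	}
	var reclaimed uint64
	for _, report := range buildReports {
		if report.Err != nil {
			// Per-container failures are recorded on the report rather than
			// aborting the whole prune.
			fmt.Printf("could not prune %s: %v\n", report.Id, report.Err)
			continue
		}
		reclaimed += report.Size
		fmt.Printf("pruned build container %s (%d bytes)\n", report.Id, report.Size)
	}
	fmt.Printf("reclaimed %d bytes of build container storage\n", reclaimed)
	return nil
}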
| diff --git a/pkg/api/handlers/libpod/system.go b/pkg/api/handlers/libpod/system.go | ||||
| index 70d4493f8..7c129b1ba 100644 | ||||
| --- a/pkg/api/handlers/libpod/system.go | ||||
| +++ b/pkg/api/handlers/libpod/system.go | ||||
| @@ -22,6 +22,7 @@ func SystemPrune(w http.ResponseWriter, r *http.Request) { | ||||
|  		All      bool `schema:"all"` | ||||
|  		Volumes  bool `schema:"volumes"` | ||||
|  		External bool `schema:"external"` | ||||
| +		Build    bool `schema:"build"` | ||||
|  	}{} | ||||
|   | ||||
|  	if err := decoder.Decode(&query, r.URL.Query()); err != nil { | ||||
| @@ -43,6 +44,7 @@ func SystemPrune(w http.ResponseWriter, r *http.Request) { | ||||
|  		Volume:   query.Volumes, | ||||
|  		Filters:  *filterMap, | ||||
|  		External: query.External, | ||||
| +		Build:    query.Build, | ||||
|  	} | ||||
|  	report, err := containerEngine.SystemPrune(r.Context(), pruneOptions) | ||||
|  	if err != nil { | ||||
| diff --git a/pkg/bindings/system/types.go b/pkg/bindings/system/types.go | ||||
| index 89e093f68..b4a4ff064 100644 | ||||
| --- a/pkg/bindings/system/types.go | ||||
| +++ b/pkg/bindings/system/types.go | ||||
| @@ -18,6 +18,7 @@ type PruneOptions struct { | ||||
|  	Filters  map[string][]string | ||||
|  	Volumes  *bool | ||||
|  	External *bool | ||||
| +	Build    *bool | ||||
|  } | ||||
|   | ||||
|  // VersionOptions are optional options for getting version info | ||||
| diff --git a/pkg/bindings/system/types_prune_options.go b/pkg/bindings/system/types_prune_options.go | ||||
| index d00498520..5f3bd652c 100644 | ||||
| --- a/pkg/bindings/system/types_prune_options.go | ||||
| +++ b/pkg/bindings/system/types_prune_options.go | ||||
| @@ -76,3 +76,18 @@ func (o *PruneOptions) GetExternal() bool { | ||||
|  	} | ||||
|  	return *o.External | ||||
|  } | ||||
| + | ||||
| +// WithBuild set field Build to given value | ||||
| +func (o *PruneOptions) WithBuild(value bool) *PruneOptions { | ||||
| +	o.Build = &value | ||||
| +	return o | ||||
| +} | ||||
| + | ||||
| +// GetBuild returns value of field Build | ||||
| +func (o *PruneOptions) GetBuild() bool { | ||||
| +	if o.Build == nil { | ||||
| +		var z bool | ||||
| +		return z | ||||
| +	} | ||||
| +	return *o.Build | ||||
| +} | ||||
| diff --git a/pkg/domain/entities/system.go b/pkg/domain/entities/system.go | ||||
| index 473db3530..f6938652a 100644 | ||||
| --- a/pkg/domain/entities/system.go | ||||
| +++ b/pkg/domain/entities/system.go | ||||
| @@ -22,6 +22,7 @@ type SystemPruneOptions struct { | ||||
|  	Volume   bool | ||||
|  	Filters  map[string][]string `json:"filters" schema:"filters"` | ||||
|  	External bool | ||||
| +	Build    bool | ||||
|  } | ||||
|   | ||||
|  // SystemPruneReport provides report after system prune is executed. | ||||
| diff --git a/pkg/domain/infra/abi/system.go b/pkg/domain/infra/abi/system.go | ||||
| index 24ee64d29..ea3e5f203 100644 | ||||
| --- a/pkg/domain/infra/abi/system.go | ||||
| +++ b/pkg/domain/infra/abi/system.go | ||||
| @@ -150,16 +150,16 @@ func (ic *ContainerEngine) SetupRootless(_ context.Context, noMoveProcess bool) | ||||
|  	return nil | ||||
|  } | ||||
|   | ||||
| -// SystemPrune removes unused data from the system. Pruning pods, containers, networks, volumes and images. | ||||
| +// SystemPrune removes unused data from the system. Pruning pods, containers, build containers, networks, volumes and images. | ||||
|  func (ic *ContainerEngine) SystemPrune(ctx context.Context, options entities.SystemPruneOptions) (*entities.SystemPruneReport, error) { | ||||
|  	var systemPruneReport = new(entities.SystemPruneReport) | ||||
|   | ||||
|  	if options.External { | ||||
| -		if options.All || options.Volume || len(options.Filters) > 0 { | ||||
| +		if options.All || options.Volume || len(options.Filters) > 0 || options.Build { | ||||
|  			return nil, fmt.Errorf("system prune --external cannot be combined with other options") | ||||
|  		} | ||||
| -		err := ic.Libpod.GarbageCollect() | ||||
| -		if err != nil { | ||||
| + | ||||
| +		if err := ic.Libpod.GarbageCollect(); err != nil { | ||||
|  			return nil, err | ||||
|  		} | ||||
|  		return systemPruneReport, nil | ||||
| @@ -170,6 +170,17 @@ func (ic *ContainerEngine) SystemPrune(ctx context.Context, options entities.Sys | ||||
|  		filters = append(filters, fmt.Sprintf("%s=%s", k, v[0])) | ||||
|  	} | ||||
|  	reclaimedSpace := (uint64)(0) | ||||
| + | ||||
| +	// Prune Build Containers | ||||
| +	if options.Build { | ||||
| +		stageContainersPruneReports, err := ic.Libpod.PruneBuildContainers() | ||||
| +		if err != nil { | ||||
| +			return nil, err | ||||
| +		} | ||||
| +		reclaimedSpace += reports.PruneReportsSize(stageContainersPruneReports) | ||||
| +		systemPruneReport.ContainerPruneReports = append(systemPruneReport.ContainerPruneReports, stageContainersPruneReports...) | ||||
| +	} | ||||
| + | ||||
|  	found := true | ||||
|  	for found { | ||||
|  		found = false | ||||
| diff --git a/pkg/domain/infra/tunnel/system.go b/pkg/domain/infra/tunnel/system.go | ||||
| index fc82e7b2b..142a9fa5c 100644 | ||||
| --- a/pkg/domain/infra/tunnel/system.go | ||||
| +++ b/pkg/domain/infra/tunnel/system.go | ||||
| @@ -19,7 +19,7 @@ func (ic *ContainerEngine) SetupRootless(_ context.Context, noMoveProcess bool) | ||||
|   | ||||
|  // SystemPrune prunes unused data from the system. | ||||
|  func (ic *ContainerEngine) SystemPrune(ctx context.Context, opts entities.SystemPruneOptions) (*entities.SystemPruneReport, error) { | ||||
| -	options := new(system.PruneOptions).WithAll(opts.All).WithVolumes(opts.Volume).WithFilters(opts.Filters).WithExternal(opts.External) | ||||
| +	options := new(system.PruneOptions).WithAll(opts.All).WithVolumes(opts.Volume).WithFilters(opts.Filters).WithExternal(opts.External).WithBuild(opts.Build) | ||||
|  	return system.Prune(ic.ClientCtx, options) | ||||
|  } | ||||
|   | ||||
| diff --git a/test/e2e/prune_test.go b/test/e2e/prune_test.go | ||||
| index 01e848478..57bd5582d 100644 | ||||
| --- a/test/e2e/prune_test.go | ||||
| +++ b/test/e2e/prune_test.go | ||||
| @@ -4,6 +4,8 @@ import ( | ||||
|  	"fmt" | ||||
|  	"os" | ||||
|  	"path/filepath" | ||||
| +	"syscall" | ||||
| +	"time" | ||||
|   | ||||
|  	. "github.com/containers/podman/v4/test/utils" | ||||
|  	. "github.com/onsi/ginkgo/v2" | ||||
| @@ -22,6 +24,11 @@ FROM scratch | ||||
|  ENV test1=test1 | ||||
|  ENV test2=test2` | ||||
|   | ||||
| +var longBuildImage = fmt.Sprintf(` | ||||
| +FROM %s | ||||
| +RUN echo "Hello, World!" | ||||
| +RUN echo "Please use signal 9 this will never end" && sleep 10000s`, ALPINE) | ||||
| + | ||||
|  var _ = Describe("Podman prune", func() { | ||||
|   | ||||
|  	It("podman container prune containers", func() { | ||||
| @@ -593,4 +600,63 @@ var _ = Describe("Podman prune", func() { | ||||
|  		Expect(err).ToNot(HaveOccurred()) | ||||
|  		Expect(dirents).To(HaveLen(3)) | ||||
|  	}) | ||||
| + | ||||
| +	It("podman system prune --build clean up after terminated build", func() { | ||||
| +		useCustomNetworkDir(podmanTest, tempdir) | ||||
| + | ||||
| +		podmanTest.BuildImage(pruneImage, "alpine_notleaker:latest", "false") | ||||
| + | ||||
| +		create := podmanTest.Podman([]string{"create", "--name", "test", BB, "sleep", "10000"}) | ||||
| +		create.WaitWithDefaultTimeout() | ||||
| +		Expect(create).Should(ExitCleanly()) | ||||
| + | ||||
| +		containerFilePath := filepath.Join(podmanTest.TempDir, "ContainerFile-podman-leaker") | ||||
| +		err := os.WriteFile(containerFilePath, []byte(longBuildImage), 0755) | ||||
| +		Expect(err).ToNot(HaveOccurred()) | ||||
| + | ||||
| +		build := podmanTest.Podman([]string{"build", "-f", containerFilePath, "-t", "podmanleaker"}) | ||||
| +		// The build never finishes, so wait for its marker output and then send SIGKILL to simulate a failed build that leaves stage containers behind. | ||||
| +		matchedOutput := false | ||||
| +		for range 900 { | ||||
| +			if build.LineInOutputContains("Please use signal 9") { | ||||
| +				matchedOutput = true | ||||
| +				build.Signal(syscall.SIGKILL) | ||||
| +				break | ||||
| +			} | ||||
| +			time.Sleep(100 * time.Millisecond) | ||||
| +		} | ||||
| +		if !matchedOutput { | ||||
| +			Fail("Did not match special string in podman build") | ||||
| +		} | ||||
| + | ||||
| +		// Check that the intermediate image from the stage container is present | ||||
| +		none := podmanTest.Podman([]string{"images", "-a"}) | ||||
| +		none.WaitWithDefaultTimeout() | ||||
| +		Expect(none).Should(ExitCleanly()) | ||||
| +		Expect(none.OutputToString()).Should(ContainSubstring("none")) | ||||
| + | ||||
| +		// Check if Container and Stage Container exist | ||||
| +		count := podmanTest.Podman([]string{"ps", "-aq", "--external"}) | ||||
| +		count.WaitWithDefaultTimeout() | ||||
| +		Expect(count).Should(ExitCleanly()) | ||||
| +		Expect(count.OutputToStringArray()).To(HaveLen(3)) | ||||
| + | ||||
| +		prune := podmanTest.Podman([]string{"system", "prune", "--build", "-f"}) | ||||
| +		prune.WaitWithDefaultTimeout() | ||||
| +		Expect(prune).Should(ExitCleanly()) | ||||
| + | ||||
| +		// Container should still exist, but no stage containers | ||||
| +		count = podmanTest.Podman([]string{"ps", "-aq", "--external"}) | ||||
| +		count.WaitWithDefaultTimeout() | ||||
| +		Expect(count).Should(ExitCleanly()) | ||||
| +		Expect(count.OutputToString()).To(BeEmpty()) | ||||
| + | ||||
| +		Expect(podmanTest.NumberOfContainers()).To(Equal(0)) | ||||
| + | ||||
| +		after := podmanTest.Podman([]string{"images", "-a"}) | ||||
| +		after.WaitWithDefaultTimeout() | ||||
| +		Expect(after).Should(ExitCleanly()) | ||||
| +		Expect(after.OutputToString()).ShouldNot(ContainSubstring("none")) | ||||
| +		Expect(after.OutputToString()).Should(ContainSubstring("notleaker")) | ||||
| +	}) | ||||
|  }) | ||||
|  | ||||
| @ -1,21 +0,0 @@ | ||||
| diff --git a/pkg/rootless/rootless_linux.c b/pkg/rootless/rootless_linux.c | ||||
| index 4f71d49e5c..3d74af6a6c 100644 | ||||
| --- a/pkg/rootless/rootless_linux.c | ||||
| +++ b/pkg/rootless/rootless_linux.c | ||||
| @@ -658,7 +658,7 @@ create_pause_process (const char *pause_pid_file_path, char **argv) | ||||
|    if (pipe (p) < 0) | ||||
|      return -1; | ||||
|  | ||||
| -  pid = fork (); | ||||
| +  pid = syscall_clone (SIGCHLD, NULL); | ||||
|    if (pid < 0) | ||||
|      { | ||||
|        close (p[0]); | ||||
| @@ -689,7 +689,7 @@ create_pause_process (const char *pause_pid_file_path, char **argv) | ||||
|        close (p[0]); | ||||
|  | ||||
|        setsid (); | ||||
| -      pid = fork (); | ||||
| +      pid = syscall_clone (SIGCHLD, NULL); | ||||
|        if (pid < 0) | ||||
|          _exit (EXIT_FAILURE); | ||||
							
								
								
									
97  .github/scripts/s390x-ci/tests_list.py  (vendored)
							| @ -1,97 +0,0 @@ | ||||
| #!/usr/bin/env python3 | ||||
|  | ||||
| import os | ||||
| import re | ||||
| import sys | ||||
|  | ||||
|  | ||||
| sys.path.insert(1, os.path.join(sys.path[0], "..", "..", "..")) | ||||
|  | ||||
| from tools.testing.discover_tests import TESTS | ||||
|  | ||||
|  | ||||
| skip_list = [ | ||||
|     # these tests fail due to various reasons | ||||
|     "dynamo/test_misc", | ||||
|     "inductor/test_aot_inductor", | ||||
|     "inductor/test_cpu_repro", | ||||
|     "inductor/test_cpu_select_algorithm", | ||||
|     "inductor/test_aot_inductor_arrayref", | ||||
|     "inductor/test_torchinductor_codegen_dynamic_shapes", | ||||
|     "lazy/test_meta_kernel", | ||||
|     "onnx/test_utility_funs", | ||||
|     "profiler/test_profiler", | ||||
|     "test_ao_sparsity", | ||||
|     "test_cpp_extensions_open_device_registration", | ||||
|     "test_jit", | ||||
|     "test_metal", | ||||
|     "test_mps", | ||||
|     "dynamo/test_torchrec", | ||||
|     "inductor/test_aot_inductor_utils", | ||||
|     "inductor/test_coordinate_descent_tuner", | ||||
|     "test_jiterator", | ||||
|     # these tests run long and fail in addition to that | ||||
|     "dynamo/test_dynamic_shapes", | ||||
|     "test_quantization", | ||||
|     "inductor/test_torchinductor", | ||||
|     "inductor/test_torchinductor_dynamic_shapes", | ||||
|     "inductor/test_torchinductor_opinfo", | ||||
|     "test_binary_ufuncs", | ||||
|     "test_unary_ufuncs", | ||||
|     # these tests fail when cuda is not available | ||||
|     "inductor/test_cudacodecache", | ||||
|     "inductor/test_inductor_utils", | ||||
|     "inductor/test_inplacing_pass", | ||||
|     "inductor/test_kernel_benchmark", | ||||
|     "inductor/test_max_autotune", | ||||
|     "inductor/test_move_constructors_to_cuda", | ||||
|     "inductor/test_multi_kernel", | ||||
|     "inductor/test_pattern_matcher", | ||||
|     "inductor/test_perf", | ||||
|     "inductor/test_select_algorithm", | ||||
|     "inductor/test_snode_runtime", | ||||
|     "inductor/test_triton_wrapper", | ||||
|     # these tests fail when mkldnn is not available | ||||
|     "inductor/test_custom_post_grad_passes", | ||||
|     "inductor/test_mkldnn_pattern_matcher", | ||||
|     # lacks quantization support | ||||
|     "onnx/test_models_quantized_onnxruntime", | ||||
|     "onnx/test_pytorch_onnx_onnxruntime", | ||||
|     # https://github.com/pytorch/pytorch/issues/102078 | ||||
|     "test_decomp", | ||||
|     # https://github.com/pytorch/pytorch/issues/146698 | ||||
|     "test_model_exports_to_core_aten", | ||||
|     # runs very long, skip for now | ||||
|     "inductor/test_layout_optim", | ||||
|     "test_fx", | ||||
|     # some false errors | ||||
|     "doctests", | ||||
| ] | ||||
|  | ||||
| skip_list_regex = [ | ||||
|     # distributed tests fail randomly | ||||
|     "distributed/.*", | ||||
| ] | ||||
|  | ||||
| all_testfiles = sorted(TESTS) | ||||
|  | ||||
| filtered_testfiles = [] | ||||
|  | ||||
| for filename in all_testfiles: | ||||
|     if filename in skip_list: | ||||
|         continue | ||||
|  | ||||
|     regex_filtered = False | ||||
|  | ||||
|     for regex_string in skip_list_regex: | ||||
|         if re.fullmatch(regex_string, filename): | ||||
|             regex_filtered = True | ||||
|             break | ||||
|  | ||||
|     if regex_filtered: | ||||
|         continue | ||||
|  | ||||
|     filtered_testfiles.append(filename) | ||||
|  | ||||
| for filename in filtered_testfiles: | ||||
|     print('    "' + filename + '",') | ||||
							
								
								
									
10  .github/scripts/trymerge.py  (vendored)
							| @ -819,9 +819,10 @@ class GitHubPR: | ||||
|                     cursor=info["reviews"]["pageInfo"]["startCursor"], | ||||
|                 ) | ||||
|                 info = rc["data"]["repository"]["pullRequest"] | ||||
|         reviews = { | ||||
|             author: state for author, state in self._reviews if state != "COMMENTED" | ||||
|         } | ||||
|         reviews = {} | ||||
|         for author, state in self._reviews: | ||||
|             if state != "COMMENTED": | ||||
|                 reviews[author] = state | ||||
|         return list(reviews.items()) | ||||
|  | ||||
|     def get_approved_by(self) -> list[str]: | ||||
| @ -2281,8 +2282,7 @@ def merge( | ||||
|         except MandatoryChecksMissingError as ex: | ||||
|             last_exception = str(ex) | ||||
|             print( | ||||
|                 f"Merge of https://github.com/{pr.org}/{pr.project}/pull/{pr.pr_num} failed due to: {ex}. Retrying in 5 min", | ||||
|                 flush=True, | ||||
|                 f"Merge of https://github.com/{pr.org}/{pr.project}/pull/{pr.pr_num} failed due to: {ex}. Retrying in 5 min" | ||||
|             ) | ||||
|             time.sleep(5 * 60) | ||||
|     # Finally report timeout back | ||||
|  | ||||
							
								
								
									
3  .github/scripts/windows/build_triton.bat  (vendored)
							| @ -9,7 +9,8 @@ if "%PY_VERS%" == "3.13t" ( | ||||
| ) else ( | ||||
|     call conda create -n %PYTHON_PREFIX% -y -c=conda-forge python=%PY_VERS% | ||||
| ) | ||||
| call conda run -n %PYTHON_PREFIX% pip install wheel pybind11 certifi cython cmake setuptools==72.1.0 ninja | ||||
| :: Fix cmake version for issue https://github.com/pytorch/pytorch/issues/150480 | ||||
| call conda run -n %PYTHON_PREFIX% pip install wheel pybind11 certifi cython cmake==3.31.6 setuptools==72.1.0 ninja | ||||
|  | ||||
| dir "%VC_INSTALL_PATH%" | ||||
|  | ||||
|  | ||||
							
								
								
									
2  .github/templates/common.yml.j2  (vendored)
							| @ -32,7 +32,7 @@ concurrency: | ||||
| {%- macro setup_ec2_windows() -%} | ||||
|       !{{ display_ec2_information() }} | ||||
|       - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)" | ||||
|         uses: pytorch/test-infra/.github/actions/setup-ssh@main | ||||
|         uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.7 | ||||
|         continue-on-error: true | ||||
|         with: | ||||
|           github-secret: ${{ secrets.GITHUB_TOKEN }} | ||||
|  | ||||
| @ -53,7 +53,7 @@ jobs: | ||||
|   get-label-type: | ||||
|     if: github.repository_owner == 'pytorch' | ||||
|     name: get-label-type | ||||
|     uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main | ||||
|     uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.7 | ||||
|     with: | ||||
|       triggering_actor: ${{ github.triggering_actor }} | ||||
|       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }} | ||||
| @ -147,9 +147,9 @@ jobs: | ||||
|         with: | ||||
|           name: !{{ config["build_name"] }} | ||||
|           path: "${{ runner.temp }}/artifacts/" | ||||
|       !{{ common.checkout(deep_clone=False, directory="pytorch") }} | ||||
|       !{{ common.checkout(deep_clone=False, directory="pytorch", checkout_pr_head=False) }} | ||||
|       - name: Pull Docker image | ||||
|         uses: pytorch/test-infra/.github/actions/pull-docker-image@main | ||||
|         uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.7 | ||||
|         with: | ||||
|           docker-image: !{{ config["container_image"] }} | ||||
|       - name: Test Pytorch binary | ||||
| @ -168,12 +168,12 @@ jobs: | ||||
|         with: | ||||
|           name: !{{ config["build_name"] }} | ||||
|           path: "${{ runner.temp }}/artifacts/" | ||||
|       !{{ common.checkout(deep_clone=False, directory="pytorch") }} | ||||
|       !{{ common.checkout(deep_clone=False, directory="pytorch", checkout_pr_head=False) }} | ||||
|       - name: ROCm set GPU_FLAG | ||||
|         run: | | ||||
|           echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri --group-add video --group-add daemon" >> "${GITHUB_ENV}" | ||||
|       - name: Pull Docker image | ||||
|         uses: pytorch/test-infra/.github/actions/pull-docker-image@main | ||||
|         uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.7 | ||||
|         with: | ||||
|           docker-image: !{{ config["container_image"] }} | ||||
|       - name: Test Pytorch binary | ||||
|  | ||||
| @ -76,7 +76,7 @@ jobs: | ||||
|           elif [ -d "/Applications/Xcode_13.3.1.app" ]; then | ||||
|             echo "DEVELOPER_DIR=/Applications/Xcode_13.3.1.app/Contents/Developer" >> "${GITHUB_ENV}" | ||||
|           fi | ||||
|       !{{ common.checkout(deep_clone=False, directory="pytorch") }} | ||||
|       !{{ common.checkout(deep_clone=False, directory="pytorch", checkout_pr_head=False) }} | ||||
|       - name: Populate binary env | ||||
|         run: | | ||||
|           # shellcheck disable=SC1091 | ||||
|  | ||||
							
								
								
									
6  .github/templates/upload.yml.j2  (vendored)
							| @ -25,6 +25,9 @@ | ||||
|       DOCKER_IMAGE: !{{ config["container_image"] }} | ||||
| {%- endif %} | ||||
| {%- if config["package_type"] == "manywheel" %} | ||||
|   {%- if config["devtoolset"] %} | ||||
|       DESIRED_DEVTOOLSET: !{{ config["devtoolset"] }} | ||||
|   {%- endif %} | ||||
|   {%- if config.use_split_build is defined %} | ||||
|       use_split_build: !{{ config["use_split_build"] }} | ||||
|   {%- endif %} | ||||
| @ -34,6 +37,9 @@ | ||||
|       LIBTORCH_CONFIG: !{{ config["libtorch_config"] }} | ||||
|   {%- endif %} | ||||
|       LIBTORCH_VARIANT: !{{ config["libtorch_variant"] }} | ||||
|   {%- if config["devtoolset"] %} | ||||
|       DESIRED_DEVTOOLSET: !{{ config["devtoolset"] }} | ||||
|   {%- endif %} | ||||
|   {%- if is_windows %} | ||||
|       # This is a dummy value for libtorch to work correctly with our batch scripts | ||||
|       # without this value pip does not get installed for some reason | ||||
|  | ||||
| @ -55,7 +55,7 @@ jobs: | ||||
|   get-label-type: | ||||
|     if: github.repository_owner == 'pytorch' | ||||
|     name: get-label-type | ||||
|     uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main | ||||
|     uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.7 | ||||
|     with: | ||||
|       triggering_actor: ${{ github.triggering_actor }} | ||||
|       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }} | ||||
|  | ||||
| @ -55,7 +55,7 @@ jobs: | ||||
|   get-label-type: | ||||
|     if: github.repository_owner == 'pytorch' | ||||
|     name: get-label-type | ||||
|     uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main | ||||
|     uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.7 | ||||
|     with: | ||||
|       triggering_actor: ${{ github.triggering_actor }} | ||||
|       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }} | ||||
| @ -79,7 +79,7 @@ jobs: | ||||
|     steps: | ||||
|       !{{ common.setup_ec2_windows() }} | ||||
|       !{{ set_runner_specific_vars() }} | ||||
|       !{{ common.checkout(deep_clone=False, directory="pytorch") }} | ||||
|       !{{ common.checkout(deep_clone=False, directory="pytorch", checkout_pr_head=False) }} | ||||
|       - name: Populate binary env | ||||
|         shell: bash | ||||
|         run: | | ||||
| @ -124,7 +124,7 @@ jobs: | ||||
|         with: | ||||
|           name: !{{ config["build_name"] }} | ||||
|           path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}" | ||||
|       !{{ common.checkout(deep_clone=False, directory="pytorch") }} | ||||
|       !{{ common.checkout(deep_clone=False, directory="pytorch", checkout_pr_head=False) }} | ||||
|       - name: Populate binary env | ||||
|         shell: bash | ||||
|         run: | | ||||
|  | ||||
							
								
								
									
14  .github/workflows/_bazel-build-test.yml  (vendored)
							| @ -47,7 +47,7 @@ jobs: | ||||
|       reenabled-issues: ${{ steps.filter.outputs.reenabled-issues }} | ||||
|     steps: | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.7 | ||||
|         with: | ||||
|           fetch-depth: 1 | ||||
|           submodules: false | ||||
| @ -69,25 +69,25 @@ jobs: | ||||
|     runs-on: ${{ matrix.runner }} | ||||
|     steps: | ||||
|       - name: Setup SSH (Click me for login details) | ||||
|         uses: pytorch/test-infra/.github/actions/setup-ssh@main | ||||
|         uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.7 | ||||
|         with: | ||||
|           github-secret: ${{ secrets.GITHUB_TOKEN }} | ||||
|  | ||||
|       # [see note: pytorch repo ref] | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.7 | ||||
|  | ||||
|       - name: Setup Linux | ||||
|         uses: ./.github/actions/setup-linux | ||||
|  | ||||
|       - name: Calculate docker image | ||||
|         id: calculate-docker-image | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.7 | ||||
|         with: | ||||
|           docker-image-name: ${{ inputs.docker-image-name }} | ||||
|  | ||||
|       - name: Pull docker image | ||||
|         uses: pytorch/test-infra/.github/actions/pull-docker-image@main | ||||
|         uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.7 | ||||
|         with: | ||||
|           docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }} | ||||
|  | ||||
| @ -97,7 +97,7 @@ jobs: | ||||
|         run: echo "IN_CONTAINER_RUNNER=$(if [ -f /.inarc ] || [ -f /.incontainer ]; then echo true ; else echo false; fi)" >> "$GITHUB_OUTPUT" | ||||
|  | ||||
|       - name: Install nvidia driver, nvidia-docker runtime, set GPU_FLAG | ||||
|         uses: pytorch/test-infra/.github/actions/setup-nvidia@main | ||||
|         uses: pytorch/test-infra/.github/actions/setup-nvidia@release/2.7 | ||||
|         if: ${{ inputs.cuda-version != 'cpu' && steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'false' }} | ||||
|  | ||||
|       - name: Output disk space left | ||||
| @ -209,5 +209,5 @@ jobs: | ||||
|           file-suffix: bazel-${{ github.job }}_${{ steps.get-job-id.outputs.job-id }} | ||||
|  | ||||
|       - name: Teardown Linux | ||||
|         uses: pytorch/test-infra/.github/actions/teardown-linux@main | ||||
|         uses: pytorch/test-infra/.github/actions/teardown-linux@release/2.7 | ||||
|         if: always() | ||||
|  | ||||
							
								
								
									
16  .github/workflows/_binary-build-linux.yml  (vendored)
							| @ -70,6 +70,10 @@ on: | ||||
|         required: false | ||||
|         type: string | ||||
|         description: Desired libtorch variant (for libtorch builds only) | ||||
|       DESIRED_DEVTOOLSET: | ||||
|         required: false | ||||
|         type: string | ||||
|         description: Desired dev toolset | ||||
|       DESIRED_PYTHON: | ||||
|         required: false | ||||
|         type: string | ||||
| @ -100,6 +104,7 @@ jobs: | ||||
|       SKIP_ALL_TESTS: 1 | ||||
|       LIBTORCH_CONFIG: ${{ inputs.LIBTORCH_CONFIG }} | ||||
|       LIBTORCH_VARIANT: ${{ inputs.LIBTORCH_VARIANT }} | ||||
|       DESIRED_DEVTOOLSET: ${{ inputs.DESIRED_DEVTOOLSET }} | ||||
|       DESIRED_PYTHON: ${{ inputs.DESIRED_PYTHON }} | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: ${{ inputs.PYTORCH_EXTRA_INSTALL_REQUIREMENTS }} | ||||
|       ALPINE_IMAGE: ${{ inputs.ALPINE_IMAGE }} | ||||
| @ -125,6 +130,7 @@ jobs: | ||||
|             echo "SKIP_ALL_TESTS=${{ env.SKIP_ALL_TESTS }}" | ||||
|             echo "LIBTORCH_CONFIG=${{ env.LIBTORCH_CONFIG }}" | ||||
|             echo "LIBTORCH_VARIANT=${{ env.LIBTORCH_VARIANT }}" | ||||
|             echo "DESIRED_DEVTOOLSET=${{ env.DESIRED_DEVTOOLSET }}" | ||||
|             echo "DESIRED_PYTHON=${{ env.DESIRED_PYTHON }}" | ||||
|             echo "PYTORCH_EXTRA_INSTALL_REQUIREMENTS=${{ env.PYTORCH_EXTRA_INSTALL_REQUIREMENTS }}" | ||||
|             echo "ALPINE_IMAGE=${{ env.ALPINE_IMAGE }}" | ||||
| @ -144,13 +150,13 @@ jobs: | ||||
|  | ||||
|       - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)" | ||||
|         if: inputs.build_environment != 'linux-s390x-binary-manywheel' | ||||
|         uses: pytorch/test-infra/.github/actions/setup-ssh@main | ||||
|         uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.7 | ||||
|         continue-on-error: true | ||||
|         with: | ||||
|           github-secret: ${{ secrets.github-token }} | ||||
|  | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.7 | ||||
|         with: | ||||
|           no-sudo: ${{ inputs.build_environment == 'linux-aarch64-binary-manywheel' || inputs.build_environment == 'linux-s390x-binary-manywheel' }} | ||||
|  | ||||
| @ -180,7 +186,6 @@ jobs: | ||||
|       - name: Checkout PyTorch to pytorch dir | ||||
|         uses: actions/checkout@v4 | ||||
|         with: | ||||
|           ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} | ||||
|           submodules: recursive | ||||
|           path: pytorch | ||||
|           show-progress: false | ||||
| @ -205,7 +210,7 @@ jobs: | ||||
|  | ||||
|       - name: Pull Docker image | ||||
|         if: ${{ steps.filter.outputs.is-test-matrix-empty == 'False' && inputs.build_environment != 'linux-s390x-binary-manywheel' }} | ||||
|         uses: pytorch/test-infra/.github/actions/pull-docker-image@main | ||||
|         uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.7 | ||||
|         with: | ||||
|           docker-image: ${{ inputs.DOCKER_IMAGE }} | ||||
|  | ||||
| @ -218,6 +223,7 @@ jobs: | ||||
|             -e BINARY_ENV_FILE \ | ||||
|             -e BUILD_ENVIRONMENT \ | ||||
|             -e DESIRED_CUDA \ | ||||
|             -e DESIRED_DEVTOOLSET \ | ||||
|             -e DESIRED_PYTHON \ | ||||
|             -e GITHUB_ACTIONS \ | ||||
|             -e GPU_ARCH_TYPE \ | ||||
| @ -260,7 +266,7 @@ jobs: | ||||
|  | ||||
|       - name: Teardown Linux | ||||
|         if: always() && inputs.build_environment != 'linux-s390x-binary-manywheel' | ||||
|         uses: pytorch/test-infra/.github/actions/teardown-linux@main | ||||
|         uses: pytorch/test-infra/.github/actions/teardown-linux@release/2.7 | ||||
|  | ||||
|       - name: Chown workspace | ||||
|         if: always() && inputs.build_environment != 'linux-s390x-binary-manywheel' | ||||
|  | ||||
							
								
								
									
17  .github/workflows/_binary-test-linux.yml  (vendored)
							| @ -47,6 +47,10 @@ on: | ||||
|         required: false | ||||
|         type: string | ||||
|         description: Desired libtorch variant (for libtorch builds only) | ||||
|       DESIRED_DEVTOOLSET: | ||||
|         required: false | ||||
|         type: string | ||||
|         description: Desired dev toolset | ||||
|       DESIRED_PYTHON: | ||||
|         required: false | ||||
|         type: string | ||||
| @ -88,6 +92,7 @@ jobs: | ||||
|       SKIP_ALL_TESTS: 1 | ||||
|       LIBTORCH_CONFIG: ${{ inputs.LIBTORCH_CONFIG }} | ||||
|       LIBTORCH_VARIANT: ${{ inputs.LIBTORCH_VARIANT }} | ||||
|       DESIRED_DEVTOOLSET: ${{ inputs.DESIRED_DEVTOOLSET }} | ||||
|       DESIRED_PYTHON: ${{ inputs.DESIRED_PYTHON }} | ||||
|       ALPINE_IMAGE: ${{ inputs.ALPINE_IMAGE }} | ||||
|       AWS_DEFAULT_REGION: us-east-1 | ||||
| @ -113,6 +118,7 @@ jobs: | ||||
|             echo "SKIP_ALL_TESTS=${{ env.SKIP_ALL_TESTS }}" | ||||
|             echo "LIBTORCH_CONFIG=${{ env.LIBTORCH_CONFIG }}" | ||||
|             echo "LIBTORCH_VARIANT=${{ env.LIBTORCH_VARIANT }}" | ||||
|             echo "DESIRED_DEVTOOLSET=${{ env.DESIRED_DEVTOOLSET }}" | ||||
|             echo "DESIRED_PYTHON=${{ env.DESIRED_PYTHON }}" | ||||
|  | ||||
|             echo "ALPINE_IMAGE=${{ env.ALPINE_IMAGE }}" | ||||
| @ -127,14 +133,14 @@ jobs: | ||||
|  | ||||
|       - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)" | ||||
|         if: inputs.build_environment != 'linux-s390x-binary-manywheel' | ||||
|         uses: pytorch/test-infra/.github/actions/setup-ssh@main | ||||
|         uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.7 | ||||
|         continue-on-error: true | ||||
|         with: | ||||
|           github-secret: ${{ secrets.github-token }} | ||||
|  | ||||
|         # Setup the environment | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.7 | ||||
|         with: | ||||
|           no-sudo: ${{ inputs.build_environment == 'linux-aarch64-binary-manywheel' || inputs.build_environment == 'linux-s390x-binary-manywheel' }} | ||||
|  | ||||
| @ -157,7 +163,6 @@ jobs: | ||||
|       - name: Checkout PyTorch to pytorch dir | ||||
|         uses: actions/checkout@v4 | ||||
|         with: | ||||
|           ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} | ||||
|           submodules: recursive | ||||
|           show-progress: false | ||||
|           path: pytorch | ||||
| @ -188,12 +193,12 @@ jobs: | ||||
|           path: "${{ runner.temp }}/artifacts/" | ||||
|  | ||||
|       - name: Install nvidia driver, nvidia-docker runtime, set GPU_FLAG | ||||
|         uses: pytorch/test-infra/.github/actions/setup-nvidia@main | ||||
|         uses: pytorch/test-infra/.github/actions/setup-nvidia@release/2.7 | ||||
|         if: ${{ inputs.GPU_ARCH_TYPE == 'cuda' && steps.filter.outputs.is-test-matrix-empty == 'False' }} | ||||
|  | ||||
|       - name: Pull Docker image | ||||
|         if: ${{ steps.filter.outputs.is-test-matrix-empty == 'False' && inputs.build_environment != 'linux-s390x-binary-manywheel' }} | ||||
|         uses: pytorch/test-infra/.github/actions/pull-docker-image@main | ||||
|         uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.7 | ||||
|         with: | ||||
|           docker-image: ${{ inputs.DOCKER_IMAGE }} | ||||
|  | ||||
| @ -203,7 +208,7 @@ jobs: | ||||
|  | ||||
|       - name: Teardown Linux | ||||
|         if: always() && inputs.build_environment != 'linux-s390x-binary-manywheel' | ||||
|         uses: pytorch/test-infra/.github/actions/teardown-linux@main | ||||
|         uses: pytorch/test-infra/.github/actions/teardown-linux@release/2.7 | ||||
|  | ||||
|       - name: Chown workspace | ||||
|         if: always() && inputs.build_environment != 'linux-s390x-binary-manywheel' | ||||
|  | ||||
							
								
								
									
7  .github/workflows/_binary-upload.yml  (vendored)
							| @ -43,6 +43,10 @@ on: | ||||
|         required: false | ||||
|         type: string | ||||
|         description: Desired libtorch variant (for libtorch builds only) | ||||
|       DESIRED_DEVTOOLSET: | ||||
|         required: false | ||||
|         type: string | ||||
|         description: Desired dev toolset | ||||
|       DESIRED_PYTHON: | ||||
|         required: false | ||||
|         type: string | ||||
| @ -76,6 +80,7 @@ jobs: | ||||
|       SKIP_ALL_TESTS: 1 | ||||
|       LIBTORCH_CONFIG: ${{ inputs.LIBTORCH_CONFIG }} | ||||
|       LIBTORCH_VARIANT: ${{ inputs.LIBTORCH_VARIANT }} | ||||
|       DESIRED_DEVTOOLSET: ${{ inputs.DESIRED_DEVTOOLSET }} | ||||
|       DESIRED_PYTHON: ${{ inputs.DESIRED_PYTHON }} | ||||
|       BINARY_ENV_FILE: /tmp/env | ||||
|       GITHUB_TOKEN: ${{ secrets.github-token }} | ||||
| @ -85,7 +90,7 @@ jobs: | ||||
|       USE_SPLIT_BUILD: ${{ inputs.use_split_build }} | ||||
|     steps: | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.7 | ||||
|         with: | ||||
|           no-sudo: true | ||||
|  | ||||
|  | ||||
							
								
								
									
10  .github/workflows/_docs.yml  (vendored)
							| @ -84,7 +84,7 @@ jobs: | ||||
|     name: build-docs-${{ matrix.docs_type }}-${{ inputs.push }} | ||||
|     steps: | ||||
|       - name: Setup SSH (Click me for login details) | ||||
|         uses: pytorch/test-infra/.github/actions/setup-ssh@main | ||||
|         uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.7 | ||||
|         with: | ||||
|           github-secret: ${{ secrets.GITHUB_TOKEN }} | ||||
|           instructions: | | ||||
| @ -95,7 +95,7 @@ jobs: | ||||
|  | ||||
|       # [see note: pytorch repo ref] | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.7 | ||||
|  | ||||
|       - name: Setup Linux | ||||
|         uses: ./.github/actions/setup-linux | ||||
| @ -110,12 +110,12 @@ jobs: | ||||
|  | ||||
|       - name: Calculate docker image | ||||
|         id: calculate-docker-image | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.7 | ||||
|         with: | ||||
|           docker-image-name: ${{ inputs.docker-image }} | ||||
|  | ||||
|       - name: Pull docker image | ||||
|         uses: pytorch/test-infra/.github/actions/pull-docker-image@main | ||||
|         uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.7 | ||||
|         with: | ||||
|           docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }} | ||||
|  | ||||
| @ -222,5 +222,5 @@ jobs: | ||||
|           s3-prefix: pytorch/pytorch/${{ github.event.pull_request.number }}/functorchdocs | ||||
|  | ||||
|       - name: Teardown Linux | ||||
|         uses: pytorch/test-infra/.github/actions/teardown-linux@main | ||||
|         uses: pytorch/test-infra/.github/actions/teardown-linux@release/2.7 | ||||
|         if: always() | ||||
|  | ||||
							
								
								
									
10  .github/workflows/_linux-build.yml  (vendored)
							| @ -106,7 +106,7 @@ jobs: | ||||
|       test-matrix: ${{ steps.filter.outputs.test-matrix }} | ||||
|     steps: | ||||
|       - name: Setup SSH (Click me for login details) | ||||
|         uses: pytorch/test-infra/.github/actions/setup-ssh@main | ||||
|         uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.7 | ||||
|         if: inputs.build-environment != 'linux-s390x-binary-manywheel' | ||||
|         with: | ||||
|           github-secret: ${{ secrets.GITHUB_TOKEN }} | ||||
| @ -116,7 +116,7 @@ jobs: | ||||
|       # checkout because when we run this action we don't *have* a local | ||||
|       # checkout. In other cases you should prefer a local checkout. | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.7 | ||||
|         with: | ||||
|           no-sudo: true | ||||
|  | ||||
| @ -134,7 +134,7 @@ jobs: | ||||
|  | ||||
|       - name: Calculate docker image | ||||
|         id: calculate-docker-image | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.7 | ||||
|         if: inputs.build-environment != 'linux-s390x-binary-manywheel' | ||||
|         with: | ||||
|           docker-image-name: ${{ inputs.docker-image-name }} | ||||
| @ -150,7 +150,7 @@ jobs: | ||||
|           echo "docker pull ghcr.io/pytorch/ci-image:${tag/:/-}" | ||||
|  | ||||
|       - name: Pull docker image | ||||
|         uses: pytorch/test-infra/.github/actions/pull-docker-image@main | ||||
|         uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.7 | ||||
|         if: inputs.build-environment != 'linux-s390x-binary-manywheel' | ||||
|         with: | ||||
|           docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }} | ||||
| @ -312,7 +312,7 @@ jobs: | ||||
|           build-time: ${{ steps.build.outputs.build_time }} | ||||
|  | ||||
|       - name: Teardown Linux | ||||
|         uses: pytorch/test-infra/.github/actions/teardown-linux@main | ||||
|         uses: pytorch/test-infra/.github/actions/teardown-linux@release/2.7 | ||||
|         if: always() && inputs.build-environment != 'linux-s390x-binary-manywheel' | ||||
|  | ||||
|       - name: Cleanup docker | ||||
|  | ||||
							
								
								
									
14  .github/workflows/_linux-test.yml  (vendored)
							| @ -80,7 +80,7 @@ jobs: | ||||
|     timeout-minutes: ${{ matrix.mem_leak_check == 'mem_leak_check' && 600 || inputs.timeout-minutes }} | ||||
|     steps: | ||||
|       - name: Setup SSH (Click me for login details) | ||||
|         uses: pytorch/test-infra/.github/actions/setup-ssh@main | ||||
|         uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.7 | ||||
|         if: ${{ !contains(matrix.runner, 'gcp.a100') && inputs.build-environment != 'linux-s390x-binary-manywheel' }} | ||||
|         with: | ||||
|           github-secret: ${{ secrets.GITHUB_TOKEN }} | ||||
| @ -89,7 +89,7 @@ jobs: | ||||
|               docker exec -it $(docker container ps --format '{{.ID}}') bash | ||||
|  | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.7 | ||||
|         with: | ||||
|           no-sudo: true | ||||
|  | ||||
| @ -107,7 +107,7 @@ jobs: | ||||
|  | ||||
|       - name: Calculate docker image | ||||
|         id: calculate-docker-image | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.7 | ||||
|         if: inputs.build-environment != 'linux-s390x-binary-manywheel' | ||||
|         with: | ||||
|           docker-image-name: ${{ inputs.docker-image }} | ||||
| @ -123,7 +123,7 @@ jobs: | ||||
|           echo "docker pull ghcr.io/pytorch/ci-image:${tag/:/-}" | ||||
|  | ||||
|       - name: Pull docker image | ||||
|         uses: pytorch/test-infra/.github/actions/pull-docker-image@main | ||||
|         uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.7 | ||||
|         if: inputs.build-environment != 'linux-s390x-binary-manywheel' | ||||
|         with: | ||||
|           docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }} | ||||
| @ -135,7 +135,7 @@ jobs: | ||||
|  | ||||
|       - name: Install nvidia driver, nvidia-docker runtime, set GPU_FLAG | ||||
|         id: install-nvidia-driver | ||||
|         uses: pytorch/test-infra/.github/actions/setup-nvidia@main | ||||
|         uses: pytorch/test-infra/.github/actions/setup-nvidia@release/2.7 | ||||
|         if: ${{ contains(inputs.build-environment, 'cuda') && !contains(matrix.config, 'nogpu') && steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'false' }} | ||||
|  | ||||
|       - name: Setup GPU_FLAG for docker run | ||||
| @ -371,7 +371,7 @@ jobs: | ||||
|           job_identifier: ${{ github.workflow }}_${{ inputs.build-environment }} | ||||
|  | ||||
|       - name: Upload the benchmark results | ||||
|         uses: pytorch/test-infra/.github/actions/upload-benchmark-results@main | ||||
|         uses: pytorch/test-infra/.github/actions/upload-benchmark-results@release/2.7 | ||||
|         with: | ||||
|           benchmark-results-dir: test/test-reports | ||||
|           dry-run: false | ||||
| @ -428,7 +428,7 @@ jobs: | ||||
|           workflow_attempt: ${{github.run_attempt}} | ||||
|  | ||||
|       - name: Teardown Linux | ||||
|         uses: pytorch/test-infra/.github/actions/teardown-linux@main | ||||
|         uses: pytorch/test-infra/.github/actions/teardown-linux@release/2.7 | ||||
|         if: always() && steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'false' | ||||
|  | ||||
|       # NB: We are currently having an intermittent GPU-related issue on G5 runners with | ||||
|  | ||||
							
								
								
									
23  .github/workflows/_mac-build.yml  (vendored)
							| @ -33,6 +33,10 @@ on: | ||||
|         default: "3.9" | ||||
|         description: | | ||||
|           The python version to be used. Will be 3.9 by default | ||||
|       environment-file: | ||||
|         required: false | ||||
|         type: string | ||||
|         description: Set the conda environment file used to setup macOS build. | ||||
|       test-matrix: | ||||
|         required: false | ||||
|         type: string | ||||
| @ -67,11 +71,11 @@ jobs: | ||||
|       test-matrix: ${{ steps.filter.outputs.test-matrix }} | ||||
|     steps: | ||||
|       - name: Clean up disk space before running MacOS workflow | ||||
|         uses: pytorch/test-infra/.github/actions/check-disk-space@main | ||||
|         uses: pytorch/test-infra/.github/actions/check-disk-space@release/2.7 | ||||
|  | ||||
|       # [see note: pytorch repo ref] | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.7 | ||||
|  | ||||
|       - name: Set xcode version | ||||
|         env: | ||||
| @ -82,12 +86,23 @@ jobs: | ||||
|           fi | ||||
|  | ||||
|       - name: Setup miniconda | ||||
|         uses: pytorch/test-infra/.github/actions/setup-miniconda@main | ||||
|         if: inputs.environment-file == '' | ||||
|         uses: pytorch/test-infra/.github/actions/setup-miniconda@release/2.7 | ||||
|         with: | ||||
|           python-version: ${{ inputs.python-version }} | ||||
|           environment-file: .github/requirements/conda-env-${{ runner.os }}-${{ runner.arch }} | ||||
|           pip-requirements-file: .github/requirements/pip-requirements-${{ runner.os }}.txt | ||||
|  | ||||
|       # This option is used when cross-compiling arm64 from x86-64. Specifically, we need arm64 conda | ||||
|       # environment even though the arch is x86-64 | ||||
|       - name: Setup miniconda using the provided environment file | ||||
|         if: inputs.environment-file != '' | ||||
|         uses: pytorch/test-infra/.github/actions/setup-miniconda@release/2.7 | ||||
|         with: | ||||
|           python-version: ${{ inputs.python-version }} | ||||
|           environment-file: ${{ inputs.environment-file }} | ||||
|           pip-requirements-file: .github/requirements/pip-requirements-${{ runner.os }}.txt | ||||
|  | ||||
|       - name: Install sccache (only for non-forked PRs, and pushes to trunk) | ||||
|         uses: nick-fields/retry@v3.0.0 | ||||
|         if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }} | ||||
| @ -192,4 +207,4 @@ jobs: | ||||
|       - name: Clean up disk space | ||||
|         if: always() | ||||
|         continue-on-error: true | ||||
|         uses: pytorch/test-infra/.github/actions/check-disk-space@main | ||||
|         uses: pytorch/test-infra/.github/actions/check-disk-space@release/2.7 | ||||
|  | ||||
							
								
								
									
6  .github/workflows/_mac-test-mps.yml  (vendored)
							| @ -41,7 +41,7 @@ jobs: | ||||
|       reenabled-issues: ${{ steps.filter.outputs.reenabled-issues }} | ||||
|     steps: | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.7 | ||||
|         with: | ||||
|           submodules: false | ||||
|  | ||||
| @ -82,7 +82,7 @@ jobs: | ||||
|           use-gha: true | ||||
|  | ||||
|       - name: Setup miniconda | ||||
|         uses: pytorch/test-infra/.github/actions/setup-miniconda@main | ||||
|         uses: pytorch/test-infra/.github/actions/setup-miniconda@release/2.7 | ||||
|         with: | ||||
|           python-version: ${{ inputs.python-version }} | ||||
|           environment-file: .github/requirements/conda-env-${{ runner.os }}-${{ runner.arch }} | ||||
| @ -170,4 +170,4 @@ jobs: | ||||
|       - name: Clean up disk space | ||||
|         if: always() | ||||
|         continue-on-error: true | ||||
|         uses: pytorch/test-infra/.github/actions/check-disk-space@main | ||||
|         uses: pytorch/test-infra/.github/actions/check-disk-space@release/2.7 | ||||
|  | ||||
							
								
								
									
10  .github/workflows/_mac-test.yml  (vendored)
							| @ -82,11 +82,11 @@ jobs: | ||||
|           done | ||||
|  | ||||
|       - name: Clean up disk space before running MacOS workflow | ||||
|         uses: pytorch/test-infra/.github/actions/check-disk-space@main | ||||
|         uses: pytorch/test-infra/.github/actions/check-disk-space@release/2.7 | ||||
|  | ||||
|       # [see note: pytorch repo ref] | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.7 | ||||
|  | ||||
|       - name: Start monitoring script | ||||
|         id: monitor-script | ||||
| @ -109,7 +109,7 @@ jobs: | ||||
|           use-gha: true | ||||
|  | ||||
|       - name: Setup miniconda | ||||
|         uses: pytorch/test-infra/.github/actions/setup-miniconda@main | ||||
|         uses: pytorch/test-infra/.github/actions/setup-miniconda@release/2.7 | ||||
|         with: | ||||
|           python-version: ${{ inputs.python-version }} | ||||
|           environment-file: .github/requirements/conda-env-${{ runner.os }}-${{ runner.arch }} | ||||
| @ -224,7 +224,7 @@ jobs: | ||||
|           file-suffix: ${{ github.job }}-${{ matrix.config }}-${{ matrix.shard }}-${{ matrix.num_shards }}-${{ matrix.runner }}_${{ steps.get-job-id.outputs.job-id }} | ||||
|  | ||||
|       - name: Upload the benchmark results | ||||
|         uses: pytorch/test-infra/.github/actions/upload-benchmark-results@main | ||||
|         uses: pytorch/test-infra/.github/actions/upload-benchmark-results@release/2.7 | ||||
|         with: | ||||
|           benchmark-results-dir: test/test-reports | ||||
|           dry-run: false | ||||
| @ -234,4 +234,4 @@ jobs: | ||||
|       - name: Clean up disk space | ||||
|         if: always() | ||||
|         continue-on-error: true | ||||
|         uses: pytorch/test-infra/.github/actions/check-disk-space@main | ||||
|         uses: pytorch/test-infra/.github/actions/check-disk-space@release/2.7 | ||||
|  | ||||
							
								
								
									
8  .github/workflows/_rocm-test.yml  (vendored)
							| @ -70,7 +70,7 @@ jobs: | ||||
|     steps: | ||||
|       # [see note: pytorch repo ref] | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.7 | ||||
|         with: | ||||
|           no-sudo: true | ||||
|  | ||||
| @ -92,12 +92,12 @@ jobs: | ||||
|  | ||||
|       - name: Calculate docker image | ||||
|         id: calculate-docker-image | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.7 | ||||
|         with: | ||||
|           docker-image-name: ${{ inputs.docker-image }} | ||||
|  | ||||
|       - name: Pull docker image | ||||
|         uses: pytorch/test-infra/.github/actions/pull-docker-image@main | ||||
|         uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.7 | ||||
|         with: | ||||
|           docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }} | ||||
|  | ||||
| @ -302,7 +302,7 @@ jobs: | ||||
|           aws-region: us-east-1 | ||||
|  | ||||
|       - name: Upload the benchmark results | ||||
|         uses: pytorch/test-infra/.github/actions/upload-benchmark-results@main | ||||
|         uses: pytorch/test-infra/.github/actions/upload-benchmark-results@release/2.7 | ||||
|         with: | ||||
|           benchmark-results-dir: test/test-reports | ||||
|           dry-run: false | ||||
|  | ||||
							
								
								
									
2  .github/workflows/_runner-determinator.yml  vendored
| @ -54,7 +54,7 @@ jobs: | ||||
|       PR_NUMBER: ${{ github.event.pull_request.number }} | ||||
|     steps: | ||||
|       # - name: Checkout PyTorch | ||||
|       #   uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|       #   uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.7 | ||||
|       #   with: | ||||
|       #     fetch-depth: 1 | ||||
|       #     submodules: true | ||||
|  | ||||
							
								
								
									
6  .github/workflows/_win-build.yml  vendored
| @ -84,10 +84,10 @@ jobs: | ||||
|           git config --global core.fsmonitor false | ||||
|  | ||||
|       - name: Clean up leftover processes on non-ephemeral Windows runner | ||||
|         uses: pytorch/test-infra/.github/actions/cleanup-runner@main | ||||
|         uses: pytorch/test-infra/.github/actions/cleanup-runner@release/2.7 | ||||
|  | ||||
|       - name: Setup SSH (Click me for login details) | ||||
|         uses: pytorch/test-infra/.github/actions/setup-ssh@main | ||||
|         uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.7 | ||||
|         with: | ||||
|           github-secret: ${{ secrets.GITHUB_TOKEN }} | ||||
|           instructions: | | ||||
| @ -102,7 +102,7 @@ jobs: | ||||
|  | ||||
|       # [see note: pytorch repo ref] | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.7 | ||||
|         with: | ||||
|           no-sudo: true | ||||
|  | ||||
|  | ||||
							
								
								
									
6  .github/workflows/_win-test.yml  vendored
| @ -66,10 +66,10 @@ jobs: | ||||
|           git config --global core.fsmonitor false | ||||
|  | ||||
|       - name: Clean up leftover processes on non-ephemeral Windows runner | ||||
|         uses: pytorch/test-infra/.github/actions/cleanup-runner@main | ||||
|         uses: pytorch/test-infra/.github/actions/cleanup-runner@release/2.7 | ||||
|  | ||||
|       - name: Setup SSH (Click me for login details) | ||||
|         uses: pytorch/test-infra/.github/actions/setup-ssh@main | ||||
|         uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.7 | ||||
|         with: | ||||
|           github-secret: ${{ secrets.GITHUB_TOKEN }} | ||||
|           instructions: | | ||||
| @ -85,7 +85,7 @@ jobs: | ||||
|  | ||||
|       # [see note: pytorch repo ref] | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.7 | ||||
|         with: | ||||
|           no-sudo: true | ||||
|  | ||||
|  | ||||
							
								
								
									
6  .github/workflows/_xpu-test.yml  vendored
| @ -62,7 +62,7 @@ jobs: | ||||
|     steps: | ||||
|       # [see note: pytorch repo ref] | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.7 | ||||
|  | ||||
|       - name: Setup XPU | ||||
|         uses: ./.github/actions/setup-xpu | ||||
| @ -80,12 +80,12 @@ jobs: | ||||
|  | ||||
|       - name: Calculate docker image | ||||
|         id: calculate-docker-image | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.7 | ||||
|         with: | ||||
|           docker-image-name: ${{ inputs.docker-image }} | ||||
|  | ||||
|       - name: Pull docker image | ||||
|         uses: pytorch/test-infra/.github/actions/pull-docker-image@main | ||||
|         uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.7 | ||||
|         with: | ||||
|           docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }} | ||||
|  | ||||
|  | ||||
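The calculate/pull pair above also shows how one step's output feeds the next: the `Calculate docker image` step is given an `id`, and the `Pull docker image` step reads `steps.<id>.outputs.docker-image`. A reduced sketch of that handoff, assuming the calculate action exposes a `docker-image` output as the usage above implies:

steps:
  # The id makes this step's outputs addressable from later steps.
  - name: Calculate docker image
    id: calculate-docker-image
    uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.7
    with:
      docker-image-name: ${{ inputs.docker-image }}

  # Consumes the image reference computed above via the steps context.
  - name: Pull docker image
    uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.7
    with:
      docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}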
							
								
								
									
4  .github/workflows/build-almalinux-images.yml  vendored
| @ -41,12 +41,12 @@ jobs: | ||||
|       CUDA_VERSION: ${{ matrix.cuda_version }} | ||||
|     steps: | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.7 | ||||
|         with: | ||||
|           submodules: false | ||||
|       - name: Calculate docker image | ||||
|         if: env.WITH_PUSH == 'false' | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.7 | ||||
|         with: | ||||
|             docker-image-name: almalinux-builder${{ matrix.cuda_version == 'cpu' && '-' || '-cuda' }}${{matrix.cuda_version}} | ||||
|             docker-build-dir:  .ci/docker/almalinux | ||||
|  | ||||
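The `docker-image-name` line above uses the `&& ... || ...` expression idiom as a ternary: when `matrix.cuda_version` is `'cpu'` the suffix is just `-`, otherwise it is `-cuda`, so the names resolve to `almalinux-builder-cpu` or `almalinux-builder-cuda<version>`. A reduced sketch with an assumed example matrix (the version values are illustrative, not the repo's matrix):

strategy:
  matrix:
    cuda_version: ["cpu", "12.8"]   # illustrative values only
steps:
  - name: Calculate docker image
    uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.7
    with:
      # cpu       -> almalinux-builder-cpu
      # e.g. 12.8 -> almalinux-builder-cuda12.8
      docker-image-name: almalinux-builder${{ matrix.cuda_version == 'cpu' && '-' || '-cuda' }}${{ matrix.cuda_version }}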
							
								
								
									
14  .github/workflows/build-libtorch-images.yml  vendored
| @ -32,7 +32,7 @@ jobs: | ||||
|   get-label-type: | ||||
|     if: github.repository_owner == 'pytorch' | ||||
|     name: get-label-type | ||||
|     uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main | ||||
|     uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.7 | ||||
|     with: | ||||
|       triggering_actor: ${{ github.triggering_actor }} | ||||
|       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }} | ||||
| @ -51,12 +51,12 @@ jobs: | ||||
|       GPU_ARCH_VERSION: ${{ matrix.cuda_version }} | ||||
|     steps: | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.7 | ||||
|         with: | ||||
|           submodules: false | ||||
|       - name: Calculate docker image | ||||
|         if: env.WITH_PUSH == 'false' | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.7 | ||||
|         with: | ||||
|             docker-image-name: libtorch-cxx11-builder-cuda${{matrix.cuda_version}} | ||||
|             docker-build-dir:  .ci/docker/libtorch | ||||
| @ -93,12 +93,12 @@ jobs: | ||||
|       GPU_ARCH_VERSION: ${{ matrix.rocm_version }} | ||||
|     steps: | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.7 | ||||
|         with: | ||||
|           submodules: false | ||||
|       - name: Calculate docker image | ||||
|         if: env.WITH_PUSH == 'false' | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.7 | ||||
|         with: | ||||
|             docker-image-name: libtorch-cxx11-builder-rocm${{matrix.rocm_version}} | ||||
|             docker-build-dir:  .ci/docker/libtorch | ||||
| @ -129,12 +129,12 @@ jobs: | ||||
|     runs-on: "${{ needs.get-label-type.outputs.label-type }}linux.9xlarge.ephemeral" | ||||
|     steps: | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.7 | ||||
|         with: | ||||
|           submodules: false | ||||
|       - name: Calculate docker image | ||||
|         if: env.WITH_PUSH == 'false' | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.7 | ||||
|         with: | ||||
|             docker-image-name: libtorch-cxx11-builder-cpu | ||||
|             docker-build-dir:  .ci/docker/libtorch | ||||
|  | ||||
| @ -41,7 +41,7 @@ jobs: | ||||
|       GPU_ARCH_TYPE: cpu-s390x | ||||
|     steps: | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.7 | ||||
|         with: | ||||
|           submodules: false | ||||
|           no-sudo: true | ||||
| @ -62,12 +62,7 @@ jobs: | ||||
|         if: cancelled() | ||||
|         shell: bash | ||||
|         run: | | ||||
|           # If podman build command is interrupted, | ||||
|           # if podman build command is interrupted, | ||||
|           # it can leave a couple of processes still running. | ||||
|           # Order them to stop for clean shutdown. | ||||
|           # It looks like sometimes some processes remain | ||||
|           # after first cleanup. | ||||
|           # Wait a bit and do cleanup again. It looks like it helps. | ||||
|           docker system prune --build -f || true | ||||
|           sleep 60 | ||||
|           # order them to stop for clean shutdown. | ||||
|           docker system prune --build -f || true | ||||
|  | ||||
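The hunk above also relies on the `cancelled()` status function: the cleanup only runs when the podman build was interrupted, and the prune command is allowed to fail without failing the job. A sketch of a cleanup step of that shape (the step name is illustrative; the prune command is the one shown in the hunk):

- name: Cleanup interrupted podman build
  if: cancelled()          # run only when the job was cancelled mid-build
  shell: bash
  run: |
    # An interrupted podman build can leave build processes running;
    # pruning build resources orders them to stop. Failures are ignored.
    docker system prune --build -f || true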
							
								
								
									
32  .github/workflows/build-manywheel-images.yml  vendored
| @ -36,7 +36,7 @@ jobs: | ||||
|   get-label-type: | ||||
|     if: github.repository_owner == 'pytorch' | ||||
|     name: get-label-type | ||||
|     uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main | ||||
|     uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.7 | ||||
|     with: | ||||
|       triggering_actor: ${{ github.triggering_actor }} | ||||
|       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }} | ||||
| @ -57,12 +57,12 @@ jobs: | ||||
|       - name: Purge tools folder (free space for build) | ||||
|         run: rm -rf /opt/hostedtoolcache | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.7 | ||||
|         with: | ||||
|           submodules: false | ||||
|       - name: Calculate docker image | ||||
|         if: env.WITH_PUSH == 'false' | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.7 | ||||
|         with: | ||||
|             docker-image-name: manylinux2_28-builder-cuda${{matrix.cuda_version}} | ||||
|             docker-build-dir:  .ci/docker/manywheel | ||||
| @ -102,7 +102,7 @@ jobs: | ||||
|         uses: actions/checkout@v3 | ||||
|       - name: Calculate docker image | ||||
|         if: env.WITH_PUSH == 'false' | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.7 | ||||
|         with: | ||||
|             docker-image-name: manylinuxaarch64-builder-cuda${{matrix.cuda_version}} | ||||
|             docker-build-dir:  .ci/docker/manywheel | ||||
| @ -139,12 +139,12 @@ jobs: | ||||
|       GPU_ARCH_VERSION: ${{ matrix.rocm_version }} | ||||
|     steps: | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.7 | ||||
|         with: | ||||
|           submodules: false | ||||
|       - name: Calculate docker image | ||||
|         if: env.WITH_PUSH == 'false' | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.7 | ||||
|         with: | ||||
|             docker-image-name: manylinux2_28-builder-rocm${{matrix.rocm_version}} | ||||
|             docker-build-dir:  .ci/docker/manywheel | ||||
| @ -177,12 +177,12 @@ jobs: | ||||
|       GPU_ARCH_TYPE: cpu-manylinux_2_28 | ||||
|     steps: | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.7 | ||||
|         with: | ||||
|           submodules: false | ||||
|       - name: Calculate docker image | ||||
|         if: env.WITH_PUSH == 'false' | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.7 | ||||
|         with: | ||||
|             docker-image-name: manylinux2_28-builder-cpu | ||||
|             docker-build-dir:  .ci/docker/manywheel | ||||
| @ -215,12 +215,12 @@ jobs: | ||||
|       GPU_ARCH_TYPE: cpu-aarch64 | ||||
|     steps: | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.7 | ||||
|         with: | ||||
|           submodules: false | ||||
|       - name: Calculate docker image | ||||
|         if: env.WITH_PUSH == 'false' | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.7 | ||||
|         with: | ||||
|             docker-image-name: manylinuxaarch64-builder-cpu-aarch64 | ||||
|             docker-build-dir:  .ci/docker/manywheel | ||||
| @ -253,12 +253,12 @@ jobs: | ||||
|       GPU_ARCH_TYPE: cpu-aarch64-2_28 | ||||
|     steps: | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.7 | ||||
|         with: | ||||
|           submodules: false | ||||
|       - name: Calculate docker image | ||||
|         if: env.WITH_PUSH == 'false' | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.7 | ||||
|         with: | ||||
|             docker-image-name: manylinux2_28_aarch64-builder-cpu-aarch64 | ||||
|             docker-build-dir:  .ci/docker/manywheel | ||||
| @ -294,12 +294,12 @@ jobs: | ||||
|       GPU_ARCH_TYPE: cpu-cxx11-abi | ||||
|     steps: | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.7 | ||||
|         with: | ||||
|           submodules: false | ||||
|       - name: Calculate docker image | ||||
|         if: env.WITH_PUSH == 'false' | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.7 | ||||
|         with: | ||||
|             docker-image-name: manylinuxcxx11-abi-builder-cpu-cxx11-abi | ||||
|             docker-build-dir:  .ci/docker/manywheel | ||||
| @ -332,12 +332,12 @@ jobs: | ||||
|       GPU_ARCH_TYPE: xpu | ||||
|     steps: | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.7 | ||||
|         with: | ||||
|           submodules: false | ||||
|       - name: Calculate docker image | ||||
|         if: env.WITH_PUSH == 'false' | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.7 | ||||
|         with: | ||||
|             docker-image-name: manylinux2_28-builder-xpu | ||||
|             docker-build-dir:  .ci/docker/manywheel | ||||
|  | ||||
							
								
								
									
21  .github/workflows/build-triton-wheel.yml  vendored
| @ -3,7 +3,7 @@ name: Build Triton wheels | ||||
| on: | ||||
|   push: | ||||
|     branches: | ||||
|       - main | ||||
|       - release/2.7 | ||||
|     tags: | ||||
|       # NOTE: Binary build pipelines should only get triggered on release candidate builds | ||||
|       # Release candidate tags look like: v1.11.0-rc1 | ||||
| @ -12,6 +12,8 @@ on: | ||||
|       - .github/workflows/build-triton-wheel.yml | ||||
|       - .github/scripts/build_triton_wheel.py | ||||
|       - .github/ci_commit_pins/triton.txt | ||||
|       - .github/scripts/windows/install_vs2022.ps1 | ||||
|       - .github/scripts/windows/build_triton.bat | ||||
|       - .ci/docker/ci_commit_pins/triton.txt | ||||
|       - .ci/docker/ci_commit_pins/triton-xpu.txt | ||||
|   pull_request: | ||||
| @ -19,6 +21,8 @@ on: | ||||
|       - .github/workflows/build-triton-wheel.yml | ||||
|       - .github/scripts/build_triton_wheel.py | ||||
|       - .github/ci_commit_pins/triton.txt | ||||
|       - .github/scripts/windows/install_vs2022.ps1 | ||||
|       - .github/scripts/windows/build_triton.bat | ||||
|       - .ci/docker/ci_commit_pins/triton.txt | ||||
|       - .ci/docker/ci_commit_pins/triton-xpu.txt | ||||
|  | ||||
| @ -30,7 +34,7 @@ jobs: | ||||
|   get-label-type: | ||||
|     if: github.repository_owner == 'pytorch' | ||||
|     name: get-label-type | ||||
|     uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main | ||||
|     uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.7 | ||||
|     with: | ||||
|       triggering_actor: ${{ github.triggering_actor }} | ||||
|       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }} | ||||
| @ -68,12 +72,12 @@ jobs: | ||||
|       PLATFORM: 'manylinux_2_28_x86_64' | ||||
|     steps: | ||||
|       - name: Setup SSH (Click me for login details) | ||||
|         uses: pytorch/test-infra/.github/actions/setup-ssh@main | ||||
|         uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.7 | ||||
|         with: | ||||
|           github-secret: ${{ secrets.GITHUB_TOKEN }} | ||||
|  | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.7 | ||||
|         with: | ||||
|           submodules: false | ||||
|  | ||||
| @ -81,7 +85,7 @@ jobs: | ||||
|         uses: ./.github/actions/setup-linux | ||||
|  | ||||
|       - name: Pull Docker image | ||||
|         uses: pytorch/test-infra/.github/actions/pull-docker-image@main | ||||
|         uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.7 | ||||
|         with: | ||||
|           docker-image: ${{ env.DOCKER_IMAGE }} | ||||
|  | ||||
| @ -133,7 +137,7 @@ jobs: | ||||
|           fi | ||||
|  | ||||
|           docker exec -t "${container_name}" yum install -y zlib-devel zip | ||||
|           docker exec -t "${container_name}" "${PYTHON_EXECUTABLE}"  -m pip install -U setuptools==67.4.0 pybind11==2.13.1 auditwheel wheel | ||||
|           docker exec -t "${container_name}" "${PYTHON_EXECUTABLE}"  -m pip install -U setuptools==78.1.0 pybind11==2.13.1 auditwheel wheel | ||||
|  | ||||
|           if [[ ("${{ matrix.device }}" == "cuda" || "${{ matrix.device }}" == "rocm" || "${{ matrix.device }}" == "aarch64" ) ]]; then | ||||
|             # With this install, it gets clang 16.0.6. | ||||
| @ -163,7 +167,7 @@ jobs: | ||||
|           path: ${{ runner.temp }}/artifacts/wheelhouse/* | ||||
|  | ||||
|       - name: Teardown Linux | ||||
|         uses: pytorch/test-infra/.github/actions/teardown-linux@main | ||||
|         uses: pytorch/test-infra/.github/actions/teardown-linux@release/2.7 | ||||
|         if: always() | ||||
|  | ||||
|   build-wheel-win: | ||||
| @ -196,7 +200,7 @@ jobs: | ||||
|           echo "instance-type: $(get_ec2_metadata instance-type)" | ||||
|           echo "system info $(uname -a)" | ||||
|       - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)" | ||||
|         uses: pytorch/test-infra/.github/actions/setup-ssh@main | ||||
|         uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.7 | ||||
|         continue-on-error: true | ||||
|         with: | ||||
|           github-secret: ${{ secrets.GITHUB_TOKEN }} | ||||
| @ -243,7 +247,6 @@ jobs: | ||||
|           .github/scripts/windows/build_triton.bat | ||||
|           mkdir -p "${RUNNER_TEMP}/artifacts/" | ||||
|           mv ./*.whl "${RUNNER_TEMP}/artifacts/" | ||||
|  | ||||
|       - uses: actions/upload-artifact@v4.4.0 | ||||
|         with: | ||||
|           name: pytorch-triton-wheel-${{ matrix.py_vers }}-${{ matrix.device }} | ||||
|  | ||||
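The `NOTE` in the trigger section above says binary builds should fire only on release-candidate tags shaped like `v1.11.0-rc1`, plus pushes to the release branch. A sketch of a trigger with that shape; the tag glob below is an assumed illustration, not necessarily the exact filter in the workflow:

on:
  push:
    branches:
      - release/2.7
    tags:
      # Matches release-candidate tags such as v1.11.0-rc1.
      - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+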
							
								
								
									
2  .github/workflows/check-labels.yml  vendored
| @ -38,7 +38,7 @@ jobs: | ||||
|     runs-on: linux.24_04.4x | ||||
|     steps: | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.7 | ||||
|         with: | ||||
|           submodules: false | ||||
|           fetch-depth: 1 | ||||
|  | ||||
| @ -13,7 +13,7 @@ jobs: | ||||
|     runs-on: ubuntu-latest | ||||
|     steps: | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.7 | ||||
|         with: | ||||
|           submodules: false | ||||
|           fetch-depth: 1 | ||||
|  | ||||
							
								
								
									
2  .github/workflows/create_release.yml  vendored
| @ -19,7 +19,7 @@ jobs: | ||||
|   get-label-type: | ||||
|     if: github.repository_owner == 'pytorch' | ||||
|     name: get-label-type | ||||
|     uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main | ||||
|     uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.7 | ||||
|     with: | ||||
|       triggering_actor: ${{ github.triggering_actor }} | ||||
|       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }} | ||||
|  | ||||
							
								
								
									
10  .github/workflows/docker-builds.yml  vendored
| @ -33,7 +33,7 @@ jobs: | ||||
|   get-label-type: | ||||
|     if: github.repository_owner == 'pytorch' | ||||
|     name: get-label-type | ||||
|     uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main | ||||
|     uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.7 | ||||
|     with: | ||||
|       triggering_actor: ${{ github.triggering_actor }} | ||||
|       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }} | ||||
| @ -99,21 +99,21 @@ jobs: | ||||
|       # [see note: pytorch repo ref] | ||||
|       # deep clone (fetch-depth 0) required for git merge-base | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.7 | ||||
|  | ||||
|       - name: Setup Linux | ||||
|         uses: ./.github/actions/setup-linux | ||||
|  | ||||
|       - name: Build docker image | ||||
|         id: build-docker-image | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.7 | ||||
|         with: | ||||
|           docker-image-name: ${{ matrix.docker-image-name }} | ||||
|           always-rebuild: true | ||||
|           push: true | ||||
|  | ||||
|       - name: Pull docker image | ||||
|         uses: pytorch/test-infra/.github/actions/pull-docker-image@main | ||||
|         uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.7 | ||||
|         with: | ||||
|           docker-image: ${{ steps.build-docker-image.outputs.docker-image }} | ||||
|  | ||||
| @ -145,5 +145,5 @@ jobs: | ||||
|         if: always() | ||||
|  | ||||
|       - name: Teardown Linux | ||||
|         uses: pytorch/test-infra/.github/actions/teardown-linux@main | ||||
|         uses: pytorch/test-infra/.github/actions/teardown-linux@release/2.7 | ||||
|         if: always() | ||||
|  | ||||
							
								
								
									
55  .github/workflows/docker-cache-mi300.yml  vendored
| @ -1,55 +0,0 @@ | ||||
| name: docker-cache-mi300 | ||||
|  | ||||
| on: | ||||
|   # run every 6 hours | ||||
|   schedule: | ||||
|     - cron: 0 0,6,12,18 * * * | ||||
|   workflow_dispatch: | ||||
|  | ||||
| concurrency: | ||||
|   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name }} | ||||
|   cancel-in-progress: true | ||||
|  | ||||
| permissions: | ||||
|   id-token: write | ||||
|   contents: read | ||||
|  | ||||
| jobs: | ||||
|   docker-cache: | ||||
|     if: github.repository_owner == 'pytorch' | ||||
|     runs-on: rocm-docker | ||||
|     steps: | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         with: | ||||
|           no-sudo: true | ||||
|  | ||||
|       - name: configure aws credentials | ||||
|         id: aws_creds | ||||
|         uses: aws-actions/configure-aws-credentials@v4 | ||||
|         with: | ||||
|           role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_s3_and_ecr_read_only | ||||
|           aws-region: us-east-1 | ||||
|           role-duration-seconds: 18000 | ||||
|  | ||||
|       - name: Login to Amazon ECR | ||||
|         id: login-ecr | ||||
|         continue-on-error: false | ||||
|         uses: aws-actions/amazon-ecr-login@v2 | ||||
|  | ||||
|       - name: Calculate docker image | ||||
|         id: calculate-docker-image | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|         with: | ||||
|           docker-image-name: pytorch-linux-focal-rocm-n-py3 | ||||
|           push: false | ||||
|  | ||||
|       - name: Pull docker image | ||||
|         uses: pytorch/test-infra/.github/actions/pull-docker-image@main | ||||
|         with: | ||||
|           docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }} | ||||
|  | ||||
|       - name: Tar and upload to S3 bucket | ||||
|         run: | | ||||
|           sudo docker save -o ~/docker-data/pytorch/pytorch_docker_image.tar ${{ steps.calculate-docker-image.outputs.docker-image }} | ||||
|           sudo rclone copy -P --s3-upload-concurrency 64 --s3-chunk-size 200M --s3-upload-cutoff 300M ~/docker-data/pytorch/pytorch_docker_image.tar oci:pytorchbucket0002/pytorch_docker_image --progress | ||||
							
								
								
									
12  .github/workflows/docker-release.yml  vendored
| @ -37,7 +37,7 @@ jobs: | ||||
|   get-label-type: | ||||
|     if: github.repository_owner == 'pytorch' | ||||
|     name: get-label-type | ||||
|     uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main | ||||
|     uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.7 | ||||
|     with: | ||||
|       triggering_actor: ${{ github.triggering_actor }} | ||||
|       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }} | ||||
| @ -52,7 +52,7 @@ jobs: | ||||
|       matrix: ${{ steps.generate-matrix.outputs.matrix }} | ||||
|     steps: | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.7 | ||||
|         with: | ||||
|           fetch-depth: 1 | ||||
|           submodules: true | ||||
| @ -82,7 +82,7 @@ jobs: | ||||
|       CUDNN_VERSION: ${{ matrix.cudnn_version }} | ||||
|     steps: | ||||
|       - name: Setup SSH (Click me for login details) | ||||
|         uses: pytorch/test-infra/.github/actions/setup-ssh@main | ||||
|         uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.7 | ||||
|         with: | ||||
|           github-secret: ${{ secrets.GITHUB_TOKEN }} | ||||
|       # [see note: pytorch repo ref] | ||||
| @ -164,12 +164,12 @@ jobs: | ||||
|           fi | ||||
|  | ||||
|       - name: Teardown Linux | ||||
|         uses: pytorch/test-infra/.github/actions/teardown-linux@main | ||||
|         uses: pytorch/test-infra/.github/actions/teardown-linux@release/2.7 | ||||
|         if: always() | ||||
|  | ||||
|   validate: | ||||
|     needs: build | ||||
|     uses: pytorch/test-infra/.github/workflows/validate-docker-images.yml@main | ||||
|     uses: pytorch/test-infra/.github/workflows/validate-docker-images.yml@release/2.7 | ||||
|     with: | ||||
|       channel: nightly | ||||
|       channel: test | ||||
|       ref: main | ||||
|  | ||||
							
								
								
									
110  .github/workflows/generated-linux-aarch64-binary-manywheel-nightly.yml  generated vendored
| @ -38,7 +38,7 @@ jobs: | ||||
|   get-label-type: | ||||
|     if: github.repository_owner == 'pytorch' | ||||
|     name: get-label-type | ||||
|     uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main | ||||
|     uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.7 | ||||
|     with: | ||||
|       triggering_actor: ${{ github.triggering_actor }} | ||||
|       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }} | ||||
| @ -55,7 +55,8 @@ jobs: | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cpu | ||||
|       GPU_ARCH_TYPE: cpu-aarch64 | ||||
|       DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main | ||||
|       DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-2.7 | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       use_split_build: False | ||||
|       DESIRED_PYTHON: "3.9" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
| @ -63,7 +64,6 @@ jobs: | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_9-cpu-aarch64 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.5.1.17; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_9-cpu-aarch64-test:  # Testing | ||||
| @ -79,7 +79,8 @@ jobs: | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cpu | ||||
|       GPU_ARCH_TYPE: cpu-aarch64 | ||||
|       DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main | ||||
|       DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-2.7 | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       use_split_build: False | ||||
|       DESIRED_PYTHON: "3.9" | ||||
|       build_name: manywheel-py3_9-cpu-aarch64 | ||||
| @ -102,7 +103,8 @@ jobs: | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cpu | ||||
|       GPU_ARCH_TYPE: cpu-aarch64 | ||||
|       DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main | ||||
|       DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-2.7 | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       use_split_build: False | ||||
|       DESIRED_PYTHON: "3.9" | ||||
|       build_name: manywheel-py3_9-cpu-aarch64 | ||||
| @ -122,7 +124,8 @@ jobs: | ||||
|       DESIRED_CUDA: cu128 | ||||
|       GPU_ARCH_VERSION: 12.8-aarch64 | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.8-main | ||||
|       DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.8-2.7 | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       use_split_build: False | ||||
|       DESIRED_PYTHON: "3.9" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
| @ -130,7 +133,7 @@ jobs: | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_9-cuda-aarch64-12_8 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.8.0.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.3.14; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.41; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.2.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.7.53; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.0.11; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.7.1.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.3.14; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.41; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.2.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.7.53; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.0.11; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
| @ -148,7 +151,8 @@ jobs: | ||||
|       DESIRED_CUDA: cu128 | ||||
|       GPU_ARCH_VERSION: 12.8-aarch64 | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.8-main | ||||
|       DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.8-2.7 | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       use_split_build: False | ||||
|       DESIRED_PYTHON: "3.9" | ||||
|       build_name: manywheel-py3_9-cuda-aarch64-12_8 | ||||
| @ -167,7 +171,8 @@ jobs: | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cpu | ||||
|       GPU_ARCH_TYPE: cpu-aarch64 | ||||
|       DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main | ||||
|       DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-2.7 | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       use_split_build: False | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
| @ -175,7 +180,6 @@ jobs: | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_10-cpu-aarch64 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.5.1.17; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_10-cpu-aarch64-test:  # Testing | ||||
| @ -191,7 +195,8 @@ jobs: | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cpu | ||||
|       GPU_ARCH_TYPE: cpu-aarch64 | ||||
|       DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main | ||||
|       DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-2.7 | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       use_split_build: False | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|       build_name: manywheel-py3_10-cpu-aarch64 | ||||
| @ -214,7 +219,8 @@ jobs: | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cpu | ||||
|       GPU_ARCH_TYPE: cpu-aarch64 | ||||
|       DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main | ||||
|       DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-2.7 | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       use_split_build: False | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|       build_name: manywheel-py3_10-cpu-aarch64 | ||||
| @ -234,7 +240,8 @@ jobs: | ||||
|       DESIRED_CUDA: cu128 | ||||
|       GPU_ARCH_VERSION: 12.8-aarch64 | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.8-main | ||||
|       DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.8-2.7 | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       use_split_build: False | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
| @ -242,7 +249,7 @@ jobs: | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_10-cuda-aarch64-12_8 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.8.0.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.3.14; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.41; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.2.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.7.53; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.0.11; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.7.1.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.3.14; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.41; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.2.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.7.53; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.0.11; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
| @ -260,7 +267,8 @@ jobs: | ||||
|       DESIRED_CUDA: cu128 | ||||
|       GPU_ARCH_VERSION: 12.8-aarch64 | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.8-main | ||||
|       DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.8-2.7 | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       use_split_build: False | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|       build_name: manywheel-py3_10-cuda-aarch64-12_8 | ||||
| @ -279,7 +287,8 @@ jobs: | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cpu | ||||
|       GPU_ARCH_TYPE: cpu-aarch64 | ||||
|       DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main | ||||
|       DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-2.7 | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       use_split_build: False | ||||
|       DESIRED_PYTHON: "3.11" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
| @ -287,7 +296,6 @@ jobs: | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_11-cpu-aarch64 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.5.1.17; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_11-cpu-aarch64-test:  # Testing | ||||
| @ -303,7 +311,8 @@ jobs: | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cpu | ||||
|       GPU_ARCH_TYPE: cpu-aarch64 | ||||
|       DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main | ||||
|       DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-2.7 | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       use_split_build: False | ||||
|       DESIRED_PYTHON: "3.11" | ||||
|       build_name: manywheel-py3_11-cpu-aarch64 | ||||
| @ -326,7 +335,8 @@ jobs: | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cpu | ||||
|       GPU_ARCH_TYPE: cpu-aarch64 | ||||
|       DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main | ||||
|       DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-2.7 | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       use_split_build: False | ||||
|       DESIRED_PYTHON: "3.11" | ||||
|       build_name: manywheel-py3_11-cpu-aarch64 | ||||
| @ -346,7 +356,8 @@ jobs: | ||||
|       DESIRED_CUDA: cu128 | ||||
|       GPU_ARCH_VERSION: 12.8-aarch64 | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.8-main | ||||
|       DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.8-2.7 | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       use_split_build: False | ||||
|       DESIRED_PYTHON: "3.11" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
| @ -354,7 +365,7 @@ jobs: | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_11-cuda-aarch64-12_8 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.8.0.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.3.14; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.41; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.2.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.7.53; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.0.11; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.7.1.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.3.14; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.41; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.2.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.7.53; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.0.11; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
| @ -372,7 +383,8 @@ jobs: | ||||
|       DESIRED_CUDA: cu128 | ||||
|       GPU_ARCH_VERSION: 12.8-aarch64 | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.8-main | ||||
|       DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.8-2.7 | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       use_split_build: False | ||||
|       DESIRED_PYTHON: "3.11" | ||||
|       build_name: manywheel-py3_11-cuda-aarch64-12_8 | ||||
| @ -391,7 +403,8 @@ jobs: | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cpu | ||||
|       GPU_ARCH_TYPE: cpu-aarch64 | ||||
|       DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main | ||||
|       DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-2.7 | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       use_split_build: False | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
| @ -399,7 +412,6 @@ jobs: | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_12-cpu-aarch64 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.5.1.17; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_12-cpu-aarch64-test:  # Testing | ||||
| @ -415,7 +427,8 @@ jobs: | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cpu | ||||
|       GPU_ARCH_TYPE: cpu-aarch64 | ||||
|       DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main | ||||
|       DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-2.7 | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       use_split_build: False | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|       build_name: manywheel-py3_12-cpu-aarch64 | ||||
| @ -438,7 +451,8 @@ jobs: | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cpu | ||||
|       GPU_ARCH_TYPE: cpu-aarch64 | ||||
|       DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main | ||||
|       DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-2.7 | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       use_split_build: False | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|       build_name: manywheel-py3_12-cpu-aarch64 | ||||
| @ -458,7 +472,8 @@ jobs: | ||||
|       DESIRED_CUDA: cu128 | ||||
|       GPU_ARCH_VERSION: 12.8-aarch64 | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.8-main | ||||
|       DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.8-2.7 | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       use_split_build: False | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
| @ -466,7 +481,7 @@ jobs: | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_12-cuda-aarch64-12_8 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.8.0.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.3.14; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.41; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.2.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.7.53; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.0.11; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.7.1.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.3.14; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.41; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.2.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.7.53; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.0.11; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
| @ -484,7 +499,8 @@ jobs: | ||||
|       DESIRED_CUDA: cu128 | ||||
|       GPU_ARCH_VERSION: 12.8-aarch64 | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.8-main | ||||
|       DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.8-2.7 | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       use_split_build: False | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|       build_name: manywheel-py3_12-cuda-aarch64-12_8 | ||||
| @ -503,7 +519,8 @@ jobs: | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cpu | ||||
|       GPU_ARCH_TYPE: cpu-aarch64 | ||||
|       DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main | ||||
|       DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-2.7 | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       use_split_build: False | ||||
|       DESIRED_PYTHON: "3.13" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
| @ -511,7 +528,6 @@ jobs: | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_13-cpu-aarch64 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.5.1.17; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_13-cpu-aarch64-test:  # Testing | ||||
| @ -527,7 +543,8 @@ jobs: | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cpu | ||||
|       GPU_ARCH_TYPE: cpu-aarch64 | ||||
|       DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main | ||||
|       DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-2.7 | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       use_split_build: False | ||||
|       DESIRED_PYTHON: "3.13" | ||||
|       build_name: manywheel-py3_13-cpu-aarch64 | ||||
| @ -550,7 +567,8 @@ jobs: | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cpu | ||||
|       GPU_ARCH_TYPE: cpu-aarch64 | ||||
|       DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main | ||||
|       DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-2.7 | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       use_split_build: False | ||||
|       DESIRED_PYTHON: "3.13" | ||||
|       build_name: manywheel-py3_13-cpu-aarch64 | ||||
| @ -570,7 +588,8 @@ jobs: | ||||
|       DESIRED_CUDA: cu128 | ||||
|       GPU_ARCH_VERSION: 12.8-aarch64 | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.8-main | ||||
|       DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.8-2.7 | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       use_split_build: False | ||||
|       DESIRED_PYTHON: "3.13" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
| @ -578,7 +597,7 @@ jobs: | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_13-cuda-aarch64-12_8 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.8.0.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.3.14; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.41; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.2.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.7.53; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.0.11; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.7.1.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.3.14; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.41; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.2.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.7.53; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.0.11; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
| @ -596,7 +615,8 @@ jobs: | ||||
|       DESIRED_CUDA: cu128 | ||||
|       GPU_ARCH_VERSION: 12.8-aarch64 | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.8-main | ||||
|       DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.8-2.7 | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       use_split_build: False | ||||
|       DESIRED_PYTHON: "3.13" | ||||
|       build_name: manywheel-py3_13-cuda-aarch64-12_8 | ||||
| @ -615,7 +635,8 @@ jobs: | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cpu | ||||
|       GPU_ARCH_TYPE: cpu-aarch64 | ||||
|       DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main | ||||
|       DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-2.7 | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       use_split_build: False | ||||
|       DESIRED_PYTHON: "3.13t" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
| @ -623,7 +644,6 @@ jobs: | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_13t-cpu-aarch64 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.5.1.17; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_13t-cpu-aarch64-test:  # Testing | ||||
| @ -639,7 +659,8 @@ jobs: | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cpu | ||||
|       GPU_ARCH_TYPE: cpu-aarch64 | ||||
|       DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main | ||||
|       DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-2.7 | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       use_split_build: False | ||||
|       DESIRED_PYTHON: "3.13t" | ||||
|       build_name: manywheel-py3_13t-cpu-aarch64 | ||||
| @ -662,7 +683,8 @@ jobs: | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cpu | ||||
|       GPU_ARCH_TYPE: cpu-aarch64 | ||||
|       DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main | ||||
|       DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-2.7 | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       use_split_build: False | ||||
|       DESIRED_PYTHON: "3.13t" | ||||
|       build_name: manywheel-py3_13t-cpu-aarch64 | ||||
| @ -682,7 +704,8 @@ jobs: | ||||
|       DESIRED_CUDA: cu128 | ||||
|       GPU_ARCH_VERSION: 12.8-aarch64 | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.8-main | ||||
|       DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.8-2.7 | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       use_split_build: False | ||||
|       DESIRED_PYTHON: "3.13t" | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
| @ -690,7 +713,7 @@ jobs: | ||||
|       ALPINE_IMAGE: "arm64v8/alpine" | ||||
|       build_name: manywheel-py3_13t-cuda-aarch64-12_8 | ||||
|       build_environment: linux-aarch64-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.8.0.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.3.14; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.41; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.2.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.7.53; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.0.11; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.7.1.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.3.14; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.41; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.2.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.7.53; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.0.11; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       timeout-minutes: 420 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
| @ -708,7 +731,8 @@ jobs: | ||||
|       DESIRED_CUDA: cu128 | ||||
|       GPU_ARCH_VERSION: 12.8-aarch64 | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.8-main | ||||
|       DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.8-2.7 | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       use_split_build: False | ||||
|       DESIRED_PYTHON: "3.13t" | ||||
|       build_name: manywheel-py3_13t-cuda-aarch64-12_8 | ||||
|  | ||||
| @ -2,7 +2,7 @@ | ||||
| 
 | ||||
| # Template is at:    .github/templates/linux_binary_build_workflow.yml.j2 | ||||
| # Generation script: .github/scripts/generate_ci_workflows.py | ||||
| name: linux-binary-libtorch | ||||
| name: linux-binary-libtorch-cxx11-abi | ||||
| 
 | ||||
| 
 | ||||
| on: | ||||
| @ -18,7 +18,7 @@ env: | ||||
|   ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine" | ||||
|   AWS_DEFAULT_REGION: us-east-1 | ||||
|   BINARY_ENV_FILE: /tmp/env | ||||
|   BUILD_ENVIRONMENT: linux-binary-libtorch | ||||
|   BUILD_ENVIRONMENT: linux-binary-libtorch-cxx11-abi | ||||
|   GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} | ||||
|   PR_NUMBER: ${{ github.event.pull_request.number }} | ||||
|   PYTORCH_FINAL_PACKAGE_DIR: /artifacts | ||||
| @ -26,20 +26,20 @@ env: | ||||
|   SHA1: ${{ github.event.pull_request.head.sha || github.sha }} | ||||
|   SKIP_ALL_TESTS: 0 | ||||
| concurrency: | ||||
|   group: linux-binary-libtorch-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }} | ||||
|   group: linux-binary-libtorch-cxx11-abi-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }} | ||||
|   cancel-in-progress: true | ||||
| 
 | ||||
| jobs: | ||||
|   get-label-type: | ||||
|     if: github.repository_owner == 'pytorch' | ||||
|     name: get-label-type | ||||
|     uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main | ||||
|     uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.7 | ||||
|     with: | ||||
|       triggering_actor: ${{ github.triggering_actor }} | ||||
|       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }} | ||||
|       curr_branch: ${{ github.head_ref || github.ref_name }} | ||||
|       curr_ref_type: ${{ github.ref_type }} | ||||
|   libtorch-cpu-shared-with-deps-release-build: | ||||
|   libtorch-cpu-shared-with-deps-cxx11-abi-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
| @ -50,18 +50,18 @@ jobs: | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cpu | ||||
|       GPU_ARCH_TYPE: cpu | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu-main | ||||
|       LIBTORCH_CONFIG: release | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu-2.7 | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: libtorch-cpu-shared-with-deps-release | ||||
|       build_environment: linux-binary-libtorch | ||||
|       build_name: libtorch-cpu-shared-with-deps-cxx11-abi | ||||
|       build_environment: linux-binary-libtorch-cxx11-abi | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   libtorch-cpu-shared-with-deps-release-test:  # Testing | ||||
|   libtorch-cpu-shared-with-deps-cxx11-abi-test:  # Testing | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: | ||||
|       - libtorch-cpu-shared-with-deps-release-build | ||||
|       - libtorch-cpu-shared-with-deps-cxx11-abi-build | ||||
|       - get-label-type | ||||
|     uses: ./.github/workflows/_binary-test-linux.yml | ||||
|     with: | ||||
| @ -71,11 +71,11 @@ jobs: | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cpu | ||||
|       GPU_ARCH_TYPE: cpu | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu-main | ||||
|       LIBTORCH_CONFIG: release | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu-2.7 | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|       build_name: libtorch-cpu-shared-with-deps-release | ||||
|       build_environment: linux-binary-libtorch | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       build_name: libtorch-cpu-shared-with-deps-cxx11-abi | ||||
|       build_environment: linux-binary-libtorch-cxx11-abi | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.4xlarge | ||||
|     secrets: | ||||
| @ -2,7 +2,7 @@ | ||||
| 
 | ||||
| # Template is at:    .github/templates/linux_binary_build_workflow.yml.j2 | ||||
| # Generation script: .github/scripts/generate_ci_workflows.py | ||||
| name: linux-binary-libtorch | ||||
| name: linux-binary-libtorch-cxx11-abi | ||||
| 
 | ||||
| 
 | ||||
| on: | ||||
| @ -23,7 +23,7 @@ env: | ||||
|   ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine" | ||||
|   AWS_DEFAULT_REGION: us-east-1 | ||||
|   BINARY_ENV_FILE: /tmp/env | ||||
|   BUILD_ENVIRONMENT: linux-binary-libtorch | ||||
|   BUILD_ENVIRONMENT: linux-binary-libtorch-cxx11-abi | ||||
|   GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} | ||||
|   PR_NUMBER: ${{ github.event.pull_request.number }} | ||||
|   PYTORCH_FINAL_PACKAGE_DIR: /artifacts | ||||
| @ -31,20 +31,20 @@ env: | ||||
|   SHA1: ${{ github.event.pull_request.head.sha || github.sha }} | ||||
|   SKIP_ALL_TESTS: 0 | ||||
| concurrency: | ||||
|   group: linux-binary-libtorch-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }} | ||||
|   group: linux-binary-libtorch-cxx11-abi-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }} | ||||
|   cancel-in-progress: true | ||||
| 
 | ||||
| jobs: | ||||
|   get-label-type: | ||||
|     if: github.repository_owner == 'pytorch' | ||||
|     name: get-label-type | ||||
|     uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main | ||||
|     uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.7 | ||||
|     with: | ||||
|       triggering_actor: ${{ github.triggering_actor }} | ||||
|       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }} | ||||
|       curr_branch: ${{ github.head_ref || github.ref_name }} | ||||
|       curr_ref_type: ${{ github.ref_type }} | ||||
|   libtorch-cpu-shared-with-deps-release-build: | ||||
|   libtorch-cpu-shared-with-deps-cxx11-abi-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
| @ -55,18 +55,18 @@ jobs: | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cpu | ||||
|       GPU_ARCH_TYPE: cpu | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu-main | ||||
|       LIBTORCH_CONFIG: release | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu-2.7 | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: libtorch-cpu-shared-with-deps-release | ||||
|       build_environment: linux-binary-libtorch | ||||
|       build_name: libtorch-cpu-shared-with-deps-cxx11-abi | ||||
|       build_environment: linux-binary-libtorch-cxx11-abi | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   libtorch-cpu-shared-with-deps-release-test:  # Testing | ||||
|   libtorch-cpu-shared-with-deps-cxx11-abi-test:  # Testing | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: | ||||
|       - libtorch-cpu-shared-with-deps-release-build | ||||
|       - libtorch-cpu-shared-with-deps-cxx11-abi-build | ||||
|       - get-label-type | ||||
|     uses: ./.github/workflows/_binary-test-linux.yml | ||||
|     with: | ||||
| @ -76,21 +76,21 @@ jobs: | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cpu | ||||
|       GPU_ARCH_TYPE: cpu | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu-main | ||||
|       LIBTORCH_CONFIG: release | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu-2.7 | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|       build_name: libtorch-cpu-shared-with-deps-release | ||||
|       build_environment: linux-binary-libtorch | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       build_name: libtorch-cpu-shared-with-deps-cxx11-abi | ||||
|       build_environment: linux-binary-libtorch-cxx11-abi | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.4xlarge | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   libtorch-cpu-shared-with-deps-release-upload:  # Uploading | ||||
|   libtorch-cpu-shared-with-deps-cxx11-abi-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: libtorch-cpu-shared-with-deps-release-test | ||||
|     needs: libtorch-cpu-shared-with-deps-cxx11-abi-test | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: libtorch | ||||
| @ -98,15 +98,15 @@ jobs: | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: cpu | ||||
|       GPU_ARCH_TYPE: cpu | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu-main | ||||
|       LIBTORCH_CONFIG: release | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu-2.7 | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|       build_name: libtorch-cpu-shared-with-deps-release | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       build_name: libtorch-cpu-shared-with-deps-cxx11-abi | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
| 
 | ||||
|   libtorch-cuda11_8-shared-with-deps-release-build: | ||||
|   libtorch-cuda11_8-shared-with-deps-cxx11-abi-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
| @ -118,18 +118,18 @@ jobs: | ||||
|       DESIRED_CUDA: cu118 | ||||
|       GPU_ARCH_VERSION: 11.8 | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda11.8-main | ||||
|       LIBTORCH_CONFIG: release | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda11.8-2.7 | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: libtorch-cuda11_8-shared-with-deps-release | ||||
|       build_environment: linux-binary-libtorch | ||||
|       build_name: libtorch-cuda11_8-shared-with-deps-cxx11-abi | ||||
|       build_environment: linux-binary-libtorch-cxx11-abi | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   libtorch-cuda11_8-shared-with-deps-release-test:  # Testing | ||||
|   libtorch-cuda11_8-shared-with-deps-cxx11-abi-test:  # Testing | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: | ||||
|       - libtorch-cuda11_8-shared-with-deps-release-build | ||||
|       - libtorch-cuda11_8-shared-with-deps-cxx11-abi-build | ||||
|       - get-label-type | ||||
|     uses: ./.github/workflows/_binary-test-linux.yml | ||||
|     with: | ||||
| @ -140,21 +140,21 @@ jobs: | ||||
|       DESIRED_CUDA: cu118 | ||||
|       GPU_ARCH_VERSION: 11.8 | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda11.8-main | ||||
|       LIBTORCH_CONFIG: release | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda11.8-2.7 | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|       build_name: libtorch-cuda11_8-shared-with-deps-release | ||||
|       build_environment: linux-binary-libtorch | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       build_name: libtorch-cuda11_8-shared-with-deps-cxx11-abi | ||||
|       build_environment: linux-binary-libtorch-cxx11-abi | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.4xlarge.nvidia.gpu | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   libtorch-cuda11_8-shared-with-deps-release-upload:  # Uploading | ||||
|   libtorch-cuda11_8-shared-with-deps-cxx11-abi-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: libtorch-cuda11_8-shared-with-deps-release-test | ||||
|     needs: libtorch-cuda11_8-shared-with-deps-cxx11-abi-test | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: libtorch | ||||
| @ -163,15 +163,15 @@ jobs: | ||||
|       DESIRED_CUDA: cu118 | ||||
|       GPU_ARCH_VERSION: 11.8 | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda11.8-main | ||||
|       LIBTORCH_CONFIG: release | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda11.8-2.7 | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|       build_name: libtorch-cuda11_8-shared-with-deps-release | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       build_name: libtorch-cuda11_8-shared-with-deps-cxx11-abi | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
| 
 | ||||
|   libtorch-cuda12_6-shared-with-deps-release-build: | ||||
|   libtorch-cuda12_6-shared-with-deps-cxx11-abi-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
| @ -183,18 +183,18 @@ jobs: | ||||
|       DESIRED_CUDA: cu126 | ||||
|       GPU_ARCH_VERSION: 12.6 | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda12.6-main | ||||
|       LIBTORCH_CONFIG: release | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda12.6-2.7 | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: libtorch-cuda12_6-shared-with-deps-release | ||||
|       build_environment: linux-binary-libtorch | ||||
|       build_name: libtorch-cuda12_6-shared-with-deps-cxx11-abi | ||||
|       build_environment: linux-binary-libtorch-cxx11-abi | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   libtorch-cuda12_6-shared-with-deps-release-test:  # Testing | ||||
|   libtorch-cuda12_6-shared-with-deps-cxx11-abi-test:  # Testing | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: | ||||
|       - libtorch-cuda12_6-shared-with-deps-release-build | ||||
|       - libtorch-cuda12_6-shared-with-deps-cxx11-abi-build | ||||
|       - get-label-type | ||||
|     uses: ./.github/workflows/_binary-test-linux.yml | ||||
|     with: | ||||
| @ -205,21 +205,21 @@ jobs: | ||||
|       DESIRED_CUDA: cu126 | ||||
|       GPU_ARCH_VERSION: 12.6 | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda12.6-main | ||||
|       LIBTORCH_CONFIG: release | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda12.6-2.7 | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|       build_name: libtorch-cuda12_6-shared-with-deps-release | ||||
|       build_environment: linux-binary-libtorch | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       build_name: libtorch-cuda12_6-shared-with-deps-cxx11-abi | ||||
|       build_environment: linux-binary-libtorch-cxx11-abi | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.4xlarge.nvidia.gpu | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   libtorch-cuda12_6-shared-with-deps-release-upload:  # Uploading | ||||
|   libtorch-cuda12_6-shared-with-deps-cxx11-abi-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: libtorch-cuda12_6-shared-with-deps-release-test | ||||
|     needs: libtorch-cuda12_6-shared-with-deps-cxx11-abi-test | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: libtorch | ||||
| @ -228,15 +228,15 @@ jobs: | ||||
|       DESIRED_CUDA: cu126 | ||||
|       GPU_ARCH_VERSION: 12.6 | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda12.6-main | ||||
|       LIBTORCH_CONFIG: release | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda12.6-2.7 | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|       build_name: libtorch-cuda12_6-shared-with-deps-release | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       build_name: libtorch-cuda12_6-shared-with-deps-cxx11-abi | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
| 
 | ||||
|   libtorch-cuda12_8-shared-with-deps-release-build: | ||||
|   libtorch-cuda12_8-shared-with-deps-cxx11-abi-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
| @ -248,18 +248,18 @@ jobs: | ||||
|       DESIRED_CUDA: cu128 | ||||
|       GPU_ARCH_VERSION: 12.8 | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda12.8-main | ||||
|       LIBTORCH_CONFIG: release | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda12.8-2.7 | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: libtorch-cuda12_8-shared-with-deps-release | ||||
|       build_environment: linux-binary-libtorch | ||||
|       build_name: libtorch-cuda12_8-shared-with-deps-cxx11-abi | ||||
|       build_environment: linux-binary-libtorch-cxx11-abi | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   libtorch-cuda12_8-shared-with-deps-release-test:  # Testing | ||||
|   libtorch-cuda12_8-shared-with-deps-cxx11-abi-test:  # Testing | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: | ||||
|       - libtorch-cuda12_8-shared-with-deps-release-build | ||||
|       - libtorch-cuda12_8-shared-with-deps-cxx11-abi-build | ||||
|       - get-label-type | ||||
|     uses: ./.github/workflows/_binary-test-linux.yml | ||||
|     with: | ||||
| @ -270,21 +270,21 @@ jobs: | ||||
|       DESIRED_CUDA: cu128 | ||||
|       GPU_ARCH_VERSION: 12.8 | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda12.8-main | ||||
|       LIBTORCH_CONFIG: release | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda12.8-2.7 | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|       build_name: libtorch-cuda12_8-shared-with-deps-release | ||||
|       build_environment: linux-binary-libtorch | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       build_name: libtorch-cuda12_8-shared-with-deps-cxx11-abi | ||||
|       build_environment: linux-binary-libtorch-cxx11-abi | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       runs_on: linux.g4dn.4xlarge.nvidia.gpu  # 12.8 build needs sm_70+ runner | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   libtorch-cuda12_8-shared-with-deps-release-upload:  # Uploading | ||||
|   libtorch-cuda12_8-shared-with-deps-cxx11-abi-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: libtorch-cuda12_8-shared-with-deps-release-test | ||||
|     needs: libtorch-cuda12_8-shared-with-deps-cxx11-abi-test | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: libtorch | ||||
| @ -293,15 +293,15 @@ jobs: | ||||
|       DESIRED_CUDA: cu128 | ||||
|       GPU_ARCH_VERSION: 12.8 | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda12.8-main | ||||
|       LIBTORCH_CONFIG: release | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda12.8-2.7 | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|       build_name: libtorch-cuda12_8-shared-with-deps-release | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       build_name: libtorch-cuda12_8-shared-with-deps-cxx11-abi | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
| 
 | ||||
|   libtorch-rocm6_2_4-shared-with-deps-release-build: | ||||
|   libtorch-rocm6_2_4-shared-with-deps-cxx11-abi-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
| @ -313,18 +313,18 @@ jobs: | ||||
|       DESIRED_CUDA: rocm6.2.4 | ||||
|       GPU_ARCH_VERSION: 6.2.4 | ||||
|       GPU_ARCH_TYPE: rocm | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.2.4-main | ||||
|       LIBTORCH_CONFIG: release | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.2.4-2.7 | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: libtorch-rocm6_2_4-shared-with-deps-release | ||||
|       build_environment: linux-binary-libtorch | ||||
|       build_name: libtorch-rocm6_2_4-shared-with-deps-cxx11-abi | ||||
|       build_environment: linux-binary-libtorch-cxx11-abi | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   libtorch-rocm6_2_4-shared-with-deps-release-test:  # Testing | ||||
|   libtorch-rocm6_2_4-shared-with-deps-cxx11-abi-test:  # Testing | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: | ||||
|       - libtorch-rocm6_2_4-shared-with-deps-release-build | ||||
|       - libtorch-rocm6_2_4-shared-with-deps-cxx11-abi-build | ||||
|       - get-label-type | ||||
|     runs-on: linux.rocm.gpu | ||||
|     timeout-minutes: 240 | ||||
| @ -337,21 +337,20 @@ jobs: | ||||
|       GPU_ARCH_VERSION: 6.2.4 | ||||
|       GPU_ARCH_TYPE: rocm | ||||
|       SKIP_ALL_TESTS: 1 | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.2.4-main | ||||
|       LIBTORCH_CONFIG: release | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.2.4-2.7 | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
|       - uses: actions/download-artifact@v4.1.7 | ||||
|         name: Download Build Artifacts | ||||
|         with: | ||||
|           name: libtorch-rocm6_2_4-shared-with-deps-release | ||||
|           name: libtorch-rocm6_2_4-shared-with-deps-cxx11-abi | ||||
|           path: "${{ runner.temp }}/artifacts/" | ||||
|       - name: Checkout PyTorch | ||||
|         uses: actions/checkout@v4 | ||||
|         with: | ||||
|           ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} | ||||
|           submodules: recursive | ||||
|           path: pytorch | ||||
|           show-progress: false | ||||
| @ -364,19 +363,19 @@ jobs: | ||||
|         run: | | ||||
|           echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri --group-add video --group-add daemon" >> "${GITHUB_ENV}" | ||||
|       - name: Pull Docker image | ||||
|         uses: pytorch/test-infra/.github/actions/pull-docker-image@main | ||||
|         uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.7 | ||||
|         with: | ||||
|           docker-image: pytorch/libtorch-cxx11-builder:rocm6.2.4-main | ||||
|           docker-image: pytorch/libtorch-cxx11-builder:rocm6.2.4-2.7 | ||||
|       - name: Test Pytorch binary | ||||
|         uses: ./pytorch/.github/actions/test-pytorch-binary | ||||
|       - name: Teardown ROCm | ||||
|         uses: ./.github/actions/teardown-rocm | ||||
|   libtorch-rocm6_2_4-shared-with-deps-release-upload:  # Uploading | ||||
|   libtorch-rocm6_2_4-shared-with-deps-cxx11-abi-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: libtorch-rocm6_2_4-shared-with-deps-release-test | ||||
|     needs: libtorch-rocm6_2_4-shared-with-deps-cxx11-abi-test | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: libtorch | ||||
| @ -385,15 +384,15 @@ jobs: | ||||
|       DESIRED_CUDA: rocm6.2.4 | ||||
|       GPU_ARCH_VERSION: 6.2.4 | ||||
|       GPU_ARCH_TYPE: rocm | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.2.4-main | ||||
|       LIBTORCH_CONFIG: release | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.2.4-2.7 | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|       build_name: libtorch-rocm6_2_4-shared-with-deps-release | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       build_name: libtorch-rocm6_2_4-shared-with-deps-cxx11-abi | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
| 
 | ||||
|   libtorch-rocm6_3-shared-with-deps-release-build: | ||||
|   libtorch-rocm6_3-shared-with-deps-cxx11-abi-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     needs: get-label-type | ||||
| @ -405,18 +404,18 @@ jobs: | ||||
|       DESIRED_CUDA: rocm6.3 | ||||
|       GPU_ARCH_VERSION: 6.3 | ||||
|       GPU_ARCH_TYPE: rocm | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.3-main | ||||
|       LIBTORCH_CONFIG: release | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.3-2.7 | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" | ||||
|       build_name: libtorch-rocm6_3-shared-with-deps-release | ||||
|       build_environment: linux-binary-libtorch | ||||
|       build_name: libtorch-rocm6_3-shared-with-deps-cxx11-abi | ||||
|       build_environment: linux-binary-libtorch-cxx11-abi | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   libtorch-rocm6_3-shared-with-deps-release-test:  # Testing | ||||
|   libtorch-rocm6_3-shared-with-deps-cxx11-abi-test:  # Testing | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: | ||||
|       - libtorch-rocm6_3-shared-with-deps-release-build | ||||
|       - libtorch-rocm6_3-shared-with-deps-cxx11-abi-build | ||||
|       - get-label-type | ||||
|     runs-on: linux.rocm.gpu | ||||
|     timeout-minutes: 240 | ||||
| @ -429,21 +428,20 @@ jobs: | ||||
|       GPU_ARCH_VERSION: 6.3 | ||||
|       GPU_ARCH_TYPE: rocm | ||||
|       SKIP_ALL_TESTS: 1 | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.3-main | ||||
|       LIBTORCH_CONFIG: release | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.3-2.7 | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
|       - uses: actions/download-artifact@v4.1.7 | ||||
|         name: Download Build Artifacts | ||||
|         with: | ||||
|           name: libtorch-rocm6_3-shared-with-deps-release | ||||
|           name: libtorch-rocm6_3-shared-with-deps-cxx11-abi | ||||
|           path: "${{ runner.temp }}/artifacts/" | ||||
|       - name: Checkout PyTorch | ||||
|         uses: actions/checkout@v4 | ||||
|         with: | ||||
|           ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} | ||||
|           submodules: recursive | ||||
|           path: pytorch | ||||
|           show-progress: false | ||||
| @ -456,19 +454,19 @@ jobs: | ||||
|         run: | | ||||
|           echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri --group-add video --group-add daemon" >> "${GITHUB_ENV}" | ||||
|       - name: Pull Docker image | ||||
|         uses: pytorch/test-infra/.github/actions/pull-docker-image@main | ||||
|         uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.7 | ||||
|         with: | ||||
|           docker-image: pytorch/libtorch-cxx11-builder:rocm6.3-main | ||||
|           docker-image: pytorch/libtorch-cxx11-builder:rocm6.3-2.7 | ||||
|       - name: Test Pytorch binary | ||||
|         uses: ./pytorch/.github/actions/test-pytorch-binary | ||||
|       - name: Teardown ROCm | ||||
|         uses: ./.github/actions/teardown-rocm | ||||
|   libtorch-rocm6_3-shared-with-deps-release-upload:  # Uploading | ||||
|   libtorch-rocm6_3-shared-with-deps-cxx11-abi-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: libtorch-rocm6_3-shared-with-deps-release-test | ||||
|     needs: libtorch-rocm6_3-shared-with-deps-cxx11-abi-test | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       PACKAGE_TYPE: libtorch | ||||
| @ -477,10 +475,10 @@ jobs: | ||||
|       DESIRED_CUDA: rocm6.3 | ||||
|       GPU_ARCH_VERSION: 6.3 | ||||
|       GPU_ARCH_TYPE: rocm | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.3-main | ||||
|       LIBTORCH_CONFIG: release | ||||
|       DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.3-2.7 | ||||
|       LIBTORCH_VARIANT: shared-with-deps | ||||
|       build_name: libtorch-rocm6_3-shared-with-deps-release | ||||
|       DESIRED_DEVTOOLSET: cxx11-abi | ||||
|       build_name: libtorch-rocm6_3-shared-with-deps-cxx11-abi | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||