Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-25 16:14:55 +08:00

Compare commits: ciflow/tru...fixflashgi (27 commits)
| SHA1 |
|---|
| 2f10f1b888 |
| 517f267085 |
| cac7242b91 |
| b54dc58cb5 |
| 4efdd216bd |
| 95654a32f5 |
| 2f5c2ccf7a |
| 813cae6074 |
| ef4730d5bb |
| 3ad3df90c3 |
| 257bf0e654 |
| 02d16522d8 |
| e6d3372157 |
| 3f3d86adf2 |
| 58478b0ab8 |
| 98e554222f |
| 700d608f4a |
| 1b27857415 |
| 73995b1b5e |
| 03d7c77071 |
| 019d9cda40 |
| 3620191a0a |
| 5a722ca130 |
| 8746e3cea2 |
| 8cd1996b57 |
| 73c23f3554 |
| 1da3d6f595 |
@@ -8,8 +8,6 @@ if [[ "$GPU_ARCH_VERSION" == *"12.6"* ]]; then
 export TORCH_CUDA_ARCH_LIST="8.0;9.0"
 elif [[ "$GPU_ARCH_VERSION" == *"12.8"* ]]; then
 export TORCH_CUDA_ARCH_LIST="8.0;9.0;10.0;12.0"
-elif [[ "$GPU_ARCH_VERSION" == *"12.9"* ]]; then
-export TORCH_CUDA_ARCH_LIST="8.0;9.0;10.0;12.0"
 elif [[ "$GPU_ARCH_VERSION" == *"13.0"* ]]; then
 export TORCH_CUDA_ARCH_LIST="8.0;9.0;10.0;11.0;12.0+PTX"
 fi

@@ -37,9 +37,9 @@ case ${DOCKER_TAG_PREFIX} in
 rocm*)
 BASE_TARGET=rocm
 PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
-# add gfx950, gfx115x conditionally starting in ROCm 7.0
+# add gfx950 conditionally starting in ROCm 7.0
 if [[ "$ROCM_VERSION" == *"7.0"* ]]; then
-PYTORCH_ROCM_ARCH="${PYTORCH_ROCM_ARCH};gfx950;gfx1150;gfx1151"
+PYTORCH_ROCM_ARCH="${PYTORCH_ROCM_ARCH};gfx950"
 fi
 EXTRA_BUILD_ARGS="${EXTRA_BUILD_ARGS} --build-arg PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH}"
 ;;

@@ -113,7 +113,6 @@ case "$tag" in
 UCX_COMMIT=${_UCX_COMMIT}
 UCC_COMMIT=${_UCC_COMMIT}
 TRITON=yes
-INSTALL_MINGW=yes
 ;;
 pytorch-linux-jammy-cuda13.0-cudnn9-py3-gcc11)
 CUDA_VERSION=13.0.0
@@ -182,7 +181,7 @@ case "$tag" in
 KATEX=yes
 UCX_COMMIT=${_UCX_COMMIT}
 UCC_COMMIT=${_UCC_COMMIT}
-PYTORCH_ROCM_ARCH="gfx90a;gfx942;gfx950;gfx1100"
+PYTORCH_ROCM_ARCH="gfx90a;gfx942;gfx950"
 if [[ $tag =~ "benchmarks" ]]; then
 INDUCTOR_BENCHMARKS=yes
 fi
@@ -345,7 +344,7 @@ docker build \
 --build-arg "NINJA_VERSION=${NINJA_VERSION:-}" \
 --build-arg "KATEX=${KATEX:-}" \
 --build-arg "ROCM_VERSION=${ROCM_VERSION:-}" \
---build-arg "PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH}" \
+--build-arg "PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH:-gfx90a;gfx942}" \
 --build-arg "IMAGE_NAME=${IMAGE_NAME}" \
 --build-arg "UCX_COMMIT=${UCX_COMMIT}" \
 --build-arg "UCC_COMMIT=${UCC_COMMIT}" \
@@ -362,7 +361,6 @@ docker build \
 --build-arg "OPENBLAS=${OPENBLAS:-}" \
 --build-arg "SKIP_SCCACHE_INSTALL=${SKIP_SCCACHE_INSTALL:-}" \
 --build-arg "SKIP_LLVM_SRC_BUILD_INSTALL=${SKIP_LLVM_SRC_BUILD_INSTALL:-}" \
---build-arg "INSTALL_MINGW=${INSTALL_MINGW:-}" \
 -f $(dirname ${DOCKERFILE})/Dockerfile \
 -t "$tmp_tag" \
 "$@" \

@@ -1 +1 @@
-deb42f2a8e48f5032b4a98ee781a15fa87a157cf
+e0dda9059d082537cee36be6c5e4fe3b18c880c0

@@ -1 +1 @@
-v2.27.5-1
+v2.28.3-1

@@ -1 +1 @@
-v2.27.7-1
+v2.28.3-1

@@ -1 +1 @@
-7416ffcb92cdbe98d9f97e4e6f95247e46dfc9fd
+27664085f804afc83df26f740bb46c365854f2c4

@@ -83,6 +83,10 @@ function build_cpython {
 py_suffix=${py_ver::-1}
 py_folder=$py_suffix
 fi
+# Update to rc2 due to https://github.com/python/cpython/commit/c72699086fe4
+if [ "$py_suffix" == "3.14.0" ]; then
+py_suffix="3.14.0rc2"
+fi
 wget -q $PYTHON_DOWNLOAD_URL/$py_folder/Python-$py_suffix.tgz -O Python-$py_ver.tgz
 do_cpython_build $py_ver Python-$py_suffix

@@ -1,10 +0,0 @@
-#!/bin/bash
-
-set -ex
-
-# Install MinGW-w64 for Windows cross-compilation
-apt-get update
-apt-get install -y g++-mingw-w64-x86-64-posix
-
-echo "MinGW-w64 installed successfully"
-x86_64-w64-mingw32-g++ --version

@@ -19,8 +19,8 @@ pip_install \
 transformers==4.36.2
 
 pip_install coloredlogs packaging
-pip_install onnxruntime==1.23.0
-pip_install onnxscript==0.5.4
+pip_install onnxruntime==1.22.1
+pip_install onnxscript==0.4.0
 
 # Cache the transformers model to be used later by ONNX tests. We need to run the transformers
 # package to download the model. By default, the model is cached at ~/.cache/huggingface/hub/

@@ -1,9 +0,0 @@
-#!/bin/bash
-set -xe
-# Script used in Linux x86 and aarch64 CD pipeline
-
-# Workaround for exposing statically linked libstdc++ CXX11 ABI symbols.
-# see: https://github.com/pytorch/pytorch/issues/133437
-LIBNONSHARED=$(gcc -print-file-name=libstdc++_nonshared.a)
-nm -g $LIBNONSHARED | grep " T " | grep recursive_directory_iterator | cut -c 20- > weaken-symbols.txt
-objcopy --weaken-symbols weaken-symbols.txt $LIBNONSHARED $LIBNONSHARED

@@ -39,20 +39,16 @@ case ${DOCKER_TAG_PREFIX} in
 DOCKER_GPU_BUILD_ARG=""
 ;;
 rocm*)
-# we want the patch version of 7.0 instead
-if [[ "$GPU_ARCH_VERSION" == *"7.0"* ]]; then
-GPU_ARCH_VERSION="${GPU_ARCH_VERSION}.2"
-fi
 # we want the patch version of 6.4 instead
 if [[ "$GPU_ARCH_VERSION" == *"6.4"* ]]; then
-GPU_ARCH_VERSION="${GPU_ARCH_VERSION}.4"
+GPU_ARCH_VERSION="${GPU_ARCH_VERSION}.2"
 fi
 BASE_TARGET=rocm
 GPU_IMAGE=rocm/dev-ubuntu-22.04:${GPU_ARCH_VERSION}-complete
 PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
-# add gfx950, gfx115x conditionally starting in ROCm 7.0
+# add gfx950 conditionally starting in ROCm 7.0
 if [[ "$GPU_ARCH_VERSION" == *"7.0"* ]]; then
-PYTORCH_ROCM_ARCH="${PYTORCH_ROCM_ARCH};gfx950;gfx1150;gfx1151"
+PYTORCH_ROCM_ARCH="${PYTORCH_ROCM_ARCH};gfx950"
 fi
 DOCKER_GPU_BUILD_ARG="--build-arg PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH} --build-arg ROCM_VERSION=${GPU_ARCH_VERSION}"
 ;;

@@ -130,8 +130,7 @@ ENV LD_LIBRARY_PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib64:/op
 RUN for cpython_version in "cp312-cp312" "cp313-cp313" "cp313-cp313t"; do \
 /opt/python/${cpython_version}/bin/python -m pip install setuptools wheel; \
 done;
-ADD ./common/patch_libstdc.sh patch_libstdc.sh
-RUN bash ./patch_libstdc.sh && rm patch_libstdc.sh
 
 # cmake-3.18.4 from pip; force in case cmake3 already exists
 RUN yum install -y python3-pip && \

@@ -78,6 +78,4 @@ RUN rm -rf /opt/python/cp33-cp33m /opt/_internal/cpython-3.3.6
 RUN rm -rf /opt/python/cp34-cp34m /opt/_internal/cpython-3.4.6
 COPY --from=openblas /opt/OpenBLAS/ /opt/OpenBLAS/
 COPY --from=arm_compute /acl /acl
 ENV LD_LIBRARY_PATH=/opt/OpenBLAS/lib:/acl/build/:$LD_LIBRARY_PATH
-ADD ./common/patch_libstdc.sh patch_libstdc.sh
-RUN bash ./patch_libstdc.sh && rm patch_libstdc.sh
@@ -106,5 +106,3 @@ COPY --from=arm_compute /acl /acl
 RUN ln -sf /usr/local/cuda-${BASE_CUDA_VERSION} /usr/local/cuda
 ENV PATH=/usr/local/cuda/bin:$PATH
 ENV LD_LIBRARY_PATH=/acl/build/:$LD_LIBRARY_PATH
-ADD ./common/patch_libstdc.sh patch_libstdc.sh
-RUN bash ./patch_libstdc.sh && rm patch_libstdc.sh

@@ -115,9 +115,6 @@ RUN env GRPC_PYTHON_BUILD_SYSTEM_OPENSSL=True pip3 install grpcio
 # cmake-3.28.0 from pip for onnxruntime
 RUN python3 -mpip install cmake==3.28.0
 
-ADD ./common/patch_libstdc.sh patch_libstdc.sh
-RUN bash ./patch_libstdc.sh && rm patch_libstdc.sh
-
 # build onnxruntime 1.21.0 from sources.
 # it is not possible to build it from sources using pip,
 # so just build it from upstream repository.

@@ -75,22 +75,18 @@ case ${image} in
 DOCKERFILE_SUFFIX="_cuda_aarch64"
 ;;
 manylinux2_28-builder:rocm*)
-# we want the patch version of 7.0 instead
-if [[ "$GPU_ARCH_VERSION" == *"7.0"* ]]; then
-GPU_ARCH_VERSION="${GPU_ARCH_VERSION}.2"
-fi
 # we want the patch version of 6.4 instead
 if [[ "$GPU_ARCH_VERSION" == *"6.4"* ]]; then
-GPU_ARCH_VERSION="${GPU_ARCH_VERSION}.4"
+GPU_ARCH_VERSION="${GPU_ARCH_VERSION}.2"
 fi
 TARGET=rocm_final
 MANY_LINUX_VERSION="2_28"
 DEVTOOLSET_VERSION="11"
 GPU_IMAGE=rocm/dev-almalinux-8:${GPU_ARCH_VERSION}-complete
 PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
-# add gfx950, gfx115x conditionally starting in ROCm 7.0
+# add gfx950 conditionally starting in ROCm 7.0
 if [[ "$GPU_ARCH_VERSION" == *"7.0"* ]]; then
-PYTORCH_ROCM_ARCH="${PYTORCH_ROCM_ARCH};gfx950;gfx1150;gfx1151"
+PYTORCH_ROCM_ARCH="${PYTORCH_ROCM_ARCH};gfx950"
 fi
 DOCKER_GPU_BUILD_ARG="--build-arg ROCM_VERSION=${GPU_ARCH_VERSION} --build-arg PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH} --build-arg DEVTOOLSET_VERSION=${DEVTOOLSET_VERSION}"
 ;;

@@ -10,6 +10,11 @@ BAD_SSL = "https://self-signed.badssl.com"
 
 print("Testing SSL certificate checking for Python:", sys.version)
 
+if sys.version_info[:2] < (2, 7) or sys.version_info[:2] < (3, 4):
+print("This version never checks SSL certs; skipping tests")
+sys.exit(0)
+
 
 EXC = OSError
 
 print(f"Connecting to {GOOD_SSL} should work")

@@ -120,8 +120,9 @@ ninja==1.11.1.4
 numba==0.55.2 ; python_version == "3.10" and platform_machine != "s390x"
 numba==0.60.0 ; python_version == "3.12" and platform_machine != "s390x"
 #Description: Just-In-Time Compiler for Numerical Functions
-#Pinned versions: 0.55.2, 0.60.0
+#Pinned versions: 0.54.1, 0.49.0, <=0.49.1
 #test that import: test_numba_integration.py
+#For numba issue see https://github.com/pytorch/pytorch/issues/51511
 #Need release > 0.61.2 for s390x due to https://github.com/numba/numba/pull/10073
 
 #numpy
@@ -241,9 +242,10 @@ pygments==2.15.0
 #Pinned versions: 14.1.0
 #test that import:
 
-scikit-image==0.22.0
+scikit-image==0.19.3 ; python_version < "3.10"
+scikit-image==0.22.0 ; python_version >= "3.10"
 #Description: image processing routines
-#Pinned versions: 0.22.0
+#Pinned versions:
 #test that import: test_nn.py
 
 #scikit-learn
@@ -339,7 +341,7 @@ onnx==1.18.0
 #Pinned versions:
 #test that import:
 
-onnxscript==0.5.3
+onnxscript==0.4.0
 #Description: Required by mypy and test_public_bindings.py when checking torch.onnx._internal
 #Pinned versions:
 #test that import:

@@ -103,11 +103,6 @@ COPY ci_commit_pins/torchbench.txt torchbench.txt
 RUN if [ -n "${INDUCTOR_BENCHMARKS}" ]; then bash ./install_inductor_benchmark_deps.sh; fi
 RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface-requirements.txt torchbench.txt
 
-ARG INSTALL_MINGW
-COPY ./common/install_mingw.sh install_mingw.sh
-RUN if [ -n "${INSTALL_MINGW}" ]; then bash ./install_mingw.sh; fi
-RUN rm install_mingw.sh
-
 ARG TRITON
 ARG TRITON_CPU
 

@@ -57,8 +57,8 @@ def clone_external_repo(target: str, repo: str, dst: str = "", update_submodules
 logger.info("Successfully cloned %s", target)
 return r, commit
 
-except GitCommandError:
-    logger.exception("Git operation failed")
+except GitCommandError as e:
+    logger.error("Git operation failed: %s", e)
     raise
 
 

@@ -143,7 +143,7 @@ def sample_vllm_test_library():
 "pytest -v -s compile/test_decorator.py",
 ],
 },
-"vllm_language_model_test_extended_generation_28_failure_test": {
+"vllm_languagde_model_test_extended_generation_28_failure_test": {
 "title": "Language Models Test (Extended Generation) 2.8 release failure",
 "id": "vllm_languagde_model_test_extended_generation_28_failure_test",
 "package_install": [

@@ -63,7 +63,7 @@ class VllmBuildParameters:
 # DOCKERFILE_PATH: path to Dockerfile used when use_local_dockerfile is True"
 use_local_dockerfile: bool = env_bool_field("USE_LOCAL_DOCKERFILE", True)
 dockerfile_path: Path = env_path_field(
-"DOCKERFILE_PATH", ".github/ci_configs/vllm/Dockerfile"
+"DOCKERFILE_PATH", ".github/ci_configs/vllm/Dockerfile.tmp_vllm"
 )
 
 # the cleaning script to remove torch dependencies from pip

@@ -6,7 +6,7 @@ dependencies = [
 "GitPython==3.1.45",
 "docker==7.1.0",
 "pytest==7.3.2",
-"uv==0.9.5"
+"uv==0.8.6"
 ]
 
 [tool.setuptools]

@@ -5,7 +5,7 @@ DESIRED_ROCM ?= 7.0
 DESIRED_ROCM_SHORT = $(subst .,,$(DESIRED_ROCM))
 PACKAGE_NAME = magma-rocm
 # inherit this from underlying docker image, do not pass this env var to docker
-#PYTORCH_ROCM_ARCH ?= gfx900;gfx906;gfx908;gfx90a;gfx942;gfx950;gfx1030;gfx1100;gfx1101;gfx1102;gfx1150;gfx1151;gfx1200;gfx1201
+#PYTORCH_ROCM_ARCH ?= gfx900;gfx906;gfx908;gfx90a;gfx942;gfx950;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201
 
 DOCKER_RUN = set -eou pipefail; ${DOCKER_CMD} run --rm -i \
 -v $(shell git rev-parse --show-toplevel)/.ci:/builder \
@@ -18,6 +18,7 @@ DOCKER_RUN = set -eou pipefail; ${DOCKER_CMD} run --rm -i \
 .PHONY: all
 all: magma-rocm70
 all: magma-rocm64
+all: magma-rocm63
 
 .PHONY:
 clean:
@@ -33,3 +34,8 @@ magma-rocm70:
 magma-rocm64: DESIRED_ROCM := 6.4
 magma-rocm64:
 $(DOCKER_RUN)
+
+.PHONY: magma-rocm63
+magma-rocm63: DESIRED_ROCM := 6.3
+magma-rocm63:
+$(DOCKER_RUN)

@@ -187,22 +187,19 @@ if [[ $CUDA_VERSION == 12* || $CUDA_VERSION == 13* ]]; then
 export USE_CUFILE=0
 else
 DEPS_LIST+=(
+"/usr/local/cuda/lib64/libnvToolsExt.so.1"
 "/usr/local/cuda/lib64/libcublas.so.12"
 "/usr/local/cuda/lib64/libcublasLt.so.12"
 "/usr/local/cuda/lib64/libcudart.so.12"
 "/usr/local/cuda/lib64/libnvrtc.so.12"
 "/usr/local/cuda/extras/CUPTI/lib64/libcupti.so.12")
 DEPS_SONAME+=(
+"libnvToolsExt.so.1"
 "libcublas.so.12"
 "libcublasLt.so.12"
 "libcudart.so.12"
 "libnvrtc.so.12"
 "libcupti.so.12")
 
-if [[ $CUDA_VERSION != 12.9* ]]; then
-DEPS_LIST+=("/usr/local/cuda/lib64/libnvToolsExt.so.1")
-DEPS_SONAME+=("libnvToolsExt.so.1")
-fi
 fi
 else
 echo "Using nvidia libs from pypi."

@@ -233,9 +233,7 @@ if [[ "${BUILD_ENVIRONMENT}" != *cuda* ]]; then
 export BUILD_STATIC_RUNTIME_BENCHMARK=ON
 fi
 
-if [[ "$BUILD_ENVIRONMENT" == *-full-debug* ]]; then
-export CMAKE_BUILD_TYPE=Debug
-elif [[ "$BUILD_ENVIRONMENT" == *-debug* ]]; then
+if [[ "$BUILD_ENVIRONMENT" == *-debug* ]]; then
 export CMAKE_BUILD_TYPE=RelWithAssert
 fi
 
@@ -301,11 +299,6 @@ else
 python -m build --wheel --no-isolation
 fi
 pip_install_whl "$(echo dist/*.whl)"
-if [[ "$BUILD_ENVIRONMENT" == *full-debug* ]]; then
-# Regression test for https://github.com/pytorch/pytorch/issues/164297
-# Torch should be importable and that's about it
-pushd /; python -c "import torch;print(torch.__config__.show(), torch.randn(5) + 1.7)"; popd
-fi
 
 if [[ "${BUILD_ADDITIONAL_PACKAGES:-}" == *vision* ]]; then
 install_torchvision

@@ -256,7 +256,7 @@ test_torchbench_smoketest() {
 local device=mps
 local dtypes=(undefined float16 bfloat16 notset)
 local dtype=${dtypes[$1]}
-local models=(llama BERT_pytorch dcgan yolov3 resnet152 sam sam_fast pytorch_unet stable_diffusion_text_encoder speech_transformer Super_SloMo doctr_det_predictor doctr_reco_predictor vgg16)
+local models=(hf_T5 llama BERT_pytorch dcgan hf_GPT2 yolov3 resnet152 sam sam_fast pytorch_unet stable_diffusion_text_encoder speech_transformer Super_SloMo doctr_det_predictor doctr_reco_predictor timm_resnet timm_vovnet vgg16)
 
 for backend in eager inductor; do
 
@@ -319,7 +319,7 @@ test_aoti_torchbench_smoketest() {
 local device=mps
 local dtypes=(undefined float16 bfloat16 notset)
 local dtype=${dtypes[$1]}
-local models=(llama BERT_pytorch dcgan yolov3 resnet152 sam sam_fast pytorch_unet stable_diffusion_text_encoder speech_transformer Super_SloMo doctr_det_predictor doctr_reco_predictor vgg16)
+local models=(hf_T5 llama BERT_pytorch dcgan hf_GPT2 yolov3 resnet152 sam sam_fast pytorch_unet stable_diffusion_text_encoder speech_transformer Super_SloMo doctr_det_predictor doctr_reco_predictor timm_resnet timm_vovnet vgg16)
 
 echo "Launching torchbench inference performance run for AOT Inductor and dtype ${dtype}"
 local dtype_arg="--${dtype}"

@@ -32,9 +32,6 @@ LIBTORCH_NAMESPACE_LIST = (
 "torch::",
 )
 
-# Patterns for detecting statically linked libstdc++ symbols
-STATICALLY_LINKED_CXX11_ABI = [re.compile(r".*recursive_directory_iterator.*")]
-
 
 def _apply_libtorch_symbols(symbols):
 return [
@@ -56,17 +53,12 @@ def get_symbols(lib: str) -> list[tuple[str, str, str]]:
 return [x.split(" ", 2) for x in lines.decode("latin1").split("\n")[:-1]]
 
 
-def grep_symbols(
-    lib: str, patterns: list[Any], symbol_type: str | None = None
-) -> list[str]:
+def grep_symbols(lib: str, patterns: list[Any]) -> list[str]:
 def _grep_symbols(
 symbols: list[tuple[str, str, str]], patterns: list[Any]
 ) -> list[str]:
 rc = []
 for _s_addr, _s_type, s_name in symbols:
-# Filter by symbol type if specified
-if symbol_type and _s_type != symbol_type:
-    continue
 for pattern in patterns:
 if pattern.match(s_name):
 rc.append(s_name)
@@ -88,18 +80,6 @@ def grep_symbols(
 return functools.reduce(list.__add__, (x.result() for x in tasks), [])
 
 
-def check_lib_statically_linked_libstdc_cxx_abi_symbols(lib: str) -> None:
-cxx11_statically_linked_symbols = grep_symbols(
-lib, STATICALLY_LINKED_CXX11_ABI, symbol_type="T"
-)
-num_statically_linked_symbols = len(cxx11_statically_linked_symbols)
-print(f"num_statically_linked_symbols (T): {num_statically_linked_symbols}")
-if num_statically_linked_symbols > 0:
-raise RuntimeError(
-f"Found statically linked libstdc++ symbols (recursive_directory_iterator): {cxx11_statically_linked_symbols[:100]}"
-)
-
-
 def check_lib_symbols_for_abi_correctness(lib: str) -> None:
 print(f"lib: {lib}")
 cxx11_symbols = grep_symbols(lib, LIBTORCH_CXX11_PATTERNS)
@@ -127,7 +107,6 @@ def main() -> None:
 
 libtorch_cpu_path = str(install_root / "lib" / "libtorch_cpu.so")
 check_lib_symbols_for_abi_correctness(libtorch_cpu_path)
-check_lib_statically_linked_libstdc_cxx_abi_symbols(libtorch_cpu_path)
 
 
 if __name__ == "__main__":

@@ -34,14 +34,12 @@ fi
 
 
 # Patch numba to avoid CUDA-13 crash, see https://github.com/pytorch/pytorch/issues/162878
-if [[ "$BUILD_ENVIRONMENT" == *cuda* ]]; then
-NUMBA_CUDA_DIR=$(python -c "import os;import numba.cuda; print(os.path.dirname(numba.cuda.__file__))" 2>/dev/null || true)
-if [ -n "$NUMBA_CUDA_DIR" ]; then
-NUMBA_PATCH="$(dirname "$(realpath "${BASH_SOURCE[0]}")")/numba-cuda-13.patch"
-pushd "$NUMBA_CUDA_DIR"
-patch -p4 <"$NUMBA_PATCH"
-popd
-fi
+NUMBA_CUDA_DIR=$(python -c "import os;import numba.cuda; print(os.path.dirname(numba.cuda.__file__))" 2>/dev/null || true)
+if [ -n "$NUMBA_CUDA_DIR" ]; then
+NUMBA_PATCH="$(dirname "$(realpath "${BASH_SOURCE[0]}")")/numba-cuda-13.patch"
+pushd "$NUMBA_CUDA_DIR"
+patch -p4 <"$NUMBA_PATCH"
+popd
 fi
 
 echo "Environment variables:"
@@ -337,13 +335,13 @@ test_python() {
 
 test_python_smoke() {
 # Smoke tests for H100/B200
-time python test/run_test.py --include test_matmul_cuda test_scaled_matmul_cuda inductor/test_fp8 inductor/test_max_autotune $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
+time python test/run_test.py --include test_matmul_cuda inductor/test_fp8 inductor/test_max_autotune $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
 assert_git_not_dirty
 }
 
 test_python_smoke_b200() {
 # Targeted smoke tests for B200 - staged approach to avoid too many failures
-time python test/run_test.py --include test_matmul_cuda test_scaled_matmul_cuda inductor/test_fp8 $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
+time python test/run_test.py --include test_matmul_cuda inductor/test_fp8 $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
 assert_git_not_dirty
 }
 
@@ -485,22 +483,6 @@ test_inductor_aoti() {
 /usr/bin/env "${TEST_ENVS[@]}" python test/run_test.py --cpp --verbose -i cpp/test_aoti_abi_check cpp/test_aoti_inference cpp/test_vec_half_AVX2 -dist=loadfile
 }
 
-test_inductor_aoti_cross_compile_for_windows() {
-
-TEST_REPORTS_DIR=$(pwd)/test/test-reports
-mkdir -p "$TEST_REPORTS_DIR"
-
-# Set WINDOWS_CUDA_HOME environment variable
-WINDOWS_CUDA_HOME="$(pwd)/win-torch-wheel-extracted"
-export WINDOWS_CUDA_HOME
-
-echo "WINDOWS_CUDA_HOME is set to: $WINDOWS_CUDA_HOME"
-echo "Contents:"
-ls -lah "$(pwd)/win-torch-wheel-extracted/lib/x64/" || true
-
-python test/inductor/test_aoti_cross_compile_windows.py -k compile --package-dir "$TEST_REPORTS_DIR" --win-torch-lib-dir "$(pwd)/win-torch-wheel-extracted/torch/lib"
-}
-
 test_inductor_cpp_wrapper_shard() {
 if [[ -z "$NUM_TEST_SHARDS" ]]; then
 echo "NUM_TEST_SHARDS must be defined to run a Python test shard"
@@ -854,7 +836,7 @@ test_dynamo_benchmark() {
 elif [[ "${suite}" == "timm_models" ]]; then
 export TORCHBENCH_ONLY_MODELS="inception_v3"
 elif [[ "${suite}" == "torchbench" ]]; then
-export TORCHBENCH_ONLY_MODELS="BERT_pytorch"
+export TORCHBENCH_ONLY_MODELS="hf_Bert"
 fi
 fi
 test_single_dynamo_benchmark "dashboard" "$suite" "$shard_id" "$@"
@@ -885,13 +867,13 @@ test_inductor_torchbench_smoketest_perf() {
 mkdir -p "$TEST_REPORTS_DIR"
 
 python benchmarks/dynamo/torchbench.py --device cuda --performance --backend inductor --float16 --training \
---batch-size-file "$(realpath benchmarks/dynamo/torchbench_models_list.txt)" --only BERT_pytorch \
+--batch-size-file "$(realpath benchmarks/dynamo/torchbench_models_list.txt)" --only hf_Bert \
 --output "$TEST_REPORTS_DIR/inductor_training_smoketest.csv"
 # The threshold value needs to be actively maintained to make this check useful
 python benchmarks/dynamo/check_perf_csv.py -f "$TEST_REPORTS_DIR/inductor_training_smoketest.csv" -t 1.4
 
 # Check memory compression ratio for a few models
-for test in BERT_pytorch yolov3; do
+for test in hf_Albert timm_vision_transformer; do
 python benchmarks/dynamo/torchbench.py --device cuda --performance --backend inductor --amp --training \
 --disable-cudagraphs --batch-size-file "$(realpath benchmarks/dynamo/torchbench_models_list.txt)" \
 --only $test --output "$TEST_REPORTS_DIR/inductor_training_smoketest_$test.csv"
@@ -902,7 +884,7 @@ test_inductor_torchbench_smoketest_perf() {
 done
 
 # Perform some "warm-start" runs for a few huggingface models.
-for test in AllenaiLongformerBase DistilBertForMaskedLM DistillGPT2 GoogleFnet YituTechConvBert; do
+for test in AlbertForQuestionAnswering AllenaiLongformerBase DistilBertForMaskedLM DistillGPT2 GoogleFnet YituTechConvBert; do
 python benchmarks/dynamo/huggingface.py --accuracy --training --amp --inductor --device cuda --warm-start-latency \
 --only $test --output "$TEST_REPORTS_DIR/inductor_warm_start_smoketest_$test.csv"
 python benchmarks/dynamo/check_accuracy.py \
@@ -916,7 +898,7 @@ test_inductor_set_cpu_affinity(){
 export LD_PRELOAD="$JEMALLOC_LIB":"$LD_PRELOAD"
 export MALLOC_CONF="oversize_threshold:1,background_thread:true,metadata_thp:auto,dirty_decay_ms:-1,muzzy_decay_ms:-1"
 
-if [[ "$(uname -m)" != "aarch64" ]]; then
+if [[ "${TEST_CONFIG}" != *aarch64* ]]; then
 # Use Intel OpenMP for x86
 IOMP_LIB="$(dirname "$(which python)")/../lib/libiomp5.so"
 export LD_PRELOAD="$IOMP_LIB":"$LD_PRELOAD"
@@ -930,7 +912,7 @@ test_inductor_set_cpu_affinity(){
 cores=$((cpus / thread_per_core))
 
 # Set number of cores to 16 on aarch64 for performance runs
-if [[ "$(uname -m)" == "aarch64" && $cores -gt 16 ]]; then
+if [[ "${TEST_CONFIG}" == *aarch64* && $cores -gt 16 ]]; then
 cores=16
 fi
 export OMP_NUM_THREADS=$cores
@@ -1631,7 +1613,6 @@ test_operator_benchmark() {
 TEST_REPORTS_DIR=$(pwd)/test/test-reports
 mkdir -p "$TEST_REPORTS_DIR"
 TEST_DIR=$(pwd)
-ARCH=$(uname -m)
 
 test_inductor_set_cpu_affinity
 
@@ -1646,7 +1627,7 @@ test_operator_benchmark() {
 pip_install pandas
 python check_perf_csv.py \
 --actual "${TEST_REPORTS_DIR}/operator_benchmark_eager_float32_cpu.csv" \
---expected "${ARCH}_expected_ci_operator_benchmark_eager_float32_cpu.csv"
+--expected "expected_ci_operator_benchmark_eager_float32_cpu.csv"
 }
 
 test_operator_microbenchmark() {
@@ -1683,7 +1664,7 @@ if [[ "${TEST_CONFIG}" == *numpy_2* ]]; then
 python -m pip install --pre numpy==2.0.2 scipy==1.13.1 numba==0.60.0
 fi
 python test/run_test.py --include dynamo/test_functions.py dynamo/test_unspec.py test_binary_ufuncs.py test_fake_tensor.py test_linalg.py test_numpy_interop.py test_tensor_creation_ops.py test_torch.py torch_np/test_basic.py
-elif [[ "${BUILD_ENVIRONMENT}" == *aarch64* && "${TEST_CONFIG}" == 'default' ]]; then
+elif [[ "${BUILD_ENVIRONMENT}" == *aarch64* && "${TEST_CONFIG}" != *perf_cpu_aarch64* ]]; then
 test_linux_aarch64
 elif [[ "${TEST_CONFIG}" == *backward* ]]; then
 test_forward_backward_compatibility
@@ -1734,8 +1715,6 @@ elif [[ "${TEST_CONFIG}" == *inductor-triton-cpu* ]]; then
 test_inductor_triton_cpu
 elif [[ "${TEST_CONFIG}" == *inductor-micro-benchmark* ]]; then
 test_inductor_micro_benchmark
-elif [[ "${TEST_CONFIG}" == *aoti_cross_compile_for_windows* ]]; then
-test_inductor_aoti_cross_compile_for_windows
 elif [[ "${TEST_CONFIG}" == *huggingface* ]]; then
 install_torchvision
 id=$((SHARD_NUMBER-1))

@@ -15,35 +15,37 @@ if errorlevel 1 exit /b 1
 if not errorlevel 0 exit /b 1
 
 cd %TMP_DIR_WIN%\build\torch\test
 
-:: Enable delayed variable expansion to make the list
-setlocal enabledelayedexpansion
-set EXE_LIST=
 for /r "." %%a in (*.exe) do (
-if "%%~na" == "c10_intrusive_ptr_benchmark" (
-@REM NB: This is not a gtest executable file, thus couldn't be handled by
-@REM pytest-cpp and is excluded from test discovery by run_test
-call "%%~fa"
+call :libtorch_check "%%~na" "%%~fa"
 if errorlevel 1 goto fail
-if not errorlevel 0 goto fail
-) else (
-if "%%~na" == "verify_api_visibility" (
-@REM Skip verify_api_visibility as it is a compile-level test
-) else (
-set EXE_LIST=!EXE_LIST! cpp/%%~na
-)
-)
 )
 
+goto :eof
+
+:libtorch_check
+
 cd %CWD%
 set CPP_TESTS_DIR=%TMP_DIR_WIN%\build\torch\test
 
-:: Run python test\run_test.py on the list
-set NO_TD=True && python test\run_test.py --cpp --verbose -i !EXE_LIST!
-if errorlevel 1 goto fail
-if not errorlevel 0 goto fail
+:: Skip verify_api_visibility as it a compile level test
+if "%~1" == "verify_api_visibility" goto :eof
 
-goto :eof
+echo Running "%~2"
+if "%~1" == "c10_intrusive_ptr_benchmark" (
+:: NB: This is not a gtest executable file, thus couldn't be handled by pytest-cpp
+call "%~2"
+goto :eof
+)
+
+python test\run_test.py --cpp --verbose -i "cpp/%~1"
+if errorlevel 1 (
+echo %1 failed with exit code %errorlevel%
+goto fail
+)
+if not errorlevel 0 (
+echo %1 failed with exit code %errorlevel%
+goto fail
+)
 
 :eof
 exit /b 0

@@ -38,7 +38,7 @@ if [[ "$BUILD_ENVIRONMENT" == *cuda* ]]; then
 fi
 
 # TODO: Move this to .ci/docker/requirements-ci.txt
-python -m pip install "psutil==5.9.1" nvidia-ml-py "pytest-shard==0.1.2"
+python -m pip install "psutil==5.9.1" "pynvml==11.4.1" "pytest-shard==0.1.2"
 
 run_tests() {
 # Run nvidia-smi if available

@@ -37,10 +37,10 @@ IF "%CUDA_PATH_V128%"=="" (
 )
 
 IF "%BUILD_VISION%" == "" (
-set TORCH_CUDA_ARCH_LIST=7.0;7.5;8.0;8.6;9.0;10.0;12.0
+set TORCH_CUDA_ARCH_LIST=6.1;7.0;7.5;8.0;8.6;9.0;10.0;12.0
 set TORCH_NVCC_FLAGS=-Xfatbin -compress-all
 ) ELSE (
-set NVCC_FLAGS=-D__CUDA_NO_HALF_OPERATORS__ --expt-relaxed-constexpr -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=compute_80 -gencode=arch=compute_86,code=compute_86 -gencode=arch=compute_90,code=compute_90 -gencode=arch=compute_100,code=compute_100 -gencode=arch=compute_120,code=compute_120
+set NVCC_FLAGS=-D__CUDA_NO_HALF_OPERATORS__ --expt-relaxed-constexpr -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=compute_80 -gencode=arch=compute_86,code=compute_86 -gencode=arch=compute_90,code=compute_90 -gencode=arch=compute_100,code=compute_100 -gencode=arch=compute_120,code=compute_120
 )
 
 set "CUDA_PATH=%CUDA_PATH_V128%"

@@ -71,7 +71,14 @@ export PYTORCH_BUILD_NUMBER=1
 
 # Set triton version as part of PYTORCH_EXTRA_INSTALL_REQUIREMENTS
 TRITON_VERSION=$(cat $PYTORCH_ROOT/.ci/docker/triton_version.txt)
-TRITON_CONSTRAINT="platform_system == 'Linux'"
+# Here PYTORCH_EXTRA_INSTALL_REQUIREMENTS is already set for the all the wheel builds hence append TRITON_CONSTRAINT
+TRITON_CONSTRAINT="platform_system == 'Linux' and platform_machine == 'x86_64'"
+
+# CUDA 12.9/13.0 builds have triton for Linux and Linux aarch64 binaries.
+if [[ "$DESIRED_CUDA" == "cu129" ]] || [[ "$DESIRED_CUDA" == "cu130" ]]; then
+TRITON_CONSTRAINT="platform_system == 'Linux'"
+fi
 
 if [[ "$PACKAGE_TYPE" =~ .*wheel.* && -n "${PYTORCH_EXTRA_INSTALL_REQUIREMENTS:-}" && ! "$PYTORCH_BUILD_VERSION" =~ .*xpu.* ]]; then
 TRITON_REQUIREMENT="triton==${TRITON_VERSION}; ${TRITON_CONSTRAINT}"
@@ -163,13 +170,8 @@ if [[ "$(uname)" != Darwin ]]; then
 MEMORY_LIMIT_MAX_JOBS=12
 NUM_CPUS=$(( $(nproc) - 2 ))
 
-if [[ "$(uname)" == Linux ]]; then
 # Defaults here for **binary** linux builds so they can be changed in one place
 export MAX_JOBS=${MAX_JOBS:-$(( ${NUM_CPUS} > ${MEMORY_LIMIT_MAX_JOBS} ? ${MEMORY_LIMIT_MAX_JOBS} : ${NUM_CPUS} ))}
-else
-# For other builds
-export MAX_JOBS=${NUM_CPUS}
-fi
 
 cat >>"$envfile" <<EOL
 export MAX_JOBS="${MAX_JOBS}"

@@ -59,14 +59,13 @@ performance-*,
 -performance-enum-size,
 readability-container-size-empty,
 readability-delete-null-pointer,
-readability-duplicate-include,
+readability-duplicate-include
 readability-misplaced-array-index,
-readability-redundant*,
+readability-redundant*
 readability-simplify-subscript-expr,
 readability-string-compare,
 -readability-redundant-access-specifiers,
 -readability-redundant-control-flow,
--readability-redundant-inline-specifier,
 '
 HeaderFilterRegex: '^(aten/|c10/|torch/).*$'
 WarningsAsErrors: '*'

.flake8 (8)
@@ -7,12 +7,16 @@ max-line-length = 120
 # C408 ignored because we like the dict keyword argument syntax
 # E501 is not flexible enough, we're using B950 instead
 ignore =
-E203,E305,E402,E501,E704,E741,F405,F841,F999,W503,W504,C408,E302,W291,E303,F824,
+E203,E305,E402,E501,E704,E721,E741,F405,F841,F999,W503,W504,C408,E302,W291,E303,F824,
 # shebang has extra meaning in fbcode lints, so I think it's not worth trying
 # to line this up with executable bit
 EXE001,
 # these ignores are from flake8-bugbear; please fix!
-B007,B008,B017,B019,B023,B028,B903,B905,B906,B907,B908,B910
+B007,B008,B017,B019,B023,B028,B903,B904,B905,B906,B907,B908,B910
+# these ignores are from flake8-comprehensions; please fix!
+C407,
+# these ignores are from flake8-logging-format; please fix!
+G100,G101,G200
 # these ignores are from flake8-simplify. please fix or ignore with commented reason
 SIM105,SIM108,SIM110,SIM111,SIM113,SIM114,SIM115,SIM116,SIM117,SIM118,SIM119,SIM12,
 # SIM104 is already covered by pyupgrade ruff

.github/ISSUE_TEMPLATE/ci-sev.md (1, vendored)
@@ -8,7 +8,6 @@ assignees: ''
 ---
 
 > NOTE: Remember to label this issue with "`ci: sev`"
-> If you want autorevert to be disabled, keep the ci: disable-autorevert label
 
 <!-- Add the `merge blocking` label to this PR to prevent PRs from being merged while this issue is open -->
 

.github/ISSUE_TEMPLATE/disable-autorevert.md (4, vendored)
@@ -1,7 +1,7 @@
 ---
-name: "D❌\U0001F519 ISABLE AUTOREVERT"
+name: DISABLE AUTOREVERT
 about: Disables autorevert when open
-title: "[DISABLE AUTOREVERT]"
+title: "❌\U0001F519 [DISABLE AUTOREVERT]"
 labels: 'ci: disable-autorevert'
 assignees: ''
 

@@ -65,7 +65,7 @@ runs:
 cd .ci/lumen_cli
 python3 -m pip install -e .
 )
-MAX_JOBS="$(nproc --ignore=10)"
+MAX_JOBS="$(nproc --ignore=6)"
 export MAX_JOBS
 
 # Split the comma-separated list and build each target

.github/actions/linux-test/action.yml (2, vendored)
@@ -274,6 +274,8 @@ runs:
 -w /var/lib/jenkins/workspace \
 "${DOCKER_IMAGE}"
 )
+# Propagate download.pytorch.org IP to container
+grep download.pytorch.org /etc/hosts | docker exec -i "${container_name}" sudo bash -c "/bin/cat >> /etc/hosts"
 echo "DOCKER_CONTAINER_ID=${container_name}" >> "${GITHUB_ENV}"
 docker exec -t "${container_name}" sh -c "pip install $(echo dist/*.whl)[opt-einsum] && ${TEST_COMMAND}"
 

.github/actions/setup-linux/action.yml (35, vendored)
@@ -28,10 +28,6 @@ runs:
 echo "instance-type: $(get_ec2_metadata instance-type)"
 echo "system info $(uname -a)"
 
-- name: Print GPU info (if present)
-shell: bash
-run: if [ -f /usr/bin/nvidia-smi ]; then nvidia-smi; fi
-
 - name: Check if in a container runner
 shell: bash
 id: check_container_runner
@@ -86,6 +82,37 @@ runs:
 # Prune all of the docker images
 docker system prune -af
 
+- name: Manually resolve download.pytorch.org
+shell: bash
+continue-on-error: true
+run: |
+set +e
+set -x
+
+PT_DOMAIN=download.pytorch.org
+# TODO: Flaky access to download.pytorch.org https://github.com/pytorch/pytorch/issues/100400,
+# cleaning this up once the issue is fixed. There are more than one resolved IP here, the last
+# one is returned at random
+RESOLVED_IP=$(dig -4 +short "${PT_DOMAIN}" | tail -n1)
+
+if [ -z "${RESOLVED_IP}" ]; then
+echo "Couldn't resolve ${PT_DOMAIN}, retrying with Google DNS..."
+RESOLVED_IP=$(dig -4 +short "${PT_DOMAIN}" @8.8.8.8 | tail -n1)
+
+if [ -z "${RESOLVED_IP}" ]; then
+echo "Couldn't resolve ${PT_DOMAIN}, exiting..."
+exit 1
+fi
+fi
+
+if grep -r "${PT_DOMAIN}" /etc/hosts; then
+# Clean up any old records first
+sudo sed -i "/${PT_DOMAIN}/d" /etc/hosts
+fi
+
+echo "${RESOLVED_IP} ${PT_DOMAIN}" | sudo tee -a /etc/hosts
+cat /etc/hosts
+
 - name: Check that the docker daemon is running
 shell: bash
 continue-on-error: true

.github/actions/setup-rocm/action.yml (13, vendored)
@@ -111,16 +111,3 @@ runs:
 # This video group ID maps to subgid 1 inside the docker image due to the /etc/subgid entries.
 # The group name corresponding to group ID 1 can change depending on the OS, so both are necessary.
 echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd $DEVICE_FLAG --group-add video --group-add $render_gid --group-add daemon --group-add bin --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --network=host" >> "${GITHUB_ENV}"
-
-- name: configure aws credentials
-id: aws_creds
-uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722 # v4.1.0
-with:
-role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_s3_and_ecr_read_only
-aws-region: us-east-1
-role-duration-seconds: 18000
-
-- name: Login to Amazon ECR
-id: login-ecr
-continue-on-error: true
-uses: aws-actions/amazon-ecr-login@062b18b96a7aff071d4dc91bc00c4c1a7945b076 # v2.0.1

@@ -33,6 +33,10 @@ runs:
 )
 
 echo "CONTAINER_NAME=${container_name}" >> "$GITHUB_ENV"
+if [[ "${GPU_ARCH_TYPE}" != "rocm" && "${BUILD_ENVIRONMENT}" != "linux-aarch64-binary-manywheel" && "${BUILD_ENVIRONMENT}" != "linux-s390x-binary-manywheel" && "${GPU_ARCH_TYPE}" != "xpu" ]]; then
+# Propagate download.pytorch.org IP to container. This is only needed on Linux non aarch64 runner
+grep download.pytorch.org /etc/hosts | docker exec -i "${container_name}" bash -c "/bin/cat >> /etc/hosts"
+fi
 
 docker exec -t -w "${PYTORCH_ROOT}" "${container_name}" bash -c "bash .circleci/scripts/binary_populate_env.sh"
 # Generate test script

.github/ci_commit_pins/audio.txt (2, vendored)
@@ -1 +1 @@
-69bbe7363897764f9e758d851cd0340147d27f94
+87ff22e49ed0e92576c4935ccb8c143daac4a3cd

.github/ci_commit_pins/vision.txt (2, vendored)
@@ -1 +1 @@
-faffd5cf673615583da6517275e361cb3dbc77e6
+966da7e46f65d6d49df3e31214470a4fe5cc8e66

.github/ci_commit_pins/vllm.txt (2, vendored)
@ -1 +1 @@
|
|||||||
e5192819208c4d68194844b7dfafbc00020d0dea
|
78a47f87ce259a48f0391fa9ae15add05ea7432b
|
||||||
|
|||||||
2
.github/ci_commit_pins/xla.txt
vendored
2
.github/ci_commit_pins/xla.txt
vendored
@ -1 +1 @@
|
|||||||
0fa6e3129e61143224663e1ec67980d12b7ec4eb
|
0fc62aa26a30ed7ca419d285f285cb5ba02c4394
|
||||||
|
vLLM torch-nightly Dockerfile (file path not preserved in this capture)
@@ -1,41 +1,59 @@
+# TODO(elainwy): remove this file after the torch nightly dockerfile is in sync in vllm repo
+# The vLLM Dockerfile is used to construct vLLM image against torch nightly and torch main that can be directly used for testing
+
ARG CUDA_VERSION=12.8.1
ARG PYTHON_VERSION=3.12

# BUILD_BASE_IMAGE: used to setup python build xformers, and vllm wheels, It can be replaced with a different base image from local machine,
# by default, it uses the torch-nightly-base stage from this docker image
ARG BUILD_BASE_IMAGE=torch-nightly-base

+# FINAL_BASE_IMAGE: used to set up vllm-instaled environment and build flashinfer,
+# by default, it uses devel-ubuntu22.04 official image.
ARG FINAL_BASE_IMAGE=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04

# The logic is copied from https://github.com/vllm-project/vllm/blob/main/docker/Dockerfile
ARG GET_PIP_URL="https://bootstrap.pypa.io/get-pip.py"


#################### TORCH NIGHTLY BASE IMAGE ####################
+# A base image for building vLLM with devel ubuntu 22.04, this is mainly used to build vllm in vllm builtkite ci
FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04 as torch-nightly-base

ARG CUDA_VERSION
ARG PYTHON_VERSION
ARG GET_PIP_URL

-# Install system dependencies and uv, then create Python virtual environment
+# Install Python and other dependencies
RUN apt-get update -y \
-    && apt-get install -y ccache software-properties-common git curl sudo vim python3-pip \
-    && curl -LsSf https://astral.sh/uv/install.sh | sh \
-    && $HOME/.local/bin/uv venv /opt/venv --python ${PYTHON_VERSION} \
-    && rm -f /usr/bin/python3 /usr/bin/python3-config /usr/bin/pip \
-    && ln -s /opt/venv/bin/python3 /usr/bin/python3 \
-    && ln -s /opt/venv/bin/python3-config /usr/bin/python3-config \
-    && ln -s /opt/venv/bin/pip /usr/bin/pip \
+    && apt-get install -y ccache software-properties-common git curl wget sudo vim \
+    && add-apt-repository -y ppa:deadsnakes/ppa \
+    && apt-get update -y \
+    && apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv \
+    && update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1 \
+    && update-alternatives --set python3 /usr/bin/python${PYTHON_VERSION} \
+    && ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config \
+    && curl -sS ${GET_PIP_URL} | python${PYTHON_VERSION} \
    && python3 --version && python3 -m pip --version

# Upgrade to GCC 10 to avoid https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92519
# as it was causing spam when compiling the CUTLASS kernels
-RUN apt-get install -y gcc-10 g++-10
-RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 110 --slave /usr/bin/g++ g++ /usr/bin/g++-10
-RUN <<EOF
-gcc --version
-EOF
+# Ensure gcc >= 10 to avoid CUTLASS issues (bug 92519)
+RUN current_gcc_version=$(gcc -dumpversion | cut -f1 -d.) && \
+    if command -v apt-get >/dev/null; then \
+        if [ "$current_gcc_version" -lt 10 ]; then \
+            echo "GCC version is $current_gcc_version, installing gcc-10..."; \
+            apt-get update \
+            && apt-get install -y gcc-10 g++-10 \
+            && update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 100 \
+            && update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-10 100; \
+        else \
+            echo "GCC version is $current_gcc_version, no need to install gcc-10."; \
+        fi \
+    fi \
+    && gcc --version && g++ --version

-# Install uv for faster pip installs
+# install uv for faster pip installs
RUN --mount=type=cache,target=/root/.cache/uv \
    python3 -m pip install uv==0.8.4

@@ -43,32 +61,36 @@ ENV UV_HTTP_TIMEOUT=500
ENV UV_INDEX_STRATEGY="unsafe-best-match"
# Use copy mode to avoid hardlink failures with Docker cache mounts
ENV UV_LINK_MODE=copy

#################### TORCH NIGHTLY BASE IMAGE ####################


#################### BASE BUILD IMAGE ####################
+# A base image for building vLLM with torch nightly or torch wheels
+# prepare basic build environment
FROM ${BUILD_BASE_IMAGE} AS base
USER root

ARG CUDA_VERSION
ARG PYTHON_VERSION

-# Only work with PyTorch manylinux builder
+# TODO (huydhn): Only work with PyTorch manylinux builder
ENV PATH="/opt/python/cp312-cp312/bin:${PATH}"

# Install some system dependencies and double check python version
RUN if command -v apt-get >/dev/null; then \
    apt-get update -y \
-    && apt-get install -y ccache software-properties-common git wget sudo vim; \
+    && apt-get install -y ccache software-properties-common git curl wget sudo vim; \
    else \
-    dnf install -y git wget sudo; \
+    dnf install -y git curl wget sudo; \
    fi \
    && python3 --version && python3 -m pip --version

# Install uv for faster pip installs if not existed
RUN --mount=type=cache,target=/root/.cache/uv \
-    python3 -m pip install uv==0.8.4
+    if ! python3 -m uv --version >/dev/null 2>&1; then \
+        python3 -m pip install uv==0.8.4; \
+    fi
ENV UV_HTTP_TIMEOUT=500
ENV UV_INDEX_STRATEGY="unsafe-best-match"
# Use copy mode to avoid hardlink failures with Docker cache mounts

@@ -76,15 +98,15 @@ ENV UV_LINK_MODE=copy

WORKDIR /workspace

-# Install build and runtime dependencies
+# install build and runtime dependencies
COPY requirements/common.txt requirements/common.txt
COPY use_existing_torch.py use_existing_torch.py
COPY pyproject.toml pyproject.toml

-# Install build and runtime dependencies without stable torch version
+# install build and runtime dependencies without stable torch version
RUN python3 use_existing_torch.py

-# Default mount file as placeholder, this just avoid the mount error
+# default mount file as placeholder, this just avoid the mount error
# change to a different vllm folder if this does not exist anymore
ARG TORCH_WHEELS_PATH="./requirements"
ARG PINNED_TORCH_VERSION

@@ -116,36 +138,56 @@ RUN --mount=type=cache,target=/root/.cache/uv \
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system -r requirements/common.txt

+# Must put before installing xformers, so it can install the correct version of xfomrers.
+ARG xformers_cuda_arch_list='7.5;8.0+PTX;9.0a'
+ENV TORCH_CUDA_ARCH_LIST=${xformers_cuda_arch_list}
+
ARG max_jobs=16
ENV MAX_JOBS=${max_jobs}

-RUN --mount=type=cache,target=/root/.cache/uv bash - <<'BASH'
-    export TORCH_CUDA_ARCH_LIST='7.5 8.0+PTX 9.0a'
-    git clone https://github.com/facebookresearch/xformers.git
+RUN echo ${TORCH_CUDA_ARCH_LIST}
+RUN echo ${MAX_JOBS}
+RUN pip freeze | grep -E 'ninja'

-    pushd xformers
-    git checkout v0.0.32.post2
-    git submodule update --init --recursive
-    python3 setup.py bdist_wheel --dist-dir=../xformers-dist --verbose
-    popd
+# Build xformers with cuda and torch nightly/wheel
+# following official xformers guidance: https://github.com/facebookresearch/xformers#build
+# sha for https://github.com/facebookresearch/xformers/tree/v0.0.32.post2
+ARG XFORMERS_COMMIT=5d4b92a5e5a9c6c6d4878283f47d82e17995b468
+ENV CCACHE_DIR=/root/.cache/ccache

-    rm -rf xformers
-BASH
+RUN --mount=type=cache,target=/root/.cache/ccache \
+    --mount=type=cache,target=/root/.cache/uv \
+    echo 'git clone xformers...' \
+    && git clone https://github.com/facebookresearch/xformers.git --recursive \
+    && cd xformers \
+    && git checkout ${XFORMERS_COMMIT} \
+    && git submodule update --init --recursive \
+    && echo 'finish git clone xformers...' \
+    && rm -rf build \
+    && python3 setup.py bdist_wheel --dist-dir=../xformers-dist --verbose \
+    && cd .. \
+    && rm -rf xformers

RUN --mount=type=cache,target=/root/.cache/uv \
-    uv pip install --system xformers-dist/*.whl
+    uv pip install --system xformers-dist/*.whl --verbose

+# Build can take a long time, and the torch nightly version fetched from url can be different in next docker stage.
+# track the nightly torch version used in the build, when we set up runtime environment we can make sure the version is the same
RUN uv pip freeze | grep -i '^torch\|^torchvision\|^torchaudio' > torch_build_versions.txt

RUN cat torch_build_versions.txt
RUN pip freeze | grep -E 'torch|xformers|torchvision|torchaudio'

#################### BASE BUILD IMAGE ####################


#################### WHEEL BUILD IMAGE ####################
+# Image used to build vllm wheel
FROM base AS build
ARG TARGETPLATFORM

COPY . .

RUN python3 use_existing_torch.py

RUN --mount=type=cache,target=/root/.cache/uv \

@@ -155,17 +197,20 @@ ARG GIT_REPO_CHECK=0
RUN --mount=type=bind,source=.git,target=.git \
    if [ "$GIT_REPO_CHECK" != "0" ]; then bash tools/check_repo.sh ; fi

+# Max jobs used by Ninja to build extensions
ARG max_jobs=16
ENV MAX_JOBS=${max_jobs}
-ARG nvcc_threads=8
+ARG nvcc_threads=4
ENV NVCC_THREADS=$nvcc_threads
+ARG torch_cuda_arch_list='8.0 8.6 8.9 9.0'
+ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list}

ARG USE_SCCACHE
ARG SCCACHE_BUCKET_NAME=vllm-build-sccache
ARG SCCACHE_REGION_NAME=us-west-2
ARG SCCACHE_S3_NO_CREDENTIALS=0

-# Use sccache to speed up compilation
+# if USE_SCCACHE is set, use sccache to speed up compilation
RUN --mount=type=cache,target=/root/.cache/uv \
    --mount=type=bind,source=.git,target=.git \
    if [ "$USE_SCCACHE" = "1" ]; then \

@@ -190,9 +235,6 @@ RUN --mount=type=cache,target=/root/.cache/uv \
    && sccache --show-stats; \
    fi

-ARG torch_cuda_arch_list='8.0 8.6 8.9 9.0'
-ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list}
-
ARG vllm_target_device="cuda"
ENV VLLM_TARGET_DEVICE=${vllm_target_device}
ENV CCACHE_DIR=/root/.cache/ccache

@@ -206,10 +248,17 @@ RUN --mount=type=cache,target=/root/.cache/ccache \
    export VLLM_DOCKER_BUILD_CONTEXT=1 && \
    python3 setup.py bdist_wheel --dist-dir=vllm-dist --py-limited-api=cp38; \
    fi

+RUN echo "[INFO] Listing current directory:" && \
+    ls -al && \
+    echo "[INFO] Showing torch_build_versions.txt content:" && \
+    cat torch_build_versions.txt
+
#################### WHEEL BUILD IMAGE ####################


################### VLLM INSTALLED IMAGE ####################
+# Setup clean environment for vLLM for test and api server using ubuntu22.04 with AOT flashinfer
FROM ${FINAL_BASE_IMAGE} AS vllm-base
USER root

@@ -217,7 +266,7 @@ ARG CUDA_VERSION
ARG PYTHON_VERSION
ARG GET_PIP_URL

-# Only work with PyTorch manylinux builder
+# TODO (huydhn): Only work with PyTorch manylinux builder
ENV PATH="/opt/python/cp312-cp312/bin:${PATH}"

# prepare for environment starts

@@ -226,19 +275,20 @@ WORKDIR /workspace
# Install Python and other dependencies
RUN if command -v apt-get >/dev/null; then \
    apt-get update -y \
-    && apt-get install -y ccache software-properties-common git sudo vim python3-pip; \
+    && apt-get install -y ccache software-properties-common git curl wget sudo vim \
+    && add-apt-repository -y ppa:deadsnakes/ppa \
+    && apt-get update -y \
+    && apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv \
+    && update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1 \
+    && update-alternatives --set python3 /usr/bin/python${PYTHON_VERSION} \
+    && ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config \
+    && curl -sS ${GET_PIP_URL} | python${PYTHON_VERSION}; \
    else \
-    dnf install -y git wget sudo; \
+    dnf install -y git curl wget sudo; \
    fi \
-    && curl -LsSf https://astral.sh/uv/install.sh | sh \
-    && $HOME/.local/bin/uv venv /opt/venv --python ${PYTHON_VERSION} \
-    && rm -f /usr/bin/python3 /usr/bin/python3-config /usr/bin/pip \
-    && ln -s /opt/venv/bin/python3 /usr/bin/python3 \
-    && ln -s /opt/venv/bin/python3-config /usr/bin/python3-config \
-    && ln -s /opt/venv/bin/pip /usr/bin/pip \
    && python3 --version && python3 -m pip --version

-# Get the torch versions, and whls used in previous stage
+# Get the torch versions, and whls used in previous stagtes for consistency
COPY --from=base /workspace/torch_build_versions.txt ./torch_build_versions.txt
COPY --from=base /workspace/xformers-dist /wheels/xformers
COPY --from=build /workspace/vllm-dist /wheels/vllm

@@ -247,29 +297,33 @@ RUN echo "[INFO] Listing current directory before torch install step:" && \
    echo "[INFO] Showing torch_build_versions.txt content:" && \
    cat torch_build_versions.txt

-# Install uv for faster pip installs if not existed
-RUN --mount=type=cache,target=/root/.cache/uv \
-    python3 -m pip install uv==0.8.4
-
-ENV UV_HTTP_TIMEOUT=500
-ENV UV_INDEX_STRATEGY="unsafe-best-match"
-# Use copy mode to avoid hardlink failures with Docker cache mounts
-ENV UV_LINK_MODE=copy
-
# Install build and runtime dependencies, this is needed for flashinfer install
COPY requirements/build.txt requirements/build.txt
COPY use_existing_torch.py use_existing_torch.py
RUN python3 use_existing_torch.py
RUN cat requirements/build.txt

+# Install uv for faster pip installs if not existed
+RUN --mount=type=cache,target=/root/.cache/uv \
+    if ! python3 -m uv --version > /dev/null 2>&1; then \
+        python3 -m pip install uv==0.8.4; \
+    fi
+
+ENV UV_HTTP_TIMEOUT=500
+ENV UV_INDEX_STRATEGY="unsafe-best-match"
+# Use copy mode to avoid hardlink failures with Docker cache mounts
+ENV UV_LINK_MODE=copy
+
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system -r requirements/build.txt


# Default mount file as placeholder, this just avoid the mount error
ARG TORCH_WHEELS_PATH="./requirements"
-# Install torch, torchaudio and torchvision. If TORCH_WHEELS_PATH is default
-# to ./requirements, it will pull the nightly versions using pip. Otherwise,
-# it will use the local wheels from TORCH_WHEELS_PATH
+# Install torch, torchaudio and torchvision
+# if TORCH_WHEELS_PATH is default "./requirements", it will pull the nightly versions using pip using torch_build_versions.txt
+# otherwise, it will use the whls from TORCH_WHEELS_PATH from the host machine
RUN --mount=type=bind,source=${TORCH_WHEELS_PATH},target=/dist \
    --mount=type=cache,target=/root/.cache/uv \
    if [ -n "$TORCH_WHEELS_PATH" ] && [ "$TORCH_WHEELS_PATH" != "./requirements" ] && [ -d "/dist" ] && ls /dist/torch*.whl >/dev/null 2>&1; then \

@@ -290,14 +344,18 @@ RUN --mount=type=cache,target=/root/.cache/uv \
# Install xformers wheel from previous stage
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system /wheels/xformers/*.whl --verbose

-# Build FlashInfer from source
+# Build flashinfer from source.
ARG torch_cuda_arch_list='8.0;8.9;9.0a;10.0a;12.0'
+# install package for build flashinfer
+# see issue: https://github.com/flashinfer-ai/flashinfer/issues/738
+
+RUN pip freeze | grep -E 'setuptools|packaging|build'
+
ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list}
+# Build flashinfer for torch nightly from source around 10 mins
ARG FLASHINFER_GIT_REPO="https://github.com/flashinfer-ai/flashinfer.git"
+# Keep this in sync with https://github.com/vllm-project/vllm/blob/main/requirements/cuda.txt
ARG FLASHINFER_GIT_REF="v0.2.14.post1"

RUN --mount=type=cache,target=/root/.cache/uv \
    git clone --depth 1 --recursive --shallow-submodules \
    --branch ${FLASHINFER_GIT_REF} \

@@ -309,7 +367,7 @@ RUN --mount=type=cache,target=/root/.cache/uv \
    && cd .. \
    && rm -rf flashinfer

-# Install FlashInfer
+# install flashinfer python
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system wheels/flashinfer/*.whl --verbose

@@ -319,6 +377,49 @@ RUN uv pip freeze | grep -i '^torch\|^torchvision\|^torchaudio\|^xformers\|^vllm
################### VLLM INSTALLED IMAGE ####################


+#################### UNITTEST IMAGE #############################
+FROM vllm-base as test
+
+ENV UV_HTTP_TIMEOUT=500
+ENV UV_INDEX_STRATEGY="unsafe-best-match"
+# Use copy mode to avoid hardlink failures with Docker cache mounts
+ENV UV_LINK_MODE=copy
+
+COPY tests/ tests/
+COPY examples examples
+COPY benchmarks benchmarks
+COPY ./vllm/collect_env.py .
+COPY requirements/common.txt requirements/common.txt
+COPY use_existing_torch.py use_existing_torch.py
+COPY pyproject.toml pyproject.toml
+# Install build and runtime dependencies without stable torch version
+COPY requirements/nightly_torch_test.txt requirements/nightly_torch_test.txt
+
+RUN python3 use_existing_torch.py
+
+# install packages
+RUN --mount=type=cache,target=/root/.cache/uv \
+    uv pip install --system -r requirements/common.txt
+# enable fast downloads from hf (for testing)
+RUN --mount=type=cache,target=/root/.cache/uv \
+    uv pip install --system hf_transfer
+ENV HF_HUB_ENABLE_HF_TRANSFER 1
+
+# install development dependencies (for testing)
+RUN --mount=type=cache,target=/root/.cache/uv \
+    uv pip install --system -e tests/vllm_test_utils
+
+RUN --mount=type=cache,target=/root/.cache/uv \
+    uv pip install --system -r requirements/nightly_torch_test.txt
+
+# Logging to confirm the torch versions
+RUN pip freeze | grep -E 'torch|xformers|vllm|flashinfer'
+
+# Logging to confirm all the packages are installed
+RUN pip freeze
+
+#################### UNITTEST IMAGE #############################
+
#################### EXPORT STAGE ####################
FROM scratch as export-wheels
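The build stage above records the exact nightly wheels it used in torch_build_versions.txt (via `uv pip freeze | grep -i '^torch\|^torchvision\|^torchaudio'`) so the runtime stage can install matching versions. As a hedged illustration only (this helper is an assumption, not code from the Dockerfile), such a file can be turned back into pip requirement specifiers like this:

```python
from pathlib import Path

def torch_pins(path: str = "torch_build_versions.txt") -> list[str]:
    """Turn 'pip freeze'-style lines (e.g. 'torch==2.10.0.dev20251001+cu128',
    a hypothetical example) back into requirement specifiers."""
    pins = []
    for line in Path(path).read_text().splitlines():
        line = line.strip()
        # keep only the torch family captured by the grep in the build stage
        if line.startswith(("torch==", "torchvision==", "torchaudio==")):
            pins.append(line)
    return pins

# The resulting pins could then be fed to an installer such as
# `uv pip install --system <pins>` in the runtime stage.
print(torch_pins())
```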
.github/labeler.yml (29 changed lines)
@@ -133,32 +133,3 @@

"ciflow/vllm":
- .github/ci_commit_pins/vllm.txt
-
-"ciflow/b200":
-- test/test_matmul_cuda.py
-- test/test_scaled_matmul_cuda.py
-- test/inductor/test_fp8.py
-- aten/src/ATen/native/cuda/Blas.cpp
-- torch/**/*cublas*
-- torch/_inductor/kernel/mm.py
-- test/inductor/test_max_autotune.py
-- third_party/fbgemm
-
-"ciflow/h100":
-- test/test_matmul_cuda.py
-- test/test_scaled_matmul_cuda.py
-- test/inductor/test_fp8.py
-- aten/src/ATen/native/cuda/Blas.cpp
-- torch/**/*cublas*
-- torch/_inductor/kernel/mm.py
-- test/inductor/test_max_autotune.py
-- third_party/fbgemm
-
-"ciflow/rocm":
-- test/test_matmul_cuda.py
-- test/test_scaled_matmul_cuda.py
-- test/inductor/test_fp8.py
-- aten/src/ATen/native/cuda/Blas.cpp
-- torch/_inductor/kernel/mm.py
-- test/inductor/test_max_autotune.py
-- third_party/fbgemm
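The removed rules above map changed file paths to ciflow labels via glob patterns such as `torch/**/*cublas*`. As a rough, illustrative sketch only (the real matching is done by the GitHub labeler action, whose minimatch-style globbing is richer than Python's fnmatch), the idea is:

```python
from fnmatch import fnmatch

# Hypothetical, trimmed-down rule table using patterns quoted from the diff above.
LABEL_RULES = {
    "ciflow/vllm": [".github/ci_commit_pins/vllm.txt"],
    "ciflow/h100": ["test/test_matmul_cuda.py", "torch/**/*cublas*"],
}

def labels_for(changed_files: list[str]) -> set[str]:
    # fnmatch's "*" also crosses "/", so this only approximates "**" semantics.
    return {
        label
        for label, patterns in LABEL_RULES.items()
        if any(fnmatch(path, pat) for path in changed_files for pat in patterns)
    }

print(labels_for(["torch/cuda/my_cublas_wrapper.py"]))  # {'ciflow/h100'}
```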
.github/pytorch-probot.yml (6 changed lines)
@@ -3,7 +3,6 @@ ciflow_tracking_issue: 64124
ciflow_push_tags:
- ciflow/b200
- ciflow/b200-symm-mem
-- ciflow/b200-distributed
- ciflow/binaries
- ciflow/binaries_libtorch
- ciflow/binaries_wheel
@@ -16,8 +15,7 @@ ciflow_push_tags:
- ciflow/inductor-micro-benchmark
- ciflow/inductor-micro-benchmark-cpu-x86
- ciflow/inductor-perf-compare
-- ciflow/inductor-perf-test-nightly-rocm-mi300
-- ciflow/inductor-perf-test-nightly-rocm-mi355
+- ciflow/inductor-perf-test-nightly-rocm
- ciflow/inductor-perf-test-nightly-x86-zen
- ciflow/inductor-periodic
- ciflow/inductor-rocm
@@ -32,8 +30,6 @@ ciflow_push_tags:
- ciflow/riscv64
- ciflow/rocm
- ciflow/rocm-mi300
-- ciflow/rocm-mi355
-- ciflow/rocm-navi31
- ciflow/s390
- ciflow/slow
- ciflow/torchbench
.github/scripts/drci_mocks.json.gz (binary file changed, not shown)
.github/scripts/filter_test_configs.py (8 changed lines)
@@ -502,7 +502,6 @@ def perform_misc_tasks(
    job_name: str,
    pr_body: str,
    branch: Optional[str] = None,
-    tag: Optional[str] = None,
) -> None:
    """
    In addition to apply the filter logic, the script also does the following
@@ -510,11 +509,7 @@
    """
    set_output(
        "keep-going",
-        branch == MAIN_BRANCH
-        or bool(tag and re.match(r"^trunk/[a-f0-9]{40}$", tag))
-        # Pattern for tags created via manual run on HUD
-        or bool(tag and re.match(r"^ciflow/[^/]+/[a-f0-9]{40}$", tag))
-        or check_for_setting(labels, pr_body, "keep-going"),
+        branch == MAIN_BRANCH or check_for_setting(labels, pr_body, "keep-going"),
    )
    set_output(
        "ci-verbose-test-logs",
@@ -639,7 +634,6 @@ def main() -> None:
        job_name=args.job_name,
        pr_body=pr_body if pr_body else "",
        branch=args.branch,
-        tag=tag,
    )

    # Set the filtered test matrix as the output
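For context on the keep-going change above: the removed lines treated tags of the form `trunk/<40-hex-sha>` or `ciflow/<label>/<40-hex-sha>` as enabling keep-going. A minimal sketch of that predicate, using the regexes quoted from the diff (the surrounding plumbing is an assumption, not the actual script):

```python
import re

TRUNK_TAG = re.compile(r"^trunk/[a-f0-9]{40}$")
CIFLOW_TAG = re.compile(r"^ciflow/[^/]+/[a-f0-9]{40}$")

def keep_going(branch: str, tag: str | None, has_keep_going_setting: bool) -> bool:
    # Old (left-hand side) behavior: the main branch, a matching tag, or an
    # explicit keep-going label / PR-body setting all enable keep-going.
    return (
        branch == "main"
        or bool(tag and TRUNK_TAG.match(tag))
        or bool(tag and CIFLOW_TAG.match(tag))
        or has_keep_going_setting
    )

print(keep_going("feature", "ciflow/trunk/" + "a" * 40, False))  # True
print(keep_going("feature", None, False))                        # False
```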
.github/scripts/generate_binary_build_matrix.py (44 changed lines)
@@ -16,18 +16,16 @@ from typing import Optional

# NOTE: Please also update the CUDA sources in `PIP_SOURCES` in tools/nightly.py when changing this
-CUDA_ARCHES = ["12.6", "12.8", "12.9", "13.0"]
+CUDA_ARCHES = ["12.6", "12.8", "13.0"]
CUDA_STABLE = "12.8"
CUDA_ARCHES_FULL_VERSION = {
    "12.6": "12.6.3",
    "12.8": "12.8.1",
-    "12.9": "12.9.1",
    "13.0": "13.0.0",
}
CUDA_ARCHES_CUDNN_VERSION = {
    "12.6": "9",
    "12.8": "9",
-    "12.9": "9",
    "13.0": "9",
}

@@ -40,7 +38,7 @@ CPU_AARCH64_ARCH = ["cpu-aarch64"]

CPU_S390X_ARCH = ["cpu-s390x"]

-CUDA_AARCH64_ARCHES = ["12.6-aarch64", "12.8-aarch64", "12.9-aarch64", "13.0-aarch64"]
+CUDA_AARCH64_ARCHES = ["12.6-aarch64", "12.8-aarch64", "13.0-aarch64"]


PYTORCH_EXTRA_INSTALL_REQUIREMENTS = {
@@ -55,7 +53,7 @@ PYTORCH_EXTRA_INSTALL_REQUIREMENTS = {
        "nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | "
        "nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | "
        "nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | "
-        "nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | "
+        "nvidia-nccl-cu12==2.28.3; platform_system == 'Linux' | "
        "nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | "
        "nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | "
        "nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | "
@@ -72,29 +70,12 @@ PYTORCH_EXTRA_INSTALL_REQUIREMENTS = {
        "nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | "
        "nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | "
        "nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | "
-        "nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | "
+        "nvidia-nccl-cu12==2.28.3; platform_system == 'Linux' | "
        "nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | "
        "nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | "
        "nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | "
        "nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'"
    ),
-    "12.9": (
-        "nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | "
-        "nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | "
-        "nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | "
-        "nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | "
-        "nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | "
-        "nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | "
-        "nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | "
-        "nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | "
-        "nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | "
-        "nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | "
-        "nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | "
-        "nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | "
-        "nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | "
-        "nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | "
-        "nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux'"
-    ),
    "13.0": (
        "nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | "
        "nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | "
@@ -106,7 +87,7 @@ PYTORCH_EXTRA_INSTALL_REQUIREMENTS = {
        "nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | "
        "nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | "
        "nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | "
-        "nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | "
+        "nvidia-nccl-cu13==2.28.3; platform_system == 'Linux' | "
        "nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | "
        "nvidia-nvtx==13.0.39; platform_system == 'Linux' | "
        "nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | "
@@ -241,11 +222,7 @@ def generate_libtorch_matrix(
        arches += CUDA_ARCHES
        arches += ROCM_ARCHES
    elif os == "windows":
-        # TODO (huydhn): Only build CUDA 12.9 for Linux. This logic is to be cleaned up
-        # in 2.10
-        windows_cuda_arches = CUDA_ARCHES.copy()
-        windows_cuda_arches.remove("12.9")
-        arches += windows_cuda_arches
+        arches += CUDA_ARCHES
    if libtorch_variants is None:
        libtorch_variants = [
            "shared-with-deps",
@@ -309,11 +286,7 @@ def generate_wheels_matrix(
    if os == "linux":
        arches += CUDA_ARCHES + ROCM_ARCHES + XPU_ARCHES
    elif os == "windows":
-        # TODO (huydhn): Only build CUDA 12.9 for Linux. This logic is to be cleaned up
-        # in 2.10
-        windows_cuda_arches = CUDA_ARCHES.copy()
-        windows_cuda_arches.remove("12.9")
-        arches += windows_cuda_arches + XPU_ARCHES
+        arches += CUDA_ARCHES + XPU_ARCHES
    elif os == "linux-aarch64":
        # Separate new if as the CPU type is different and
        # uses different build/test scripts
@@ -349,7 +322,7 @@ def generate_wheels_matrix(
        # cuda linux wheels require PYTORCH_EXTRA_INSTALL_REQUIREMENTS to install

        if (
-            arch_version in ["13.0", "12.9", "12.8", "12.6"]
+            arch_version in ["13.0", "12.8", "12.6"]
            and os == "linux"
            or arch_version in CUDA_AARCH64_ARCHES
        ):
@@ -413,6 +386,5 @@

    validate_nccl_dep_consistency("13.0")
-    validate_nccl_dep_consistency("12.9")
    validate_nccl_dep_consistency("12.8")
    validate_nccl_dep_consistency("12.6")
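The NCCL bumps above (2.27.5 / 2.27.7 to 2.28.3) are exactly the kind of pin that the `validate_nccl_dep_consistency(...)` calls at the bottom of the script guard. As a hedged illustration only (the reference the real function checks against is not shown in this diff, so the body below is an assumption), a consistency check over `PYTORCH_EXTRA_INSTALL_REQUIREMENTS` could look like:

```python
import re

# Trimmed-down copy of the dict changed above; only what the sketch needs.
PYTORCH_EXTRA_INSTALL_REQUIREMENTS = {
    "12.8": "nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | "
            "nvidia-nccl-cu12==2.28.3; platform_system == 'Linux'",
    "13.0": "nvidia-nccl-cu13==2.28.3; platform_system == 'Linux'",
}

def nccl_version_for(cuda_arch: str) -> str:
    """Pull the pinned nvidia-nccl-* version out of the requirements string."""
    spec = PYTORCH_EXTRA_INSTALL_REQUIREMENTS[cuda_arch]
    match = re.search(r"nvidia-nccl-cu\d+==([\d.]+)", spec)
    if not match:
        raise ValueError(f"no nccl pin found for CUDA {cuda_arch}")
    return match.group(1)

def validate_nccl_dep_consistency(cuda_arch: str, expected: str = "2.28.3") -> None:
    # Assumed behavior: the real script compares against the NCCL version
    # pinned elsewhere in the repo rather than a hard-coded constant.
    found = nccl_version_for(cuda_arch)
    if found != expected:
        raise RuntimeError(f"NCCL pin mismatch for CUDA {cuda_arch}: {found} != {expected}")

validate_nccl_dep_consistency("13.0")
validate_nccl_dep_consistency("12.8")
```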
.github/scripts/github_utils.py (1 changed line)
@@ -18,7 +18,6 @@ class GitHubComment:
    body_text: str
    created_at: str
    author_login: str
-    author_url: Optional[str]
    author_association: str
    editor_login: Optional[str]
    database_id: int
.github/scripts/gql_mocks.json.gz (binary file changed, not shown)
.github/scripts/test_check_labels.py (2 changed lines)
@@ -38,7 +38,6 @@ def mock_get_comments() -> list[GitHubComment]:
            body_text="mock_body_text",
            created_at="",
            author_login="",
-            author_url=None,
            author_association="",
            editor_login=None,
            database_id=1,
@@ -49,7 +48,6 @@ def mock_get_comments() -> list[GitHubComment]:
            body_text=" #" + LABEL_ERR_MSG_TITLE.replace("`", ""),
            created_at="",
            author_login=BOT_AUTHORS[1],
-            author_url=None,
            author_association="",
            editor_login=None,
            database_id=2,
.github/scripts/test_trymerge.py (18 changed lines)
@@ -32,7 +32,6 @@ from trymerge import (
    main as trymerge_main,
    MandatoryChecksMissingError,
    MergeRule,
-    PostCommentError,
    RE_GHSTACK_DESC,
    read_merge_rules,
    remove_job_name_suffix,
@@ -589,23 +588,6 @@ class TestTryMerge(TestCase):
        self.assertEqual(mock_merge_base, pr.get_merge_base())
        mocked_gh_fetch_merge_base.assert_called_once()

-    def test_app_can_revert(self, *args: Any) -> None:
-        pr = GitHubPR("pytorch", "pytorch", 164660)
-        repo = DummyGitRepo()
-        app_comment_id, impostor_comment_id = 3375785595, 3377647892
-        # Check that app can revert
-        self.assertIsNotNone(validate_revert(repo, pr, comment_id=app_comment_id))
-        # But impostor can not
-        self.assertRaises(
-            PostCommentError,
-            lambda: validate_revert(repo, pr, comment_id=impostor_comment_id),
-        )
-        # Despite it's name being the name of the bot
-        self.assertEqual(
-            pr.get_comment_by_id(impostor_comment_id).author_login,
-            "pytorch-auto-revert",
-        )
-

@mock.patch("trymerge.gh_graphql", side_effect=mocked_gh_graphql)
@mock.patch("trymerge.gh_fetch_merge_base", return_value="")
|||||||
13
.github/scripts/trymerge.py
vendored
13
.github/scripts/trymerge.py
vendored
@ -234,7 +234,6 @@ query ($owner: String!, $name: String!, $number: Int!) {
|
|||||||
createdAt
|
createdAt
|
||||||
author {
|
author {
|
||||||
login
|
login
|
||||||
url
|
|
||||||
}
|
}
|
||||||
authorAssociation
|
authorAssociation
|
||||||
editor {
|
editor {
|
||||||
@ -1092,9 +1091,8 @@ class GitHubPR:
|
|||||||
editor = node["editor"]
|
editor = node["editor"]
|
||||||
return GitHubComment(
|
return GitHubComment(
|
||||||
body_text=node["bodyText"],
|
body_text=node["bodyText"],
|
||||||
created_at=node.get("createdAt", ""),
|
created_at=node["createdAt"] if "createdAt" in node else "",
|
||||||
author_login=node["author"]["login"],
|
author_login=node["author"]["login"],
|
||||||
author_url=node["author"].get("url", None),
|
|
||||||
author_association=node["authorAssociation"],
|
author_association=node["authorAssociation"],
|
||||||
editor_login=editor["login"] if editor else None,
|
editor_login=editor["login"] if editor else None,
|
||||||
database_id=node["databaseId"],
|
database_id=node["databaseId"],
|
||||||
@ -2031,17 +2029,16 @@ def validate_revert(
|
|||||||
# For some reason, one can not be a member of private repo, only CONTRIBUTOR
|
# For some reason, one can not be a member of private repo, only CONTRIBUTOR
|
||||||
if pr.is_base_repo_private():
|
if pr.is_base_repo_private():
|
||||||
allowed_reverters.append("CONTRIBUTOR")
|
allowed_reverters.append("CONTRIBUTOR")
|
||||||
# Special case the pytorch-auto-revert app, whose does not have association
|
|
||||||
# But should be able to issue revert command
|
|
||||||
if comment.author_url == "https://github.com/apps/pytorch-auto-revert":
|
|
||||||
allowed_reverters.append("NONE")
|
|
||||||
|
|
||||||
if author_association not in allowed_reverters:
|
if author_association not in allowed_reverters:
|
||||||
raise PostCommentError(
|
raise PostCommentError(
|
||||||
f"Will not revert as @{author_login} is not one of "
|
f"Will not revert as @{author_login} is not one of "
|
||||||
f"[{', '.join(allowed_reverters)}], but instead is {author_association}."
|
f"[{', '.join(allowed_reverters)}], but instead is {author_association}."
|
||||||
)
|
)
|
||||||
|
|
||||||
|
# Raises exception if matching rule is not found, but ignores all status checks
|
||||||
|
find_matching_merge_rule(
|
||||||
|
pr, repo, skip_mandatory_checks=True, skip_internal_checks=True
|
||||||
|
)
|
||||||
commit_sha = get_pr_commit_sha(repo, pr)
|
commit_sha = get_pr_commit_sha(repo, pr)
|
||||||
return (author_login, commit_sha)
|
return (author_login, commit_sha)
|
||||||
|
|
||||||
|
Workflow templates (file names not preserved in this capture)
@@ -177,9 +177,6 @@ jobs:
    runs-on: linux.rocm.gpu.mi250
    timeout-minutes: !{{ common.timeout_minutes }}
    !{{ upload.binary_env(config) }}
-    permissions:
-      id-token: write
-      contents: read
    steps:
      - name: Setup ROCm
        uses: ./.github/actions/setup-rocm
@@ -26,8 +26,9 @@ name: !{{ build_environment }}
      - name: Setup Python
        uses: actions/setup-python@v6
        with:
+          # TODO: Removeme once 3.14 is out
          # .4 version is min minor for 3.10, and also no-gil version of 3.13 needs at least 3.13.3
-          python-version: "!{{ py_ver.strip('t') + ('.4' if '3.14' not in py_ver else '.0') }}"
+          python-version: "!{{ (py_ver.strip('t') + '.4') if '3.14' not in py_ver else '3.14.0-rc.2' }}"
          freethreaded: !{{ "true" if py_ver.endswith('t') else "false" }}
{%- endmacro %}

@@ -79,9 +79,9 @@ jobs:
    runs-on: "windows-11-arm64-preview"
{%- else %}
{%- if branches == "nightly" %}
-    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
{%- else %}
-    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge.nonephemeral"
+    runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral"
{%- endif %}
{%- endif %}
    timeout-minutes: !{{ common.timeout_minutes_windows_binary }}
.github/workflows/_docs.yml (2 changed lines)
@@ -72,7 +72,7 @@ jobs:
            # Let's try to figure out how this can be improved
            timeout-minutes: 360
          - docs_type: python
-            runner: ${{ inputs.runner_prefix }}linux.c7i.2xlarge
+            runner: ${{ inputs.runner_prefix }}linux.2xlarge
            # It takes less than 30m to finish python docs unless there are issues
            timeout-minutes: 30
    # Set a fixed name for this job instead of using the current matrix-generated name, i.e. build-docs (cpp, linux.12xlarge, 180)
.github/workflows/_get-changed-files.yml (9 changed lines)
@@ -40,15 +40,6 @@ jobs:
            # Use gh CLI to get changed files in the PR with explicit repo
            CHANGED_FILES=$(gh api repos/${{ github.repository }}/pulls/$PR_NUMBER/files --paginate --jq '.[] | select(.status != "removed") | .filename' | tr '\n' ' ' | sed 's/ $//')

-            # See https://github.com/pytorch/pytorch/pull/134215#issuecomment-2332128790
-            PYI_FILES_TO_ADD=""
-            for file in ${CHANGED_FILES}; do
-              if [[ "${file}" == *".pyi.in" ]]; then
-                PYI_FILES_TO_ADD="${PYI_FILES_TO_ADD} ${file//.in/}"
-              fi
-            done
-            CHANGED_FILES="${CHANGED_FILES}${PYI_FILES_TO_ADD}"
-
            if [ -z "$CHANGED_FILES" ]; then
              echo "No changed files found, setting to '*'"
              CHANGED_FILES="*"
.github/workflows/_linux-build.yml (2 changed lines)
@@ -37,7 +37,7 @@ on:
      runner:
        required: false
        type: string
-        default: "linux.c7i.2xlarge"
+        default: "linux.2xlarge"
        description: |
          Label of the runner this job should run on.
      test-matrix:
.github/workflows/_linux-test.yml (42 changed lines)
@@ -224,46 +224,6 @@ jobs:
        continue-on-error: true
        uses: ./.github/actions/download-td-artifacts

-      - name: Download Windows torch wheel for cross-compilation
-        if: matrix.win_torch_wheel_artifact != ''
-        uses: seemethere/download-artifact-s3@1da556a7aa0a088e3153970611f6c432d58e80e6 # v4.2.0
-        with:
-          name: ${{ matrix.win_torch_wheel_artifact }}
-          path: win-torch-wheel
-
-      - name: Extract Windows wheel and setup CUDA libraries
-        if: matrix.win_torch_wheel_artifact != ''
-        shell: bash
-        run: |
-          set -x
-
-          # Find the wheel file
-          WHEEL_FILE=$(find win-torch-wheel -name "*.whl" -type f | head -n 1)
-          if [ -z "$WHEEL_FILE" ]; then
-            echo "Error: No wheel file found in win-torch-wheel directory"
-            exit 1
-          fi
-          echo "Found wheel file: $WHEEL_FILE"
-
-          # Unzip the wheel file
-          unzip -q "$WHEEL_FILE" -d win-torch-wheel-extracted
-          echo "Extracted wheel contents"
-
-          # Setup CUDA libraries (cuda.lib and cudart.lib) directory
-          mkdir -p win-torch-wheel-extracted/lib/x64
-          if [ -f "win-torch-wheel/cuda.lib" ]; then
-            mv win-torch-wheel/cuda.lib win-torch-wheel-extracted/lib/x64/
-            echo "Moved cuda.lib to win-torch-wheel-extracted/lib/x64/"
-          fi
-          if [ -f "win-torch-wheel/cudart.lib" ]; then
-            mv win-torch-wheel/cudart.lib win-torch-wheel-extracted/lib/x64/
-            echo "Moved cudart.lib to win-torch-wheel-extracted/lib/x64/"
-          fi
-
-          # Verify CUDA libraries are present
-          echo "CUDA libraries:"
-          ls -la win-torch-wheel-extracted/lib/x64/ || echo "No CUDA libraries found"
-
      - name: Parse ref
        id: parse-ref
        run: .github/scripts/parse_ref.py
@@ -429,6 +389,8 @@ jobs:
            "${DOCKER_IMAGE}" \
            ${DOCKER_SHELL_CMD}
          )
+          # Propagate download.pytorch.org IP to container
+          grep download.pytorch.org /etc/hosts | docker exec -i "${container_name}" sudo bash -c "/bin/cat >> /etc/hosts"
          echo "DOCKER_CONTAINER_ID=${container_name}" >> "${GITHUB_ENV}"

          if [[ ${BUILD_ENVIRONMENT} == *"s390x"* ]]; then
.github/workflows/_rocm-test.yml (13 changed lines)
@@ -102,6 +102,19 @@ jobs:
            exit 1
          fi

+      - name: configure aws credentials
+        id: aws_creds
+        uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722 # v4.1.0
+        with:
+          role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_s3_and_ecr_read_only
+          aws-region: us-east-1
+          role-duration-seconds: 18000
+
+      - name: Login to Amazon ECR
+        id: login-ecr
+        continue-on-error: true
+        uses: aws-actions/amazon-ecr-login@062b18b96a7aff071d4dc91bc00c4c1a7945b076 # v2.0.1
+
      - name: Calculate docker image
        id: calculate-docker-image
        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
.github/workflows/_win-build.yml (25 changed lines)
@@ -168,31 +168,6 @@ jobs:
        run: |
          .ci/pytorch/win-build.sh

-      # Collect Windows torch libs and CUDA libs for cross-compilation
-      - name: Collect Windows CUDA libs for cross-compilation
-        if: steps.build.outcome != 'skipped' && inputs.cuda-version != 'cpu'
-        shell: bash
-        run: |
-          set -ex
-
-          # Create directory structure if does not exist
-          mkdir -p /c/${{ github.run_id }}/build-results
-
-          # Copy CUDA libs
-          CUDA_PATH="/c/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${{ inputs.cuda-version }}"
-
-          if [ -f "${CUDA_PATH}/lib/x64/cuda.lib" ]; then
-            cp "${CUDA_PATH}/lib/x64/cuda.lib" /c/${{ github.run_id }}/build-results/
-          fi
-
-          if [ -f "${CUDA_PATH}/lib/x64/cudart.lib" ]; then
-            cp "${CUDA_PATH}/lib/x64/cudart.lib" /c/${{ github.run_id }}/build-results/
-          fi
-
-          # List collected files
-          echo "Collected CUDA libs:"
-          ls -lah /c/${{ github.run_id }}/build-results/*.lib
-
      # Upload to github so that people can click and download artifacts
      - name: Upload artifacts to s3
        if: steps.build.outcome != 'skipped'
62  .github/workflows/b200-distributed.yml  (vendored)
@@ -1,62 +0,0 @@
-name: CI for distributed tests on B200
-
-on:
-  pull_request:
-    paths:
-      - .github/workflows/b200-distributed.yml
-  workflow_dispatch:
-  push:
-    tags:
-      - ciflow/b200-distributed/*
-  schedule:
-    - cron: 46 8 * * * # about 1:46am PDT
-
-concurrency:
-  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}
-  cancel-in-progress: true
-
-permissions:
-  id-token: write
-  contents: read
-
-jobs:
-
-  get-label-type:
-    if: github.repository_owner == 'pytorch'
-    name: get-label-type
-    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
-    with:
-      triggering_actor: ${{ github.triggering_actor }}
-      issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
-      curr_branch: ${{ github.head_ref || github.ref_name }}
-      curr_ref_type: ${{ github.ref_type }}
-
-  linux-jammy-cuda12_8-py3_10-gcc11-build-distributed-b200:
-    name: linux-jammy-cuda12.8-py3.10-gcc11-build-distributed-b200
-    uses: ./.github/workflows/_linux-build.yml
-    needs: get-label-type
-    with:
-      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
-      runner: linux.12xlarge.memory
-      build-environment: linux-jammy-cuda12.8-py3.10-gcc11-distributed-b200
-      docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc11
-      cuda-arch-list: '10.0'
-      test-matrix: |
-        { include: [
-          { config: "distributed", shard: 1, num_shards: 2, runner: "linux.dgx.b200.8" },
-          { config: "distributed", shard: 2, num_shards: 2, runner: "linux.dgx.b200.8" },
-        ]}
-    secrets: inherit
-
-  linux-jammy-cuda12_8-py3_10-gcc11-test-distributed-b200:
-    name: linux-jammy-cuda12.8-py3.10-gcc11-test-b200
-    uses: ./.github/workflows/_linux-test.yml
-    needs:
-      - linux-jammy-cuda12_8-py3_10-gcc11-build-distributed-b200
-    with:
-      timeout-minutes: 1200
-      build-environment: linux-jammy-cuda12.8-py3.10-gcc11-distributed-b200
-      docker-image: ${{ needs.linux-jammy-cuda12_8-py3_10-gcc11-build-distributed-b200.outputs.docker-image }}
-      test-matrix: ${{ needs.linux-jammy-cuda12_8-py3_10-gcc11-build-distributed-b200.outputs.test-matrix }}
-      aws-role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_s3_and_ecr_read_only
-    secrets: inherit
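The deleted workflow wired its test job to its build job through `needs` and the reusable build workflow's outputs. A minimal sketch of that wiring in isolation, with placeholder environment names (the `docker-image` and `test-matrix` outputs are the ones the deleted file itself consumed):

    jobs:
      build:
        uses: ./.github/workflows/_linux-build.yml
        with:
          build-environment: example-env          # placeholder
      test:
        needs: build
        uses: ./.github/workflows/_linux-test.yml
        with:
          build-environment: example-env          # placeholder
          docker-image: ${{ needs.build.outputs.docker-image }}
          test-matrix: ${{ needs.build.outputs.test-matrix }}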
4  .github/workflows/build-manywheel-images.yml  (vendored)
@@ -46,12 +46,10 @@ jobs:
       fail-fast: false
       matrix:
         include: [
           { name: "manylinux2_28-builder", tag: "cuda13.0", runner: "linux.9xlarge.ephemeral" },
           { name: "manylinux2_28-builder", tag: "cuda12.8", runner: "linux.9xlarge.ephemeral" },
-          { name: "manylinux2_28-builder", tag: "cuda12.9", runner: "linux.9xlarge.ephemeral" },
           { name: "manylinux2_28-builder", tag: "cuda12.6", runner: "linux.9xlarge.ephemeral" },
           { name: "manylinuxaarch64-builder", tag: "cuda13.0", runner: "linux.arm64.2xlarge.ephemeral" },
-          { name: "manylinuxaarch64-builder", tag: "cuda12.9", runner: "linux.arm64.2xlarge.ephemeral" },
           { name: "manylinuxaarch64-builder", tag: "cuda12.8", runner: "linux.arm64.2xlarge.ephemeral" },
           { name: "manylinuxaarch64-builder", tag: "cuda12.6", runner: "linux.arm64.2xlarge.ephemeral" },
           { name: "manylinux2_28-builder", tag: "rocm6.4", runner: "linux.9xlarge.ephemeral" },
19  .github/workflows/build-vllm-wheel.yml  (vendored)
@@ -27,8 +27,9 @@ jobs:
       fail-fast: false
       matrix:
         python-version: [ '3.12' ]
+        # TODO (huydhn): Add cu130 after https://github.com/vllm-project/vllm/issues/24464 is resolved
         platform: [ 'manylinux_2_28_x86_64', 'manylinux_2_28_aarch64' ]
-        device: [ 'cu128', 'cu129', 'cu130' ]
+        device: [ 'cu128', 'cu129' ]
         include:
           - platform: manylinux_2_28_x86_64
             device: cu128
@@ -38,10 +39,6 @@ jobs:
             device: cu129
             manylinux-image: 'pytorch/manylinux2_28-builder:cuda12.9'
             runner: linux.12xlarge.memory
-          - platform: manylinux_2_28_x86_64
-            device: cu130
-            manylinux-image: 'pytorch/manylinux2_28-builder:cuda13.0'
-            runner: linux.12xlarge.memory
           - platform: manylinux_2_28_aarch64
             device: cu128
             manylinux-image: 'pytorch/manylinuxaarch64-builder:cuda12.8'
@@ -50,11 +47,6 @@ jobs:
             device: cu129
             manylinux-image: 'pytorch/manylinuxaarch64-builder:cuda12.9'
            runner: linux.arm64.r7g.12xlarge.memory
-        exclude:
-          # TODO (huydhn): Add cu130 aarch64 once PyTorch is on 2.9+ and
-          # xformers is update to support 13.0
-          - platform: manylinux_2_28_aarch64
-            device: cu130
     name: "Build ${{ matrix.device }} vLLM wheel on ${{ matrix.platform }}"
     runs-on: ${{ matrix.runner }}
     timeout-minutes: 480
@@ -177,12 +169,7 @@ jobs:
       fail-fast: false
       matrix:
         platform: [ 'manylinux_2_28_x86_64', 'manylinux_2_28_aarch64' ]
-        device: [ 'cu128', 'cu129', 'cu130' ]
+        device: [ 'cu128', 'cu129' ]
-        exclude:
-          # TODO (huydhn): Add cu130 aarch64 once PyTorch is on 2.9+ and
-          # xformers is update to support 13.0
-          - platform: manylinux_2_28_aarch64
-            device: cu130
     env:
       PLATFORM: ${{ matrix.platform }}
       BUILD_DEVICE: ${{ matrix.device }}
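On the matrix edits above: `exclude` removes specific combinations from the platform × device cross-product, so once cu130 is dropped from the `device` list the aarch64/cu130 exclude entry has nothing left to filter and can go too. A generic sketch of the interaction (not the file's current contents):

    strategy:
      matrix:
        platform: [ 'manylinux_2_28_x86_64', 'manylinux_2_28_aarch64' ]
        device: [ 'cu128', 'cu129', 'cu130' ]
        exclude:
          # drops only the aarch64 + cu130 pair; the other five combinations still run
          - platform: manylinux_2_28_aarch64
            device: cu130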
364  .github/workflows/generated-linux-aarch64-binary-manywheel-nightly.yml  (generated, vendored)
@@ -132,7 +132,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_10-cuda-aarch64-12_6
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -178,7 +178,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_10-cuda-aarch64-12_8
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -204,52 +204,6 @@ jobs:
       github-token: ${{ secrets.GITHUB_TOKEN }}
     uses: ./.github/workflows/_binary-upload.yml
-
-  manywheel-py3_10-cuda-aarch64-12_9-build:
-    if: ${{ github.repository_owner == 'pytorch' }}
-    uses: ./.github/workflows/_binary-build-linux.yml
-    needs: get-label-type
-    with:
-      PYTORCH_ROOT: /pytorch
-      PACKAGE_TYPE: manywheel
-      # TODO: This is a legacy variable that we eventually want to get rid of in
-      # favor of GPU_ARCH_VERSION
-      DESIRED_CUDA: cu129
-      GPU_ARCH_VERSION: "12.9-aarch64"
-      GPU_ARCH_TYPE: cuda-aarch64
-      DOCKER_IMAGE: manylinuxaarch64-builder
-      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
-      DESIRED_PYTHON: "3.10"
-      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
-      runs_on: linux.arm64.r7g.12xlarge.memory
-      ALPINE_IMAGE: "arm64v8/alpine"
-      build_name: manywheel-py3_10-cuda-aarch64-12_9
-      build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux'
-      timeout-minutes: 420
-    secrets:
-      github-token: ${{ secrets.GITHUB_TOKEN }}
-  manywheel-py3_10-cuda-aarch64-12_9-upload: # Uploading
-    if: ${{ github.repository_owner == 'pytorch' }}
-    permissions:
-      id-token: write
-      contents: read
-    needs: manywheel-py3_10-cuda-aarch64-12_9-build
-    with:
-      PYTORCH_ROOT: /pytorch
-      PACKAGE_TYPE: manywheel
-      # TODO: This is a legacy variable that we eventually want to get rid of in
-      # favor of GPU_ARCH_VERSION
-      DESIRED_CUDA: cu129
-      GPU_ARCH_VERSION: "12.9-aarch64"
-      GPU_ARCH_TYPE: cuda-aarch64
-      DOCKER_IMAGE: manylinuxaarch64-builder
-      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
-      DESIRED_PYTHON: "3.10"
-      build_name: manywheel-py3_10-cuda-aarch64-12_9
-    secrets:
-      github-token: ${{ secrets.GITHUB_TOKEN }}
-    uses: ./.github/workflows/_binary-upload.yml

   manywheel-py3_10-cuda-aarch64-13_0-build:
     if: ${{ github.repository_owner == 'pytorch' }}
     uses: ./.github/workflows/_binary-build-linux.yml
@@ -270,7 +224,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_10-cuda-aarch64-13_0
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -381,7 +335,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_11-cuda-aarch64-12_6
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -427,7 +381,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_11-cuda-aarch64-12_8
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -453,52 +407,6 @@ jobs:
       github-token: ${{ secrets.GITHUB_TOKEN }}
     uses: ./.github/workflows/_binary-upload.yml
-
-  manywheel-py3_11-cuda-aarch64-12_9-build:
-    if: ${{ github.repository_owner == 'pytorch' }}
-    uses: ./.github/workflows/_binary-build-linux.yml
-    needs: get-label-type
-    with:
-      PYTORCH_ROOT: /pytorch
-      PACKAGE_TYPE: manywheel
-      # TODO: This is a legacy variable that we eventually want to get rid of in
-      # favor of GPU_ARCH_VERSION
-      DESIRED_CUDA: cu129
-      GPU_ARCH_VERSION: "12.9-aarch64"
-      GPU_ARCH_TYPE: cuda-aarch64
-      DOCKER_IMAGE: manylinuxaarch64-builder
-      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
-      DESIRED_PYTHON: "3.11"
-      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
-      runs_on: linux.arm64.r7g.12xlarge.memory
-      ALPINE_IMAGE: "arm64v8/alpine"
-      build_name: manywheel-py3_11-cuda-aarch64-12_9
-      build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux'
-      timeout-minutes: 420
-    secrets:
-      github-token: ${{ secrets.GITHUB_TOKEN }}
-  manywheel-py3_11-cuda-aarch64-12_9-upload: # Uploading
-    if: ${{ github.repository_owner == 'pytorch' }}
-    permissions:
-      id-token: write
-      contents: read
-    needs: manywheel-py3_11-cuda-aarch64-12_9-build
-    with:
-      PYTORCH_ROOT: /pytorch
-      PACKAGE_TYPE: manywheel
-      # TODO: This is a legacy variable that we eventually want to get rid of in
-      # favor of GPU_ARCH_VERSION
-      DESIRED_CUDA: cu129
-      GPU_ARCH_VERSION: "12.9-aarch64"
-      GPU_ARCH_TYPE: cuda-aarch64
-      DOCKER_IMAGE: manylinuxaarch64-builder
-      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
-      DESIRED_PYTHON: "3.11"
-      build_name: manywheel-py3_11-cuda-aarch64-12_9
-    secrets:
-      github-token: ${{ secrets.GITHUB_TOKEN }}
-    uses: ./.github/workflows/_binary-upload.yml

   manywheel-py3_11-cuda-aarch64-13_0-build:
     if: ${{ github.repository_owner == 'pytorch' }}
     uses: ./.github/workflows/_binary-build-linux.yml
@@ -519,7 +427,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_11-cuda-aarch64-13_0
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -630,7 +538,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_12-cuda-aarch64-12_6
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -676,7 +584,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_12-cuda-aarch64-12_8
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -702,52 +610,6 @@ jobs:
       github-token: ${{ secrets.GITHUB_TOKEN }}
     uses: ./.github/workflows/_binary-upload.yml
-
-  manywheel-py3_12-cuda-aarch64-12_9-build:
-    if: ${{ github.repository_owner == 'pytorch' }}
-    uses: ./.github/workflows/_binary-build-linux.yml
-    needs: get-label-type
-    with:
-      PYTORCH_ROOT: /pytorch
-      PACKAGE_TYPE: manywheel
-      # TODO: This is a legacy variable that we eventually want to get rid of in
-      # favor of GPU_ARCH_VERSION
-      DESIRED_CUDA: cu129
-      GPU_ARCH_VERSION: "12.9-aarch64"
-      GPU_ARCH_TYPE: cuda-aarch64
-      DOCKER_IMAGE: manylinuxaarch64-builder
-      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
-      DESIRED_PYTHON: "3.12"
-      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
-      runs_on: linux.arm64.r7g.12xlarge.memory
-      ALPINE_IMAGE: "arm64v8/alpine"
-      build_name: manywheel-py3_12-cuda-aarch64-12_9
-      build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux'
-      timeout-minutes: 420
-    secrets:
-      github-token: ${{ secrets.GITHUB_TOKEN }}
-  manywheel-py3_12-cuda-aarch64-12_9-upload: # Uploading
-    if: ${{ github.repository_owner == 'pytorch' }}
-    permissions:
-      id-token: write
-      contents: read
-    needs: manywheel-py3_12-cuda-aarch64-12_9-build
-    with:
-      PYTORCH_ROOT: /pytorch
-      PACKAGE_TYPE: manywheel
-      # TODO: This is a legacy variable that we eventually want to get rid of in
-      # favor of GPU_ARCH_VERSION
-      DESIRED_CUDA: cu129
-      GPU_ARCH_VERSION: "12.9-aarch64"
-      GPU_ARCH_TYPE: cuda-aarch64
-      DOCKER_IMAGE: manylinuxaarch64-builder
-      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
-      DESIRED_PYTHON: "3.12"
-      build_name: manywheel-py3_12-cuda-aarch64-12_9
-    secrets:
-      github-token: ${{ secrets.GITHUB_TOKEN }}
-    uses: ./.github/workflows/_binary-upload.yml

   manywheel-py3_12-cuda-aarch64-13_0-build:
     if: ${{ github.repository_owner == 'pytorch' }}
     uses: ./.github/workflows/_binary-build-linux.yml
@@ -768,7 +630,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_12-cuda-aarch64-13_0
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -879,7 +741,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_13-cuda-aarch64-12_6
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -925,7 +787,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_13-cuda-aarch64-12_8
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -951,52 +813,6 @@ jobs:
       github-token: ${{ secrets.GITHUB_TOKEN }}
     uses: ./.github/workflows/_binary-upload.yml
-
-  manywheel-py3_13-cuda-aarch64-12_9-build:
-    if: ${{ github.repository_owner == 'pytorch' }}
-    uses: ./.github/workflows/_binary-build-linux.yml
-    needs: get-label-type
-    with:
-      PYTORCH_ROOT: /pytorch
-      PACKAGE_TYPE: manywheel
-      # TODO: This is a legacy variable that we eventually want to get rid of in
-      # favor of GPU_ARCH_VERSION
-      DESIRED_CUDA: cu129
-      GPU_ARCH_VERSION: "12.9-aarch64"
-      GPU_ARCH_TYPE: cuda-aarch64
-      DOCKER_IMAGE: manylinuxaarch64-builder
-      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
-      DESIRED_PYTHON: "3.13"
-      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
-      runs_on: linux.arm64.r7g.12xlarge.memory
-      ALPINE_IMAGE: "arm64v8/alpine"
-      build_name: manywheel-py3_13-cuda-aarch64-12_9
-      build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux'
-      timeout-minutes: 420
-    secrets:
-      github-token: ${{ secrets.GITHUB_TOKEN }}
-  manywheel-py3_13-cuda-aarch64-12_9-upload: # Uploading
-    if: ${{ github.repository_owner == 'pytorch' }}
-    permissions:
-      id-token: write
-      contents: read
-    needs: manywheel-py3_13-cuda-aarch64-12_9-build
-    with:
-      PYTORCH_ROOT: /pytorch
-      PACKAGE_TYPE: manywheel
-      # TODO: This is a legacy variable that we eventually want to get rid of in
-      # favor of GPU_ARCH_VERSION
-      DESIRED_CUDA: cu129
-      GPU_ARCH_VERSION: "12.9-aarch64"
-      GPU_ARCH_TYPE: cuda-aarch64
-      DOCKER_IMAGE: manylinuxaarch64-builder
-      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
-      DESIRED_PYTHON: "3.13"
-      build_name: manywheel-py3_13-cuda-aarch64-12_9
-    secrets:
-      github-token: ${{ secrets.GITHUB_TOKEN }}
-    uses: ./.github/workflows/_binary-upload.yml

   manywheel-py3_13-cuda-aarch64-13_0-build:
     if: ${{ github.repository_owner == 'pytorch' }}
     uses: ./.github/workflows/_binary-build-linux.yml
@@ -1017,7 +833,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_13-cuda-aarch64-13_0
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -1128,7 +944,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_13t-cuda-aarch64-12_6
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -1174,7 +990,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_13t-cuda-aarch64-12_8
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -1200,52 +1016,6 @@ jobs:
       github-token: ${{ secrets.GITHUB_TOKEN }}
     uses: ./.github/workflows/_binary-upload.yml
-
-  manywheel-py3_13t-cuda-aarch64-12_9-build:
-    if: ${{ github.repository_owner == 'pytorch' }}
-    uses: ./.github/workflows/_binary-build-linux.yml
-    needs: get-label-type
-    with:
-      PYTORCH_ROOT: /pytorch
-      PACKAGE_TYPE: manywheel
-      # TODO: This is a legacy variable that we eventually want to get rid of in
-      # favor of GPU_ARCH_VERSION
-      DESIRED_CUDA: cu129
-      GPU_ARCH_VERSION: "12.9-aarch64"
-      GPU_ARCH_TYPE: cuda-aarch64
-      DOCKER_IMAGE: manylinuxaarch64-builder
-      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
-      DESIRED_PYTHON: "3.13t"
-      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
-      runs_on: linux.arm64.r7g.12xlarge.memory
-      ALPINE_IMAGE: "arm64v8/alpine"
-      build_name: manywheel-py3_13t-cuda-aarch64-12_9
-      build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux'
-      timeout-minutes: 420
-    secrets:
-      github-token: ${{ secrets.GITHUB_TOKEN }}
-  manywheel-py3_13t-cuda-aarch64-12_9-upload: # Uploading
-    if: ${{ github.repository_owner == 'pytorch' }}
-    permissions:
-      id-token: write
-      contents: read
-    needs: manywheel-py3_13t-cuda-aarch64-12_9-build
-    with:
-      PYTORCH_ROOT: /pytorch
-      PACKAGE_TYPE: manywheel
-      # TODO: This is a legacy variable that we eventually want to get rid of in
-      # favor of GPU_ARCH_VERSION
-      DESIRED_CUDA: cu129
-      GPU_ARCH_VERSION: "12.9-aarch64"
-      GPU_ARCH_TYPE: cuda-aarch64
-      DOCKER_IMAGE: manylinuxaarch64-builder
-      DOCKER_IMAGE_TAG_PREFIX: cuda12.9
-      DESIRED_PYTHON: "3.13t"
-      build_name: manywheel-py3_13t-cuda-aarch64-12_9
-    secrets:
-      github-token: ${{ secrets.GITHUB_TOKEN }}
-    uses: ./.github/workflows/_binary-upload.yml

   manywheel-py3_13t-cuda-aarch64-13_0-build:
     if: ${{ github.repository_owner == 'pytorch' }}
     uses: ./.github/workflows/_binary-build-linux.yml
@@ -1266,7 +1036,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_13t-cuda-aarch64-13_0
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux'
       timeout-minutes: 420
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -1377,7 +1147,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_14-cuda-aarch64-12_6
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
|
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
|
||||||
timeout-minutes: 420
|
timeout-minutes: 420
|
||||||
secrets:
|
secrets:
|
||||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
@ -1423,7 +1193,7 @@ jobs:
|
|||||||
ALPINE_IMAGE: "arm64v8/alpine"
|
ALPINE_IMAGE: "arm64v8/alpine"
|
||||||
build_name: manywheel-py3_14-cuda-aarch64-12_8
|
build_name: manywheel-py3_14-cuda-aarch64-12_8
|
||||||
build_environment: linux-aarch64-binary-manywheel
|
build_environment: linux-aarch64-binary-manywheel
|
||||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
|
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
|
||||||
timeout-minutes: 420
|
timeout-minutes: 420
|
||||||
secrets:
|
secrets:
|
||||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
@@ -1449,52 +1219,6 @@ jobs:
 github-token: ${{ secrets.GITHUB_TOKEN }}
 uses: ./.github/workflows/_binary-upload.yml

-manywheel-py3_14-cuda-aarch64-12_9-build:
-if: ${{ github.repository_owner == 'pytorch' }}
-uses: ./.github/workflows/_binary-build-linux.yml
-needs: get-label-type
-with:
-PYTORCH_ROOT: /pytorch
-PACKAGE_TYPE: manywheel
-# TODO: This is a legacy variable that we eventually want to get rid of in
-# favor of GPU_ARCH_VERSION
-DESIRED_CUDA: cu129
-GPU_ARCH_VERSION: "12.9-aarch64"
-GPU_ARCH_TYPE: cuda-aarch64
-DOCKER_IMAGE: manylinuxaarch64-builder
-DOCKER_IMAGE_TAG_PREFIX: cuda12.9
-DESIRED_PYTHON: "3.14"
-runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
-runs_on: linux.arm64.r7g.12xlarge.memory
-ALPINE_IMAGE: "arm64v8/alpine"
-build_name: manywheel-py3_14-cuda-aarch64-12_9
-build_environment: linux-aarch64-binary-manywheel
-PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux'
-timeout-minutes: 420
-secrets:
-github-token: ${{ secrets.GITHUB_TOKEN }}
-manywheel-py3_14-cuda-aarch64-12_9-upload: # Uploading
-if: ${{ github.repository_owner == 'pytorch' }}
-permissions:
-id-token: write
-contents: read
-needs: manywheel-py3_14-cuda-aarch64-12_9-build
-with:
-PYTORCH_ROOT: /pytorch
-PACKAGE_TYPE: manywheel
-# TODO: This is a legacy variable that we eventually want to get rid of in
-# favor of GPU_ARCH_VERSION
-DESIRED_CUDA: cu129
-GPU_ARCH_VERSION: "12.9-aarch64"
-GPU_ARCH_TYPE: cuda-aarch64
-DOCKER_IMAGE: manylinuxaarch64-builder
-DOCKER_IMAGE_TAG_PREFIX: cuda12.9
-DESIRED_PYTHON: "3.14"
-build_name: manywheel-py3_14-cuda-aarch64-12_9
-secrets:
-github-token: ${{ secrets.GITHUB_TOKEN }}
-uses: ./.github/workflows/_binary-upload.yml

 manywheel-py3_14-cuda-aarch64-13_0-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 uses: ./.github/workflows/_binary-build-linux.yml
@@ -1515,7 +1239,7 @@ jobs:
 ALPINE_IMAGE: "arm64v8/alpine"
 build_name: manywheel-py3_14-cuda-aarch64-13_0
 build_environment: linux-aarch64-binary-manywheel
-PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux'
+PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux'
 timeout-minutes: 420
 secrets:
 github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -1626,7 +1350,7 @@ jobs:
 ALPINE_IMAGE: "arm64v8/alpine"
 build_name: manywheel-py3_14t-cuda-aarch64-12_6
 build_environment: linux-aarch64-binary-manywheel
-PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
+PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
 timeout-minutes: 420
 secrets:
 github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -1672,7 +1396,7 @@ jobs:
 ALPINE_IMAGE: "arm64v8/alpine"
 build_name: manywheel-py3_14t-cuda-aarch64-12_8
 build_environment: linux-aarch64-binary-manywheel
-PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
+PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
 timeout-minutes: 420
 secrets:
 github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -1698,52 +1422,6 @@ jobs:
 github-token: ${{ secrets.GITHUB_TOKEN }}
 uses: ./.github/workflows/_binary-upload.yml

-manywheel-py3_14t-cuda-aarch64-12_9-build:
-if: ${{ github.repository_owner == 'pytorch' }}
-uses: ./.github/workflows/_binary-build-linux.yml
-needs: get-label-type
-with:
-PYTORCH_ROOT: /pytorch
-PACKAGE_TYPE: manywheel
-# TODO: This is a legacy variable that we eventually want to get rid of in
-# favor of GPU_ARCH_VERSION
-DESIRED_CUDA: cu129
-GPU_ARCH_VERSION: "12.9-aarch64"
-GPU_ARCH_TYPE: cuda-aarch64
-DOCKER_IMAGE: manylinuxaarch64-builder
-DOCKER_IMAGE_TAG_PREFIX: cuda12.9
-DESIRED_PYTHON: "3.14t"
-runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
-runs_on: linux.arm64.r7g.12xlarge.memory
-ALPINE_IMAGE: "arm64v8/alpine"
-build_name: manywheel-py3_14t-cuda-aarch64-12_9
-build_environment: linux-aarch64-binary-manywheel
-PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux'
-timeout-minutes: 420
-secrets:
-github-token: ${{ secrets.GITHUB_TOKEN }}
-manywheel-py3_14t-cuda-aarch64-12_9-upload: # Uploading
-if: ${{ github.repository_owner == 'pytorch' }}
-permissions:
-id-token: write
-contents: read
-needs: manywheel-py3_14t-cuda-aarch64-12_9-build
-with:
-PYTORCH_ROOT: /pytorch
-PACKAGE_TYPE: manywheel
-# TODO: This is a legacy variable that we eventually want to get rid of in
-# favor of GPU_ARCH_VERSION
-DESIRED_CUDA: cu129
-GPU_ARCH_VERSION: "12.9-aarch64"
-GPU_ARCH_TYPE: cuda-aarch64
-DOCKER_IMAGE: manylinuxaarch64-builder
-DOCKER_IMAGE_TAG_PREFIX: cuda12.9
-DESIRED_PYTHON: "3.14t"
-build_name: manywheel-py3_14t-cuda-aarch64-12_9
-secrets:
-github-token: ${{ secrets.GITHUB_TOKEN }}
-uses: ./.github/workflows/_binary-upload.yml

 manywheel-py3_14t-cuda-aarch64-13_0-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 uses: ./.github/workflows/_binary-build-linux.yml
@@ -1764,7 +1442,7 @@ jobs:
 ALPINE_IMAGE: "arm64v8/alpine"
 build_name: manywheel-py3_14t-cuda-aarch64-13_0
 build_environment: linux-aarch64-binary-manywheel
-PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux'
+PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux'
 timeout-minutes: 420
 secrets:
 github-token: ${{ secrets.GITHUB_TOKEN }}
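Every generated binary job in these files follows the same call chain: a build job invokes the reusable _binary-build-linux.yml workflow with its inputs, and an upload job that depends on it invokes _binary-upload.yml. The sketch below condenses that chain, using the cu129 aarch64 job that this change removes as the example because its inputs appear verbatim above; it is illustrative only, most inputs are omitted, and it is not a literal excerpt from the generated file.

jobs:
  manywheel-py3_13t-cuda-aarch64-12_9-build:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: get-label-type
    uses: ./.github/workflows/_binary-build-linux.yml
    with:
      PACKAGE_TYPE: manywheel
      DESIRED_CUDA: cu129            # legacy spelling of GPU_ARCH_VERSION "12.9-aarch64"
      DESIRED_PYTHON: "3.13t"
      build_name: manywheel-py3_13t-cuda-aarch64-12_9
      build_environment: linux-aarch64-binary-manywheel
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}

  manywheel-py3_13t-cuda-aarch64-12_9-upload:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: manywheel-py3_13t-cuda-aarch64-12_9-build
    uses: ./.github/workflows/_binary-upload.yml
    permissions:
      id-token: write
      contents: read
    with:
      PACKAGE_TYPE: manywheel
      build_name: manywheel-py3_13t-cuda-aarch64-12_9
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}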

.github/workflows/generated-linux-binary-libtorch-nightly.yml (generated, vendored): 74 changed lines
@@ -248,74 +248,6 @@ jobs:
 github-token: ${{ secrets.GITHUB_TOKEN }}
 uses: ./.github/workflows/_binary-upload.yml

-libtorch-cuda12_9-shared-with-deps-release-build:
-if: ${{ github.repository_owner == 'pytorch' }}
-uses: ./.github/workflows/_binary-build-linux.yml
-needs: get-label-type
-with:
-PYTORCH_ROOT: /pytorch
-PACKAGE_TYPE: libtorch
-# TODO: This is a legacy variable that we eventually want to get rid of in
-# favor of GPU_ARCH_VERSION
-DESIRED_CUDA: cu129
-GPU_ARCH_VERSION: "12.9"
-GPU_ARCH_TYPE: cuda
-DOCKER_IMAGE: libtorch-cxx11-builder
-DOCKER_IMAGE_TAG_PREFIX: cuda12.9
-LIBTORCH_CONFIG: release
-LIBTORCH_VARIANT: shared-with-deps
-runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
-build_name: libtorch-cuda12_9-shared-with-deps-release
-build_environment: linux-binary-libtorch
-secrets:
-github-token: ${{ secrets.GITHUB_TOKEN }}
-libtorch-cuda12_9-shared-with-deps-release-test: # Testing
-if: ${{ github.repository_owner == 'pytorch' }}
-needs:
-- libtorch-cuda12_9-shared-with-deps-release-build
-- get-label-type
-uses: ./.github/workflows/_binary-test-linux.yml
-with:
-PYTORCH_ROOT: /pytorch
-PACKAGE_TYPE: libtorch
-# TODO: This is a legacy variable that we eventually want to get rid of in
-# favor of GPU_ARCH_VERSION
-DESIRED_CUDA: cu129
-GPU_ARCH_VERSION: "12.9"
-GPU_ARCH_TYPE: cuda
-DOCKER_IMAGE: libtorch-cxx11-builder
-DOCKER_IMAGE_TAG_PREFIX: cuda12.9
-LIBTORCH_CONFIG: release
-LIBTORCH_VARIANT: shared-with-deps
-build_name: libtorch-cuda12_9-shared-with-deps-release
-build_environment: linux-binary-libtorch
-runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
-runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner
-secrets:
-github-token: ${{ secrets.GITHUB_TOKEN }}
-libtorch-cuda12_9-shared-with-deps-release-upload: # Uploading
-if: ${{ github.repository_owner == 'pytorch' }}
-permissions:
-id-token: write
-contents: read
-needs: libtorch-cuda12_9-shared-with-deps-release-test
-with:
-PYTORCH_ROOT: /pytorch
-PACKAGE_TYPE: libtorch
-# TODO: This is a legacy variable that we eventually want to get rid of in
-# favor of GPU_ARCH_VERSION
-DESIRED_CUDA: cu129
-GPU_ARCH_VERSION: "12.9"
-GPU_ARCH_TYPE: cuda
-DOCKER_IMAGE: libtorch-cxx11-builder
-DOCKER_IMAGE_TAG_PREFIX: cuda12.9
-LIBTORCH_CONFIG: release
-LIBTORCH_VARIANT: shared-with-deps
-build_name: libtorch-cuda12_9-shared-with-deps-release
-secrets:
-github-token: ${{ secrets.GITHUB_TOKEN }}
-uses: ./.github/workflows/_binary-upload.yml

 libtorch-cuda13_0-shared-with-deps-release-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 uses: ./.github/workflows/_binary-build-linux.yml
@@ -426,9 +358,6 @@ jobs:
 DOCKER_IMAGE_TAG_PREFIX: rocm6.4
 LIBTORCH_CONFIG: release
 LIBTORCH_VARIANT: shared-with-deps
-permissions:
-id-token: write
-contents: read
 steps:
 - name: Setup ROCm
 uses: ./.github/actions/setup-rocm
@@ -544,9 +473,6 @@ jobs:
 DOCKER_IMAGE_TAG_PREFIX: rocm7.0
 LIBTORCH_CONFIG: release
 LIBTORCH_VARIANT: shared-with-deps
-permissions:
-id-token: write
-contents: read
 steps:
 - name: Setup ROCm
 uses: ./.github/actions/setup-rocm

.github/workflows/generated-linux-binary-manywheel-nightly.yml (generated, vendored): 546 changed lines
@@ -127,7 +127,7 @@ jobs:
 runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
 build_name: manywheel-py3_10-cuda12_6
 build_environment: linux-binary-manywheel
-PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
+PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
 secrets:
 github-token: ${{ secrets.GITHUB_TOKEN }}
 manywheel-py3_10-cuda12_6-test: # Testing
@@ -193,7 +193,7 @@ jobs:
 runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
 build_name: manywheel-py3_10-cuda12_8
 build_environment: linux-binary-manywheel
-PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
+PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
 secrets:
 github-token: ${{ secrets.GITHUB_TOKEN }}
 manywheel-py3_10-cuda12_8-test: # Testing
@@ -241,72 +241,6 @@ jobs:
 github-token: ${{ secrets.GITHUB_TOKEN }}
 uses: ./.github/workflows/_binary-upload.yml

-manywheel-py3_10-cuda12_9-build:
-if: ${{ github.repository_owner == 'pytorch' }}
-uses: ./.github/workflows/_binary-build-linux.yml
-needs: get-label-type
-with:
-PYTORCH_ROOT: /pytorch
-PACKAGE_TYPE: manywheel
-# TODO: This is a legacy variable that we eventually want to get rid of in
-# favor of GPU_ARCH_VERSION
-DESIRED_CUDA: cu129
-GPU_ARCH_VERSION: "12.9"
-GPU_ARCH_TYPE: cuda
-DOCKER_IMAGE: manylinux2_28-builder
-DOCKER_IMAGE_TAG_PREFIX: cuda12.9
-DESIRED_PYTHON: "3.10"
-runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
-build_name: manywheel-py3_10-cuda12_9
-build_environment: linux-binary-manywheel
-PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux'
-secrets:
-github-token: ${{ secrets.GITHUB_TOKEN }}
-manywheel-py3_10-cuda12_9-test: # Testing
-if: ${{ github.repository_owner == 'pytorch' }}
-needs:
-- manywheel-py3_10-cuda12_9-build
-- get-label-type
-uses: ./.github/workflows/_binary-test-linux.yml
-with:
-PYTORCH_ROOT: /pytorch
-PACKAGE_TYPE: manywheel
-# TODO: This is a legacy variable that we eventually want to get rid of in
-# favor of GPU_ARCH_VERSION
-DESIRED_CUDA: cu129
-GPU_ARCH_VERSION: "12.9"
-GPU_ARCH_TYPE: cuda
-DOCKER_IMAGE: manylinux2_28-builder
-DOCKER_IMAGE_TAG_PREFIX: cuda12.9
-DESIRED_PYTHON: "3.10"
-build_name: manywheel-py3_10-cuda12_9
-build_environment: linux-binary-manywheel
-runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
-runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner
-secrets:
-github-token: ${{ secrets.GITHUB_TOKEN }}
-manywheel-py3_10-cuda12_9-upload: # Uploading
-if: ${{ github.repository_owner == 'pytorch' }}
-permissions:
-id-token: write
-contents: read
-needs: manywheel-py3_10-cuda12_9-test
-with:
-PYTORCH_ROOT: /pytorch
-PACKAGE_TYPE: manywheel
-# TODO: This is a legacy variable that we eventually want to get rid of in
-# favor of GPU_ARCH_VERSION
-DESIRED_CUDA: cu129
-GPU_ARCH_VERSION: "12.9"
-GPU_ARCH_TYPE: cuda
-DOCKER_IMAGE: manylinux2_28-builder
-DOCKER_IMAGE_TAG_PREFIX: cuda12.9
-DESIRED_PYTHON: "3.10"
-build_name: manywheel-py3_10-cuda12_9
-secrets:
-github-token: ${{ secrets.GITHUB_TOKEN }}
-uses: ./.github/workflows/_binary-upload.yml

 manywheel-py3_10-cuda13_0-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 uses: ./.github/workflows/_binary-build-linux.yml
@@ -325,7 +259,7 @@ jobs:
 runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
 build_name: manywheel-py3_10-cuda13_0
 build_environment: linux-binary-manywheel
-PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux'
+PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux'
 secrets:
 github-token: ${{ secrets.GITHUB_TOKEN }}
 manywheel-py3_10-cuda13_0-test: # Testing
@@ -413,9 +347,6 @@ jobs:
 DOCKER_IMAGE: manylinux2_28-builder
 DOCKER_IMAGE_TAG_PREFIX: rocm6.4
 DESIRED_PYTHON: "3.10"
-permissions:
-id-token: write
-contents: read
 steps:
 - name: Setup ROCm
 uses: ./.github/actions/setup-rocm
@@ -528,9 +459,6 @@ jobs:
 DOCKER_IMAGE: manylinux2_28-builder
 DOCKER_IMAGE_TAG_PREFIX: rocm7.0
 DESIRED_PYTHON: "3.10"
-permissions:
-id-token: write
-contents: read
 steps:
 - name: Setup ROCm
 uses: ./.github/actions/setup-rocm
@@ -793,7 +721,7 @@ jobs:
 runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
 build_name: manywheel-py3_11-cuda12_6
 build_environment: linux-binary-manywheel
-PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
+PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
 secrets:
 github-token: ${{ secrets.GITHUB_TOKEN }}
 manywheel-py3_11-cuda12_6-test: # Testing
@@ -859,7 +787,7 @@ jobs:
 runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
 build_name: manywheel-py3_11-cuda12_8
 build_environment: linux-binary-manywheel
-PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
+PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
 secrets:
 github-token: ${{ secrets.GITHUB_TOKEN }}
 manywheel-py3_11-cuda12_8-test: # Testing
@@ -907,72 +835,6 @@ jobs:
 github-token: ${{ secrets.GITHUB_TOKEN }}
 uses: ./.github/workflows/_binary-upload.yml

-manywheel-py3_11-cuda12_9-build:
-if: ${{ github.repository_owner == 'pytorch' }}
-uses: ./.github/workflows/_binary-build-linux.yml
-needs: get-label-type
-with:
-PYTORCH_ROOT: /pytorch
-PACKAGE_TYPE: manywheel
-# TODO: This is a legacy variable that we eventually want to get rid of in
-# favor of GPU_ARCH_VERSION
-DESIRED_CUDA: cu129
-GPU_ARCH_VERSION: "12.9"
-GPU_ARCH_TYPE: cuda
-DOCKER_IMAGE: manylinux2_28-builder
-DOCKER_IMAGE_TAG_PREFIX: cuda12.9
-DESIRED_PYTHON: "3.11"
-runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
-build_name: manywheel-py3_11-cuda12_9
-build_environment: linux-binary-manywheel
-PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux'
-secrets:
-github-token: ${{ secrets.GITHUB_TOKEN }}
-manywheel-py3_11-cuda12_9-test: # Testing
-if: ${{ github.repository_owner == 'pytorch' }}
-needs:
-- manywheel-py3_11-cuda12_9-build
-- get-label-type
-uses: ./.github/workflows/_binary-test-linux.yml
-with:
-PYTORCH_ROOT: /pytorch
-PACKAGE_TYPE: manywheel
-# TODO: This is a legacy variable that we eventually want to get rid of in
-# favor of GPU_ARCH_VERSION
-DESIRED_CUDA: cu129
-GPU_ARCH_VERSION: "12.9"
-GPU_ARCH_TYPE: cuda
-DOCKER_IMAGE: manylinux2_28-builder
-DOCKER_IMAGE_TAG_PREFIX: cuda12.9
-DESIRED_PYTHON: "3.11"
-build_name: manywheel-py3_11-cuda12_9
-build_environment: linux-binary-manywheel
-runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
-runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner
-secrets:
-github-token: ${{ secrets.GITHUB_TOKEN }}
-manywheel-py3_11-cuda12_9-upload: # Uploading
-if: ${{ github.repository_owner == 'pytorch' }}
-permissions:
-id-token: write
-contents: read
-needs: manywheel-py3_11-cuda12_9-test
-with:
-PYTORCH_ROOT: /pytorch
-PACKAGE_TYPE: manywheel
-# TODO: This is a legacy variable that we eventually want to get rid of in
-# favor of GPU_ARCH_VERSION
-DESIRED_CUDA: cu129
-GPU_ARCH_VERSION: "12.9"
-GPU_ARCH_TYPE: cuda
-DOCKER_IMAGE: manylinux2_28-builder
-DOCKER_IMAGE_TAG_PREFIX: cuda12.9
-DESIRED_PYTHON: "3.11"
-build_name: manywheel-py3_11-cuda12_9
-secrets:
-github-token: ${{ secrets.GITHUB_TOKEN }}
-uses: ./.github/workflows/_binary-upload.yml

 manywheel-py3_11-cuda13_0-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 uses: ./.github/workflows/_binary-build-linux.yml
@@ -991,7 +853,7 @@ jobs:
 runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
 build_name: manywheel-py3_11-cuda13_0
 build_environment: linux-binary-manywheel
-PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux'
+PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux'
 secrets:
 github-token: ${{ secrets.GITHUB_TOKEN }}
 manywheel-py3_11-cuda13_0-test: # Testing
@@ -1079,9 +941,6 @@ jobs:
 DOCKER_IMAGE: manylinux2_28-builder
 DOCKER_IMAGE_TAG_PREFIX: rocm6.4
 DESIRED_PYTHON: "3.11"
-permissions:
-id-token: write
-contents: read
 steps:
 - name: Setup ROCm
 uses: ./.github/actions/setup-rocm
@@ -1194,9 +1053,6 @@ jobs:
 DOCKER_IMAGE: manylinux2_28-builder
 DOCKER_IMAGE_TAG_PREFIX: rocm7.0
 DESIRED_PYTHON: "3.11"
-permissions:
-id-token: write
-contents: read
 steps:
 - name: Setup ROCm
 uses: ./.github/actions/setup-rocm
@@ -1459,7 +1315,7 @@ jobs:
 runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
 build_name: manywheel-py3_12-cuda12_6
 build_environment: linux-binary-manywheel
-PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
+PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
 secrets:
 github-token: ${{ secrets.GITHUB_TOKEN }}
 manywheel-py3_12-cuda12_6-test: # Testing
@@ -1525,7 +1381,7 @@ jobs:
 runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
 build_name: manywheel-py3_12-cuda12_8
 build_environment: linux-binary-manywheel
-PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
+PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
 secrets:
 github-token: ${{ secrets.GITHUB_TOKEN }}
 manywheel-py3_12-cuda12_8-test: # Testing
@@ -1573,72 +1429,6 @@ jobs:
 github-token: ${{ secrets.GITHUB_TOKEN }}
 uses: ./.github/workflows/_binary-upload.yml

-manywheel-py3_12-cuda12_9-build:
-if: ${{ github.repository_owner == 'pytorch' }}
-uses: ./.github/workflows/_binary-build-linux.yml
-needs: get-label-type
-with:
-PYTORCH_ROOT: /pytorch
-PACKAGE_TYPE: manywheel
-# TODO: This is a legacy variable that we eventually want to get rid of in
-# favor of GPU_ARCH_VERSION
-DESIRED_CUDA: cu129
-GPU_ARCH_VERSION: "12.9"
-GPU_ARCH_TYPE: cuda
-DOCKER_IMAGE: manylinux2_28-builder
-DOCKER_IMAGE_TAG_PREFIX: cuda12.9
-DESIRED_PYTHON: "3.12"
-runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
-build_name: manywheel-py3_12-cuda12_9
-build_environment: linux-binary-manywheel
-PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux'
-secrets:
-github-token: ${{ secrets.GITHUB_TOKEN }}
-manywheel-py3_12-cuda12_9-test: # Testing
-if: ${{ github.repository_owner == 'pytorch' }}
-needs:
-- manywheel-py3_12-cuda12_9-build
-- get-label-type
-uses: ./.github/workflows/_binary-test-linux.yml
-with:
-PYTORCH_ROOT: /pytorch
-PACKAGE_TYPE: manywheel
|
|
||||||
# TODO: This is a legacy variable that we eventually want to get rid of in
|
|
||||||
# favor of GPU_ARCH_VERSION
|
|
||||||
DESIRED_CUDA: cu129
|
|
||||||
GPU_ARCH_VERSION: "12.9"
|
|
||||||
GPU_ARCH_TYPE: cuda
|
|
||||||
DOCKER_IMAGE: manylinux2_28-builder
|
|
||||||
DOCKER_IMAGE_TAG_PREFIX: cuda12.9
|
|
||||||
DESIRED_PYTHON: "3.12"
|
|
||||||
build_name: manywheel-py3_12-cuda12_9
|
|
||||||
build_environment: linux-binary-manywheel
|
|
||||||
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
|
|
||||||
runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner
|
|
||||||
secrets:
|
|
||||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
manywheel-py3_12-cuda12_9-upload: # Uploading
|
|
||||||
if: ${{ github.repository_owner == 'pytorch' }}
|
|
||||||
permissions:
|
|
||||||
id-token: write
|
|
||||||
contents: read
|
|
||||||
needs: manywheel-py3_12-cuda12_9-test
|
|
||||||
with:
|
|
||||||
PYTORCH_ROOT: /pytorch
|
|
||||||
PACKAGE_TYPE: manywheel
|
|
||||||
# TODO: This is a legacy variable that we eventually want to get rid of in
|
|
||||||
# favor of GPU_ARCH_VERSION
|
|
||||||
DESIRED_CUDA: cu129
|
|
||||||
GPU_ARCH_VERSION: "12.9"
|
|
||||||
GPU_ARCH_TYPE: cuda
|
|
||||||
DOCKER_IMAGE: manylinux2_28-builder
|
|
||||||
DOCKER_IMAGE_TAG_PREFIX: cuda12.9
|
|
||||||
DESIRED_PYTHON: "3.12"
|
|
||||||
build_name: manywheel-py3_12-cuda12_9
|
|
||||||
secrets:
|
|
||||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
uses: ./.github/workflows/_binary-upload.yml
|
|
||||||
|
|
||||||
manywheel-py3_12-cuda13_0-build:
|
manywheel-py3_12-cuda13_0-build:
|
||||||
if: ${{ github.repository_owner == 'pytorch' }}
|
if: ${{ github.repository_owner == 'pytorch' }}
|
||||||
uses: ./.github/workflows/_binary-build-linux.yml
|
uses: ./.github/workflows/_binary-build-linux.yml
|
||||||
@ -1657,7 +1447,7 @@ jobs:
|
|||||||
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
|
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
|
||||||
build_name: manywheel-py3_12-cuda13_0
|
build_name: manywheel-py3_12-cuda13_0
|
||||||
build_environment: linux-binary-manywheel
|
build_environment: linux-binary-manywheel
|
||||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux'
|
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux'
|
||||||
secrets:
|
secrets:
|
||||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
manywheel-py3_12-cuda13_0-test: # Testing
|
manywheel-py3_12-cuda13_0-test: # Testing
|
||||||
@ -1745,9 +1535,6 @@ jobs:
DOCKER_IMAGE: manylinux2_28-builder
DOCKER_IMAGE_TAG_PREFIX: rocm6.4
DESIRED_PYTHON: "3.12"
permissions:
id-token: write
contents: read
steps:
- name: Setup ROCm
uses: ./.github/actions/setup-rocm
@ -1860,9 +1647,6 @@ jobs:
DOCKER_IMAGE: manylinux2_28-builder
DOCKER_IMAGE_TAG_PREFIX: rocm7.0
DESIRED_PYTHON: "3.12"
permissions:
id-token: write
contents: read
steps:
- name: Setup ROCm
uses: ./.github/actions/setup-rocm
@ -2125,7 +1909,7 @@ jobs:
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
build_name: manywheel-py3_13-cuda12_6
build_environment: linux-binary-manywheel
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
manywheel-py3_13-cuda12_6-test: # Testing
@ -2191,7 +1975,7 @@ jobs:
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
build_name: manywheel-py3_13-cuda12_8
build_environment: linux-binary-manywheel
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
manywheel-py3_13-cuda12_8-test: # Testing
@ -2239,72 +2023,6 @@ jobs:
github-token: ${{ secrets.GITHUB_TOKEN }}
uses: ./.github/workflows/_binary-upload.yml

manywheel-py3_13-cuda12_9-build:
if: ${{ github.repository_owner == 'pytorch' }}
uses: ./.github/workflows/_binary-build-linux.yml
needs: get-label-type
with:
PYTORCH_ROOT: /pytorch
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
DESIRED_CUDA: cu129
GPU_ARCH_VERSION: "12.9"
GPU_ARCH_TYPE: cuda
DOCKER_IMAGE: manylinux2_28-builder
DOCKER_IMAGE_TAG_PREFIX: cuda12.9
DESIRED_PYTHON: "3.13"
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
build_name: manywheel-py3_13-cuda12_9
build_environment: linux-binary-manywheel
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux'
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
manywheel-py3_13-cuda12_9-test: # Testing
if: ${{ github.repository_owner == 'pytorch' }}
needs:
- manywheel-py3_13-cuda12_9-build
- get-label-type
uses: ./.github/workflows/_binary-test-linux.yml
with:
PYTORCH_ROOT: /pytorch
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
DESIRED_CUDA: cu129
GPU_ARCH_VERSION: "12.9"
GPU_ARCH_TYPE: cuda
DOCKER_IMAGE: manylinux2_28-builder
DOCKER_IMAGE_TAG_PREFIX: cuda12.9
DESIRED_PYTHON: "3.13"
build_name: manywheel-py3_13-cuda12_9
build_environment: linux-binary-manywheel
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
manywheel-py3_13-cuda12_9-upload: # Uploading
if: ${{ github.repository_owner == 'pytorch' }}
permissions:
id-token: write
contents: read
needs: manywheel-py3_13-cuda12_9-test
with:
PYTORCH_ROOT: /pytorch
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
DESIRED_CUDA: cu129
GPU_ARCH_VERSION: "12.9"
GPU_ARCH_TYPE: cuda
DOCKER_IMAGE: manylinux2_28-builder
DOCKER_IMAGE_TAG_PREFIX: cuda12.9
DESIRED_PYTHON: "3.13"
build_name: manywheel-py3_13-cuda12_9
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
uses: ./.github/workflows/_binary-upload.yml

manywheel-py3_13-cuda13_0-build:
if: ${{ github.repository_owner == 'pytorch' }}
uses: ./.github/workflows/_binary-build-linux.yml
@ -2323,7 +2041,7 @@ jobs:
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
build_name: manywheel-py3_13-cuda13_0
build_environment: linux-binary-manywheel
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux'
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
manywheel-py3_13-cuda13_0-test: # Testing
@ -2411,9 +2129,6 @@ jobs:
DOCKER_IMAGE: manylinux2_28-builder
DOCKER_IMAGE_TAG_PREFIX: rocm6.4
DESIRED_PYTHON: "3.13"
permissions:
id-token: write
contents: read
steps:
- name: Setup ROCm
uses: ./.github/actions/setup-rocm
@ -2526,9 +2241,6 @@ jobs:
DOCKER_IMAGE: manylinux2_28-builder
DOCKER_IMAGE_TAG_PREFIX: rocm7.0
DESIRED_PYTHON: "3.13"
permissions:
id-token: write
contents: read
steps:
- name: Setup ROCm
uses: ./.github/actions/setup-rocm
@ -2791,7 +2503,7 @@ jobs:
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
build_name: manywheel-py3_13t-cuda12_6
build_environment: linux-binary-manywheel
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
manywheel-py3_13t-cuda12_6-test: # Testing
@ -2857,7 +2569,7 @@ jobs:
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
build_name: manywheel-py3_13t-cuda12_8
build_environment: linux-binary-manywheel
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
manywheel-py3_13t-cuda12_8-test: # Testing
@ -2905,72 +2617,6 @@ jobs:
github-token: ${{ secrets.GITHUB_TOKEN }}
uses: ./.github/workflows/_binary-upload.yml

manywheel-py3_13t-cuda12_9-build:
if: ${{ github.repository_owner == 'pytorch' }}
uses: ./.github/workflows/_binary-build-linux.yml
needs: get-label-type
with:
PYTORCH_ROOT: /pytorch
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
DESIRED_CUDA: cu129
GPU_ARCH_VERSION: "12.9"
GPU_ARCH_TYPE: cuda
DOCKER_IMAGE: manylinux2_28-builder
DOCKER_IMAGE_TAG_PREFIX: cuda12.9
DESIRED_PYTHON: "3.13t"
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
build_name: manywheel-py3_13t-cuda12_9
build_environment: linux-binary-manywheel
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux'
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
manywheel-py3_13t-cuda12_9-test: # Testing
if: ${{ github.repository_owner == 'pytorch' }}
needs:
- manywheel-py3_13t-cuda12_9-build
- get-label-type
uses: ./.github/workflows/_binary-test-linux.yml
with:
PYTORCH_ROOT: /pytorch
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
DESIRED_CUDA: cu129
GPU_ARCH_VERSION: "12.9"
GPU_ARCH_TYPE: cuda
DOCKER_IMAGE: manylinux2_28-builder
DOCKER_IMAGE_TAG_PREFIX: cuda12.9
DESIRED_PYTHON: "3.13t"
build_name: manywheel-py3_13t-cuda12_9
build_environment: linux-binary-manywheel
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
manywheel-py3_13t-cuda12_9-upload: # Uploading
if: ${{ github.repository_owner == 'pytorch' }}
permissions:
id-token: write
contents: read
needs: manywheel-py3_13t-cuda12_9-test
with:
PYTORCH_ROOT: /pytorch
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
DESIRED_CUDA: cu129
GPU_ARCH_VERSION: "12.9"
GPU_ARCH_TYPE: cuda
DOCKER_IMAGE: manylinux2_28-builder
DOCKER_IMAGE_TAG_PREFIX: cuda12.9
DESIRED_PYTHON: "3.13t"
build_name: manywheel-py3_13t-cuda12_9
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
uses: ./.github/workflows/_binary-upload.yml

manywheel-py3_13t-cuda13_0-build:
if: ${{ github.repository_owner == 'pytorch' }}
uses: ./.github/workflows/_binary-build-linux.yml
@ -2989,7 +2635,7 @@ jobs:
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
build_name: manywheel-py3_13t-cuda13_0
build_environment: linux-binary-manywheel
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux'
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
manywheel-py3_13t-cuda13_0-test: # Testing
@ -3077,9 +2723,6 @@ jobs:
DOCKER_IMAGE: manylinux2_28-builder
DOCKER_IMAGE_TAG_PREFIX: rocm6.4
DESIRED_PYTHON: "3.13t"
permissions:
id-token: write
contents: read
steps:
- name: Setup ROCm
uses: ./.github/actions/setup-rocm
@ -3192,9 +2835,6 @@ jobs:
DOCKER_IMAGE: manylinux2_28-builder
DOCKER_IMAGE_TAG_PREFIX: rocm7.0
DESIRED_PYTHON: "3.13t"
permissions:
id-token: write
contents: read
steps:
- name: Setup ROCm
uses: ./.github/actions/setup-rocm
@ -3457,7 +3097,7 @@ jobs:
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
build_name: manywheel-py3_14-cuda12_6
build_environment: linux-binary-manywheel
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
manywheel-py3_14-cuda12_6-test: # Testing
@ -3523,7 +3163,7 @@ jobs:
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
build_name: manywheel-py3_14-cuda12_8
build_environment: linux-binary-manywheel
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
manywheel-py3_14-cuda12_8-test: # Testing
@ -3571,72 +3211,6 @@ jobs:
github-token: ${{ secrets.GITHUB_TOKEN }}
uses: ./.github/workflows/_binary-upload.yml

manywheel-py3_14-cuda12_9-build:
if: ${{ github.repository_owner == 'pytorch' }}
uses: ./.github/workflows/_binary-build-linux.yml
needs: get-label-type
with:
PYTORCH_ROOT: /pytorch
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
DESIRED_CUDA: cu129
GPU_ARCH_VERSION: "12.9"
GPU_ARCH_TYPE: cuda
DOCKER_IMAGE: manylinux2_28-builder
DOCKER_IMAGE_TAG_PREFIX: cuda12.9
DESIRED_PYTHON: "3.14"
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
build_name: manywheel-py3_14-cuda12_9
build_environment: linux-binary-manywheel
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux'
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
manywheel-py3_14-cuda12_9-test: # Testing
if: ${{ github.repository_owner == 'pytorch' }}
needs:
- manywheel-py3_14-cuda12_9-build
- get-label-type
uses: ./.github/workflows/_binary-test-linux.yml
with:
PYTORCH_ROOT: /pytorch
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
DESIRED_CUDA: cu129
GPU_ARCH_VERSION: "12.9"
GPU_ARCH_TYPE: cuda
DOCKER_IMAGE: manylinux2_28-builder
DOCKER_IMAGE_TAG_PREFIX: cuda12.9
DESIRED_PYTHON: "3.14"
build_name: manywheel-py3_14-cuda12_9
build_environment: linux-binary-manywheel
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
manywheel-py3_14-cuda12_9-upload: # Uploading
if: ${{ github.repository_owner == 'pytorch' }}
permissions:
id-token: write
contents: read
needs: manywheel-py3_14-cuda12_9-test
with:
PYTORCH_ROOT: /pytorch
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
DESIRED_CUDA: cu129
GPU_ARCH_VERSION: "12.9"
GPU_ARCH_TYPE: cuda
DOCKER_IMAGE: manylinux2_28-builder
DOCKER_IMAGE_TAG_PREFIX: cuda12.9
DESIRED_PYTHON: "3.14"
build_name: manywheel-py3_14-cuda12_9
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
uses: ./.github/workflows/_binary-upload.yml

manywheel-py3_14-cuda13_0-build:
if: ${{ github.repository_owner == 'pytorch' }}
uses: ./.github/workflows/_binary-build-linux.yml
@ -3655,7 +3229,7 @@ jobs:
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
build_name: manywheel-py3_14-cuda13_0
build_environment: linux-binary-manywheel
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux'
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
manywheel-py3_14-cuda13_0-test: # Testing
@ -3743,9 +3317,6 @@ jobs:
DOCKER_IMAGE: manylinux2_28-builder
DOCKER_IMAGE_TAG_PREFIX: rocm6.4
DESIRED_PYTHON: "3.14"
permissions:
id-token: write
contents: read
steps:
- name: Setup ROCm
uses: ./.github/actions/setup-rocm
@ -3858,9 +3429,6 @@ jobs:
DOCKER_IMAGE: manylinux2_28-builder
DOCKER_IMAGE_TAG_PREFIX: rocm7.0
DESIRED_PYTHON: "3.14"
permissions:
id-token: write
contents: read
steps:
- name: Setup ROCm
uses: ./.github/actions/setup-rocm
@ -4123,7 +3691,7 @@ jobs:
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
build_name: manywheel-py3_14t-cuda12_6
build_environment: linux-binary-manywheel
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
manywheel-py3_14t-cuda12_6-test: # Testing
@ -4189,7 +3757,7 @@ jobs:
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
build_name: manywheel-py3_14t-cuda12_8
build_environment: linux-binary-manywheel
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
manywheel-py3_14t-cuda12_8-test: # Testing
@ -4237,72 +3805,6 @@ jobs:
github-token: ${{ secrets.GITHUB_TOKEN }}
uses: ./.github/workflows/_binary-upload.yml

manywheel-py3_14t-cuda12_9-build:
if: ${{ github.repository_owner == 'pytorch' }}
uses: ./.github/workflows/_binary-build-linux.yml
needs: get-label-type
with:
PYTORCH_ROOT: /pytorch
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
DESIRED_CUDA: cu129
GPU_ARCH_VERSION: "12.9"
GPU_ARCH_TYPE: cuda
DOCKER_IMAGE: manylinux2_28-builder
DOCKER_IMAGE_TAG_PREFIX: cuda12.9
DESIRED_PYTHON: "3.14t"
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
build_name: manywheel-py3_14t-cuda12_9
build_environment: linux-binary-manywheel
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' | nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' | nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' | nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' | nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' | nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' | nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' | nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' | nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' | nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' | nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux'
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
manywheel-py3_14t-cuda12_9-test: # Testing
if: ${{ github.repository_owner == 'pytorch' }}
needs:
- manywheel-py3_14t-cuda12_9-build
- get-label-type
uses: ./.github/workflows/_binary-test-linux.yml
with:
PYTORCH_ROOT: /pytorch
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
DESIRED_CUDA: cu129
GPU_ARCH_VERSION: "12.9"
GPU_ARCH_TYPE: cuda
DOCKER_IMAGE: manylinux2_28-builder
DOCKER_IMAGE_TAG_PREFIX: cuda12.9
DESIRED_PYTHON: "3.14t"
build_name: manywheel-py3_14t-cuda12_9
build_environment: linux-binary-manywheel
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
manywheel-py3_14t-cuda12_9-upload: # Uploading
if: ${{ github.repository_owner == 'pytorch' }}
permissions:
id-token: write
contents: read
needs: manywheel-py3_14t-cuda12_9-test
with:
PYTORCH_ROOT: /pytorch
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
DESIRED_CUDA: cu129
GPU_ARCH_VERSION: "12.9"
GPU_ARCH_TYPE: cuda
DOCKER_IMAGE: manylinux2_28-builder
DOCKER_IMAGE_TAG_PREFIX: cuda12.9
DESIRED_PYTHON: "3.14t"
build_name: manywheel-py3_14t-cuda12_9
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
uses: ./.github/workflows/_binary-upload.yml

manywheel-py3_14t-cuda13_0-build:
if: ${{ github.repository_owner == 'pytorch' }}
uses: ./.github/workflows/_binary-build-linux.yml
@ -4321,7 +3823,7 @@ jobs:
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
build_name: manywheel-py3_14t-cuda13_0
build_environment: linux-binary-manywheel
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | nvidia-cublas==13.0.0.19; platform_system == 'Linux' | nvidia-cufft==12.0.0.15; platform_system == 'Linux' | nvidia-curand==10.4.0.35; platform_system == 'Linux' | nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | nvidia-nccl-cu13==2.28.3; platform_system == 'Linux' | nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | nvidia-nvtx==13.0.39; platform_system == 'Linux' | nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | nvidia-cufile==1.15.0.42; platform_system == 'Linux'
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
manywheel-py3_14t-cuda13_0-test: # Testing
@@ -4409,9 +3911,6 @@ jobs:
 DOCKER_IMAGE: manylinux2_28-builder
 DOCKER_IMAGE_TAG_PREFIX: rocm6.4
 DESIRED_PYTHON: "3.14t"
-permissions:
-id-token: write
-contents: read
 steps:
 - name: Setup ROCm
 uses: ./.github/actions/setup-rocm
@@ -4524,9 +4023,6 @@ jobs:
 DOCKER_IMAGE: manylinux2_28-builder
 DOCKER_IMAGE_TAG_PREFIX: rocm7.0
 DESIRED_PYTHON: "3.14t"
-permissions:
-id-token: write
-contents: read
 steps:
 - name: Setup ROCm
 uses: ./.github/actions/setup-rocm
.github/workflows/generated-macos-arm64-binary-libtorch-release-nightly.yml (1 change; generated, vendored)
@@ -63,6 +63,7 @@ jobs:
 - name: Setup Python
 uses: actions/setup-python@v6
 with:
+# TODO: Removeme once 3.14 is out
 # .4 version is min minor for 3.10, and also no-gil version of 3.13 needs at least 3.13.3
 python-version: "3.10.4"
 freethreaded: false
.github/workflows/generated-macos-arm64-binary-wheel-nightly.yml (11 changes; generated, vendored)
@@ -59,6 +59,7 @@ jobs:
 - name: Setup Python
 uses: actions/setup-python@v6
 with:
+# TODO: Removeme once 3.14 is out
 # .4 version is min minor for 3.10, and also no-gil version of 3.13 needs at least 3.13.3
 python-version: "3.10.4"
 freethreaded: false
@@ -168,6 +169,7 @@ jobs:
 - name: Setup Python
 uses: actions/setup-python@v6
 with:
+# TODO: Removeme once 3.14 is out
 # .4 version is min minor for 3.10, and also no-gil version of 3.13 needs at least 3.13.3
 python-version: "3.11.4"
 freethreaded: false
@@ -277,6 +279,7 @@ jobs:
 - name: Setup Python
 uses: actions/setup-python@v6
 with:
+# TODO: Removeme once 3.14 is out
 # .4 version is min minor for 3.10, and also no-gil version of 3.13 needs at least 3.13.3
 python-version: "3.12.4"
 freethreaded: false
@@ -386,6 +389,7 @@ jobs:
 - name: Setup Python
 uses: actions/setup-python@v6
 with:
+# TODO: Removeme once 3.14 is out
 # .4 version is min minor for 3.10, and also no-gil version of 3.13 needs at least 3.13.3
 python-version: "3.13.4"
 freethreaded: false
@@ -495,6 +499,7 @@ jobs:
 - name: Setup Python
 uses: actions/setup-python@v6
 with:
+# TODO: Removeme once 3.14 is out
 # .4 version is min minor for 3.10, and also no-gil version of 3.13 needs at least 3.13.3
 python-version: "3.13.4"
 freethreaded: true
@@ -604,8 +609,9 @@ jobs:
 - name: Setup Python
 uses: actions/setup-python@v6
 with:
+# TODO: Removeme once 3.14 is out
 # .4 version is min minor for 3.10, and also no-gil version of 3.13 needs at least 3.13.3
-python-version: "3.14.0"
+python-version: "3.14.0-rc.2"
 freethreaded: false
 - name: Checkout PyTorch
 uses: actions/checkout@v4
@@ -713,8 +719,9 @@ jobs:
 - name: Setup Python
 uses: actions/setup-python@v6
 with:
+# TODO: Removeme once 3.14 is out
 # .4 version is min minor for 3.10, and also no-gil version of 3.13 needs at least 3.13.3
-python-version: "3.14.0"
+python-version: "3.14.0-rc.2"
 freethreaded: true
 - name: Checkout PyTorch
 uses: actions/checkout@v4
.github/workflows/generated-windows-binary-libtorch-debug-nightly.yml (8 changes; generated, vendored)
@@ -44,7 +44,7 @@ jobs:
 libtorch-cpu-shared-with-deps-debug-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -291,7 +291,7 @@ jobs:
 libtorch-cuda12_6-shared-with-deps-debug-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -541,7 +541,7 @@ jobs:
 libtorch-cuda12_8-shared-with-deps-debug-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -791,7 +791,7 @@ jobs:
 libtorch-cuda13_0-shared-with-deps-debug-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
.github/workflows/generated-windows-binary-libtorch-release-nightly.yml (8 changes; generated, vendored)
@@ -44,7 +44,7 @@ jobs:
 libtorch-cpu-shared-with-deps-release-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -291,7 +291,7 @@ jobs:
 libtorch-cuda12_6-shared-with-deps-release-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -541,7 +541,7 @@ jobs:
 libtorch-cuda12_8-shared-with-deps-release-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -791,7 +791,7 @@ jobs:
 libtorch-cuda13_0-shared-with-deps-release-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
.github/workflows/generated-windows-binary-wheel-nightly.yml (70 changes; generated, vendored)
@@ -44,7 +44,7 @@ jobs:
 wheel-py3_10-cpu-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -279,7 +279,7 @@ jobs:
 wheel-py3_10-cuda12_6-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -517,7 +517,7 @@ jobs:
 wheel-py3_10-cuda12_8-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -755,7 +755,7 @@ jobs:
 wheel-py3_10-cuda13_0-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -993,7 +993,7 @@ jobs:
 wheel-py3_10-xpu-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -1229,7 +1229,7 @@ jobs:
 wheel-py3_11-cpu-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -1464,7 +1464,7 @@ jobs:
 wheel-py3_11-cuda12_6-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -1702,7 +1702,7 @@ jobs:
 wheel-py3_11-cuda12_8-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -1940,7 +1940,7 @@ jobs:
 wheel-py3_11-cuda13_0-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -2178,7 +2178,7 @@ jobs:
 wheel-py3_11-xpu-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -2414,7 +2414,7 @@ jobs:
 wheel-py3_12-cpu-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -2649,7 +2649,7 @@ jobs:
 wheel-py3_12-cuda12_6-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -2887,7 +2887,7 @@ jobs:
 wheel-py3_12-cuda12_8-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -3125,7 +3125,7 @@ jobs:
 wheel-py3_12-cuda13_0-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -3363,7 +3363,7 @@ jobs:
 wheel-py3_12-xpu-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -3599,7 +3599,7 @@ jobs:
 wheel-py3_13-cpu-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -3834,7 +3834,7 @@ jobs:
 wheel-py3_13-cuda12_6-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -4072,7 +4072,7 @@ jobs:
 wheel-py3_13-cuda12_8-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -4310,7 +4310,7 @@ jobs:
 wheel-py3_13-cuda13_0-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -4548,7 +4548,7 @@ jobs:
 wheel-py3_13-xpu-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -4784,7 +4784,7 @@ jobs:
 wheel-py3_13t-cpu-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -5019,7 +5019,7 @@ jobs:
 wheel-py3_13t-cuda12_6-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -5257,7 +5257,7 @@ jobs:
 wheel-py3_13t-cuda12_8-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -5495,7 +5495,7 @@ jobs:
 wheel-py3_13t-cuda13_0-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -5733,7 +5733,7 @@ jobs:
 wheel-py3_13t-xpu-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -5969,7 +5969,7 @@ jobs:
 wheel-py3_14-cpu-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -6204,7 +6204,7 @@ jobs:
 wheel-py3_14-cuda12_6-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -6442,7 +6442,7 @@ jobs:
 wheel-py3_14-cuda12_8-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -6680,7 +6680,7 @@ jobs:
 wheel-py3_14-cuda13_0-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -6918,7 +6918,7 @@ jobs:
 wheel-py3_14-xpu-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -7154,7 +7154,7 @@ jobs:
 wheel-py3_14t-cpu-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -7389,7 +7389,7 @@ jobs:
 wheel-py3_14t-cuda12_6-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -7627,7 +7627,7 @@ jobs:
 wheel-py3_14t-cuda12_8-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -7865,7 +7865,7 @@ jobs:
 wheel-py3_14t-cuda13_0-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
@@ -8103,7 +8103,7 @@ jobs:
 wheel-py3_14t-xpu-build:
 if: ${{ github.repository_owner == 'pytorch' }}
 needs: get-label-type
-runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.12xlarge"
+runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
 timeout-minutes: 360
 env:
 PYTORCH_ROOT: ${{ github.workspace }}/pytorch
.github/workflows/h100-distributed.yml (2 changes; vendored)
@@ -37,7 +37,7 @@ jobs:
 needs: get-label-type
 with:
 runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
-runner: "linux.c7i.12xlarge"
+runner: "linux.12xlarge"
 build-environment: linux-jammy-cuda12.8-py3.10-gcc11-sm90-dist
 docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc11
 cuda-arch-list: '9.0'
@@ -2,7 +2,7 @@ name: inductor-perf-nightly-h100

 on:
 schedule:
-- cron: 15 0 * * 1-6
+- cron: 15 0,12 * * 1-6
 - cron: 0 7 * * 0
 # NB: GitHub has an upper limit of 10 inputs here, so before we can sort it
 # out, let try to run torchao cudagraphs_low_precision as part of cudagraphs
@@ -130,7 +130,7 @@ jobs:
 name: test-periodically
 uses: ./.github/workflows/_linux-test.yml
 needs: build
-if: github.event.schedule == '15 0 * * 1-6'
+if: github.event.schedule == '15 0,12 * * 1-6'
 with:
 build-environment: linux-jammy-cuda12.8-py3.10-gcc9-sm90
 dashboard-tag: training-true-inference-true-default-true-dynamic-true-cudagraphs-true-cppwrapper-true-aotinductor-true-freezing_cudagraphs-true-cudagraphs_low_precision-true
@@ -63,7 +63,6 @@ jobs:
 # Same as the build job
 python-version: 3.12.7
 test-matrix: ${{ needs.macos-perf-py3-arm64-build.outputs.test-matrix }}
-timeout-minutes: 300
 disable-monitor: false
 monitor-log-interval: 15
 monitor-data-collect-interval: 4
@@ -1,132 +0,0 @@
-name: inductor-perf-nightly-rocm-mi300
-
-on:
-push:
-tags:
-- ciflow/inductor-perf-test-nightly-rocm-mi300/*
-schedule:
-- cron: 15 0 * * *
-# NB: GitHub has an upper limit of 10 inputs here, so before we can sort it
-# out, let try to run torchao cudagraphs_low_precision as part of cudagraphs
-workflow_dispatch:
-inputs:
-training:
-description: Run training (on by default)?
-required: false
-type: boolean
-default: true
-inference:
-description: Run inference (on by default)?
-required: false
-type: boolean
-default: true
-default:
-description: Run inductor_default?
-required: false
-type: boolean
-default: false
-dynamic:
-description: Run inductor_dynamic_shapes?
-required: false
-type: boolean
-default: false
-cppwrapper:
-description: Run inductor_cpp_wrapper?
-required: false
-type: boolean
-default: false
-cudagraphs:
-description: Run inductor_cudagraphs?
-required: false
-type: boolean
-default: true
-freezing_cudagraphs:
-description: Run inductor_cudagraphs with freezing for inference?
-required: false
-type: boolean
-default: false
-aotinductor:
-description: Run aot_inductor for inference?
-required: false
-type: boolean
-default: false
-maxautotune:
-description: Run inductor_max_autotune?
-required: false
-type: boolean
-default: false
-benchmark_configs:
-description: The list of configs used the benchmark
-required: false
-type: string
-default: inductor_huggingface_perf_rocm_mi300,inductor_timm_perf_rocm_mi300,inductor_torchbench_perf_rocm_mi300
-
-concurrency:
-group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}
-cancel-in-progress: true
-
-permissions: read-all
-
-jobs:
-get-label-type:
-name: get-label-type
-uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
-if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }}
-with:
-triggering_actor: ${{ github.triggering_actor }}
-issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
-curr_branch: ${{ github.head_ref || github.ref_name }}
-curr_ref_type: ${{ github.ref_type }}
-opt_out_experiments: lf
-
-linux-jammy-rocm-py3_10-inductor-benchmark-build:
-if: github.repository_owner == 'pytorch'
-name: rocm-py3_10-inductor-benchmark-build
-uses: ./.github/workflows/_linux-build.yml
-with:
-build-environment: linux-jammy-rocm-py3_10
-docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3-benchmarks
-test-matrix: |
-{ include: [
-{ config: "inductor_huggingface_perf_rocm_mi300", shard: 1, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" },
-{ config: "inductor_huggingface_perf_rocm_mi300", shard: 2, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" },
-{ config: "inductor_huggingface_perf_rocm_mi300", shard: 3, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" },
-{ config: "inductor_huggingface_perf_rocm_mi300", shard: 4, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" },
-{ config: "inductor_huggingface_perf_rocm_mi300", shard: 5, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" },
-{ config: "inductor_timm_perf_rocm_mi300", shard: 1, num_shards: 7, runner: "linux.rocm.gpu.gfx942.1" },
-{ config: "inductor_timm_perf_rocm_mi300", shard: 2, num_shards: 7, runner: "linux.rocm.gpu.gfx942.1" },
-{ config: "inductor_timm_perf_rocm_mi300", shard: 3, num_shards: 7, runner: "linux.rocm.gpu.gfx942.1" },
-{ config: "inductor_timm_perf_rocm_mi300", shard: 4, num_shards: 7, runner: "linux.rocm.gpu.gfx942.1" },
-{ config: "inductor_timm_perf_rocm_mi300", shard: 5, num_shards: 7, runner: "linux.rocm.gpu.gfx942.1" },
-{ config: "inductor_timm_perf_rocm_mi300", shard: 6, num_shards: 7, runner: "linux.rocm.gpu.gfx942.1" },
-{ config: "inductor_timm_perf_rocm_mi300", shard: 7, num_shards: 7, runner: "linux.rocm.gpu.gfx942.1" },
-{ config: "inductor_torchbench_perf_rocm_mi300", shard: 1, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" },
-{ config: "inductor_torchbench_perf_rocm_mi300", shard: 2, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" },
-{ config: "inductor_torchbench_perf_rocm_mi300", shard: 3, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" },
-{ config: "inductor_torchbench_perf_rocm_mi300", shard: 4, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" },
-{ config: "inductor_torchbench_perf_rocm_mi300", shard: 5, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" },
-{ config: "inductor_torchbench_perf_rocm_mi300", shard: 6, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" },
-{ config: "inductor_torchbench_perf_rocm_mi300", shard: 7, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" },
-{ config: "inductor_torchbench_perf_rocm_mi300", shard: 8, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" },
-{ config: "inductor_torchbench_perf_rocm_mi300", shard: 9, num_shards: 9, runner: "linux.rocm.gpu.gfx942.1" },
-]}
-secrets: inherit
-
-linux-jammy-rocm-py3_10-inductor-benchmark-test:
-permissions:
-id-token: write
-contents: read
-name: rocm-py3_10-inductor-benchmark-test
-uses: ./.github/workflows/_rocm-test.yml
-needs: linux-jammy-rocm-py3_10-inductor-benchmark-build
-with:
-build-environment: linux-jammy-rocm-py3_10
-dashboard-tag: training-true-inference-true-default-true-dynamic-true-cudagraphs-true-cppwrapper-true-aotinductor-true-freezing_cudagraphs-true-cudagraphs_low_precision-true
-docker-image: ${{ needs.linux-jammy-rocm-py3_10-inductor-benchmark-build.outputs.docker-image }}
-test-matrix: ${{ needs.linux-jammy-rocm-py3_10-inductor-benchmark-build.outputs.test-matrix }}
-timeout-minutes: 720
-# Disable monitor in perf tests for more investigation
-disable-monitor: true
-monitor-log-interval: 10
-monitor-data-collect-interval: 2
-secrets: inherit
@@ -1,11 +1,11 @@
-name: inductor-perf-nightly-rocm-mi355
+name: inductor-perf-nightly-rocm

 on:
 push:
 tags:
-- ciflow/inductor-perf-test-nightly-rocm-mi355/*
+- ciflow/inductor-perf-test-nightly-rocm/*
 schedule:
-- cron: 15 0 * * *
+- cron: 0 7 * * 0,3
 # NB: GitHub has an upper limit of 10 inputs here, so before we can sort it
 # out, let try to run torchao cudagraphs_low_precision as part of cudagraphs
 workflow_dispatch:
@@ -59,7 +59,7 @@ on:
 description: The list of configs used the benchmark
 required: false
 type: string
-default: inductor_huggingface_perf_rocm_mi355,inductor_timm_perf_rocm_mi355,inductor_torchbench_perf_rocm_mi355
+default: inductor_huggingface_perf_rocm,inductor_timm_perf_rocm,inductor_torchbench_perf_rocm

 concurrency:
 group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}
@@ -88,27 +88,23 @@ jobs:
 docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3-benchmarks
 test-matrix: |
 { include: [
-{ config: "inductor_huggingface_perf_rocm_mi355", shard: 1, num_shards: 5, runner: "linux.rocm.gpu.mi355.1" },
-{ config: "inductor_huggingface_perf_rocm_mi355", shard: 2, num_shards: 5, runner: "linux.rocm.gpu.mi355.1" },
-{ config: "inductor_huggingface_perf_rocm_mi355", shard: 3, num_shards: 5, runner: "linux.rocm.gpu.mi355.1" },
-{ config: "inductor_huggingface_perf_rocm_mi355", shard: 4, num_shards: 5, runner: "linux.rocm.gpu.mi355.1" },
-{ config: "inductor_huggingface_perf_rocm_mi355", shard: 5, num_shards: 5, runner: "linux.rocm.gpu.mi355.1" },
-{ config: "inductor_timm_perf_rocm_mi355", shard: 1, num_shards: 7, runner: "linux.rocm.gpu.mi355.1" },
-{ config: "inductor_timm_perf_rocm_mi355", shard: 2, num_shards: 7, runner: "linux.rocm.gpu.mi355.1" },
-{ config: "inductor_timm_perf_rocm_mi355", shard: 3, num_shards: 7, runner: "linux.rocm.gpu.mi355.1" },
-{ config: "inductor_timm_perf_rocm_mi355", shard: 4, num_shards: 7, runner: "linux.rocm.gpu.mi355.1" },
-{ config: "inductor_timm_perf_rocm_mi355", shard: 5, num_shards: 7, runner: "linux.rocm.gpu.mi355.1" },
-{ config: "inductor_timm_perf_rocm_mi355", shard: 6, num_shards: 7, runner: "linux.rocm.gpu.mi355.1" },
-{ config: "inductor_timm_perf_rocm_mi355", shard: 7, num_shards: 7, runner: "linux.rocm.gpu.mi355.1" },
-{ config: "inductor_torchbench_perf_rocm_mi355", shard: 1, num_shards: 9, runner: "linux.rocm.gpu.mi355.1" },
-{ config: "inductor_torchbench_perf_rocm_mi355", shard: 2, num_shards: 9, runner: "linux.rocm.gpu.mi355.1" },
-{ config: "inductor_torchbench_perf_rocm_mi355", shard: 3, num_shards: 9, runner: "linux.rocm.gpu.mi355.1" },
-{ config: "inductor_torchbench_perf_rocm_mi355", shard: 4, num_shards: 9, runner: "linux.rocm.gpu.mi355.1" },
-{ config: "inductor_torchbench_perf_rocm_mi355", shard: 5, num_shards: 9, runner: "linux.rocm.gpu.mi355.1" },
-{ config: "inductor_torchbench_perf_rocm_mi355", shard: 6, num_shards: 9, runner: "linux.rocm.gpu.mi355.1" },
-{ config: "inductor_torchbench_perf_rocm_mi355", shard: 7, num_shards: 9, runner: "linux.rocm.gpu.mi355.1" },
-{ config: "inductor_torchbench_perf_rocm_mi355", shard: 8, num_shards: 9, runner: "linux.rocm.gpu.mi355.1" },
-{ config: "inductor_torchbench_perf_rocm_mi355", shard: 9, num_shards: 9, runner: "linux.rocm.gpu.mi355.1" },
+{ config: "inductor_huggingface_perf_rocm", shard: 1, num_shards: 4, runner: "linux.rocm.gpu.gfx942.1" },
+{ config: "inductor_huggingface_perf_rocm", shard: 2, num_shards: 4, runner: "linux.rocm.gpu.gfx942.1" },
+{ config: "inductor_huggingface_perf_rocm", shard: 3, num_shards: 4, runner: "linux.rocm.gpu.gfx942.1" },
+{ config: "inductor_huggingface_perf_rocm", shard: 4, num_shards: 4, runner: "linux.rocm.gpu.gfx942.1" },
+{ config: "inductor_timm_perf_rocm", shard: 1, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" },
+{ config: "inductor_timm_perf_rocm", shard: 2, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" },
+{ config: "inductor_timm_perf_rocm", shard: 3, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" },
+{ config: "inductor_timm_perf_rocm", shard: 4, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" },
+{ config: "inductor_timm_perf_rocm", shard: 5, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" },
+{ config: "inductor_torchbench_perf_rocm", shard: 1, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" },
+{ config: "inductor_torchbench_perf_rocm", shard: 2, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" },
+{ config: "inductor_torchbench_perf_rocm", shard: 3, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" },
+{ config: "inductor_torchbench_perf_rocm", shard: 4, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" },
+{ config: "inductor_torchbench_perf_rocm", shard: 5, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" },
+{ config: "inductor_torchbench_perf_rocm", shard: 6, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" },
+{ config: "inductor_torchbench_perf_rocm", shard: 7, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" },
+{ config: "inductor_torchbench_perf_rocm", shard: 8, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" },
 ]}
 secrets: inherit

.github/workflows/inductor-periodic.yml (10 changes; vendored)
@@ -106,16 +106,6 @@ jobs:
 { config: "dynamic_aot_eager_huggingface", shard: 1, num_shards: 1, runner: "linux.rocm.gpu.gfx942.1" },
 { config: "dynamic_aot_eager_timm", shard: 1, num_shards: 2, runner: "linux.rocm.gpu.gfx942.1" },
 { config: "dynamic_aot_eager_timm", shard: 2, num_shards: 2, runner: "linux.rocm.gpu.gfx942.1" },
-{ config: "dynamic_inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.rocm.gpu.gfx942.1" },
-{ config: "dynamic_inductor_timm", shard: 1, num_shards: 2, runner: "linux.rocm.gpu.gfx942.1" },
-{ config: "dynamic_inductor_timm", shard: 2, num_shards: 2, runner: "linux.rocm.gpu.gfx942.1" },
-{ config: "dynamic_inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.rocm.gpu.gfx942.1" },
-{ config: "dynamic_inductor_torchbench", shard: 2, num_shards: 2, runner: "linux.rocm.gpu.gfx942.1" },
-{ config: "aot_inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.rocm.gpu.gfx942.1" },
-{ config: "aot_inductor_timm", shard: 1, num_shards: 2, runner: "linux.rocm.gpu.gfx942.1" },
-{ config: "aot_inductor_timm", shard: 2, num_shards: 2, runner: "linux.rocm.gpu.gfx942.1" },
-{ config: "aot_inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.rocm.gpu.gfx942.1" },
-{ config: "aot_inductor_torchbench", shard: 2, num_shards: 2, runner: "linux.rocm.gpu.gfx942.1" },
 ]}
 secrets: inherit

.github/workflows/lint.yml (11 changes; vendored)
@@ -12,7 +12,6 @@ on:
 - landchecks/*
 tags:
 - ciflow/pull/*
-- ciflow/trunk/*
 workflow_dispatch:

 permissions: read-all
@@ -33,12 +32,10 @@ jobs:
 name: Get changed files
 uses: ./.github/workflows/_get-changed-files.yml
 with:
-all_files: ${{ contains(github.event.pull_request.labels.*.name, 'lint-all-files') || contains(github.event.pull_request.labels.*.name, 'Reverted') || github.event_name == 'push' }}
+all_files: ${{ contains(github.event.pull_request.labels.*.name, 'lint-all-files') || contains(github.event.pull_request.labels.*.name, 'Reverted') }}

 lintrunner-clang:
 uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
-# Needed to prevent deduping on HUD
-name: lintrunner-clang-${{ needs.get-changed-files.outputs.changed-files == '*' && 'all' || 'partial' }}
 needs: [get-label-type, get-changed-files]
 # Only run if there are changed files relevant to clangtidy / clangformat
 if: |
@@ -78,7 +75,6 @@ jobs:
 # fails to find types when it should
 lintrunner-mypy:
 uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
-name: lintrunner-mypy-${{ needs.get-changed-files.outputs.changed-files == '*' && 'all' || 'partial' }}
 needs: [get-label-type, get-changed-files]
 # Only run if there are changed files relevant to mypy
 if: |
@@ -103,7 +99,6 @@ jobs:

 lintrunner-noclang:
 uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
-name: lintrunner-noclang-${{ needs.get-changed-files.outputs.changed-files == '*' && 'all' || 'partial' }}
 needs: [get-label-type, get-changed-files]
 with:
 timeout: 120
@@ -118,9 +113,9 @@ jobs:
 CHANGED_FILES="${{ needs.get-changed-files.outputs.changed-files }}"
 echo "Running all other linters"
 if [ "$CHANGED_FILES" = '*' ]; then
-ADDITIONAL_LINTRUNNER_ARGS="--skip CLANGTIDY,CLANGFORMAT,MYPY,MYPYSTRICT,PYREFLY --all-files" .github/scripts/lintrunner.sh
+ADDITIONAL_LINTRUNNER_ARGS="--skip CLANGTIDY,CLANGFORMAT,MYPY,MYPYSTRICT --all-files" .github/scripts/lintrunner.sh
 else
-ADDITIONAL_LINTRUNNER_ARGS="--skip CLANGTIDY,CLANGFORMAT,MYPY,MYPYSTRICT,PYREFLY ${CHANGED_FILES}" .github/scripts/lintrunner.sh
+ADDITIONAL_LINTRUNNER_ARGS="--skip CLANGTIDY,CLANGFORMAT,MYPY,MYPYSTRICT ${CHANGED_FILES}" .github/scripts/lintrunner.sh
 fi

 quick-checks:
.github/workflows/operator_benchmark.yml (49 changes; vendored)
@@ -7,11 +7,9 @@ on:
 workflow_dispatch:
 inputs:
 test_mode:
-type: choice
-options:
-- 'short'
-- 'long'
-- 'all'
+required: false
+type: string
+default: 'short'
 description: tag filter for operator benchmarks, options from long, short, all
 schedule:
 # Run at 07:00 UTC every Sunday
@@ -30,49 +28,38 @@ permissions:
 contents: read

 jobs:
-x86-opbenchmark-build:
+opbenchmark-build:
 if: github.repository_owner == 'pytorch'
-name: x86-opbenchmark-build
+name: opbenchmark-build
 uses: ./.github/workflows/_linux-build.yml
 with:
 build-environment: linux-jammy-py3.10-gcc11-build
 docker-image-name: ci-image:pytorch-linux-jammy-py3-gcc11-inductor-benchmarks
 test-matrix: |
 { include: [
-{ config: "cpu_operator_benchmark_${{ inputs.test_mode || 'short' }}", shard: 1, num_shards: 1, runner: "linux.12xlarge" },
+{ config: "cpu_operator_benchmark_short", shard: 1, num_shards: 1, runner: "linux.12xlarge" },
 ]}
 secrets: inherit

-x86-opbenchmark-test:
-name: x86-opbenchmark-test
-uses: ./.github/workflows/_linux-test.yml
-needs: x86-opbenchmark-build
-with:
-build-environment: linux-jammy-py3.10-gcc11-build
-docker-image: ${{ needs.x86-opbenchmark-build.outputs.docker-image }}
-test-matrix: ${{ needs.x86-opbenchmark-build.outputs.test-matrix }}
-secrets: inherit
-
-aarch64-opbenchmark-build:
-if: github.repository_owner == 'pytorch'
-name: aarch64-opbenchmark-build
+opbenchmark-on-demand-build:
+if: ${{ github.event_name == 'workflow_dispatch' && github.repository_owner == 'pytorch' }}
+name: opbenchmark-on-demand-build
 uses: ./.github/workflows/_linux-build.yml
 with:
-build-environment: linux-jammy-aarch64-py3.10
-runner: linux.arm64.m7g.4xlarge
-docker-image-name: ci-image:pytorch-linux-jammy-aarch64-py3.10-gcc11
+build-environment: linux-jammy-py3.10-gcc11-build
+docker-image-name: ci-image:pytorch-linux-jammy-py3-gcc11-inductor-benchmarks
 test-matrix: |
 { include: [
-{ config: "cpu_operator_benchmark_short", shard: 1, num_shards: 1, runner: "linux.arm64.m8g.4xlarge" },
+{ config: "cpu_operator_benchmark_${{ inputs.test_mode }}", shard: 1, num_shards: 1, runner: "linux.12xlarge" },
 ]}
 secrets: inherit

-aarch64-opbenchmark-test:
-name: aarch64-opbenchmark-test
+opbenchmark-test:
+name: opbenchmark-test
 uses: ./.github/workflows/_linux-test.yml
-needs: aarch64-opbenchmark-build
+needs: opbenchmark-build
 with:
-build-environment: linux-jammy-aarch64-py3.10
-docker-image: ${{ needs.aarch64-opbenchmark-build.outputs.docker-image }}
-test-matrix: ${{ needs.aarch64-opbenchmark-build.outputs.test-matrix }}
+build-environment: linux-jammy-py3.10-gcc11-build
+docker-image: ${{ needs.opbenchmark-build.outputs.docker-image }}
+test-matrix: ${{ needs.opbenchmark-build.outputs.test-matrix }}
 secrets: inherit
.github/workflows/operator_microbenchmark.yml (54 changed lines)
@@ -18,7 +18,6 @@ permissions:
  contents: read

  jobs:
- # H100 A100 runners
  opmicrobenchmark-build:
  if: github.repository_owner == 'pytorch'
  name: opmicrobenchmark-build
@@ -45,56 +44,3 @@ jobs:
  docker-image: ${{ needs.opmicrobenchmark-build.outputs.docker-image }}
  test-matrix: ${{ needs.opmicrobenchmark-build.outputs.test-matrix }}
  secrets: inherit

- # B200 runner
- opmicrobenchmark-build-b200:
- if: github.repository_owner == 'pytorch'
- name: opmicrobenchmark-build-b200
- uses: ./.github/workflows/_linux-build.yml
- with:
- runner: linux.12xlarge.memory
- build-environment: linux-jammy-cuda12.8-py3.10-gcc9-sm100
- docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc11
- cuda-arch-list: '10.0'
- test-matrix: |
- { include: [
- { config: "operator_microbenchmark_test", shard: 1, num_shards: 1, runner: "linux.dgx.b200" },
- ]}
- secrets: inherit

- opmicrobenchmark-test-b200:
- name: opmicrobenchmark-test-b200
- uses: ./.github/workflows/_linux-test.yml
- needs: opmicrobenchmark-build-b200
- with:
- timeout-minutes: 500
- build-environment: linux-jammy-cuda12.8-py3.10-gcc9-sm100
- docker-image: ${{ needs.opmicrobenchmark-build-b200.outputs.docker-image }}
- test-matrix: ${{ needs.opmicrobenchmark-build-b200.outputs.test-matrix }}
- aws-role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_s3_and_ecr_read_only
- secrets: inherit

- # ROCM MI300 runner
- opmicrobenchmark-build-rocm:
- if: github.repository_owner == 'pytorch'
- name: opmicrobenchmark-build-rocm
- uses: ./.github/workflows/_linux-build.yml
- with:
- build-environment: linux-jammy-rocm-py3_10
- docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3-benchmarks
- test-matrix: |
- { include: [
- { config: "operator_microbenchmark_test", shard: 1, num_shards: 1, runner: "linux.rocm.gpu.gfx942.1" },
- ]}
- secrets: inherit

- opmicrobenchmark-test-rocm:
- name: opmicrobenchmark-test-rocm
- uses: ./.github/workflows/_rocm-test.yml
- needs: opmicrobenchmark-build-rocm
- with:
- timeout-minutes: 500
- build-environment: linux-jammy-rocm-py3_10
- docker-image: ${{ needs.opmicrobenchmark-build-rocm.outputs.docker-image }}
- test-matrix: ${{ needs.opmicrobenchmark-build-rocm.outputs.test-matrix }}
- secrets: inherit

.github/workflows/periodic.yml (31 changed lines)
@@ -147,16 +147,15 @@ jobs:
  runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
  build-environment: linux-jammy-cuda12.8-py3.10-gcc9-debug
  docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc9
- cuda-arch-list: 8.9
  test-matrix: |
  { include: [
- { config: "default", shard: 1, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g6.4xlarge.experimental.nvidia.gpu", owners: ["oncall:debug-build"] },
+ { config: "default", shard: 1, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu", owners: ["oncall:debug-build"] },
- { config: "default", shard: 2, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g6.4xlarge.experimental.nvidia.gpu", owners: ["oncall:debug-build"] },
+ { config: "default", shard: 2, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu", owners: ["oncall:debug-build"] },
- { config: "default", shard: 3, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g6.4xlarge.experimental.nvidia.gpu", owners: ["oncall:debug-build"] },
+ { config: "default", shard: 3, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu", owners: ["oncall:debug-build"] },
- { config: "default", shard: 4, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g6.4xlarge.experimental.nvidia.gpu", owners: ["oncall:debug-build"] },
+ { config: "default", shard: 4, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu", owners: ["oncall:debug-build"] },
- { config: "default", shard: 5, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g6.4xlarge.experimental.nvidia.gpu", owners: ["oncall:debug-build"] },
+ { config: "default", shard: 5, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu", owners: ["oncall:debug-build"] },
- { config: "default", shard: 6, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g6.4xlarge.experimental.nvidia.gpu", owners: ["oncall:debug-build"] },
+ { config: "default", shard: 6, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu", owners: ["oncall:debug-build"] },
- { config: "default", shard: 7, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g6.4xlarge.experimental.nvidia.gpu", owners: ["oncall:debug-build"] },
+ { config: "default", shard: 7, num_shards: 7, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu", owners: ["oncall:debug-build"] },
  ]}
  secrets: inherit

@@ -183,11 +182,11 @@ jobs:
  docker-image-name: ci-image:pytorch-linux-jammy-cuda13.0-cudnn9-py3-gcc11
  test-matrix: |
  { include: [
- { config: "nogpu_AVX512", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
+ { config: "nogpu_AVX512", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g4dn.4xlarge.nvidia.gpu" },
- { config: "nogpu_AVX512", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
+ { config: "nogpu_AVX512", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g4dn.4xlarge.nvidia.gpu" },
- { config: "nogpu_AVX512", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
+ { config: "nogpu_AVX512", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g4dn.4xlarge.nvidia.gpu" },
- { config: "nogpu_NO_AVX2", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
+ { config: "nogpu_NO_AVX2", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g4dn.4xlarge.nvidia.gpu" },
- { config: "nogpu_NO_AVX2", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
+ { config: "nogpu_NO_AVX2", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g4dn.4xlarge.nvidia.gpu" },
  { config: "jit_legacy", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g4dn.4xlarge.nvidia.gpu" },
  ]}
  secrets: inherit
@@ -214,9 +213,9 @@ jobs:
  docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3
  test-matrix: |
  { include: [
- { config: "distributed", shard: 1, num_shards: 3, runner: "linux.rocm.gpu.mi250.4", owners: ["module:rocm", "oncall:distributed"] },
+ { config: "distributed", shard: 1, num_shards: 3, runner: "linux.rocm.gpu.4", owners: ["module:rocm", "oncall:distributed"] },
- { config: "distributed", shard: 2, num_shards: 3, runner: "linux.rocm.gpu.mi250.4", owners: ["module:rocm", "oncall:distributed"] },
+ { config: "distributed", shard: 2, num_shards: 3, runner: "linux.rocm.gpu.4", owners: ["module:rocm", "oncall:distributed"] },
- { config: "distributed", shard: 3, num_shards: 3, runner: "linux.rocm.gpu.mi250.4", owners: ["module:rocm", "oncall:distributed"] },
+ { config: "distributed", shard: 3, num_shards: 3, runner: "linux.rocm.gpu.4", owners: ["module:rocm", "oncall:distributed"] },
  ]}
  secrets: inherit

.github/workflows/pull.yml (1 changed line)
@@ -127,6 +127,7 @@ jobs:
  uses: ./.github/workflows/_linux-build.yml
  needs: get-label-type
  with:
+ # More memory is needed to build with asan
  runner: linux.2xlarge.memory
  runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
  build-environment: linux-jammy-py3.10-clang18-asan

.github/workflows/rocm-mi355.yml (19 changed lines)
@@ -1,9 +1,6 @@
  name: rocm-mi355

  on:
- push:
- tags:
- - ciflow/rocm-mi355/*
  workflow_dispatch:
  schedule:
  - cron: 30 11,1 * * * # about 4:30am PDT and 6:30pm PDT
@@ -45,12 +42,12 @@ jobs:
  sync-tag: rocm-build
  test-matrix: |
  { include: [
- { config: "default", shard: 1, num_shards: 6, runner: "linux.rocm.gpu.mi355.1" },
+ { config: "default", shard: 1, num_shards: 6, runner: "linux.rocm.gpu.mi355.2" },
- { config: "default", shard: 2, num_shards: 6, runner: "linux.rocm.gpu.mi355.1" },
+ { config: "default", shard: 2, num_shards: 6, runner: "linux.rocm.gpu.mi355.2" },
- { config: "default", shard: 3, num_shards: 6, runner: "linux.rocm.gpu.mi355.1" },
+ { config: "default", shard: 3, num_shards: 6, runner: "linux.rocm.gpu.mi355.2" },
- { config: "default", shard: 4, num_shards: 6, runner: "linux.rocm.gpu.mi355.1" },
+ { config: "default", shard: 4, num_shards: 6, runner: "linux.rocm.gpu.mi355.2" },
- { config: "default", shard: 5, num_shards: 6, runner: "linux.rocm.gpu.mi355.1" },
+ { config: "default", shard: 5, num_shards: 6, runner: "linux.rocm.gpu.mi355.2" },
- { config: "default", shard: 6, num_shards: 6, runner: "linux.rocm.gpu.mi355.1" },
+ { config: "default", shard: 6, num_shards: 6, runner: "linux.rocm.gpu.mi355.2" },
  ]}
  secrets: inherit

@@ -67,7 +64,5 @@ jobs:
  build-environment: linux-noble-rocm-py3.12-mi355
  docker-image: ${{ needs.linux-noble-rocm-py3_12-build.outputs.docker-image }}
  test-matrix: ${{ needs.linux-noble-rocm-py3_12-build.outputs.test-matrix }}
- tests-to-include: >-
+ tests-to-include: "test_nn test_torch test_cuda test_ops test_unary_ufuncs test_binary_ufuncs test_autograd inductor/test_torchinductor"
- ${{ github.event_name == 'schedule' && 'test_nn test_torch test_cuda test_ops test_unary_ufuncs test_binary_ufuncs test_autograd inductor/test_torchinductor test_matmul_cuda test_scaled_matmul_cuda'
- || '' }}
  secrets: inherit

.github/workflows/rocm-navi31.yml (63 changed lines, file removed)
@@ -1,63 +0,0 @@
- name: rocm-navi31

- on:
- push:
- tags:
- - ciflow/rocm-navi31/*
- workflow_dispatch:
- schedule:
- # We have several schedules so jobs can check github.event.schedule to activate only for a fraction of the runs.
- # Also run less frequently on weekends.
- - cron: 45 */2 * * 1-5
- - cron: 45 4,12 * * 0,6

- concurrency:
- group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}
- cancel-in-progress: true

- permissions: read-all

- jobs:
- target-determination:
- if: github.repository_owner == 'pytorch'
- name: before-test
- uses: ./.github/workflows/target_determination.yml
- permissions:
- id-token: write
- contents: read

- linux-jammy-rocm-py3_10-build:
- if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }}
- name: linux-jammy-rocm-py3.10
- uses: ./.github/workflows/_linux-build.yml
- with:
- build-environment: linux-jammy-rocm-py3.10
- docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3
- sync-tag: rocm-build
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 2, runner: "linux.rocm.gpu.gfx1100" },
- { config: "default", shard: 2, num_shards: 2, runner: "linux.rocm.gpu.gfx1100" },
- ]}
- secrets: inherit

- linux-jammy-rocm-py3_10-test:
- permissions:
- id-token: write
- contents: read
- name: linux-jammy-rocm-py3_10
- uses: ./.github/workflows/_rocm-test.yml
- needs:
- - linux-jammy-rocm-py3_10-build
- - target-determination
- with:
- build-environment: linux-jammy-rocm-py3.10
- docker-image: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.test-matrix }}
- tests-to-include: >-
- ${{ github.event_name == 'schedule' && 'test_nn test_torch test_cuda test_ops test_unary_ufuncs test_binary_ufuncs
- test_autograd inductor/test_torchinductor inductor/test_kernel_benchmark
- inductor/test_pad_mm inductor/test_benchmark_fusion inductor/test_aot_inductor
- inductor/test_torchinductor inductor/test_decompose_mem_bound_mm
- inductor/test_flex_attention inductor/test_max_autotune' || '' }}
- secrets: inherit

.github/workflows/slow.yml (1 changed line)
@@ -140,6 +140,7 @@ jobs:
  uses: ./.github/workflows/_linux-build.yml
  needs: get-label-type
  with:
+ # More memory is needed to build with asan
  runner: linux.2xlarge.memory
  runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
  build-environment: linux-jammy-py3.10-clang18-asan

.github/workflows/trunk-tagging.yml (149 changed lines)
@@ -58,10 +58,8 @@ jobs:
  else
  COMMIT_SHA="${{ github.sha }}"
  fi
- {
+ echo "sha=${COMMIT_SHA}" >> "${GITHUB_OUTPUT}"
- echo "sha=${COMMIT_SHA}"
+ echo "tag_name=trunk/${COMMIT_SHA}" >> "${GITHUB_OUTPUT}"
- echo "tag_name=trunk/${COMMIT_SHA}"
- } >> "${GITHUB_OUTPUT}"

  - name: Validate commit SHA
  run: |
@@ -89,7 +87,7 @@ jobs:
  echo "✅ Commit ${COMMIT_SHA} is valid (automatic push trigger)"
  fi

- - name: Create and push tag(s) with retry
+ - name: Create and push tag with retry
  id: check_tag
  env:
  TAG_NAME: ${{ steps.commit.outputs.tag_name }}
@@ -114,23 +112,14 @@ jobs:
  return 1
  }

- # Counters for summary reporting
+ # Exit early if tag already exists
- created_count=0
+ if check_tag_exists; then
- skipped_count=0
+ echo "✅ Tag already exists - no action needed"
- failed_count=0
+ echo "exists=true" >> "${GITHUB_OUTPUT}"
+ exit 0
+ fi

- # Always write outputs once on exit
+ echo "Tag ${TAG_NAME} does not exist, proceeding with creation"
- finish() {
- set +e
- if [ -n "${GITHUB_OUTPUT:-}" ]; then
- {
- echo "created_count=${created_count}"
- echo "skipped_count=${skipped_count}"
- echo "failed_count=${failed_count}"
- } >> "${GITHUB_OUTPUT}"
- fi
- }
- trap finish EXIT

  # Retry configuration
  MAX_RETRIES=5
@@ -205,111 +194,31 @@ jobs:
  }
  }

- # New behavior for push events: enumerate commits in the push and tag each one.
+ # Execute with retry
- # For workflow_dispatch, retain existing single-SHA behavior.
+ if retry_with_backoff "tag_with_retry" "Creating tag ${TAG_NAME} for commit ${COMMIT_SHA}"; then
+ echo "exists=false" >> "${GITHUB_OUTPUT}"
- # Always fetch tags once up front to improve idempotency in loops
- git fetch origin --tags --quiet || true

- if [ "${{ github.event_name }}" = "push" ]; then
- BEFORE_SHA="${{ github.event.before }}"
- AFTER_SHA="${{ github.sha }}" # same as event.after

- # List commits introduced by this push (old..new), oldest first for stable ordering
- commits_file="$(mktemp)"
- git rev-list --reverse "${BEFORE_SHA}..${AFTER_SHA}" > "${commits_file}"

- if [ ! -s "${commits_file}" ]; then
- echo "No new commits found between ${BEFORE_SHA}..${AFTER_SHA}; nothing to tag."
- rm -f "${commits_file}"
- exit 0
- fi

- commit_count="$(wc -l < "${commits_file}" | tr -d ' ')"
- echo "Found ${commit_count} commit(s) to tag for push:"
- while IFS= read -r sha; do
- printf ' %s\n' "${sha}"
- done < "${commits_file}"

- while IFS= read -r sha; do
- TAG_NAME="trunk/${sha}"
- COMMIT_SHA="${sha}"

- # If tag already exists locally or remotely, skip (idempotent)
- if check_tag_exists; then
- echo "✅ Tag ${TAG_NAME} already exists - skipping"
- skipped_count=$((skipped_count + 1))
- continue
- fi

- echo "Tag ${TAG_NAME} does not exist, proceeding with creation"

- if retry_with_backoff "tag_with_retry" "Creating tag ${TAG_NAME} for commit ${COMMIT_SHA}"; then
- created_count=$((created_count + 1))
- else
- echo "Tag creation failed after all retry attempts for ${TAG_NAME}"
- failed_count=$((failed_count + 1))
- fi
- done < "${commits_file}"

- rm -f "${commits_file}"

- if [ "${failed_count}" -gt 0 ]; then
- exit 1
- fi
  exit 0
  else
- # workflow_dispatch path (single SHA tagging preserved)
+ echo "Tag creation failed after all retry attempts"
+ exit 1
- # Exit early if tag already exists
- if check_tag_exists; then
- echo "✅ Tag already exists - no action needed"
- skipped_count=1
- exit 0
- fi

- echo "Tag ${TAG_NAME} does not exist, proceeding with creation"

- if retry_with_backoff "tag_with_retry" "Creating tag ${TAG_NAME} for commit ${COMMIT_SHA}"; then
- created_count=1
- exit 0
- else
- echo "Tag creation failed after all retry attempts"
- failed_count=1
- exit 1
- fi
  fi

  - name: Tag creation summary
  if: always()
  run: |
- if [ "${{ github.event_name }}" = "push" ]; then
+ if [ "${{ steps.check_tag.outputs.exists }}" = "true" ]; then
- echo "Trigger: push on main"
+ echo "✅ Tag ${{ steps.commit.outputs.tag_name }} already existed - no action needed"
- echo "Created: ${{ steps.check_tag.outputs.created_count }}"
+ elif [ "${{ job.status }}" = "success" ]; then
- echo "Skipped (already existed): ${{ steps.check_tag.outputs.skipped_count }}"
+ echo "✅ Successfully created tag ${{ steps.commit.outputs.tag_name }} for commit ${{ steps.commit.outputs.sha }}"
- echo "Failed: ${{ steps.check_tag.outputs.failed_count }}"
- if [ "${{ steps.check_tag.outputs.failed_count }}" = "0" ]; then
- echo "✅ Completed tagging for push range ${{ github.event.before }}..${{ github.sha }}"
- else
- echo "❌ Some tags failed to create for push range ${{ github.event.before }}..${{ github.sha }}"
- fi
  else
- if [ "${{ steps.check_tag.outputs.failed_count }}" = "0" ]; then
+ echo "❌ Failed to create tag ${{ steps.commit.outputs.tag_name }} for commit ${{ steps.commit.outputs.sha }}"
- if [ "${{ steps.check_tag.outputs.created_count }}" = "0" ]; then
+ fi
- echo "✅ Tag ${{ steps.commit.outputs.tag_name }} already existed - no action needed"
- else
+ echo ""
- echo "✅ Successfully created tag ${{ steps.commit.outputs.tag_name }} for commit ${{ steps.commit.outputs.sha }}"
+ echo "Tag details:"
- fi
+ echo " Name: ${{ steps.commit.outputs.tag_name }}"
- else
+ echo " Commit: ${{ steps.commit.outputs.sha }}"
- echo "❌ Failed to create tag ${{ steps.commit.outputs.tag_name }} for commit ${{ steps.commit.outputs.sha }}"
+ echo " Trigger: ${{ github.event_name }}"
- fi
+ if [ -n "${{ github.event.inputs.commit_sha }}" ]; then
+ echo " Manual commit: ${{ github.event.inputs.commit_sha }}"
- echo ""
- echo "Tag details:"
- echo " Name: ${{ steps.commit.outputs.tag_name }}"
- echo " Commit: ${{ steps.commit.outputs.sha }}"
- echo " Trigger: ${{ github.event_name }}"
- if [ -n "${{ github.event.inputs.commit_sha }}" ]; then
- echo " Manual commit: ${{ github.event.inputs.commit_sha }}"
- fi
  fi

.github/workflows/trunk.yml (48 changed lines)
@@ -56,7 +56,7 @@ jobs:
  docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc11
  build-generates-artifacts: false
  runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
- runner: "linux.c7i.4xlarge"
+ runner: "linux.4xlarge"
  test-matrix: |
  { include: [
  { config: "default", shard: 1, num_shards: 1 },
@@ -160,10 +160,9 @@ jobs:
  runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral"
  test-matrix: |
  { include: [
- { config: "default", shard: 1, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
+ { config: "default", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
- { config: "default", shard: 2, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
+ { config: "default", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
- { config: "default", shard: 3, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
+ { config: "default", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
- { config: "default", shard: 4, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
  ]}
  secrets: inherit

@@ -180,13 +179,13 @@ jobs:
  disable-monitor: false
  secrets: inherit

- win-vs2022-cuda12_8-py3-build:
+ win-vs2022-cuda12_6-py3-build:
- name: win-vs2022-cuda12.8-py3
+ name: win-vs2022-cuda12.6-py3
  uses: ./.github/workflows/_win-build.yml
  needs: get-label-type
  with:
- build-environment: win-vs2022-cuda12.8-py3
+ build-environment: win-vs2022-cuda12.6-py3
- cuda-version: "12.8"
+ cuda-version: "12.6"
  runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral"
  secrets: inherit

@@ -204,6 +203,7 @@ jobs:
  { include: [
  { config: "default", shard: 1, num_shards: 2, runner: "linux.rocm.gpu.gfx942.1" },
  { config: "default", shard: 2, num_shards: 2, runner: "linux.rocm.gpu.gfx942.1" },
+ { config: "distributed", shard: 1, num_shards: 1, runner: "linux.rocm.gpu.gfx942.4" },
  ]}
  secrets: inherit

@@ -221,7 +221,7 @@ jobs:
  build-environment: linux-jammy-rocm-py3.10
  docker-image: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.docker-image }}
  test-matrix: ${{ needs.linux-jammy-rocm-py3_10-build.outputs.test-matrix }}
- tests-to-include: "test_nn test_torch test_cuda test_ops test_unary_ufuncs test_binary_ufuncs test_autograd inductor/test_torchinductor"
+ tests-to-include: "test_nn test_torch test_cuda test_ops test_unary_ufuncs test_binary_ufuncs test_autograd inductor/test_torchinductor distributed/test_c10d_common distributed/test_c10d_nccl"
  secrets: inherit

  inductor-build:
@@ -234,23 +234,6 @@ jobs:
  cuda-arch-list: '8.0'
  secrets: inherit

- # Test cross-compiled models with Windows libs extracted from wheel
- cross-compile-linux-test:
- name: cross-compile-linux-test
- uses: ./.github/workflows/_linux-test.yml
- needs:
- - linux-jammy-cuda12_8-py3_10-gcc11-build
- - get-label-type
- - win-vs2022-cuda12_8-py3-build
- with:
- build-environment: linux-jammy-cuda12.8-py3.10-gcc11
- docker-image: ${{ needs.linux-jammy-cuda12_8-py3_10-gcc11-build.outputs.docker-image }}
- test-matrix: |
- { include: [
- { config: "aoti_cross_compile_for_windows", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g6.4xlarge.experimental.nvidia.gpu", win_torch_wheel_artifact: "win-vs2022-cuda12.8-py3" },
- ]}
- secrets: inherit

  verify-cachebench-cpu-build:
  name: verify-cachebench-cpu-build
  uses: ./.github/workflows/_linux-build.yml
@@ -300,14 +283,3 @@ jobs:
  docker-image: ${{ needs.linux-jammy-py3-clang12-executorch-build.outputs.docker-image }}
  test-matrix: ${{ needs.linux-jammy-py3-clang12-executorch-build.outputs.test-matrix }}
  secrets: inherit

- linux-jammy-py3_10-gcc11-full-debug-build-only:
- name: linux-jammy-py3.10-gcc11-full-debug-build-only
- uses: ./.github/workflows/_linux-build.yml
- needs: get-label-type
- with:
- runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
- runner: linux.2xlarge.memory
- build-environment: linux-jammy-py3.10-gcc11-full-debug-build-only
- docker-image-name: ci-image:pytorch-linux-jammy-py3.10-gcc11
- secrets: inherit

.github/workflows/update-viablestrict.yml (2 changed lines)
@@ -23,7 +23,7 @@ jobs:
  with:
  repository: pytorch/pytorch
  stable-branch: viable/strict
- requires: '[\"pull\", \"trunk\", \"lint\", \"linux-aarch64\"]'
+ requires: '[\"pull\", \"trunk\", \"lint\", \"^linux-binary-manywheel$\", \"^linux-binary-libtorch-release$\", \"linux-aarch64\"]'
  secret-bot-token: ${{ secrets.MERGEBOT_TOKEN }}
  clickhouse-url: ${{ secrets.CLICKHOUSE_URL }}
  clickhouse-username: ${{ secrets.CLICKHOUSE_VIABLESTRICT_USERNAME }}

.github/workflows/vllm.yml (4 changed lines)
@@ -46,7 +46,7 @@ jobs:
  runner: linux.24xlarge.memory
  test-matrix: |
  { include: [
  { config: "vllm_basic_correctness_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
  { config: "vllm_basic_models_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
  { config: "vllm_entrypoints_test", shard: 1, num_shards: 1,runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
  { config: "vllm_regression_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
@@ -54,7 +54,7 @@ jobs:
  { config: "vllm_pytorch_compilation_unit_tests", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
  { config: "vllm_lora_28_failure_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
  { config: "vllm_multi_model_test_28_failure_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu"},
- { config: "vllm_language_model_test_extended_generation_28_failure_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu"},
+ { config: "vllm_languagde_model_test_extended_generation_28_failure_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu"},
  { config: "vllm_distributed_test_2_gpu_28_failure_test", shard: 1, num_shards: 1, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
  { config: "vllm_lora_test", shard: 0, num_shards: 4, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },
  { config: "vllm_lora_test", shard: 1, num_shards: 4, runner: "linux.g6.4xlarge.experimental.nvidia.gpu" },

.github/workflows/xpu.yml (4 changed lines)
@@ -35,7 +35,7 @@ jobs:
  runner_prefix: ${{ needs.get-label-type.outputs.label-type }}
  build-environment: linux-jammy-xpu-n-1-py3.10
  docker-image-name: ci-image:pytorch-linux-jammy-xpu-n-1-py3
- runner: linux.c7i.12xlarge
+ runner: linux.12xlarge
  test-matrix: |
  { include: [
  { config: "default", shard: 1, num_shards: 6, runner: "linux.idc.xpu" },
@@ -56,7 +56,7 @@ jobs:
  runner_prefix: ${{ needs.get-label-type.outputs.label-type }}
  build-environment: linux-jammy-xpu-n-py3.10
  docker-image-name: ci-image:pytorch-linux-jammy-xpu-n-py3
- runner: linux.c7i.12xlarge
+ runner: linux.12xlarge
  test-matrix: |
  { include: [
  { config: "default", shard: 1, num_shards: 8, runner: "linux.idc.xpu" },

.gitignore (4 changed lines)
@@ -88,7 +88,7 @@ torch_compile_debug/
  # Listed manually because some files in this directory are not generated
  torch/testing/_internal/generated/annotated_fn_args.py
  torch/testing/_internal/data/*.pt
- torch/headeronly/version.h
+ torch/csrc/api/include/torch/version.h
  torch/csrc/cudnn/cuDNN.cpp
  torch/csrc/generated
  torch/csrc/generic/TensorMethods.cpp
@@ -374,7 +374,6 @@ third_party/ruy/
  third_party/glog/

  # Virtualenv
- .venv/
  venv/

  # Log files
@@ -396,4 +395,3 @@ android/pytorch_android_torchvision/.cxx
  CLAUDE.local.md
  /test_*.py
  /debug_*.py
- CLAUDE_CONTEXT/

@@ -18,7 +18,6 @@ exclude_patterns = [
  'torch/_inductor/autoheuristic/artifacts/**',
  'scripts/**',
  'test/generated_type_hints_smoketest.py',
- 'test/test_torchfuzz_repros.py',
  # CPython tests
  'test/dynamo/cpython/**',
  # Tests from the NumPy test suite
@@ -28,7 +27,6 @@ exclude_patterns = [
  'torch/lib/**',
  'venv/**',
  '**/*.pyi',
- "tools/experimental/torchfuzz/**",
  'tools/test/test_selective_build.py',
  ]
  command = [
@@ -198,7 +196,7 @@ exclude_patterns = [
  'tools/test/gen_operators_yaml_test.py',
  'tools/test/gen_oplist_test.py',
  'tools/test/test_selective_build.py',
- 'tools/experimental/torchfuzz/**',
+ 'tools/experimental/dynamic_shapes/torchfuzz/**',
  ]
  command = [
  'python3',
@@ -209,46 +207,6 @@ command = [
  '@{{PATHSFILE}}'
  ]

- [[linter]]
- code = 'PYREFLY'
- include_patterns = [
- 'torch/**/*.py',
- 'torch/**/*.pyi',
- 'torchgen/**/*.py',
- 'torchgen/**/*.pyi',
- 'functorch/**/*.py',
- 'functorch/**/*.pyi',
- ]
- exclude_patterns = []
- command = [
- 'python3',
- 'tools/linter/adapters/pyrefly_linter.py',
- '--config=pyrefly.toml',
- ]
- init_command = [
- 'python3',
- 'tools/linter/adapters/pip_init.py',
- '--dry-run={{DRYRUN}}',
- 'numpy==2.1.0 ; python_version >= "3.12"',
- 'expecttest==0.3.0',
- 'pyrefly==0.36.2',
- 'sympy==1.13.3',
- 'types-requests==2.27.25',
- 'types-pyyaml==6.0.2',
- 'types-tabulate==0.8.8',
- 'types-protobuf==5.29.1.20250403',
- 'types-setuptools==79.0.0.20250422',
- 'types-jinja2==2.11.9',
- 'types-colorama==0.4.6',
- 'filelock==3.18.0',
- 'junitparser==2.1.1',
- 'rich==14.1.0',
- 'optree==0.17.0',
- 'types-openpyxl==3.1.5.20250919',
- 'types-python-dateutil==2.9.0.20251008'
- ]

  [[linter]]
  code = 'CLANGTIDY'
  include_patterns = [
@@ -1613,7 +1571,6 @@ exclude_patterns = [
  'torch/_inductor/fx_passes/serialized_patterns/**',
  'torch/_inductor/autoheuristic/artifacts/**',
  'test/dynamo/cpython/**',
- 'test/test_torchfuzz_repros.py',
  'scripts/**',
  'third_party/**',
  'fb/**',

@@ -13,9 +13,6 @@ load(":build_variables.bzl", "jit_core_sources", "lazy_tensor_ts_sources", "libt
  load(":ufunc_defs.bzl", "aten_ufunc_generated_cpu_kernel_sources", "aten_ufunc_generated_cpu_sources", "aten_ufunc_generated_cuda_sources")
  load("//:tools/bazel.bzl", "rules")

- # Export files for use by torch/headeronly (where version.h generation now lives)
- exports_files(["version.txt"])

  define_targets(rules = rules)

  COMMON_COPTS = [
@@ -693,9 +690,7 @@ cc_library(
  "torch/csrc/*/generated/*.h",
  "torch/csrc/jit/serialization/mobile_bytecode_generated.h",
  ] + torch_cuda_headers,
- ) + GENERATED_AUTOGRAD_CPP + [
+ ) + GENERATED_AUTOGRAD_CPP + [":version_h"],
- "//torch/headeronly:version_h",
- ],
  includes = [
  "third_party/kineto/libkineto/include",
  "torch/csrc",

Some files were not shown because too many files have changed in this diff.