mirror of https://github.com/pytorch/pytorch.git
synced 2025-10-26 08:34:52 +08:00

Compare commits: v2.8.0-rc2...benchmarki (6 commits)

| SHA1 |
|---|
| 0f49e915a9 |
| 2f1217f944 |
| e0bf01e87b |
| 3b5ae0e9fc |
| 5f5f654a3e |
| 21931cbbc6 |

@@ -3,7 +3,9 @@ set -eux -o pipefail

GPU_ARCH_VERSION=${GPU_ARCH_VERSION:-}

if [[ "$GPU_ARCH_VERSION" == *"12.9"* ]]; then
if [[ "$GPU_ARCH_VERSION" == *"12.6"* ]]; then
    export TORCH_CUDA_ARCH_LIST="9.0"
elif [[ "$GPU_ARCH_VERSION" == *"12.8"* ]]; then
    export TORCH_CUDA_ARCH_LIST="9.0;10.0;12.0"
fi

@@ -25,7 +27,6 @@ if [ "$DESIRED_CUDA" = "cpu" ]; then
    USE_PRIORITIZED_TEXT_FOR_LD=1 python /pytorch/.ci/aarch64_linux/aarch64_wheel_ci_build.py --enable-mkldnn
else
    echo "BASE_CUDA_VERSION is set to: $DESIRED_CUDA"
    export USE_SYSTEM_NCCL=1
    #USE_PRIORITIZED_TEXT_FOR_LD for enable linker script optimization https://github.com/pytorch/pytorch/pull/121975/files
    USE_PRIORITIZED_TEXT_FOR_LD=1 python /pytorch/.ci/aarch64_linux/aarch64_wheel_ci_build.py --enable-mkldnn --enable-cuda
fi

@@ -88,6 +88,7 @@ def package_cuda_wheel(wheel_path, desired_cuda) -> None:
        "/usr/local/cuda/lib64/libcusparseLt.so.0",
        "/usr/local/cuda/lib64/libcusolver.so.11",
        "/usr/local/cuda/lib64/libcurand.so.10",
        "/usr/local/cuda/lib64/libnvToolsExt.so.1",
        "/usr/local/cuda/lib64/libnvJitLink.so.12",
        "/usr/local/cuda/lib64/libnvrtc.so.12",
        "/usr/local/cuda/lib64/libcudnn_adv.so.9",

@@ -107,9 +108,9 @@ def package_cuda_wheel(wheel_path, desired_cuda) -> None:
        "/usr/local/lib/libnvpl_blas_core.so.0",
    ]

    if "129" in desired_cuda:
    if "128" in desired_cuda:
        libs_to_copy += [
            "/usr/local/cuda/lib64/libnvrtc-builtins.so.12.9",
            "/usr/local/cuda/lib64/libnvrtc-builtins.so.12.8",
            "/usr/local/cuda/lib64/libcufile.so.0",
            "/usr/local/cuda/lib64/libcufile_rdma.so.1",
        ]

@@ -1,4 +1,4 @@
ARG CUDA_VERSION=12.6
ARG CUDA_VERSION=12.4
ARG BASE_TARGET=cuda${CUDA_VERSION}
ARG ROCM_IMAGE=rocm/dev-almalinux-8:6.3-complete
FROM amd64/almalinux:8.10-20250519 as base

@@ -52,6 +52,10 @@ ENV CUDA_VERSION=${CUDA_VERSION}
# Make things in our path by default
ENV PATH=/usr/local/cuda-${CUDA_VERSION}/bin:$PATH

FROM cuda as cuda11.8
RUN bash ./install_cuda.sh 11.8
ENV DESIRED_CUDA=11.8

FROM cuda as cuda12.6
RUN bash ./install_cuda.sh 12.6
ENV DESIRED_CUDA=12.6

@@ -60,10 +64,6 @@ FROM cuda as cuda12.8
RUN bash ./install_cuda.sh 12.8
ENV DESIRED_CUDA=12.8

FROM cuda as cuda12.9
RUN bash ./install_cuda.sh 12.9
ENV DESIRED_CUDA=12.9

FROM ${ROCM_IMAGE} as rocm
ENV PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
ADD ./common/install_mkl.sh install_mkl.sh

@@ -78,8 +78,7 @@ RUN bash ./install_mnist.sh
FROM base as all_cuda
COPY --from=cuda11.8 /usr/local/cuda-11.8 /usr/local/cuda-11.8
COPY --from=cuda12.6 /usr/local/cuda-12.6 /usr/local/cuda-12.6
COPY --from=cuda12.8 /usr/local/cuda-12.8 /usr/local/cuda-12.8
COPY --from=cuda12.9 /usr/local/cuda-12.9 /usr/local/cuda-12.9
COPY --from=cuda12.4 /usr/local/cuda-12.8 /usr/local/cuda-12.8

# Final step
FROM ${BASE_TARGET} as final

@@ -50,21 +50,30 @@ if [[ "$image" == *xla* ]]; then
    exit 0
fi

if [[ "$image" == *-jammy* ]]; then
if [[ "$image" == *-focal* ]]; then
    UBUNTU_VERSION=20.04
elif [[ "$image" == *-jammy* ]]; then
    UBUNTU_VERSION=22.04
elif [[ "$image" == *ubuntu* ]]; then
    extract_version_from_image_name ubuntu UBUNTU_VERSION
elif [[ "$image" == *centos* ]]; then
    extract_version_from_image_name centos CENTOS_VERSION
fi

if [ -n "${UBUNTU_VERSION}" ]; then
    OS="ubuntu"
elif [ -n "${CENTOS_VERSION}" ]; then
    OS="centos"
else
    echo "Unable to derive operating system base..."
    exit 1
fi

DOCKERFILE="${OS}/Dockerfile"
if [[ "$image" == *rocm* ]]; then
# When using ubuntu - 22.04, start from Ubuntu docker image, instead of nvidia/cuda docker image.
if [[ "$image" == *cuda* && "$UBUNTU_VERSION" != "22.04" ]]; then
    DOCKERFILE="${OS}-cuda/Dockerfile"
elif [[ "$image" == *rocm* ]]; then
    DOCKERFILE="${OS}-rocm/Dockerfile"
elif [[ "$image" == *xpu* ]]; then
    DOCKERFILE="${OS}-xpu/Dockerfile"

@@ -89,8 +98,8 @@ tag=$(echo $image | awk -F':' '{print $2}')
# configuration, so we hardcode everything here rather than do it
# from scratch
case "$tag" in
    pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc11)
        CUDA_VERSION=12.8.1
    pytorch-linux-focal-cuda12.6-cudnn9-py3-gcc11)
        CUDA_VERSION=12.6.3
        CUDNN_VERSION=9
        ANACONDA_PYTHON_VERSION=3.10
        GCC_VERSION=11

@@ -101,7 +110,7 @@ case "$tag" in
        TRITON=yes
        ;;
    pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc9-inductor-benchmarks)
        CUDA_VERSION=12.8.1
        CUDA_VERSION=12.8
        CUDNN_VERSION=9
        ANACONDA_PYTHON_VERSION=3.10
        GCC_VERSION=9

@@ -112,31 +121,7 @@ case "$tag" in
        TRITON=yes
        INDUCTOR_BENCHMARKS=yes
        ;;
    pytorch-linux-jammy-cuda12.8-cudnn9-py3.12-gcc9-inductor-benchmarks)
        CUDA_VERSION=12.8.1
        CUDNN_VERSION=9
        ANACONDA_PYTHON_VERSION=3.12
        GCC_VERSION=9
        VISION=yes
        KATEX=yes
        UCX_COMMIT=${_UCX_COMMIT}
        UCC_COMMIT=${_UCC_COMMIT}
        TRITON=yes
        INDUCTOR_BENCHMARKS=yes
        ;;
    pytorch-linux-jammy-cuda12.8-cudnn9-py3.13-gcc9-inductor-benchmarks)
        CUDA_VERSION=12.8.1
        CUDNN_VERSION=9
        ANACONDA_PYTHON_VERSION=3.13
        GCC_VERSION=9
        VISION=yes
        KATEX=yes
        UCX_COMMIT=${_UCX_COMMIT}
        UCC_COMMIT=${_UCC_COMMIT}
        TRITON=yes
        INDUCTOR_BENCHMARKS=yes
        ;;
    pytorch-linux-jammy-cuda12.6-cudnn9-py3-gcc9)
    pytorch-linux-focal-cuda12.6-cudnn9-py3-gcc9)
        CUDA_VERSION=12.6.3
        CUDNN_VERSION=9
        ANACONDA_PYTHON_VERSION=3.10

@@ -183,8 +168,8 @@ case "$tag" in
        TRITON=yes
        INDUCTOR_BENCHMARKS=yes
        ;;
    pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc9)
        CUDA_VERSION=12.8.1
    pytorch-linux-focal-cuda11.8-cudnn9-py3-gcc9)
        CUDA_VERSION=11.8.0
        CUDNN_VERSION=9
        ANACONDA_PYTHON_VERSION=3.10
        GCC_VERSION=9

@@ -194,25 +179,25 @@ case "$tag" in
        UCC_COMMIT=${_UCC_COMMIT}
        TRITON=yes
        ;;
    pytorch-linux-jammy-py3-clang12-onnx)
    pytorch-linux-focal-py3-clang10-onnx)
        ANACONDA_PYTHON_VERSION=3.9
        CLANG_VERSION=12
        CLANG_VERSION=10
        VISION=yes
        ONNX=yes
        ;;
    pytorch-linux-jammy-py3.9-clang12)
    pytorch-linux-focal-py3.9-clang10)
        ANACONDA_PYTHON_VERSION=3.9
        CLANG_VERSION=12
        CLANG_VERSION=10
        VISION=yes
        TRITON=yes
        ;;
    pytorch-linux-jammy-py3.11-clang12)
    pytorch-linux-focal-py3.11-clang10)
        ANACONDA_PYTHON_VERSION=3.11
        CLANG_VERSION=12
        CLANG_VERSION=10
        VISION=yes
        TRITON=yes
        ;;
    pytorch-linux-jammy-py3.9-gcc9)
    pytorch-linux-focal-py3.9-gcc9)
        ANACONDA_PYTHON_VERSION=3.9
        GCC_VERSION=9
        VISION=yes

@@ -267,9 +252,9 @@ case "$tag" in
        DOCS=yes
        INDUCTOR_BENCHMARKS=yes
        ;;
    pytorch-linux-jammy-cuda12.8-cudnn9-py3.9-clang12)
    pytorch-linux-jammy-cuda11.8-cudnn9-py3.9-clang12)
        ANACONDA_PYTHON_VERSION=3.9
        CUDA_VERSION=12.8.1
        CUDA_VERSION=11.8
        CUDNN_VERSION=9
        CLANG_VERSION=12
        VISION=yes

@@ -318,15 +303,15 @@ case "$tag" in
        GCC_VERSION=11
        TRITON_CPU=yes
        ;;
    pytorch-linux-jammy-linter)
    pytorch-linux-focal-linter)
        # TODO: Use 3.9 here because of this issue https://github.com/python/mypy/issues/13627.
        # We will need to update mypy version eventually, but that's for another day. The task
        # would be to upgrade mypy to 1.0.0 with Python 3.11
        PYTHON_VERSION=3.9
        ;;
    pytorch-linux-jammy-cuda12.8-cudnn9-py3.9-linter)
    pytorch-linux-jammy-cuda11.8-cudnn9-py3.9-linter)
        PYTHON_VERSION=3.9
        CUDA_VERSION=12.8.1
        CUDA_VERSION=11.8
        ;;
    pytorch-linux-jammy-aarch64-py3.10-gcc11)
        ANACONDA_PYTHON_VERSION=3.10

@@ -385,6 +370,14 @@ esac

tmp_tag=$(basename "$(mktemp -u)" | tr '[:upper:]' '[:lower:]')
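The `tmp_tag` line above builds a throwaway, Docker-safe image tag. A minimal sketch of the idiom (output value illustrative):

```bash
# mktemp -u prints a unique pathname without creating a file; basename keeps
# only the final component, and tr lowercases it (Docker tags must be lowercase).
tmp_tag=$(basename "$(mktemp -u)" | tr '[:upper:]' '[:lower:]')
echo "$tmp_tag"   # e.g. tmp.ab12cd34
```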

#when using cudnn version 8 install it separately from cuda
if [[ "$image" == *cuda* && ${OS} == "ubuntu" ]]; then
    IMAGE_NAME="nvidia/cuda:${CUDA_VERSION}-cudnn${CUDNN_VERSION}-devel-ubuntu${UBUNTU_VERSION}"
    if [[ ${CUDNN_VERSION} == 9 ]]; then
        IMAGE_NAME="nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}"
    fi
fi

no_cache_flag=""
progress_flag=""
# Do not use cache and progress=plain when in CI

@@ -401,6 +394,7 @@ docker build \
    --build-arg "LLVMDEV=${LLVMDEV:-}" \
    --build-arg "VISION=${VISION:-}" \
    --build-arg "UBUNTU_VERSION=${UBUNTU_VERSION}" \
    --build-arg "CENTOS_VERSION=${CENTOS_VERSION}" \
    --build-arg "DEVTOOLSET_VERSION=${DEVTOOLSET_VERSION}" \
    --build-arg "GLIBC_VERSION=${GLIBC_VERSION}" \
    --build-arg "CLANG_VERSION=${CLANG_VERSION}" \

@@ -39,7 +39,6 @@ RUN bash ./install_user.sh && rm install_user.sh

# Install conda and other packages (e.g., numpy, pytest)
ARG ANACONDA_PYTHON_VERSION
ARG BUILD_ENVIRONMENT
ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION
ENV PATH /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/bin:/opt/conda/bin:$PATH
COPY requirements-ci.txt /opt/conda/requirements-ci.txt

@@ -1 +1 @@
56392aa978594cc155fa8af48cd949f5b5f1823a
b173722085b3f555d6ba4533d6bbaddfd7c71144

@@ -1 +1 @@
v2.27.3-1
v2.26.5-1

@@ -1 +1 @@
ae324eeac8e102a2b40370e341460f3791353398
b0e26b7359c147b8aa0af686c20510fb9b15990a

@@ -1 +1 @@
5389ed797016010543ef1c7b88efc50f7521cb4e
c8757738a7418249896224430ce84888e8ecdd79

@@ -30,6 +30,18 @@ install_ubuntu() {
        maybe_libomp_dev=""
    fi

    # HACK: UCC testing relies on libnccl library from NVIDIA repo, and version 2.16 crashes
    # See https://github.com/pytorch/pytorch/pull/105260#issuecomment-1673399729
    # TODO: Eliminate this hack, we should not relay on apt-get installation
    # See https://github.com/pytorch/pytorch/issues/144768
    if [[ "$UBUNTU_VERSION" == "20.04"* && "$CUDA_VERSION" == "11.8"* ]]; then
        maybe_libnccl_dev="libnccl2=2.15.5-1+cuda11.8 libnccl-dev=2.15.5-1+cuda11.8 --allow-downgrades --allow-change-held-packages"
    elif [[ "$UBUNTU_VERSION" == "20.04"* && "$CUDA_VERSION" == "12.4"* ]]; then
        maybe_libnccl_dev="libnccl2=2.26.2-1+cuda12.4 libnccl-dev=2.26.2-1+cuda12.4 --allow-downgrades --allow-change-held-packages"
    else
        maybe_libnccl_dev=""
    fi

    # Install common dependencies
    apt-get update
    # TODO: Some of these may not be necessary

@@ -58,6 +70,7 @@ install_ubuntu() {
        libasound2-dev \
        libsndfile-dev \
        ${maybe_libomp_dev} \
        ${maybe_libnccl_dev} \
        software-properties-common \
        wget \
        sudo \
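For context on the `maybe_libnccl_dev` strings above: they are splatted into an `apt-get install` call, pinning exact package builds. A hedged sketch of the resulting command (versions taken from the diff):

```bash
# 'pkg=version' pins an exact build; --allow-downgrades permits moving to an
# older pinned version, and --allow-change-held-packages overrides apt holds.
apt-get install -y --allow-downgrades --allow-change-held-packages \
    libnccl2=2.26.2-1+cuda12.4 libnccl-dev=2.26.2-1+cuda12.4
```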

@@ -6,7 +6,7 @@ set -ex
if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
    BASE_URL="https://repo.anaconda.com/miniconda"
    CONDA_FILE="Miniconda3-latest-Linux-x86_64.sh"
    if [[ $(uname -m) == "aarch64" ]] || [[ "$BUILD_ENVIRONMENT" == *xpu* ]] || [[ "$BUILD_ENVIRONMENT" == *rocm* ]]; then
    if [[ $(uname -m) == "aarch64" ]] || [[ "$BUILD_ENVIRONMENT" == *xpu* ]]; then
        BASE_URL="https://github.com/conda-forge/miniforge/releases/latest/download" # @lint-ignore
        CONDA_FILE="Miniforge3-Linux-$(uname -m).sh"
    fi

@@ -64,11 +64,6 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
    # which is provided in libstdcxx 12 and up.
    conda_install libstdcxx-ng=12.3.0 --update-deps -c conda-forge

    # Miniforge installer doesn't install sqlite by default
    if [[ "$BUILD_ENVIRONMENT" == *rocm* ]]; then
        conda_install sqlite
    fi

    # Install PyTorch conda deps, as per https://github.com/pytorch/pytorch README
    if [[ $(uname -m) == "aarch64" ]]; then
        conda_install "openblas==0.3.29=*openmp*"

@@ -40,9 +40,37 @@ function install_cudnn {
    rm -rf tmp_cudnn
}

function install_118 {
    CUDNN_VERSION=9.1.0.70
    echo "Installing CUDA 11.8 and cuDNN ${CUDNN_VERSION} and NCCL and cuSparseLt-0.4.0"
    install_cuda 11.8.0 cuda_11.8.0_520.61.05_linux

    install_cudnn 11 $CUDNN_VERSION

    CUDA_VERSION=11.8 bash install_nccl.sh

    CUDA_VERSION=11.8 bash install_cusparselt.sh

    ldconfig
}

function install_124 {
    CUDNN_VERSION=9.1.0.70
    echo "Installing CUDA 12.4.1 and cuDNN ${CUDNN_VERSION} and NCCL and cuSparseLt-0.6.2"
    install_cuda 12.4.1 cuda_12.4.1_550.54.15_linux

    install_cudnn 12 $CUDNN_VERSION

    CUDA_VERSION=12.4 bash install_nccl.sh

    CUDA_VERSION=12.4 bash install_cusparselt.sh

    ldconfig
}

function install_126 {
    CUDNN_VERSION=9.10.2.21
    echo "Installing CUDA 12.6.3 and cuDNN ${CUDNN_VERSION} and NCCL and cuSparseLt-0.7.1"
    CUDNN_VERSION=9.5.1.17
    echo "Installing CUDA 12.6.3 and cuDNN ${CUDNN_VERSION} and NCCL and cuSparseLt-0.6.3"
    install_cuda 12.6.3 cuda_12.6.3_560.35.05_linux

    install_cudnn 12 $CUDNN_VERSION

@@ -54,20 +82,69 @@ function install_126 {
    ldconfig
}

function install_129 {
    CUDNN_VERSION=9.10.2.21
    echo "Installing CUDA 12.9.1 and cuDNN ${CUDNN_VERSION} and NCCL and cuSparseLt-0.7.1"
    # install CUDA 12.9.1 in the same container
    install_cuda 12.9.1 cuda_12.9.1_575.57.08_linux
function prune_118 {
    echo "Pruning CUDA 11.8 and cuDNN"
    #####################################################################################
    # CUDA 11.8 prune static libs
    #####################################################################################
    export NVPRUNE="/usr/local/cuda-11.8/bin/nvprune"
    export CUDA_LIB_DIR="/usr/local/cuda-11.8/lib64"

    # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
    install_cudnn 12 $CUDNN_VERSION
    export GENCODE="-gencode arch=compute_35,code=sm_35 -gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90"
    export GENCODE_CUDNN="-gencode arch=compute_35,code=sm_35 -gencode arch=compute_37,code=sm_37 -gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90"

    CUDA_VERSION=12.9 bash install_nccl.sh
    if [[ -n "$OVERRIDE_GENCODE" ]]; then
        export GENCODE=$OVERRIDE_GENCODE
    fi

    CUDA_VERSION=12.9 bash install_cusparselt.sh
    # all CUDA libs except CuDNN and CuBLAS (cudnn and cublas need arch 3.7 included)
    ls $CUDA_LIB_DIR/ | grep "\.a" | grep -v "culibos" | grep -v "cudart" | grep -v "cudnn" | grep -v "cublas" | grep -v "metis" \
        | xargs -I {} bash -c \
        "echo {} && $NVPRUNE $GENCODE $CUDA_LIB_DIR/{} -o $CUDA_LIB_DIR/{}"

    ldconfig
    # prune CuDNN and CuBLAS
    $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublas_static.a -o $CUDA_LIB_DIR/libcublas_static.a
    $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublasLt_static.a -o $CUDA_LIB_DIR/libcublasLt_static.a

    #####################################################################################
    # CUDA 11.8 prune visual tools
    #####################################################################################
    export CUDA_BASE="/usr/local/cuda-11.8/"
    rm -rf $CUDA_BASE/libnvvp $CUDA_BASE/nsightee_plugins $CUDA_BASE/nsight-compute-2022.3.0 $CUDA_BASE/nsight-systems-2022.4.2/
}

function prune_124 {
    echo "Pruning CUDA 12.4"
    #####################################################################################
    # CUDA 12.4 prune static libs
    #####################################################################################
    export NVPRUNE="/usr/local/cuda-12.4/bin/nvprune"
    export CUDA_LIB_DIR="/usr/local/cuda-12.4/lib64"

    export GENCODE="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90"
    export GENCODE_CUDNN="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90"

    if [[ -n "$OVERRIDE_GENCODE" ]]; then
        export GENCODE=$OVERRIDE_GENCODE
    fi
    if [[ -n "$OVERRIDE_GENCODE_CUDNN" ]]; then
        export GENCODE_CUDNN=$OVERRIDE_GENCODE_CUDNN
    fi

    # all CUDA libs except CuDNN and CuBLAS
    ls $CUDA_LIB_DIR/ | grep "\.a" | grep -v "culibos" | grep -v "cudart" | grep -v "cudnn" | grep -v "cublas" | grep -v "metis" \
        | xargs -I {} bash -c \
        "echo {} && $NVPRUNE $GENCODE $CUDA_LIB_DIR/{} -o $CUDA_LIB_DIR/{}"

    # prune CuDNN and CuBLAS
    $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublas_static.a -o $CUDA_LIB_DIR/libcublas_static.a
    $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublasLt_static.a -o $CUDA_LIB_DIR/libcublasLt_static.a

    #####################################################################################
    # CUDA 12.4 prune visual tools
    #####################################################################################
    export CUDA_BASE="/usr/local/cuda-12.4/"
    rm -rf $CUDA_BASE/libnvvp $CUDA_BASE/nsightee_plugins $CUDA_BASE/nsight-compute-2024.1.0 $CUDA_BASE/nsight-systems-2023.4.4/
}

function prune_126 {
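The `prune_*` functions above rely on NVIDIA's `nvprune` to drop device code for GPU architectures the wheels don't ship, shrinking the static libraries in place. A minimal sketch, assuming a toolkit under `/usr/local/cuda` (the library path is illustrative):

```bash
# Keep only sm_80 and sm_90 fatbin entries in a static library, rewriting it in place.
nvprune -gencode arch=compute_80,code=sm_80 \
        -gencode arch=compute_90,code=sm_90 \
        /usr/local/cuda/lib64/libcufft_static.a \
        -o /usr/local/cuda/lib64/libcufft_static.a
```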

@@ -106,7 +183,7 @@ function prune_126 {

function install_128 {
    CUDNN_VERSION=9.8.0.87
    echo "Installing CUDA 12.8.1 and cuDNN ${CUDNN_VERSION} and NCCL and cuSparseLt-0.7.1"
    echo "Installing CUDA 12.8.1 and cuDNN ${CUDNN_VERSION} and NCCL and cuSparseLt-0.6.3"
    # install CUDA 12.8.1 in the same container
    install_cuda 12.8.1 cuda_12.8.1_570.124.06_linux

@@ -124,11 +201,13 @@ function install_128 {
while test $# -gt 0
do
    case "$1" in
        12.6|12.6.*) install_126; prune_126
        11.8) install_118; prune_118
            ;;
        12.8|12.8.*) install_128;
        12.4) install_124; prune_124
            ;;
        12.9|12.9.*) install_129;
        12.6) install_126; prune_126
            ;;
        12.8) install_128;
            ;;
        *) echo "bad argument $1"; exit 1
            ;;

@@ -4,10 +4,12 @@ if [[ -n "${CUDNN_VERSION}" ]]; then
    # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
    mkdir tmp_cudnn
    pushd tmp_cudnn
    if [[ ${CUDA_VERSION:0:4} == "12.9" || ${CUDA_VERSION:0:4} == "12.8" ]]; then
        CUDNN_NAME="cudnn-linux-x86_64-9.10.2.21_cuda12-archive"
    if [[ ${CUDA_VERSION:0:4} == "12.8" ]]; then
        CUDNN_NAME="cudnn-linux-x86_64-9.8.0.87_cuda12-archive"
    elif [[ ${CUDA_VERSION:0:4} == "12.6" ]]; then
        CUDNN_NAME="cudnn-linux-x86_64-9.10.2.21_cuda12-archive"
        CUDNN_NAME="cudnn-linux-x86_64-9.5.1.17_cuda12-archive"
    elif [[ ${CUDA_VERSION:0:2} == "12" ]]; then
        CUDNN_NAME="cudnn-linux-x86_64-9.1.0.70_cuda12-archive"
    elif [[ ${CUDA_VERSION:0:2} == "11" ]]; then
        CUDNN_NAME="cudnn-linux-x86_64-9.1.0.70_cuda11-archive"
    else
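The version gates above use bash substring expansion rather than a version parser. A quick sketch of the idiom:

```bash
# ${var:offset:length} slices a string, so the first four characters of a
# version like "12.8.1" give the major.minor prefix used for comparison.
CUDA_VERSION=12.8.1
echo "${CUDA_VERSION:0:4}"   # -> 12.8
echo "${CUDA_VERSION:0:2}"   # -> 12
```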

@@ -5,14 +5,25 @@ set -ex
# cuSPARSELt license: https://docs.nvidia.com/cuda/cusparselt/license.html
mkdir tmp_cusparselt && cd tmp_cusparselt

if [[ ${CUDA_VERSION:0:4} =~ ^12\.[5-9]$ ]]; then
if [[ ${CUDA_VERSION:0:4} =~ ^12\.[5-8]$ ]]; then
    arch_path='sbsa'
    export TARGETARCH=${TARGETARCH:-$(uname -m)}
    if [ ${TARGETARCH} = 'amd64' ] || [ "${TARGETARCH}" = 'x86_64' ]; then
        arch_path='x86_64'
    fi
    CUSPARSELT_NAME="libcusparse_lt-linux-${arch_path}-0.7.1.0-archive"
    CUSPARSELT_NAME="libcusparse_lt-linux-${arch_path}-0.6.3.2-archive"
    curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-${arch_path}/${CUSPARSELT_NAME}.tar.xz
elif [[ ${CUDA_VERSION:0:4} == "12.4" ]]; then
    arch_path='sbsa'
    export TARGETARCH=${TARGETARCH:-$(uname -m)}
    if [ ${TARGETARCH} = 'amd64' ] || [ "${TARGETARCH}" = 'x86_64' ]; then
        arch_path='x86_64'
    fi
    CUSPARSELT_NAME="libcusparse_lt-linux-${arch_path}-0.6.2.3-archive"
    curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-${arch_path}/${CUSPARSELT_NAME}.tar.xz
elif [[ ${CUDA_VERSION:0:4} == "11.8" ]]; then
    CUSPARSELT_NAME="libcusparse_lt-linux-x86_64-0.4.0.7-archive"
    curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-x86_64/${CUSPARSELT_NAME}.tar.xz
else
    echo "Not sure which libcusparselt version to install for this ${CUDA_VERSION}"
fi
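The `=~` test above does POSIX extended-regex matching inside `[[ ]]`; the character class is what widens or narrows the covered CUDA minors. A small sketch:

```bash
# ^12\.[5-9]$ matches 12.5 through 12.9; the anchors keep strings like
# "112.5" or "12.55" from slipping through.
CUDA_VERSION=12.8.1
if [[ ${CUDA_VERSION:0:4} =~ ^12\.[5-9]$ ]]; then
    echo "covered by the 0.7.x cuSPARSELt archive"
fi
```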

@@ -8,6 +8,16 @@ retry () {
    "$@" || (sleep 10 && "$@") || (sleep 20 && "$@") || (sleep 40 && "$@")
}
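The `retry` helper above re-runs its full argument list with growing sleeps, so any flaky network step can be wrapped unchanged. Usage sketch (the wrapped command is illustrative, not from the diff):

```bash
# "$@" re-executes the exact arguments; each || branch waits longer and retries.
retry () {
    "$@" || (sleep 10 && "$@") || (sleep 20 && "$@") || (sleep 40 && "$@")
}
retry curl -fsSLO https://example.com/artifact.tar.xz
```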

# A bunch of custom pip dependencies for ONNX
pip_install \
    beartype==0.15.0 \
    filelock==3.9.0 \
    flatbuffers==2.0 \
    mock==5.0.1 \
    ninja==1.10.2 \
    networkx==2.5 \
    numpy==1.24.2

# ONNXRuntime should be installed before installing
# onnx-weekly. Otherwise, onnx-weekly could be
# overwritten by onnx.

@@ -19,8 +29,11 @@ pip_install \
    transformers==4.36.2

pip_install coloredlogs packaging

pip_install onnxruntime==1.18.1
pip_install onnxscript==0.3.0
pip_install onnxscript==0.2.6 --no-deps
# required by onnxscript
pip_install ml_dtypes

# Cache the transformers model to be used later by ONNX tests. We need to run the transformers
# package to download the model. By default, the model is cached at ~/.cache/huggingface/hub/

@@ -4,7 +4,8 @@
set -ex

cd /
git clone https://github.com/OpenMathLib/OpenBLAS.git -b "${OPENBLAS_VERSION:-v0.3.29}" --depth 1 --shallow-submodules
git clone https://github.com/OpenMathLib/OpenBLAS.git -b v0.3.29 --depth 1 --shallow-submodules

OPENBLAS_BUILD_FLAGS="
NUM_THREADS=128

@@ -26,11 +26,6 @@ Pin: release o=repo.radeon.com
Pin-Priority: 600
EOF

# we want the patch version of 6.4 instead
if [[ $(ver $ROCM_VERSION) -eq $(ver 6.4) ]]; then
    ROCM_VERSION="${ROCM_VERSION}.1"
fi

# Add amdgpu repository
UBUNTU_VERSION_NAME=`cat /etc/os-release | grep UBUNTU_CODENAME | awk -F= '{print $2}'`
echo "deb [arch=amd64] https://repo.radeon.com/amdgpu/${ROCM_VERSION}/ubuntu ${UBUNTU_VERSION_NAME} main" > /etc/apt/sources.list.d/amdgpu.list

@@ -72,23 +67,19 @@ EOF

# ROCm 6.3 had a regression where initializing static code objects had significant overhead
# ROCm 6.4 did not yet fix the regression, also HIP branch names are different
if [[ $(ver $ROCM_VERSION) -ge $(ver 6.3) ]] && [[ $(ver $ROCM_VERSION) -lt $(ver 7.0) ]]; then
    if [[ $(ver $ROCM_VERSION) -eq $(ver 6.4.1) ]]; then
        HIP_BRANCH=release/rocm-rel-6.4
        VER_STR=6.4
        VER_PATCH=.1
if [[ $(ver $ROCM_VERSION) -eq $(ver 6.3) ]] || [[ $(ver $ROCM_VERSION) -eq $(ver 6.4) ]]; then
    if [[ $(ver $ROCM_VERSION) -eq $(ver 6.3) ]]; then
        HIP_BRANCH=rocm-6.3.x
        VER_STR=6.3
    elif [[ $(ver $ROCM_VERSION) -eq $(ver 6.4) ]]; then
        HIP_BRANCH=release/rocm-rel-6.4
        VER_STR=6.4
    elif [[ $(ver $ROCM_VERSION) -eq $(ver 6.3) ]]; then
        HIP_BRANCH=rocm-6.3.x
        VER_STR=6.3
    fi
    # clr build needs CppHeaderParser but can only find it using conda's python
    /opt/conda/bin/python -m pip install CppHeaderParser
    git clone https://github.com/ROCm/HIP -b $HIP_BRANCH
    HIP_COMMON_DIR=$(readlink -f HIP)
    git clone https://github.com/jeffdaily/clr -b release/rocm-rel-${VER_STR}${VER_PATCH}-statco-hotfix
    git clone https://github.com/jeffdaily/clr -b release/rocm-rel-${VER_STR}-statco-hotfix
    mkdir -p clr/build
    pushd clr/build
    cmake .. -DCLR_BUILD_HIP=ON -DHIP_COMMON_DIR=$HIP_COMMON_DIR

@@ -51,12 +51,7 @@ as_jenkins git clone --recursive ${TRITON_REPO} triton
cd triton
as_jenkins git checkout ${TRITON_PINNED_COMMIT}
as_jenkins git submodule update --init --recursive

# Old versions of python have setup.py in ./python; newer versions have it in ./
if [ ! -f setup.py ]; then
    cd python
fi

cd python
pip_install pybind11==2.13.6

# TODO: remove patch setup.py once we have a proper fix for https://github.com/triton-lang/triton/issues/4527

@@ -98,6 +93,3 @@ fi
if [ -n "${NUMPY_VERSION}" ]; then
    pip_install "numpy==${NUMPY_VERSION}"
fi
if [[ "$ANACONDA_PYTHON_VERSION" != 3.9* ]]; then
    pip_install helion
fi

@@ -54,6 +54,16 @@ COPY ./ci_commit_pins/nccl-cu* /ci_commit_pins/
COPY ./common/install_cusparselt.sh install_cusparselt.sh
ENV CUDA_HOME /usr/local/cuda

FROM cuda as cuda11.8
RUN bash ./install_cuda.sh 11.8
RUN bash ./install_magma.sh 11.8
RUN ln -sf /usr/local/cuda-11.8 /usr/local/cuda

FROM cuda as cuda12.4
RUN bash ./install_cuda.sh 12.4
RUN bash ./install_magma.sh 12.4
RUN ln -sf /usr/local/cuda-12.4 /usr/local/cuda

FROM cuda as cuda12.6
RUN bash ./install_cuda.sh 12.6
RUN bash ./install_magma.sh 12.6

@@ -64,11 +74,6 @@ RUN bash ./install_cuda.sh 12.8
RUN bash ./install_magma.sh 12.8
RUN ln -sf /usr/local/cuda-12.8 /usr/local/cuda

FROM cuda as cuda12.9
RUN bash ./install_cuda.sh 12.9
RUN bash ./install_magma.sh 12.9
RUN ln -sf /usr/local/cuda-12.9 /usr/local/cuda

FROM cpu as rocm
ARG ROCM_VERSION
ARG PYTORCH_ROCM_ARCH

@@ -26,7 +26,7 @@ ADD ./common/install_openssl.sh install_openssl.sh
RUN bash ./install_openssl.sh && rm install_openssl.sh

# remove unnecessary python versions
# remove unncessary python versions
RUN rm -rf /opt/python/cp26-cp26m /opt/_internal/cpython-2.6.9-ucs2
RUN rm -rf /opt/python/cp26-cp26mu /opt/_internal/cpython-2.6.9-ucs4
RUN rm -rf /opt/python/cp33-cp33m /opt/_internal/cpython-3.3.6

@@ -103,7 +103,6 @@ ENV SSL_CERT_FILE=/opt/_internal/certs.pem
# Install LLVM version
COPY --from=openssl /opt/openssl /opt/openssl
COPY --from=base /opt/python /opt/python
COPY --from=base /usr/local/lib/ /usr/local/lib/
COPY --from=base /opt/_internal /opt/_internal
COPY --from=base /usr/local/bin/auditwheel /usr/local/bin/auditwheel
COPY --from=intel /opt/intel /opt/intel

@@ -2,7 +2,7 @@ FROM quay.io/pypa/manylinux_2_28_aarch64 as base

ARG GCCTOOLSET_VERSION=13

# Language variables
# Language variabes
ENV LC_ALL=en_US.UTF-8
ENV LANG=en_US.UTF-8
ENV LANGUAGE=en_US.UTF-8

@@ -58,13 +58,12 @@ RUN git config --global --add safe.directory "*"

FROM base as openblas
# Install openblas
ARG OPENBLAS_VERSION
ADD ./common/install_openblas.sh install_openblas.sh
RUN bash ./install_openblas.sh && rm install_openblas.sh

FROM base as final

# remove unnecessary python versions
# remove unncessary python versions
RUN rm -rf /opt/python/cp26-cp26m /opt/_internal/cpython-2.6.9-ucs2
RUN rm -rf /opt/python/cp26-cp26mu /opt/_internal/cpython-2.6.9-ucs4
RUN rm -rf /opt/python/cp33-cp33m /opt/_internal/cpython-3.3.6

@@ -60,7 +60,7 @@ RUN bash ./install_openssl.sh && rm install_openssl.sh
ENV SSL_CERT_FILE=/opt/_internal/certs.pem

FROM openssl as final
# remove unnecessary python versions
# remove unncessary python versions
RUN rm -rf /opt/python/cp26-cp26m /opt/_internal/cpython-2.6.9-ucs2
RUN rm -rf /opt/python/cp26-cp26mu /opt/_internal/cpython-2.6.9-ucs4
RUN rm -rf /opt/python/cp33-cp33m /opt/_internal/cpython-3.3.6

@@ -120,13 +120,11 @@ RUN python3 -mpip install cmake==3.28.0
# so just build it from upstream repository.
# h5py is dependency of onnxruntime_training.
# h5py==3.11.0 builds with hdf5-devel 1.10.5 from repository.
# h5py 3.11.0 doesn't build with numpy >= 2.3.0.
# install newest flatbuffers version first:
# for some reason old version is getting pulled in otherwise.
# packaging package is required for onnxruntime wheel build.
RUN pip3 install flatbuffers && \
    pip3 install cython 'pkgconfig>=1.5.5' 'setuptools>=77' 'numpy<2.3.0' && \
    pip3 install --no-build-isolation h5py==3.11.0 && \
    pip3 install h5py==3.11.0 && \
    pip3 install packaging && \
    git clone https://github.com/microsoft/onnxruntime && \
    cd onnxruntime && git checkout v1.21.0 && \

@@ -27,7 +27,6 @@ fi

MANY_LINUX_VERSION=${MANY_LINUX_VERSION:-}
DOCKERFILE_SUFFIX=${DOCKERFILE_SUFFIX:-}
OPENBLAS_VERSION=${OPENBLAS_VERSION:-}

case ${image} in
    manylinux2_28-builder:cpu)

@@ -41,7 +40,6 @@ case ${image} in
        GPU_IMAGE=arm64v8/almalinux:8
        DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=13 --build-arg NINJA_VERSION=1.12.1"
        MANY_LINUX_VERSION="2_28_aarch64"
        OPENBLAS_VERSION="v0.3.29"
        ;;
    manylinuxcxx11-abi-builder:cpu-cxx11-abi)
        TARGET=final

@@ -111,7 +109,6 @@ tmp_tag=$(basename "$(mktemp -u)" | tr '[:upper:]' '[:lower:]')
DOCKER_BUILDKIT=1 docker build \
    ${DOCKER_GPU_BUILD_ARG} \
    --build-arg "GPU_IMAGE=${GPU_IMAGE}" \
    --build-arg "OPENBLAS_VERSION=${OPENBLAS_VERSION}" \
    --target "${TARGET}" \
    -t "${tmp_tag}" \
    $@ \

@@ -41,11 +41,14 @@ fbscribelogger==0.1.7
#Pinned versions: 0.1.6
#test that import:

flatbuffers==24.12.23
flatbuffers==2.0 ; platform_machine != "s390x"
#Description: cross platform serialization library
#Pinned versions: 24.12.23
#Pinned versions: 2.0
#test that import:

flatbuffers ; platform_machine == "s390x"
#Description: cross platform serialization library; Newer version is required on s390x for new python version

hypothesis==5.35.1
# Pin hypothesis to avoid flakiness: https://github.com/pytorch/pytorch/issues/31136
#Description: advanced library for generating parametrized tests

@@ -90,10 +93,10 @@ librosa>=0.6.2 ; python_version < "3.11"
#Pinned versions:
#test that import:

mypy==1.16.0
mypy==1.15.0
# Pin MyPy version because new errors are likely to appear with each release
#Description: linter
#Pinned versions: 1.16.0
#Pinned versions: 1.14.0
#test that import: test_typing.py, test_type_hints.py

networkx==2.8.8

@@ -379,10 +382,3 @@ dataclasses_json==0.6.7

cmake==4.0.0
#Description: required for building

tlparse==0.3.30
#Description: required for log parsing

cuda-bindings>=12.0,<13.0
#Description: required for testing CUDAGraph::raw_cuda_graph(). See https://nvidia.github.io/cuda-python/cuda-bindings/latest/support.html for how this version was chosen. Note "Any fix in the latest bindings would be backported to the prior major version" means that only the newest version of cuda-bindings will get fixes. Depending on the latest version of 12.x is okay because all 12.y versions will be supported via "CUDA minor version compatibility". Pytorch builds against 13.z versions of cuda toolkit work with 12.x versions of cuda-bindings as well because newer drivers work with old toolkits.
#test that import: test_cuda.py

@@ -1 +1 @@
3.4.0
3.3.1

@@ -1 +0,0 @@
3.4.0

.ci/docker/ubuntu-cuda/Dockerfile (new file, 170 lines)

@@ -0,0 +1,170 @@
ARG UBUNTU_VERSION
ARG CUDA_VERSION
ARG IMAGE_NAME

FROM ${IMAGE_NAME} as base

ARG UBUNTU_VERSION
ARG CUDA_VERSION

ENV DEBIAN_FRONTEND noninteractive

# Install common dependencies (so that this step can be cached separately)
COPY ./common/install_base.sh install_base.sh
RUN bash ./install_base.sh && rm install_base.sh

# Install user
COPY ./common/install_user.sh install_user.sh
RUN bash ./install_user.sh && rm install_user.sh

# Install katex
ARG KATEX
COPY ./common/install_docs_reqs.sh install_docs_reqs.sh
RUN bash ./install_docs_reqs.sh && rm install_docs_reqs.sh

# Install conda and other packages (e.g., numpy, pytest)
ARG ANACONDA_PYTHON_VERSION
ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION
ENV PATH /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/bin:/opt/conda/bin:$PATH
COPY requirements-ci.txt /opt/conda/requirements-ci.txt
COPY ./common/install_conda.sh install_conda.sh
COPY ./common/common_utils.sh common_utils.sh
COPY ./common/install_magma_conda.sh install_magma_conda.sh
RUN bash ./install_conda.sh && rm install_conda.sh install_magma_conda.sh common_utils.sh /opt/conda/requirements-ci.txt

# Install gcc
ARG GCC_VERSION
COPY ./common/install_gcc.sh install_gcc.sh
RUN bash ./install_gcc.sh && rm install_gcc.sh

# Install clang
ARG CLANG_VERSION
COPY ./common/install_clang.sh install_clang.sh
RUN bash ./install_clang.sh && rm install_clang.sh

# (optional) Install vision packages like OpenCV
ARG VISION
COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./
RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi
RUN rm install_vision.sh cache_vision_models.sh common_utils.sh
ENV INSTALLED_VISION ${VISION}

# (optional) Install UCC
ARG UCX_COMMIT
ARG UCC_COMMIT
ENV UCX_COMMIT $UCX_COMMIT
ENV UCC_COMMIT $UCC_COMMIT
ENV UCX_HOME /usr
ENV UCC_HOME /usr
ADD ./common/install_ucc.sh install_ucc.sh
RUN if [ -n "${UCX_COMMIT}" ] && [ -n "${UCC_COMMIT}" ]; then bash ./install_ucc.sh; fi
RUN rm install_ucc.sh

COPY ./common/install_openssl.sh install_openssl.sh
ENV OPENSSL_ROOT_DIR /opt/openssl
RUN bash ./install_openssl.sh
ENV OPENSSL_DIR /opt/openssl

ARG INDUCTOR_BENCHMARKS
ARG ANACONDA_PYTHON_VERSION
ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION
COPY ./common/install_inductor_benchmark_deps.sh install_inductor_benchmark_deps.sh
COPY ./common/common_utils.sh common_utils.sh
COPY ci_commit_pins/huggingface.txt huggingface.txt
COPY ci_commit_pins/timm.txt timm.txt
RUN if [ -n "${INDUCTOR_BENCHMARKS}" ]; then bash ./install_inductor_benchmark_deps.sh; fi
RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface.txt

ARG TRITON

FROM base as triton-builder
# Install triton, this needs to be done before sccache because the latter will
# try to reach out to S3, which docker build runners don't have access
COPY ./common/install_triton.sh install_triton.sh
COPY ./common/common_utils.sh common_utils.sh
COPY ci_commit_pins/triton.txt triton.txt
COPY triton_version.txt triton_version.txt
RUN bash ./install_triton.sh

FROM base as final
COPY --from=triton-builder /opt/triton /opt/triton
RUN if [ -n "${TRITON}" ]; then pip install /opt/triton/*.whl; chown -R jenkins:jenkins /opt/conda; fi
RUN rm -rf /opt/triton

ARG HALIDE
# Build and install halide
COPY ./common/install_halide.sh install_halide.sh
COPY ./common/common_utils.sh common_utils.sh
COPY ci_commit_pins/halide.txt halide.txt
RUN if [ -n "${HALIDE}" ]; then bash ./install_halide.sh; fi
RUN rm install_halide.sh common_utils.sh halide.txt

# Install ccache/sccache (do this last, so we get priority in PATH)
COPY ./common/install_cache.sh install_cache.sh
ENV PATH /opt/cache/bin:$PATH
# See https://github.com/pytorch/pytorch/issues/82174
# TODO(sdym@fb.com):
# check if this is needed after full off Xenial migration
ENV CARGO_NET_GIT_FETCH_WITH_CLI true
RUN bash ./install_cache.sh && rm install_cache.sh
ENV CMAKE_CUDA_COMPILER_LAUNCHER=/opt/cache/bin/sccache

# Add jni.h for java host build
COPY ./common/install_jni.sh install_jni.sh
COPY ./java/jni.h jni.h
RUN bash ./install_jni.sh && rm install_jni.sh

# Install Open MPI for CUDA
COPY ./common/install_openmpi.sh install_openmpi.sh
RUN if [ -n "${CUDA_VERSION}" ]; then bash install_openmpi.sh; fi
RUN rm install_openmpi.sh

# Include BUILD_ENVIRONMENT environment variable in image
ARG BUILD_ENVIRONMENT
ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT}

# AWS specific CUDA build guidance
ENV TORCH_CUDA_ARCH_LIST Maxwell
ENV TORCH_NVCC_FLAGS "-Xfatbin -compress-all"
ENV CUDA_PATH /usr/local/cuda

# Install LLVM dev version (Defined in the pytorch/builder github repository)
COPY --from=pytorch/llvm:9.0.1 /opt/llvm /opt/llvm

# Install CUDNN
ARG CUDNN_VERSION
ARG CUDA_VERSION
COPY ./common/install_cudnn.sh install_cudnn.sh
RUN if [ -n "${CUDNN_VERSION}" ]; then bash install_cudnn.sh; fi
RUN rm install_cudnn.sh

# Install CUSPARSELT
ARG CUDA_VERSION
COPY ./common/install_cusparselt.sh install_cusparselt.sh
RUN bash install_cusparselt.sh
RUN rm install_cusparselt.sh

# Install NCCL
ARG CUDA_VERSION
COPY ./common/install_nccl.sh install_nccl.sh
COPY ./ci_commit_pins/nccl-cu* /ci_commit_pins/
RUN bash install_nccl.sh
RUN rm install_nccl.sh /ci_commit_pins/nccl-cu*
ENV USE_SYSTEM_NCCL=1
ENV NCCL_INCLUDE_DIR="/usr/local/cuda/include/"
ENV NCCL_LIB_DIR="/usr/local/cuda/lib64/"

# Install CUDSS
ARG CUDA_VERSION
COPY ./common/install_cudss.sh install_cudss.sh
RUN bash install_cudss.sh
RUN rm install_cudss.sh

# Delete /usr/local/cuda-11.X/cuda-11.X symlinks
RUN if [ -h /usr/local/cuda-11.6/cuda-11.6 ]; then rm /usr/local/cuda-11.6/cuda-11.6; fi
RUN if [ -h /usr/local/cuda-11.7/cuda-11.7 ]; then rm /usr/local/cuda-11.7/cuda-11.7; fi
RUN if [ -h /usr/local/cuda-12.1/cuda-12.1 ]; then rm /usr/local/cuda-12.1/cuda-12.1; fi
RUN if [ -h /usr/local/cuda-12.4/cuda-12.4 ]; then rm /usr/local/cuda-12.4/cuda-12.4; fi

USER jenkins
CMD ["bash"]

@@ -25,7 +25,6 @@ RUN bash ./install_docs_reqs.sh && rm install_docs_reqs.sh

# Install conda and other packages (e.g., numpy, pytest)
ARG ANACONDA_PYTHON_VERSION
ARG BUILD_ENVIRONMENT
ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION
ENV PATH /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/bin:/opt/conda/bin:$PATH
COPY requirements-ci.txt /opt/conda/requirements-ci.txt

@@ -72,7 +72,7 @@ ARG TRITON
COPY ./common/install_triton.sh install_triton.sh
COPY ./common/common_utils.sh common_utils.sh
COPY ci_commit_pins/triton-xpu.txt triton-xpu.txt
COPY triton_xpu_version.txt triton_version.txt
COPY triton_version.txt triton_version.txt
RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi
RUN rm install_triton.sh common_utils.sh triton-xpu.txt triton_version.txt

@@ -1,7 +1,7 @@
SHELL=/usr/bin/env bash

DOCKER_CMD ?= docker
DESIRED_CUDA ?= 12.8
DESIRED_CUDA ?= 11.8
DESIRED_CUDA_SHORT = $(subst .,,$(DESIRED_CUDA))
PACKAGE_NAME = magma-cuda
CUDA_ARCH_LIST ?= -gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90

@@ -16,21 +16,15 @@ DOCKER_RUN = set -eou pipefail; ${DOCKER_CMD} run --rm -i \
	magma/build_magma.sh

.PHONY: all
all: magma-cuda129
all: magma-cuda128
all: magma-cuda126
all: magma-cuda118

.PHONY:
clean:
	$(RM) -r magma-*
	$(RM) -r output

.PHONY: magma-cuda129
magma-cuda129: DESIRED_CUDA := 12.9
magma-cuda129: CUDA_ARCH_LIST += -gencode arch=compute_100,code=sm_100 -gencode arch=compute_120,code=sm_120
magma-cuda129:
	$(DOCKER_RUN)

.PHONY: magma-cuda128
magma-cuda128: DESIRED_CUDA := 12.8
magma-cuda128: CUDA_ARCH_LIST += -gencode arch=compute_100,code=sm_100 -gencode arch=compute_120,code=sm_120

@@ -41,3 +35,9 @@ magma-cuda128:
magma-cuda126: DESIRED_CUDA := 12.6
magma-cuda126:
	$(DOCKER_RUN)

.PHONY: magma-cuda118
magma-cuda118: DESIRED_CUDA := 11.8
magma-cuda118: CUDA_ARCH_LIST += -gencode arch=compute_37,code=sm_37
magma-cuda118:
	$(DOCKER_RUN)

@@ -31,6 +31,7 @@ elif [[ "$OS_NAME" == *"Ubuntu"* ]]; then
    # Comment out nvidia repositories to prevent them from getting apt-get updated, see https://github.com/pytorch/pytorch/issues/74968
    # shellcheck disable=SC2046
    sed -i 's/.*nvidia.*/# &/' $(find /etc/apt/ -type f -name "*.list")
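The `sed` line above comments matching repo entries out rather than deleting them; in sed's replacement text, `&` stands for the entire matched line. A one-line sketch (the repo line is illustrative):

```bash
# '&' expands to the whole match, so every matching line gets a '# ' prefix.
echo 'deb http://nvidia.example/repo stable main' | sed 's/.*nvidia.*/# &/'
# -> # deb http://nvidia.example/repo stable main
```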

    retry apt-get update
    retry apt-get -y install zip openssl
else

@@ -97,7 +98,6 @@ if [[ -z "$PYTORCH_ROOT" ]]; then
    exit 1
fi
pushd "$PYTORCH_ROOT"
retry pip install -q cmake
python setup.py clean
retry pip install -qr requirements.txt
case ${DESIRED_PYTHON} in

@@ -151,7 +151,7 @@ if [[ "$USE_SPLIT_BUILD" == "true" ]]; then
    BUILD_LIBTORCH_WHL=0 BUILD_PYTHON_ONLY=1 \
    BUILD_LIBTORCH_CPU_WITH_DEBUG=$BUILD_DEBUG_INFO \
    USE_NCCL=${USE_NCCL} USE_RCCL=${USE_RCCL} USE_KINETO=${USE_KINETO} \
    CMAKE_FRESH=1 python setup.py bdist_wheel -d /tmp/$WHEELHOUSE_DIR
    python setup.py bdist_wheel -d /tmp/$WHEELHOUSE_DIR --cmake
    echo "Finished setup.py bdist_wheel for split build (BUILD_PYTHON_ONLY)"
else
    time CMAKE_ARGS=${CMAKE_ARGS[@]} \

@@ -15,9 +15,6 @@ export INSTALL_TEST=0 # dont install test binaries into site-packages
export USE_CUPTI_SO=0
export USE_CUSPARSELT=${USE_CUSPARSELT:-1} # Enable if not disabled by libtorch build
export USE_CUFILE=${USE_CUFILE:-1}
export USE_SYSTEM_NCCL=1
export NCCL_INCLUDE_DIR="/usr/local/cuda/include/"
export NCCL_LIB_DIR="/usr/local/cuda/lib64/"

# Keep an array of cmake variables to add to
if [[ -z "$CMAKE_ARGS" ]]; then

@@ -54,18 +51,18 @@ cuda_version_nodot=$(echo $CUDA_VERSION | tr -d '.')

TORCH_CUDA_ARCH_LIST="5.0;6.0;7.0;7.5;8.0;8.6"
case ${CUDA_VERSION} in
    12.8|12.9)
        TORCH_CUDA_ARCH_LIST="7.5;8.0;8.6;9.0;10.0;12.0+PTX" #removing sm_50-sm_70 as these architectures are deprecated in CUDA 12.8/9 and will be removed in future releases
    12.8)
        TORCH_CUDA_ARCH_LIST="7.5;8.0;8.6;9.0;10.0;12.0+PTX" #removing sm_50-sm_70 as these architectures are deprecated in CUDA 12.8 and will be removed in future releases
        EXTRA_CAFFE2_CMAKE_FLAGS+=("-DATEN_NO_TEST=ON")
        # WAR to resolve the ld error in libtorch build with CUDA 12.9
        if [[ "$DESIRED_CUDA" == "cu129" && "$PACKAGE_TYPE" == "libtorch" ]]; then
            TORCH_CUDA_ARCH_LIST="7.5;8.0;9.0;10.0;12.0+PTX"
        fi
        ;;
    12.6)
        TORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST};9.0"
        EXTRA_CAFFE2_CMAKE_FLAGS+=("-DATEN_NO_TEST=ON")
        ;;
    11.8)
        TORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST};3.7;9.0"
        EXTRA_CAFFE2_CMAKE_FLAGS+=("-DATEN_NO_TEST=ON")
        ;;
    *)
        echo "unknown cuda version $CUDA_VERSION"
        exit 1

@@ -107,11 +104,12 @@ DEPS_SONAME=(
)

# CUDA_VERSION 12.6, 12.8, 12.9
# CUDA_VERSION 12.6, 12.8
if [[ $CUDA_VERSION == 12* ]]; then
    export USE_STATIC_CUDNN=0
    # Try parallelizing nvcc as well
    export TORCH_NVCC_FLAGS="-Xfatbin -compress-all --threads 2"

    if [[ -z "$PYTORCH_EXTRA_INSTALL_REQUIREMENTS" ]]; then
        echo "Bundling with cudnn and cublas."
        DEPS_LIST+=(

@@ -127,6 +125,7 @@ if [[ $CUDA_VERSION == 12* ]]; then
            "/usr/local/cuda/lib64/libcublasLt.so.12"
            "/usr/local/cuda/lib64/libcusparseLt.so.0"
            "/usr/local/cuda/lib64/libcudart.so.12"
            "/usr/local/cuda/lib64/libnvToolsExt.so.1"
            "/usr/local/cuda/lib64/libnvrtc.so.12"
            "/usr/local/cuda/lib64/libnvrtc-builtins.so"
            "/usr/local/cuda/lib64/libcufile.so.0"

@@ -145,6 +144,7 @@ if [[ $CUDA_VERSION == 12* ]]; then
            "libcublasLt.so.12"
            "libcusparseLt.so.0"
            "libcudart.so.12"
            "libnvToolsExt.so.1"
            "libnvrtc.so.12"
            "libnvrtc-builtins.so"
            "libcufile.so.0"

@@ -162,10 +162,8 @@ if [[ $CUDA_VERSION == 12* ]]; then
            '$ORIGIN/../../nvidia/curand/lib'
            '$ORIGIN/../../nvidia/cusolver/lib'
            '$ORIGIN/../../nvidia/cusparse/lib'
            '$ORIGIN/../../nvidia/cusparselt/lib'
            '$ORIGIN/../../cusparselt/lib'
            '$ORIGIN/../../nvidia/nccl/lib'
            '$ORIGIN/../../nvidia/nvshmem/lib'
            '$ORIGIN/../../nvidia/nvtx/lib'
            '$ORIGIN/../../nvidia/cufile/lib'
        )

@@ -174,9 +172,94 @@ if [[ $CUDA_VERSION == 12* ]]; then
        export LIB_SO_RPATH=$CUDA_RPATHS':$ORIGIN'
        export FORCE_RPATH="--force-rpath"
        export USE_STATIC_NCCL=0
        export USE_SYSTEM_NCCL=1
        export ATEN_STATIC_CUDA=0
        export USE_CUDA_STATIC_LINK=0
        export USE_CUPTI_SO=1
        export NCCL_INCLUDE_DIR="/usr/local/cuda/include/"
        export NCCL_LIB_DIR="/usr/local/cuda/lib64/"
    fi
elif [[ $CUDA_VERSION == "11.8" ]]; then
    export USE_STATIC_CUDNN=0
    # Turn USE_CUFILE off for CUDA 11.8 since nvidia-cufile-cu11 and 1.9.0.20 are
    # not available in PYPI
    export USE_CUFILE=0
    # Try parallelizing nvcc as well
    export TORCH_NVCC_FLAGS="-Xfatbin -compress-all --threads 2"
    # Bundle ptxas into the wheel, see https://github.com/pytorch/pytorch/pull/119750
    export BUILD_BUNDLE_PTXAS=1

    # CUDA 11.8 have to ship the libcusparseLt.so.0 with the binary
    # since nvidia-cusparselt-cu11 is not available in PYPI
    if [[ $USE_CUSPARSELT == "1" ]]; then
        DEPS_SONAME+=(
            "libcusparseLt.so.0"
        )
        DEPS_LIST+=(
            "/usr/local/cuda/lib64/libcusparseLt.so.0"
        )
    fi

    if [[ -z "$PYTORCH_EXTRA_INSTALL_REQUIREMENTS" ]]; then
        echo "Bundling with cudnn and cublas."
        DEPS_LIST+=(
            "/usr/local/cuda/lib64/libcudnn_adv.so.9"
            "/usr/local/cuda/lib64/libcudnn_cnn.so.9"
            "/usr/local/cuda/lib64/libcudnn_graph.so.9"
            "/usr/local/cuda/lib64/libcudnn_ops.so.9"
            "/usr/local/cuda/lib64/libcudnn_engines_runtime_compiled.so.9"
            "/usr/local/cuda/lib64/libcudnn_engines_precompiled.so.9"
            "/usr/local/cuda/lib64/libcudnn_heuristic.so.9"
            "/usr/local/cuda/lib64/libcudnn.so.9"
            "/usr/local/cuda/lib64/libcublas.so.11"
            "/usr/local/cuda/lib64/libcublasLt.so.11"
            "/usr/local/cuda/lib64/libcudart.so.11.0"
            "/usr/local/cuda/lib64/libnvToolsExt.so.1"
            "/usr/local/cuda/lib64/libnvrtc.so.11.2" # this is not a mistake, it links to more specific cuda version
            "/usr/local/cuda/lib64/libnvrtc-builtins.so.11.8"
        )
        DEPS_SONAME+=(
            "libcudnn_adv.so.9"
            "libcudnn_cnn.so.9"
            "libcudnn_graph.so.9"
            "libcudnn_ops.so.9"
            "libcudnn_engines_runtime_compiled.so.9"
            "libcudnn_engines_precompiled.so.9"
            "libcudnn_heuristic.so.9"
            "libcudnn.so.9"
            "libcublas.so.11"
            "libcublasLt.so.11"
            "libcudart.so.11.0"
            "libnvToolsExt.so.1"
            "libnvrtc.so.11.2"
            "libnvrtc-builtins.so.11.8"
        )
    else
        echo "Using nvidia libs from pypi."
        CUDA_RPATHS=(
            '$ORIGIN/../../nvidia/cublas/lib'
            '$ORIGIN/../../nvidia/cuda_cupti/lib'
            '$ORIGIN/../../nvidia/cuda_nvrtc/lib'
            '$ORIGIN/../../nvidia/cuda_runtime/lib'
            '$ORIGIN/../../nvidia/cudnn/lib'
            '$ORIGIN/../../nvidia/cufft/lib'
            '$ORIGIN/../../nvidia/curand/lib'
            '$ORIGIN/../../nvidia/cusolver/lib'
            '$ORIGIN/../../nvidia/cusparse/lib'
            '$ORIGIN/../../nvidia/nccl/lib'
            '$ORIGIN/../../nvidia/nvtx/lib'
        )
        CUDA_RPATHS=$(IFS=: ; echo "${CUDA_RPATHS[*]}")
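The `CUDA_RPATHS=$(IFS=: ; ...)` line collapses the array into a single `:`-separated rpath string. A sketch of the join idiom:

```bash
# Inside $(...), setting IFS only affects the subshell; "${arr[*]}" joins the
# elements with the first character of IFS.
rpaths=('$ORIGIN/../../nvidia/cublas/lib' '$ORIGIN/../../nvidia/cudnn/lib')
joined=$(IFS=: ; echo "${rpaths[*]}")
echo "$joined"   # -> $ORIGIN/../../nvidia/cublas/lib:$ORIGIN/../../nvidia/cudnn/lib
```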
        export C_SO_RPATH=$CUDA_RPATHS':$ORIGIN:$ORIGIN/lib'
        export LIB_SO_RPATH=$CUDA_RPATHS':$ORIGIN'
        export FORCE_RPATH="--force-rpath"
        export USE_STATIC_NCCL=0
        export USE_SYSTEM_NCCL=1
        export ATEN_STATIC_CUDA=0
        export USE_CUDA_STATIC_LINK=0
        export USE_CUPTI_SO=1
        export NCCL_INCLUDE_DIR="/usr/local/cuda/include/"
        export NCCL_LIB_DIR="/usr/local/cuda/lib64/"
    fi
else
    echo "Unknown cuda version $CUDA_VERSION"

@@ -92,7 +92,6 @@ if [[ -z "$PYTORCH_ROOT" ]]; then
    exit 1
fi
pushd "$PYTORCH_ROOT"
retry pip install -q cmake
python setup.py clean
retry pip install -qr requirements.txt
retry pip install -q numpy==2.0.1

@@ -95,7 +95,6 @@ ROCM_SO_FILES=(
    "libroctracer64.so"
    "libroctx64.so"
    "libhipblaslt.so"
    "libhipsparselt.so"
    "libhiprtc.so"
)

@@ -187,28 +186,20 @@ do
    OS_SO_FILES[${#OS_SO_FILES[@]}]=$file_name # Append lib to array
done

ARCH=$(echo $PYTORCH_ROCM_ARCH | sed 's/;/|/g') # Replace ; separated arch list to bar for grep

# rocBLAS library files
ROCBLAS_LIB_SRC=$ROCM_HOME/lib/rocblas/library
ROCBLAS_LIB_DST=lib/rocblas/library
ROCBLAS_ARCH_SPECIFIC_FILES=$(ls $ROCBLAS_LIB_SRC | grep -E $ARCH)
ROCBLAS_OTHER_FILES=$(ls $ROCBLAS_LIB_SRC | grep -v gfx)
ROCBLAS_LIB_FILES=($ROCBLAS_ARCH_SPECIFIC_FILES $OTHER_FILES)
ARCH=$(echo $PYTORCH_ROCM_ARCH | sed 's/;/|/g') # Replace ; seperated arch list to bar for grep
ARCH_SPECIFIC_FILES=$(ls $ROCBLAS_LIB_SRC | grep -E $ARCH)
OTHER_FILES=$(ls $ROCBLAS_LIB_SRC | grep -v gfx)
ROCBLAS_LIB_FILES=($ARCH_SPECIFIC_FILES $OTHER_FILES)

# hipblaslt library files
HIPBLASLT_LIB_SRC=$ROCM_HOME/lib/hipblaslt/library
HIPBLASLT_LIB_DST=lib/hipblaslt/library
HIPBLASLT_ARCH_SPECIFIC_FILES=$(ls $HIPBLASLT_LIB_SRC | grep -E $ARCH)
HIPBLASLT_OTHER_FILES=$(ls $HIPBLASLT_LIB_SRC | grep -v gfx)
HIPBLASLT_LIB_FILES=($HIPBLASLT_ARCH_SPECIFIC_FILES $HIPBLASLT_OTHER_FILES)

# hipsparselt library files
HIPSPARSELT_LIB_SRC=$ROCM_HOME/lib/hipsparselt/library
HIPSPARSELT_LIB_DST=lib/hipsparselt/library
HIPSPARSELT_ARCH_SPECIFIC_FILES=$(ls $HIPSPARSELT_LIB_SRC | grep -E $ARCH)
#HIPSPARSELT_OTHER_FILES=$(ls $HIPSPARSELT_LIB_SRC | grep -v gfx)
HIPSPARSELT_LIB_FILES=($HIPSPARSELT_ARCH_SPECIFIC_FILES $HIPSPARSELT_OTHER_FILES)
ARCH_SPECIFIC_FILES=$(ls $HIPBLASLT_LIB_SRC | grep -E $ARCH)
OTHER_FILES=$(ls $HIPBLASLT_LIB_SRC | grep -v gfx)
HIPBLASLT_LIB_FILES=($ARCH_SPECIFIC_FILES $OTHER_FILES)

# ROCm library files
ROCM_SO_PATHS=()

@@ -243,14 +234,12 @@ DEPS_SONAME=(
DEPS_AUX_SRCLIST=(
    "${ROCBLAS_LIB_FILES[@]/#/$ROCBLAS_LIB_SRC/}"
    "${HIPBLASLT_LIB_FILES[@]/#/$HIPBLASLT_LIB_SRC/}"
    "${HIPSPARSELT_LIB_FILES[@]/#/$HIPSPARSELT_LIB_SRC/}"
    "/opt/amdgpu/share/libdrm/amdgpu.ids"
)

DEPS_AUX_DSTLIST=(
    "${ROCBLAS_LIB_FILES[@]/#/$ROCBLAS_LIB_DST/}"
    "${HIPBLASLT_LIB_FILES[@]/#/$HIPBLASLT_LIB_DST/}"
    "${HIPSPARSELT_LIB_FILES[@]/#/$HIPSPARSELT_LIB_DST/}"
    "share/libdrm/amdgpu.ids"
)
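The `"${ARR[@]/#/PREFIX}"` expansions in the two lists above prepend a directory to every array element in one step. A minimal sketch of the idiom (file names and prefix illustrative):

```bash
# The empty pattern anchored at '#' (start of each element) is replaced by the
# prefix, turning bare file names into full source/destination paths.
libs=(librocblas.so libroctx64.so)
prefix=/opt/rocm/lib/
printf '%s\n' "${libs[@]/#/$prefix}"
# -> /opt/rocm/lib/librocblas.so
# -> /opt/rocm/lib/libroctx64.so
```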

@@ -27,12 +27,6 @@ cmake --version
echo "Environment variables:"
env

# The sccache wrapped version of nvcc gets put in /opt/cache/lib in docker since
# there are some issues if it is always wrapped, so we need to add it to PATH
# during CI builds.
# https://github.com/pytorch/pytorch/blob/0b6c0898e6c352c8ea93daec854e704b41485375/.ci/docker/common/install_cache.sh#L97
export PATH="/opt/cache/lib:$PATH"

if [[ "$BUILD_ENVIRONMENT" == *cuda* ]]; then
    # Use jemalloc during compilation to mitigate https://github.com/pytorch/pytorch/issues/116289
    export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.2

@@ -58,6 +52,12 @@ fi
export USE_LLVM=/opt/llvm
export LLVM_DIR=/opt/llvm/lib/cmake/llvm

if [[ "$BUILD_ENVIRONMENT" == *executorch* ]]; then
    # To build test_edge_op_registration
    export BUILD_EXECUTORCH=ON
    export USE_CUDA=0
fi

if ! which conda; then
    # In ROCm CIs, we are doing cross compilation on build machines with
    # intel cpu and later run tests on machines with amd cpu.

@@ -257,7 +257,6 @@ if [[ "$BUILD_ENVIRONMENT" == *-bazel-* ]]; then
    set -e -o pipefail

    get_bazel
    python3 tools/optional_submodules.py checkout_eigen

    # Leave 1 CPU free and use only up to 80% of memory to reduce the change of crashing
    # the runner

@@ -313,7 +313,7 @@ if [[ "$(uname)" == 'Linux' && "$PACKAGE_TYPE" == 'manywheel' ]]; then
    # Please see issue for reference: https://github.com/pytorch/pytorch/issues/152426
    if [[ "$(uname -m)" == "s390x" ]]; then
        cxx_abi="19"
    elif [[ "$DESIRED_CUDA" != 'xpu' && "$DESIRED_CUDA" != 'rocm'* ]]; then
    elif [[ "$DESIRED_CUDA" != 'cu118' && "$DESIRED_CUDA" != 'xpu' && "$DESIRED_CUDA" != 'rocm'* ]]; then
        cxx_abi="18"
    else
        cxx_abi="16"

@@ -15,6 +15,6 @@ if [[ "${BUILD_ENVIRONMENT}" == *rocm* ]]; then
    export PYTORCH_TEST_WITH_ROCM=1
fi

# TODO: Reenable libtorch testing for MacOS, see https://github.com/pytorch/pytorch/issues/62598
# TODO: Renable libtorch testing for MacOS, see https://github.com/pytorch/pytorch/issues/62598
# shellcheck disable=SC2034
BUILD_TEST_LIBTORCH=0
@ -159,6 +159,11 @@ function install_torchvision() {
fi
}

function install_tlparse() {
pip_install --user "tlparse==0.3.30"
PATH="$(python -m site --user-base)/bin:$PATH"
}

function install_torchrec_and_fbgemm() {
local torchrec_commit
torchrec_commit=$(get_pinned_commit torchrec)
@ -197,7 +202,7 @@ function install_torchrec_and_fbgemm() {

function clone_pytorch_xla() {
if [[ ! -d ./xla ]]; then
git clone --recursive -b r2.8 https://github.com/pytorch/xla.git
git clone --recursive --quiet https://github.com/pytorch/xla.git
pushd xla
# pin the xla hash so that we don't get broken by changes to xla
git checkout "$(cat ../.github/ci_commit_pins/xla.txt)"

@ -5,6 +5,11 @@ set -x
# shellcheck source=./macos-common.sh
source "$(dirname "${BASH_SOURCE[0]}")/macos-common.sh"

if [[ -n "$CONDA_ENV" ]]; then
# Use binaries under conda environment
export PATH="$CONDA_ENV/bin":$PATH
fi

# Test that OpenMP is enabled
pushd test
if [[ ! $(python -c "import torch; print(int(torch.backends.openmp.is_available()))") == "1" ]]; then
@ -228,52 +233,53 @@ test_torchbench_smoketest() {
mkdir -p "$TEST_REPORTS_DIR"

local device=mps
local dtypes=(undefined float16 bfloat16 notset)
local dtype=${dtypes[$1]}
local models=(hf_T5 llama BERT_pytorch dcgan hf_GPT2 yolov3 resnet152 sam sam_fast pytorch_unet stable_diffusion_text_encoder speech_transformer Super_SloMo doctr_det_predictor doctr_reco_predictor timm_resnet timm_vovnet vgg16)
local models=(hf_T5 llama BERT_pytorch dcgan hf_GPT2 yolov3 resnet152 sam pytorch_unet stable_diffusion_text_encoder speech_transformer Super_SloMo doctr_det_predictor doctr_reco_predictor)
local hf_models=(GoogleFnet YituTechConvBert Speech2Text2ForCausalLM)

for backend in eager inductor; do

echo "Launching torchbench inference performance run for backend ${backend} and dtype ${dtype}"
local dtype_arg="--${dtype}"
if [ "$dtype" == notset ]; then
dtype_arg="--float32"
fi
touch "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_inference_${device}_performance.csv"
for model in "${models[@]}"; do
PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/torchbench.py \
--performance --only "$model" --backend "$backend" --inference --devices "$device" "$dtype_arg" \
--output "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_inference_${device}_performance.csv" || true
if [ "$backend" == "inductor" ]; then
PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/torchbench.py \
--accuracy --only "$model" --backend "$backend" --inference --devices "$device" "$dtype_arg" \
--output "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_inference_${device}_accuracy.csv" || true
fi
done
if [ "$backend" == "inductor" ]; then
PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/huggingface.py \
--performance --backend "$backend" --inference --devices "$device" "$dtype_arg" \
--output "$TEST_REPORTS_DIR/inductor_${backend}_huggingface_${dtype}_inference_${device}_performance.csv" || true
PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/huggingface.py \
--accuracy --backend "$backend" --inference --devices "$device" "$dtype_arg" \
--output "$TEST_REPORTS_DIR/inductor_${backend}_huggingface_${dtype}_inference_${device}_accuracy.csv" || true
fi

if [ "$dtype" == notset ]; then
for dtype_ in notset amp; do
echo "Launching torchbench training performance run for backend ${backend} and dtype ${dtype_}"
touch "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype_}_training_${device}_performance.csv"
local dtype_arg="--${dtype_}"
if [ "$dtype_" == notset ]; then
for dtype in notset float16 bfloat16; do
echo "Launching torchbench inference performance run for backend ${backend} and dtype ${dtype}"
local dtype_arg="--${dtype}"
if [ "$dtype" == notset ]; then
dtype_arg="--float32"
fi
for model in "${models[@]}"; do
fi
touch "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_inference_${device}_performance.csv"
for model in "${models[@]}"; do
PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/torchbench.py \
--performance --only "$model" --backend "$backend" --inference --devices "$device" "$dtype_arg" \
--output "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_inference_${device}_performance.csv" || true
if [ "$backend" == "inductor" ]; then
PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/torchbench.py \
--performance --only "$model" --backend "$backend" --training --devices "$device" "$dtype_arg" \
--output "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype_}_training_${device}_performance.csv" || true
done
--accuracy --only "$model" --backend "$backend" --inference --devices "$device" "$dtype_arg" \
--output "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_inference_${device}_accuracy.csv" || true
fi
done
fi
for model in "${hf_models[@]}"; do
if [ "$backend" == "inductor" ]; then
PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/huggingface.py \
--performance --only "$model" --backend "$backend" --inference --devices "$device" "$dtype_arg" \
--output "$TEST_REPORTS_DIR/inductor_${backend}_huggingface_${dtype}_inference_${device}_performance.csv" || true
PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/huggingface.py \
--accuracy --only "$model" --backend "$backend" --inference --devices "$device" "$dtype_arg" \
--output "$TEST_REPORTS_DIR/inductor_${backend}_huggingface_${dtype}_inference_${device}_accuracy.csv" || true
fi
done
done

for dtype in notset amp; do
echo "Launching torchbench training performance run for backend ${backend} and dtype ${dtype}"
touch "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_training_${device}_performance.csv"
local dtype_arg="--${dtype}"
if [ "$dtype" == notset ]; then
dtype_arg="--float32"
fi
for model in "${models[@]}"; do
PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/torchbench.py \
--performance --only "$model" --backend "$backend" --training --devices "$device" "$dtype_arg" \
--output "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_training_${device}_performance.csv" || true
done
done

done
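The `notset` → `--float32` fallback recurs in every loop above; as a small sketch, the mapping could be factored into a helper like this (the function name is hypothetical, not part of the script):

```bash
# Map a benchmark dtype name to the flag that benchmarks/dynamo/*.py expects.
dtype_flag() {
  case "$1" in
    notset) echo "--float32" ;;  # "notset" falls back to the default float32 run
    *)      echo "--$1" ;;       # float16 -> --float16, bfloat16 -> --bfloat16, amp -> --amp
  esac
}

dtype_flag notset    # prints --float32
dtype_flag bfloat16  # prints --bfloat16
```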
@ -312,6 +318,8 @@ test_timm_perf() {
echo "timm benchmark on mps device completed"
}

install_tlparse

if [[ $TEST_CONFIG == *"perf_all"* ]]; then
test_torchbench_perf
test_hf_perf
@ -323,7 +331,7 @@ elif [[ $TEST_CONFIG == *"perf_hf"* ]]; then
elif [[ $TEST_CONFIG == *"perf_timm"* ]]; then
test_timm_perf
elif [[ $TEST_CONFIG == *"perf_smoketest"* ]]; then
test_torchbench_smoketest "${SHARD_NUMBER}"
test_torchbench_smoketest
elif [[ $TEST_CONFIG == *"mps"* ]]; then
test_python_mps
elif [[ $NUM_TEST_SHARDS -gt 1 ]]; then

@ -93,7 +93,7 @@ def check_lib_symbols_for_abi_correctness(lib: str) -> None:
f"Found pre-cxx11 symbols, but there shouldn't be any, see: {pre_cxx11_symbols[:100]}"
)
if num_cxx11_symbols < 100:
raise RuntimeError("Didn't find enough cxx11 symbols")


def main() -> None:
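As a rough shell sketch of the kind of check this function performs — counting cxx11-ABI mangled symbols (which contain `__cxx11`) in a shared library; the library path and the threshold of 100 are illustrative:

```bash
LIB=libtorch_cpu.so  # illustrative path
# nm -D lists the dynamic symbol table; count entries mangled with the cxx11 ABI.
num_cxx11=$(nm -D "$LIB" | grep -c '__cxx11' || true)
if [ "$num_cxx11" -lt 100 ]; then
  echo "Didn't find enough cxx11 symbols in $LIB" >&2
  exit 1
fi
```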
@ -46,9 +46,6 @@ def get_gomp_thread():

# use the default gomp path of AlmaLinux OS
libgomp_path = "/usr/lib64/libgomp.so.1"
# if it does not exist, try Ubuntu path
if not os.path.exists(libgomp_path):
libgomp_path = f"/usr/lib/{os.uname().machine}-linux-gnu/libgomp.so.1"

os.environ["GOMP_CPU_AFFINITY"] = "0-3"


@ -276,7 +276,7 @@ def smoke_test_cuda(
torch_nccl_version = ".".join(str(v) for v in torch.cuda.nccl.version())
print(f"Torch nccl; version: {torch_nccl_version}")

# Pypi dependencies are installed on linux only and nccl is available only on Linux.
if pypi_pkg_check == "enabled" and sys.platform in ["linux", "linux2"]:
compare_pypi_to_torch_versions(
"cudnn", find_pypi_package_version("nvidia-cudnn"), torch_cudnn_version
@ -196,7 +196,7 @@ if [[ "$BUILD_ENVIRONMENT" == *xpu* ]]; then
# shellcheck disable=SC1091
source /opt/intel/oneapi/mpi/latest/env/vars.sh
# Check XPU status before testing
timeout 30 xpu-smi discovery || true
xpu-smi discovery
fi

if [[ "$BUILD_ENVIRONMENT" != *-bazel-* ]] ; then
@ -212,6 +212,8 @@ if [[ "$BUILD_ENVIRONMENT" == *aarch64* ]]; then
export VALGRIND=OFF
fi

install_tlparse

# DANGER WILL ROBINSON. The LD_PRELOAD here could cause you problems
# if you're not careful. Check this if you made some changes and the
# ASAN test is not working
@ -224,7 +226,7 @@ if [[ "$BUILD_ENVIRONMENT" == *asan* ]]; then
export PYTORCH_TEST_WITH_ASAN=1
export PYTORCH_TEST_WITH_UBSAN=1
# TODO: Figure out how to avoid hard-coding these paths
export ASAN_SYMBOLIZER_PATH=/usr/lib/llvm-18/bin/llvm-symbolizer
export ASAN_SYMBOLIZER_PATH=/usr/lib/llvm-15/bin/llvm-symbolizer
export TORCH_USE_RTLD_GLOBAL=1
# NB: We load libtorch.so with RTLD_GLOBAL for UBSAN, unlike our
# default behavior.
@ -322,17 +324,6 @@ test_python_smoke() {
assert_git_not_dirty
}

test_h100_distributed() {
# Distributed tests at H100
time python test/run_test.py --include distributed/_composable/test_composability/test_pp_composability.py $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
# This test requires multicast support
time python test/run_test.py --include distributed/_composable/fsdp/test_fully_shard_comm.py -k TestFullyShardAllocFromPG $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
# symmetric memory test
time python test/run_test.py --include distributed/test_symmetric_memory.py $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
time python test/run_test.py --include distributed/test_nvshmem.py $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
assert_git_not_dirty
}

test_lazy_tensor_meta_reference_disabled() {
export TORCH_DISABLE_FUNCTIONALIZATION_META_REFERENCE=1
echo "Testing lazy tensor operations without meta reference"
@ -599,9 +590,7 @@ test_perf_for_dashboard() {

local device=cuda
if [[ "${TEST_CONFIG}" == *cpu* ]]; then
if [[ "${TEST_CONFIG}" == *zen_cpu_x86* ]]; then
device=zen_cpu_x86
elif [[ "${TEST_CONFIG}" == *cpu_x86* ]]; then
if [[ "${TEST_CONFIG}" == *cpu_x86* ]]; then
device=cpu_x86
elif [[ "${TEST_CONFIG}" == *cpu_aarch64* ]]; then
device=cpu_aarch64
@ -1142,12 +1131,6 @@ test_custom_backend() {

test_custom_script_ops() {
echo "Testing custom script operators"

if [[ "$BUILD_ENVIRONMENT" == *s390x* ]]; then
echo "Skipping custom script operators until it's fixed"
return 0
fi

CUSTOM_OP_BUILD="${CUSTOM_TEST_ARTIFACT_BUILD_DIR}/custom-op-build"
pushd test/custom_operator
cp -a "$CUSTOM_OP_BUILD" build
@ -1537,7 +1520,7 @@ test_executorch() {
test_linux_aarch64() {
python test/run_test.py --include test_modules test_mkldnn test_mkldnn_fusion test_openmp test_torch test_dynamic_shapes \
test_transformers test_multiprocessing test_numpy_interop test_autograd test_binary_ufuncs test_complex test_spectral_ops \
test_foreach test_reductions test_unary_ufuncs test_tensor_creation_ops test_ops test_cpp_extensions_open_device_registration \
test_foreach test_reductions test_unary_ufuncs test_tensor_creation_ops test_ops \
--shard "$SHARD_NUMBER" "$NUM_TEST_SHARDS" --verbose

# Dynamo tests
@ -1571,8 +1554,7 @@ test_operator_benchmark() {

cd "${TEST_DIR}"/benchmarks/operator_benchmark
$TASKSET python -m benchmark_all_test --device "$1" --tag-filter "$2" \
--output-csv "${TEST_REPORTS_DIR}/operator_benchmark_eager_float32_cpu.csv" \
--output-json-for-dashboard "${TEST_REPORTS_DIR}/operator_benchmark_eager_float32_cpu.json" \
--output-dir "${TEST_REPORTS_DIR}/operator_benchmark_eager_float32_cpu.csv"

pip_install pandas
python check_perf_csv.py \
@ -1657,7 +1639,7 @@ elif [[ "${TEST_CONFIG}" == *torchbench* ]]; then
install_torchaudio cuda
fi
install_torchvision
TORCH_CUDA_ARCH_LIST="8.0;8.6" install_torchao
TORCH_CUDA_ARCH_LIST="8.0;8.6" pip_install git+https://github.com/pytorch/ao.git
id=$((SHARD_NUMBER-1))
# https://github.com/opencv/opencv-python/issues/885
pip_install opencv-python==4.8.0.74
@ -1742,8 +1724,6 @@ elif [[ "${BUILD_ENVIRONMENT}" == *xpu* ]]; then
test_xpu_bin
elif [[ "${TEST_CONFIG}" == smoke ]]; then
test_python_smoke
elif [[ "${TEST_CONFIG}" == h100_distributed ]]; then
test_h100_distributed
else
install_torchvision
install_monkeytype

@ -16,7 +16,7 @@ target_link_libraries(simple-torch-test CUDA::cudart CUDA::cufft CUDA::cusparse
find_library(CUDNN_LIBRARY NAMES cudnn)
target_link_libraries(simple-torch-test ${CUDNN_LIBRARY} )
if(MSVC)
file(GLOB TORCH_DLLS "$ENV{CUDA_PATH}/bin/cudnn64_8.dll")
file(GLOB TORCH_DLLS "$ENV{CUDA_PATH}/bin/cudnn64_8.dll" "$ENV{NVTOOLSEXT_PATH}/bin/x64/*.dll")
message("dlls to copy " ${TORCH_DLLS})
add_custom_command(TARGET simple-torch-test
POST_BUILD

@ -31,7 +31,7 @@ PYLONG_API_CHECK=$?
if [[ $PYLONG_API_CHECK == 0 ]]; then
echo "Usage of PyLong_{From,As}{Unsigned}Long API may lead to overflow errors on Windows"
echo "because \`sizeof(long) == 4\` and \`sizeof(unsigned long) == 4\`."
echo "Please include \"torch/csrc/utils/python_numbers.h\" and use the corresponding APIs instead."
echo "PyLong_FromLong -> THPUtils_packInt32 / THPUtils_packInt64"
echo "PyLong_AsLong -> THPUtils_unpackInt (32-bit) / THPUtils_unpackLong (64-bit)"
echo "PyLong_FromUnsignedLong -> THPUtils_packUInt32 / THPUtils_packUInt64"

@ -10,7 +10,7 @@ set PATH=C:\Program Files\CMake\bin;C:\Program Files\7-Zip;C:\ProgramData\chocol
:: able to see what our cl.exe commands are (since you can actually
:: just copy-paste them into a local Windows setup to just rebuild a
:: single file.)
:: log sizes are too long, but leaving this here in case someone wants to use it locally
:: set CMAKE_VERBOSE_MAKEFILE=1



@ -52,7 +52,7 @@ if __name__ == "__main__":
if os.path.exists(debugger):
command_args = [debugger, "-o", "-c", "~*g; q"] + command_args
command_string = " ".join(command_args)
print("Rerunning with traceback enabled")
print("Command:", command_string)
subprocess.run(command_args, check=False)
sys.exit(e.returncode)
59
.ci/pytorch/windows/cuda118.bat
Normal file
@ -0,0 +1,59 @@
@echo off

set MODULE_NAME=pytorch

IF NOT EXIST "setup.py" IF NOT EXIST "%MODULE_NAME%" (
call internal\clone.bat
cd %~dp0
) ELSE (
call internal\clean.bat
)
IF ERRORLEVEL 1 goto :eof

call internal\check_deps.bat
IF ERRORLEVEL 1 goto :eof

REM Check for optional components

set USE_CUDA=
set CMAKE_GENERATOR=Visual Studio 15 2017 Win64

IF "%NVTOOLSEXT_PATH%"=="" (
IF EXIST "C:\Program Files\NVIDIA Corporation\NvToolsExt\lib\x64\nvToolsExt64_1.lib" (
set NVTOOLSEXT_PATH=C:\Program Files\NVIDIA Corporation\NvToolsExt
) ELSE (
echo NVTX ^(Visual Studio Extension ^for CUDA^) ^not installed, failing
exit /b 1
)
)

IF "%CUDA_PATH_V118%"=="" (
IF EXIST "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.8\bin\nvcc.exe" (
set "CUDA_PATH_V118=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.8"
) ELSE (
echo CUDA 11.8 not found, failing
exit /b 1
)
)

IF "%BUILD_VISION%" == "" (
set TORCH_CUDA_ARCH_LIST=3.7+PTX;5.0;6.0;6.1;7.0;7.5;8.0;8.6;9.0
set TORCH_NVCC_FLAGS=-Xfatbin -compress-all
) ELSE (
set NVCC_FLAGS=-D__CUDA_NO_HALF_OPERATORS__ --expt-relaxed-constexpr -gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=compute_80 -gencode=arch=compute_86,code=compute_86 -gencode=arch=compute_90,code=compute_90
)

set "CUDA_PATH=%CUDA_PATH_V118%"
set "PATH=%CUDA_PATH_V118%\bin;%PATH%"

:optcheck

call internal\check_opts.bat
IF ERRORLEVEL 1 goto :eof

if exist "%NIGHTLIES_PYTORCH_ROOT%" cd %NIGHTLIES_PYTORCH_ROOT%\..
call %~dp0\internal\copy.bat
IF ERRORLEVEL 1 goto :eof

call %~dp0\internal\setup.bat
IF ERRORLEVEL 1 goto :eof

59
.ci/pytorch/windows/cuda124.bat
Normal file
@ -0,0 +1,59 @@
@echo off

set MODULE_NAME=pytorch

IF NOT EXIST "setup.py" IF NOT EXIST "%MODULE_NAME%" (
call internal\clone.bat
cd %~dp0
) ELSE (
call internal\clean.bat
)
IF ERRORLEVEL 1 goto :eof

call internal\check_deps.bat
IF ERRORLEVEL 1 goto :eof

REM Check for optional components

set USE_CUDA=
set CMAKE_GENERATOR=Visual Studio 15 2017 Win64

IF "%NVTOOLSEXT_PATH%"=="" (
IF EXIST "C:\Program Files\NVIDIA Corporation\NvToolsExt\lib\x64\nvToolsExt64_1.lib" (
set NVTOOLSEXT_PATH=C:\Program Files\NVIDIA Corporation\NvToolsExt
) ELSE (
echo NVTX ^(Visual Studio Extension ^for CUDA^) ^not installed, failing
exit /b 1
)
)

IF "%CUDA_PATH_V124%"=="" (
IF EXIST "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\bin\nvcc.exe" (
set "CUDA_PATH_V124=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4"
) ELSE (
echo CUDA 12.4 not found, failing
exit /b 1
)
)

IF "%BUILD_VISION%" == "" (
set TORCH_CUDA_ARCH_LIST=6.1;7.0;7.5;8.0;8.6;9.0
set TORCH_NVCC_FLAGS=-Xfatbin -compress-all
) ELSE (
set NVCC_FLAGS=-D__CUDA_NO_HALF_OPERATORS__ --expt-relaxed-constexpr -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=compute_80 -gencode=arch=compute_86,code=compute_86 -gencode=arch=compute_90,code=compute_90
)

set "CUDA_PATH=%CUDA_PATH_V124%"
set "PATH=%CUDA_PATH_V124%\bin;%PATH%"

:optcheck

call internal\check_opts.bat
IF ERRORLEVEL 1 goto :eof

if exist "%NIGHTLIES_PYTORCH_ROOT%" cd %NIGHTLIES_PYTORCH_ROOT%\..
call %~dp0\internal\copy.bat
IF ERRORLEVEL 1 goto :eof

call %~dp0\internal\setup.bat
IF ERRORLEVEL 1 goto :eof
@ -18,6 +18,15 @@ REM Check for optional components
set USE_CUDA=
set CMAKE_GENERATOR=Visual Studio 15 2017 Win64

IF "%NVTOOLSEXT_PATH%"=="" (
IF EXIST "C:\Program Files\NVIDIA Corporation\NvToolsExt\lib\x64\nvToolsExt64_1.lib" (
set NVTOOLSEXT_PATH=C:\Program Files\NVIDIA Corporation\NvToolsExt
) ELSE (
echo NVTX ^(Visual Studio Extension ^for CUDA^) ^not installed, failing
exit /b 1
)
)

IF "%CUDA_PATH_V126%"=="" (
IF EXIST "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.6\bin\nvcc.exe" (
set "CUDA_PATH_V126=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.6"

@ -18,6 +18,15 @@ REM Check for optional components
set USE_CUDA=
set CMAKE_GENERATOR=Visual Studio 15 2017 Win64

IF "%NVTOOLSEXT_PATH%"=="" (
IF EXIST "C:\Program Files\NVIDIA Corporation\NvToolsExt\lib\x64\nvToolsExt64_1.lib" (
set NVTOOLSEXT_PATH=C:\Program Files\NVIDIA Corporation\NvToolsExt
) ELSE (
echo NVTX ^(Visual Studio Extension ^for CUDA^) ^not installed, failing
exit /b 1
)
)

IF "%CUDA_PATH_V128%"=="" (
IF EXIST "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.8\bin\nvcc.exe" (
set "CUDA_PATH_V128=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.8"

@ -1,50 +0,0 @@
@echo off

set MODULE_NAME=pytorch

IF NOT EXIST "setup.py" IF NOT EXIST "%MODULE_NAME%" (
call internal\clone.bat
cd %~dp0
) ELSE (
call internal\clean.bat
)
IF ERRORLEVEL 1 goto :eof

call internal\check_deps.bat
IF ERRORLEVEL 1 goto :eof

REM Check for optional components

set USE_CUDA=
set CMAKE_GENERATOR=Visual Studio 15 2017 Win64

IF "%CUDA_PATH_V129%"=="" (
IF EXIST "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.9\bin\nvcc.exe" (
set "CUDA_PATH_V129=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.9"
) ELSE (
echo CUDA 12.9 not found, failing
exit /b 1
)
)

IF "%BUILD_VISION%" == "" (
set TORCH_CUDA_ARCH_LIST=7.5;8.0;8.6;9.0;10.0;12.0
set TORCH_NVCC_FLAGS=-Xfatbin -compress-all
) ELSE (
set NVCC_FLAGS=-D__CUDA_NO_HALF_OPERATORS__ --expt-relaxed-constexpr -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=compute_80 -gencode=arch=compute_86,code=compute_86 -gencode=arch=compute_90,code=compute_90 -gencode=arch=compute_100,code=compute_100 -gencode=arch=compute_120,code=compute_120
)

set "CUDA_PATH=%CUDA_PATH_V129%"
set "PATH=%CUDA_PATH_V129%\bin;%PATH%"

:optcheck

call internal\check_opts.bat
IF ERRORLEVEL 1 goto :eof

if exist "%NIGHTLIES_PYTORCH_ROOT%" cd %NIGHTLIES_PYTORCH_ROOT%\..
call %~dp0\internal\copy.bat
IF ERRORLEVEL 1 goto :eof

call %~dp0\internal\setup.bat
IF ERRORLEVEL 1 goto :eof
@ -65,7 +65,7 @@ for /F "usebackq delims=" %%i in (`python -c "import sys; print('{0[0]}{0[1]}'.f
if %PYVER% LSS 35 (
echo Warning: PyTorch for Python 2 under Windows is experimental.
echo Python x64 3.5 or up is recommended to compile PyTorch on Windows
echo Maybe you can create a virtual environment if you have conda installed:
echo ^> conda create -n test python=3.6 pyyaml numpy
echo ^> activate test
)
@ -9,6 +9,7 @@ copy "%CUDA_PATH%\bin\cudnn*64_*.dll*" pytorch\torch\lib
copy "%CUDA_PATH%\bin\nvrtc*64_*.dll*" pytorch\torch\lib
copy "%CUDA_PATH%\extras\CUPTI\lib64\cupti64_*.dll*" pytorch\torch\lib

copy "C:\Program Files\NVIDIA Corporation\NvToolsExt\bin\x64\nvToolsExt64_1.dll*" pytorch\torch\lib
copy "%PYTHON_LIB_PATH%\libiomp*5md.dll" pytorch\torch\lib

:: Should be set in build_pytorch.bat

@ -23,13 +23,66 @@ set CUDNN_LIB_FOLDER="lib\x64"
:: Skip all of this if we already have cuda installed
if exist "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION_STR%\bin\nvcc.exe" goto set_cuda_env_vars

if %CUDA_VER% EQU 118 goto cuda118
if %CUDA_VER% EQU 124 goto cuda124
if %CUDA_VER% EQU 126 goto cuda126
if %CUDA_VER% EQU 128 goto cuda128
if %CUDA_VER% EQU 129 goto cuda129

echo CUDA %CUDA_VERSION_STR% is not supported
exit /b 1

:cuda118

set CUDA_INSTALL_EXE=cuda_11.8.0_522.06_windows.exe
if not exist "%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%" (
curl -k -L "https://ossci-windows.s3.amazonaws.com/%CUDA_INSTALL_EXE%" --output "%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%" & REM @lint-ignore
if errorlevel 1 exit /b 1
set "CUDA_SETUP_FILE=%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%"
set "ARGS=cuda_profiler_api_11.8 thrust_11.8 nvcc_11.8 cuobjdump_11.8 nvprune_11.8 nvprof_11.8 cupti_11.8 cublas_11.8 cublas_dev_11.8 cudart_11.8 cufft_11.8 cufft_dev_11.8 curand_11.8 curand_dev_11.8 cusolver_11.8 cusolver_dev_11.8 cusparse_11.8 cusparse_dev_11.8 npp_11.8 npp_dev_11.8 nvrtc_11.8 nvrtc_dev_11.8 nvml_dev_11.8 nvtx_11.8"
)

set CUDNN_FOLDER=cudnn-windows-x86_64-9.5.0.50_cuda11-archive
set CUDNN_LIB_FOLDER="lib"
set "CUDNN_INSTALL_ZIP=%CUDNN_FOLDER%.zip"
if not exist "%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%" (
curl -k -L "http://s3.amazonaws.com/ossci-windows/%CUDNN_INSTALL_ZIP%" --output "%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%" & REM @lint-ignore
if errorlevel 1 exit /b 1
set "CUDNN_SETUP_FILE=%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%"
)

@REM cuDNN 8.3+ requires zlib to be installed on the path
echo Installing ZLIB dlls
curl -k -L "http://s3.amazonaws.com/ossci-windows/zlib123dllx64.zip" --output "%SRC_DIR%\temp_build\zlib123dllx64.zip"
7z x "%SRC_DIR%\temp_build\zlib123dllx64.zip" -o"%SRC_DIR%\temp_build\zlib"
xcopy /Y "%SRC_DIR%\temp_build\zlib\dll_x64\*.dll" "C:\Windows\System32"

goto cuda_common

:cuda124

set CUDA_INSTALL_EXE=cuda_12.4.0_551.61_windows.exe
if not exist "%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%" (
curl -k -L "https://ossci-windows.s3.amazonaws.com/%CUDA_INSTALL_EXE%" --output "%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%" & REM @lint-ignore
if errorlevel 1 exit /b 1
set "CUDA_SETUP_FILE=%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%"
set "ARGS=cuda_profiler_api_12.4 thrust_12.4 nvcc_12.4 cuobjdump_12.4 nvprune_12.4 nvprof_12.4 cupti_12.4 cublas_12.4 cublas_dev_12.4 cudart_12.4 cufft_12.4 cufft_dev_12.4 curand_12.4 curand_dev_12.4 cusolver_12.4 cusolver_dev_12.4 cusparse_12.4 cusparse_dev_12.4 npp_12.4 npp_dev_12.4 nvrtc_12.4 nvrtc_dev_12.4 nvml_dev_12.4 nvjitlink_12.4 nvtx_12.4"
)

set CUDNN_FOLDER=cudnn-windows-x86_64-9.5.0.50_cuda12-archive
set CUDNN_LIB_FOLDER="lib"
set "CUDNN_INSTALL_ZIP=%CUDNN_FOLDER%.zip"
if not exist "%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%" (
curl -k -L "http://s3.amazonaws.com/ossci-windows/%CUDNN_INSTALL_ZIP%" --output "%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%" & REM @lint-ignore
if errorlevel 1 exit /b 1
set "CUDNN_SETUP_FILE=%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%"
)

@REM cuDNN 8.3+ requires zlib to be installed on the path
echo Installing ZLIB dlls
curl -k -L "http://s3.amazonaws.com/ossci-windows/zlib123dllx64.zip" --output "%SRC_DIR%\temp_build\zlib123dllx64.zip"
7z x "%SRC_DIR%\temp_build\zlib123dllx64.zip" -o"%SRC_DIR%\temp_build\zlib"
xcopy /Y "%SRC_DIR%\temp_build\zlib\dll_x64\*.dll" "C:\Windows\System32"

goto cuda_common

:cuda126
@ -86,39 +139,17 @@ xcopy /Y "%SRC_DIR%\temp_build\zlib\dll_x64\*.dll" "C:\Windows\System32"

goto cuda_common

:cuda129

set CUDA_INSTALL_EXE=cuda_12.9.1_576.57_windows.exe
if not exist "%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%" (
curl -k -L "https://ossci-windows.s3.amazonaws.com/%CUDA_INSTALL_EXE%" --output "%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%" & REM @lint-ignore
if errorlevel 1 exit /b 1
set "CUDA_SETUP_FILE=%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%"
set "ARGS=cuda_profiler_api_12.9 thrust_12.9 nvcc_12.9 cuobjdump_12.9 nvprune_12.9 nvprof_12.9 cupti_12.9 cublas_12.9 cublas_dev_12.9 cudart_12.9 cufft_12.9 cufft_dev_12.9 curand_12.9 curand_dev_12.9 cusolver_12.9 cusolver_dev_12.9 cusparse_12.9 cusparse_dev_12.9 npp_12.9 npp_dev_12.9 nvrtc_12.9 nvrtc_dev_12.9 nvml_dev_12.9 nvjitlink_12.9 nvtx_12.9"
)

set CUDNN_FOLDER=cudnn-windows-x86_64-9.10.2.21_cuda12-archive
set CUDNN_LIB_FOLDER="lib"
set "CUDNN_INSTALL_ZIP=%CUDNN_FOLDER%.zip"
if not exist "%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%" (
curl -k -L "http://s3.amazonaws.com/ossci-windows/%CUDNN_INSTALL_ZIP%" --output "%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%" & REM @lint-ignore
if errorlevel 1 exit /b 1
set "CUDNN_SETUP_FILE=%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%"
)

@REM cuDNN 8.3+ requires zlib to be installed on the path
echo Installing ZLIB dlls
curl -k -L "http://s3.amazonaws.com/ossci-windows/zlib123dllx64.zip" --output "%SRC_DIR%\temp_build\zlib123dllx64.zip"
7z x "%SRC_DIR%\temp_build\zlib123dllx64.zip" -o"%SRC_DIR%\temp_build\zlib"
xcopy /Y "%SRC_DIR%\temp_build\zlib\dll_x64\*.dll" "C:\Windows\System32"

goto cuda_common

:cuda_common
:: NOTE: We only install CUDA if we don't have it installed already.
:: With GHA runners these should be pre-installed as part of our AMI process
:: If you cannot find the CUDA version you want to build for here then please
:: add it @ https://github.com/pytorch/test-infra/tree/main/aws/ami/windows
if not exist "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION_STR%\bin\nvcc.exe" (
if not exist "%SRC_DIR%\temp_build\NvToolsExt.7z" (
curl -k -L https://ossci-windows.s3.us-east-1.amazonaws.com/builder/NvToolsExt.7z --output "%SRC_DIR%\temp_build\NvToolsExt.7z"
if errorlevel 1 exit /b 1
)

if not exist "%SRC_DIR%\temp_build\gpu_driver_dlls.zip" (
curl -k -L "https://ossci-windows.s3.us-east-1.amazonaws.com/builder/additional_dlls.zip" --output "%SRC_DIR%\temp_build\gpu_driver_dlls.zip"
if errorlevel 1 exit /b 1
@ -145,6 +176,15 @@ if not exist "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION_
xcopy /Y "%SRC_DIR%\temp_build\cuda\CUDAVisualStudioIntegration\extras\visual_studio_integration\MSBuildExtensions\*.*" "C:\Program Files (x86)\Microsoft Visual Studio\2022\BuildTools\MSBuild\Microsoft\VC\v170\BuildCustomizations"
)

echo Installing NvToolsExt...
7z x %SRC_DIR%\temp_build\NvToolsExt.7z -o"%SRC_DIR%\temp_build\NvToolsExt"
mkdir "%ProgramFiles%\NVIDIA Corporation\NvToolsExt\bin\x64"
mkdir "%ProgramFiles%\NVIDIA Corporation\NvToolsExt\include"
mkdir "%ProgramFiles%\NVIDIA Corporation\NvToolsExt\lib\x64"
xcopy /Y "%SRC_DIR%\temp_build\NvToolsExt\bin\x64\*.*" "%ProgramFiles%\NVIDIA Corporation\NvToolsExt\bin\x64"
xcopy /Y "%SRC_DIR%\temp_build\NvToolsExt\include\*.*" "%ProgramFiles%\NVIDIA Corporation\NvToolsExt\include"
xcopy /Y "%SRC_DIR%\temp_build\NvToolsExt\lib\x64\*.*" "%ProgramFiles%\NVIDIA Corporation\NvToolsExt\lib\x64"

echo Installing cuDNN...
7z x %CUDNN_SETUP_FILE% -o"%SRC_DIR%\temp_build\cudnn"
xcopy /Y "%SRC_DIR%\temp_build\cudnn\%CUDNN_FOLDER%\bin\*.*" "%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION_STR%\bin"
@ -175,3 +215,4 @@ echo Setting up environment...
set "PATH=%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION_STR%\bin;%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION_STR%\libnvvp;%PATH%"
set "CUDA_PATH=%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION_STR%"
set "CUDA_PATH_V%CUDA_VER_MAJOR%_%CUDA_VER_MINOR%=%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION_STR%"
set "NVTOOLSEXT_PATH=%ProgramFiles%\NVIDIA Corporation\NvToolsExt"
@ -99,6 +99,7 @@ goto end
:libtorch
echo "install and test libtorch"

if "%VC_YEAR%" == "2019" powershell internal\vs2019_install.ps1
if "%VC_YEAR%" == "2022" powershell internal\vs2022_install.ps1

if ERRORLEVEL 1 exit /b 1
@ -110,6 +111,10 @@ pushd tmp\libtorch

set VC_VERSION_LOWER=17
set VC_VERSION_UPPER=18
IF "%VC_YEAR%" == "2019" (
set VC_VERSION_LOWER=16
set VC_VERSION_UPPER=17
)

for /f "usebackq tokens=*" %%i in (`"%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" -legacy -products * -version [%VC_VERSION_LOWER%^,%VC_VERSION_UPPER%^) -property installationPath`) do (
if exist "%%i" if exist "%%i\VC\Auxiliary\Build\vcvarsall.bat" (

@ -1,7 +1,14 @@
if "%VC_YEAR%" == "2019" powershell windows/internal/vs2019_install.ps1
if "%VC_YEAR%" == "2022" powershell windows/internal/vs2022_install.ps1

set VC_VERSION_LOWER=17
set VC_VERSION_UPPER=18
:: Please don't delete VS2019 as an alternative, in case of some Windows compiler issue.
:: Reference: https://github.com/pytorch/pytorch/issues/145702#issuecomment-2858693930
if "%VC_YEAR%" == "2019" (
set VC_VERSION_LOWER=16
set VC_VERSION_UPPER=17
)

for /f "usebackq tokens=*" %%i in (`"%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" -products Microsoft.VisualStudio.Product.BuildTools -version [%VC_VERSION_LOWER%^,%VC_VERSION_UPPER%^) -property installationPath`) do (
if exist "%%i" if exist "%%i\VC\Auxiliary\Build\vcvarsall.bat" (
48
.ci/pytorch/windows/internal/vs2019_install.ps1
Normal file
@ -0,0 +1,48 @@
# https://developercommunity.visualstudio.com/t/install-specific-version-of-vs-component/1142479
# https://docs.microsoft.com/en-us/visualstudio/releases/2019/history#release-dates-and-build-numbers

# 16.8.6 BuildTools
$VS_DOWNLOAD_LINK = "https://ossci-windows.s3.us-east-1.amazonaws.com/vs16.8.6_BuildTools.exe"
$COLLECT_DOWNLOAD_LINK = "https://aka.ms/vscollect.exe"
$VS_INSTALL_ARGS = @("--nocache","--quiet","--wait", "--add Microsoft.VisualStudio.Workload.VCTools",
"--add Microsoft.Component.MSBuild",
"--add Microsoft.VisualStudio.Component.Roslyn.Compiler",
"--add Microsoft.VisualStudio.Component.TextTemplating",
"--add Microsoft.VisualStudio.Component.VC.CoreIde",
"--add Microsoft.VisualStudio.Component.VC.Redist.14.Latest",
"--add Microsoft.VisualStudio.ComponentGroup.NativeDesktop.Core",
"--add Microsoft.VisualStudio.Component.VC.Tools.x86.x64",
"--add Microsoft.VisualStudio.ComponentGroup.NativeDesktop.Win81")

curl.exe --retry 3 -kL $VS_DOWNLOAD_LINK --output vs_installer.exe
if ($LASTEXITCODE -ne 0) {
echo "Download of the VS 2019 Version 16.8.6 installer failed"
exit 1
}

if (Test-Path "${env:ProgramFiles(x86)}\Microsoft Visual Studio\Installer\vswhere.exe") {
$existingPath = & "${env:ProgramFiles(x86)}\Microsoft Visual Studio\Installer\vswhere.exe" -products "Microsoft.VisualStudio.Product.BuildTools" -version "[16, 17)" -property installationPath
if ($existingPath -ne $null) {
if (!${env:CIRCLECI}) {
echo "Found correctly versioned existing BuildTools installation in $existingPath"
exit 0
}
echo "Found existing BuildTools installation in $existingPath, keeping it"
}
}

$process = Start-Process "${PWD}\vs_installer.exe" -ArgumentList $VS_INSTALL_ARGS -NoNewWindow -Wait -PassThru
Remove-Item -Path vs_installer.exe -Force
$exitCode = $process.ExitCode
if (($exitCode -ne 0) -and ($exitCode -ne 3010)) {
echo "VS 2019 installer exited with code $exitCode, which should be one of [0, 3010]."
curl.exe --retry 3 -kL $COLLECT_DOWNLOAD_LINK --output Collect.exe
if ($LASTEXITCODE -ne 0) {
echo "Download of the VS Collect tool failed."
exit 1
}
Start-Process "${PWD}\Collect.exe" -NoNewWindow -Wait -PassThru
New-Item -Path "C:\w\build-results" -ItemType "directory" -Force
Copy-Item -Path "C:\Users\${env:USERNAME}\AppData\Local\Temp\vslogs.zip" -Destination "C:\w\build-results\"
exit 1
}
@ -25,8 +25,8 @@ set XPU_EXTRA_INSTALLED=0
set XPU_EXTRA_UNINSTALL=0

if not [%XPU_VERSION%]==[] if [%XPU_VERSION%]==[2025.1] (
set XPU_BUNDLE_URL=https://registrationcenter-download.intel.com/akdlm/IRC_NAS/75d4eb97-914a-4a95-852c-7b9733d80f74/intel-deep-learning-essentials-2025.1.3.8_offline.exe
set XPU_BUNDLE_VERSION=2025.1.3+5
set XPU_BUNDLE_URL=https://registrationcenter-download.intel.com/akdlm/IRC_NAS/1a9fff3d-04c2-4d77-8861-3d86c774b66f/intel-deep-learning-essentials-2025.1.1.26_offline.exe
set XPU_BUNDLE_VERSION=2025.1.1+23
)

:: Check if XPU bundle is target version or already installed

@ -206,7 +206,7 @@ if [[ "$USE_SPLIT_BUILD" == "true" ]]; then
BUILD_LIBTORCH_WHL=1 BUILD_PYTHON_ONLY=0 python setup.py bdist_wheel -d "$whl_tmp_dir"
echo "Finished setup.py bdist_wheel for split build (BUILD_LIBTORCH_WHL)"
echo "Calling setup.py bdist_wheel for split build (BUILD_PYTHON_ONLY)"
BUILD_LIBTORCH_WHL=0 BUILD_PYTHON_ONLY=1 CMAKE_FRESH=1 python setup.py bdist_wheel -d "$whl_tmp_dir"
BUILD_PYTHON_ONLY=1 BUILD_LIBTORCH_WHL=0 python setup.py bdist_wheel -d "$whl_tmp_dir" --cmake
echo "Finished setup.py bdist_wheel for split build (BUILD_PYTHON_ONLY)"
else
python setup.py bdist_wheel -d "$whl_tmp_dir"

@ -105,7 +105,6 @@ fi

# Set triton via PYTORCH_EXTRA_INSTALL_REQUIREMENTS for triton xpu package
if [[ "$PACKAGE_TYPE" =~ .*wheel.* && -n "$PYTORCH_BUILD_VERSION" && "$PYTORCH_BUILD_VERSION" =~ .*xpu.* ]]; then
TRITON_VERSION=$(cat $PYTORCH_ROOT/.ci/docker/triton_xpu_version.txt)
TRITON_REQUIREMENT="pytorch-triton-xpu==${TRITON_VERSION}"
if [[ -n "$PYTORCH_BUILD_VERSION" && "$PYTORCH_BUILD_VERSION" =~ .*dev.* ]]; then
TRITON_SHORTHASH=$(cut -c1-8 $PYTORCH_ROOT/.ci/docker/ci_commit_pins/triton-xpu.txt)
157
.circleci/scripts/trigger_azure_pipeline.py
Normal file
@ -0,0 +1,157 @@
# Documentation: https://docs.microsoft.com/en-us/rest/api/azure/devops/build/?view=azure-devops-rest-6.0

import json
import os
import re
import sys
import time

import requests


AZURE_PIPELINE_BASE_URL = "https://aiinfra.visualstudio.com/PyTorch/"
AZURE_DEVOPS_PAT_BASE64 = os.environ.get("AZURE_DEVOPS_PAT_BASE64_SECRET", "")
PIPELINE_ID = "911"
PROJECT_ID = "0628bce4-2d33-499e-bac5-530e12db160f"
TARGET_BRANCH = os.environ.get("CIRCLE_BRANCH", "main")
TARGET_COMMIT = os.environ.get("CIRCLE_SHA1", "")

build_base_url = AZURE_PIPELINE_BASE_URL + "_apis/build/builds?api-version=6.0"

s = requests.Session()
s.headers.update({"Authorization": "Basic " + AZURE_DEVOPS_PAT_BASE64})


def submit_build(pipeline_id, project_id, source_branch, source_version):
print("Submitting build for branch: " + source_branch)
print("Commit SHA1: ", source_version)

run_build_raw = s.post(
build_base_url,
json={
"definition": {"id": pipeline_id},
"project": {"id": project_id},
"sourceBranch": source_branch,
"sourceVersion": source_version,
},
)

try:
run_build_json = run_build_raw.json()
except json.decoder.JSONDecodeError as e:
print(e)
print(
"Failed to parse the response. Check if the Azure DevOps PAT is incorrect or expired."
)
sys.exit(-1)

build_id = run_build_json["id"]

print("Submitted build: " + str(build_id))
print("Build URL: " + run_build_json["url"])
return build_id


def get_build(_id):
get_build_url = (
AZURE_PIPELINE_BASE_URL + f"/_apis/build/builds/{_id}?api-version=6.0"
)
get_build_raw = s.get(get_build_url)
return get_build_raw.json()


def get_build_logs(_id):
get_build_logs_url = (
AZURE_PIPELINE_BASE_URL + f"/_apis/build/builds/{_id}/logs?api-version=6.0"
)
get_build_logs_raw = s.get(get_build_logs_url)
return get_build_logs_raw.json()


def get_log_content(url):
resp = s.get(url)
return resp.text


def wait_for_build(_id):
build_detail = get_build(_id)
build_status = build_detail["status"]

while build_status == "notStarted":
print("Waiting for run to start: " + str(_id))
sys.stdout.flush()
try:
build_detail = get_build(_id)
build_status = build_detail["status"]
except Exception as e:
print("Error getting build")
print(e)

time.sleep(30)

print("Build started: ", str(_id))

handled_logs = set()
while build_status == "inProgress":
try:
print("Waiting for log: " + str(_id))
logs = get_build_logs(_id)
except Exception as e:
print("Error fetching logs")
print(e)
time.sleep(30)
continue

for log in logs["value"]:
log_id = log["id"]
if log_id in handled_logs:
continue
handled_logs.add(log_id)
print("Fetching log: \n" + log["url"])
try:
log_content = get_log_content(log["url"])
print(log_content)
except Exception as e:
print("Error getting log content")
print(e)
sys.stdout.flush()
build_detail = get_build(_id)
build_status = build_detail["status"]
time.sleep(30)

build_result = build_detail["result"]

print("Build status: " + build_status)
print("Build result: " + build_result)

return build_status, build_result


if __name__ == "__main__":
# Convert the branch name for Azure DevOps
match = re.search(r"pull/(\d+)", TARGET_BRANCH)
if match is not None:
pr_num = match.group(1)
SOURCE_BRANCH = f"refs/pull/{pr_num}/head"
else:
SOURCE_BRANCH = f"refs/heads/{TARGET_BRANCH}"

MAX_RETRY = 2
retry = MAX_RETRY

while retry > 0:
build_id = submit_build(PIPELINE_ID, PROJECT_ID, SOURCE_BRANCH, TARGET_COMMIT)
build_status, build_result = wait_for_build(build_id)

if build_result != "succeeded":
retry = retry - 1
if retry > 0:
print("Retrying... remaining attempt: " + str(retry))
# Wait a bit before retrying
time.sleep((MAX_RETRY - retry) * 120)
continue
else:
print("No more chance to retry. Giving up.")
sys.exit(-1)
else:
break
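The retry loop above sleeps `(MAX_RETRY - retry) * 120` seconds between attempts, a linearly growing backoff. The same pattern as a shell sketch, with a hypothetical command standing in for submit-and-wait:

```bash
MAX_RETRY=2
retry=$MAX_RETRY
while [ "$retry" -gt 0 ]; do
  # ./trigger_and_wait.sh is a stand-in for submit_build + wait_for_build.
  if ./trigger_and_wait.sh; then
    break
  fi
  retry=$((retry - 1))
  if [ "$retry" -gt 0 ]; then
    sleep $(( (MAX_RETRY - retry) * 120 ))  # 120s after the first failure, 240s after the second, ...
  fi
done
```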
5
.github/ISSUE_TEMPLATE/bug-report.yml
vendored
@ -12,9 +12,7 @@ body:
description: |
Please provide a clear and concise description of what the bug is.

If relevant, add a minimal example so that we can reproduce the error by running the code. It is very important for the snippet to be as succinct (minimal) as possible, so please take time to trim down any irrelevant code to help us debug efficiently.
Your example should be fully self-contained and not rely on any artifact that should be downloaded.
For example:
If relevant, add a minimal example so that we can reproduce the error by running the code. It is very important for the snippet to be as succinct (minimal) as possible, so please take time to trim down any irrelevant code to help us debug efficiently. We are going to copy-paste your code and we expect to get the same result as you did: avoid any external data, and include the relevant imports, etc. For example:

```python
# All necessary imports at the beginning
@ -28,7 +26,6 @@ body:
If the code is too long (hopefully, it isn't), feel free to put it in a public gist and link it in the issue: https://gist.github.com.

Please also paste or describe the results you observe instead of the expected results. If you observe an error, please paste the error message including the **full** traceback of the exception. It may be relevant to wrap error messages in ```` ```triple quotes blocks``` ````.
If your issue is related to numerical accuracy or reproducibility, please read the [numerical accuracy](https://docs.pytorch.org/docs/stable/notes/numerical_accuracy.html) and [reproducibility](https://docs.pytorch.org/docs/stable/notes/randomness.html) notes. If the difference is not expected as described in these documents, please provide appropriate justification on why one result is wrong and the other is correct.
placeholder: |
A clear and concise description of what the bug is.


2
.github/actionlint.yaml
vendored
@ -14,7 +14,6 @@ self-hosted-runner:
- linux.12xlarge
- linux.24xlarge
- linux.24xlarge.ephemeral
- linux.24xlarge.amd
- linux.arm64.2xlarge
- linux.arm64.2xlarge.ephemeral
- linux.arm64.m7g.4xlarge
@ -50,7+49,6 @@ self-hosted-runner:
# Organization-wide AMD-hosted runners
# MI2xx runners
- linux.rocm.gpu
- linux.rocm.gpu.mi250
- linux.rocm.gpu.2
- linux.rocm.gpu.4
# MI300 runners

2
.github/actions/build-android/action.yml
vendored
@ -9,7 +9,7 @@ inputs:
arch-for-build-env:
description: |
arch to pass to build environment.
This is currently different than the arch name we use elsewhere, which
should be fixed.
required: true
github-secret:

@ -157,4 +157,4 @@ runs:
echo "Is keep-going label set? ${{ steps.filter.outputs.keep-going }}"

echo
echo "Reenabled issues? ${{ steps.filter.outputs.reenabled-issues }}"

2
.github/actions/linux-test/action.yml
vendored
@ -153,7 +153,7 @@ runs:
github-token: ${{ inputs.GITHUB_TOKEN }}

- name: Check for keep-going label and re-enabled test issues
# This uses the filter-test-configs action because it conveniently
# checks for labels and re-enabled test issues. It does not actually do
# any filtering. All filtering is done in the build step.
id: keep-going
9
.github/actions/reuse-old-whl/action.yml
vendored
9
.github/actions/reuse-old-whl/action.yml
vendored
@ -13,12 +13,6 @@ inputs:
|
||||
github-token:
|
||||
description: GitHub token
|
||||
required: true
|
||||
job-id:
|
||||
description: Job ID
|
||||
required: true
|
||||
job-name:
|
||||
description: Job name
|
||||
required: true
|
||||
|
||||
outputs:
|
||||
reuse:
|
||||
@ -36,11 +30,8 @@ runs:
|
||||
continue-on-error: true
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ inputs.github-token }}
|
||||
JOB_ID: ${{ inputs.job-id }}
|
||||
JOB_NAME: ${{ inputs.job-name }}
|
||||
run: |
|
||||
set -x
|
||||
python3 -m pip install boto3==1.35.42
|
||||
python3 ${GITHUB_ACTION_PATH}/reuse_old_whl.py \
|
||||
--build-environment "${{ inputs.build-environment }}" \
|
||||
--run-id "${{ inputs.run-id }}" \
|
||||
|
||||
162
.github/actions/reuse-old-whl/reuse_old_whl.py
vendored
162
.github/actions/reuse-old-whl/reuse_old_whl.py
vendored
@ -1,22 +1,13 @@
|
||||
import argparse
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
from functools import lru_cache
|
||||
from pathlib import Path
|
||||
from typing import Any, cast, Optional, Union
|
||||
from typing import Any, cast, Optional
|
||||
|
||||
import requests
|
||||
|
||||
|
||||
REPO_ROOT = Path(__file__).resolve().parent.parent.parent.parent
|
||||
sys.path.insert(0, str(REPO_ROOT))
|
||||
from tools.stats.upload_metrics import emit_metric
|
||||
|
||||
|
||||
sys.path.remove(str(REPO_ROOT)) # Clean up sys.path after import
|
||||
|
||||
|
||||
FORCE_REBUILD_LABEL = "ci-force-rebuild"
|
||||
|
||||
|
||||
@ -123,43 +114,15 @@ def ok_changed_file(file: str) -> bool:
|
||||
return True
|
||||
if file.startswith("test/") and file.endswith(".py"):
|
||||
return True
|
||||
if file.startswith("docs/") and file.endswith((".md", ".rst")):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def check_changed_files(sha: str) -> bool:
|
||||
# Return true if all the changed files are in the list of allowed files to
|
||||
# be changed to reuse the old whl
|
||||
|
||||
# Removing files in the torch folder is not allowed since rsync will not
|
||||
# remove files
|
||||
removed_files = (
|
||||
subprocess.check_output(
|
||||
[
|
||||
"git",
|
||||
"diff",
|
||||
"--name-only",
|
||||
sha,
|
||||
"HEAD",
|
||||
"--diff-filter=D",
|
||||
"--no-renames",
|
||||
],
|
||||
text=True,
|
||||
stderr=subprocess.DEVNULL,
|
||||
)
|
||||
.strip()
|
||||
.split()
|
||||
)
|
||||
if any(file.startswith("torch/") for file in removed_files):
|
||||
print(
|
||||
f"Removed files between {sha} and HEAD: {removed_files}, cannot reuse old whl"
|
||||
)
|
||||
return False
|
||||
|
||||
changed_files = (
|
||||
subprocess.check_output(
|
||||
["git", "diff", "--name-only", sha, "HEAD", "--no-renames"],
|
||||
["git", "diff", "--name-only", sha, "HEAD"],
|
||||
text=True,
|
||||
stderr=subprocess.DEVNULL,
|
||||
)
|
||||
@ -216,83 +179,38 @@ def unzip_artifact_and_replace_files() -> None:
|
||||
)
|
||||
os.remove("artifacts.zip")
|
||||
|
||||
head_sha = get_head_sha()
|
||||
|
||||
# Rename wheel into zip
|
||||
wheel_path = Path("artifacts/dist").glob("*.whl")
|
||||
for path in wheel_path:
|
||||
# Should be of the form torch-2.0.0+git1234567-cp37-etc.whl
|
||||
# Should usually be the merge base sha but for the ones that didn't do
|
||||
# the replacement, it won't be. Can probably change it to just be merge
|
||||
# base later
|
||||
old_version = f"+git{path.stem.split('+')[1].split('-')[0][3:]}"
|
||||
new_version = f"+git{head_sha[:7]}"
|
||||
|
||||
def rename_to_new_version(file: Union[str, Path]) -> None:
|
||||
# Rename file with old_version to new_version
|
||||
subprocess.check_output(
|
||||
["mv", file, str(file).replace(old_version, new_version)]
|
||||
)
|
||||
|
||||
def change_content_to_new_version(file: Union[str, Path]) -> None:
|
||||
# Check if is a file
|
||||
if os.path.isdir(file):
|
||||
return
|
||||
# Replace the old version in the file with the new version
|
||||
with open(file) as f:
|
||||
content = f.read()
|
||||
content = content.replace(old_version, new_version)
|
||||
with open(file, "w") as f:
|
||||
f.write(content)
|
||||
|
||||
zip_path = path.with_suffix(".zip")
|
||||
os.rename(path, zip_path)
|
||||
old_stem = zip_path.stem
|
||||
new_path = path.with_suffix(".zip")
|
||||
os.rename(path, new_path)
|
||||
print(f"Renamed {path} to {new_path}")
|
||||
print(new_path.stem)
|
||||
# Unzip the wheel
|
||||
subprocess.check_output(
["unzip", "-o", zip_path, "-d", f"artifacts/dist/{old_stem}"],
["unzip", "-o", new_path, "-d", f"artifacts/dist/{new_path.stem}"],
)

# Remove the old wheel (which is now a zip file)
os.remove(zip_path)

# Copy python files into the artifact
subprocess.check_output(
["rsync", "-avz", "torch", f"artifacts/dist/{old_stem}"],
["rsync", "-avz", "torch", f"artifacts/dist/{new_path.stem}"],
)

change_content_to_new_version(f"artifacts/dist/{old_stem}/torch/version.py")

for file in Path(f"artifacts/dist/{old_stem}").glob(
"*.dist-info/**",
):
change_content_to_new_version(file)

rename_to_new_version(f"artifacts/dist/{old_stem}")
new_stem = old_stem.replace(old_version, new_version)

for file in Path(f"artifacts/dist/{new_stem}").glob(
"*.dist-info",
):
rename_to_new_version(file)

# Zip the wheel back
subprocess.check_output(
["zip", "-r", f"{new_stem}.zip", "."],
cwd=f"artifacts/dist/{new_stem}",
["zip", "-r", f"{new_path.stem}.zip", "."],
cwd=f"artifacts/dist/{new_path.stem}",
)

subprocess.check_output(
[
"mv",
f"artifacts/dist/{new_stem}/{new_stem}.zip",
f"artifacts/dist/{new_stem}.whl",
f"artifacts/dist/{new_path.stem}/{new_path.stem}.zip",
f"artifacts/dist/{new_path.stem}.whl",
],
)

# Remove the extracted folder
subprocess.check_output(
["rm", "-rf", f"artifacts/dist/{new_stem}"],
["rm", "-rf", f"artifacts/dist/{new_path.stem}"],
)

# Rezip the artifact
@ -326,60 +244,46 @@ def parse_args() -> argparse.Namespace:
return parser.parse_args()


def can_reuse_whl(args: argparse.Namespace) -> tuple[bool, str]:
if args.github_ref and any(
args.github_ref.startswith(x)
for x in [
"refs/heads/release",
"refs/tags/v",
"refs/heads/nightly",
]
):
print("Release branch, rebuild whl")
return (False, "Release branch")

if not check_changed_files(get_merge_base()):
print("Cannot use old whl due to the changed files, rebuild whl")
return (False, "Changed files not allowed")
def can_reuse_whl(args: argparse.Namespace) -> bool:
# if is_main_branch() or (
# args.github_ref
# and any(
# args.github_ref.startswith(x)
# for x in ["refs/heads/release", "refs/tags/v", "refs/heads/main"]
# )
# ):
# print("On main branch or release branch, rebuild whl")
# return False

if check_labels_for_pr():
print(f"Found {FORCE_REBUILD_LABEL} label on PR, rebuild whl")
return (False, "Found FORCE_REBUILD_LABEL on PR")
return False

if check_issue_open():
print("Issue #153759 is open, rebuild whl")
return (False, "Issue #153759 is open")
return False

if not check_changed_files(get_merge_base()):
print("Cannot use old whl due to the changed files, rebuild whl")
return False

workflow_id = get_workflow_id(args.run_id)
if workflow_id is None:
print("No workflow ID found, rebuild whl")
return (False, "No workflow ID found")
return False

if not find_old_whl(workflow_id, args.build_environment, get_merge_base()):
print("No old whl found, rebuild whl")
return (False, "No old whl found")
# TODO: go backwards from merge base to find more runs
return False

return (True, "Found old whl")
return True


if __name__ == "__main__":
args = parse_args()

reuse_whl, reason = can_reuse_whl(args)

if reuse_whl:
if can_reuse_whl(args):
print("Reusing old whl")
unzip_artifact_and_replace_files()
set_output()

emit_metric(
"reuse_old_whl",
{
"reuse_whl": reuse_whl,
"reason": reason,
"build_environment": args.build_environment,
"merge_base": get_merge_base(),
"head_sha": get_head_sha(),
},
)
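Note on the change above: can_reuse_whl now returns a (bool, reason) tuple instead of a bare bool, so the caller can both branch on the decision and record why it was made via emit_metric. A minimal sketch of that pattern, for illustration only (the emit_metric stub below is a hypothetical stand-in for the repo's real helper):

from typing import Callable

def emit_metric(name: str, payload: dict) -> None:
    # Hypothetical stand-in: the real helper ships the payload to a metrics backend.
    print(name, payload)

def decide(checks: list[Callable[[], tuple[bool, str]]]) -> tuple[bool, str]:
    # Run each guard in order; the first failing guard supplies the reason.
    for check in checks:
        ok, reason = check()
        if not ok:
            return (False, reason)
    return (True, "Found old whl")

reuse_whl, reason = decide([lambda: (False, "Release branch")])
emit_metric("reuse_old_whl", {"reuse_whl": reuse_whl, "reason": reason})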
.github/actions/setup-linux/action.yml (vendored, 4 changes)
@ -33,14 +33,14 @@ runs:
id: check_container_runner
run: echo "IN_CONTAINER_RUNNER=$(if [ -f /.inarc ] || [ -f /.incontainer ]; then echo true ; else echo false; fi)" >> "$GITHUB_OUTPUT"

- name: Start docker if docker daemon is not running
- name: Start docker if docker deamon is not running
shell: bash
if: ${{ steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'false' }}
run: |
if systemctl is-active --quiet docker; then
echo "Docker daemon is running...";
else
echo "Starting docker daemon..." && sudo systemctl start docker;
echo "Starting docker deamon..." && sudo systemctl start docker;
fi

- name: Log in to ECR
.github/actions/setup-xpu/action.yml (vendored, 4 changes)
@ -29,13 +29,13 @@ runs:
if: always()
shell: bash
run: |
timeout 30 xpu-smi discovery || true
xpu-smi discovery

- name: Runner health check GPU count
if: always()
shell: bash
run: |
ngpu=$(timeout 30 xpu-smi discovery | grep -c -E 'Device Name' || true)
ngpu=$(xpu-smi discovery | grep -c -E 'Device Name')
msg="Please file an issue on pytorch/pytorch reporting the faulty runner. Include a link to the runner logs so the runner can be identified"
if [[ $ngpu -eq 0 ]]; then
echo "Error: Failed to detect any GPUs on the runner"
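The edit above wraps the xpu-smi probes in "timeout 30 ... || true" so a wedged GPU tool can neither hang the health check nor fail the step outright; a zero device count is then reported explicitly. A rough Python equivalent of that guard pattern, for illustration only (the command name and 30-second timeout come from the diff; everything else is an assumption):

import subprocess

def count_gpus(timeout_s: int = 30) -> int:
    # Probe the device tool, but never let it hang or raise past this point.
    try:
        proc = subprocess.run(
            ["xpu-smi", "discovery"],
            capture_output=True, text=True, timeout=timeout_s,
        )
        output = proc.stdout
    except (subprocess.TimeoutExpired, FileNotFoundError):
        output = ""
    # Approximate grep -c: count occurrences of the device marker.
    return output.count("Device Name")

if count_gpus() == 0:
    print("Error: Failed to detect any GPUs on the runner")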
.github/ci_commit_pins/audio.txt (vendored, 2 changes)
@ -1 +1 @@
4e94321c54617dd738a05bfedfc28bc0fa635b5c
1a8f6213b0b61efc6a4862bc45b853551a93dbb6
.github/ci_commit_pins/xla.txt (vendored, 2 changes)
@ -1 +1 @@
r2.8
edc1a882d872dd7f1362e4312fd045a1d81b3355
.github/labeler.yml (vendored, 1 change)
@ -116,6 +116,7 @@
"release notes: inductor (aoti)":
- torch/_C/_aoti.pyi
- torch/_dynamo/repro/aoti.py
- torch/_export/serde/aoti_schema.py
- torch/_higher_order_ops/aoti_call_delegate.py
- torch/_inductor/codegen/aoti_runtime/**
- torch/_inductor/codegen/aoti_hipify_utils.py
.github/merge_rules.yaml (vendored, 2 changes)
@ -123,8 +123,6 @@
- torch/*docs.py
approved_by:
- svekars
- sekyondaMeta
- AlannaBurke
mandatory_checks_name:
- EasyCLA
- Lint
.github/pytorch-probot.yml (vendored, 2 changes)
@ -11,7 +11,6 @@ ciflow_push_tags:
- ciflow/inductor-perf-compare
- ciflow/inductor-micro-benchmark
- ciflow/inductor-micro-benchmark-cpu-x86
- ciflow/inductor-perf-test-nightly-x86-zen
- ciflow/inductor-cu126
- ciflow/linux-aarch64
- ciflow/mps
@ -29,7 +28,6 @@ ciflow_push_tags:
- ciflow/op-benchmark
- ciflow/pull
- ciflow/h100
- ciflow/h100-distributed
retryable_workflows:
- pull
- trunk
.github/requirements-gha-cache.txt (vendored, 2 changes)
@ -10,5 +10,5 @@ lintrunner==0.10.7
ninja==1.10.0.post1
nvidia-ml-py==11.525.84
pyyaml==6.0
requests==2.32.4
requests==2.32.2
rich==10.9.0
.github/requirements/conda-env-macOS-ARM64 (vendored, 1 change)
@ -2,4 +2,5 @@
certifi
pip=23.2.1
pkg-config=0.29.2
setuptools=72.1.0
wheel=0.37.1
@ -1,5 +1,5 @@
boto3==1.35.42
cmake==3.27.*
cmake==3.25.*
expecttest==0.3.0
fbscribelogger==0.1.7
filelock==3.6.0
@ -14,7 +14,7 @@ opt-einsum>=3.3
optree==0.13.0
packaging==23.1
parameterized==0.8.1
pillow==10.3.0
pillow==10.0.1
protobuf==5.29.4
psutil==5.9.1
pygments==2.15.0
@ -26,9 +26,7 @@ pytest-xdist==3.3.1
pytest==7.3.2
pyyaml==6.0.2
scipy==1.12.0
setuptools==72.1.0
sympy==1.13.3
tlparse==0.3.30
tensorboard==2.13.0
typing-extensions==4.12.2
unittest-xml-reporting<=3.2.0,>=2.0.0
.github/scripts/amd/patch_triton_wheel.sh (vendored, 2 changes)
@ -78,7 +78,7 @@ for pkg in /$WHEELHOUSE_DIR/*triton*.whl; do
echo "Copied $filepath to $patchedpath"
done

# Go through all required shared objects and see if any of our other objects are dependants. If so, replace so.ver with so
# Go through all required shared objects and see if any of our other objects are dependants. If so, replace so.ver wth so
for ((i=0;i<${#deps[@]};++i)); do
echo "replacing "${deps_soname[i]} ${patched[i]}
replace_needed_sofiles $PREFIX/$ROCM_LIB ${deps_soname[i]} ${patched[i]}
.github/scripts/build_triton_wheel.py (vendored, 30 changes)
@ -21,11 +21,8 @@ def read_triton_pin(device: str = "cuda") -> str:
return f.read().strip()


def read_triton_version(device: str = "cuda") -> str:
triton_version_file = "triton_version.txt"
if device == "xpu":
triton_version_file = "triton_xpu_version.txt"
with open(REPO_DIR / ".ci" / "docker" / triton_version_file) as f:
def read_triton_version() -> str:
with open(REPO_DIR / ".ci" / "docker" / "triton_version.txt") as f:
return f.read().strip()


@ -68,7 +65,6 @@ def build_triton(
with TemporaryDirectory() as tmpdir:
triton_basedir = Path(tmpdir) / "triton"
triton_pythondir = triton_basedir / "python"

triton_repo = "https://github.com/openai/triton"
if device == "rocm":
triton_pkg_name = "pytorch-triton-rocm"
@ -94,7 +90,7 @@ def build_triton(
patch_init_py(
triton_pythondir / "triton" / "__init__.py",
version=f"{version}",
expected_version=read_triton_version(device),
expected_version=None,
)

if device == "rocm":
@ -105,19 +101,11 @@ def build_triton(
)
print("ROCm libraries setup for triton installation...")

# old triton versions have setup.py in the python/ dir,
# new versions have it in the root dir.
triton_setupdir = (
triton_basedir
if (triton_basedir / "setup.py").exists()
else triton_pythondir
)

check_call(
[sys.executable, "setup.py", "bdist_wheel"], cwd=triton_setupdir, env=env
[sys.executable, "setup.py", "bdist_wheel"], cwd=triton_pythondir, env=env
)

whl_path = next(iter((triton_setupdir / "dist").glob("*.whl")))
whl_path = next(iter((triton_pythondir / "dist").glob("*.whl")))
shutil.copy(whl_path, Path.cwd())

if device == "rocm":
@ -140,19 +128,15 @@ def main() -> None:
parser.add_argument("--py-version", type=str)
parser.add_argument("--commit-hash", type=str)
parser.add_argument("--with-clang-ldd", action="store_true")
parser.add_argument("--triton-version", type=str, default=None)
parser.add_argument("--triton-version", type=str, default=read_triton_version())
args = parser.parse_args()

triton_version = read_triton_version(args.device)
if args.triton_version:
triton_version = args.triton_version

build_triton(
device=args.device,
commit_hash=(
args.commit_hash if args.commit_hash else read_triton_pin(args.device)
),
version=triton_version,
version=args.triton_version,
py_version=args.py_version,
release=args.release,
with_clang_ldd=args.with_clang_ldd,
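One detail worth spelling out in the main() change above: with default=read_triton_version(), the default is evaluated once at parser-construction time and cannot depend on --device; switching to default=None and resolving the fallback after parse_args() makes the version lookup device-aware. A small sketch of that lazy-default pattern (the pin-file names follow the diff; the rest is illustrative, not the real script):

import argparse

def read_triton_version(device: str = "cuda") -> str:
    # Sketch: pick the per-device pin file, as the diff does.
    name = "triton_xpu_version.txt" if device == "xpu" else "triton_version.txt"
    return f"<contents of {name}>"

parser = argparse.ArgumentParser()
parser.add_argument("--device", default="cuda")
parser.add_argument("--triton-version", type=str, default=None)
args = parser.parse_args([])

# Resolve the default lazily, now that --device is known.
triton_version = args.triton_version or read_triton_version(args.device)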
.github/scripts/filter_test_configs.py (vendored, 6 changes)
@ -40,9 +40,9 @@ SUPPORTED_PERIODICAL_MODES: dict[str, Callable[[Optional[str]], bool]] = {
}

# The link to the published list of disabled jobs
DISABLED_JOBS_URL = "https://ossci-metrics.s3.amazonaws.com/disabled-jobs.json?versionId=HnkH0xQWnnsoeMsSIVf9291NE5c4jWSa"
DISABLED_JOBS_URL = "https://ossci-metrics.s3.amazonaws.com/disabled-jobs.json"
# and unstable jobs
UNSTABLE_JOBS_URL = "https://ossci-metrics.s3.amazonaws.com/unstable-jobs.json?versionId=iP_F8gBs60PfOMAJ8gnn1paVrzM1WYsK"
UNSTABLE_JOBS_URL = "https://ossci-metrics.s3.amazonaws.com/unstable-jobs.json"

# Some constants used to handle disabled and unstable jobs
JOB_NAME_SEP = "/"
@ -80,7 +80,7 @@ def parse_args() -> Any:
parser.add_argument(
"--job-name",
type=str,
help="the name of the current job, i.e. linux-jammy-py3.8-gcc7 / build",
help="the name of the current job, i.e. linux-focal-py3.8-gcc7 / build",
)
parser.add_argument("--pr-number", type=str, help="the pull request number")
parser.add_argument("--tag", type=str, help="the associated tag if it exists")
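The URL change above pins each S3 object to an explicit versionId, so the disabled/unstable job lists are read from a fixed snapshot rather than whatever currently sits at the key. A hedged sketch of how such a pinned URL might be fetched (standard library only; the versionId value is copied from the diff, and the real consumer in the script may differ):

import json
from urllib.request import urlopen

DISABLED_JOBS_URL = (
    "https://ossci-metrics.s3.amazonaws.com/disabled-jobs.json"
    "?versionId=HnkH0xQWnnsoeMsSIVf9291NE5c4jWSa"
)

def fetch_disabled_jobs(url: str = DISABLED_JOBS_URL) -> dict:
    # S3 serves the pinned object version when versionId is in the query string.
    with urlopen(url) as resp:
        return json.load(resp)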
.github/scripts/generate_binary_build_matrix.py (vendored, 69 changes)
@ -15,21 +15,21 @@ import os
from typing import Optional


# NOTE: Please also update the CUDA sources in `PIP_SOURCES` in tools/nightly.py when changing this
CUDA_ARCHES = ["12.6", "12.8", "12.9"]
# NOTE: Also update the CUDA sources in tools/nightly.py when changing this list
CUDA_ARCHES = ["11.8", "12.6", "12.8"]
CUDA_STABLE = "12.6"
CUDA_ARCHES_FULL_VERSION = {
"11.8": "11.8.0",
"12.6": "12.6.3",
"12.8": "12.8.1",
"12.9": "12.9.1",
}
CUDA_ARCHES_CUDNN_VERSION = {
"11.8": "9",
"12.6": "9",
"12.8": "9",
"12.9": "9",
}

# NOTE: Please also update the ROCm sources in `PIP_SOURCES` in tools/nightly.py when changing this
# NOTE: Also update the ROCm sources in tools/nightly.py when changing this list
ROCM_ARCHES = ["6.3", "6.4"]

XPU_ARCHES = ["xpu"]
@ -38,23 +38,35 @@ CPU_AARCH64_ARCH = ["cpu-aarch64"]

CPU_S390X_ARCH = ["cpu-s390x"]

CUDA_AARCH64_ARCHES = ["12.9-aarch64"]
CUDA_AARCH64_ARCHES = ["12.8-aarch64"]


PYTORCH_EXTRA_INSTALL_REQUIREMENTS = {
"11.8": (
"nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | " # noqa: B950
"nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cudnn-cu11==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cusolver-cu11==11.4.1.48; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cusparse-cu11==11.7.5.86; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nccl-cu11==2.21.5; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nvtx-cu11==11.8.86; platform_system == 'Linux' and platform_machine == 'x86_64'"
),
"12.6": (
"nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cudnn-cu12==9.5.1.17; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nccl-cu12==2.27.3; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nvshmem-cu12==3.2.5; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux' and platform_machine == 'x86_64'"
@ -63,42 +75,25 @@ PYTORCH_EXTRA_INSTALL_REQUIREMENTS = {
"nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cudnn-cu12==9.8.0.87; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nccl-cu12==2.27.3; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nvshmem-cu12==3.2.5; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux' and platform_machine == 'x86_64'"
),
"12.9": (
"nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nccl-cu12==2.27.3; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64'"
),
"xpu": (
"intel-cmplr-lib-rt==2025.1.1 | "
"intel-cmplr-lib-ur==2025.1.1 | "
"intel-cmplr-lic-rt==2025.1.1 | "
"intel-sycl-rt==2025.1.1 | "
"oneccl-devel==2021.15.2; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"oneccl==2021.15.2; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"oneccl-devel==2021.15.1; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"oneccl==2021.15.1; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"impi-rt==2021.15.0; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"onemkl-sycl-blas==2025.1.0 | "
"onemkl-sycl-dft==2025.1.0 | "
@ -112,7 +107,7 @@ PYTORCH_EXTRA_INSTALL_REQUIREMENTS = {
"tbb==2022.1.0 | "
"tcmlib==1.3.0 | "
"umf==0.10.0 | "
"intel-pti==0.12.3"
"intel-pti==0.12.0"
),
}

@ -316,10 +311,10 @@ def generate_wheels_matrix(
continue

if use_split_build and (
arch_version not in ["12.6", "12.8", "12.9", "cpu"] or os != "linux"
arch_version not in ["12.6", "12.8", "11.8", "cpu"] or os != "linux"
):
raise RuntimeError(
"Split build is only supported on linux with cuda 12* and cpu.\n"
"Split build is only supported on linux with cuda 12*, 11.8, and cpu.\n"
f"Currently attempting to build on arch version {arch_version} and os {os}.\n"
"Please modify the matrix generation to exclude this combination."
)
@ -327,7 +322,7 @@
# cuda linux wheels require PYTORCH_EXTRA_INSTALL_REQUIREMENTS to install

if (
arch_version in ["12.9", "12.8", "12.6"]
arch_version in ["12.8", "12.6", "11.8"]
and os == "linux"
or arch_version in CUDA_AARCH64_ARCHES
):
@ -416,6 +411,6 @@ generate_wheels_matrix(
return ret


validate_nccl_dep_consistency("12.9")
validate_nccl_dep_consistency("12.8")
validate_nccl_dep_consistency("12.6")
validate_nccl_dep_consistency("11.8")
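Each entry in PYTORCH_EXTRA_INSTALL_REQUIREMENTS above is a single string of PEP 508 requirement specifiers joined by " | ". A short sketch of how a consumer might split such a string back into individual specifiers (the splitting convention is inferred from the format; the actual consumer in the build scripts may differ):

extra = (
    "nvidia-nccl-cu12==2.27.3; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64'"
)

# Split on the "|" separator and strip whitespace to recover each specifier.
requirements = [spec.strip() for spec in extra.split("|")]
for spec in requirements:
    print(spec)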
.github/scripts/generate_ci_workflows.py (vendored, 2 changes)
@ -152,7 +152,7 @@ LINUX_BINARY_SMOKE_WORKFLOWS = [
package_type="manywheel",
build_configs=generate_binary_build_matrix.generate_wheels_matrix(
OperatingSystem.LINUX,
arches=["12.6", "12.8", "12.9", "6.4"],
arches=["11.8", "12.6", "12.8"],
python_versions=["3.9"],
),
branches="main",
.github/scripts/get_workflow_job_id.py (vendored, 2 changes)
@ -64,7 +64,7 @@ def fetch_url(
)
exception_message = (
"Is github alright?",
f"Received status code '{err.code}' when attempting to retrieve {url}:\n",
f"Recieved status code '{err.code}' when attempting to retrieve {url}:\n",
f"{err.reason}\n\nheaders={err.headers}",
)
raise RuntimeError(exception_message) from err
.github/scripts/gitutils.py (vendored, 2 changes)
@ -211,7 +211,7 @@ class GitRepo:
self, from_branch: str, to_branch: str
) -> tuple[list[str], list[str]]:
"""
Returns list of commits that are missing in each other branch since their merge base
Returns list of commmits that are missing in each other branch since their merge base
Might be slow if merge base is between two branches is pretty far off
"""
from_ref = self.rev_parse(from_branch)
.github/scripts/gql_mocks.json.gz (vendored, binary file not shown)
.github/scripts/pr-sanity-check.sh (vendored, 2 changes)
@ -12,7 +12,7 @@ BASE=${BASE:-HEAD~1}
HEAD=${HEAD:-HEAD}

ancestor=$(git merge-base "${BASE}" "${HEAD}")
echo "INFO: Checking against the following stats"
echo "INFO: Checking aginst the following stats"
(
set -x
git diff --stat=10000 "$ancestor" "${HEAD}" | sed '$d' > "${TMPFILE}"
.github/scripts/test_filter_test_configs.py (vendored, 10 changes)
@ -347,26 +347,26 @@ class TestConfigFilter(TestCase):
{
"job_name": "a-ci-job",
"test_matrix": '{include: [{config: "default", runner: "linux"}, {config: "cfg", runner: "macos"}]}',
"description": "Replicate each periodic mode in a different config",
"descripion": "Replicate each periodic mode in a different config",
},
{
"job_name": "a-ci-cuda11.8-job",
"test_matrix": '{include: [{config: "default", runner: "linux"}, {config: "cfg", runner: "macos"}]}',
"description": "Replicate each periodic mode in a different config for a CUDA job",
"descripion": "Replicate each periodic mode in a different config for a CUDA job",
},
{
"job_name": "a-ci-rocm-job",
"test_matrix": '{include: [{config: "default", runner: "linux"}, {config: "cfg", runner: "macos"}]}',
"description": "Replicate each periodic mode in a different config for a ROCm job",
"descripion": "Replicate each periodic mode in a different config for a ROCm job",
},
{
"job_name": "",
"test_matrix": '{include: [{config: "default", runner: "linux"}, {config: "cfg", runner: "macos"}]}',
"description": "Empty job name",
"descripion": "Empty job name",
},
{
"test_matrix": '{include: [{config: "default", runner: "linux"}, {config: "cfg", runner: "macos"}]}',
"description": "Missing job name",
"descripion": "Missing job name",
},
]
.github/scripts/test_trymerge.py (vendored, 51 changes)
@ -19,7 +19,6 @@ from urllib.error import HTTPError
from github_utils import gh_graphql
from gitutils import get_git_remote_name, get_git_repo_dir, GitRepo
from trymerge import (
_revlist_to_prs,
categorize_checks,
DRCI_CHECKRUN_NAME,
find_matching_merge_rule,
@ -265,7 +264,7 @@ class DummyGitRepo(GitRepo):
return ["FakeCommitSha"]

def commit_message(self, ref: str) -> str:
return "super awesome commit message"
return "super awsome commit message"


@mock.patch("trymerge.gh_graphql", side_effect=mocked_gh_graphql)
@ -433,7 +432,7 @@ class TestTryMerge(TestCase):
)

def test_cancelled_gets_ignored(self, *args: Any) -> None:
"""Tests that cancelled workflow does not override existing successful status"""
"""Tests that cancelled workflow does not override existing successfull status"""
pr = GitHubPR("pytorch", "pytorch", 110367)
conclusions = pr.get_checkrun_conclusions()
lint_checks = [name for name in conclusions.keys() if "Lint" in name]
@ -1089,51 +1088,5 @@ class TestGitHubPRGhstackDependencies(TestCase):
)


@mock.patch("trymerge.gh_graphql", side_effect=mocked_gh_graphql)
@mock.patch("trymerge.gh_fetch_merge_base", return_value="")
@mock.patch(
"trymerge.get_drci_classifications", side_effect=mocked_drci_classifications
)
@mock.patch.object(DummyGitRepo, "commit_message")
class TestRevListToPR(TestCase):
# Tests for _revlist_to_prs function
def test__revlist_to_prs_zero_matches(
self, mock_commit_message: mock.MagicMock, *args: Any
) -> None:
# If zero PRs are mentioned in the commit message, it should raise an error
pr_num = 154098
pr = GitHubPR("pytorch", "pytorch", pr_num)
repo = DummyGitRepo()
mock_commit_message.return_value = "no PRs"
self.assertRaisesRegex(
RuntimeError,
"PRs mentioned in commit dummy: 0.",
lambda: _revlist_to_prs(repo, pr, ["dummy"]),
)

def test__revlist_to_prs_two_prs(
self, mock_commit_message: mock.MagicMock, *args: Any
) -> None:
# If two PRs are mentioned in the commit message, it should raise an error
pr_num = 154394
pr = GitHubPR("pytorch", "pytorch", pr_num)
repo = DummyGitRepo()
# https://github.com/pytorch/pytorch/commit/343c56e7650f55fd030aca0b9275d6d73501d3f4

commit_message = """add sticky cache pgo

ghstack-source-id: 9bc6dee0b427819f978bfabccb72727ba8be2f81
Pull-Request-resolved: https://github.com/pytorch/pytorch/pull/154098

ghstack-source-id: 9bc6dee0b427819f978bfabccb72727ba8be2f81
Pull Request resolved: https://github.com/pytorch/pytorch/pull/154394"""
mock_commit_message.return_value = commit_message
self.assertRaisesRegex(
RuntimeError,
"PRs mentioned in commit dummy: 2.",
lambda: _revlist_to_prs(repo, pr, ["dummy"]),
)


if __name__ == "__main__":
main()
.github/scripts/trymerge.py (vendored, 15 changes)
@ -628,17 +628,11 @@ def _revlist_to_prs(
rc: list[tuple[GitHubPR, str]] = []
for idx, rev in enumerate(rev_list):
msg = repo.commit_message(rev)
# findall doesn't return named captures, so we need to use finditer
all_matches = list(RE_PULL_REQUEST_RESOLVED.finditer(msg))
if len(all_matches) != 1:
m = RE_PULL_REQUEST_RESOLVED.search(msg)
if m is None:
raise RuntimeError(
f"Found an unexpected number of PRs mentioned in commit {rev}: "
f"{len(all_matches)}. This is probably because you are using an "
"old version of ghstack. Please update ghstack and resubmit "
"your PRs"
f"Could not find PR-resolved string in {msg} of ghstacked PR {pr.pr_num}"
)

m = all_matches[0]
if pr.org != m.group("owner") or pr.project != m.group("repo"):
raise RuntimeError(
f"PR {m.group('number')} resolved to wrong owner/repo pair"
@ -672,9 +666,6 @@ def get_ghstack_prs(

assert pr.is_ghstack_pr()
entire_stack = _revlist_to_prs(repo, pr, reversed(rev_list), skip_func)
print(
f"Found {len(entire_stack)} PRs in the stack for {pr.pr_num}: {[x[0].pr_num for x in entire_stack]}"
)

for stacked_pr, rev in entire_stack:
if stacked_pr.is_closed():
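The comment in the new _revlist_to_prs ("findall doesn't return named captures, so we need to use finditer") deserves a concrete illustration: re.findall returns plain strings or group tuples, while re.finditer yields match objects whose named groups stay addressable, and it also lets the caller count matches exactly. A self-contained example with an illustrative pattern (the real RE_PULL_REQUEST_RESOLVED regex lives in trymerge.py and may differ):

import re

# Illustrative pattern in the spirit of RE_PULL_REQUEST_RESOLVED.
pat = re.compile(
    r"Pull Request resolved: "
    r"https://github.com/(?P<owner>[^/]+)/(?P<repo>[^/]+)/pull/(?P<number>\d+)"
)
msg = "Pull Request resolved: https://github.com/pytorch/pytorch/pull/154394"

print(re.findall(pat, msg))        # [('pytorch', 'pytorch', '154394')] -- tuples, no names
matches = list(pat.finditer(msg))  # match objects keep named groups
assert len(matches) == 1           # and the exact match count is checkable
m = matches[0]
print(m.group("owner"), m.group("repo"), m.group("number"))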
.github/scripts/windows/build_magma.bat (vendored, 9 changes)
@ -17,6 +17,7 @@ if errorlevel 1 exit /b 1

set "PATH=C:\Tools;C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v%CUVER%\bin;C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v%CUVER%\libnvvp;%PATH%"
set CUDA_PATH=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v%CUVER%
set NVTOOLSEXT_PATH=C:\Program Files\NVIDIA Corporation\NvToolsExt

mkdir magma_cuda%CUVER_NODOT%
cd magma_cuda%CUVER_NODOT%
@ -34,15 +35,15 @@ cd magma
mkdir build && cd build

set GPU_TARGET=All
if "%CUVER_NODOT%" == "129" (
set CUDA_ARCH_LIST=-gencode=arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90 -gencode arch=compute_100,code=sm_100 -gencode arch=compute_120,code=sm_120
)
if "%CUVER_NODOT%" == "128" (
set CUDA_ARCH_LIST=-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90 -gencode arch=compute_100,code=sm_100 -gencode arch=compute_120,code=sm_120
)
if "%CUVER_NODOT%" == "126" (
if "%CUVER_NODOT:~0,2%" == "12" if NOT "%CUVER_NODOT%" == "128" (
set CUDA_ARCH_LIST=-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90
)
if "%CUVER_NODOT%" == "118" (
set CUDA_ARCH_LIST= -gencode arch=compute_37,code=sm_37 -gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90
)

set CC=cl.exe
set CXX=cl.exe
.github/templates/common.yml.j2 (vendored, 2 changes)
@ -32,7 +32,7 @@ concurrency:
{%- macro setup_ec2_windows() -%}
!{{ display_ec2_information() }}
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.8
uses: pytorch/test-infra/.github/actions/setup-ssh@main
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}

@ -56,7 +56,7 @@ jobs:
get-label-type:
if: github.repository_owner == 'pytorch'
name: get-label-type
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.8
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
with:
triggering_actor: ${{ github.triggering_actor }}
issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
@ -114,12 +114,12 @@ jobs:
ALPINE_IMAGE: "docker.io/s390x/alpine"
{%- elif config["gpu_arch_type"] == "rocm" %}
runs_on: linux.rocm.gpu
{%- elif config["gpu_arch_type"] == "cuda" and config["gpu_arch_version"] in ["12.8", "12.9"] %}
{%- elif config["gpu_arch_type"] == "cuda" and config["gpu_arch_version"] == "12.8" %}
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8 and 12.9 build need sm_70+ runner
{%- elif config["gpu_arch_type"] == "cuda" %}
runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8 build needs sm_70+ runner
{%- elif config["gpu_arch_type"] == "cuda" and config["gpu_arch_version"] != "12.8"%}
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
runs_on: linux.4xlarge.nvidia.gpu # for other cuda versions, we use 4xlarge runner
runs_on: linux.4xlarge.nvidia.gpu
{%- else %}
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
runs_on: linux.4xlarge
@ -150,10 +150,10 @@ jobs:
with:
name: !{{ config["build_name"] }}
path: "${{ runner.temp }}/artifacts/"
!{{ common.checkout(deep_clone=False, directory="pytorch", checkout_pr_head=False) }}
!{{ common.checkout(deep_clone=False, directory="pytorch") }}
- name: Calculate docker image
id: calculate-docker-image
uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.8
uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
with:
docker-registry: ${{ startsWith(github.event.ref, 'refs/tags/ciflow/') && '308535385114.dkr.ecr.us-east-1.amazonaws.com' || 'docker.io' }}
docker-image-name: !{{ config["container_image"] }}
@ -161,7 +161,7 @@ jobs:
docker-build-dir: .ci/docker
working-directory: pytorch
- name: Pull Docker image
uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.8
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
with:
docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
- name: Test Pytorch binary
@ -171,7 +171,7 @@ jobs:
- name: Teardown XPU
uses: ./.github/actions/teardown-xpu
{%- else %}
runs-on: linux.rocm.gpu.mi250
runs-on: linux.rocm.gpu
timeout-minutes: !{{ common.timeout_minutes }}
!{{ upload.binary_env(config) }}
steps:
@ -182,7 +182,7 @@ jobs:
with:
name: !{{ config["build_name"] }}
path: "${{ runner.temp }}/artifacts/"
!{{ common.checkout(deep_clone=False, directory="pytorch", checkout_pr_head=False) }}
!{{ common.checkout(deep_clone=False, directory="pytorch") }}
- name: ROCm set GPU_FLAG
run: |
echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri --group-add video --group-add daemon" >> "${GITHUB_ENV}"
@ -196,7 +196,7 @@ jobs:
role-duration-seconds: 18000
- name: Calculate docker image
id: calculate-docker-image
uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.8
uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
with:
docker-registry: ${{ startsWith(github.event.ref, 'refs/tags/ciflow/') && '308535385114.dkr.ecr.us-east-1.amazonaws.com' || 'docker.io' }}
docker-image-name: !{{ config["container_image"] }}
@ -204,7 +204,7 @@ jobs:
docker-build-dir: .ci/docker
working-directory: pytorch
- name: Pull Docker image
uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.8
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
with:
docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
- name: Test Pytorch binary
@ -76,7 +76,7 @@ jobs:
elif [ -d "/Applications/Xcode_13.3.1.app" ]; then
echo "DEVELOPER_DIR=/Applications/Xcode_13.3.1.app/Contents/Developer" >> "${GITHUB_ENV}"
fi
!{{ common.checkout(deep_clone=False, directory="pytorch", checkout_pr_head=False) }}
!{{ common.checkout(deep_clone=False, directory="pytorch") }}
- name: Populate binary env
run: |
# shellcheck disable=SC1091
@ -64,7 +64,7 @@ jobs:
get-label-type:
if: github.repository_owner == 'pytorch'
name: get-label-type
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.8
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
with:
triggering_actor: ${{ github.triggering_actor }}
issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
@ -135,7 +135,7 @@ jobs:
{%- else %}
!{{ set_runner_specific_vars() }}
!{{ common.setup_ec2_windows() }}
!{{ common.checkout(deep_clone=False, directory="pytorch", checkout_pr_head=False) }}
!{{ common.checkout(deep_clone=False, directory="pytorch") }}
{%- endif %}
- name: Populate binary env
shell: bash
@ -211,7 +211,7 @@ jobs:
"pytorch/.ci/pytorch/windows/arm64/bootstrap_rust.bat"
{%- else %}
!{{ common.setup_ec2_windows() }}
!{{ common.checkout(deep_clone=False, directory="pytorch", checkout_pr_head=False) }}
!{{ common.checkout(deep_clone=False, directory="pytorch") }}
!{{ set_runner_specific_vars() }}
{%- endif %}
- uses: !{{ common.download_artifact_action }}
.github/workflows/_bazel-build-test.yml (vendored, 14 changes)
@ -47,7 +47,7 @@ jobs:
reenabled-issues: ${{ steps.filter.outputs.reenabled-issues }}
steps:
- name: Checkout PyTorch
uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.8
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
with:
fetch-depth: 1
submodules: false
@ -69,25 +69,25 @@ jobs:
runs-on: ${{ matrix.runner }}
steps:
- name: Setup SSH (Click me for login details)
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.8
uses: pytorch/test-infra/.github/actions/setup-ssh@main
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}

# [see note: pytorch repo ref]
- name: Checkout PyTorch
uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.8
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main

- name: Setup Linux
uses: ./.github/actions/setup-linux

- name: Calculate docker image
id: calculate-docker-image
uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.8
uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
with:
docker-image-name: ${{ inputs.docker-image-name }}

- name: Pull docker image
uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.8
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
with:
docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}

@ -97,7 +97,7 @@ jobs:
run: echo "IN_CONTAINER_RUNNER=$(if [ -f /.inarc ] || [ -f /.incontainer ]; then echo true ; else echo false; fi)" >> "$GITHUB_OUTPUT"

- name: Install nvidia driver, nvidia-docker runtime, set GPU_FLAG
uses: pytorch/test-infra/.github/actions/setup-nvidia@release/2.8
uses: pytorch/test-infra/.github/actions/setup-nvidia@main
if: ${{ inputs.cuda-version != 'cpu' && steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'false' }}

- name: Output disk space left
@ -209,5 +209,5 @@ jobs:
file-suffix: bazel-${{ github.job }}_${{ steps.get-job-id.outputs.job-id }}

- name: Teardown Linux
uses: pytorch/test-infra/.github/actions/teardown-linux@release/2.8
uses: pytorch/test-infra/.github/actions/teardown-linux@main
if: always()
.github/workflows/_binary-build-linux.yml (vendored, 13 changes)
@ -151,13 +151,13 @@ jobs:

- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
if: inputs.build_environment != 'linux-s390x-binary-manywheel'
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.8
uses: pytorch/test-infra/.github/actions/setup-ssh@main
continue-on-error: true
with:
github-secret: ${{ secrets.github-token }}

- name: Checkout PyTorch
uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.8
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
with:
no-sudo: ${{ inputs.build_environment == 'linux-aarch64-binary-manywheel' || inputs.build_environment == 'linux-s390x-binary-manywheel' }}

@ -187,6 +187,7 @@ jobs:
- name: Checkout PyTorch to pytorch dir
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
show-progress: false
@ -221,9 +222,9 @@ jobs:
- name: Calculate docker image
id: calculate-docker-image
if: ${{ steps.filter.outputs.is-test-matrix-empty == 'False' && inputs.build_environment != 'linux-s390x-binary-manywheel' }}
uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.8
uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
with:
# If doing this in release/2.8 or release branch, use docker.io. Otherwise
# If doing this in main or release branch, use docker.io. Otherwise
# use ECR
docker-registry: ${{ startsWith(github.event.ref, 'refs/tags/ciflow/') && '308535385114.dkr.ecr.us-east-1.amazonaws.com' || 'docker.io' }}
docker-image-name: ${{ inputs.DOCKER_IMAGE }}
@ -235,7 +236,7 @@ jobs:

- name: Pull Docker image
if: ${{ steps.filter.outputs.is-test-matrix-empty == 'False' && inputs.build_environment != 'linux-s390x-binary-manywheel' }}
uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.8
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
with:
docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}

@ -292,7 +293,7 @@ jobs:

- name: Teardown Linux
if: always() && inputs.build_environment != 'linux-s390x-binary-manywheel'
uses: pytorch/test-infra/.github/actions/teardown-linux@release/2.8
uses: pytorch/test-infra/.github/actions/teardown-linux@main

- name: Chown workspace
if: always() && inputs.build_environment != 'linux-s390x-binary-manywheel'
.github/workflows/_binary-test-linux.yml (vendored, 13 changes)
@ -134,14 +134,14 @@ jobs:

- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
if: inputs.build_environment != 'linux-s390x-binary-manywheel'
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.8
uses: pytorch/test-infra/.github/actions/setup-ssh@main
continue-on-error: true
with:
github-secret: ${{ secrets.github-token }}

# Setup the environment
- name: Checkout PyTorch
uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.8
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
with:
no-sudo: ${{ inputs.build_environment == 'linux-aarch64-binary-manywheel' || inputs.build_environment == 'linux-s390x-binary-manywheel' }}

@ -164,6 +164,7 @@ jobs:
- name: Checkout PyTorch to pytorch dir
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
show-progress: false
path: pytorch
@ -194,7 +195,7 @@ jobs:
path: "${{ runner.temp }}/artifacts/"

- name: Install nvidia driver, nvidia-docker runtime, set GPU_FLAG
uses: pytorch/test-infra/.github/actions/setup-nvidia@release/2.8
uses: pytorch/test-infra/.github/actions/setup-nvidia@main
if: ${{ inputs.GPU_ARCH_TYPE == 'cuda' && steps.filter.outputs.is-test-matrix-empty == 'False' }}

- name: configure aws credentials
@ -209,7 +210,7 @@ jobs:
- name: Calculate docker image
id: calculate-docker-image
if: ${{ steps.filter.outputs.is-test-matrix-empty == 'False' && inputs.build_environment != 'linux-s390x-binary-manywheel' }}
uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.8
uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
with:
docker-registry: ${{ startsWith(github.event.ref, 'refs/tags/ciflow/') && '308535385114.dkr.ecr.us-east-1.amazonaws.com' || 'docker.io' }}
docker-image-name: ${{ inputs.DOCKER_IMAGE }}
@ -219,7 +220,7 @@ jobs:

- name: Pull Docker image
if: ${{ steps.filter.outputs.is-test-matrix-empty == 'False' && inputs.build_environment != 'linux-s390x-binary-manywheel' }}
uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.8
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
with:
docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}

@ -231,7 +232,7 @@ jobs:

- name: Teardown Linux
if: always() && inputs.build_environment != 'linux-s390x-binary-manywheel'
uses: pytorch/test-infra/.github/actions/teardown-linux@release/2.8
uses: pytorch/test-infra/.github/actions/teardown-linux@main

- name: Chown workspace
if: always() && inputs.build_environment != 'linux-s390x-binary-manywheel'
Some files were not shown because too many files have changed in this diff.