mirror of https://github.com/pytorch/pytorch.git
synced 2025-10-24 23:54:56 +08:00
Compare commits (1 commit)
delete-qua...mlazos/hc1
| Author | SHA1 | Date |
|---|---|---|
|  | 3698b009cd |  |
@@ -3,7 +3,9 @@ set -eux -o pipefail

GPU_ARCH_VERSION=${GPU_ARCH_VERSION:-}

if [[ "$GPU_ARCH_VERSION" == *"12.9"* ]]; then
if [[ "$GPU_ARCH_VERSION" == *"12.6"* ]]; then
    export TORCH_CUDA_ARCH_LIST="9.0"
elif [[ "$GPU_ARCH_VERSION" == *"12.8"* ]]; then
    export TORCH_CUDA_ARCH_LIST="9.0;10.0;12.0"
fi
@@ -79,7 +79,6 @@ def package_cuda_wheel(wheel_path, desired_cuda) -> None:
    os.system(f"unzip {wheel_path} -d {folder}/tmp")
    libs_to_copy = [
        "/usr/local/cuda/extras/CUPTI/lib64/libcupti.so.12",
        "/usr/local/cuda/extras/CUPTI/lib64/libnvperf_host.so",
        "/usr/local/cuda/lib64/libcudnn.so.9",
        "/usr/local/cuda/lib64/libcublas.so.12",
        "/usr/local/cuda/lib64/libcublasLt.so.12",
@@ -89,7 +88,7 @@ def package_cuda_wheel(wheel_path, desired_cuda) -> None:
        "/usr/local/cuda/lib64/libcusparseLt.so.0",
        "/usr/local/cuda/lib64/libcusolver.so.11",
        "/usr/local/cuda/lib64/libcurand.so.10",
        "/usr/local/cuda/lib64/libnccl.so.2",
        "/usr/local/cuda/lib64/libnvToolsExt.so.1",
        "/usr/local/cuda/lib64/libnvJitLink.so.12",
        "/usr/local/cuda/lib64/libnvrtc.so.12",
        "/usr/local/cuda/lib64/libcudnn_adv.so.9",
@@ -109,9 +108,9 @@ def package_cuda_wheel(wheel_path, desired_cuda) -> None:
        "/usr/local/lib/libnvpl_blas_core.so.0",
    ]

    if "129" in desired_cuda:
    if "128" in desired_cuda:
        libs_to_copy += [
            "/usr/local/cuda/lib64/libnvrtc-builtins.so.12.9",
            "/usr/local/cuda/lib64/libnvrtc-builtins.so.12.8",
            "/usr/local/cuda/lib64/libcufile.so.0",
            "/usr/local/cuda/lib64/libcufile_rdma.so.1",
        ]
@@ -89,8 +89,8 @@ tag=$(echo $image | awk -F':' '{print $2}')
# configuration, so we hardcode everything here rather than do it
# from scratch
case "$tag" in
pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc11)
    CUDA_VERSION=12.8.1
pytorch-linux-jammy-cuda12.6-cudnn9-py3-gcc11)
    CUDA_VERSION=12.6.3
    CUDNN_VERSION=9
    ANACONDA_PYTHON_VERSION=3.10
    GCC_VERSION=11
@@ -275,6 +275,17 @@ case "$tag" in
    VISION=yes
    TRITON=yes
    ;;
pytorch-linux-jammy-py3-clang12-asan)
    ANACONDA_PYTHON_VERSION=3.9
    CLANG_VERSION=12
    VISION=yes
    TRITON=yes
    ;;
pytorch-linux-jammy-py3-clang15-asan)
    ANACONDA_PYTHON_VERSION=3.10
    CLANG_VERSION=15
    VISION=yes
    ;;
pytorch-linux-jammy-py3-clang18-asan)
    ANACONDA_PYTHON_VERSION=3.10
    CLANG_VERSION=18
@@ -39,7 +39,6 @@ RUN bash ./install_user.sh && rm install_user.sh

# Install conda and other packages (e.g., numpy, pytest)
ARG ANACONDA_PYTHON_VERSION
ARG BUILD_ENVIRONMENT
ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION
ENV PATH /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/bin:/opt/conda/bin:$PATH
COPY requirements-ci.txt /opt/conda/requirements-ci.txt
@@ -1 +1 @@
56392aa978594cc155fa8af48cd949f5b5f1823a
f50bfa92602b45dca884a9e511e5d9ddbe8ba314
@@ -1 +1 @@
v2.27.3-1
v2.26.5-1
@@ -1 +1 @@
ae324eeac8e102a2b40370e341460f3791353398
b0e26b7359c147b8aa0af686c20510fb9b15990a
@@ -1 +1 @@
ae848267bebc65c6181e8cc5e64a6357d2679260
c8757738a7418249896224430ce84888e8ecdd79
@@ -6,7 +6,7 @@ set -ex
if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
    BASE_URL="https://repo.anaconda.com/miniconda"
    CONDA_FILE="Miniconda3-latest-Linux-x86_64.sh"
    if [[ $(uname -m) == "aarch64" ]] || [[ "$BUILD_ENVIRONMENT" == *xpu* ]] || [[ "$BUILD_ENVIRONMENT" == *rocm* ]]; then
    if [[ $(uname -m) == "aarch64" ]] || [[ "$BUILD_ENVIRONMENT" == *xpu* ]]; then
        BASE_URL="https://github.com/conda-forge/miniforge/releases/latest/download"  # @lint-ignore
        CONDA_FILE="Miniforge3-Linux-$(uname -m).sh"
    fi
@@ -64,11 +64,6 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
    # which is provided in libstdcxx 12 and up.
    conda_install libstdcxx-ng=12.3.0 --update-deps -c conda-forge

    # Miniforge installer doesn't install sqlite by default
    if [[ "$BUILD_ENVIRONMENT" == *rocm* ]]; then
        conda_install sqlite
    fi

    # Install PyTorch conda deps, as per https://github.com/pytorch/pytorch README
    if [[ $(uname -m) == "aarch64" ]]; then
        conda_install "openblas==0.3.29=*openmp*"
@@ -3,10 +3,11 @@
set -uex -o pipefail

PYTHON_DOWNLOAD_URL=https://www.python.org/ftp/python
PYTHON_DOWNLOAD_GITHUB_BRANCH=https://github.com/python/cpython/archive/refs/heads  # @lint-ignore
GET_PIP_URL=https://bootstrap.pypa.io/get-pip.py

# Python versions to be installed in /opt/$VERSION_NO
CPYTHON_VERSIONS=${CPYTHON_VERSIONS:-"3.9.0 3.10.1 3.11.0 3.12.0 3.13.0 3.13.0t 3.14.0 3.14.0t"}
CPYTHON_VERSIONS=${CPYTHON_VERSIONS:-"3.9.0 3.10.1 3.11.0 3.12.0 3.13.0 3.13.0t"}

function check_var {
    if [ -z "$1" ]; then
@@ -23,8 +24,9 @@ function do_cpython_build {
    tar -xzf Python-$py_ver.tgz

    local additional_flags=""
    if [[ "$py_ver" == *"t" ]]; then
    if [ "$py_ver" == "3.13.0t" ]; then
        additional_flags=" --disable-gil"
        mv cpython-3.13/ cpython-3.13t/
    fi

    pushd $py_folder
@@ -74,20 +76,24 @@ function do_cpython_build {
function build_cpython {
    local py_ver=$1
    check_var $py_ver
    local py_suffix=$py_ver
    local py_folder=$py_ver
    check_var $PYTHON_DOWNLOAD_URL
    local py_ver_folder=$py_ver

    # Special handling for nogil
    if [[ "${py_ver}" == *"t" ]]; then
        py_suffix=${py_ver::-1}
        py_folder=$py_suffix
    if [ "$py_ver" = "3.13.0t" ]; then
        PY_VER_SHORT="3.13"
        PYT_VER_SHORT="3.13t"
        check_var $PYTHON_DOWNLOAD_GITHUB_BRANCH
        wget $PYTHON_DOWNLOAD_GITHUB_BRANCH/$PY_VER_SHORT.tar.gz -O Python-$py_ver.tgz
        do_cpython_build $py_ver cpython-$PYT_VER_SHORT
    elif [ "$py_ver" = "3.13.0" ]; then
        PY_VER_SHORT="3.13"
        check_var $PYTHON_DOWNLOAD_GITHUB_BRANCH
        wget $PYTHON_DOWNLOAD_GITHUB_BRANCH/$PY_VER_SHORT.tar.gz -O Python-$py_ver.tgz
        do_cpython_build $py_ver cpython-$PY_VER_SHORT
    else
        wget -q $PYTHON_DOWNLOAD_URL/$py_ver_folder/Python-$py_ver.tgz
        do_cpython_build $py_ver Python-$py_ver
    fi
    # Only b3 is available now
    if [ "$py_suffix" == "3.14.0" ]; then
        py_suffix="3.14.0b3"
    fi
    wget -q $PYTHON_DOWNLOAD_URL/$py_folder/Python-$py_suffix.tgz -O Python-$py_ver.tgz
    do_cpython_build $py_ver Python-$py_suffix

    rm -f Python-$py_ver.tgz
}
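Note: the `${py_ver::-1}` expansion in the new branch above trims the trailing `t` marker used by free-threaded ("nogil") builds. A minimal sketch of that suffix handling, with hypothetical input values:

```bash
#!/usr/bin/env bash
# Sketch of the nogil suffix handling above (hypothetical input value).
py_ver="3.14.0t"
if [[ "${py_ver}" == *"t" ]]; then
    py_suffix=${py_ver::-1}   # drop the trailing "t" -> "3.14.0"
    py_folder=$py_suffix      # the download folder uses the plain version
fi
echo "$py_suffix"  # prints: 3.14.0
```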
@@ -10,8 +10,6 @@ else
    arch_path='sbsa'
fi

NVSHMEM_VERSION=3.3.9

function install_cuda {
    version=$1
    runfile=$2
@@ -42,52 +40,13 @@ function install_cudnn {
    rm -rf tmp_cudnn
}

function install_nvshmem {
    cuda_major_version=$1   # e.g. "12"
    nvshmem_version=$2      # e.g. "3.3.9"

    case "${arch_path}" in
        sbsa)
            dl_arch="aarch64"
            ;;
        x86_64)
            dl_arch="x64"
            ;;
        *)
            dl_arch="${arch}"
            ;;
    esac

    tmpdir="tmp_nvshmem"
    mkdir -p "${tmpdir}" && cd "${tmpdir}"

    # nvSHMEM license: https://docs.nvidia.com/nvshmem/api/sla.html
    filename="libnvshmem_cuda${cuda_major_version}-linux-${arch_path}-${nvshmem_version}"
    url="https://developer.download.nvidia.com/compute/redist/nvshmem/${nvshmem_version}/builds/cuda${cuda_major_version}/txz/agnostic/${dl_arch}/${filename}.tar.gz"

    # download, unpack, install
    wget -q "${url}"
    tar xf "${filename}.tar.gz"
    cp -a "libnvshmem/include/"* /usr/local/include/
    cp -a "libnvshmem/lib/"* /usr/local/lib/

    # cleanup
    cd ..
    rm -rf "${tmpdir}"

    echo "nvSHMEM ${nvshmem_version} for CUDA ${cuda_major_version} (${arch_path}) installed."
}


function install_126 {
    CUDNN_VERSION=9.10.2.21
    echo "Installing CUDA 12.6.3 and cuDNN ${CUDNN_VERSION} and NVSHMEM and NCCL and cuSparseLt-0.7.1"
    CUDNN_VERSION=9.5.1.17
    echo "Installing CUDA 12.6.3 and cuDNN ${CUDNN_VERSION} and NCCL and cuSparseLt-0.7.1"
    install_cuda 12.6.3 cuda_12.6.3_560.35.05_linux

    install_cudnn 12 $CUDNN_VERSION

    install_nvshmem 12 $NVSHMEM_VERSION

    CUDA_VERSION=12.6 bash install_nccl.sh

    CUDA_VERSION=12.6 bash install_cusparselt.sh
@@ -96,16 +55,14 @@ function install_126 {
}

function install_129 {
    CUDNN_VERSION=9.10.2.21
    echo "Installing CUDA 12.9.1 and cuDNN ${CUDNN_VERSION} and NVSHMEM and NCCL and cuSparseLt-0.7.1"
    CUDNN_VERSION=9.10.1.4
    echo "Installing CUDA 12.9.1 and cuDNN ${CUDNN_VERSION} and NCCL and cuSparseLt-0.7.1"
    # install CUDA 12.9.1 in the same container
    install_cuda 12.9.1 cuda_12.9.1_575.57.08_linux

    # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
    install_cudnn 12 $CUDNN_VERSION

    install_nvshmem 12 $NVSHMEM_VERSION

    CUDA_VERSION=12.9 bash install_nccl.sh

    CUDA_VERSION=12.9 bash install_cusparselt.sh
@@ -149,15 +106,13 @@ function prune_126 {

function install_128 {
    CUDNN_VERSION=9.8.0.87
    echo "Installing CUDA 12.8.1 and cuDNN ${CUDNN_VERSION} and NVSHMEM and NCCL and cuSparseLt-0.7.1"
    echo "Installing CUDA 12.8.1 and cuDNN ${CUDNN_VERSION} and NCCL and cuSparseLt-0.7.1"
    # install CUDA 12.8.1 in the same container
    install_cuda 12.8.1 cuda_12.8.1_570.124.06_linux

    # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
    install_cudnn 12 $CUDNN_VERSION

    install_nvshmem 12 $NVSHMEM_VERSION

    CUDA_VERSION=12.8 bash install_nccl.sh

    CUDA_VERSION=12.8 bash install_cusparselt.sh
@@ -7,7 +7,7 @@ if [[ -n "${CUDNN_VERSION}" ]]; then
    if [[ ${CUDA_VERSION:0:4} == "12.9" || ${CUDA_VERSION:0:4} == "12.8" ]]; then
        CUDNN_NAME="cudnn-linux-x86_64-9.10.2.21_cuda12-archive"
    elif [[ ${CUDA_VERSION:0:4} == "12.6" ]]; then
        CUDNN_NAME="cudnn-linux-x86_64-9.10.2.21_cuda12-archive"
        CUDNN_NAME="cudnn-linux-x86_64-9.5.1.17_cuda12-archive"
    elif [[ ${CUDA_VERSION:0:2} == "11" ]]; then
        CUDNN_NAME="cudnn-linux-x86_64-9.1.0.70_cuda11-archive"
    else
@@ -20,7 +20,7 @@ pip_install \

pip_install coloredlogs packaging
pip_install onnxruntime==1.18.1
pip_install onnxscript==0.3.1
pip_install onnxscript==0.3.0

# Cache the transformers model to be used later by ONNX tests. We need to run the transformers
# package to download the model. By default, the model is cached at ~/.cache/huggingface/hub/
@@ -26,11 +26,6 @@ Pin: release o=repo.radeon.com
Pin-Priority: 600
EOF

# we want the patch version of 6.4 instead
if [[ $(ver $ROCM_VERSION) -eq $(ver 6.4) ]]; then
    ROCM_VERSION="${ROCM_VERSION}.1"
fi

# Add amdgpu repository
UBUNTU_VERSION_NAME=`cat /etc/os-release | grep UBUNTU_CODENAME | awk -F= '{print $2}'`
echo "deb [arch=amd64] https://repo.radeon.com/amdgpu/${ROCM_VERSION}/ubuntu ${UBUNTU_VERSION_NAME} main" > /etc/apt/sources.list.d/amdgpu.list
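Note: the `$(ver ...)` comparisons in these hunks rely on a helper defined earlier in the script (not shown in this diff) that packs a dotted version into a zero-padded number so plain integer tests like `-eq` and `-ge` order versions correctly. A sketch of the assumed shape of that helper:

```bash
# Assumed shape of the ver() helper used in the comparisons above:
# pack each dotted component into a fixed-width number so numeric
# tests order versions correctly (missing components default to 0).
ver() {
    printf "%3d%03d%03d%03d" $(echo "$1" | tr '.' ' ')
}

ver 6.4    # ->   6004000000
ver 6.4.1  # ->   6004001000
```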
@@ -72,23 +67,19 @@ EOF

# ROCm 6.3 had a regression where initializing static code objects had significant overhead
# ROCm 6.4 did not yet fix the regression, also HIP branch names are different
if [[ $(ver $ROCM_VERSION) -ge $(ver 6.3) ]] && [[ $(ver $ROCM_VERSION) -lt $(ver 7.0) ]]; then
    if [[ $(ver $ROCM_VERSION) -eq $(ver 6.4.1) ]]; then
        HIP_BRANCH=release/rocm-rel-6.4
        VER_STR=6.4
        VER_PATCH=.1
if [[ $(ver $ROCM_VERSION) -eq $(ver 6.3) ]] || [[ $(ver $ROCM_VERSION) -eq $(ver 6.4) ]]; then
    if [[ $(ver $ROCM_VERSION) -eq $(ver 6.3) ]]; then
        HIP_BRANCH=rocm-6.3.x
        VER_STR=6.3
    elif [[ $(ver $ROCM_VERSION) -eq $(ver 6.4) ]]; then
        HIP_BRANCH=release/rocm-rel-6.4
        VER_STR=6.4
    elif [[ $(ver $ROCM_VERSION) -eq $(ver 6.3) ]]; then
        HIP_BRANCH=rocm-6.3.x
        VER_STR=6.3
    fi
    # clr build needs CppHeaderParser but can only find it using conda's python
    /opt/conda/bin/python -m pip install CppHeaderParser
    git clone https://github.com/ROCm/HIP -b $HIP_BRANCH
    HIP_COMMON_DIR=$(readlink -f HIP)
    git clone https://github.com/jeffdaily/clr -b release/rocm-rel-${VER_STR}${VER_PATCH}-statco-hotfix
    git clone https://github.com/jeffdaily/clr -b release/rocm-rel-${VER_STR}-statco-hotfix
    mkdir -p clr/build
    pushd clr/build
    cmake .. -DCLR_BUILD_HIP=ON -DHIP_COMMON_DIR=$HIP_COMMON_DIR
@@ -5,12 +5,7 @@ set -eou pipefail

function do_install() {
    rocm_version=$1
    if [[ ${rocm_version} =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
        # chop off any patch version
        rocm_version="${rocm_version%.*}"
    fi

    rocm_version_nodot=${rocm_version//./}
    rocm_version_nodot=${1//./}

    # Version 2.7.2 + ROCm related updates
    MAGMA_VERSION=a1625ff4d9bc362906bd01f805dbbe12612953f6
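Note: the two parameter expansions in the hunk above behave as follows (hypothetical input values):

```bash
rocm_version="6.4.1"
echo "${rocm_version%.*}"   # strips the last dotted component -> 6.4
rocm_version="6.4"
echo "${rocm_version//./}"  # removes every dot -> 64
```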
@@ -98,10 +98,6 @@ fi
if [ -n "${NUMPY_VERSION}" ]; then
    pip_install "numpy==${NUMPY_VERSION}"
fi

# IMPORTANT: helion needs to be installed without dependencies.
# It depends on torch and triton. We don't want to install
# triton and torch from production on Docker CI images
if [[ "$ANACONDA_PYTHON_VERSION" != 3.9* ]]; then
    pip_install helion --no-deps
    pip_install helion
fi
@@ -39,10 +39,6 @@ case ${DOCKER_TAG_PREFIX} in
        DOCKER_GPU_BUILD_ARG=""
        ;;
    rocm*)
        # we want the patch version of 6.4 instead
        if [[ $(ver $GPU_ARCH_VERSION) -eq $(ver 6.4) ]]; then
            GPU_ARCH_VERSION="${GPU_ARCH_VERSION}.1"
        fi
        BASE_TARGET=rocm
        GPU_IMAGE=rocm/dev-ubuntu-22.04:${GPU_ARCH_VERSION}-complete
        PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
@@ -26,7 +26,7 @@ ADD ./common/install_openssl.sh install_openssl.sh
RUN bash ./install_openssl.sh && rm install_openssl.sh


# remove unnecessary python versions
# remove unncessary python versions
RUN rm -rf /opt/python/cp26-cp26m /opt/_internal/cpython-2.6.9-ucs2
RUN rm -rf /opt/python/cp26-cp26mu /opt/_internal/cpython-2.6.9-ucs4
RUN rm -rf /opt/python/cp33-cp33m /opt/_internal/cpython-3.3.6
@@ -2,7 +2,7 @@ FROM quay.io/pypa/manylinux_2_28_aarch64 as base

ARG GCCTOOLSET_VERSION=13

# Language variables
# Language variabes
ENV LC_ALL=en_US.UTF-8
ENV LANG=en_US.UTF-8
ENV LANGUAGE=en_US.UTF-8
@@ -64,7 +64,7 @@ RUN bash ./install_openblas.sh && rm install_openblas.sh

FROM base as final

# remove unnecessary python versions
# remove unncessary python versions
RUN rm -rf /opt/python/cp26-cp26m /opt/_internal/cpython-2.6.9-ucs2
RUN rm -rf /opt/python/cp26-cp26mu /opt/_internal/cpython-2.6.9-ucs4
RUN rm -rf /opt/python/cp33-cp33m /opt/_internal/cpython-3.3.6
@@ -60,7 +60,7 @@ RUN bash ./install_openssl.sh && rm install_openssl.sh
ENV SSL_CERT_FILE=/opt/_internal/certs.pem

FROM openssl as final
# remove unnecessary python versions
# remove unncessary python versions
RUN rm -rf /opt/python/cp26-cp26m /opt/_internal/cpython-2.6.9-ucs2
RUN rm -rf /opt/python/cp26-cp26mu /opt/_internal/cpython-2.6.9-ucs4
RUN rm -rf /opt/python/cp33-cp33m /opt/_internal/cpython-3.3.6
@@ -120,19 +120,15 @@ RUN python3 -mpip install cmake==3.28.0
# so just build it from upstream repository.
# h5py is dependency of onnxruntime_training.
# h5py==3.11.0 builds with hdf5-devel 1.10.5 from repository.
# h5py 3.11.0 doesn't build with numpy >= 2.3.0.
# install newest flatbuffers version first:
# for some reason old version is getting pulled in otherwise.
# packaging package is required for onnxruntime wheel build.
RUN pip3 install flatbuffers && \
    pip3 install cython 'pkgconfig>=1.5.5' 'setuptools>=77' 'numpy<2.3.0' && \
    pip3 install --no-build-isolation h5py==3.11.0 && \
    pip3 install h5py==3.11.0 && \
    pip3 install packaging && \
    git clone https://github.com/microsoft/onnxruntime && \
    cd onnxruntime && git checkout v1.21.0 && \
    git submodule update --init --recursive && \
    wget https://github.com/microsoft/onnxruntime/commit/f57db79743c4d1a3553aa05cf95bcd10966030e6.patch && \
    patch -p1 < f57db79743c4d1a3553aa05cf95bcd10966030e6.patch && \
    ./build.sh --config Release --parallel 0 --enable_pybind \
    --build_wheel --enable_training --enable_training_apis \
    --enable_training_ops --skip_tests --allow_running_as_root \
@@ -75,10 +75,6 @@ case ${image} in
        DOCKERFILE_SUFFIX="_cuda_aarch64"
        ;;
    manylinux2_28-builder:rocm*)
        # we want the patch version of 6.4 instead
        if [[ $(ver $GPU_ARCH_VERSION) -eq $(ver 6.4) ]]; then
            GPU_ARCH_VERSION="${GPU_ARCH_VERSION}.1"
        fi
        TARGET=rocm_final
        MANY_LINUX_VERSION="2_28"
        DEVTOOLSET_VERSION="11"
@@ -90,10 +90,10 @@ librosa>=0.6.2 ; python_version < "3.11"
#Pinned versions:
#test that import:

mypy==1.16.0
mypy==1.15.0
# Pin MyPy version because new errors are likely to appear with each release
#Description: linter
#Pinned versions: 1.16.0
#Pinned versions: 1.14.0
#test that import: test_typing.py, test_type_hints.py

networkx==2.8.8
@@ -339,7 +339,7 @@ onnx==1.18.0
#Pinned versions:
#test that import:

onnxscript==0.3.1
onnxscript==0.2.6
#Description: Required by mypy and test_public_bindings.py when checking torch.onnx._internal
#Pinned versions:
#test that import:
@@ -382,7 +382,3 @@ cmake==4.0.0

tlparse==0.3.30
#Description: required for log parsing

cuda-bindings>=12.0,<13.0 ; platform_machine != "s390x"
#Description: required for testing CUDAGraph::raw_cuda_graph(). See https://nvidia.github.io/cuda-python/cuda-bindings/latest/support.html for how this version was chosen. Note "Any fix in the latest bindings would be backported to the prior major version" means that only the newest version of cuda-bindings will get fixes. Depending on the latest version of 12.x is okay because all 12.y versions will be supported via "CUDA minor version compatibility". Pytorch builds against 13.z versions of cuda toolkit work with 12.x versions of cuda-bindings as well because newer drivers work with old toolkits.
#test that import: test_cuda.py
@@ -19,10 +19,9 @@ sphinx_sitemap==2.6.0
#Description: This is used to generate sitemap for PyTorch docs
#Pinned versions: 2.6.0

matplotlib==3.5.3 ; python_version < "3.13"
matplotlib==3.6.3 ; python_version >= "3.13"
matplotlib==3.5.3
#Description: This is used to generate PyTorch docs
#Pinned versions: 3.6.3 if python > 3.12. Otherwise 3.5.3.
#Pinned versions: 3.5.3

tensorboard==2.13.0 ; python_version < "3.13"
tensorboard==2.18.0 ; python_version >= "3.13"
@@ -1 +1 @@
3.4.0
3.3.1
@@ -1 +1 @@
3.4.0
3.3.1
@@ -25,7 +25,6 @@ RUN bash ./install_docs_reqs.sh && rm install_docs_reqs.sh

# Install conda and other packages (e.g., numpy, pytest)
ARG ANACONDA_PYTHON_VERSION
ARG BUILD_ENVIRONMENT
ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION
ENV PATH /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/bin:/opt/conda/bin:$PATH
COPY requirements-ci.txt /opt/conda/requirements-ci.txt
@@ -31,6 +31,7 @@ elif [[ "$OS_NAME" == *"Ubuntu"* ]]; then
    # Comment out nvidia repositories to prevent them from getting apt-get updated, see https://github.com/pytorch/pytorch/issues/74968
    # shellcheck disable=SC2046
    sed -i 's/.*nvidia.*/# &/' $(find /etc/apt/ -type f -name "*.list")

    retry apt-get update
    retry apt-get -y install zip openssl
else
@@ -97,7 +98,6 @@ if [[ -z "$PYTORCH_ROOT" ]]; then
    exit 1
fi
pushd "$PYTORCH_ROOT"
retry pip install -q cmake
python setup.py clean
retry pip install -qr requirements.txt
case ${DESIRED_PYTHON} in
@@ -151,7 +151,7 @@ if [[ "$USE_SPLIT_BUILD" == "true" ]]; then
    BUILD_LIBTORCH_WHL=0 BUILD_PYTHON_ONLY=1 \
    BUILD_LIBTORCH_CPU_WITH_DEBUG=$BUILD_DEBUG_INFO \
    USE_NCCL=${USE_NCCL} USE_RCCL=${USE_RCCL} USE_KINETO=${USE_KINETO} \
    CMAKE_FRESH=1 python setup.py bdist_wheel -d /tmp/$WHEELHOUSE_DIR
    python setup.py bdist_wheel -d /tmp/$WHEELHOUSE_DIR --cmake
    echo "Finished setup.py bdist_wheel for split build (BUILD_PYTHON_ONLY)"
else
    time CMAKE_ARGS=${CMAKE_ARGS[@]} \
@@ -51,22 +51,16 @@ else
fi

cuda_version_nodot=$(echo $CUDA_VERSION | tr -d '.')
EXTRA_CAFFE2_CMAKE_FLAGS+=("-DATEN_NO_TEST=ON")

TORCH_CUDA_ARCH_LIST="5.0;6.0;7.0;7.5;8.0;8.6"
case ${CUDA_VERSION} in
    #removing sm_50-sm_70 as these architectures are deprecated in CUDA 12.8/9 and will be removed in future releases
    12.8)
        TORCH_CUDA_ARCH_LIST="7.5;8.0;8.6;9.0;10.0;12.0"
        ;;
    12.9)
        TORCH_CUDA_ARCH_LIST="7.5;8.0;8.6;9.0;10.0;12.0+PTX"
        # WAR to resolve the ld error in libtorch build with CUDA 12.9
        if [[ "$PACKAGE_TYPE" == "libtorch" ]]; then
            TORCH_CUDA_ARCH_LIST="7.5;8.0;9.0;10.0;12.0+PTX"
        fi
    12.8|12.9)
        TORCH_CUDA_ARCH_LIST="7.5;8.0;8.6;9.0;10.0;12.0+PTX" #removing sm_50-sm_70 as these architectures are deprecated in CUDA 12.8/9 and will be removed in future releases
        EXTRA_CAFFE2_CMAKE_FLAGS+=("-DATEN_NO_TEST=ON")
        ;;
    12.6)
        TORCH_CUDA_ARCH_LIST="5.0;6.0;7.0;7.5;8.0;8.6;9.0"
        TORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST};9.0"
        EXTRA_CAFFE2_CMAKE_FLAGS+=("-DATEN_NO_TEST=ON")
        ;;
    *)
        echo "unknown cuda version $CUDA_VERSION"
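Note on the arch lists above: each `TORCH_CUDA_ARCH_LIST` entry names a CUDA compute capability to compile for, and a `+PTX` suffix additionally embeds forward-compatible PTX. An illustrative sketch of the assumed mapping (not the exact build logic):

```bash
# Illustrative (assumed) expansion of TORCH_CUDA_ARCH_LIST entries
# into nvcc flags by the build system:
#   "9.0"      -> -gencode arch=compute_90,code=sm_90
#   "12.0+PTX" -> -gencode arch=compute_120,code=sm_120
#                 -gencode arch=compute_120,code=compute_120   # PTX fallback
```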
@@ -109,11 +103,12 @@ DEPS_SONAME=(
)


# CUDA_VERSION 12.6, 12.8, 12.9
# CUDA_VERSION 12.6, 12.8
if [[ $CUDA_VERSION == 12* ]]; then
    export USE_STATIC_CUDNN=0
    # Try parallelizing nvcc as well
    export TORCH_NVCC_FLAGS="-Xfatbin -compress-all --threads 2"

    if [[ -z "$PYTORCH_EXTRA_INSTALL_REQUIREMENTS" ]]; then
        echo "Bundling with cudnn and cublas."
        DEPS_LIST+=(
@@ -129,12 +124,11 @@ if [[ $CUDA_VERSION == 12* ]]; then
            "/usr/local/cuda/lib64/libcublasLt.so.12"
            "/usr/local/cuda/lib64/libcusparseLt.so.0"
            "/usr/local/cuda/lib64/libcudart.so.12"
            "/usr/local/cuda/lib64/libnvToolsExt.so.1"
            "/usr/local/cuda/lib64/libnvrtc.so.12"
            "/usr/local/cuda/lib64/libnvrtc-builtins.so"
            "/usr/local/cuda/lib64/libcufile.so.0"
            "/usr/local/cuda/lib64/libcufile_rdma.so.1"
            "/usr/local/cuda/extras/CUPTI/lib64/libcupti.so.12"
            "/usr/local/cuda/extras/CUPTI/lib64/libnvperf_host.so"
        )
        DEPS_SONAME+=(
            "libcudnn_adv.so.9"
@@ -149,18 +143,12 @@ if [[ $CUDA_VERSION == 12* ]]; then
            "libcublasLt.so.12"
            "libcusparseLt.so.0"
            "libcudart.so.12"
            "libnvToolsExt.so.1"
            "libnvrtc.so.12"
            "libnvrtc-builtins.so"
            "libcufile.so.0"
            "libcufile_rdma.so.1"
            "libcupti.so.12"
            "libnvperf_host.so"
        )
        # Add libnvToolsExt only if CUDA version is not 12.9
        if [[ $CUDA_VERSION != 12.9* ]]; then
            DEPS_LIST+=("/usr/local/cuda/lib64/libnvToolsExt.so.1")
            DEPS_SONAME+=("libnvToolsExt.so.1")
        fi
    else
        echo "Using nvidia libs from pypi."
        CUDA_RPATHS=(
@@ -92,7 +92,6 @@ if [[ -z "$PYTORCH_ROOT" ]]; then
    exit 1
fi
pushd "$PYTORCH_ROOT"
retry pip install -q cmake
python setup.py clean
retry pip install -qr requirements.txt
retry pip install -q numpy==2.0.1
@@ -187,7 +187,7 @@ do
    OS_SO_FILES[${#OS_SO_FILES[@]}]=$file_name # Append lib to array
done

ARCH=$(echo $PYTORCH_ROCM_ARCH | sed 's/;/|/g') # Replace ; separated arch list to bar for grep
ARCH=$(echo $PYTORCH_ROCM_ARCH | sed 's/;/|/g') # Replace ; seperated arch list to bar for grep

# rocBLAS library files
ROCBLAS_LIB_SRC=$ROCM_HOME/lib/rocblas/library
@@ -198,8 +198,10 @@ fi

# We only build FlashAttention files for CUDA 8.0+, and they require large amounts of
# memory to build and will OOM
if [[ "$BUILD_ENVIRONMENT" == *cuda* ]] && [[ 1 -eq $(echo "${TORCH_CUDA_ARCH_LIST} >= 8.0" | bc) ]]; then
    export BUILD_CUSTOM_STEP="ninja -C build flash_attention -j 2"
if [[ "$BUILD_ENVIRONMENT" == *cuda* ]] && [[ 1 -eq $(echo "${TORCH_CUDA_ARCH_LIST} >= 8.0" | bc) ]] && [ -z "$MAX_JOBS_OVERRIDE" ]; then
    echo "WARNING: FlashAttention files require large amounts of memory to build and will OOM"
    echo "Setting MAX_JOBS=(nproc-2)/3 to reduce memory usage"
    export MAX_JOBS="$(( $(nproc --ignore=2) / 3 ))"
fi

if [[ "${BUILD_ENVIRONMENT}" == *clang* ]]; then
@@ -255,7 +257,6 @@ if [[ "$BUILD_ENVIRONMENT" == *-bazel-* ]]; then
    set -e -o pipefail

    get_bazel
    python3 tools/optional_submodules.py checkout_eigen

    # Leave 1 CPU free and use only up to 80% of memory to reduce the change of crashing
    # the runner
@@ -393,8 +394,10 @@ else
    # This is an attempt to mitigate flaky libtorch build OOM error. By default, the build parallelization
    # is set to be the number of CPU minus 2. So, let's try a more conservative value here. A 4xlarge has
    # 16 CPUs
    MAX_JOBS=$(nproc --ignore=4)
    export MAX_JOBS
    if [ -z "$MAX_JOBS_OVERRIDE" ]; then
        MAX_JOBS=$(nproc --ignore=4)
        export MAX_JOBS
    fi

    # NB: Install outside of source directory (at the same level as the root
    # pytorch folder) so that it doesn't get cleaned away prior to docker push.
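A worked example of the job-count arithmetic used in the two hunks above (runner size hypothetical):

```bash
# On a hypothetical 32-CPU runner:
nproc                                  # 32
nproc --ignore=2                       # 30 (reserve two CPUs)
echo $(( $(nproc --ignore=2) / 3 ))    # 10 -> MAX_JOBS for the FlashAttention build
nproc --ignore=4                       # 28 -> MAX_JOBS for the libtorch build path
```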
@@ -13,13 +13,6 @@ if [[ "$BUILD_ENVIRONMENT" != *win-* ]]; then
fi

if which sccache > /dev/null; then
    # Clear SCCACHE_BUCKET and SCCACHE_REGION if they are empty, otherwise
    # sccache will complain about invalid bucket configuration
    if [[ -z "${SCCACHE_BUCKET:-}" ]]; then
        unset SCCACHE_BUCKET
        unset SCCACHE_REGION
    fi

    # Save sccache logs to file
    sccache --stop-server > /dev/null 2>&1 || true
    rm -f ~/sccache_error.log || true
@@ -15,6 +15,6 @@ if [[ "${BUILD_ENVIRONMENT}" == *rocm* ]]; then
    export PYTORCH_TEST_WITH_ROCM=1
fi

# TODO: Reenable libtorch testing for MacOS, see https://github.com/pytorch/pytorch/issues/62598
# TODO: Renable libtorch testing for MacOS, see https://github.com/pytorch/pytorch/issues/62598
# shellcheck disable=SC2034
BUILD_TEST_LIBTORCH=0
@@ -93,7 +93,7 @@ def check_lib_symbols_for_abi_correctness(lib: str) -> None:
            f"Found pre-cxx11 symbols, but there shouldn't be any, see: {pre_cxx11_symbols[:100]}"
        )
    if num_cxx11_symbols < 100:
        raise RuntimeError("Didn't find enough cxx11 symbols")
        raise RuntimeError("Didn't find enought cxx11 symbols")


def main() -> None:
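For background, ABI checks like the one above typically count `std::__cxx11`-mangled symbols in the shared library. A rough sketch of how such counts can be obtained (assumed mechanism, not shown in this diff):

```bash
# Count cxx11-ABI symbols in libtorch (illustrative only; the library
# name and threshold are assumptions).
nm -D -C libtorch_cpu.so | grep -c 'std::__cxx11'
```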
@@ -276,7 +276,7 @@ def smoke_test_cuda(
        torch_nccl_version = ".".join(str(v) for v in torch.cuda.nccl.version())
        print(f"Torch nccl; version: {torch_nccl_version}")

    # Pypi dependencies are installed on linux only and nccl is available only on Linux.
    # Pypi dependencies are installed on linux ony and nccl is availbale only on Linux.
    if pypi_pkg_check == "enabled" and sys.platform in ["linux", "linux2"]:
        compare_pypi_to_torch_versions(
            "cudnn", find_pypi_package_version("nvidia-cudnn"), torch_cudnn_version
@@ -11,8 +11,6 @@ export TERM=vt100

# shellcheck source=./common.sh
source "$(dirname "${BASH_SOURCE[0]}")/common.sh"
# shellcheck source=./common-build.sh
source "$(dirname "${BASH_SOURCE[0]}")/common-build.sh"

# Do not change workspace permissions for ROCm and s390x CI jobs
# as it can leave workspace with bad permissions for cancelled jobs
@@ -165,6 +163,8 @@ elif [[ "$BUILD_ENVIRONMENT" == *xpu* ]]; then
    export PYTORCH_TESTING_DEVICE_ONLY_FOR="xpu"
    # setting PYTHON_TEST_EXTRA_OPTION
    export PYTHON_TEST_EXTRA_OPTION="--xpu"
    # Disable sccache for xpu test due to flaky issue https://github.com/pytorch/pytorch/issues/143585
    sudo rm -rf /opt/cache
fi

if [[ "$TEST_CONFIG" == *crossref* ]]; then
@@ -196,7 +196,7 @@ if [[ "$BUILD_ENVIRONMENT" == *xpu* ]]; then
    # shellcheck disable=SC1091
    source /opt/intel/oneapi/mpi/latest/env/vars.sh
    # Check XPU status before testing
    timeout 30 xpu-smi discovery || true
    xpu-smi discovery
fi

if [[ "$BUILD_ENVIRONMENT" != *-bazel-* ]] ; then
@@ -224,7 +224,7 @@ if [[ "$BUILD_ENVIRONMENT" == *asan* ]]; then
    export PYTORCH_TEST_WITH_ASAN=1
    export PYTORCH_TEST_WITH_UBSAN=1
    # TODO: Figure out how to avoid hard-coding these paths
    export ASAN_SYMBOLIZER_PATH=/usr/lib/llvm-18/bin/llvm-symbolizer
    export ASAN_SYMBOLIZER_PATH=/usr/lib/llvm-15/bin/llvm-symbolizer
    export TORCH_USE_RTLD_GLOBAL=1
    # NB: We load libtorch.so with RTLD_GLOBAL for UBSAN, unlike our
    # default behavior.
@@ -325,17 +325,6 @@ test_python_smoke() {
test_h100_distributed() {
    # Distributed tests at H100
    time python test/run_test.py --include distributed/_composable/test_composability/test_pp_composability.py $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
    # This test requires multicast support
    time python test/run_test.py --include distributed/_composable/fsdp/test_fully_shard_comm.py -k TestFullyShardAllocFromPG $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
    assert_git_not_dirty
}

test_h100_symm_mem() {
    # symmetric memory test
    time python test/run_test.py --include distributed/test_symmetric_memory.py $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
    time python test/run_test.py --include distributed/test_nvshmem.py $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
    time python test/run_test.py --include distributed/test_nvshmem_triton.py $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
    time python test/run_test.py --include distributed/test_nccl.py $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
    assert_git_not_dirty
}
@@ -353,7 +342,6 @@ test_dynamo_wrapped_shard() {
        exit 1
    fi
    python tools/dynamo/verify_dynamo.py
    python tools/dynamo/gb_id_mapping.py verify
    # PLEASE DO NOT ADD ADDITIONAL EXCLUDES HERE.
    # Instead, use @skipIfTorchDynamo on your tests.
    time python test/run_test.py --dynamo \
@@ -368,17 +356,6 @@ test_dynamo_wrapped_shard() {
    assert_git_not_dirty
}

test_einops() {
    pip install einops==0.6.1
    time python test/run_test.py --einops --verbose --upload-artifacts-while-running
    pip install einops==0.7.0
    time python test/run_test.py --einops --verbose --upload-artifacts-while-running
    pip install einops==0.8.1
    time python test/run_test.py --einops --verbose --upload-artifacts-while-running
    assert_git_not_dirty
}


test_inductor_distributed() {
    # Smuggle a few multi-gpu tests here so that we don't have to request another large node
    echo "Testing multi_gpu tests in test_torchinductor"
@@ -436,21 +413,14 @@ test_inductor_aoti() {
        python3 tools/amd_build/build_amd.py
    fi
    if [[ "$BUILD_ENVIRONMENT" == *sm86* ]]; then
        BUILD_COMMAND=(TORCH_CUDA_ARCH_LIST=8.6 USE_FLASH_ATTENTION=OFF python setup.py develop)
        BUILD_AOT_INDUCTOR_TEST=1 TORCH_CUDA_ARCH_LIST=8.6 USE_FLASH_ATTENTION=OFF python setup.py develop
        # TODO: Replace me completely, as one should not use conda libstdc++, nor need special path to TORCH_LIB
        TEST_ENVS=(CPP_TESTS_DIR="${BUILD_BIN_DIR}" LD_LIBRARY_PATH="/opt/conda/envs/py_3.10/lib:${TORCH_LIB_DIR}:${LD_LIBRARY_PATH}")
        LD_LIBRARY_PATH=/opt/conda/envs/py_3.10/lib/:${TORCH_LIB_DIR}:$LD_LIBRARY_PATH
        CPP_TESTS_DIR="${BUILD_BIN_DIR}" python test/run_test.py --cpp --verbose -i cpp/test_aoti_abi_check cpp/test_aoti_inference -dist=loadfile
    else
        BUILD_COMMAND=(python setup.py develop)
        TEST_ENVS=(CPP_TESTS_DIR="${BUILD_BIN_DIR}" LD_LIBRARY_PATH="${TORCH_LIB_DIR}")
        BUILD_AOT_INDUCTOR_TEST=1 python setup.py develop
        CPP_TESTS_DIR="${BUILD_BIN_DIR}" LD_LIBRARY_PATH="${TORCH_LIB_DIR}" python test/run_test.py --cpp --verbose -i cpp/test_aoti_abi_check cpp/test_aoti_inference -dist=loadfile
    fi

    # aoti cmake custom command requires `torch` to be installed
    # initialize the cmake build cache and install torch
    /usr/bin/env "${BUILD_COMMAND[@]}"
    # rebuild with the build cache with `BUILD_AOT_INDUCTOR_TEST` enabled
    /usr/bin/env CMAKE_FRESH=1 BUILD_AOT_INDUCTOR_TEST=1 "${BUILD_COMMAND[@]}"

    /usr/bin/env "${TEST_ENVS[@]}" python test/run_test.py --cpp --verbose -i cpp/test_aoti_abi_check cpp/test_aoti_inference -dist=loadfile
}

test_inductor_cpp_wrapper_shard() {
@@ -463,26 +433,47 @@ test_inductor_cpp_wrapper_shard() {
    TEST_REPORTS_DIR=$(pwd)/test/test-reports
    mkdir -p "$TEST_REPORTS_DIR"

    if [[ "$1" -eq "2" ]]; then
        # For now, manually put the opinfo tests in shard 2, and all other tests in
        # shard 1. Run all CPU tests, as well as specific GPU tests triggering past
        # bugs, for now.
        python test/run_test.py \
            --include inductor/test_torchinductor_opinfo \
            -k 'linalg or to_sparse or TestInductorOpInfoCPU' \
            --verbose
        exit
    fi

    # Run certain inductor unit tests with cpp wrapper. In the end state, we
    # should be able to run all the inductor unit tests with cpp_wrapper.
    #
    # TODO: I'm pretty sure that "TestInductorOpInfoCPU" is not a valid filter,
    # but change that in another PR to more accurately monitor the increased CI
    # usage.
    python test/run_test.py \
        --include inductor/test_torchinductor_opinfo \
        -k 'linalg or to_sparse or TestInductorOpInfoCPU' \
        --shard "$1" "$NUM_TEST_SHARDS" \
        --verbose
    python test/run_test.py \
        --include inductor/test_torchinductor inductor/test_max_autotune inductor/test_cpu_repro \
        --shard "$1" "$NUM_TEST_SHARDS" \
        --verbose
    python test/run_test.py --inductor \
        --include test_torch \
        -k 'take' \
        --shard "$1" "$NUM_TEST_SHARDS" \
        --verbose
    python test/run_test.py --inductor --include test_torch -k 'take' --verbose

    # Run inductor benchmark tests with cpp wrapper.
    # Skip benchmark tests if it's in rerun-disabled-mode.
    if [[ "${PYTORCH_TEST_RERUN_DISABLED_TESTS}" == "1" ]]; then
        echo "skip dynamo benchmark tests for rerun-disabled-test"
    else
        echo "run dynamo benchmark tests with cpp wrapper"
        python benchmarks/dynamo/timm_models.py --device cuda --accuracy --amp \
            --training --inductor --disable-cudagraphs --only vit_base_patch16_224 \
            --output "$TEST_REPORTS_DIR/inductor_cpp_wrapper_training.csv"
        python benchmarks/dynamo/check_accuracy.py \
            --actual "$TEST_REPORTS_DIR/inductor_cpp_wrapper_training.csv" \
            --expected "benchmarks/dynamo/ci_expected_accuracy/${MAYBE_ROCM}inductor_timm_training.csv"

        python benchmarks/dynamo/torchbench.py --device cuda --accuracy \
            --bfloat16 --inference --inductor --only hf_T5 --output "$TEST_REPORTS_DIR/inductor_cpp_wrapper_inference.csv"
        python benchmarks/dynamo/torchbench.py --device cuda --accuracy \
            --bfloat16 --inference --inductor --only llama --output "$TEST_REPORTS_DIR/inductor_cpp_wrapper_inference.csv"
        python benchmarks/dynamo/torchbench.py --device cuda --accuracy \
            --bfloat16 --inference --inductor --only moco --output "$TEST_REPORTS_DIR/inductor_cpp_wrapper_inference.csv"
        python benchmarks/dynamo/check_accuracy.py \
            --actual "$TEST_REPORTS_DIR/inductor_cpp_wrapper_inference.csv" \
            --expected "benchmarks/dynamo/ci_expected_accuracy/${MAYBE_ROCM}inductor_torchbench_inference.csv"
    fi
}

# "Global" flags for inductor benchmarking controlled by TEST_CONFIG
@@ -603,9 +594,7 @@ test_perf_for_dashboard() {

    local device=cuda
    if [[ "${TEST_CONFIG}" == *cpu* ]]; then
        if [[ "${TEST_CONFIG}" == *cpu_x86_zen* ]]; then
            device=cpu_x86_zen
        elif [[ "${TEST_CONFIG}" == *cpu_x86* ]]; then
        if [[ "${TEST_CONFIG}" == *cpu_x86* ]]; then
            device=cpu_x86
        elif [[ "${TEST_CONFIG}" == *cpu_aarch64* ]]; then
            device=cpu_aarch64
@@ -621,11 +610,7 @@ test_perf_for_dashboard() {

    for mode in "${modes[@]}"; do
        if [[ "$mode" == "inference" ]]; then
            if [[ "$device" == "cpu_x86" ]]; then
                dtype=amp
            else
                dtype=bfloat16
            fi
            dtype=bfloat16
        elif [[ "$mode" == "training" ]]; then
            dtype=amp
        fi
@@ -637,10 +622,6 @@ test_perf_for_dashboard() {
            target_flag+=( --no-translation-validation)
        fi

        if [[ "$DASHBOARD_TAG" == *freezing-true* ]]; then
            target_flag+=( --freezing)
        fi

        if [[ "$DASHBOARD_TAG" == *default-true* ]]; then
            $TASKSET python "benchmarks/dynamo/$suite.py" \
                "${target_flag[@]}" --"$mode" --"$dtype" --backend "$backend" --disable-cudagraphs "$@" \
@@ -1154,12 +1135,6 @@ test_custom_backend() {

test_custom_script_ops() {
    echo "Testing custom script operators"

    if [[ "$BUILD_ENVIRONMENT" == *s390x* ]]; then
        echo "Skipping custom script operators until it's fixed"
        return 0
    fi

    CUSTOM_OP_BUILD="${CUSTOM_TEST_ARTIFACT_BUILD_DIR}/custom-op-build"
    pushd test/custom_operator
    cp -a "$CUSTOM_OP_BUILD" build
@@ -1694,11 +1669,11 @@ elif [[ "${TEST_CONFIG}" == *torchbench* ]]; then
        PYTHONPATH=$(pwd)/torchbench test_dynamo_benchmark torchbench "$id"
    fi
elif [[ "${TEST_CONFIG}" == *inductor_cpp_wrapper* ]]; then
    install_torchaudio cuda
    install_torchvision
    checkout_install_torchbench hf_T5 llama moco
    PYTHONPATH=$(pwd)/torchbench test_inductor_cpp_wrapper_shard "$SHARD_NUMBER"
    if [[ "$SHARD_NUMBER" -eq "1" ]]; then
        test_inductor_aoti
    fi
    test_inductor_aoti
elif [[ "${TEST_CONFIG}" == *inductor* ]]; then
    install_torchvision
    test_inductor_shard "${SHARD_NUMBER}"
@@ -1707,8 +1682,6 @@ elif [[ "${TEST_CONFIG}" == *inductor* ]]; then
        test_inductor_distributed
    fi
fi
elif [[ "${TEST_CONFIG}" == *einops* ]]; then
    test_einops
elif [[ "${TEST_CONFIG}" == *dynamo_wrapped* ]]; then
    install_torchvision
    test_dynamo_wrapped_shard "${SHARD_NUMBER}"
@@ -1758,8 +1731,6 @@ elif [[ "${TEST_CONFIG}" == smoke ]]; then
    test_python_smoke
elif [[ "${TEST_CONFIG}" == h100_distributed ]]; then
    test_h100_distributed
elif [[ "${TEST_CONFIG}" == "h100-symm-mem" ]]; then
    test_h100_symm_mem
else
    install_torchvision
    install_monkeytype
@@ -31,7 +31,7 @@ PYLONG_API_CHECK=$?
if [[ $PYLONG_API_CHECK == 0 ]]; then
    echo "Usage of PyLong_{From,As}{Unsigned}Long API may lead to overflow errors on Windows"
    echo "because \`sizeof(long) == 4\` and \`sizeof(unsigned long) == 4\`."
    echo "Please include \"torch/csrc/utils/python_numbers.h\" and use the corresponding APIs instead."
    echo "Please include \"torch/csrc/utils/python_numbers.h\" and use the correspoding APIs instead."
    echo "PyLong_FromLong -> THPUtils_packInt32 / THPUtils_packInt64"
    echo "PyLong_AsLong -> THPUtils_unpackInt (32-bit) / THPUtils_unpackLong (64-bit)"
    echo "PyLong_FromUnsignedLong -> THPUtils_packUInt32 / THPUtils_packUInt64"
@@ -10,7 +10,7 @@ set PATH=C:\Program Files\CMake\bin;C:\Program Files\7-Zip;C:\ProgramData\chocol
:: able to see what our cl.exe commands are (since you can actually
:: just copy-paste them into a local Windows setup to just rebuild a
:: single file.)
:: log sizes are too long, but leaving this here in case someone wants to use it locally
:: log sizes are too long, but leaving this here incase someone wants to use it locally
:: set CMAKE_VERBOSE_MAKEFILE=1
@@ -52,7 +52,7 @@ if __name__ == "__main__":
        if os.path.exists(debugger):
            command_args = [debugger, "-o", "-c", "~*g; q"] + command_args
            command_string = " ".join(command_args)
            print("Rerunning with traceback enabled")
            print("Reruning with traceback enabled")
            print("Command:", command_string)
        subprocess.run(command_args, check=False)
        sys.exit(e.returncode)
@@ -52,9 +52,6 @@ python -m pip install parameterized==0.8.1
# Install pulp for testing ilps under torch\distributed\_tools
python -m pip install pulp==2.9.0

# Install expecttest to merge https://github.com/pytorch/pytorch/pull/155308
python -m pip install expecttest==0.3.0

run_tests() {
    # Run nvidia-smi if available
    for path in '/c/Program Files/NVIDIA Corporation/NVSMI/nvidia-smi.exe' /c/Windows/System32/nvidia-smi.exe; do
@@ -1,59 +0,0 @@
@echo off

set MODULE_NAME=pytorch

IF NOT EXIST "setup.py" IF NOT EXIST "%MODULE_NAME%" (
    call internal\clone.bat
    cd %~dp0
) ELSE (
    call internal\clean.bat
)
IF ERRORLEVEL 1 goto :eof

call internal\check_deps.bat
IF ERRORLEVEL 1 goto :eof

REM Check for optional components

set USE_CUDA=
set CMAKE_GENERATOR=Visual Studio 15 2017 Win64

IF "%NVTOOLSEXT_PATH%"=="" (
    IF EXIST "C:\Program Files\NVIDIA Corporation\NvToolsExt\lib\x64\nvToolsExt64_1.lib" (
        set NVTOOLSEXT_PATH=C:\Program Files\NVIDIA Corporation\NvToolsExt
    ) ELSE (
        echo NVTX ^(Visual Studio Extension ^for CUDA^) ^not installed, failing
        exit /b 1
    )
)

IF "%CUDA_PATH_V129%"=="" (
    IF EXIST "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.9\bin\nvcc.exe" (
        set "CUDA_PATH_V129=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.9"
    ) ELSE (
        echo CUDA 12.9 not found, failing
        exit /b 1
    )
)

IF "%BUILD_VISION%" == "" (
    set TORCH_CUDA_ARCH_LIST=7.5;8.0;8.6;9.0;10.0;12.0
    set TORCH_NVCC_FLAGS=-Xfatbin -compress-all
) ELSE (
    set NVCC_FLAGS=-D__CUDA_NO_HALF_OPERATORS__ --expt-relaxed-constexpr -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=compute_80 -gencode=arch=compute_86,code=compute_86 -gencode=arch=compute_90,code=compute_90 -gencode=arch=compute_100,code=compute_100 -gencode=arch=compute_120,code=compute_120
)

set "CUDA_PATH=%CUDA_PATH_V129%"
set "PATH=%CUDA_PATH_V129%\bin;%PATH%"

:optcheck

call internal\check_opts.bat
IF ERRORLEVEL 1 goto :eof

if exist "%NIGHTLIES_PYTORCH_ROOT%" cd %NIGHTLIES_PYTORCH_ROOT%\..
call %~dp0\internal\copy.bat
IF ERRORLEVEL 1 goto :eof

call %~dp0\internal\setup.bat
IF ERRORLEVEL 1 goto :eof
@@ -65,7 +65,7 @@ for /F "usebackq delims=" %%i in (`python -c "import sys; print('{0[0]}{0[1]}'.f
if %PYVER% LSS 35 (
    echo Warning: PyTorch for Python 2 under Windows is experimental.
    echo Python x64 3.5 or up is recommended to compile PyTorch on Windows
    echo Maybe you can create a virtual environment if you have conda installed:
    echo Maybe you can create a virual environment if you have conda installed:
    echo ^> conda create -n test python=3.6 pyyaml numpy
    echo ^> activate test
)
@@ -8,7 +8,6 @@ copy "%CUDA_PATH%\bin\cusolver*64_*.dll*" pytorch\torch\lib
copy "%CUDA_PATH%\bin\cudnn*64_*.dll*" pytorch\torch\lib
copy "%CUDA_PATH%\bin\nvrtc*64_*.dll*" pytorch\torch\lib
copy "%CUDA_PATH%\extras\CUPTI\lib64\cupti64_*.dll*" pytorch\torch\lib
copy "%CUDA_PATH%\extras\CUPTI\lib64\nvperf_host*.dll*" pytorch\torch\lib

copy "C:\Program Files\NVIDIA Corporation\NvToolsExt\bin\x64\nvToolsExt64_1.dll*" pytorch\torch\lib
copy "%PYTHON_LIB_PATH%\libiomp*5md.dll" pytorch\torch\lib
@@ -23,9 +23,9 @@ set CUDNN_LIB_FOLDER="lib\x64"
:: Skip all of this if we already have cuda installed
if exist "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION_STR%\bin\nvcc.exe" goto set_cuda_env_vars

if %CUDA_VER% EQU 124 goto cuda124
if %CUDA_VER% EQU 126 goto cuda126
if %CUDA_VER% EQU 128 goto cuda128
if %CUDA_VER% EQU 129 goto cuda129

echo CUDA %CUDA_VERSION_STR% is not supported
exit /b 1
@@ -86,33 +86,6 @@ xcopy /Y "%SRC_DIR%\temp_build\zlib\dll_x64\*.dll" "C:\Windows\System32"

goto cuda_common

:cuda129

set CUDA_INSTALL_EXE=cuda_12.9.1_576.57_windows.exe
if not exist "%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%" (
    curl -k -L "https://ossci-windows.s3.amazonaws.com/%CUDA_INSTALL_EXE%" --output "%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%" & REM @lint-ignore
    if errorlevel 1 exit /b 1
    set "CUDA_SETUP_FILE=%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%"
    set "ARGS=cuda_profiler_api_12.9 thrust_12.9 nvcc_12.9 cuobjdump_12.9 nvprune_12.9 nvprof_12.9 cupti_12.9 cublas_12.9 cublas_dev_12.9 cudart_12.9 cufft_12.9 cufft_dev_12.9 curand_12.9 curand_dev_12.9 cusolver_12.9 cusolver_dev_12.9 cusparse_12.9 cusparse_dev_12.9 npp_12.9 npp_dev_12.9 nvrtc_12.9 nvrtc_dev_12.9 nvml_dev_12.9 nvjitlink_12.9 nvtx_12.9"
)

set CUDNN_FOLDER=cudnn-windows-x86_64-9.10.2.21_cuda12-archive
set CUDNN_LIB_FOLDER="lib"
set "CUDNN_INSTALL_ZIP=%CUDNN_FOLDER%.zip"
if not exist "%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%" (
    curl -k -L "http://s3.amazonaws.com/ossci-windows/%CUDNN_INSTALL_ZIP%" --output "%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%" & REM @lint-ignore
    if errorlevel 1 exit /b 1
    set "CUDNN_SETUP_FILE=%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%"
)

@REM cuDNN 8.3+ required zlib to be installed on the path
echo Installing ZLIB dlls
curl -k -L "http://s3.amazonaws.com/ossci-windows/zlib123dllx64.zip" --output "%SRC_DIR%\temp_build\zlib123dllx64.zip"
7z x "%SRC_DIR%\temp_build\zlib123dllx64.zip" -o"%SRC_DIR%\temp_build\zlib"
xcopy /Y "%SRC_DIR%\temp_build\zlib\dll_x64\*.dll" "C:\Windows\System32"

goto cuda_common

:cuda_common
:: NOTE: We only install CUDA if we don't have it installed already.
:: With GHA runners these should be pre-installed as part of our AMI process
@@ -18,5 +18,3 @@ start /wait "" python-amd64.exe /quiet InstallAllUsers=1 PrependPath=0 Include_t
if errorlevel 1 exit /b 1

set "PATH=%CD%\Python\Scripts;%CD%\Python;%PATH%"
%PYTHON_EXEC% -m pip install --upgrade pip setuptools packaging wheel
if errorlevel 1 exit /b 1
@@ -206,7 +206,7 @@ if [[ "$USE_SPLIT_BUILD" == "true" ]]; then
    BUILD_LIBTORCH_WHL=1 BUILD_PYTHON_ONLY=0 python setup.py bdist_wheel -d "$whl_tmp_dir"
    echo "Finished setup.py bdist_wheel for split build (BUILD_LIBTORCH_WHL)"
    echo "Calling setup.py bdist_wheel for split build (BUILD_PYTHON_ONLY)"
    BUILD_LIBTORCH_WHL=0 BUILD_PYTHON_ONLY=1 CMAKE_FRESH=1 python setup.py bdist_wheel -d "$whl_tmp_dir"
    BUILD_PYTHON_ONLY=1 BUILD_LIBTORCH_WHL=0 python setup.py bdist_wheel -d "$whl_tmp_dir" --cmake
    echo "Finished setup.py bdist_wheel for split build (BUILD_PYTHON_ONLY)"
else
    python setup.py bdist_wheel -d "$whl_tmp_dir"
@@ -75,8 +75,8 @@ TRITON_VERSION=$(cat $PYTORCH_ROOT/.ci/docker/triton_version.txt)
# Here PYTORCH_EXTRA_INSTALL_REQUIREMENTS is already set for the all the wheel builds hence append TRITON_CONSTRAINT
TRITON_CONSTRAINT="platform_system == 'Linux' and platform_machine == 'x86_64'"

# CUDA 12.9 builds have triton for Linux and Linux aarch64 binaries.
if [[ "$DESIRED_CUDA" == "cu129" ]]; then
# CUDA 12.8 builds have triton for Linux and Linux aarch64 binaries.
if [[ "$DESIRED_CUDA" == cu128 ]]; then
    TRITON_CONSTRAINT="platform_system == 'Linux'"
fi
157 .circleci/scripts/trigger_azure_pipeline.py Normal file
@@ -0,0 +1,157 @@
# Documentation: https://docs.microsoft.com/en-us/rest/api/azure/devops/build/?view=azure-devops-rest-6.0

import json
import os
import re
import sys
import time

import requests


AZURE_PIPELINE_BASE_URL = "https://aiinfra.visualstudio.com/PyTorch/"
AZURE_DEVOPS_PAT_BASE64 = os.environ.get("AZURE_DEVOPS_PAT_BASE64_SECRET", "")
PIPELINE_ID = "911"
PROJECT_ID = "0628bce4-2d33-499e-bac5-530e12db160f"
TARGET_BRANCH = os.environ.get("CIRCLE_BRANCH", "main")
TARGET_COMMIT = os.environ.get("CIRCLE_SHA1", "")

build_base_url = AZURE_PIPELINE_BASE_URL + "_apis/build/builds?api-version=6.0"

s = requests.Session()
s.headers.update({"Authorization": "Basic " + AZURE_DEVOPS_PAT_BASE64})


def submit_build(pipeline_id, project_id, source_branch, source_version):
    print("Submitting build for branch: " + source_branch)
    print("Commit SHA1: ", source_version)

    run_build_raw = s.post(
        build_base_url,
        json={
            "definition": {"id": pipeline_id},
            "project": {"id": project_id},
            "sourceBranch": source_branch,
            "sourceVersion": source_version,
        },
    )

    try:
        run_build_json = run_build_raw.json()
    except json.decoder.JSONDecodeError as e:
        print(e)
        print(
            "Failed to parse the response. Check if the Azure DevOps PAT is incorrect or expired."
        )
        sys.exit(-1)

    build_id = run_build_json["id"]

    print("Submitted bulid: " + str(build_id))
    print("Bulid URL: " + run_build_json["url"])
    return build_id


def get_build(_id):
    get_build_url = (
        AZURE_PIPELINE_BASE_URL + f"/_apis/build/builds/{_id}?api-version=6.0"
    )
    get_build_raw = s.get(get_build_url)
    return get_build_raw.json()


def get_build_logs(_id):
    get_build_logs_url = (
        AZURE_PIPELINE_BASE_URL + f"/_apis/build/builds/{_id}/logs?api-version=6.0"
    )
    get_build_logs_raw = s.get(get_build_logs_url)
    return get_build_logs_raw.json()


def get_log_content(url):
    resp = s.get(url)
    return resp.text


def wait_for_build(_id):
    build_detail = get_build(_id)
    build_status = build_detail["status"]

    while build_status == "notStarted":
        print("Waiting for run to start: " + str(_id))
        sys.stdout.flush()
        try:
            build_detail = get_build(_id)
            build_status = build_detail["status"]
        except Exception as e:
            print("Error getting build")
            print(e)

        time.sleep(30)

    print("Bulid started: ", str(_id))

    handled_logs = set()
    while build_status == "inProgress":
        try:
            print("Waiting for log: " + str(_id))
            logs = get_build_logs(_id)
        except Exception as e:
            print("Error fetching logs")
            print(e)
            time.sleep(30)
            continue

        for log in logs["value"]:
            log_id = log["id"]
            if log_id in handled_logs:
                continue
            handled_logs.add(log_id)
            print("Fetching log: \n" + log["url"])
            try:
                log_content = get_log_content(log["url"])
                print(log_content)
            except Exception as e:
                print("Error getting log content")
                print(e)
            sys.stdout.flush()
        build_detail = get_build(_id)
        build_status = build_detail["status"]
        time.sleep(30)

    build_result = build_detail["result"]

    print("Bulid status: " + build_status)
    print("Bulid result: " + build_result)

    return build_status, build_result


if __name__ == "__main__":
    # Convert the branch name for Azure DevOps
    match = re.search(r"pull/(\d+)", TARGET_BRANCH)
    if match is not None:
        pr_num = match.group(1)
        SOURCE_BRANCH = f"refs/pull/{pr_num}/head"
    else:
        SOURCE_BRANCH = f"refs/heads/{TARGET_BRANCH}"

    MAX_RETRY = 2
    retry = MAX_RETRY

    while retry > 0:
        build_id = submit_build(PIPELINE_ID, PROJECT_ID, SOURCE_BRANCH, TARGET_COMMIT)
        build_status, build_result = wait_for_build(build_id)

        if build_result != "succeeded":
            retry = retry - 1
            if retry > 0:
                print("Retrying... remaining attempt: " + str(retry))
                # Wait a bit before retrying
                time.sleep((MAX_RETRY - retry) * 120)
                continue
            else:
                print("No more chance to retry. Giving up.")
                sys.exit(-1)
        else:
            break
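A hypothetical invocation of the script above (the environment variable names come from the script itself; the values are placeholders):

```bash
# Placeholders only; the PAT must be the base64-encoded "user:token"
# expected by the Basic auth header the script builds.
export AZURE_DEVOPS_PAT_BASE64_SECRET="<base64 user:token>"
export CIRCLE_BRANCH="pull/12345"
export CIRCLE_SHA1="<commit sha>"
python .circleci/scripts/trigger_azure_pipeline.py
```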
2 .github/actionlint.yaml vendored
@@ -14,7 +14,6 @@ self-hosted-runner:
  - linux.12xlarge
  - linux.24xlarge
  - linux.24xlarge.ephemeral
  - linux.24xlarge.amd
  - linux.arm64.2xlarge
  - linux.arm64.2xlarge.ephemeral
  - linux.arm64.m7g.4xlarge

@@ -50,7 +49,6 @@ self-hosted-runner:
  # Organization-wide AMD-hosted runners
  # MI2xx runners
  - linux.rocm.gpu
  - linux.rocm.gpu.mi250
  - linux.rocm.gpu.2
  - linux.rocm.gpu.4
  # MI300 runners
2 .github/actions/build-android/action.yml vendored
@@ -9,7 +9,7 @@ inputs:
  arch-for-build-env:
    description: |
      arch to pass to build environment.
      This is currently different than the arch name we use elsewhere, which
      This is currently different than the arch name we use elswhere, which
      should be fixed.
    required: true
  github-secret:

@@ -125,7 +125,7 @@ runs:
      TAG: ${{ steps.parse-ref.outputs.tag }}
      EVENT_NAME: ${{ github.event_name }}
      SCHEDULE: ${{ github.event.schedule }}
      HEAD_BRANCH: ${{ steps.parse-ref.outputs.branch }}
      HEAD_BRANCH: ${{ github.event.workflow_run.head_branch }}
    id: filter
    run: |
      echo "Workflow: ${GITHUB_WORKFLOW}"

@@ -157,4 +157,4 @@ runs:
      echo "Is keep-going label set? ${{ steps.filter.outputs.keep-going }}"

      echo
      echo "Reenabled issues? ${{ steps.filter.outputs.reenabled-issues }}"
      echo "Renabled issues? ${{ steps.filter.outputs.reenabled-issues }}"
2  .github/actions/linux-test/action.yml  vendored

@ -153,7 +153,7 @@ runs:
          github-token: ${{ inputs.GITHUB_TOKEN }}

      - name: Check for keep-going label and re-enabled test issues
        # This uses the filter-test-configs action because it conveniently
        # This uses the filter-test-configs action because it conviniently
        # checks for labels and re-enabled test issues. It does not actually do
        # any filtering. All filtering is done in the build step.
        id: keep-going
9  .github/actions/reuse-old-whl/action.yml  vendored

@ -13,12 +13,6 @@ inputs:
  github-token:
    description: GitHub token
    required: true
  job-id:
    description: Job ID
    required: true
  job-name:
    description: Job name
    required: true

outputs:
  reuse:
@ -36,11 +30,8 @@ runs:
      continue-on-error: true
      env:
        GITHUB_TOKEN: ${{ inputs.github-token }}
        JOB_ID: ${{ inputs.job-id }}
        JOB_NAME: ${{ inputs.job-name }}
      run: |
        set -x
        python3 -m pip install boto3==1.35.42
        python3 ${GITHUB_ACTION_PATH}/reuse_old_whl.py \
          --build-environment "${{ inputs.build-environment }}" \
          --run-id "${{ inputs.run-id }}" \
60  .github/actions/reuse-old-whl/reuse_old_whl.py  vendored

@ -1,7 +1,6 @@
import argparse
import os
import subprocess
import sys
from functools import lru_cache
from pathlib import Path
from typing import Any, cast, Optional, Union
@ -9,14 +8,6 @@ from typing import Any, cast, Optional, Union
import requests


REPO_ROOT = Path(__file__).resolve().parent.parent.parent.parent
sys.path.insert(0, str(REPO_ROOT))
from tools.stats.upload_metrics import emit_metric


sys.path.remove(str(REPO_ROOT))  # Clean up sys.path after import


FORCE_REBUILD_LABEL = "ci-force-rebuild"


@ -132,26 +123,17 @@ def check_changed_files(sha: str) -> bool:
    # Return true if all the changed files are in the list of allowed files to
    # be changed to reuse the old whl

    # Removing files in the torch folder is not allowed since rsync will not
    # remove files
    # Removing any files is not allowed since rysnc will not remove files
    removed_files = (
        subprocess.check_output(
            [
                "git",
                "diff",
                "--name-only",
                sha,
                "HEAD",
                "--diff-filter=D",
                "--no-renames",
            ],
            ["git", "diff", "--name-only", sha, "HEAD", "--diff-filter=D"],
            text=True,
            stderr=subprocess.DEVNULL,
        )
        .strip()
        .split()
    )
    if any(file.startswith("torch/") for file in removed_files):
    if removed_files:
        print(
            f"Removed files between {sha} and HEAD: {removed_files}, cannot reuse old whl"
        )
@ -159,7 +141,7 @@ def check_changed_files(sha: str) -> bool:

    changed_files = (
        subprocess.check_output(
            ["git", "diff", "--name-only", sha, "HEAD", "--no-renames"],
            ["git", "diff", "--name-only", sha, "HEAD"],
            text=True,
            stderr=subprocess.DEVNULL,
        )
@ -304,7 +286,8 @@ def unzip_artifact_and_replace_files() -> None:


def set_output() -> None:
    print("Setting output reuse=true")
    # Disable for now so we can monitor first
    # pass
    if os.getenv("GITHUB_OUTPUT"):
        with open(str(os.getenv("GITHUB_OUTPUT")), "a") as env:
            print("reuse=true", file=env)
@ -325,7 +308,7 @@ def parse_args() -> argparse.Namespace:
    return parser.parse_args()


def can_reuse_whl(args: argparse.Namespace) -> tuple[bool, str]:
def can_reuse_whl(args: argparse.Namespace) -> bool:
    if args.github_ref and any(
        args.github_ref.startswith(x)
        for x in [
@ -335,50 +318,37 @@ def can_reuse_whl(args: argparse.Namespace) -> tuple[bool, str]:
        ]
    ):
        print("Release branch, rebuild whl")
        return (False, "Release branch")
        return False

    if not check_changed_files(get_merge_base()):
        print("Cannot use old whl due to the changed files, rebuild whl")
        return (False, "Changed files not allowed")
        return False

    if check_labels_for_pr():
        print(f"Found {FORCE_REBUILD_LABEL} label on PR, rebuild whl")
        return (False, "Found FORCE_REBUILD_LABEL on PR")
        return False

    if check_issue_open():
        print("Issue #153759 is open, rebuild whl")
        return (False, "Issue #153759 is open")
        return False

    workflow_id = get_workflow_id(args.run_id)
    if workflow_id is None:
        print("No workflow ID found, rebuild whl")
        return (False, "No workflow ID found")
        return False

    if not find_old_whl(workflow_id, args.build_environment, get_merge_base()):
        print("No old whl found, rebuild whl")
        return (False, "No old whl found")
        # TODO: go backwards from merge base to find more runs
        return False

    return (True, "Found old whl")
    return True


if __name__ == "__main__":
    args = parse_args()

    reuse_whl, reason = can_reuse_whl(args)

    if reuse_whl:
    if can_reuse_whl(args):
        print("Reusing old whl")
        unzip_artifact_and_replace_files()
        set_output()

    emit_metric(
        "reuse_old_whl",
        {
            "reuse_whl": reuse_whl,
            "reason": reason,
            "build_environment": args.build_environment,
            "merge_base": get_merge_base(),
            "head_sha": get_head_sha(),
        },
    )
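The stricter removed-file check above can be exercised on its own; a small sketch (the sha argument is a placeholder for the merge base, and --no-renames keeps a rename visible as a delete plus an add instead of folding it into a single rename entry):

import subprocess

def removed_torch_files(sha: str) -> list[str]:
    # Files deleted between sha and HEAD that live under torch/ -- exactly
    # the condition that forces a rebuild in check_changed_files().
    out = subprocess.check_output(
        ["git", "diff", "--name-only", sha, "HEAD", "--diff-filter=D", "--no-renames"],
        text=True,
        stderr=subprocess.DEVNULL,
    )
    return [f for f in out.strip().split() if f.startswith("torch/")]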
4  .github/actions/setup-linux/action.yml  vendored

@ -33,14 +33,14 @@ runs:
      id: check_container_runner
      run: echo "IN_CONTAINER_RUNNER=$(if [ -f /.inarc ] || [ -f /.incontainer ]; then echo true ; else echo false; fi)" >> "$GITHUB_OUTPUT"

    - name: Start docker if docker daemon is not running
    - name: Start docker if docker deamon is not running
      shell: bash
      if: ${{ steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'false' }}
      run: |
        if systemctl is-active --quiet docker; then
            echo "Docker daemon is running...";
        else
            echo "Starting docker daemon..." && sudo systemctl start docker;
            echo "Starting docker deamon..." && sudo systemctl start docker;
        fi

    - name: Log in to ECR
2  .github/ci_commit_pins/audio.txt  vendored

@ -1 +1 @@
70caf76066ef2c1054d6128b11769dc816a779e7
4cb7f57d31b0b288696f09b89e890e5fac092eed
2  .github/ci_commit_pins/xla.txt  vendored

@ -1 +1 @@
1c00dea2c9adb2137903c86b4191e8c247f8fda9
9a517a95f620dc0960d1feec97b50ac6ea7f1854
9  .github/label_to_label.yml  vendored

@ -48,12 +48,3 @@
  - "module: dynamic shapes"
  then:
  - "oncall: pt2"
- any:
  - "release notes: distributed (c10d)"
  - "release notes: distributed (symm_mem)"
  - "release notes: distributed (pipeline)"
  - "release notes: distributed (fsdp)"
  - "release notes: distributed (dtensor)"
  - "oncall: distributed"
  then:
  - "ciflow/h100-distributed"
1  .github/labeler.yml  vendored

@ -116,6 +116,7 @@
"release notes: inductor (aoti)":
- torch/_C/_aoti.pyi
- torch/_dynamo/repro/aoti.py
- torch/_export/serde/aoti_schema.py
- torch/_higher_order_ops/aoti_call_delegate.py
- torch/_inductor/codegen/aoti_runtime/**
- torch/_inductor/codegen/aoti_hipify_utils.py
2  .github/merge_rules.yaml  vendored

@ -384,7 +384,6 @@
    - leslie-fang-intel
    - jgong5
    - EikanWang
    - CaoE
  mandatory_checks_name:
    - EasyCLA
    - Lint
@ -436,7 +435,6 @@
  approved_by:
    - leslie-fang-intel
    - jgong5
    - CaoE
  mandatory_checks_name:
    - EasyCLA
    - Lint
3  .github/pytorch-probot.yml  vendored

@ -4,7 +4,6 @@ ciflow_push_tags:
- ciflow/binaries
- ciflow/binaries_libtorch
- ciflow/binaries_wheel
- ciflow/triton_binaries
- ciflow/inductor
- ciflow/inductor-periodic
- ciflow/inductor-rocm
@ -12,7 +11,6 @@ ciflow_push_tags:
- ciflow/inductor-perf-compare
- ciflow/inductor-micro-benchmark
- ciflow/inductor-micro-benchmark-cpu-x86
- ciflow/inductor-perf-test-nightly-x86-zen
- ciflow/inductor-cu126
- ciflow/linux-aarch64
- ciflow/mps
@ -31,7 +29,6 @@ ciflow_push_tags:
- ciflow/pull
- ciflow/h100
- ciflow/h100-distributed
- ciflow/h100-symm-mem
retryable_workflows:
- pull
- trunk
2  .github/requirements-gha-cache.txt  vendored

@ -10,5 +10,5 @@ lintrunner==0.10.7
ninja==1.10.0.post1
nvidia-ml-py==11.525.84
pyyaml==6.0
requests==2.32.4
requests==2.32.2
rich==10.9.0

@ -1,5 +1,5 @@
boto3==1.35.42
cmake==3.27.*
cmake==3.25.*
expecttest==0.3.0
fbscribelogger==0.1.7
filelock==3.6.0
2  .github/scripts/amd/patch_triton_wheel.sh  vendored

@ -78,7 +78,7 @@ for pkg in /$WHEELHOUSE_DIR/*triton*.whl; do
    echo "Copied $filepath to $patchedpath"
done

# Go through all required shared objects and see if any of our other objects are dependants. If so, replace so.ver with so
# Go through all required shared objects and see if any of our other objects are dependants. If so, replace so.ver wth so
for ((i=0;i<${#deps[@]};++i)); do
    echo "replacing "${deps_soname[i]} ${patched[i]}
    replace_needed_sofiles $PREFIX/$ROCM_LIB ${deps_soname[i]} ${patched[i]}
17  .github/scripts/build_triton_wheel.py  vendored

@ -21,11 +21,8 @@ def read_triton_pin(device: str = "cuda") -> str:
        return f.read().strip()


def read_triton_version(device: str = "cuda") -> str:
    triton_version_file = "triton_version.txt"
    if device == "xpu":
        triton_version_file = "triton_xpu_version.txt"
    with open(REPO_DIR / ".ci" / "docker" / triton_version_file) as f:
def read_triton_version() -> str:
    with open(REPO_DIR / ".ci" / "docker" / "triton_version.txt") as f:
        return f.read().strip()


@ -94,7 +91,7 @@ def build_triton(
        patch_init_py(
            triton_pythondir / "triton" / "__init__.py",
            version=f"{version}",
            expected_version=read_triton_version(device),
            expected_version=None,
        )

    if device == "rocm":
@ -140,19 +137,15 @@ def main() -> None:
    parser.add_argument("--py-version", type=str)
    parser.add_argument("--commit-hash", type=str)
    parser.add_argument("--with-clang-ldd", action="store_true")
    parser.add_argument("--triton-version", type=str, default=None)
    parser.add_argument("--triton-version", type=str, default=read_triton_version())
    args = parser.parse_args()

    triton_version = read_triton_version(args.device)
    if args.triton_version:
        triton_version = args.triton_version

    build_triton(
        device=args.device,
        commit_hash=(
            args.commit_hash if args.commit_hash else read_triton_pin(args.device)
        ),
        version=triton_version,
        version=args.triton_version,
        py_version=args.py_version,
        release=args.release,
        with_clang_ldd=args.with_clang_ldd,
30  .github/scripts/delete_old_branches.py  vendored

@ -275,7 +275,7 @@ def delete_branches() -> None:
        delete_branch(git_repo, branch)


def delete_old_tags() -> None:
def delete_old_ciflow_tags() -> None:
    # Deletes ciflow tags if they are associated with a closed PR or a specific
    # commit. Lightweight tags don't have information about the date they were
    # created, so we can't check how old they are. The script just assumes that
@ -288,29 +288,23 @@ def delete_old_tags() -> None:
        delete_branch(git_repo, f"refs/tags/{tag}")

    tags = git_repo._run_git("tag").splitlines()
    open_pr_numbers = [x["number"] for x in get_open_prs()]

    CIFLOW_TAG_REGEX = re.compile(r"^ciflow\/.*\/(\d{5,6}|[0-9a-f]{40})$")
    AUTO_REVERT_TAG_REGEX = re.compile(r"^trunk\/[0-9a-f]{40}$")
    for tag in tags:
        try:
            if ESTIMATED_TOKENS[0] > 400:
                print("Estimated tokens exceeded, exiting")
                break

            if not CIFLOW_TAG_REGEX.match(tag) and not AUTO_REVERT_TAG_REGEX.match(tag):
            if not tag.startswith("ciflow/"):
                continue

            # This checks the date of the commit associated with the tag instead
            # of the tag itself since lightweight tags don't have this
            # information. I think it should be ok since this only runs once a
            # day
            tag_info = git_repo._run_git("show", "-s", "--format=%ct", tag)
            tag_timestamp = int(tag_info.strip())
            # Maybe some timezone issues, but a few hours shouldn't matter
            tag_age_days = (datetime.now().timestamp() - tag_timestamp) / SEC_IN_DAY

            if tag_age_days > 7:
                print(f"[{tag}] Tag is older than 7 days, deleting")
                re_match_pr = re.match(r"^ciflow\/.*\/(\d{5,6})$", tag)
                re_match_sha = re.match(r"^ciflow\/.*\/([0-9a-f]{40})$", tag)
                if re_match_pr:
                    pr_number = int(re_match_pr.group(1))
                    if pr_number in open_pr_numbers:
                        continue
                    delete_tag(tag)
                elif re_match_sha:
                    delete_tag(tag)
        except Exception as e:
            print(f"Failed to check tag {tag}: {e}")
@ -318,4 +312,4 @@ def delete_old_tags() -> None:

if __name__ == "__main__":
    delete_branches()
    delete_old_tags()
    delete_old_ciflow_tags()
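To make the new tag patterns concrete, here is a small self-check of which tag shapes the cleanup loop now considers (the regexes are copied from the diff above; the example tags are made up):

import re

CIFLOW_TAG_REGEX = re.compile(r"^ciflow\/.*\/(\d{5,6}|[0-9a-f]{40})$")
AUTO_REVERT_TAG_REGEX = re.compile(r"^trunk\/[0-9a-f]{40}$")

# PR-style ciflow tag: the capture group is a 5-6 digit PR number.
assert CIFLOW_TAG_REGEX.match("ciflow/trunk/12345")
# Commit-style ciflow tag: the capture group is a full 40-char SHA.
assert CIFLOW_TAG_REGEX.match("ciflow/periodic/" + "a" * 40)
# Auto-revert tags live under trunk/<sha> and use the second pattern.
assert AUTO_REVERT_TAG_REGEX.match("trunk/" + "0" * 40)
# Anything matching neither pattern is skipped by the loop.
assert CIFLOW_TAG_REGEX.match("ciflow/doesntseemtomatch") is None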
16  .github/scripts/filter_test_configs.py  vendored

@ -18,7 +18,6 @@ import yaml


REENABLE_TEST_REGEX = "(?i)(Close(d|s)?|Resolve(d|s)?|Fix(ed|es)?) (#|https://github.com/pytorch/pytorch/issues/)([0-9]+)"
MAIN_BRANCH = "main"

PREFIX = "test-config/"

@ -98,7 +97,7 @@ def parse_args() -> Any:
    parser.add_argument(
        "--branch",
        type=str,
        default=MAIN_BRANCH,
        default="main",
        help="the branch name",
    )
    return parser.parse_args()
@ -457,7 +456,6 @@ def download_json(url: str, headers: dict[str, str], num_retries: int = 3) -> An


def set_output(name: str, val: Any) -> None:
    print(f"Setting output {name}={val}")
    if os.getenv("GITHUB_OUTPUT"):
        with open(str(os.getenv("GITHUB_OUTPUT")), "a") as env:
            print(f"{name}={val}", file=env)
@ -497,20 +495,13 @@ def check_for_setting(labels: set[str], body: str, setting: str) -> bool:


def perform_misc_tasks(
    labels: set[str],
    test_matrix: dict[str, list[Any]],
    job_name: str,
    pr_body: str,
    branch: Optional[str] = None,
    labels: set[str], test_matrix: dict[str, list[Any]], job_name: str, pr_body: str
) -> None:
    """
    In addition to apply the filter logic, the script also does the following
    misc tasks to set keep-going and is-unstable variables
    """
    set_output(
        "keep-going",
        branch == MAIN_BRANCH or check_for_setting(labels, pr_body, "keep-going"),
    )
    set_output("keep-going", check_for_setting(labels, pr_body, "keep-going"))
    set_output(
        "ci-verbose-test-logs",
        check_for_setting(labels, pr_body, "ci-verbose-test-logs"),
@ -633,7 +624,6 @@ def main() -> None:
        test_matrix=filtered_test_matrix,
        job_name=args.job_name,
        pr_body=pr_body if pr_body else "",
        branch=args.branch,
    )

    # Set the filtered test matrix as the output
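A minimal sketch of the new keep-going rule: it is forced on for the main branch and otherwise falls back to the label/PR-body opt-in. check_for_setting is simplified here to a label-or-body substring check; the real helper is stricter:

from typing import Optional

MAIN_BRANCH = "main"

def check_for_setting(labels: set[str], body: str, setting: str) -> bool:
    # Simplified stand-in for the real helper.
    return setting in labels or f"[{setting}]" in body

def keep_going(labels: set[str], pr_body: str, branch: Optional[str]) -> bool:
    # Main always keeps going; PRs opt in via the keep-going label or body tag.
    return branch == MAIN_BRANCH or check_for_setting(labels, pr_body, "keep-going")

assert keep_going(set(), "", "main")              # forced on for main
assert keep_going({"keep-going"}, "", "gh/user")  # label opt-in
assert not keep_going(set(), "", "gh/user")       # default off elsewhere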
29  .github/scripts/generate_binary_build_matrix.py  vendored

@ -17,7 +17,7 @@ from typing import Optional

# NOTE: Please also update the CUDA sources in `PIP_SOURCES` in tools/nightly.py when changing this
CUDA_ARCHES = ["12.6", "12.8", "12.9"]
CUDA_STABLE = "12.8"
CUDA_STABLE = "12.6"
CUDA_ARCHES_FULL_VERSION = {
    "12.6": "12.6.3",
    "12.8": "12.8.1",
@ -38,7 +38,7 @@ CPU_AARCH64_ARCH = ["cpu-aarch64"]

CPU_S390X_ARCH = ["cpu-s390x"]

CUDA_AARCH64_ARCHES = ["12.9-aarch64"]
CUDA_AARCH64_ARCHES = ["12.8-aarch64"]


PYTORCH_EXTRA_INSTALL_REQUIREMENTS = {
@ -46,15 +46,15 @@ PYTORCH_EXTRA_INSTALL_REQUIREMENTS = {
    "nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-cudnn-cu12==9.5.1.17; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-nccl-cu12==2.27.3; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-nvshmem-cu12==3.3.9; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-nvshmem-cu12==3.2.5; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux' and platform_machine == 'x86_64'"
@ -63,15 +63,15 @@ PYTORCH_EXTRA_INSTALL_REQUIREMENTS = {
    "nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-cudnn-cu12==9.8.0.87; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-nccl-cu12==2.27.3; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-nvshmem-cu12==3.3.9; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-nvshmem-cu12==3.2.5; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux' and platform_machine == 'x86_64'"
@ -86,9 +86,8 @@ PYTORCH_EXTRA_INSTALL_REQUIREMENTS = {
    "nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-nccl-cu12==2.27.3; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-nvshmem-cu12==3.3.9; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | "
    "nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64'"
@ -223,8 +222,13 @@ def generate_libtorch_matrix(
    if os == "linux":
        arches += CUDA_ARCHES
        arches += ROCM_ARCHES
        # will add in a separate PR for 12.9
        if "12.9" in arches:
            arches.remove("12.9")
    elif os == "windows":
        arches += CUDA_ARCHES
        if "12.9" in arches:
            arches.remove("12.9")
    if libtorch_variants is None:
        libtorch_variants = [
            "shared-with-deps",
@ -290,6 +294,9 @@ def generate_wheels_matrix(
        arches += CUDA_ARCHES + ROCM_ARCHES + XPU_ARCHES
    elif os == "windows":
        arches += CUDA_ARCHES + XPU_ARCHES
        # skip CUDA 12.9 builds on Windows
        if "12.9" in arches:
            arches.remove("12.9")
    elif os == "linux-aarch64":
        # Separate new if as the CPU type is different and
        # uses different build/test scripts
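The Windows-side handling above boils down to dropping CUDA 12.9 from the arch list before the matrix is expanded; a tiny sketch with the surrounding function simplified (arch values are copied from the file):

CUDA_ARCHES = ["12.6", "12.8", "12.9"]
XPU_ARCHES = ["xpu"]

def windows_wheel_arches() -> list[str]:
    arches = ["cpu"] + CUDA_ARCHES + XPU_ARCHES
    if "12.9" in arches:
        arches.remove("12.9")  # skip CUDA 12.9 builds on Windows
    return arches

assert windows_wheel_arches() == ["cpu", "12.6", "12.8", "xpu"]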
2  .github/scripts/generate_ci_workflows.py  vendored

@ -152,7 +152,7 @@ LINUX_BINARY_SMOKE_WORKFLOWS = [
        package_type="manywheel",
        build_configs=generate_binary_build_matrix.generate_wheels_matrix(
            OperatingSystem.LINUX,
            arches=["12.6", "12.8", "12.9", "6.4"],
            arches=["12.6", "12.8", "12.9"],
            python_versions=["3.9"],
        ),
        branches="main",
4  .github/scripts/get_workflow_job_id.py  vendored

@ -64,7 +64,7 @@ def fetch_url(
    )
    exception_message = (
        "Is github alright?",
        f"Received status code '{err.code}' when attempting to retrieve {url}:\n",
        f"Recieved status code '{err.code}' when attempting to retrieve {url}:\n",
        f"{err.reason}\n\nheaders={err.headers}",
    )
    raise RuntimeError(exception_message) from err
@ -136,10 +136,10 @@ def find_job_id_name(args: Any) -> tuple[str, str]:


def set_output(name: str, val: Any) -> None:
    print(f"Setting output {name}={val}")
    if os.getenv("GITHUB_OUTPUT"):
        with open(str(os.getenv("GITHUB_OUTPUT")), "a") as env:
            print(f"{name}={val}", file=env)
            print(f"setting {name}={val}")
    else:
        print(f"::set-output name={name}::{val}")
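Both set_output variants above rely on the GITHUB_OUTPUT file protocol: each appended name=value line becomes a step output. A toy, self-contained demonstration (a temp file stands in for the runner-provided path):

import os
import tempfile

with tempfile.NamedTemporaryFile("w", delete=False) as f:
    os.environ["GITHUB_OUTPUT"] = f.name  # simulate the runner-provided file

def set_output(name: str, val: str) -> None:
    with open(os.environ["GITHUB_OUTPUT"], "a") as env:
        print(f"{name}={val}", file=env)

set_output("job-id", "12345678")
set_output("job-name", "linux-jammy-py3.10 / test")
# The runner later parses these lines back into steps.<id>.outputs.*
print(open(os.environ["GITHUB_OUTPUT"]).read())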
2  .github/scripts/gitutils.py  vendored

@ -211,7 +211,7 @@ class GitRepo:
        self, from_branch: str, to_branch: str
    ) -> tuple[list[str], list[str]]:
        """
        Returns list of commits that are missing in each other branch since their merge base
        Returns list of commmits that are missing in each other branch since their merge base
        Might be slow if merge base is between two branches is pretty far off
        """
        from_ref = self.rev_parse(from_branch)
1  .github/scripts/parse_ref.py  vendored

@ -5,7 +5,6 @@ import re


def set_output(name: str, val: str) -> None:
    print(f"Setting output {name}={val}")
    if os.getenv("GITHUB_OUTPUT"):
        with open(str(os.getenv("GITHUB_OUTPUT")), "a") as env:
            print(f"{name}={val}", file=env)
2  .github/scripts/pr-sanity-check.sh  vendored

@ -12,7 +12,7 @@ BASE=${BASE:-HEAD~1}
HEAD=${HEAD:-HEAD}

ancestor=$(git merge-base "${BASE}" "${HEAD}")
echo "INFO: Checking against the following stats"
echo "INFO: Checking aginst the following stats"
(
  set -x
  git diff --stat=10000 "$ancestor" "${HEAD}" | sed '$d' > "${TMPFILE}"
64  .github/scripts/tag_docker_images_for_release.py  vendored  Normal file

@ -0,0 +1,64 @@
import argparse
import subprocess

import generate_binary_build_matrix


def tag_image(
    image: str,
    default_tag: str,
    release_version: str,
    dry_run: str,
    tagged_images: dict[str, bool],
) -> None:
    if image in tagged_images:
        return
    release_image = image.replace(f"-{default_tag}", f"-{release_version}")
    print(f"Tagging {image} to {release_image} , dry_run: {dry_run}")

    if dry_run == "disabled":
        subprocess.check_call(["docker", "pull", image])
        subprocess.check_call(["docker", "tag", image, release_image])
        subprocess.check_call(["docker", "push", release_image])
    tagged_images[image] = True


def main() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--version",
        help="Version to tag",
        type=str,
        default="2.2",
    )
    parser.add_argument(
        "--dry-run",
        help="No Runtime Error check",
        type=str,
        choices=["enabled", "disabled"],
        default="enabled",
    )

    options = parser.parse_args()
    tagged_images: dict[str, bool] = {}
    platform_images = [
        generate_binary_build_matrix.WHEEL_CONTAINER_IMAGES,
        generate_binary_build_matrix.LIBTORCH_CONTAINER_IMAGES,
    ]
    default_tag = generate_binary_build_matrix.DEFAULT_TAG

    for platform_image in platform_images:  # type: ignore[attr-defined]
        for arch in platform_image.keys():  # type: ignore[attr-defined]
            if arch == "cpu-s390x":
                continue
            tag_image(
                platform_image[arch],  # type: ignore[index]
                default_tag,
                options.version,
                options.dry_run,
                tagged_images,
            )


if __name__ == "__main__":
    main()
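Walking a single image through tag_image() with dry run disabled reduces to three docker calls; a hypothetical example (the image name and version tags below are placeholders, not real registry entries):

import subprocess

image = "docker.io/example/builder:cuda12.8-main"
release_image = image.replace("-main", "-2.2")  # default tag -> release version

for cmd in (
    ["docker", "pull", image],
    ["docker", "tag", image, release_image],
    ["docker", "push", release_image],
):
    subprocess.check_call(cmd)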
56  .github/scripts/test_delete_old_branches.py  vendored

@ -1,56 +0,0 @@
import os
import unittest
from datetime import datetime
from unittest.mock import MagicMock, patch


os.environ["GITHUB_TOKEN"] = "test_token"

from delete_old_branches import delete_old_tags


@patch("delete_old_branches.delete_branch")
@patch("gitutils.GitRepo._run_git")
class TestDeleteTag(unittest.TestCase):
    def test_delete_tag(
        self, mock_run_git: "MagicMock", mock_delete_tag: "MagicMock"
    ) -> None:
        for tag in [
            "ciflow/branch/12345",
            "ciflow/commitsha/1234567890abcdef1234567890abcdef12345678",
            "trunk/1234567890abcdef1234567890abcdef12345678",
        ]:
            mock_run_git.side_effect = [
                tag,
                str(int(datetime.now().timestamp() - 8 * 24 * 60 * 60)),  # 8 days ago
            ]
            delete_old_tags()
            mock_delete_tag.assert_called_once()
            mock_delete_tag.reset_mock()

            # Don't delete if the tag is not old enough
            mock_run_git.side_effect = [
                tag,
                str(int(datetime.now().timestamp() - 6 * 24 * 60 * 60)),  # 6 days ago
            ]
            delete_old_tags()
            mock_delete_tag.assert_not_called()

    def test_do_not_delete_tag(
        self, mock_run_git: "MagicMock", mock_delete_tag: "MagicMock"
    ) -> None:
        for tag in [
            "ciflow/doesntseemtomatch",
            "trunk/doesntseemtomatch",
            "doesntseemtomatch",
        ]:
            mock_run_git.side_effect = [
                tag,
                str(int(datetime.now().timestamp() - 8 * 24 * 60 * 60)),  # 8 days ago
            ]
            delete_old_tags()
            mock_delete_tag.assert_not_called()


if __name__ == "__main__":
    unittest.main()
10  .github/scripts/test_filter_test_configs.py  vendored

@ -347,26 +347,26 @@ class TestConfigFilter(TestCase):
            {
                "job_name": "a-ci-job",
                "test_matrix": '{include: [{config: "default", runner: "linux"}, {config: "cfg", runner: "macos"}]}',
                "description": "Replicate each periodic mode in a different config",
                "descripion": "Replicate each periodic mode in a different config",
            },
            {
                "job_name": "a-ci-cuda11.8-job",
                "test_matrix": '{include: [{config: "default", runner: "linux"}, {config: "cfg", runner: "macos"}]}',
                "description": "Replicate each periodic mode in a different config for a CUDA job",
                "descripion": "Replicate each periodic mode in a different config for a CUDA job",
            },
            {
                "job_name": "a-ci-rocm-job",
                "test_matrix": '{include: [{config: "default", runner: "linux"}, {config: "cfg", runner: "macos"}]}',
                "description": "Replicate each periodic mode in a different config for a ROCm job",
                "descripion": "Replicate each periodic mode in a different config for a ROCm job",
            },
            {
                "job_name": "",
                "test_matrix": '{include: [{config: "default", runner: "linux"}, {config: "cfg", runner: "macos"}]}',
                "description": "Empty job name",
                "descripion": "Empty job name",
            },
            {
                "test_matrix": '{include: [{config: "default", runner: "linux"}, {config: "cfg", runner: "macos"}]}',
                "description": "Missing job name",
                "descripion": "Missing job name",
            },
        ]
4  .github/scripts/test_trymerge.py  vendored

@ -265,7 +265,7 @@ class DummyGitRepo(GitRepo):
        return ["FakeCommitSha"]

    def commit_message(self, ref: str) -> str:
        return "super awesome commit message"
        return "super awsome commit message"


@mock.patch("trymerge.gh_graphql", side_effect=mocked_gh_graphql)
@ -433,7 +433,7 @@ class TestTryMerge(TestCase):
        )

    def test_cancelled_gets_ignored(self, *args: Any) -> None:
        """Tests that cancelled workflow does not override existing successful status"""
        """Tests that cancelled workflow does not override existing successfull status"""
        pr = GitHubPR("pytorch", "pytorch", 110367)
        conclusions = pr.get_checkrun_conclusions()
        lint_checks = [name for name in conclusions.keys() if "Lint" in name]
2  .github/scripts/trymerge.py  vendored

@ -634,7 +634,7 @@ def _revlist_to_prs(
        raise RuntimeError(
            f"Found an unexpected number of PRs mentioned in commit {rev}: "
            f"{len(all_matches)}. This is probably because you are using an "
            "old version of ghstack. Please update ghstack and resubmit "
            "old verion of ghstack. Please update ghstack and resubmit "
            "your PRs"
        )
8  .github/scripts/windows/build_magma.bat  vendored

@ -35,15 +35,15 @@ cd magma
mkdir build && cd build

set GPU_TARGET=All
if "%CUVER_NODOT%" == "129" (
  set CUDA_ARCH_LIST=-gencode=arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90 -gencode arch=compute_100,code=sm_100 -gencode arch=compute_120,code=sm_120
)
if "%CUVER_NODOT%" == "128" (
  set CUDA_ARCH_LIST=-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90 -gencode arch=compute_100,code=sm_100 -gencode arch=compute_120,code=sm_120
)
if "%CUVER_NODOT%" == "126" (
if "%CUVER_NODOT:~0,2%" == "12" if NOT "%CUVER_NODOT%" == "128" (
  set CUDA_ARCH_LIST=-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90
)
if "%CUVER_NODOT%" == "118" (
  set CUDA_ARCH_LIST= -gencode arch=compute_37,code=sm_37 -gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90
)

set CC=cl.exe
set CXX=cl.exe
2  .github/scripts/windows/build_triton.bat  vendored

@ -15,4 +15,4 @@ call conda run -n %PYTHON_PREFIX% pip install wheel pybind11 certifi cython cmak
dir "%VC_INSTALL_PATH%"

call "%VC_INSTALL_PATH%\VC\Auxiliary\Build\vcvarsall.bat" x64
call conda run -n %PYTHON_PREFIX% python .github/scripts/build_triton_wheel.py --device=%BUILD_DEVICE% %RELEASE%
call conda run -n %PYTHON_PREFIX% python .github/scripts/build_triton_wheel.py --device=%BUILD_DEVICE% %TRITON_VERSION% %RELEASE%
@ -171,7 +171,7 @@ jobs:
      - name: Teardown XPU
        uses: ./.github/actions/teardown-xpu
{%- else %}
    runs-on: linux.rocm.gpu.mi250
    runs-on: linux.rocm.gpu
    timeout-minutes: !{{ common.timeout_minutes }}
    !{{ upload.binary_env(config) }}
    steps:
35  .github/workflows/_linux-build.yml  vendored

@ -69,6 +69,11 @@ on:
        required: false
        type: string
        default: ""
      max-jobs:
        description: |
          Overwrite the number of jobs to use for the build
        required: false
        type: string
      disable-monitor:
        description: |
          Disable utilization monitoring for build job
@ -94,7 +99,7 @@ on:
          commit with no cpp changes from this commit
        required: false
        type: boolean
        default: true
        default: false

    secrets:
      HUGGING_FACE_HUB_TOKEN:
@ -153,13 +158,6 @@ jobs:
          role-session-name: gha-linux-build
          aws-region: us-east-1

      - name: Get workflow job id
        id: get-job-id
        uses: ./.github/actions/get-workflow-job-id
        if: always()
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}

      - name: Check if can use old whl build
        id: use-old-whl
        uses: ./.github/actions/reuse-old-whl
@ -168,8 +166,6 @@ jobs:
          build-environment: ${{ inputs.build-environment }}
          run-id: ${{ github.run_id }}
          github-token: ${{ secrets.GITHUB_TOKEN }}
          job-id: ${{ steps.get-job-id.outputs.job-id }}
          job-name: ${{ steps.get-job-id.outputs.job-name }}

      - name: Calculate docker image
        id: calculate-docker-image
@ -185,7 +181,7 @@ jobs:
          ECR_DOCKER_IMAGE: ${{ steps.calculate-docker-image.outputs.docker-image }}
        shell: bash
        run: |
          tag=${ECR_DOCKER_IMAGE##*:}
          tag=${ECR_DOCKER_IMAGE##*/}
          echo "docker pull ghcr.io/pytorch/ci-image:${tag/:/-}"

      - name: Pull docker image
@ -198,6 +194,13 @@ jobs:
        id: parse-ref
        run: .github/scripts/parse_ref.py

      - name: Get workflow job id
        id: get-job-id
        uses: ./.github/actions/get-workflow-job-id
        if: always()
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}

      # Apply the filter logic to the build step too if the test-config label is already there
      - name: Select all requested test configurations (if the test matrix is available)
        id: filter
@ -261,6 +264,7 @@ jobs:
          OUR_GITHUB_JOB_ID: ${{ steps.get-job-id.outputs.job-id }}
          HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
          SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }}
          MAX_JOBS_OVERRIDE: ${{ inputs.max-jobs }}
        run: |
          START_TIME=$(date +%s)
          if [[ ${BUILD_ENVIRONMENT} == *"s390x"* ]]; then
@ -280,6 +284,12 @@ jobs:
            DOCKER_SHELL_CMD=
          fi

          if [[ ${MAX_JOBS_OVERRIDE} == "" ]]; then
            MAX_JOBS="$(nproc --ignore=2)"
          else
            MAX_JOBS="${MAX_JOBS_OVERRIDE}"
          fi

          # Leaving 1GB for the runner and other things
          TOTAL_AVAILABLE_MEMORY_IN_GB=$(awk '/MemTotal/ { printf "%.3f \n", $2/1024/1024 - 1 }' /proc/meminfo)
          # https://docs.docker.com/engine/containers/resource_constraints/#--memory-swap-details, the 3GB swap
@ -291,7 +301,8 @@ jobs:
          # shellcheck disable=SC2086
          container_name=$(docker run \
            -e BUILD_ENVIRONMENT \
            -e MAX_JOBS="$(nproc --ignore=2)" \
            -e MAX_JOBS=${MAX_JOBS} \
            -e MAX_JOBS_OVERRIDE \
            -e AWS_DEFAULT_REGION \
            -e PR_NUMBER \
            -e SHA1 \
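The MAX_JOBS fallback added above, restated as a small helper (a sketch; `nproc --ignore=2` is approximated with os.cpu_count()):

import os

def resolve_max_jobs(override: str) -> int:
    # Empty MAX_JOBS_OVERRIDE means "all cores minus two", floored at one.
    if override == "":
        return max((os.cpu_count() or 1) - 2, 1)
    return int(override)

assert resolve_max_jobs("") >= 1
assert resolve_max_jobs("8") == 8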
58  .github/workflows/_linux-test.yml  vendored

@ -90,13 +90,10 @@ jobs:
    environment: ${{ github.ref == 'refs/heads/main' && 'scribe-protected' || startsWith(github.ref, 'refs/heads/release/') && 'scribe-protected' || contains(github.event.pull_request.labels.*.name, 'ci-scribe') && 'scribe-pr' || '' }}
    runs-on: ${{ matrix.runner }}
    timeout-minutes: ${{ matrix.mem_leak_check == 'mem_leak_check' && 600 || inputs.timeout-minutes }}
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup SSH (Click me for login details)
        uses: pytorch/test-infra/.github/actions/setup-ssh@main
        if: ${{ matrix.runner != 'B200' && inputs.build-environment != 'linux-s390x-binary-manywheel' }}
        if: ${{ !contains(matrix.runner, 'gcp.a100') && inputs.build-environment != 'linux-s390x-binary-manywheel' }}
        with:
          github-secret: ${{ secrets.GITHUB_TOKEN }}
          instructions: |
@ -108,31 +105,18 @@ jobs:
        with:
          no-sudo: true

      - name: Setup Python
        if: matrix.runner == 'B200'
        uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
        with:
          python-version: '3.12'
          cache: pip

      - name: Setup Linux
        uses: ./.github/actions/setup-linux
        if: inputs.build-environment != 'linux-s390x-binary-manywheel' && matrix.runner != 'B200'
        if: inputs.build-environment != 'linux-s390x-binary-manywheel'

      - name: configure aws credentials
        if: ${{ inputs.aws-role-to-assume != '' && inputs.build-environment != 'linux-s390x-binary-manywheel' }}
        if : ${{ inputs.aws-role-to-assume != '' && inputs.build-environment != 'linux-s390x-binary-manywheel' }}
        uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722 # v4.1.0
        with:
          role-to-assume: ${{ inputs.aws-role-to-assume }}
          role-session-name: gha-linux-test
          aws-region: us-east-1

      - name: Login to Amazon ECR
        if: ${{ inputs.aws-role-to-assume != '' && matrix.runner == 'B200' }}
        id: login-ecr
        continue-on-error: true
        uses: aws-actions/amazon-ecr-login@062b18b96a7aff071d4dc91bc00c4c1a7945b076 # v2.0.1

      - name: Calculate docker image
        id: calculate-docker-image
        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
@ -147,7 +131,7 @@ jobs:
          ECR_DOCKER_IMAGE: ${{ steps.calculate-docker-image.outputs.docker-image }}
        shell: bash
        run: |
          tag=${ECR_DOCKER_IMAGE##*:}
          tag=${ECR_DOCKER_IMAGE##*/}
          echo "docker pull ghcr.io/pytorch/ci-image:${tag/:/-}"

      - name: Pull docker image
@ -164,17 +148,17 @@ jobs:
      - name: Install nvidia driver, nvidia-docker runtime, set GPU_FLAG
        id: install-nvidia-driver
        uses: pytorch/test-infra/.github/actions/setup-nvidia@main
        if: ${{ contains(inputs.build-environment, 'cuda') && !contains(matrix.config, 'nogpu') && steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'false' && matrix.runner != 'B200' }}
        if: ${{ contains(inputs.build-environment, 'cuda') && !contains(matrix.config, 'nogpu') && steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'false' }}

      - name: Setup GPU_FLAG for docker run
        id: setup-gpu-flag
        run: echo "GPU_FLAG=--gpus all -e NVIDIA_DRIVER_CAPABILITIES=all" >> "${GITHUB_ENV}"
        if: ${{ contains(inputs.build-environment, 'cuda') && !contains(matrix.config, 'nogpu') && (steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'true' || matrix.runner == 'B200') }}
        if: ${{ contains(inputs.build-environment, 'cuda') && !contains(matrix.config, 'nogpu') && steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'true' }}

      - name: Setup SCCACHE_SERVER_PORT environment for docker run when on container
        id: setup-sscache-port-flag
        run: echo "SCCACHE_SERVER_PORT_DOCKER_FLAG=-e SCCACHE_SERVER_PORT=$((RUNNER_UID + 4226))" >> "${GITHUB_ENV}"
        if: ${{ steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'true' && matrix.runner != 'B200' }}
        if: ${{ steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'true' }}

      - name: Lock NVIDIA A100 40GB Frequency
        run: |
@ -223,7 +207,7 @@ jobs:
        run: .github/scripts/parse_ref.py

      - name: Check for keep-going label and re-enabled test issues
        # This uses the filter-test-configs action because it conveniently
        # This uses the filter-test-configs action because it conviniently
        # checks for labels and re-enabled test issues. It does not actually do
        # any filtering. All filtering is done in the build step.
        id: keep-going
@ -241,12 +225,6 @@ jobs:
        run: |
          echo "timeout=$((JOB_TIMEOUT-30))" >> "${GITHUB_OUTPUT}"

      - name: Preserve github env variables for use in docker
        shell: bash
        run: |
          env | grep '^GITHUB' >> "/tmp/github_env_${GITHUB_RUN_ID}"
          env | grep '^CI' >> "/tmp/github_env_${GITHUB_RUN_ID}"

      - name: Test
        id: test
        timeout-minutes: ${{ fromJson(steps.test-timeout.outputs.timeout) }}
@ -275,8 +253,8 @@ jobs:
          NO_TD: ${{ steps.keep-going.outputs.ci-no-td }}
          TD_DISTRIBUTED: ${{ steps.keep-going.outputs.ci-td-distributed }}
          # Do not set SCCACHE_S3_KEY_PREFIX to share the cache between all build jobs
          SCCACHE_BUCKET: ${{ matrix.runner != 'B200' && 'ossci-compiler-cache-circleci-v2' || '' }}
          SCCACHE_REGION: ${{ matrix.runner != 'B200' && 'us-east-1' || '' }}
          SCCACHE_BUCKET: ossci-compiler-cache-circleci-v2
          SCCACHE_REGION: us-east-1
          SHM_SIZE: ${{ contains(inputs.build-environment, 'cuda') && '2g' || '1g' }}
          DOCKER_IMAGE: ${{ inputs.docker-image }}
          XLA_CUDA: ${{ contains(inputs.build-environment, 'xla') && '0' || '' }}
@ -286,6 +264,7 @@ jobs:
          DASHBOARD_TAG: ${{ inputs.dashboard-tag }}
          HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
          SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }}
          IS_A100_RUNNER: ${{ contains(matrix.runner, 'a100') && '1' || '0' }}
          ARTIFACTS_FILE_SUFFIX: ${{ github.job }}-${{ matrix.config }}-${{ matrix.shard }}-${{ matrix.num_shards }}-${{ matrix.runner }}_${{ steps.get-job-id.outputs.job-id }}
        run: |
          set -x
@ -311,6 +290,10 @@ jobs:
          # if for some reason cleanup action doesn't stop container
          # when job is cancelled
          DOCKER_SHELL_CMD="sleep 12h"

          # since some steps are skipped on s390x, if they are necessary, run them here
          env | grep '^GITHUB' >> "/tmp/github_env_${GITHUB_RUN_ID}"
          env | grep '^CI' >> "/tmp/github_env_${GITHUB_RUN_ID}"
          else
            SHM_OPTS="--shm-size=${SHM_SIZE}"
            JENKINS_USER="--user jenkins"
@ -362,6 +345,7 @@ jobs:
            -e HUGGING_FACE_HUB_TOKEN \
            -e SCRIBE_GRAPHQL_ACCESS_TOKEN \
            -e DASHBOARD_TAG \
            -e IS_A100_RUNNER \
            -e ARTIFACTS_FILE_SUFFIX \
            --memory="${TOTAL_AVAILABLE_MEMORY_IN_GB%.*}g" \
            --memory-swap="${TOTAL_MEMORY_WITH_SWAP}g" \
@ -400,18 +384,8 @@ jobs:
          test_config: ${{ matrix.config }}
          job_identifier: ${{ github.workflow }}_${{ inputs.build-environment }}

      - name: Authenticate with AWS
        if: ${{ matrix.runner == 'B200' }}
        uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722 # v4.1.0
        with:
          role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_upload-benchmark-results
          # The max duration enforced by the server side
          role-duration-seconds: 18000
          aws-region: us-east-1

      - name: Upload the benchmark results
        uses: pytorch/test-infra/.github/actions/upload-benchmark-results@main
        if: inputs.build-environment != 'linux-s390x-binary-manywheel'
        with:
          benchmark-results-dir: test/test-reports
          dry-run: false
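The per-runner sccache settings introduced above amount to a simple lookup; a sketch grounded in the YAML expressions (an empty bucket effectively disables the shared S3 cache on B200 runners):

def sccache_env(runner: str) -> dict[str, str]:
    is_b200 = runner == "B200"
    return {
        "SCCACHE_BUCKET": "" if is_b200 else "ossci-compiler-cache-circleci-v2",
        "SCCACHE_REGION": "" if is_b200 else "us-east-1",
    }

assert sccache_env("B200")["SCCACHE_BUCKET"] == ""
assert sccache_env("linux.4xlarge")["SCCACHE_REGION"] == "us-east-1"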
2  .github/workflows/_mac-build.yml  vendored

@ -123,7 +123,7 @@ jobs:
          else
            # The runner has access to the S3 bucket via IAM profile without the need
            # for any credential
            echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
            echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"0
            echo "SCCACHE_S3_KEY_PREFIX=${GITHUB_WORKFLOW}" >> "${GITHUB_ENV}"
          fi
6  .github/workflows/_mac-test.yml  vendored

@ -88,10 +88,6 @@ jobs:
            pkill "${PROCESS}" || true
          done

      - name: Clean up leftover miniconda installation
        continue-on-error: true
        run: brew uninstall miniconda || true

      - name: Clean up leftover local python3 site-packages on MacOS pet runner
        continue-on-error: true
        run: |
@ -154,7 +150,7 @@ jobs:
        run: .github/scripts/parse_ref.py

      - name: Check for keep-going label and re-enabled test issues
        # This uses the filter-test-configs action because it conveniently
        # This uses the filter-test-configs action because it conviniently
        # checks for labels and re-enabled test issues. It does not actually do
        # any filtering. All filtering is done in the build step.
        id: keep-going
2  .github/workflows/_rocm-test.yml  vendored

@ -150,7 +150,7 @@ jobs:
        run: .github/scripts/parse_ref.py

      - name: Check for keep-going label and re-enabled test issues
        # This uses the filter-test-configs action because it conveniently
        # This uses the filter-test-configs action because it conviniently
        # checks for labels and re-enabled test issues. It does not actually do
        # any filtering. All filtering is done in the build step.
        id: keep-going
2  .github/workflows/_runner-determinator.yml  vendored

@ -7,7 +7,7 @@ on:
        required: false
        type: string
        description: |
          List of experiments for this workflow. If not defined, all default experiments are included.
          List of experiments for this workfow. If not defined, all default experiments are included.
      opt_out_experiments:
        required: false
        type: string
2  .github/workflows/_win-test.yml  vendored

@ -158,7 +158,7 @@ jobs:
        uses: ./.github/actions/download-td-artifacts

      - name: Check for keep-going label and re-enabled test issues
        # This uses the filter-test-configs action because it conveniently
        # This uses the filter-test-configs action because it conviniently
        # checks for labels and re-enabled test issues. It does not actually do
        # any filtering. All filtering is done in the build step.
        id: keep-going
7  .github/workflows/_xpu-test.yml  vendored

@ -105,7 +105,7 @@ jobs:
          ECR_DOCKER_IMAGE: ${{ steps.calculate-docker-image.outputs.docker-image }}
        shell: bash
        run: |
          tag=${ECR_DOCKER_IMAGE##*:}
          tag=${ECR_DOCKER_IMAGE##*/}
          echo "docker pull ghcr.io/pytorch/ci-image:${tag/:/-}"

      - name: Pull docker image
@ -147,7 +147,7 @@ jobs:
        run: .github/scripts/parse_ref.py

      - name: Check for keep-going label and re-enabled test issues
        # This uses the filter-test-configs action because it conveniently
        # This uses the filter-test-configs action because it conviniently
        # checks for labels and re-enabled test issues. It does not actually do
        # any filtering. All filtering is done in the build step.
        id: keep-going
@ -191,6 +191,9 @@ jobs:
          SHARD_NUMBER: ${{ matrix.shard }}
          NUM_TEST_SHARDS: ${{ matrix.num_shards }}
          REENABLED_ISSUES: ${{ steps.keep-going.outputs.reenabled-issues }}
          SCCACHE_BUCKET: ossci-compiler-cache-circleci-v2
          SCCACHE_REGION: us-east-1
          SCCACHE_S3_KEY_PREFIX: ${{ github.workflow }}
          DOCKER_IMAGE: ${{ inputs.docker-image }}
          XLA_CLANG_CACHE_S3_BUCKET_NAME: ossci-compiler-clang-cache-circleci-xla
          PYTORCH_TEST_CUDA_MEM_LEAK_CHECK: ${{ matrix.mem_leak_check && '1' || '0' }}
4  .github/workflows/build-almalinux-images.yml  vendored

@ -23,7 +23,7 @@ on:
env:
  DOCKER_REGISTRY: "docker.io"
  DOCKER_BUILDKIT: 1
  WITH_PUSH: ${{ github.event_name == 'push' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release') || startsWith(github.ref, 'refs/tags/v')) }}
  WITH_PUSH: ${{ github.event_name == 'push' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release')) }}

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
@ -32,7 +32,7 @@ concurrency:
jobs:
  build-docker:
    if: github.repository_owner == 'pytorch'
    environment: ${{ (github.event_name == 'push' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release') || startsWith(github.ref, 'refs/tags/v')) && 'docker-build') || '' }}
    environment: ${{ (github.ref == 'refs/heads/main' || startsWith(github.event.ref, 'refs/tags/v')) && 'docker-build' || '' }}
    runs-on: linux.9xlarge.ephemeral
    strategy:
      matrix:
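The widened WITH_PUSH and environment gates in this and the following image-build workflows all reduce to the same predicate; a sketch:

def with_push(event_name: str, ref: str) -> bool:
    # Pushes to main, release branches, and v* tags now all publish images.
    return event_name == "push" and (
        ref == "refs/heads/main"
        or ref.startswith("refs/heads/release")
        or ref.startswith("refs/tags/v")
    )

assert with_push("push", "refs/tags/v2.8.0-rc1")
assert with_push("push", "refs/heads/release/2.8")
assert not with_push("pull_request", "refs/heads/main")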
4  .github/workflows/build-libtorch-images.yml  vendored

@ -22,7 +22,7 @@ on:
env:
  DOCKER_REGISTRY: "docker.io"
  DOCKER_BUILDKIT: 1
  WITH_PUSH: ${{ github.event_name == 'push' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release') || startsWith(github.ref, 'refs/tags/v')) }}
  WITH_PUSH: ${{ github.event_name == 'push' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release')) }}

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
@ -40,7 +40,7 @@ jobs:
      curr_ref_type: ${{ github.ref_type }}

  build:
    environment: ${{ (github.event_name == 'push' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release') || startsWith(github.ref, 'refs/tags/v')) && 'docker-build') || '' }}
    environment: ${{ (github.ref == 'refs/heads/main' || startsWith(github.event.ref, 'refs/tags/v')) && 'docker-build' || '' }}
    needs: get-label-type
    runs-on: ${{ needs.get-label-type.outputs.label-type }}linux.9xlarge.ephemeral
    name: libtorch-cxx11-builder:${{ matrix.tag }}
2  .github/workflows/build-magma-rocm-linux.yml  vendored

@ -29,7 +29,7 @@ concurrency:
jobs:
  build-linux-magma-rocm:
    if: github.repository_owner == 'pytorch'
    runs-on: linux.2xlarge
    runs-on: linux.12xlarge
    permissions:
      id-token: write
    strategy:
2  .github/workflows/build-magma-windows.yml  vendored

@ -22,7 +22,7 @@ jobs:
    runs-on: windows-2022
    strategy:
      matrix:
        cuda_version: ["129", "128", "126"]
        cuda_version: ["128", "126"]
        config: ["Release", "Debug"]
    env:
      CUDA_VERSION: ${{ matrix.cuda_version }}
@ -12,7 +12,7 @@ on:
|
||||
env:
|
||||
DOCKER_REGISTRY: "docker.io"
|
||||
DOCKER_BUILDKIT: 1
|
||||
WITH_PUSH: ${{ github.event_name == 'push' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release') || startsWith(github.ref, 'refs/tags/v')) }}
|
||||
WITH_PUSH: ${{ github.event_name == 'push' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release')) }}
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
|
||||
@ -21,7 +21,7 @@ concurrency:
jobs:
build-docker-cpu-s390x:
if: github.repository_owner == 'pytorch'
environment: ${{ (github.event_name == 'push' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release') || startsWith(github.ref, 'refs/tags/v')) && 'docker-build') || '' }}
environment: ${{ (github.ref == 'refs/heads/main' || startsWith(github.event.ref, 'refs/tags/v')) && 'docker-build' || '' }}
runs-on: linux.s390x
steps:
- name: Checkout PyTorch
@ -53,7 +53,7 @@ jobs:
docker tag "${CREATED_FULL_DOCKER_IMAGE_NAME}" "${DOCKER_IMAGE_NAME_PREFIX}-${GIT_COMMIT_SHA}"
docker tag "${CREATED_FULL_DOCKER_IMAGE_NAME}" "${DOCKER_IMAGE_NAME_PREFIX}-${CI_FOLDER_SHA}"

# Pretty sure Github will mask tokens and I'm not sure if it will even be
# Prety sure Github will mask tokens and I'm not sure if it will even be
# printed due to pipe, but just in case
set +x
if [[ "${WITH_PUSH:-false}" == "true" ]]; then
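For context (editorial note): "set +x" above turns off shell command tracing just before the credentials are used, so the token cannot be echoed into the CI log even if GitHub's secret masking misses it; the "pipe" the comment mentions is presumably a stdin-based login. A minimal sketch of the pattern, with hypothetical DOCKER_USER/DOCKER_TOKEN secret names:

    - name: Push image
      env:
        DOCKER_USER: ${{ secrets.DOCKER_USER }}    # hypothetical secret names
        DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
      run: |
        set +x  # stop echoing commands so the token never lands in the log
        if [[ "${WITH_PUSH:-false}" == "true" ]]; then
          echo "${DOCKER_TOKEN}" | docker login -u "${DOCKER_USER}" --password-stdin
          docker push "${CREATED_FULL_DOCKER_IMAGE_NAME}"
        fi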
.github/workflows/build-manywheel-images.yml
@ -23,7 +23,8 @@ on:
env:
DOCKER_REGISTRY: "docker.io"
DOCKER_BUILDKIT: 1
WITH_PUSH: ${{ github.event_name == 'push' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release') || startsWith(github.ref, 'refs/tags/v')) }}
WITH_PUSH: ${{ github.event_name == 'push' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release')) }}

concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
cancel-in-progress: true
@ -40,7 +41,7 @@ jobs:
curr_ref_type: ${{ github.ref_type }}

build:
environment: ${{ (github.event_name == 'push' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release') || startsWith(github.ref, 'refs/tags/v')) && 'docker-build') || '' }}
environment: ${{ (github.ref == 'refs/heads/main' || startsWith(github.event.ref, 'refs/tags/v')) && 'docker-build' || '' }}
needs: get-label-type
strategy:
fail-fast: false
.github/workflows/build-triton-wheel.yml
@ -8,7 +8,6 @@ on:
# NOTE: Binary build pipelines should only get triggered on release candidate builds
# Release candidate tags look like: v1.11.0-rc1
- v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
- 'ciflow/triton_binaries/*'
paths:
- .github/workflows/build-triton-wheel.yml
- .github/scripts/build_triton_wheel.py
@ -157,8 +156,9 @@ jobs:
fi

if [[ "${BUILD_DEVICE}" == xpu ]]; then
TRITON_VERSION=$(cat .ci/docker/triton_xpu_version.txt)
docker exec -t "${container_name}" bash -c "dnf install -y gcc-toolset-13-gcc-c++"
docker exec -t "${container_name}" bash -c "source /opt/rh/gcc-toolset-13/enable && ${PYTHON_EXECUTABLE} /pytorch/.github/scripts/build_triton_wheel.py --device=$BUILD_DEVICE $RELEASE"
docker exec -t "${container_name}" bash -c "source /opt/rh/gcc-toolset-13/enable && ${PYTHON_EXECUTABLE} /pytorch/.github/scripts/build_triton_wheel.py --device=$BUILD_DEVICE --triton-version=$TRITON_VERSION $RELEASE"
else
docker exec -t "${container_name}" bash -c "${PYTHON_EXECUTABLE} /pytorch/.github/scripts/build_triton_wheel.py --device=$BUILD_DEVICE $RELEASE $WITH_CLANG_LDD"
fi
@ -255,6 +255,11 @@ jobs:
if [[ "${IS_RELEASE_TAG}" == true ]]; then
export RELEASE="--release"
fi
export TRITON_VERSION=""
if [[ "${{ matrix.device }}" == "xpu" ]]; then
triton_version=$(cat .ci/docker/ci_commit_pins/triton-xpu.txt)
export TRITON_VERSION="--triton-version=${triton_version}"
fi
.github/scripts/windows/build_triton.bat
mkdir -p "${RUNNER_TEMP}/artifacts/"
mv ./*.whl "${RUNNER_TEMP}/artifacts/"
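For context (editorial note): both hunks above apply the same pattern, reading a pinned Triton version out of a file checked into the repo and forwarding it to the build script via --triton-version, so the wheel's version is controlled by the pin rather than by the script's default. A minimal sketch of the pattern, as a hypothetical standalone step reusing the paths and flag names from the diff:

    - name: Build Triton wheel (xpu)
      if: ${{ matrix.device == 'xpu' }}
      run: |
        # The pin file holds a single version string.
        triton_version=$(cat .ci/docker/triton_xpu_version.txt)
        python .github/scripts/build_triton_wheel.py \
          --device=xpu --triton-version="${triton_version}"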
.github/workflows/create_release.yml
@ -55,8 +55,6 @@ jobs:
tag_or_branch="${tag_or_branch//\//_}"
echo "PT_RELEASE_NAME=pytorch-$tag_or_branch" >> "$GITHUB_ENV"
echo "PT_RELEASE_FILE=pytorch-$tag_or_branch.tar.gz" >> "$GITHUB_ENV"
- name: Checkout optional submodules
run: python3 tools/optional_submodules.py
- name: Create source distribution
run: |
# Create new folder with specified name so extracting the archive yields that
@ -82,7 +80,7 @@ jobs:
path: ${{ env.PT_RELEASE_FILE }}
- name: Set output
id: release_name
run: echo "pt_release_name=${{ env.PT_RELEASE_NAME }}.tar.gz" >> "${GITHUB_OUTPUT}"
run: echo "name=pt_release_name::${{ env.PT_RELEASE_NAME }}.tar.gz" >> "${GITHUB_OUTPUT}"

upload_source_code_to_s3:
if: ${{ github.repository == 'pytorch/pytorch' && github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') && contains(github.ref, 'rc') }}
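For context (editorial note): of the two "Set output" lines in the hunk above, only the key=value form is valid for the $GITHUB_OUTPUT file; the "name=...::..." form mixes in the retired ::set-output syntax and would register an output literally named "name". A minimal sketch of writing a step output and consuming it from a later step (the second step is hypothetical):

    - name: Set output
      id: release_name
      run: echo "pt_release_name=${PT_RELEASE_NAME}.tar.gz" >> "${GITHUB_OUTPUT}"
    - name: Use output
      run: echo "release file is ${{ steps.release_name.outputs.pt_release_name }}"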
.github/workflows/docker-builds.yml
@ -49,7 +49,7 @@ jobs:
matrix:
runner: [linux.12xlarge]
docker-image-name: [
pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc11,
pytorch-linux-jammy-cuda12.6-cudnn9-py3-gcc11,
pytorch-linux-jammy-cuda12.6-cudnn9-py3-gcc9-inductor-benchmarks,
pytorch-linux-jammy-cuda12.6-cudnn9-py3.12-gcc9-inductor-benchmarks,
pytorch-linux-jammy-cuda12.6-cudnn9-py3.13-gcc9-inductor-benchmarks,
@ -69,6 +69,7 @@ jobs:
pytorch-linux-jammy-py3.12-halide,
pytorch-linux-jammy-xpu-2025.0-py3,
pytorch-linux-jammy-xpu-2025.1-py3,
pytorch-linux-jammy-py3-clang15-asan,
pytorch-linux-jammy-py3-clang18-asan,
pytorch-linux-jammy-py3-clang12-onnx,
pytorch-linux-jammy-linter,
.github/workflows/docker-release.yml
@ -156,7 +156,7 @@ jobs:

docker push ghcr.io/pytorch/pytorch-nightly:"${PYTORCH_NIGHTLY_COMMIT}${CUDA_SUFFIX}"

# Please note, here we need to pin specific version of CUDA as with latest label
# Please note, here we ned to pin specific verison of CUDA as with latest label
if [[ ${CUDA_VERSION_SHORT} == "${STABLE_CUDA_VERSION}" ]]; then
docker tag ghcr.io/pytorch/pytorch-nightly:"${PYTORCH_NIGHTLY_COMMIT}${CUDA_SUFFIX}" \
ghcr.io/pytorch/pytorch-nightly:latest
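For context (editorial note): the hunk above promotes the nightly image to the "latest" tag only when its CUDA version matches the designated stable one, so pytorch-nightly:latest always tracks the stable-CUDA build. A minimal sketch of the gate as a standalone step, assuming the version variables are provided by the surrounding job:

    - name: Tag stable-CUDA nightly as latest
      run: |
        if [[ "${CUDA_VERSION_SHORT}" == "${STABLE_CUDA_VERSION}" ]]; then
          docker tag  ghcr.io/pytorch/pytorch-nightly:"${PYTORCH_NIGHTLY_COMMIT}${CUDA_SUFFIX}" \
                      ghcr.io/pytorch/pytorch-nightly:latest
          docker push ghcr.io/pytorch/pytorch-nightly:latest
        fi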
Some files were not shown because too many files have changed in this diff.