Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-26 16:44:54 +08:00)
Compare commits
3 Commits
dev/joona/
...
mlazos/hc2
| Author | SHA1 | Date |
|---|---|---|
| | 334d3ab3b1 | |
| | ecc1899929 | |
| | c831f3b11a | |
@@ -136,9 +136,6 @@ def complete_wheel(folder: str) -> str:
"""
wheel_name = list_dir(f"/{folder}/dist")[0]

# Please note for cuda we don't run auditwheel since we use custom script to package
# the cuda dependencies to the wheel file using update_wheel() method.
# However we need to make sure filename reflects the correct Manylinux platform.
if "pytorch" in folder and not enable_cuda:
print("Repairing Wheel with AuditWheel")
check_call(["auditwheel", "repair", f"dist/{wheel_name}"], cwd=folder)
@@ -150,14 +147,7 @@ def complete_wheel(folder: str) -> str:
f"/{folder}/dist/{repaired_wheel_name}",
)
else:
repaired_wheel_name = wheel_name.replace(
"linux_aarch64", "manylinux_2_28_aarch64"
)
print(f"Renaming {wheel_name} wheel to {repaired_wheel_name}")
os.rename(
f"/{folder}/dist/{wheel_name}",
f"/{folder}/dist/{repaired_wheel_name}",
)
repaired_wheel_name = wheel_name

print(f"Copying {repaired_wheel_name} to artifacts")
shutil.copy2(

@@ -44,8 +44,6 @@ FROM base as cuda
ARG CUDA_VERSION=12.4
RUN rm -rf /usr/local/cuda-*
ADD ./common/install_cuda.sh install_cuda.sh
COPY ./common/install_nccl.sh install_nccl.sh
COPY ./ci_commit_pins/nccl-cu* /ci_commit_pins/
ENV CUDA_HOME=/usr/local/cuda-${CUDA_VERSION}
# Preserve CUDA_VERSION for the builds
ENV CUDA_VERSION=${CUDA_VERSION}

@@ -1,60 +1,82 @@
#!/usr/bin/env bash
# Script used only in CD pipeline

set -exou pipefail
set -eou pipefail

image="$1"
shift

if [ -z "${image}" ]; then
echo "Usage: $0 IMAGENAME:ARCHTAG"
echo "Usage: $0 IMAGE"
exit 1
fi

# Go from imagename:tag to tag
DOCKER_TAG_PREFIX=$(echo "${image}" | awk -F':' '{print $2}')
DOCKER_IMAGE_NAME="pytorch/${image}"

CUDA_VERSION=""
if [[ "${DOCKER_TAG_PREFIX}" == cuda* ]]; then
# extract cuda version from image name and tag. e.g. manylinux2_28-builder:cuda12.8 returns 12.8
CUDA_VERSION=$(echo "${DOCKER_TAG_PREFIX}" | awk -F'cuda' '{print $2}')
fi

case ${DOCKER_TAG_PREFIX} in
cpu)
BASE_TARGET=base
;;
cuda*)
BASE_TARGET=cuda${CUDA_VERSION}
;;
*)
echo "ERROR: Unknown docker tag ${DOCKER_TAG_PREFIX}"
exit 1
;;
esac

# TODO: Remove LimitNOFILE=1048576 patch once https://github.com/pytorch/test-infra/issues/5712
# is resolved. This patch is required in order to fix timing out of Docker build on Amazon Linux 2023.
sudo sed -i s/LimitNOFILE=infinity/LimitNOFILE=1048576/ /usr/lib/systemd/system/docker.service
sudo systemctl daemon-reload
sudo systemctl restart docker

export DOCKER_BUILDKIT=1
TOPDIR=$(git rev-parse --show-toplevel)
tmp_tag=$(basename "$(mktemp -u)" | tr '[:upper:]' '[:lower:]')

docker build \
--target final \
--progress plain \
--build-arg "BASE_TARGET=${BASE_TARGET}" \
--build-arg "CUDA_VERSION=${CUDA_VERSION}" \
--build-arg "DEVTOOLSET_VERSION=11" \
-t ${tmp_tag} \
$@ \
-f "${TOPDIR}/.ci/docker/almalinux/Dockerfile" \
${TOPDIR}/.ci/docker/
CUDA_VERSION=${CUDA_VERSION:-12.1}

if [ -n "${CUDA_VERSION}" ]; then
case ${CUDA_VERSION} in
cpu)
BASE_TARGET=base
DOCKER_TAG=cpu
;;
all)
BASE_TARGET=all_cuda
DOCKER_TAG=latest
;;
*)
BASE_TARGET=cuda${CUDA_VERSION}
DOCKER_TAG=cuda${CUDA_VERSION}
;;
esac

(
set -x
# TODO: Remove LimitNOFILE=1048576 patch once https://github.com/pytorch/test-infra/issues/5712
# is resolved. This patch is required in order to fix timing out of Docker build on Amazon Linux 2023.
sudo sed -i s/LimitNOFILE=infinity/LimitNOFILE=1048576/ /usr/lib/systemd/system/docker.service
sudo systemctl daemon-reload
sudo systemctl restart docker

docker build \
--target final \
--progress plain \
--build-arg "BASE_TARGET=${BASE_TARGET}" \
--build-arg "CUDA_VERSION=${CUDA_VERSION}" \
--build-arg "DEVTOOLSET_VERSION=11" \
-t ${DOCKER_IMAGE_NAME} \
$@ \
-f "${TOPDIR}/.ci/docker/almalinux/Dockerfile" \
${TOPDIR}/.ci/docker/
)

if [[ "${DOCKER_TAG}" =~ ^cuda* ]]; then
# Test that we're using the right CUDA compiler
docker run --rm "${tmp_tag}" nvcc --version | grep "cuda_${CUDA_VERSION}"
(
set -x
docker run --rm "${DOCKER_IMAGE_NAME}" nvcc --version | grep "cuda_${CUDA_VERSION}"
)
fi

GITHUB_REF=${GITHUB_REF:-$(git symbolic-ref -q HEAD || git describe --tags --exact-match)}
GIT_BRANCH_NAME=${GITHUB_REF##*/}
GIT_COMMIT_SHA=${GITHUB_SHA:-$(git rev-parse HEAD)}
DOCKER_IMAGE_BRANCH_TAG=${DOCKER_IMAGE_NAME}-${GIT_BRANCH_NAME}
DOCKER_IMAGE_SHA_TAG=${DOCKER_IMAGE_NAME}-${GIT_COMMIT_SHA}
if [[ "${WITH_PUSH:-}" == true ]]; then
(
set -x
docker push "${DOCKER_IMAGE_NAME}"
if [[ -n ${GITHUB_REF} ]]; then
docker tag ${DOCKER_IMAGE_NAME} ${DOCKER_IMAGE_BRANCH_TAG}
docker tag ${DOCKER_IMAGE_NAME} ${DOCKER_IMAGE_SHA_TAG}
docker push "${DOCKER_IMAGE_BRANCH_TAG}"
docker push "${DOCKER_IMAGE_SHA_TAG}"
fi
)
fi

@@ -460,18 +460,10 @@ if [[ "$image" == *cuda* && ${OS} == "ubuntu" ]]; then
fi
fi

no_cache_flag=""
progress_flag=""
# Do not use cache and progress=plain when in CI
if [[ -n "${CI:-}" ]]; then
no_cache_flag="--no-cache"
progress_flag="--progress=plain"
fi

# Build image
docker build \
${no_cache_flag} \
${progress_flag} \
--no-cache \
--progress=plain \
--build-arg "BUILD_ENVIRONMENT=${image}" \
--build-arg "PROTOBUF=${PROTOBUF:-}" \
--build-arg "LLVMDEV=${LLVMDEV:-}" \
@@ -523,7 +515,7 @@ docker build \
UBUNTU_VERSION=$(echo ${UBUNTU_VERSION} | sed 's/-rc$//')

function drun() {
docker run --rm "$tmp_tag" "$@"
docker run --rm "$tmp_tag" $*
}

if [[ "$OS" == "ubuntu" ]]; then
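The only functional difference between the two drun variants above is argument forwarding: quoted "$@" passes each argument through intact, while unquoted $* re-splits arguments on whitespace and subjects them to glob expansion. A minimal sketch of that difference (hypothetical probe helpers, not part of the diff):

probe_quoted() { printf '<%s>\n' "$@"; }    # prints <a b> then <c>
probe_unquoted() { printf '<%s>\n' $*; }    # prints <a>, <b>, then <c>
probe_quoted "a b" c
probe_unquoted "a b" c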
@@ -571,14 +563,3 @@ if [ -n "$KATEX" ]; then
exit 1
fi
fi

HAS_TRITON=$(drun python -c "import triton" > /dev/null 2>&1 && echo "yes" || echo "no")
if [[ -n "$TRITON" || -n "$TRITON_CPU" ]]; then
if [ "$HAS_TRITON" = "no" ]; then
echo "expecting triton to be installed, but it is not"
exit 1
fi
elif [ "$HAS_TRITON" = "yes" ]; then
echo "expecting triton to not be installed, but it is"
exit 1
fi

@@ -68,7 +68,7 @@ COPY ./common/install_rocm.sh install_rocm.sh
RUN bash ./install_rocm.sh
RUN rm install_rocm.sh
COPY ./common/install_rocm_magma.sh install_rocm_magma.sh
RUN bash ./install_rocm_magma.sh ${ROCM_VERSION}
RUN bash ./install_rocm_magma.sh
RUN rm install_rocm_magma.sh
COPY ./common/install_amdsmi.sh install_amdsmi.sh
RUN bash ./install_amdsmi.sh

@@ -1 +1 @@
381ae5d57d35c165d98df728380b20fbde350392
cedf52aa8e4df879886270a5920da6fe84cbaa67

@@ -62,7 +62,7 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then

# libstdcxx from conda default channels are too old, we need GLIBCXX_3.4.30
# which is provided in libstdcxx 12 and up.
conda_install libstdcxx-ng=12.3.0 --update-deps -c conda-forge
conda_install libstdcxx-ng=12.3.0 -c conda-forge

# Install PyTorch conda deps, as per https://github.com/pytorch/pytorch README
if [[ $(uname -m) == "aarch64" ]]; then

@@ -2,6 +2,7 @@

set -ex

NCCL_VERSION=v2.26.2-1
CUDNN_VERSION=9.5.1.17

function install_cusparselt_040 {
@@ -39,7 +40,8 @@ function install_cusparselt_063 {

function install_118 {
CUDNN_VERSION=9.1.0.70
echo "Installing CUDA 11.8 and cuDNN ${CUDNN_VERSION} and NCCL and cuSparseLt-0.4.0"
NCCL_VERSION=v2.21.5-1
echo "Installing CUDA 11.8 and cuDNN ${CUDNN_VERSION} and NCCL ${NCCL_VERSION} and cuSparseLt-0.4.0"
rm -rf /usr/local/cuda-11.8 /usr/local/cuda
# install CUDA 11.8.0 in the same container
wget -q https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run
@@ -57,7 +59,14 @@ function install_118 {
cd ..
rm -rf tmp_cudnn

CUDA_VERSION=11.8 bash install_nccl.sh
# NCCL license: https://docs.nvidia.com/deeplearning/nccl/#licenses
# Follow build: https://github.com/NVIDIA/nccl/tree/master?tab=readme-ov-file#build
git clone -b $NCCL_VERSION --depth 1 https://github.com/NVIDIA/nccl.git
cd nccl && make -j src.build
cp -a build/include/* /usr/local/cuda/include/
cp -a build/lib/* /usr/local/cuda/lib64/
cd ..
rm -rf nccl

install_cusparselt_040

@@ -66,7 +75,7 @@ function install_118 {

function install_124 {
CUDNN_VERSION=9.1.0.70
echo "Installing CUDA 12.4.1 and cuDNN ${CUDNN_VERSION} and NCCL and cuSparseLt-0.6.2"
echo "Installing CUDA 12.4.1 and cuDNN ${CUDNN_VERSION} and NCCL ${NCCL_VERSION} and cuSparseLt-0.6.2"
rm -rf /usr/local/cuda-12.4 /usr/local/cuda
# install CUDA 12.4.1 in the same container
wget -q https://developer.download.nvidia.com/compute/cuda/12.4.1/local_installers/cuda_12.4.1_550.54.15_linux.run
@@ -84,7 +93,14 @@ function install_124 {
cd ..
rm -rf tmp_cudnn

CUDA_VERSION=12.4 bash install_nccl.sh
# NCCL license: https://docs.nvidia.com/deeplearning/nccl/#licenses
# Follow build: https://github.com/NVIDIA/nccl/tree/master?tab=readme-ov-file#build
git clone -b $NCCL_VERSION --depth 1 https://github.com/NVIDIA/nccl.git
cd nccl && make -j src.build
cp -a build/include/* /usr/local/cuda/include/
cp -a build/lib/* /usr/local/cuda/lib64/
cd ..
rm -rf nccl

install_cusparselt_062

@@ -92,7 +108,7 @@ function install_124 {
}

function install_126 {
echo "Installing CUDA 12.6.3 and cuDNN ${CUDNN_VERSION} and NCCL and cuSparseLt-0.6.3"
echo "Installing CUDA 12.6.3 and cuDNN ${CUDNN_VERSION} and NCCL ${NCCL_VERSION} and cuSparseLt-0.6.3"
rm -rf /usr/local/cuda-12.6 /usr/local/cuda
# install CUDA 12.6.3 in the same container
wget -q https://developer.download.nvidia.com/compute/cuda/12.6.3/local_installers/cuda_12.6.3_560.35.05_linux.run
@@ -110,7 +126,14 @@ function install_126 {
cd ..
rm -rf tmp_cudnn

CUDA_VERSION=12.6 bash install_nccl.sh
# NCCL license: https://docs.nvidia.com/deeplearning/nccl/#licenses
# Follow build: https://github.com/NVIDIA/nccl/tree/master?tab=readme-ov-file#build
git clone -b $NCCL_VERSION --depth 1 https://github.com/NVIDIA/nccl.git
cd nccl && make -j src.build
cp -a build/include/* /usr/local/cuda/include/
cp -a build/lib/* /usr/local/cuda/lib64/
cd ..
rm -rf nccl

install_cusparselt_063

@@ -218,7 +241,7 @@ function prune_126 {

function install_128 {
CUDNN_VERSION=9.8.0.87
echo "Installing CUDA 12.8.0 and cuDNN ${CUDNN_VERSION} and NCCL and cuSparseLt-0.6.3"
echo "Installing CUDA 12.8.0 and cuDNN ${CUDNN_VERSION} and NCCL ${NCCL_VERSION} and cuSparseLt-0.6.3"
rm -rf /usr/local/cuda-12.8 /usr/local/cuda
# install CUDA 12.8.0 in the same container
wget -q https://developer.download.nvidia.com/compute/cuda/12.8.0/local_installers/cuda_12.8.0_570.86.10_linux.run
@@ -236,7 +259,14 @@ function install_128 {
cd ..
rm -rf tmp_cudnn

CUDA_VERSION=12.8 bash install_nccl.sh
# NCCL license: https://docs.nvidia.com/deeplearning/nccl/#licenses
# Follow build: https://github.com/NVIDIA/nccl/tree/master?tab=readme-ov-file#build
git clone -b $NCCL_VERSION --depth 1 https://github.com/NVIDIA/nccl.git
cd nccl && make -j src.build
cp -a build/include/* /usr/local/cuda/include/
cp -a build/lib/* /usr/local/cuda/lib64/
cd ..
rm -rf nccl

install_cusparselt_063

@@ -3,6 +3,7 @@

set -ex

NCCL_VERSION=v2.26.2-1
CUDNN_VERSION=9.8.0.87

function install_cusparselt_063 {
@@ -17,7 +18,7 @@ function install_cusparselt_063 {
}

function install_128 {
echo "Installing CUDA 12.8.0 and cuDNN ${CUDNN_VERSION} and NCCL and cuSparseLt-0.6.3"
echo "Installing CUDA 12.8.0 and cuDNN ${CUDNN_VERSION} and NCCL ${NCCL_VERSION} and cuSparseLt-0.6.3"
rm -rf /usr/local/cuda-12.8 /usr/local/cuda
# install CUDA 12.8.0 in the same container
wget -q https://developer.download.nvidia.com/compute/cuda/12.8.0/local_installers/cuda_12.8.0_570.86.10_linux_sbsa.run
@@ -35,7 +36,14 @@ function install_128 {
cd ..
rm -rf tmp_cudnn

CUDA_VERSION=12.8 bash install_nccl.sh
# NCCL license: https://docs.nvidia.com/deeplearning/nccl/#licenses
# Follow build: https://github.com/NVIDIA/nccl/tree/master?tab=readme-ov-file#build
git clone -b ${NCCL_VERSION} --depth 1 https://github.com/NVIDIA/nccl.git
cd nccl && make -j src.build
cp -a build/include/* /usr/local/cuda/include/
cp -a build/lib/* /usr/local/cuda/lib64/
cd ..
rm -rf nccl

install_cusparselt_063

@@ -50,7 +50,8 @@ setup_executorch() {
pushd executorch

export PYTHON_EXECUTABLE=python
export CMAKE_ARGS="-DEXECUTORCH_BUILD_PYBIND=ON -DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON"
export EXECUTORCH_BUILD_PYBIND=ON
export CMAKE_ARGS="-DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON"

as_jenkins .ci/scripts/setup-linux.sh --build-tool cmake || true
popd

@@ -35,9 +35,7 @@ git clone https://github.com/halide/Halide.git
pushd Halide
git checkout ${COMMIT} && git submodule update --init --recursive
pip_install -r requirements.txt
# NOTE: pybind has a requirement for cmake > 3.5 so set the minimum cmake version here with a flag
# Context: https://github.com/pytorch/pytorch/issues/150420
cmake -G Ninja -DCMAKE_POLICY_VERSION_MINIMUM=3.5 -DCMAKE_BUILD_TYPE=Release -S . -B build
cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -S . -B build
cmake --build build
test -e ${CONDA_PREFIX}/lib/python3 || ln -s python${ANACONDA_PYTHON_VERSION} ${CONDA_PREFIX}/lib/python3
cmake --install build --prefix ${CONDA_PREFIX}

@@ -1,26 +0,0 @@
#!/bin/bash

set -ex

NCCL_VERSION=""
if [[ ${CUDA_VERSION:0:2} == "11" ]]; then
NCCL_VERSION=$(cat ci_commit_pins/nccl-cu11.txt)
elif [[ ${CUDA_VERSION:0:2} == "12" ]]; then
NCCL_VERSION=$(cat ci_commit_pins/nccl-cu12.txt)
else
echo "Unexpected CUDA_VERSION ${CUDA_VERSION}"
exit 1
fi

if [[ -n "${NCCL_VERSION}" ]]; then
# NCCL license: https://docs.nvidia.com/deeplearning/nccl/#licenses
# Follow build: https://github.com/NVIDIA/nccl/tree/master?tab=readme-ov-file#build
git clone -b $NCCL_VERSION --depth 1 https://github.com/NVIDIA/nccl.git
pushd nccl
make -j src.build
cp -a build/include/* /usr/local/cuda/include/
cp -a build/lib/* /usr/local/cuda/lib64/
popd
rm -rf nccl
ldconfig
fi
@@ -19,13 +19,6 @@ install_ubuntu() {
apt-get install -y libc++1
apt-get install -y libc++abi1

# Make sure rocm packages from repo.radeon.com have highest priority
cat << EOF > /etc/apt/preferences.d/rocm-pin-600
Package: *
Pin: release o=repo.radeon.com
Pin-Priority: 600
EOF

# Add amdgpu repository
UBUNTU_VERSION_NAME=`cat /etc/os-release | grep UBUNTU_CODENAME | awk -F= '{print $2}'`
echo "deb [arch=amd64] https://repo.radeon.com/amdgpu/${ROCM_VERSION}/ubuntu ${UBUNTU_VERSION_NAME} main" > /etc/apt/sources.list.d/amdgpu.list

@@ -25,7 +25,9 @@ python3 -m pip install meson ninja

###########################
### clone repo
###########################
GIT_SSL_NO_VERIFY=true git clone https://gitlab.freedesktop.org/mesa/drm.git
# TEMPORARY FIX: https://gitlab.freedesktop.org/mesa/drm.git is down until 2025/03/22
# GIT_SSL_NO_VERIFY=true git clone https://gitlab.freedesktop.org/mesa/drm.git
GIT_SSL_NO_VERIFY=true git clone git://anongit.freedesktop.org/mesa/drm
pushd drm

###########################

||||
#!/usr/bin/env bash
|
||||
# Script used only in CD pipeline
|
||||
#!/bin/bash
|
||||
# Script used in CI and CD pipeline
|
||||
|
||||
set -eou pipefail
|
||||
set -ex
|
||||
|
||||
function do_install() {
|
||||
rocm_version=$1
|
||||
rocm_version_nodot=${1//./}
|
||||
# Magma build scripts need `python`
|
||||
ln -sf /usr/bin/python3 /usr/bin/python
|
||||
|
||||
# Version 2.7.2 + ROCm related updates
|
||||
MAGMA_VERSION=a1625ff4d9bc362906bd01f805dbbe12612953f6
|
||||
magma_archive="magma-rocm${rocm_version_nodot}-${MAGMA_VERSION}-1.tar.bz2"
|
||||
ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"')
|
||||
case "$ID" in
|
||||
almalinux)
|
||||
yum install -y gcc-gfortran
|
||||
;;
|
||||
*)
|
||||
echo "No preinstalls to build magma..."
|
||||
;;
|
||||
esac
|
||||
|
||||
rocm_dir="/opt/rocm"
|
||||
(
|
||||
set -x
|
||||
tmp_dir=$(mktemp -d)
|
||||
pushd ${tmp_dir}
|
||||
curl -OLs https://ossci-linux.s3.us-east-1.amazonaws.com/${magma_archive}
|
||||
if tar -xvf "${magma_archive}"
|
||||
then
|
||||
mkdir -p "${rocm_dir}/magma"
|
||||
mv include "${rocm_dir}/magma/include"
|
||||
mv lib "${rocm_dir}/magma/lib"
|
||||
else
|
||||
echo "${magma_archive} not found, skipping magma install"
|
||||
fi
|
||||
popd
|
||||
)
|
||||
}
|
||||
MKLROOT=${MKLROOT:-/opt/conda/envs/py_$ANACONDA_PYTHON_VERSION}
|
||||
|
||||
do_install $1
|
||||
# "install" hipMAGMA into /opt/rocm/magma by copying after build
|
||||
git clone https://bitbucket.org/icl/magma.git
|
||||
pushd magma
|
||||
|
||||
# Version 2.7.2 + ROCm related updates
|
||||
git checkout a1625ff4d9bc362906bd01f805dbbe12612953f6
|
||||
|
||||
cp make.inc-examples/make.inc.hip-gcc-mkl make.inc
|
||||
echo 'LIBDIR += -L$(MKLROOT)/lib' >> make.inc
|
||||
if [[ -f "${MKLROOT}/lib/libmkl_core.a" ]]; then
|
||||
echo 'LIB = -Wl,--start-group -lmkl_gf_lp64 -lmkl_gnu_thread -lmkl_core -Wl,--end-group -lpthread -lstdc++ -lm -lgomp -lhipblas -lhipsparse' >> make.inc
|
||||
fi
|
||||
echo 'LIB += -Wl,--enable-new-dtags -Wl,--rpath,/opt/rocm/lib -Wl,--rpath,$(MKLROOT)/lib -Wl,--rpath,/opt/rocm/magma/lib -ldl' >> make.inc
|
||||
echo 'DEVCCFLAGS += --gpu-max-threads-per-block=256' >> make.inc
|
||||
export PATH="${PATH}:/opt/rocm/bin"
|
||||
if [[ -n "$PYTORCH_ROCM_ARCH" ]]; then
|
||||
amdgpu_targets=`echo $PYTORCH_ROCM_ARCH | sed 's/;/ /g'`
|
||||
else
|
||||
amdgpu_targets=`rocm_agent_enumerator | grep -v gfx000 | sort -u | xargs`
|
||||
fi
|
||||
for arch in $amdgpu_targets; do
|
||||
echo "DEVCCFLAGS += --offload-arch=$arch" >> make.inc
|
||||
done
|
||||
# hipcc with openmp flag may cause isnan() on __device__ not to be found; depending on context, compiler may attempt to match with host definition
|
||||
sed -i 's/^FOPENMP/#FOPENMP/g' make.inc
|
||||
make -f make.gen.hipMAGMA -j $(nproc)
|
||||
LANG=C.UTF-8 make lib/libmagma.so -j $(nproc) MKLROOT="${MKLROOT}"
|
||||
make testing/testing_dgemm -j $(nproc) MKLROOT="${MKLROOT}"
|
||||
popd
|
||||
mv magma /opt/rocm
|
||||
|
||||
@ -2,12 +2,6 @@
|
||||
|
||||
set -ex
|
||||
|
||||
mkdir -p /opt/triton
|
||||
if [ -z "${TRITON}" ] && [ -z "${TRITON_CPU}" ]; then
|
||||
echo "TRITON and TRITON_CPU are not set. Exiting..."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh"
|
||||
|
||||
get_conda_version() {
|
||||
@ -58,7 +52,6 @@ cd triton
|
||||
as_jenkins git checkout ${TRITON_PINNED_COMMIT}
|
||||
as_jenkins git submodule update --init --recursive
|
||||
cd python
|
||||
pip_install pybind11==2.13.6
|
||||
|
||||
# TODO: remove patch setup.py once we have a proper fix for https://github.com/triton-lang/triton/issues/4527
|
||||
as_jenkins sed -i -e 's/https:\/\/tritonlang.blob.core.windows.net\/llvm-builds/https:\/\/oaitriton.blob.core.windows.net\/public\/llvm-builds/g' setup.py
|
||||
@ -67,22 +60,17 @@ if [ -n "${UBUNTU_VERSION}" ] && [ -n "${GCC_VERSION}" ] && [[ "${GCC_VERSION}"
|
||||
# Triton needs at least gcc-9 to build
|
||||
apt-get install -y g++-9
|
||||
|
||||
CXX=g++-9 conda_run python setup.py bdist_wheel
|
||||
CXX=g++-9 pip_install .
|
||||
elif [ -n "${UBUNTU_VERSION}" ] && [ -n "${CLANG_VERSION}" ]; then
|
||||
# Triton needs <filesystem> which surprisingly is not available with clang-9 toolchain
|
||||
add-apt-repository -y ppa:ubuntu-toolchain-r/test
|
||||
apt-get install -y g++-9
|
||||
|
||||
CXX=g++-9 conda_run python setup.py bdist_wheel
|
||||
CXX=g++-9 pip_install .
|
||||
else
|
||||
conda_run python setup.py bdist_wheel
|
||||
pip_install .
|
||||
fi
|
||||
|
||||
# Copy the wheel to /opt for multi stage docker builds
|
||||
cp dist/*.whl /opt/triton
|
||||
# Install the wheel for docker builds that don't use multi stage
|
||||
pip_install dist/*.whl
|
||||
|
||||
if [ -n "${CONDA_CMAKE}" ]; then
|
||||
# TODO: This is to make sure that the same cmake and numpy version from install conda
|
||||
# script is used. Without this step, the newer cmake version (3.25.2) downloaded by
|
||||
|
||||
@ -49,8 +49,6 @@ RUN bash ./install_mkl.sh && rm install_mkl.sh
|
||||
FROM cpu as cuda
|
||||
ADD ./common/install_cuda.sh install_cuda.sh
|
||||
ADD ./common/install_magma.sh install_magma.sh
|
||||
COPY ./common/install_nccl.sh install_nccl.sh
|
||||
COPY ./ci_commit_pins/nccl-cu* /ci_commit_pins/
|
||||
ENV CUDA_HOME /usr/local/cuda
|
||||
|
||||
FROM cuda as cuda11.8
|
||||
@ -74,7 +72,6 @@ RUN bash ./install_magma.sh 12.8
|
||||
RUN ln -sf /usr/local/cuda-12.8 /usr/local/cuda
|
||||
|
||||
FROM cpu as rocm
|
||||
ARG ROCM_VERSION
|
||||
ARG PYTORCH_ROCM_ARCH
|
||||
ENV PYTORCH_ROCM_ARCH ${PYTORCH_ROCM_ARCH}
|
||||
ENV MKLROOT /opt/intel
|
||||
@ -89,11 +86,11 @@ ADD ./common/install_rocm_magma.sh install_rocm_magma.sh
|
||||
# gfortran and python needed for building magma from source for ROCm
|
||||
RUN apt-get update -y && \
|
||||
apt-get install gfortran -y && \
|
||||
apt-get install python3 python-is-python3 -y && \
|
||||
apt-get install python -y && \
|
||||
apt-get clean
|
||||
|
||||
RUN bash ./install_rocm_drm.sh && rm install_rocm_drm.sh
|
||||
RUN bash ./install_rocm_magma.sh ${ROCM_VERSION} && rm install_rocm_magma.sh
|
||||
RUN bash ./install_rocm_magma.sh && rm install_rocm_magma.sh
|
||||
|
||||
FROM ${BASE_TARGET} as final
|
||||
COPY --from=openssl /opt/openssl /opt/openssl
|
||||
|
||||
@ -1,63 +1,83 @@
|
||||
#!/usr/bin/env bash
|
||||
# Script used only in CD pipeline
|
||||
|
||||
set -eoux pipefail
|
||||
set -eou pipefail
|
||||
|
||||
image="$1"
|
||||
shift
|
||||
|
||||
if [ -z "${image}" ]; then
|
||||
echo "Usage: $0 IMAGENAME:ARCHTAG"
|
||||
echo "Usage: $0 IMAGE"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
DOCKER_IMAGE="pytorch/${image}"
|
||||
|
||||
TOPDIR=$(git rev-parse --show-toplevel)
|
||||
|
||||
GPU_ARCH_TYPE=${GPU_ARCH_TYPE:-cpu}
|
||||
GPU_ARCH_VERSION=${GPU_ARCH_VERSION:-}
|
||||
|
||||
WITH_PUSH=${WITH_PUSH:-}
|
||||
|
||||
DOCKER=${DOCKER:-docker}
|
||||
|
||||
# Go from imagename:tag to tag
|
||||
DOCKER_TAG_PREFIX=$(echo "${image}" | awk -F':' '{print $2}')
|
||||
|
||||
GPU_ARCH_VERSION=""
|
||||
if [[ "${DOCKER_TAG_PREFIX}" == cuda* ]]; then
|
||||
# extract cuda version from image name. e.g. manylinux2_28-builder:cuda12.8 returns 12.8
|
||||
GPU_ARCH_VERSION=$(echo "${DOCKER_TAG_PREFIX}" | awk -F'cuda' '{print $2}')
|
||||
elif [[ "${DOCKER_TAG_PREFIX}" == rocm* ]]; then
|
||||
# extract rocm version from image name. e.g. manylinux2_28-builder:rocm6.2.4 returns 6.2.4
|
||||
GPU_ARCH_VERSION=$(echo "${DOCKER_TAG_PREFIX}" | awk -F'rocm' '{print $2}')
|
||||
fi
|
||||
|
||||
case ${DOCKER_TAG_PREFIX} in
|
||||
case ${GPU_ARCH_TYPE} in
|
||||
cpu)
|
||||
BASE_TARGET=cpu
|
||||
DOCKER_TAG=cpu
|
||||
GPU_IMAGE=ubuntu:20.04
|
||||
DOCKER_GPU_BUILD_ARG=""
|
||||
;;
|
||||
cuda*)
|
||||
cuda)
|
||||
BASE_TARGET=cuda${GPU_ARCH_VERSION}
|
||||
DOCKER_TAG=cuda${GPU_ARCH_VERSION}
|
||||
GPU_IMAGE=ubuntu:20.04
|
||||
DOCKER_GPU_BUILD_ARG=""
|
||||
;;
|
||||
rocm*)
|
||||
rocm)
|
||||
BASE_TARGET=rocm
|
||||
GPU_IMAGE=rocm/dev-ubuntu-22.04:${GPU_ARCH_VERSION}-complete
|
||||
DOCKER_TAG=rocm${GPU_ARCH_VERSION}
|
||||
GPU_IMAGE=rocm/dev-ubuntu-20.04:${GPU_ARCH_VERSION}-complete
|
||||
PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
|
||||
DOCKER_GPU_BUILD_ARG="--build-arg PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH} --build-arg ROCM_VERSION=${GPU_ARCH_VERSION}"
|
||||
DOCKER_GPU_BUILD_ARG="--build-arg PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH}"
|
||||
;;
|
||||
*)
|
||||
echo "ERROR: Unrecognized DOCKER_TAG_PREFIX: ${DOCKER_TAG_PREFIX}"
|
||||
echo "ERROR: Unrecognized GPU_ARCH_TYPE: ${GPU_ARCH_TYPE}"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
tmp_tag=$(basename "$(mktemp -u)" | tr '[:upper:]' '[:lower:]')
|
||||
|
||||
DOCKER_BUILDKIT=1 ${DOCKER} build \
|
||||
--target final \
|
||||
${DOCKER_GPU_BUILD_ARG} \
|
||||
--build-arg "GPU_IMAGE=${GPU_IMAGE}" \
|
||||
--build-arg "BASE_TARGET=${BASE_TARGET}" \
|
||||
-t "${tmp_tag}" \
|
||||
$@ \
|
||||
-f "${TOPDIR}/.ci/docker/libtorch/Dockerfile" \
|
||||
"${TOPDIR}/.ci/docker/"
|
||||
(
|
||||
set -x
|
||||
DOCKER_BUILDKIT=1 ${DOCKER} build \
|
||||
--target final \
|
||||
${DOCKER_GPU_BUILD_ARG} \
|
||||
--build-arg "GPU_IMAGE=${GPU_IMAGE}" \
|
||||
--build-arg "BASE_TARGET=${BASE_TARGET}" \
|
||||
-t "${DOCKER_IMAGE}" \
|
||||
$@ \
|
||||
-f "${TOPDIR}/.ci/docker/libtorch/Dockerfile" \
|
||||
"${TOPDIR}/.ci/docker/"
|
||||
|
||||
)
|
||||
|
||||
GITHUB_REF=${GITHUB_REF:-$(git symbolic-ref -q HEAD || git describe --tags --exact-match)}
|
||||
GIT_BRANCH_NAME=${GITHUB_REF##*/}
|
||||
GIT_COMMIT_SHA=${GITHUB_SHA:-$(git rev-parse HEAD)}
|
||||
DOCKER_IMAGE_BRANCH_TAG=${DOCKER_IMAGE}-${GIT_BRANCH_NAME}
|
||||
DOCKER_IMAGE_SHA_TAG=${DOCKER_IMAGE}-${GIT_COMMIT_SHA}
|
||||
|
||||
if [[ "${WITH_PUSH}" == true ]]; then
|
||||
(
|
||||
set -x
|
||||
${DOCKER} push "${DOCKER_IMAGE}"
|
||||
if [[ -n ${GITHUB_REF} ]]; then
|
||||
${DOCKER} tag ${DOCKER_IMAGE} ${DOCKER_IMAGE_BRANCH_TAG}
|
||||
${DOCKER} tag ${DOCKER_IMAGE} ${DOCKER_IMAGE_SHA_TAG}
|
||||
${DOCKER} push "${DOCKER_IMAGE_BRANCH_TAG}"
|
||||
${DOCKER} push "${DOCKER_IMAGE_SHA_TAG}"
|
||||
fi
|
||||
)
|
||||
fi
|
||||
|
||||
@ -30,9 +30,7 @@ RUN bash ./install_python.sh && rm install_python.sh /opt/requirements-ci.txt
|
||||
# Install cuda and cudnn
|
||||
ARG CUDA_VERSION
|
||||
COPY ./common/install_cuda.sh install_cuda.sh
|
||||
COPY ./common/install_nccl.sh install_nccl.sh
|
||||
COPY ./ci_commit_pins/nccl-cu* /ci_commit_pins/
|
||||
RUN bash ./install_cuda.sh ${CUDA_VERSION} && rm install_cuda.sh install_nccl.sh /ci_commit_pins/nccl-cu*
|
||||
RUN bash ./install_cuda.sh ${CUDA_VERSION} && rm install_cuda.sh
|
||||
ENV DESIRED_CUDA ${CUDA_VERSION}
|
||||
ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:$PATH
|
||||
|
||||
|
||||
@ -64,9 +64,7 @@ FROM base as cuda
|
||||
ARG BASE_CUDA_VERSION=10.2
|
||||
# Install CUDA
|
||||
ADD ./common/install_cuda.sh install_cuda.sh
|
||||
COPY ./common/install_nccl.sh install_nccl.sh
|
||||
COPY ./ci_commit_pins/nccl-cu* /ci_commit_pins/
|
||||
RUN bash ./install_cuda.sh ${BASE_CUDA_VERSION} && rm install_cuda.sh install_nccl.sh /ci_commit_pins/nccl-cu*
|
||||
RUN bash ./install_cuda.sh ${BASE_CUDA_VERSION} && rm install_cuda.sh
|
||||
|
||||
FROM base as intel
|
||||
# MKL
|
||||
@ -197,6 +195,6 @@ RUN bash ./install_rocm_drm.sh && rm install_rocm_drm.sh
|
||||
# cmake3 is needed for the MIOpen build
|
||||
RUN ln -sf /usr/local/bin/cmake /usr/bin/cmake3
|
||||
ADD ./common/install_rocm_magma.sh install_rocm_magma.sh
|
||||
RUN bash ./install_rocm_magma.sh ${ROCM_VERSION} && rm install_rocm_magma.sh
|
||||
RUN bash ./install_rocm_magma.sh && rm install_rocm_magma.sh
|
||||
ADD ./common/install_miopen.sh install_miopen.sh
|
||||
RUN bash ./install_miopen.sh ${ROCM_VERSION} && rm install_miopen.sh
|
||||
|
||||
@ -36,9 +36,7 @@ FROM base as cuda
|
||||
ARG BASE_CUDA_VERSION=11.8
|
||||
# Install CUDA
|
||||
ADD ./common/install_cuda.sh install_cuda.sh
|
||||
COPY ./common/install_nccl.sh install_nccl.sh
|
||||
COPY ./ci_commit_pins/nccl-cu* /ci_commit_pins/
|
||||
RUN bash ./install_cuda.sh ${BASE_CUDA_VERSION} && rm install_cuda.sh install_nccl.sh ci_commit_pins/nccl-cu*
|
||||
RUN bash ./install_cuda.sh ${BASE_CUDA_VERSION} && rm install_cuda.sh
|
||||
|
||||
FROM base as intel
|
||||
# MKL
|
||||
@ -160,7 +158,7 @@ ADD ./common/install_rocm_drm.sh install_rocm_drm.sh
|
||||
RUN bash ./install_rocm_drm.sh && rm install_rocm_drm.sh
|
||||
ENV MKLROOT /opt/intel
|
||||
ADD ./common/install_rocm_magma.sh install_rocm_magma.sh
|
||||
RUN bash ./install_rocm_magma.sh ${ROCM_VERSION} && rm install_rocm_magma.sh
|
||||
RUN bash ./install_rocm_magma.sh && rm install_rocm_magma.sh
|
||||
ADD ./common/install_miopen.sh install_miopen.sh
|
||||
RUN bash ./install_miopen.sh ${ROCM_VERSION} && rm install_miopen.sh
|
||||
|
||||
|
||||
@ -67,9 +67,7 @@ FROM base as cuda
|
||||
ARG BASE_CUDA_VERSION
|
||||
# Install CUDA
|
||||
ADD ./common/install_cuda_aarch64.sh install_cuda_aarch64.sh
|
||||
COPY ./common/install_nccl.sh install_nccl.sh
|
||||
COPY ./ci_commit_pins/nccl-cu* /ci_commit_pins/
|
||||
RUN bash ./install_cuda_aarch64.sh ${BASE_CUDA_VERSION} && rm install_cuda_aarch64.sh install_nccl.sh ci_commit_pins/nccl-cu*
|
||||
RUN bash ./install_cuda_aarch64.sh ${BASE_CUDA_VERSION} && rm install_cuda_aarch64.sh
|
||||
|
||||
FROM base as magma
|
||||
ARG BASE_CUDA_VERSION
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
#!/usr/bin/env bash
|
||||
# Script used only in CD pipeline
|
||||
|
||||
set -exou pipefail
|
||||
set -eou pipefail
|
||||
|
||||
TOPDIR=$(git rev-parse --show-toplevel)
|
||||
|
||||
@ -9,110 +9,152 @@ image="$1"
|
||||
shift
|
||||
|
||||
if [ -z "${image}" ]; then
|
||||
echo "Usage: $0 IMAGE:ARCHTAG"
|
||||
echo "Usage: $0 IMAGE"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Go from imagename:tag to tag
|
||||
DOCKER_TAG_PREFIX=$(echo "${image}" | awk -F':' '{print $2}')
|
||||
DOCKER_IMAGE="pytorch/${image}"
|
||||
|
||||
GPU_ARCH_VERSION=""
|
||||
if [[ "${DOCKER_TAG_PREFIX}" == cuda* ]]; then
|
||||
# extract cuda version from image name. e.g. manylinux2_28-builder:cuda12.8 returns 12.8
|
||||
GPU_ARCH_VERSION=$(echo "${DOCKER_TAG_PREFIX}" | awk -F'cuda' '{print $2}')
|
||||
elif [[ "${DOCKER_TAG_PREFIX}" == rocm* ]]; then
|
||||
# extract rocm version from image name. e.g. manylinux2_28-builder:rocm6.2.4 returns 6.2.4
|
||||
GPU_ARCH_VERSION=$(echo "${DOCKER_TAG_PREFIX}" | awk -F'rocm' '{print $2}')
|
||||
fi
|
||||
DOCKER_REGISTRY="${DOCKER_REGISTRY:-docker.io}"
|
||||
|
||||
GPU_ARCH_TYPE=${GPU_ARCH_TYPE:-cpu}
|
||||
GPU_ARCH_VERSION=${GPU_ARCH_VERSION:-}
|
||||
MANY_LINUX_VERSION=${MANY_LINUX_VERSION:-}
|
||||
DOCKERFILE_SUFFIX=${DOCKERFILE_SUFFIX:-}
|
||||
WITH_PUSH=${WITH_PUSH:-}
|
||||
|
||||
case ${image} in
|
||||
manylinux2_28-builder:cpu)
|
||||
case ${GPU_ARCH_TYPE} in
|
||||
cpu)
|
||||
TARGET=cpu_final
|
||||
DOCKER_TAG=cpu
|
||||
GPU_IMAGE=centos:7
|
||||
DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=9"
|
||||
;;
|
||||
cpu-manylinux_2_28)
|
||||
TARGET=cpu_final
|
||||
DOCKER_TAG=cpu
|
||||
GPU_IMAGE=amd64/almalinux:8
|
||||
DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=11"
|
||||
MANY_LINUX_VERSION="2_28"
|
||||
;;
|
||||
manylinuxaarch64-builder:cpu-aarch64)
|
||||
cpu-aarch64)
|
||||
TARGET=final
|
||||
DOCKER_TAG=cpu-aarch64
|
||||
GPU_IMAGE=arm64v8/centos:7
|
||||
DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=10"
|
||||
MANY_LINUX_VERSION="aarch64"
|
||||
;;
|
||||
manylinux2_28_aarch64-builder:cpu-aarch64)
|
||||
cpu-aarch64-2_28)
|
||||
TARGET=final
|
||||
DOCKER_TAG=cpu-aarch64
|
||||
GPU_IMAGE=arm64v8/almalinux:8
|
||||
DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=11 --build-arg NINJA_VERSION=1.12.1"
|
||||
MANY_LINUX_VERSION="2_28_aarch64"
|
||||
;;
|
||||
manylinuxcxx11-abi-builder:cpu-cxx11-abi)
|
||||
cpu-cxx11-abi)
|
||||
TARGET=final
|
||||
DOCKER_TAG=cpu-cxx11-abi
|
||||
GPU_IMAGE=""
|
||||
DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=9"
|
||||
MANY_LINUX_VERSION="cxx11-abi"
|
||||
;;
|
||||
manylinuxs390x-builder:cpu-s390x)
|
||||
cpu-s390x)
|
||||
TARGET=final
|
||||
DOCKER_TAG=cpu-s390x
|
||||
GPU_IMAGE=s390x/almalinux:8
|
||||
DOCKER_GPU_BUILD_ARG=""
|
||||
MANY_LINUX_VERSION="s390x"
|
||||
;;
|
||||
manylinux2_28-builder:cuda*)
|
||||
cuda)
|
||||
TARGET=cuda_final
|
||||
DOCKER_TAG=cuda${GPU_ARCH_VERSION}
|
||||
# Keep this up to date with the minimum version of CUDA we currently support
|
||||
GPU_IMAGE=centos:7
|
||||
DOCKER_GPU_BUILD_ARG="--build-arg BASE_CUDA_VERSION=${GPU_ARCH_VERSION} --build-arg DEVTOOLSET_VERSION=9"
|
||||
;;
|
||||
cuda-manylinux_2_28)
|
||||
TARGET=cuda_final
|
||||
DOCKER_TAG=cuda${GPU_ARCH_VERSION}
|
||||
GPU_IMAGE=amd64/almalinux:8
|
||||
DOCKER_GPU_BUILD_ARG="--build-arg BASE_CUDA_VERSION=${GPU_ARCH_VERSION} --build-arg DEVTOOLSET_VERSION=11"
|
||||
MANY_LINUX_VERSION="2_28"
|
||||
;;
|
||||
manylinuxaarch64-builder:cuda*)
|
||||
cuda-aarch64)
|
||||
TARGET=cuda_final
|
||||
DOCKER_TAG=cuda${GPU_ARCH_VERSION}
|
||||
GPU_IMAGE=arm64v8/centos:7
|
||||
DOCKER_GPU_BUILD_ARG="--build-arg BASE_CUDA_VERSION=${GPU_ARCH_VERSION} --build-arg DEVTOOLSET_VERSION=11"
|
||||
MANY_LINUX_VERSION="aarch64"
|
||||
DOCKERFILE_SUFFIX="_cuda_aarch64"
|
||||
;;
|
||||
manylinux2_28-builder:rocm*)
|
||||
rocm|rocm-manylinux_2_28)
|
||||
TARGET=rocm_final
|
||||
DOCKER_TAG=rocm${GPU_ARCH_VERSION}
|
||||
GPU_IMAGE=rocm/dev-centos-7:${GPU_ARCH_VERSION}-complete
|
||||
DEVTOOLSET_VERSION="9"
|
||||
MANY_LINUX_VERSION="2_28"
|
||||
DEVTOOLSET_VERSION="11"
|
||||
GPU_IMAGE=rocm/dev-almalinux-8:${GPU_ARCH_VERSION}-complete
|
||||
if [ ${GPU_ARCH_TYPE} == "rocm-manylinux_2_28" ]; then
|
||||
MANY_LINUX_VERSION="2_28"
|
||||
DEVTOOLSET_VERSION="11"
|
||||
GPU_IMAGE=rocm/dev-almalinux-8:${GPU_ARCH_VERSION}-complete
|
||||
fi
|
||||
PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
|
||||
DOCKER_GPU_BUILD_ARG="--build-arg ROCM_VERSION=${GPU_ARCH_VERSION} --build-arg PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH} --build-arg DEVTOOLSET_VERSION=${DEVTOOLSET_VERSION}"
|
||||
;;
|
||||
manylinux2_28-builder:xpu)
|
||||
xpu)
|
||||
TARGET=xpu_final
|
||||
DOCKER_TAG=xpu
|
||||
GPU_IMAGE=amd64/almalinux:8
|
||||
DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=11"
|
||||
MANY_LINUX_VERSION="2_28"
|
||||
;;
|
||||
*)
|
||||
echo "ERROR: Unrecognized image name: ${image}"
|
||||
echo "ERROR: Unrecognized GPU_ARCH_TYPE: ${GPU_ARCH_TYPE}"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
IMAGES=''
|
||||
|
||||
if [[ -n ${MANY_LINUX_VERSION} && -z ${DOCKERFILE_SUFFIX} ]]; then
|
||||
DOCKERFILE_SUFFIX=_${MANY_LINUX_VERSION}
|
||||
fi
|
||||
# Only activate this if in CI
|
||||
if [ "$(uname -m)" != "s390x" ] && [ -v CI ]; then
|
||||
# TODO: Remove LimitNOFILE=1048576 patch once https://github.com/pytorch/test-infra/issues/5712
|
||||
# is resolved. This patch is required in order to fix timing out of Docker build on Amazon Linux 2023.
|
||||
sudo sed -i s/LimitNOFILE=infinity/LimitNOFILE=1048576/ /usr/lib/systemd/system/docker.service
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl restart docker
|
||||
(
|
||||
set -x
|
||||
|
||||
# Only activate this if in CI
|
||||
if [ "$(uname -m)" != "s390x" ] && [ -v CI ]; then
|
||||
# TODO: Remove LimitNOFILE=1048576 patch once https://github.com/pytorch/test-infra/issues/5712
|
||||
# is resolved. This patch is required in order to fix timing out of Docker build on Amazon Linux 2023.
|
||||
sudo sed -i s/LimitNOFILE=infinity/LimitNOFILE=1048576/ /usr/lib/systemd/system/docker.service
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl restart docker
|
||||
fi
|
||||
|
||||
DOCKER_BUILDKIT=1 docker build \
|
||||
${DOCKER_GPU_BUILD_ARG} \
|
||||
--build-arg "GPU_IMAGE=${GPU_IMAGE}" \
|
||||
--target "${TARGET}" \
|
||||
-t "${DOCKER_IMAGE}" \
|
||||
$@ \
|
||||
-f "${TOPDIR}/.ci/docker/manywheel/Dockerfile${DOCKERFILE_SUFFIX}" \
|
||||
"${TOPDIR}/.ci/docker/"
|
||||
)
|
||||
|
||||
GITHUB_REF=${GITHUB_REF:-"dev")}
|
||||
GIT_BRANCH_NAME=${GITHUB_REF##*/}
|
||||
GIT_COMMIT_SHA=${GITHUB_SHA:-$(git rev-parse HEAD)}
|
||||
DOCKER_IMAGE_BRANCH_TAG=${DOCKER_IMAGE}-${GIT_BRANCH_NAME}
|
||||
DOCKER_IMAGE_SHA_TAG=${DOCKER_IMAGE}-${GIT_COMMIT_SHA}
|
||||
|
||||
if [[ "${WITH_PUSH}" == true ]]; then
|
||||
(
|
||||
set -x
|
||||
docker push "${DOCKER_IMAGE}"
|
||||
if [[ -n ${GITHUB_REF} ]]; then
|
||||
docker tag ${DOCKER_IMAGE} ${DOCKER_IMAGE_BRANCH_TAG}
|
||||
docker tag ${DOCKER_IMAGE} ${DOCKER_IMAGE_SHA_TAG}
|
||||
docker push "${DOCKER_IMAGE_BRANCH_TAG}"
|
||||
docker push "${DOCKER_IMAGE_SHA_TAG}"
|
||||
fi
|
||||
)
|
||||
fi
|
||||
|
||||
tmp_tag=$(basename "$(mktemp -u)" | tr '[:upper:]' '[:lower:]')
|
||||
|
||||
DOCKER_BUILDKIT=1 docker build \
|
||||
${DOCKER_GPU_BUILD_ARG} \
|
||||
--build-arg "GPU_IMAGE=${GPU_IMAGE}" \
|
||||
--target "${TARGET}" \
|
||||
-t "${tmp_tag}" \
|
||||
$@ \
|
||||
-f "${TOPDIR}/.ci/docker/manywheel/Dockerfile${DOCKERFILE_SUFFIX}" \
|
||||
"${TOPDIR}/.ci/docker/"
|
||||
|
||||
@ -1,20 +1,15 @@
|
||||
sphinx==5.3.0
|
||||
#Description: This is used to generate PyTorch docs
|
||||
#Pinned versions: 5.3.0
|
||||
-e git+https://github.com/pytorch/pytorch_sphinx_theme.git@a98ffecb792d50df495be401becbf5c414421423#egg=pytorch_sphinx_theme2
|
||||
-e git+https://github.com/pytorch/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme
|
||||
|
||||
# TODO: sphinxcontrib.katex 0.9.0 adds a local KaTeX server to speed up pre-rendering
|
||||
# but it doesn't seem to work and hangs around idly. The initial thought is probably
|
||||
# something related to Docker setup. We can investigate this later
|
||||
|
||||
sphinxcontrib.katex==0.8.6
|
||||
#Description: This is used to generate PyTorch docs
|
||||
#Pinned versions: 0.8.6
|
||||
|
||||
sphinxext-opengraph==0.9.1
|
||||
#Description: This is used to generate PyTorch docs
|
||||
#Pinned versions: 0.9.1
|
||||
|
||||
matplotlib==3.5.3
|
||||
#Description: This is used to generate PyTorch docs
|
||||
#Pinned versions: 3.5.3
|
||||
@ -51,6 +46,5 @@ myst-nb==0.17.2
|
||||
# The following are required to build torch.distributed.elastic.rendezvous.etcd* docs
|
||||
python-etcd==0.4.5
|
||||
sphinx-copybutton==0.5.0
|
||||
sphinx-design==0.4.0
|
||||
sphinxcontrib-mermaid==1.0.0
|
||||
sphinx-panels==0.4.1
|
||||
myst-parser==0.18.1
|
||||
|
||||
@ -2,7 +2,7 @@ ARG UBUNTU_VERSION
|
||||
ARG CUDA_VERSION
|
||||
ARG IMAGE_NAME
|
||||
|
||||
FROM ${IMAGE_NAME} as base
|
||||
FROM ${IMAGE_NAME}
|
||||
|
||||
ARG UBUNTU_VERSION
|
||||
ARG CUDA_VERSION
|
||||
@ -90,20 +90,14 @@ RUN if [ -n "${CMAKE_VERSION}" ]; then bash ./install_cmake.sh; fi
|
||||
RUN rm install_cmake.sh
|
||||
|
||||
ARG TRITON
|
||||
|
||||
FROM base as triton-builder
|
||||
# Install triton, this needs to be done before sccache because the latter will
|
||||
# try to reach out to S3, which docker build runners don't have access
|
||||
COPY ./common/install_triton.sh install_triton.sh
|
||||
COPY ./common/common_utils.sh common_utils.sh
|
||||
COPY ci_commit_pins/triton.txt triton.txt
|
||||
COPY triton_version.txt triton_version.txt
|
||||
RUN bash ./install_triton.sh
|
||||
|
||||
FROM base as final
|
||||
COPY --from=triton-builder /opt/triton /opt/triton
|
||||
RUN if [ -n "${TRITON}" ]; then pip install /opt/triton/*.whl; chown -R jenkins:jenkins /opt/conda; fi
|
||||
RUN rm -rf /opt/triton
|
||||
RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi
|
||||
RUN rm install_triton.sh common_utils.sh triton.txt triton_version.txt
|
||||
|
||||
ARG HALIDE
|
||||
# Build and install halide
|
||||
@ -158,16 +152,6 @@ COPY ./common/install_cusparselt.sh install_cusparselt.sh
|
||||
RUN bash install_cusparselt.sh
|
||||
RUN rm install_cusparselt.sh
|
||||
|
||||
# Install NCCL
|
||||
ARG CUDA_VERSION
|
||||
COPY ./common/install_nccl.sh install_nccl.sh
|
||||
COPY ./ci_commit_pins/nccl-cu* /ci_commit_pins/
|
||||
RUN bash install_nccl.sh
|
||||
RUN rm install_nccl.sh /ci_commit_pins/nccl-cu*
|
||||
ENV USE_SYSTEM_NCCL=1
|
||||
ENV NCCL_INCLUDE_DIR="/usr/local/cuda/include/"
|
||||
ENV NCCL_LIB_DIR="/usr/local/cuda/lib64/"
|
||||
|
||||
# Install CUDSS
|
||||
ARG CUDA_VERSION
|
||||
COPY ./common/install_cudss.sh install_cudss.sh
|
||||
|
||||
@ -63,7 +63,7 @@ COPY ./common/install_rocm.sh install_rocm.sh
|
||||
RUN bash ./install_rocm.sh
|
||||
RUN rm install_rocm.sh
|
||||
COPY ./common/install_rocm_magma.sh install_rocm_magma.sh
|
||||
RUN bash ./install_rocm_magma.sh ${ROCM_VERSION}
|
||||
RUN bash ./install_rocm_magma.sh
|
||||
RUN rm install_rocm_magma.sh
|
||||
ADD ./common/install_miopen.sh install_miopen.sh
|
||||
RUN bash ./install_miopen.sh ${ROCM_VERSION} && rm install_miopen.sh
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
ARG UBUNTU_VERSION
|
||||
|
||||
FROM ubuntu:${UBUNTU_VERSION} as base
|
||||
FROM ubuntu:${UBUNTU_VERSION}
|
||||
|
||||
ARG UBUNTU_VERSION
|
||||
|
||||
@ -52,16 +52,9 @@ RUN bash ./install_lcov.sh && rm install_lcov.sh
|
||||
# Install cuda and cudnn
|
||||
ARG CUDA_VERSION
|
||||
COPY ./common/install_cuda.sh install_cuda.sh
|
||||
COPY ./common/install_nccl.sh install_nccl.sh
|
||||
COPY ./ci_commit_pins/nccl-cu* /ci_commit_pins/
|
||||
RUN bash ./install_cuda.sh ${CUDA_VERSION} && rm install_cuda.sh install_nccl.sh /ci_commit_pins/nccl-cu*
|
||||
RUN bash ./install_cuda.sh ${CUDA_VERSION} && rm install_cuda.sh
|
||||
ENV DESIRED_CUDA ${CUDA_VERSION}
|
||||
ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:$PATH
|
||||
# No effect if cuda not installed
|
||||
ENV USE_SYSTEM_NCCL=1
|
||||
ENV NCCL_INCLUDE_DIR="/usr/local/cuda/include/"
|
||||
ENV NCCL_LIB_DIR="/usr/local/cuda/lib64/"
|
||||
|
||||
|
||||
# (optional) Install UCC
|
||||
ARG UCX_COMMIT
|
||||
@ -115,21 +108,20 @@ RUN if [ -n "${INDUCTOR_BENCHMARKS}" ]; then bash ./install_inductor_benchmark_d
|
||||
RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface.txt
|
||||
|
||||
ARG TRITON
|
||||
ARG TRITON_CPU
|
||||
|
||||
# Create a separate stage for building Triton and Triton-CPU. install_triton
|
||||
# will check for the presence of env vars
|
||||
FROM base as triton-builder
|
||||
# Install triton, this needs to be done before sccache because the latter will
|
||||
# try to reach out to S3, which docker build runners don't have access
|
||||
COPY ./common/install_triton.sh install_triton.sh
|
||||
COPY ./common/common_utils.sh common_utils.sh
|
||||
COPY ci_commit_pins/triton.txt triton.txt
|
||||
COPY ci_commit_pins/triton-cpu.txt triton-cpu.txt
|
||||
RUN bash ./install_triton.sh
|
||||
RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi
|
||||
RUN rm install_triton.sh common_utils.sh triton.txt
|
||||
|
||||
FROM base as final
|
||||
COPY --from=triton-builder /opt/triton /opt/triton
|
||||
RUN if [ -n "${TRITON}" ] || [ -n "${TRITON_CPU}" ]; then pip install /opt/triton/*.whl; chown -R jenkins:jenkins /opt/conda; fi
|
||||
RUN rm -rf /opt/triton
|
||||
ARG TRITON_CPU
|
||||
COPY ./common/install_triton.sh install_triton.sh
|
||||
COPY ./common/common_utils.sh common_utils.sh
|
||||
COPY ci_commit_pins/triton-cpu.txt triton-cpu.txt
|
||||
RUN if [ -n "${TRITON_CPU}" ]; then bash ./install_triton.sh; fi
|
||||
RUN rm install_triton.sh common_utils.sh triton-cpu.txt
|
||||
|
||||
ARG EXECUTORCH
|
||||
# Build and install executorch
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
SHELL=/usr/bin/env bash
|
||||
|
||||
DOCKER_CMD ?= docker
|
||||
DESIRED_ROCM ?= 6.4
|
||||
DESIRED_ROCM ?= 6.3
|
||||
DESIRED_ROCM_SHORT = $(subst .,,$(DESIRED_ROCM))
|
||||
PACKAGE_NAME = magma-rocm
|
||||
# inherit this from underlying docker image, do not pass this env var to docker
|
||||
@ -16,7 +16,6 @@ DOCKER_RUN = set -eou pipefail; ${DOCKER_CMD} run --rm -i \
|
||||
magma-rocm/build_magma.sh
|
||||
|
||||
.PHONY: all
|
||||
all: magma-rocm64
|
||||
all: magma-rocm63
|
||||
all: magma-rocm624
|
||||
|
||||
@ -25,11 +24,6 @@ clean:
|
||||
$(RM) -r magma-*
|
||||
$(RM) -r output
|
||||
|
||||
.PHONY: magma-rocm64
|
||||
magma-rocm64: DESIRED_ROCM := 6.4
|
||||
magma-rocm64:
|
||||
$(DOCKER_RUN)
|
||||
|
||||
.PHONY: magma-rocm63
|
||||
magma-rocm63: DESIRED_ROCM := 6.3
|
||||
magma-rocm63:
|
||||
|
||||
@ -301,18 +301,6 @@ else
|
||||
fi
|
||||
pip_install_whl "$(echo dist/*.whl)"
|
||||
|
||||
if [[ "$BUILD_ENVIRONMENT" == *xpu* ]]; then
|
||||
echo "Checking that xpu is compiled"
|
||||
pushd dist/
|
||||
if python -c 'import torch; exit(0 if torch.xpu._is_compiled() else 1)'; then
|
||||
echo "XPU support is compiled in."
|
||||
else
|
||||
echo "XPU support is NOT compiled in."
|
||||
exit 1
|
||||
fi
|
||||
popd
|
||||
fi
|
||||
|
||||
# TODO: I'm not sure why, but somehow we lose verbose commands
|
||||
set -x
|
||||
|
||||
|
||||
@ -59,6 +59,16 @@ else
|
||||
export install_root="$(dirname $(which python))/../lib/python${py_dot}/site-packages/torch/"
|
||||
fi
|
||||
|
||||
###############################################################################
|
||||
# Setup XPU ENV
|
||||
###############################################################################
|
||||
if [[ "$DESIRED_CUDA" == 'xpu' ]]; then
|
||||
set +u
|
||||
# Refer https://www.intel.com/content/www/us/en/developer/articles/tool/pytorch-prerequisites-for-intel-gpus.html
|
||||
source /opt/intel/oneapi/compiler/latest/env/vars.sh
|
||||
source /opt/intel/oneapi/pti/latest/env/vars.sh
|
||||
fi
|
||||
|
||||
###############################################################################
|
||||
# Check GCC ABI
|
||||
###############################################################################
|
||||
@ -216,14 +226,6 @@ else
|
||||
fi
|
||||
fi
|
||||
|
||||
###############################################################################
|
||||
# Check XPU configured correctly
|
||||
###############################################################################
|
||||
if [[ "$DESIRED_CUDA" == 'xpu' && "$PACKAGE_TYPE" != 'libtorch' ]]; then
|
||||
echo "Checking that xpu is compiled"
|
||||
python -c 'import torch; exit(0 if torch.xpu._is_compiled() else 1)'
|
||||
fi
|
||||
|
||||
###############################################################################
|
||||
# Check CUDA configured correctly
|
||||
###############################################################################
|
||||
@ -309,12 +311,6 @@ if [[ "$(uname)" == 'Linux' && "$PACKAGE_TYPE" == 'manywheel' ]]; then
|
||||
# Per https://gcc.gnu.org/onlinedocs/gcc/C_002b_002b-Dialect-Options.html gcc-11 is ABI16
|
||||
# Though manylinux_2.28 should have been build with gcc-14, per
|
||||
# https://github.com/pypa/manylinux?tab=readme-ov-file#manylinux_2_28-almalinux-8-based
|
||||
# On s390x gcc 14 is used because it contains fix for interaction
|
||||
# between precompiled headers and vectorization builtins.
|
||||
# This fix is not available in earlier gcc versions.
|
||||
# gcc-14 uses ABI19.
|
||||
if [[ "$(uname -m)" != "s390x" ]]; then
|
||||
python -c "import torch; exit(0 if torch._C._PYBIND11_BUILD_ABI == '_cxxabi1016' else 1)"
|
||||
fi
|
||||
python -c "import torch; exit(0 if torch._C._PYBIND11_BUILD_ABI == '_cxxabi1016' else 1)"
|
||||
popd
|
||||
fi
|
||||
|
||||
@ -33,15 +33,56 @@ if which sccache > /dev/null; then
|
||||
export PATH="${tmp_dir}:$PATH"
|
||||
fi
|
||||
|
||||
print_cmake_info
|
||||
if [[ ${BUILD_ENVIRONMENT} == *"distributed"* ]]; then
|
||||
# Needed for inductor benchmarks, as lots of HF networks make `torch.distribtued` calls
|
||||
USE_DISTRIBUTED=1 USE_OPENMP=1 WERROR=1 python setup.py bdist_wheel
|
||||
else
|
||||
cross_compile_arm64() {
|
||||
# Cross compilation for arm64
|
||||
# Explicitly set USE_DISTRIBUTED=0 to align with the default build config on mac. This also serves as the sole CI config that tests
|
||||
# that building with USE_DISTRIBUTED=0 works at all. See https://github.com/pytorch/pytorch/issues/86448
|
||||
USE_DISTRIBUTED=0 CMAKE_OSX_ARCHITECTURES=arm64 MACOSX_DEPLOYMENT_TARGET=11.0 USE_MKLDNN=OFF USE_QNNPACK=OFF WERROR=1 BUILD_TEST=OFF USE_PYTORCH_METAL=1 python setup.py bdist_wheel
|
||||
}
|
||||
|
||||
compile_arm64() {
|
||||
# Compilation for arm64
|
||||
# TODO: Compile with OpenMP support (but this causes CI regressions as cross-compilation were done with OpenMP disabled)
|
||||
USE_DISTRIBUTED=0 USE_OPENMP=1 MACOSX_DEPLOYMENT_TARGET=11.0 WERROR=1 BUILD_TEST=OFF USE_PYTORCH_METAL=1 python setup.py bdist_wheel
|
||||
}
|
||||
|
||||
compile_x86_64() {
|
||||
USE_DISTRIBUTED=0 WERROR=1 python setup.py bdist_wheel --plat-name=macosx_10_9_x86_64
|
||||
}
|
||||
|
||||
build_lite_interpreter() {
|
||||
echo "Testing libtorch (lite interpreter)."
|
||||
|
||||
CPP_BUILD="$(pwd)/../cpp_build"
|
||||
# Ensure the removal of the tmp directory
|
||||
trap 'rm -rfv ${CPP_BUILD}' EXIT
|
||||
rm -rf "${CPP_BUILD}"
|
||||
mkdir -p "${CPP_BUILD}/caffe2"
|
||||
|
||||
# It looks libtorch need to be built in "${CPP_BUILD}/caffe2 folder.
|
||||
BUILD_LIBTORCH_PY=$PWD/tools/build_libtorch.py
|
||||
pushd "${CPP_BUILD}/caffe2" || exit
|
||||
VERBOSE=1 DEBUG=1 python "${BUILD_LIBTORCH_PY}"
|
||||
popd || exit
|
||||
|
||||
"${CPP_BUILD}/caffe2/build/bin/test_lite_interpreter_runtime"
|
||||
}
|
||||
|
||||
print_cmake_info
|
||||
|
||||
if [[ ${BUILD_ENVIRONMENT} = *arm64* ]]; then
|
||||
if [[ $(uname -m) == "arm64" ]]; then
|
||||
compile_arm64
|
||||
else
|
||||
cross_compile_arm64
|
||||
fi
|
||||
elif [[ ${BUILD_ENVIRONMENT} = *lite-interpreter* ]]; then
|
||||
export BUILD_LITE_INTERPRETER=1
|
||||
build_lite_interpreter
|
||||
else
|
||||
compile_x86_64
|
||||
fi
|
||||
|
||||
if which sccache > /dev/null; then
|
||||
print_sccache_stats
|
||||
fi
|
||||
|
||||
@ -221,39 +221,25 @@ test_torchbench_smoketest() {
|
||||
TEST_REPORTS_DIR=$(pwd)/test/test-reports
|
||||
mkdir -p "$TEST_REPORTS_DIR"
|
||||
|
||||
local backend=eager
|
||||
local dtype=notset
|
||||
local device=mps
|
||||
local models=(hf_T5 llama BERT_pytorch dcgan hf_GPT2 yolov3 resnet152 sam pytorch_unet stable_diffusion_text_encoder moco speech_transformer)
|
||||
|
||||
for backend in eager inductor; do
|
||||
touch "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_training_${device}_performance.csv"
|
||||
touch "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_inference_${device}_performance.csv"
|
||||
|
||||
for dtype in notset float16 bfloat16; do
|
||||
echo "Launching torchbench inference performance run for backend ${backend} and dtype ${dtype}"
|
||||
local dtype_arg="--${dtype}"
|
||||
if [ "$dtype" == notset ]; then
|
||||
dtype_arg="--float32"
|
||||
fi
|
||||
touch "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_inference_${device}_performance.csv"
|
||||
for model in "${models[@]}"; do
|
||||
PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/torchbench.py \
|
||||
--performance --only "$model" --backend "$backend" --inference --devices "$device" "$dtype_arg" \
|
||||
--output "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_inference_${device}_performance.csv" || true
|
||||
done
|
||||
done
|
||||
|
||||
for dtype in notset amp; do
|
||||
echo "Launching torchbench training performance run for backend ${backend} and dtype ${dtype}"
|
||||
touch "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_training_${device}_performance.csv"
|
||||
local dtype_arg="--${dtype}"
|
||||
if [ "$dtype" == notset ]; then
|
||||
dtype_arg="--float32"
|
||||
fi
|
||||
for model in "${models[@]}"; do
|
||||
PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/torchbench.py \
|
||||
--performance --only "$model" --backend "$backend" --training --devices "$device" "$dtype_arg" \
|
||||
--output "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_training_${device}_performance.csv" || true
|
||||
done
|
||||
done
|
||||
echo "Setup complete, launching torchbench training performance run"
|
||||
for model in hf_T5 llama BERT_pytorch dcgan hf_GPT2 yolov3 resnet152; do
|
||||
PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/torchbench.py \
|
||||
--performance --only "$model" --backend "$backend" --training --devices "$device" \
|
||||
--output "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_training_${device}_performance.csv"
|
||||
done
|
||||
|
||||
echo "Launching torchbench inference performance run"
|
||||
for model in hf_T5 llama BERT_pytorch dcgan hf_GPT2 yolov3 resnet152; do
|
||||
PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/torchbench.py \
|
||||
--performance --only "$model" --backend "$backend" --inference --devices "$device" \
|
||||
--output "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_inference_${device}_performance.csv"
|
||||
done
|
||||
|
||||
echo "Pytorch benchmark on mps device completed"
|
||||
|
||||
@ -119,6 +119,12 @@ popd
|
||||
git rm -rf "$install_path" || true
|
||||
mv "$pt_checkout/docs/build/html" "$install_path"
|
||||
|
||||
# Prevent Google from indexing $install_path/_modules. This folder contains
|
||||
# generated source files.
|
||||
# NB: the following only works on gnu sed. The sed shipped with mac os is different.
|
||||
# One can `brew install gnu-sed` on a mac and then use "gsed" instead of "sed".
|
||||
find "$install_path/_modules" -name "*.html" -print0 | xargs -0 sed -i '/<head>/a \ \ <meta name="robots" content="noindex">'
|
||||
|
||||
git add "$install_path" || true
|
||||
git status
|
||||
git config user.email "soumith+bot@pytorch.org"
|
||||
|
||||
@ -227,10 +227,7 @@ def compare_pypi_to_torch_versions(
|
||||
|
||||
|
||||
def smoke_test_cuda(
|
||||
package: str,
|
||||
runtime_error_check: str,
|
||||
torch_compile_check: str,
|
||||
pypi_pkg_check: str,
|
||||
package: str, runtime_error_check: str, torch_compile_check: str
|
||||
) -> None:
|
||||
if not torch.cuda.is_available() and is_cuda_system:
|
||||
raise RuntimeError(f"Expected CUDA {gpu_arch_ver}. However CUDA is not loaded.")
|
||||
@ -262,25 +259,21 @@ def smoke_test_cuda(
|
||||
)
|
||||
|
||||
print(f"torch cuda: {torch.version.cuda}")
|
||||
print(f"cuDNN enabled? {torch.backends.cudnn.enabled}")
|
||||
torch_cudnn_version = cudnn_to_version_str(torch.backends.cudnn.version())
|
||||
compare_pypi_to_torch_versions(
|
||||
"cudnn", find_pypi_package_version("nvidia-cudnn"), torch_cudnn_version
|
||||
)
|
||||
|
||||
torch.cuda.init()
|
||||
print("CUDA initialized successfully")
|
||||
print(f"Number of CUDA devices: {torch.cuda.device_count()}")
|
||||
for i in range(torch.cuda.device_count()):
|
||||
print(f"Device {i}: {torch.cuda.get_device_name(i)}")
|
||||
|
||||
print(f"cuDNN enabled? {torch.backends.cudnn.enabled}")
|
||||
torch_cudnn_version = cudnn_to_version_str(torch.backends.cudnn.version())
|
||||
print(f"Torch cuDNN version: {torch_cudnn_version}")
|
||||
|
||||
# nccl is availbale only on Linux
|
||||
if sys.platform in ["linux", "linux2"]:
|
||||
torch_nccl_version = ".".join(str(v) for v in torch.cuda.nccl.version())
|
||||
print(f"Torch nccl; version: {torch_nccl_version}")
|
||||
|
||||
# Pypi dependencies are installed on linux ony and nccl is availbale only on Linux.
|
||||
if pypi_pkg_check == "enabled" and sys.platform in ["linux", "linux2"]:
|
||||
compare_pypi_to_torch_versions(
|
||||
"cudnn", find_pypi_package_version("nvidia-cudnn"), torch_cudnn_version
|
||||
)
|
||||
compare_pypi_to_torch_versions(
|
||||
"nccl", find_pypi_package_version("nvidia-nccl"), torch_nccl_version
|
||||
)
|
||||
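The helpers used above (find_pypi_package_version, compare_pypi_to_torch_versions) are defined earlier in smoke_test.py and are not part of this hunk. A rough, hypothetical sketch of how such a pypi-version lookup could be written with importlib.metadata — an assumption for illustration, not the file's actual implementation:

    from importlib import metadata

    def find_pypi_package_version(package_prefix: str) -> str | None:
        # Scan installed distributions for one whose name starts with the prefix,
        # e.g. "nvidia-cudnn" would match "nvidia-cudnn-cu12".
        for dist in metadata.distributions():
            name = (dist.metadata["Name"] or "").lower()
            if name.startswith(package_prefix):
                # Keep only the major.minor.patch part of the wheel version.
                return ".".join(dist.version.split(".")[:3])
        return None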
@ -442,13 +435,6 @@ def parse_args():
choices=["enabled", "disabled"],
default="enabled",
)
parser.add_argument(
"--pypi-pkg-check",
help="Check pypi package versions cudnn and nccl",
type=str,
choices=["enabled", "disabled"],
default="enabled",
)
return parser.parse_args()


@ -473,10 +459,7 @@ def main() -> None:
smoke_test_modules()

smoke_test_cuda(
options.package,
options.runtime_error_check,
options.torch_compile_check,
options.pypi_pkg_check,
options.package, options.runtime_error_check, options.torch_compile_check
)


@ -1173,7 +1173,7 @@ build_xla() {
apply_patches
SITE_PACKAGES="$(python -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())')"
# These functions are defined in .circleci/common.sh in pytorch/xla repo
retry install_pre_deps_pytorch_xla $XLA_DIR $USE_CACHE
retry install_deps_pytorch_xla $XLA_DIR $USE_CACHE
CMAKE_PREFIX_PATH="${SITE_PACKAGES}/torch:${CMAKE_PREFIX_PATH}" XLA_SANDBOX_BUILD=1 build_torch_xla $XLA_DIR
assert_git_not_dirty
}
@ -1474,7 +1474,8 @@ test_executorch() {
pushd /executorch

export PYTHON_EXECUTABLE=python
export CMAKE_ARGS="-DEXECUTORCH_BUILD_PYBIND=ON -DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON"
export EXECUTORCH_BUILD_PYBIND=ON
export CMAKE_ARGS="-DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON"

# For llama3
bash examples/models/llama3_2_vision/install_requirements.sh

@ -90,17 +90,8 @@ fi
/pytorch/.ci/pytorch/check_binary.sh

if [[ "\$GPU_ARCH_TYPE" != *s390x* && "\$GPU_ARCH_TYPE" != *xpu* && "\$GPU_ARCH_TYPE" != *rocm* && "$PACKAGE_TYPE" != libtorch ]]; then

torch_pkg_size="$(ls -1 /final_pkgs/torch-* | sort |tail -1 |xargs wc -c |cut -d ' ' -f1)"
# todo: implement check for large binaries
# if the package is larger than 1.5GB, we disable the pypi check.
# this package contains all libraries packaged in torch libs folder
# example of such package is https://download.pytorch.org/whl/cu126_full/torch
if [[ "\$torch_pkg_size" -gt 1500000000 ]]; then
python /pytorch/.ci/pytorch/smoke_test/smoke_test.py --package=torchonly --torch-compile-check disabled --pypi-pkg-check disabled
else
python /pytorch/.ci/pytorch/smoke_test/smoke_test.py --package=torchonly --torch-compile-check disabled $extra_parameters
fi
# Exclude s390, xpu, rocm and libtorch builds from smoke testing
python /pytorch/.ci/pytorch/smoke_test/smoke_test.py --package=torchonly --torch-compile-check disabled
fi

# Clean temp files

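The 1500000000 threshold above is plain bytes, roughly 1.5 GB. As a hypothetical standalone illustration of the same gate — assuming a single torch wheel in /final_pkgs, GNU stat on the Linux CI image, and an illustrative variable name smoke_args — it could be written as:

    torch_whl="$(ls -1 /final_pkgs/torch-* | sort | tail -1)"
    torch_pkg_size="$(stat -c%s "$torch_whl")"   # size in bytes
    if [ "$torch_pkg_size" -gt 1500000000 ]; then
        # full wheels (e.g. cu126_full) bundle every CUDA library, so skip the pypi check
        smoke_args="--pypi-pkg-check disabled"
    fi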
22
.circleci/scripts/binary_windows_arm64_build.sh
Normal file
@ -0,0 +1,22 @@
#!/bin/bash
set -eux -o pipefail

source "${BINARY_ENV_FILE:-/c/w/env}"
mkdir -p "$PYTORCH_FINAL_PACKAGE_DIR"

export USE_SCCACHE=1
export SCCACHE_IGNORE_SERVER_IO_ERROR=1

echo "Free space on filesystem before build:"
df -h

export NIGHTLIES_PYTORCH_ROOT="$PYTORCH_ROOT"

if [[ "$PACKAGE_TYPE" == 'libtorch' ]]; then
pytorch/.ci/pytorch/windows/arm64/build_libtorch.bat
elif [[ "$PACKAGE_TYPE" == 'wheel' ]]; then
pytorch/.ci/pytorch/windows/arm64/build_pytorch.bat
fi

echo "Free space on filesystem after build:"
df -h
6
.circleci/scripts/binary_windows_arm64_test.sh
Normal file
@ -0,0 +1,6 @@
#!/bin/bash
set -eux -o pipefail

source "${BINARY_ENV_FILE:-/c/w/env}"

pytorch/.ci/pytorch/windows/arm64/smoke_test.bat
@ -4,13 +4,11 @@ set -eux -o pipefail
source "${BINARY_ENV_FILE:-/c/w/env}"
mkdir -p "$PYTORCH_FINAL_PACKAGE_DIR"

if [[ "$OS" != "windows-arm64" ]]; then
export CUDA_VERSION="${DESIRED_CUDA/cu/}"
export USE_SCCACHE=1
export SCCACHE_BUCKET=ossci-compiler-cache
export SCCACHE_IGNORE_SERVER_IO_ERROR=1
export VC_YEAR=2022
fi
export CUDA_VERSION="${DESIRED_CUDA/cu/}"
export USE_SCCACHE=1
export SCCACHE_BUCKET=ossci-compiler-cache
export SCCACHE_IGNORE_SERVER_IO_ERROR=1
export VC_YEAR=2022

if [[ "$DESIRED_CUDA" == 'xpu' ]]; then
export USE_SCCACHE=0
@ -23,16 +21,7 @@ df -h

pushd "$PYTORCH_ROOT/.ci/pytorch/"
export NIGHTLIES_PYTORCH_ROOT="$PYTORCH_ROOT"

if [[ "$OS" == "windows-arm64" ]]; then
if [[ "$PACKAGE_TYPE" == 'libtorch' ]]; then
./windows/arm64/build_libtorch.bat
elif [[ "$PACKAGE_TYPE" == 'wheel' ]]; then
./windows/arm64/build_pytorch.bat
fi
else
./windows/internal/build_wheels.bat
fi
./windows/internal/build_wheels.bat

echo "Free space on filesystem after build:"
df -h

@ -11,11 +11,6 @@ if [[ "$DESIRED_CUDA" == 'xpu' ]]; then
fi

pushd "$PYTORCH_ROOT/.ci/pytorch/"

if [[ "$OS" == "windows-arm64" ]]; then
./windows/arm64/smoke_test.bat
else
./windows/internal/smoke_test.bat
fi
./windows/internal/smoke_test.bat

popd

@ -52,6 +52,7 @@ modernize-*,
-modernize-macro-to-enum,
-modernize-return-braced-init-list,
-modernize-use-auto,
-modernize-use-default-member-init,
-modernize-use-using,
-modernize-use-trailing-return-type,
-modernize-use-nodiscard,

@ -1,14 +0,0 @@
root = true

[*]
end_of_line = lf
insert_final_newline = true

# Python
[*.py]
indent_style = space
indent_size = 4

# Make
[Makefile]
indent_style = tab
2
.github/ISSUE_TEMPLATE/pt2-bug-report.yml
vendored
@ -20,7 +20,7 @@ body:

- Don't compare indices of max/min etc, because that avoids the above requirement

- When comparing eager and torch.compile, use a higher precision result as a baseline. `torch._dynamo.utils.same` with fp64_ref will handle this comparison.
- If comparing eager and torch.compile at fp16/bf16, you should use fp32 as baseline

- Ensure rng state used to compare results is equivalent. Use `torch._inductor.config.fallback_random=True` and reset the torch rng seed between comparisons


9
.github/actionlint.yaml
vendored
@ -3,6 +3,9 @@ self-hosted-runner:
# GitHub hosted runner that actionlint doesn't recognize because actionlint version (1.6.21) is too old
- ubuntu-24.04
# GitHub hosted x86 Linux runners
# TODO: Cleanup mentions of linux.20_04 when upgrade to linux.24_04 is complete
- linux.20_04.4x
- linux.20_04.16x
- linux.24_04.4x
- linux.24_04.16x
# Organization-wide AWS Linux Runners
@ -45,14 +48,10 @@ self-hosted-runner:
- windows.g5.4xlarge.nvidia.gpu
# Windows ARM64 runners
- windows-11-arm64
# Organization-wide AMD-hosted runners
# MI2xx runners
# Organization-wide AMD hosted runners
- linux.rocm.gpu
- linux.rocm.gpu.2
- linux.rocm.gpu.4
# MI300 runners
- linux.rocm.gpu.mi300.2
- linux.rocm.gpu.mi300.4
- rocm-docker
# Repo-specific Apple hosted runners
- macos-m1-ultra

70
.github/actions/binary-docker-build/action.yml
vendored
@ -1,70 +0,0 @@
name: Binary docker build

description: Build docker image for binary builds

inputs:
docker-image-name:
description: Docker image name for PR builds
required: true
docker-build-dir:
description: Location of the build.sh relative to .ci/docker
required: true
custom-tag-prefix:
description: Custom tag prefix for the docker image
required: false
DOCKER_TOKEN:
description: Docker token for authentication
required: true
DOCKER_ID:
description: Docker ID for authentication
required: true

runs:
using: composite
steps:
- name: Checkout PyTorch
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main

- name: Calculate docker image
id: calculate-docker-image
uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
with:
docker-image-name: ${{ inputs.docker-image-name }}
docker-build-dir: .ci/docker
custom-tag-prefix: ${{ inputs.custom-tag-prefix }}
docker-build-script: ${{ inputs.docker-build-dir }}/build.sh
always-rebuild: true
push: true

- name: Tag and (if WITH_PUSH) push docker image to docker.io
env:
DOCKER_TOKEN: ${{ inputs.DOCKER_TOKEN }}
DOCKER_ID: ${{ inputs.DOCKER_ID }}
DOCKER_IMAGE_NAME: ${{ inputs.docker-image-name }}
DOCKER_IMAGE_PREFIX: ${{ inputs.custom-tag-prefix }}
CREATED_FULL_DOCKER_IMAGE_NAME: ${{ steps.calculate-docker-image.outputs.docker-image }}
shell: bash
run: |
set -euox pipefail
GITHUB_REF=${GITHUB_REF:-$(git symbolic-ref -q HEAD || git describe --tags --exact-match)}
GIT_BRANCH_NAME=${GITHUB_REF##*/}
GIT_COMMIT_SHA=${GITHUB_SHA:-$(git rev-parse HEAD)}
CI_FOLDER_SHA=$(git rev-parse HEAD:.ci/docker)

DOCKER_IMAGE_NAME_PREFIX=docker.io/pytorch/${DOCKER_IMAGE_NAME}:${DOCKER_IMAGE_PREFIX}

docker tag ${CREATED_FULL_DOCKER_IMAGE_NAME} ${DOCKER_IMAGE_NAME_PREFIX}
docker tag ${CREATED_FULL_DOCKER_IMAGE_NAME} ${DOCKER_IMAGE_NAME_PREFIX}-${GIT_BRANCH_NAME}
docker tag ${CREATED_FULL_DOCKER_IMAGE_NAME} ${DOCKER_IMAGE_NAME_PREFIX}-${GIT_COMMIT_SHA}
docker tag ${CREATED_FULL_DOCKER_IMAGE_NAME} ${DOCKER_IMAGE_NAME_PREFIX}-${CI_FOLDER_SHA}

# Pretty sure Github will mask tokens and I'm not sure if it will even be
# printed due to pipe, but just in case
set +x
if [[ ${WITH_PUSH:-false} == "true" ]]; then
echo "${DOCKER_TOKEN}" | docker login -u "${DOCKER_ID}" --password-stdin
docker push ${DOCKER_IMAGE_NAME_PREFIX}
docker push ${DOCKER_IMAGE_NAME_PREFIX}-${GIT_BRANCH_NAME}
docker push ${DOCKER_IMAGE_NAME_PREFIX}-${GIT_COMMIT_SHA}
docker push ${DOCKER_IMAGE_NAME_PREFIX}-${CI_FOLDER_SHA}
fi
10
.github/actions/upload-test-artifacts/action.yml
vendored
@ -48,8 +48,14 @@ runs:
run: |
# Remove any previous usage logs if they exist
rm -f logs-*.zip
zip "logs-${FILE_SUFFIX}.zip" 'usage_log.txt' || true
zip -r "logs-${FILE_SUFFIX}.zip" test/test-reports -i '*.log' || true
# this workflow is also run in bazel build test, but we dont generate usage reports for it
# so check to see if the file exists first
if [ -f 'usage_log.txt' ]; then
zip "logs-${FILE_SUFFIX}.zip" 'usage_log.txt'
fi
if find "test/test-reports" -name "*.log" 2>/dev/null | grep -q .; then
zip -r "logs-${FILE_SUFFIX}.zip" test/test-reports -i '*.log'
fi

- name: Zip debugging artifacts for upload
if: runner.os != 'Windows' && !inputs.use-gha

2
.github/ci_commit_pins/audio.txt
vendored
@ -1 +1 @@
bccaa454a54c3c648697cc2f46a4fb0500b1f01b
318bace01aebc1f82ae13d0d133fcf9fede73383

2
.github/ci_commit_pins/xla.txt
vendored
@ -1 +1 @@
ac9a39f4b768cef09b9d2be8e074be496d7783b6
b2b890e962f5fb6f481e5da2eb4a43bb990d0f1b

19
.github/labeler.yml
vendored
@ -112,22 +112,3 @@
- torch/csrc/inductor/aoti_include/xpu.h
- torch/csrc/inductor/cpp_wrapper/device_internal/xpu.h
- torch/csrc/inductor/cpp_wrapper/xpu.h

"release notes: inductor (aoti)":
- torch/_C/_aoti.pyi
- torch/_dynamo/repro/aoti.py
- torch/_export/serde/aoti_schema.py
- torch/_higher_order_ops/aoti_call_delegate.py
- torch/_inductor/codegen/aoti_runtime/**
- torch/_inductor/codegen/aoti_hipify_utils.py
- torch/_inductor/codegen/cpp_wrapper_cpu.py
- torch/_inductor/codegen/cpp_wrapper_gpu.py
- torch/_inductor/aoti_eager.py
- torch/csrc/inductor/aoti_runtime/**
- torch/csrc/inductor/aoti_torch/**
- torch/csrc/inductor/aoti_runner/**
- torch/csrc/inductor/aoti_eager/**
- torch/csrc/inductor/aoti_package/**
- torch/csrc/inductor/aoti_include/**
- torchgen/aoti/**
- torchgen/gen_aoti_c_shim.py

1
.github/merge_rules.yaml
vendored
@ -540,7 +540,6 @@
- bdhirsh
- zou3519
- isuruf
- Chillee
mandatory_checks_name:
- EasyCLA
- Lint

1
.github/pytorch-probot.yml
vendored
@ -16,7 +16,6 @@ ciflow_push_tags:
- ciflow/mps
- ciflow/nightly
- ciflow/periodic
- ciflow/periodic-rocm-mi300
- ciflow/rocm
- ciflow/rocm-mi300
- ciflow/s390

@ -30,7 +30,7 @@ CUDA_ARCHES_CUDNN_VERSION = {
}

# NOTE: Also update the ROCm sources in tools/nightly.py when changing this list
ROCM_ARCHES = ["6.3", "6.4"]
ROCM_ARCHES = ["6.2.4", "6.3"]

XPU_ARCHES = ["xpu"]

@ -173,7 +173,7 @@ WHEEL_CONTAINER_IMAGES = {
"xpu": f"pytorch/manylinux2_28-builder:xpu-{DEFAULT_TAG}",
"cpu": f"pytorch/manylinux2_28-builder:cpu-{DEFAULT_TAG}",
"cpu-aarch64": f"pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-{DEFAULT_TAG}",
"cpu-s390x": "pytorch/manylinuxs390x-builder:cpu-s390x",
"cpu-s390x": f"pytorch/manylinuxs390x-builder:cpu-s390x-{DEFAULT_TAG}",
}

RELEASE = "release"

74
.github/scripts/generate_ci_workflows.py
vendored
74
.github/scripts/generate_ci_workflows.py
vendored
@ -227,6 +227,42 @@ WINDOWS_BINARY_BUILD_WORKFLOWS = [
|
||||
isolated_workflow=True,
|
||||
),
|
||||
),
|
||||
]
|
||||
|
||||
WINDOWS_BINARY_SMOKE_WORKFLOWS = [
|
||||
BinaryBuildWorkflow(
|
||||
os=OperatingSystem.WINDOWS,
|
||||
package_type="libtorch",
|
||||
build_variant=generate_binary_build_matrix.RELEASE,
|
||||
build_configs=generate_binary_build_matrix.generate_libtorch_matrix(
|
||||
OperatingSystem.WINDOWS,
|
||||
generate_binary_build_matrix.RELEASE,
|
||||
arches=["cpu"],
|
||||
libtorch_variants=["shared-with-deps"],
|
||||
),
|
||||
branches="main",
|
||||
ciflow_config=CIFlowConfig(
|
||||
isolated_workflow=True,
|
||||
),
|
||||
),
|
||||
BinaryBuildWorkflow(
|
||||
os=OperatingSystem.WINDOWS,
|
||||
package_type="libtorch",
|
||||
build_variant=generate_binary_build_matrix.DEBUG,
|
||||
build_configs=generate_binary_build_matrix.generate_libtorch_matrix(
|
||||
OperatingSystem.WINDOWS,
|
||||
generate_binary_build_matrix.DEBUG,
|
||||
arches=["cpu"],
|
||||
libtorch_variants=["shared-with-deps"],
|
||||
),
|
||||
branches="main",
|
||||
ciflow_config=CIFlowConfig(
|
||||
isolated_workflow=True,
|
||||
),
|
||||
),
|
||||
]
|
||||
|
||||
WINDOWS_ARM64_BINARY_BUILD_WORKFLOWS = [
|
||||
BinaryBuildWorkflow(
|
||||
os=OperatingSystem.WINDOWS_ARM64,
|
||||
package_type="wheel",
|
||||
@ -258,7 +294,6 @@ WINDOWS_BINARY_BUILD_WORKFLOWS = [
|
||||
BinaryBuildWorkflow(
|
||||
os=OperatingSystem.WINDOWS_ARM64,
|
||||
package_type="libtorch",
|
||||
build_variant=generate_binary_build_matrix.DEBUG,
|
||||
build_configs=generate_binary_build_matrix.generate_libtorch_matrix(
|
||||
OperatingSystem.WINDOWS_ARM64,
|
||||
generate_binary_build_matrix.DEBUG,
|
||||
@ -272,39 +307,6 @@ WINDOWS_BINARY_BUILD_WORKFLOWS = [
|
||||
),
|
||||
]
|
||||
|
||||
WINDOWS_BINARY_SMOKE_WORKFLOWS = [
|
||||
BinaryBuildWorkflow(
|
||||
os=OperatingSystem.WINDOWS,
|
||||
package_type="libtorch",
|
||||
build_variant=generate_binary_build_matrix.RELEASE,
|
||||
build_configs=generate_binary_build_matrix.generate_libtorch_matrix(
|
||||
OperatingSystem.WINDOWS,
|
||||
generate_binary_build_matrix.RELEASE,
|
||||
arches=["cpu"],
|
||||
libtorch_variants=["shared-with-deps"],
|
||||
),
|
||||
branches="main",
|
||||
ciflow_config=CIFlowConfig(
|
||||
isolated_workflow=True,
|
||||
),
|
||||
),
|
||||
BinaryBuildWorkflow(
|
||||
os=OperatingSystem.WINDOWS,
|
||||
package_type="libtorch",
|
||||
build_variant=generate_binary_build_matrix.DEBUG,
|
||||
build_configs=generate_binary_build_matrix.generate_libtorch_matrix(
|
||||
OperatingSystem.WINDOWS,
|
||||
generate_binary_build_matrix.DEBUG,
|
||||
arches=["cpu"],
|
||||
libtorch_variants=["shared-with-deps"],
|
||||
),
|
||||
branches="main",
|
||||
ciflow_config=CIFlowConfig(
|
||||
isolated_workflow=True,
|
||||
),
|
||||
),
|
||||
]
|
||||
|
||||
MACOS_BINARY_BUILD_WORKFLOWS = [
|
||||
BinaryBuildWorkflow(
|
||||
os=OperatingSystem.MACOS_ARM64,
|
||||
@ -399,6 +401,10 @@ def main() -> None:
|
||||
jinja_env.get_template("windows_binary_build_workflow.yml.j2"),
|
||||
WINDOWS_BINARY_SMOKE_WORKFLOWS,
|
||||
),
|
||||
(
|
||||
jinja_env.get_template("windows_arm64_binary_build_workflow.yml.j2"),
|
||||
WINDOWS_ARM64_BINARY_BUILD_WORKFLOWS,
|
||||
),
|
||||
(
|
||||
jinja_env.get_template("macos_binary_build_workflow.yml.j2"),
|
||||
MACOS_BINARY_BUILD_WORKFLOWS,
|
||||
|
||||
97
.github/scripts/s390x-ci/tests_list.py
vendored
Executable file
97
.github/scripts/s390x-ci/tests_list.py
vendored
Executable file
@ -0,0 +1,97 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
|
||||
|
||||
sys.path.insert(1, os.path.join(sys.path[0], "..", "..", ".."))
|
||||
|
||||
from tools.testing.discover_tests import TESTS
|
||||
|
||||
|
||||
skip_list = [
|
||||
# these tests fail due to various reasons
|
||||
"dynamo/test_misc",
|
||||
"inductor/test_aot_inductor",
|
||||
"inductor/test_cpu_repro",
|
||||
"inductor/test_cpu_select_algorithm",
|
||||
"inductor/test_aot_inductor_arrayref",
|
||||
"inductor/test_torchinductor_codegen_dynamic_shapes",
|
||||
"lazy/test_meta_kernel",
|
||||
"onnx/test_utility_funs",
|
||||
"profiler/test_profiler",
|
||||
"test_ao_sparsity",
|
||||
"test_cpp_extensions_open_device_registration",
|
||||
"test_jit",
|
||||
"test_metal",
|
||||
"test_mps",
|
||||
"dynamo/test_torchrec",
|
||||
"inductor/test_aot_inductor_utils",
|
||||
"inductor/test_coordinate_descent_tuner",
|
||||
"test_jiterator",
|
||||
# these tests run long and fail in addition to that
|
||||
"dynamo/test_dynamic_shapes",
|
||||
"test_quantization",
|
||||
"inductor/test_torchinductor",
|
||||
"inductor/test_torchinductor_dynamic_shapes",
|
||||
"inductor/test_torchinductor_opinfo",
|
||||
"test_binary_ufuncs",
|
||||
"test_unary_ufuncs",
|
||||
# these tests fail when cuda is not available
|
||||
"inductor/test_cudacodecache",
|
||||
"inductor/test_inductor_utils",
|
||||
"inductor/test_inplacing_pass",
|
||||
"inductor/test_kernel_benchmark",
|
||||
"inductor/test_max_autotune",
|
||||
"inductor/test_move_constructors_to_cuda",
|
||||
"inductor/test_multi_kernel",
|
||||
"inductor/test_pattern_matcher",
|
||||
"inductor/test_perf",
|
||||
"inductor/test_select_algorithm",
|
||||
"inductor/test_snode_runtime",
|
||||
"inductor/test_triton_wrapper",
|
||||
# these tests fail when mkldnn is not available
|
||||
"inductor/test_custom_post_grad_passes",
|
||||
"inductor/test_mkldnn_pattern_matcher",
|
||||
# lacks quantization support
|
||||
"onnx/test_models_quantized_onnxruntime",
|
||||
"onnx/test_pytorch_onnx_onnxruntime",
|
||||
# https://github.com/pytorch/pytorch/issues/102078
|
||||
"test_decomp",
|
||||
# https://github.com/pytorch/pytorch/issues/146698
|
||||
"test_model_exports_to_core_aten",
|
||||
# runs very long, skip for now
|
||||
"inductor/test_layout_optim",
|
||||
"test_fx",
|
||||
# some false errors
|
||||
"doctests",
|
||||
]
|
||||
|
||||
skip_list_regex = [
|
||||
# distributed tests fail randomly
|
||||
"distributed/.*",
|
||||
]
|
||||
|
||||
all_testfiles = sorted(TESTS)
|
||||
|
||||
filtered_testfiles = []
|
||||
|
||||
for filename in all_testfiles:
|
||||
if filename in skip_list:
|
||||
continue
|
||||
|
||||
regex_filtered = False
|
||||
|
||||
for regex_string in skip_list_regex:
|
||||
if re.fullmatch(regex_string, filename):
|
||||
regex_filtered = True
|
||||
break
|
||||
|
||||
if regex_filtered:
|
||||
continue
|
||||
|
||||
filtered_testfiles.append(filename)
|
||||
|
||||
for filename in filtered_testfiles:
|
||||
print(' "' + filename + '",')
|
||||
2
.github/scripts/trymerge.py
vendored
2
.github/scripts/trymerge.py
vendored
@ -434,7 +434,7 @@ query ($owner: String!, $name: String!) {
|
||||
RE_GHSTACK_HEAD_REF = re.compile(r"^(gh/[^/]+/[0-9]+/)head$")
|
||||
RE_GHSTACK_DESC = re.compile(r"Stack.*:\r?\n(\* [^\r\n]+\r?\n)+", re.MULTILINE)
|
||||
RE_PULL_REQUEST_RESOLVED = re.compile(
|
||||
r"(Pull Request resolved|Pull-Request-resolved): "
|
||||
r"Pull Request resolved: "
|
||||
r"https://github.com/(?P<owner>[^/]+)/(?P<repo>[^/]+)/pull/(?P<number>[0-9]+)",
|
||||
re.MULTILINE,
|
||||
)
|
||||
|
||||
3
.github/scripts/windows/build_magma.bat
vendored
3
.github/scripts/windows/build_magma.bat
vendored
@ -54,8 +54,7 @@ cmake .. -DGPU_TARGET="%GPU_TARGET%" ^
|
||||
-DCMAKE_BUILD_TYPE=%CONFIG% ^
|
||||
-DCMAKE_GENERATOR=Ninja ^
|
||||
-DCMAKE_INSTALL_PREFIX=..\install\ ^
|
||||
-DCUDA_ARCH_LIST="%CUDA_ARCH_LIST%" ^
|
||||
-DCMAKE_POLICY_VERSION_MINIMUM=3.5
|
||||
-DCUDA_ARCH_LIST="%CUDA_ARCH_LIST%"
|
||||
if errorlevel 1 exit /b 1
|
||||
|
||||
cmake --build . --target install --config %CONFIG% -- -j%NUMBER_OF_PROCESSORS%
|
||||
|
||||
3
.github/scripts/windows/build_triton.bat
vendored
3
.github/scripts/windows/build_triton.bat
vendored
@ -9,8 +9,7 @@ if "%PY_VERS%" == "3.13t" (
|
||||
) else (
|
||||
call conda create -n %PYTHON_PREFIX% -y -c=conda-forge python=%PY_VERS%
|
||||
)
|
||||
:: Fix cmake version for issue https://github.com/pytorch/pytorch/issues/150480
|
||||
call conda run -n %PYTHON_PREFIX% pip install wheel pybind11 certifi cython cmake==3.31.6 setuptools==72.1.0 ninja
|
||||
call conda run -n %PYTHON_PREFIX% pip install wheel pybind11 certifi cython cmake setuptools==72.1.0 ninja
|
||||
|
||||
dir "%VC_INSTALL_PATH%"
|
||||
|
||||
|
||||
197
.github/templates/windows_arm64_binary_build_workflow.yml.j2
vendored
Normal file
197
.github/templates/windows_arm64_binary_build_workflow.yml.j2
vendored
Normal file
@ -0,0 +1,197 @@
|
||||
{% import 'common.yml.j2' as common %}
|
||||
{% import 'upload.yml.j2' as upload %}
|
||||
|
||||
{%- block name -%}
|
||||
# Template is at: .github/templates/windows_arm64_binary_build_workflow.yml.j2
|
||||
# Generation script: .github/scripts/generate_ci_workflows.py
|
||||
name: !{{ build_environment }}
|
||||
{%- endblock %}
|
||||
|
||||
{%- macro set_runner_specific_vars() -%}
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
- name: Populate binary env
|
||||
shell: cmd
|
||||
run: |
|
||||
echo BINARY_ENV_FILE=%RUNNER_TEMP%/env>> %GITHUB_ENV%
|
||||
echo PYTORCH_FINAL_PACKAGE_DIR=%RUNNER_TEMP%/artifacts>> %GITHUB_ENV%
|
||||
echo WIN_PACKAGE_WORK_DIR=%RUNNER_TEMP%>> %GITHUB_ENV%
|
||||
{%- endmacro %}
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- !{{ branches }}
|
||||
{%- if branches == "nightly" %}
|
||||
tags:
|
||||
# NOTE: Binary build pipelines should only get triggered on release candidate builds
|
||||
# Release candidate tags look like: v1.11.0-rc1
|
||||
- v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
|
||||
{%- endif %}
|
||||
{%- for label in ciflow_config.labels | sort %}
|
||||
{%- if loop.first and branches != "nightly" %}
|
||||
tags:
|
||||
{%- endif %}
|
||||
- '!{{ label }}/*'
|
||||
{%- endfor %}
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
BUILD_ENVIRONMENT: !{{ build_environment }}
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||
SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
|
||||
SKIP_ALL_TESTS: 1
|
||||
PYTORCH_ROOT: /pytorch
|
||||
DOWNLOADS_DIR: c:\temp\downloads
|
||||
DEPENDENCIES_DIR: c:\temp\dependencies
|
||||
ENABLE_APL: 1
|
||||
ENABLE_OPENBLAS: 0
|
||||
MSVC_VERSION : 14.42
|
||||
AWS_DEFAULT_REGION: us-east-1
|
||||
|
||||
jobs:
|
||||
get-label-type:
|
||||
if: github.repository_owner == 'pytorch'
|
||||
name: get-label-type
|
||||
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
|
||||
with:
|
||||
triggering_actor: ${{ github.triggering_actor }}
|
||||
issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
|
||||
curr_branch: ${{ github.head_ref || github.ref_name }}
|
||||
curr_ref_type: ${{ github.ref_type }}
|
||||
|
||||
{%- for config in build_configs %}
|
||||
!{{ config["build_name"] }}-build:
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
needs: get-label-type
|
||||
runs-on: "windows-11-arm64"
|
||||
timeout-minutes: !{{ common.timeout_minutes }}
|
||||
!{{ upload.binary_env(config, True) }}
|
||||
{%- if config.pytorch_extra_install_requirements is defined and config.pytorch_extra_install_requirements|d('')|length > 0 %}
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: !{{ config.pytorch_extra_install_requirements }}
|
||||
{%- endif %}
|
||||
steps:
|
||||
!{{ set_runner_specific_vars() }}
|
||||
- name: Bootstrap folders
|
||||
shell: cmd
|
||||
run: |
|
||||
mkdir "%NIGHTLIES_PYTORCH_ROOT%"
|
||||
mkdir "%PYTORCH_FINAL_PACKAGE_DIR%"
|
||||
- name: Git checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
path: "pytorch"
|
||||
- name: Bootstrap Build Tools
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_buildtools.bat"
|
||||
- name: Bootstrap Git
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_git.bat"
|
||||
- name: Remove Pytorch folder
|
||||
shell: cmd
|
||||
run: |
|
||||
rmdir /s /q "pytorch"
|
||||
- name: Git checkout PyTorch - recursive
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
path: "pytorch"
|
||||
submodules: recursive
|
||||
- name: Bootstrap Python
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_python.bat"
|
||||
- name: Bootstrap APL
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_apl.bat"
|
||||
- name: Bootstrap Rust
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_rust.bat"
|
||||
- name: Bootstrap sccache
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_sccache.bat"
|
||||
- name: Bootstrap Libuv
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_libuv.bat"
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
"pytorch/.circleci/scripts/binary_populate_env.sh"
|
||||
- name: Build PyTorch binary
|
||||
shell: bash
|
||||
run: |
|
||||
"pytorch/.circleci/scripts/binary_windows_arm64_build.sh"
|
||||
- uses: !{{ common.upload_artifact_action }}
|
||||
if: always()
|
||||
with:
|
||||
name: !{{ config["build_name"] }}
|
||||
retention-days: 14
|
||||
if-no-files-found: error
|
||||
path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
|
||||
!{{ config["build_name"] }}-test: # Testing
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
needs:
|
||||
- !{{ config["build_name"] }}-build
|
||||
- get-label-type
|
||||
runs-on: "windows-11-arm64"
|
||||
timeout-minutes: !{{ common.timeout_minutes }}
|
||||
!{{ upload.binary_env(config, True) }}
|
||||
steps:
|
||||
!{{ set_runner_specific_vars() }}
|
||||
- uses: !{{ common.download_artifact_action }}
|
||||
name: Download Build Artifacts
|
||||
with:
|
||||
name: !{{ config["build_name"] }}
|
||||
path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
|
||||
- name: Git checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
path: "pytorch"
|
||||
- name: Bootstrap Git
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_git.bat"
|
||||
- name: Remove Pytorch folder
|
||||
shell: cmd
|
||||
run: |
|
||||
rmdir /s /q "pytorch"
|
||||
- name: Git checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
path: "pytorch"
|
||||
submodules: recursive
|
||||
- name: Bootstrap APL
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_apl.bat"
|
||||
- name: Bootstrap Python
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_python.bat"
|
||||
- name: Bootstrap Build Tools
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_buildtools.bat"
|
||||
- name: Bootstrap Rust
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_rust.bat"
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
"pytorch/.circleci/scripts/binary_populate_env.sh"
|
||||
- name: Test PyTorch binary
|
||||
shell: bash
|
||||
run: |
|
||||
"pytorch/.circleci/scripts/binary_windows_arm64_test.sh"
|
||||
{%- if branches == "nightly" %}
|
||||
!{{ upload.upload_binaries(config, True) }}
|
||||
{%- endif %}
|
||||
{%- endfor %}
|
||||
@ -49,15 +49,6 @@ env:
|
||||
PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||
SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
|
||||
SKIP_ALL_TESTS: 1
|
||||
OS: !{{ os }}
|
||||
{%- if os == "windows-arm64" %}
|
||||
PYTORCH_ROOT: /pytorch
|
||||
DOWNLOADS_DIR: c:\temp\downloads
|
||||
DEPENDENCIES_DIR: c:\temp\dependencies
|
||||
ENABLE_APL: 1
|
||||
ENABLE_OPENBLAS: 0
|
||||
MSVC_VERSION : 14.42
|
||||
{%- endif %}
|
||||
!{{ common.concurrency(build_environment) }}
|
||||
|
||||
jobs:
|
||||
@ -75,79 +66,20 @@ jobs:
|
||||
!{{ config["build_name"] }}-build:
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
needs: get-label-type
|
||||
{%- if os == "windows-arm64" %}
|
||||
runs-on: "windows-11-arm64"
|
||||
{%- else %}
|
||||
{%- if branches == "nightly" %}
|
||||
runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
|
||||
{%- else %}
|
||||
runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral"
|
||||
{%- endif %}
|
||||
{%- endif %}
|
||||
timeout-minutes: !{{ common.timeout_minutes_windows_binary }}
|
||||
!{{ upload.binary_env(config, True) }}
|
||||
{%- if config.pytorch_extra_install_requirements is defined and config.pytorch_extra_install_requirements|d('')|length > 0 %}
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: !{{ config.pytorch_extra_install_requirements }}
|
||||
{%- endif %}
|
||||
steps:
|
||||
{%- if os == "windows-arm64" %}
|
||||
- name: Populate binary env
|
||||
shell: cmd
|
||||
run: |
|
||||
echo BINARY_ENV_FILE=%RUNNER_TEMP%/env>> %GITHUB_ENV%
|
||||
echo PYTORCH_FINAL_PACKAGE_DIR=%RUNNER_TEMP%/artifacts>> %GITHUB_ENV%
|
||||
echo WIN_PACKAGE_WORK_DIR=%RUNNER_TEMP%>> %GITHUB_ENV%
|
||||
- name: Bootstrap folders
|
||||
shell: cmd
|
||||
run: |
|
||||
mkdir "%NIGHTLIES_PYTORCH_ROOT%"
|
||||
mkdir "%PYTORCH_FINAL_PACKAGE_DIR%"
|
||||
- name: Git checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
path: "pytorch"
|
||||
- name: Bootstrap Build Tools
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_buildtools.bat"
|
||||
- name: Bootstrap Git
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_git.bat"
|
||||
- name: Remove Pytorch folder
|
||||
shell: cmd
|
||||
run: |
|
||||
rmdir /s /q "pytorch"
|
||||
- name: Git checkout PyTorch - recursive
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
path: "pytorch"
|
||||
submodules: recursive
|
||||
- name: Bootstrap Python
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_python.bat"
|
||||
- name: Bootstrap APL
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_apl.bat"
|
||||
- name: Bootstrap Rust
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_rust.bat"
|
||||
- name: Bootstrap sccache
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_sccache.bat"
|
||||
- name: Bootstrap Libuv
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_libuv.bat"
|
||||
{%- else %}
|
||||
!{{ set_runner_specific_vars() }}
|
||||
!{{ common.setup_ec2_windows() }}
|
||||
!{{ set_runner_specific_vars() }}
|
||||
!{{ common.checkout(deep_clone=False, directory="pytorch") }}
|
||||
{%- endif %}
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
@ -163,17 +95,12 @@ jobs:
|
||||
retention-days: 14
|
||||
if-no-files-found: error
|
||||
path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
|
||||
{%- if os != "windows-arm64" %}
|
||||
!{{ common.wait_and_kill_ssh_windows('pytorch') }}
|
||||
{% endif %}
|
||||
!{{ config["build_name"] }}-test: # Testing
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
needs:
|
||||
- !{{ config["build_name"] }}-build
|
||||
- get-label-type
|
||||
{%- if os == "windows-arm64" %}
|
||||
runs-on: "windows-11-arm64"
|
||||
{%- else %}
|
||||
{%- if config["gpu_arch_type"] == "cuda" %}
|
||||
{%- if branches == "nightly" %}
|
||||
runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.g4dn.xlarge"
|
||||
@ -186,61 +113,18 @@ jobs:
|
||||
{%- else %}
|
||||
runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral"
|
||||
{%- endif %}
|
||||
{%- endif %}
|
||||
{%- endif %}
|
||||
timeout-minutes: !{{ common.timeout_minutes_windows_binary }}
|
||||
!{{ upload.binary_env(config, True) }}
|
||||
steps:
|
||||
{%- if os == "windows-arm64" %}
|
||||
- name: Populate binary env
|
||||
shell: cmd
|
||||
run: |
|
||||
echo BINARY_ENV_FILE=%RUNNER_TEMP%/env>> %GITHUB_ENV%
|
||||
echo PYTORCH_FINAL_PACKAGE_DIR=%RUNNER_TEMP%/artifacts>> %GITHUB_ENV%
|
||||
echo WIN_PACKAGE_WORK_DIR=%RUNNER_TEMP%>> %GITHUB_ENV%
|
||||
- name: Git checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
path: "pytorch"
|
||||
- name: Populate binary env
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_git.bat"
|
||||
- name: Remove Pytorch folder
|
||||
shell: cmd
|
||||
run: |
|
||||
rmdir /s /q "pytorch"
|
||||
- name: Git checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
path: "pytorch"
|
||||
submodules: recursive
|
||||
- name: Bootstrap APL
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_apl.bat"
|
||||
- name: Bootstrap Python
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_python.bat"
|
||||
- name: Bootstrap Build Tools
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_buildtools.bat"
|
||||
- name: Bootstrap Rust
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_rust.bat"
|
||||
{%- else %}
|
||||
!{{ common.setup_ec2_windows() }}
|
||||
!{{ common.checkout(deep_clone=False, directory="pytorch") }}
|
||||
!{{ set_runner_specific_vars() }}
|
||||
{%- endif %}
|
||||
- uses: !{{ common.download_artifact_action }}
|
||||
name: Download Build Artifacts
|
||||
with:
|
||||
name: !{{ config["build_name"] }}
|
||||
path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
|
||||
!{{ common.checkout(deep_clone=False, directory="pytorch") }}
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
@ -249,10 +133,8 @@ jobs:
|
||||
shell: bash
|
||||
run: |
|
||||
"${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
|
||||
{%- if os != "windows-arm64" %}
|
||||
!{{ common.wait_and_kill_ssh_windows('pytorch') }}
|
||||
{%- endif %}
|
||||
{%- if branches == "nightly" %}
|
||||
!{{ upload.upload_binaries(config, True) }}
|
||||
{%- endif %}
|
||||
{%- endfor %}
|
||||
{%- endfor %}
|
||||
|
||||
18
.github/workflows/_bazel-build-test.yml
vendored
18
.github/workflows/_bazel-build-test.yml
vendored
@ -33,10 +33,6 @@ on:
|
||||
default: "linux.large"
|
||||
description: Runner type
|
||||
|
||||
permissions:
|
||||
id-token: write
|
||||
contents: read
|
||||
|
||||
env:
|
||||
GIT_DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
|
||||
|
||||
@ -84,13 +80,6 @@ jobs:
|
||||
- name: Setup Linux
|
||||
uses: ./.github/actions/setup-linux
|
||||
|
||||
- name: Configure AWS Credentials
|
||||
uses: aws-actions/configure-aws-credentials@v4
|
||||
with:
|
||||
role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_s3_and_ecr_read_only
|
||||
role-session-name: gha-bazel-build
|
||||
aws-region: us-east-1
|
||||
|
||||
- name: Calculate docker image
|
||||
id: calculate-docker-image
|
||||
uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
|
||||
@ -213,13 +202,6 @@ jobs:
|
||||
uses: ./.github/actions/chown-workspace
|
||||
if: always()
|
||||
|
||||
- name: Configure AWS Credentials
|
||||
uses: aws-actions/configure-aws-credentials@v4
|
||||
with:
|
||||
role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_pytorch_artifacts
|
||||
role-session-name: gha-bazel-build-upload-artifacts
|
||||
aws-region: us-east-1
|
||||
|
||||
- name: Upload test artifacts
|
||||
uses: ./.github/actions/upload-test-artifacts
|
||||
if: always() && steps.test.conclusion && steps.test.conclusion != 'skipped'
|
||||
|
||||
2
.github/workflows/_binary-build-linux.yml
vendored
2
.github/workflows/_binary-build-linux.yml
vendored
@ -23,7 +23,7 @@ on:
|
||||
description: Hardware to run this "build" job on, linux.12xlarge or linux.arm64.2xlarge.
|
||||
timeout-minutes:
|
||||
required: false
|
||||
default: 240
|
||||
default: 210
|
||||
type: number
|
||||
description: timeout for the job
|
||||
use_split_build:
|
||||
|
||||
6
.github/workflows/_mac-test.yml
vendored
6
.github/workflows/_mac-test.yml
vendored
@ -38,11 +38,6 @@ on:
|
||||
required: false
|
||||
type: boolean
|
||||
default: true
|
||||
secrets:
|
||||
HUGGING_FACE_HUB_TOKEN:
|
||||
required: false
|
||||
description: |
|
||||
HF Auth token to avoid rate limits when downloading models or datasets from hub
|
||||
|
||||
jobs:
|
||||
test:
|
||||
@ -171,7 +166,6 @@ jobs:
|
||||
JOB_ID: ${{ steps.get-job-id.outputs.job-id }}
|
||||
JOB_NAME: ${{ steps.get-job-id.outputs.job-name }}
|
||||
REENABLED_ISSUES: ${{ steps.keep-going.outputs.reenabled-issues }}
|
||||
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
|
||||
run: |
|
||||
# shellcheck disable=SC1090
|
||||
set -ex
|
||||
|
||||
4
.github/workflows/_xpu-test.yml
vendored
4
.github/workflows/_xpu-test.yml
vendored
@ -47,10 +47,6 @@ on:
|
||||
type: boolean
|
||||
default: true
|
||||
|
||||
permissions:
|
||||
id-token: write
|
||||
contents: read
|
||||
|
||||
env:
|
||||
GIT_DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
|
||||
|
||||
|
||||
43
.github/workflows/build-almalinux-images.yml
vendored
43
.github/workflows/build-almalinux-images.yml
vendored
@ -11,14 +11,14 @@ on:
|
||||
# Release candidate tags look like: v1.11.0-rc1
|
||||
- v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
|
||||
paths:
|
||||
- .ci/docker/**
|
||||
- '.ci/docker/almalinux/*'
|
||||
- '.ci/docker/common/*'
|
||||
- .github/workflows/build-almalinux-images.yml
|
||||
- .github/actions/binary-docker-build/**
|
||||
pull_request:
|
||||
paths:
|
||||
- .ci/docker/**
|
||||
- '.ci/docker/almalinux/*'
|
||||
- '.ci/docker/common/*'
|
||||
- .github/workflows/build-almalinux-images.yml
|
||||
- .github/actions/binary-docker-build/**
|
||||
|
||||
env:
|
||||
DOCKER_REGISTRY: "docker.io"
|
||||
@ -37,12 +37,37 @@ jobs:
|
||||
strategy:
|
||||
matrix:
|
||||
cuda_version: ["11.8", "12.4", "12.6", "cpu"]
|
||||
env:
|
||||
CUDA_VERSION: ${{ matrix.cuda_version }}
|
||||
steps:
|
||||
- name: Build docker image
|
||||
uses: pytorch/pytorch/.github/actions/binary-docker-build@main
|
||||
- name: Checkout PyTorch
|
||||
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
|
||||
with:
|
||||
docker-image-name: almalinux-builder
|
||||
custom-tag-prefix: ${{ matrix.cuda_version != 'cpu' && 'cuda' || '' }}${{matrix.cuda_version}}
|
||||
docker-build-dir: almalinux
|
||||
submodules: false
|
||||
- name: Calculate docker image
|
||||
if: env.WITH_PUSH == 'false'
|
||||
uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
|
||||
with:
|
||||
docker-image-name: almalinux-builder${{ matrix.cuda_version == 'cpu' && '-' || '-cuda' }}${{matrix.cuda_version}}
|
||||
docker-build-dir: .ci/docker/almalinux
|
||||
always-rebuild: true
|
||||
push: true
|
||||
- name: Authenticate if WITH_PUSH
|
||||
if: env.WITH_PUSH == 'true'
|
||||
env:
|
||||
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
|
||||
DOCKER_ID: ${{ secrets.DOCKER_ID }}
|
||||
run: |
|
||||
if [[ "${WITH_PUSH}" == true ]]; then
|
||||
echo "${DOCKER_TOKEN}" | docker login -u "${DOCKER_ID}" --password-stdin
|
||||
fi
|
||||
- name: Build Docker Image
|
||||
if: env.WITH_PUSH == 'true'
|
||||
uses: nick-fields/retry@v3.0.0
|
||||
with:
|
||||
shell: bash
|
||||
timeout_minutes: 90
|
||||
max_attempts: 3
|
||||
retry_wait_seconds: 90
|
||||
command: |
|
||||
.ci/docker/almalinux/build.sh almalinux-builder${{ matrix.cuda_version == 'cpu' && ':' || ':cuda' }}${{matrix.cuda_version}}
|
||||
|
||||
138
.github/workflows/build-libtorch-images.yml
vendored
138
.github/workflows/build-libtorch-images.yml
vendored
@ -10,14 +10,14 @@ on:
|
||||
# Release candidate tags look like: v1.11.0-rc1
|
||||
- v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
|
||||
paths:
|
||||
- .ci/docker/**
|
||||
- '.ci/docker/libtorch/*'
|
||||
- '.ci/docker/common/*'
|
||||
- .github/workflows/build-libtorch-images.yml
|
||||
- .github/actions/binary-docker-build/**
|
||||
pull_request:
|
||||
paths:
|
||||
- .ci/docker/**
|
||||
- '.ci/docker/libtorch/*'
|
||||
- '.ci/docker/common/*'
|
||||
- .github/workflows/build-libtorch-images.yml
|
||||
- .github/actions/binary-docker-build/**
|
||||
|
||||
env:
|
||||
DOCKER_REGISTRY: "docker.io"
|
||||
@ -39,29 +39,123 @@ jobs:
|
||||
curr_branch: ${{ github.head_ref || github.ref_name }}
|
||||
curr_ref_type: ${{ github.ref_type }}
|
||||
|
||||
build:
|
||||
build-docker-cuda:
|
||||
environment: ${{ (github.ref == 'refs/heads/main' || startsWith(github.event.ref, 'refs/tags/v')) && 'docker-build' || '' }}
|
||||
needs: get-label-type
|
||||
runs-on: ${{ needs.get-label-type.outputs.label-type }}linux.9xlarge.ephemeral
|
||||
name: libtorch-cxx11-builder:${{ matrix.tag }}
|
||||
runs-on: "${{ needs.get-label-type.outputs.label-type }}linux.9xlarge.ephemeral"
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include: [
|
||||
{ tag: "cuda12.8" },
|
||||
{ tag: "cuda12.6" },
|
||||
{ tag: "cuda12.4" },
|
||||
{ tag: "cuda11.8" },
|
||||
{ tag: "rocm6.3" },
|
||||
{ tag: "rocm6.4" },
|
||||
{ tag: "cpu" },
|
||||
]
|
||||
cuda_version: ["12.8", "12.6", "12.4", "11.8"]
|
||||
env:
|
||||
GPU_ARCH_TYPE: cuda
|
||||
GPU_ARCH_VERSION: ${{ matrix.cuda_version }}
|
||||
steps:
|
||||
- name: Build docker image
|
||||
uses: pytorch/pytorch/.github/actions/binary-docker-build@main
|
||||
- name: Checkout PyTorch
|
||||
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
|
||||
with:
|
||||
docker-image-name: libtorch-cxx11-builder
|
||||
custom-tag-prefix: ${{ matrix.tag }}
|
||||
docker-build-dir: libtorch
|
||||
submodules: false
|
||||
- name: Calculate docker image
|
||||
if: env.WITH_PUSH == 'false'
|
||||
uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
|
||||
with:
|
||||
docker-image-name: libtorch-cxx11-builder-cuda${{matrix.cuda_version}}
|
||||
docker-build-dir: .ci/docker/libtorch
|
||||
always-rebuild: true
|
||||
push: true
|
||||
- name: Authenticate if WITH_PUSH
|
||||
if: env.WITH_PUSH == 'true'
|
||||
env:
|
||||
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
|
||||
DOCKER_ID: ${{ secrets.DOCKER_ID }}
|
||||
run: |
|
||||
if [[ "${WITH_PUSH}" == true ]]; then
|
||||
echo "${DOCKER_TOKEN}" | docker login -u "${DOCKER_ID}" --password-stdin
|
||||
fi
|
||||
- name: Build Docker Image
|
||||
if: env.WITH_PUSH == 'true'
|
||||
uses: nick-fields/retry@v3.0.0
|
||||
with:
|
||||
shell: bash
|
||||
timeout_minutes: 90
|
||||
max_attempts: 3
|
||||
retry_wait_seconds: 90
|
||||
command: |
|
||||
.ci/docker/libtorch/build.sh libtorch-cxx11-builder:cuda${{matrix.cuda_version}}
|
||||
build-docker-rocm:
|
||||
environment: ${{ (github.ref == 'refs/heads/main' || startsWith(github.event.ref, 'refs/tags/v')) && 'docker-build' || '' }}
|
||||
needs: get-label-type
|
||||
runs-on: "${{ needs.get-label-type.outputs.label-type }}linux.9xlarge.ephemeral"
|
||||
strategy:
|
||||
matrix:
|
||||
rocm_version: ["6.2.4", "6.3"]
|
||||
env:
|
||||
GPU_ARCH_TYPE: rocm
|
||||
GPU_ARCH_VERSION: ${{ matrix.rocm_version }}
|
||||
steps:
|
||||
- name: Checkout PyTorch
|
||||
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
|
||||
with:
|
||||
submodules: false
|
||||
- name: Calculate docker image
|
||||
if: env.WITH_PUSH == 'false'
|
||||
uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
|
||||
with:
|
||||
docker-image-name: libtorch-cxx11-builder-rocm${{matrix.rocm_version}}
|
||||
docker-build-dir: .ci/docker/libtorch
|
||||
always-rebuild: true
|
||||
push: true
|
||||
- name: Authenticate if WITH_PUSH
|
||||
if: env.WITH_PUSH == 'true'
|
||||
env:
|
||||
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
|
||||
DOCKER_ID: ${{ secrets.DOCKER_ID }}
|
||||
run: |
|
||||
if [[ "${WITH_PUSH}" == true ]]; then
|
||||
echo "${DOCKER_TOKEN}" | docker login -u "${DOCKER_ID}" --password-stdin
|
||||
fi
|
||||
- name: Build Docker Image
|
||||
if: env.WITH_PUSH == 'true'
|
||||
uses: nick-fields/retry@v3.0.0
|
||||
with:
|
||||
shell: bash
|
||||
timeout_minutes: 90
|
||||
max_attempts: 3
|
||||
retry_wait_seconds: 90
|
||||
command: |
|
||||
.ci/docker/libtorch/build.sh libtorch-cxx11-builder:rocm${{matrix.rocm_version}}
|
||||
build-docker-cpu:
|
||||
environment: ${{ (github.ref == 'refs/heads/main' || startsWith(github.event.ref, 'refs/tags/v')) && 'docker-build' || '' }}
|
||||
needs: get-label-type
|
||||
runs-on: "${{ needs.get-label-type.outputs.label-type }}linux.9xlarge.ephemeral"
|
||||
steps:
|
||||
- name: Checkout PyTorch
|
||||
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
|
||||
with:
|
||||
submodules: false
|
||||
- name: Calculate docker image
|
||||
if: env.WITH_PUSH == 'false'
|
||||
uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
|
||||
with:
|
||||
docker-image-name: libtorch-cxx11-builder-cpu
|
||||
docker-build-dir: .ci/docker/libtorch
|
||||
always-rebuild: true
|
||||
push: true
|
||||
- name: Authenticate if WITH_PUSH
|
||||
if: env.WITH_PUSH == 'true'
|
||||
env:
|
||||
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
|
||||
DOCKER_ID: ${{ secrets.DOCKER_ID }}
|
||||
run: |
|
||||
if [[ "${WITH_PUSH}" == true ]]; then
|
||||
echo "${DOCKER_TOKEN}" | docker login -u "${DOCKER_ID}" --password-stdin
|
||||
fi
|
||||
- name: Build Docker Image
|
||||
if: env.WITH_PUSH == 'true'
|
||||
uses: nick-fields/retry@v3.0.0
|
||||
with:
|
||||
shell: bash
|
||||
timeout_minutes: 90
|
||||
max_attempts: 3
|
||||
retry_wait_seconds: 90
|
||||
command: |
|
||||
.ci/docker/libtorch/build.sh libtorch-cxx11-builder:cpu
|
||||
|
||||
2
.github/workflows/build-magma-rocm-linux.yml
vendored
2
.github/workflows/build-magma-rocm-linux.yml
vendored
@ -34,7 +34,7 @@ jobs:
|
||||
id-token: write
|
||||
strategy:
|
||||
matrix:
|
||||
rocm_version: ["64", "63"]
|
||||
rocm_version: ["63", "624"]
|
||||
steps:
|
||||
- name: Checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
|
||||
2
.github/workflows/build-magma-windows.yml
vendored
2
.github/workflows/build-magma-windows.yml
vendored
@ -22,7 +22,7 @@ jobs:
|
||||
runs-on: windows-2019
|
||||
strategy:
|
||||
matrix:
|
||||
cuda_version: ["128", "126", "118"]
|
||||
cuda_version: ["128", "126", "124", "118"]
|
||||
config: ["Release", "Debug"]
|
||||
env:
|
||||
CUDA_VERSION: ${{ matrix.cuda_version }}
|
||||
|
||||
@ -11,11 +11,15 @@ on:
|
||||
# Release candidate tags look like: v1.11.0-rc1
|
||||
- v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
|
||||
paths:
|
||||
- .ci/docker/**
|
||||
- '.ci/docker/manywheel/*'
|
||||
- '.ci/docker/manywheel/build_scripts/*'
|
||||
- '.ci/docker/common/*'
|
||||
- .github/workflows/build-manywheel-images-s390x.yml
|
||||
pull_request:
|
||||
paths:
|
||||
- .ci/docker/**
|
||||
- '.ci/docker/manywheel/*'
|
||||
- '.ci/docker/manywheel/build_scripts/*'
|
||||
- '.ci/docker/common/*'
|
||||
- .github/workflows/build-manywheel-images-s390x.yml
|
||||
|
||||
|
||||
@ -33,45 +37,26 @@ jobs:
|
||||
if: github.repository_owner == 'pytorch'
|
||||
environment: ${{ (github.ref == 'refs/heads/main' || startsWith(github.event.ref, 'refs/tags/v')) && 'docker-build' || '' }}
|
||||
runs-on: linux.s390x
|
||||
env:
|
||||
GPU_ARCH_TYPE: cpu-s390x
|
||||
steps:
|
||||
- name: Checkout PyTorch
|
||||
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
|
||||
with:
|
||||
submodules: false
|
||||
no-sudo: true
|
||||
|
||||
- name: Build Docker Image
|
||||
run: |
|
||||
.ci/docker/manywheel/build.sh manylinuxs390x-builder:cpu-s390x -t manylinuxs390x-builder:cpu-s390x
|
||||
|
||||
- name: Tag and (if WITH_PUSH) push docker image to docker.io
|
||||
- name: Authenticate if WITH_PUSH
|
||||
if: env.WITH_PUSH == 'true'
|
||||
env:
|
||||
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
|
||||
DOCKER_ID: ${{ secrets.DOCKER_ID }}
|
||||
CREATED_FULL_DOCKER_IMAGE_NAME: manylinuxs390x-builder:cpu-s390x
|
||||
shell: bash
|
||||
run: |
|
||||
set -euox pipefail
|
||||
GITHUB_REF="${GITHUB_REF:-$(git symbolic-ref -q HEAD || git describe --tags --exact-match)}"
|
||||
GIT_BRANCH_NAME="${GITHUB_REF##*/}"
|
||||
GIT_COMMIT_SHA="${GITHUB_SHA:-$(git rev-parse HEAD)}"
|
||||
CI_FOLDER_SHA="$(git rev-parse HEAD:.ci/docker)"
|
||||
|
||||
DOCKER_IMAGE_NAME_PREFIX="docker.io/pytorch/${CREATED_FULL_DOCKER_IMAGE_NAME}"
|
||||
|
||||
docker tag "${CREATED_FULL_DOCKER_IMAGE_NAME}" "${DOCKER_IMAGE_NAME_PREFIX}-${GIT_BRANCH_NAME}"
|
||||
docker tag "${CREATED_FULL_DOCKER_IMAGE_NAME}" "${DOCKER_IMAGE_NAME_PREFIX}-${GIT_COMMIT_SHA}"
|
||||
docker tag "${CREATED_FULL_DOCKER_IMAGE_NAME}" "${DOCKER_IMAGE_NAME_PREFIX}-${CI_FOLDER_SHA}"
|
||||
|
||||
# Pretty sure Github will mask tokens and I'm not sure if it will even be
|
||||
# printed due to pipe, but just in case
|
||||
set +x
|
||||
if [[ "${WITH_PUSH:-false}" == "true" ]]; then
|
||||
if [[ "${WITH_PUSH}" == true ]]; then
|
||||
echo "${DOCKER_TOKEN}" | docker login -u "${DOCKER_ID}" --password-stdin
|
||||
docker push "${DOCKER_IMAGE_NAME_PREFIX}-${GIT_BRANCH_NAME}"
|
||||
docker push "${DOCKER_IMAGE_NAME_PREFIX}-${GIT_COMMIT_SHA}"
|
||||
docker push "${DOCKER_IMAGE_NAME_PREFIX}-${CI_FOLDER_SHA}"
|
||||
fi
|
||||
- name: Build Docker Image
|
||||
run: |
|
||||
.ci/docker/manywheel/build.sh manylinuxs390x-builder:cpu-s390x
|
||||
|
||||
- name: Cleanup docker
|
||||
if: cancelled()
|
||||
|
||||
345
.github/workflows/build-manywheel-images.yml
vendored
345
.github/workflows/build-manywheel-images.yml
vendored
@ -11,14 +11,17 @@ on:
|
||||
# Release candidate tags look like: v1.11.0-rc1
|
||||
- v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
|
||||
paths:
|
||||
- .ci/docker/**
|
||||
- '.ci/docker/common/*'
|
||||
- '.ci/docker/manywheel/*'
|
||||
- '.ci/docker/manywheel/build_scripts/*'
|
||||
- .github/workflows/build-manywheel-images.yml
|
||||
- .github/actions/binary-docker-build/**
|
||||
pull_request:
|
||||
paths:
|
||||
- .ci/docker/**
|
||||
- '.ci/docker/common/*'
|
||||
- '.ci/docker/manywheel/*'
|
||||
- '.ci/docker/manywheel/build_scripts/*'
|
||||
- .github/workflows/build-manywheel-images.yml
|
||||
- .github/actions/binary-docker-build/**
|
||||
|
||||
|
||||
env:
|
||||
DOCKER_REGISTRY: "docker.io"
|
||||
@ -40,34 +43,322 @@ jobs:
|
||||
curr_branch: ${{ github.head_ref || github.ref_name }}
|
||||
curr_ref_type: ${{ github.ref_type }}
|
||||
|
||||
build:
|
||||
build-docker-cuda-manylinux_2_28:
|
||||
environment: ${{ (github.ref == 'refs/heads/main' || startsWith(github.event.ref, 'refs/tags/v')) && 'docker-build' || '' }}
|
||||
needs: get-label-type
|
||||
runs-on: "${{ needs.get-label-type.outputs.label-type }}linux.9xlarge.ephemeral"
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include: [
|
||||
{ name: "manylinux2_28-builder", tag: "cuda12.8", runner: "linux.9xlarge.ephemeral" },
|
||||
{ name: "manylinux2_28-builder", tag: "cuda12.6", runner: "linux.9xlarge.ephemeral" },
|
||||
{ name: "manylinux2_28-builder", tag: "cuda12.4", runner: "linux.9xlarge.ephemeral" },
|
||||
{ name: "manylinux2_28-builder", tag: "cuda11.8", runner: "linux.9xlarge.ephemeral" },
|
||||
{ name: "manylinuxaarch64-builder", tag: "cuda12.8", runner: "linux.arm64.2xlarge.ephemeral" },
|
||||
{ name: "manylinux2_28-builder", tag: "rocm6.3", runner: "linux.9xlarge.ephemeral" },
|
||||
{ name: "manylinux2_28-builder", tag: "rocm6.4", runner: "linux.9xlarge.ephemeral" },
|
||||
{ name: "manylinux2_28-builder", tag: "cpu", runner: "linux.9xlarge.ephemeral" },
|
||||
{ name: "manylinuxaarch64-builder", tag: "cpu-aarch64", runner: "linux.arm64.2xlarge.ephemeral" },
|
||||
{ name: "manylinux2_28_aarch64-builder", tag: "cpu-aarch64", runner: "linux.arm64.2xlarge.ephemeral" },
|
||||
{ name: "manylinuxcxx11-abi-builder", tag: "cpu-cxx11-abi", runner: "linux.9xlarge.ephemeral" },
|
||||
{ name: "manylinux2_28-builder", tag: "xpu", runner: "linux.9xlarge.ephemeral" },
|
||||
]
|
||||
runs-on: ${{ needs.get-label-type.outputs.label-type }}${{ matrix.runner }}
|
||||
name: ${{ matrix.name }}:${{ matrix.tag }}
|
||||
cuda_version: ["12.8", "12.6", "12.4", "11.8"]
|
||||
env:
|
||||
GPU_ARCH_TYPE: cuda-manylinux_2_28
|
||||
GPU_ARCH_VERSION: ${{ matrix.cuda_version }}
|
||||
steps:
|
||||
- name: Build docker image
|
||||
uses: pytorch/pytorch/.github/actions/binary-docker-build@main
|
||||
- name: Purge tools folder (free space for build)
|
||||
run: rm -rf /opt/hostedtoolcache
|
||||
- name: Checkout PyTorch
|
||||
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
|
||||
with:
|
||||
docker-image-name: ${{ matrix.name }}
|
||||
custom-tag-prefix: ${{ matrix.tag }}
|
||||
docker-build-dir: manywheel
|
||||
submodules: false
|
||||
- name: Calculate docker image
|
||||
if: env.WITH_PUSH == 'false'
|
||||
uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
|
||||
with:
|
||||
docker-image-name: manylinux2_28-builder-cuda${{matrix.cuda_version}}
|
||||
docker-build-dir: .ci/docker/manywheel
|
||||
always-rebuild: true
|
||||
push: true
|
||||
- name: Authenticate if WITH_PUSH
|
||||
if: env.WITH_PUSH == 'true'
|
||||
env:
|
||||
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
|
||||
DOCKER_ID: ${{ secrets.DOCKER_ID }}
|
||||
run: |
|
||||
if [[ "${WITH_PUSH}" == true ]]; then
|
||||
echo "${DOCKER_TOKEN}" | docker login -u "${DOCKER_ID}" --password-stdin
|
||||
fi
|
||||
- name: Build Docker Image
|
||||
if: env.WITH_PUSH == 'true'
|
||||
uses: nick-fields/retry@v3.0.0
|
||||
with:
|
||||
shell: bash
|
||||
timeout_minutes: 90
|
||||
max_attempts: 3
|
||||
retry_wait_seconds: 90
|
||||
command: |
|
||||
.ci/docker/manywheel/build.sh manylinux2_28-builder:cuda${{matrix.cuda_version}}
|
||||
build-docker-cuda-aarch64:
environment: ${{ (github.ref == 'refs/heads/main' || startsWith(github.event.ref, 'refs/tags/v')) && 'docker-build' || '' }}
needs: get-label-type
runs-on: "${{ needs.get-label-type.outputs.label-type }}linux.arm64.2xlarge.ephemeral"
strategy:
matrix:
cuda_version: ["12.8"]
env:
GPU_ARCH_TYPE: cuda-aarch64
GPU_ARCH_VERSION: ${{ matrix.cuda_version }}
steps:
- name: Checkout PyTorch
uses: actions/checkout@v4
- name: Calculate docker image
if: env.WITH_PUSH == 'false'
uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
with:
docker-image-name: manylinuxaarch64-builder-cuda${{matrix.cuda_version}}
docker-build-dir: .ci/docker/manywheel
always-rebuild: true
push: true
- name: Authenticate if WITH_PUSH
if: env.WITH_PUSH == 'true'
env:
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
DOCKER_ID: ${{ secrets.DOCKER_ID }}
run: |
if [[ "${WITH_PUSH}" == true ]]; then
echo "${DOCKER_TOKEN}" | docker login -u "${DOCKER_ID}" --password-stdin
fi
- name: Build Docker Image
if: env.WITH_PUSH == 'true'
uses: nick-fields/retry@v3.0.0
with:
shell: bash
timeout_minutes: 90
max_attempts: 3
retry_wait_seconds: 90
command: |
.ci/docker/manywheel/build.sh manylinuxaarch64-builder:cuda${{matrix.cuda_version}}
build-docker-rocm-manylinux_2_28:
environment: ${{ (github.ref == 'refs/heads/main' || startsWith(github.event.ref, 'refs/tags/v')) && 'docker-build' || '' }}
needs: get-label-type
runs-on: "${{ needs.get-label-type.outputs.label-type }}linux.9xlarge.ephemeral"
strategy:
matrix:
rocm_version: ["6.2.4", "6.3"]
env:
GPU_ARCH_TYPE: rocm-manylinux_2_28
GPU_ARCH_VERSION: ${{ matrix.rocm_version }}
steps:
- name: Checkout PyTorch
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
with:
submodules: false
- name: Calculate docker image
if: env.WITH_PUSH == 'false'
uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
with:
docker-image-name: manylinux2_28-builder-rocm${{matrix.rocm_version}}
docker-build-dir: .ci/docker/manywheel
always-rebuild: true
push: true
- name: Authenticate if WITH_PUSH
if: env.WITH_PUSH == 'true'
env:
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
DOCKER_ID: ${{ secrets.DOCKER_ID }}
run: |
if [[ "${WITH_PUSH}" == true ]]; then
echo "${DOCKER_TOKEN}" | docker login -u "${DOCKER_ID}" --password-stdin
fi
- name: Build Docker Image
if: env.WITH_PUSH == 'true'
uses: nick-fields/retry@v3.0.0
with:
shell: bash
timeout_minutes: 90
max_attempts: 3
retry_wait_seconds: 90
command: |
.ci/docker/manywheel/build.sh manylinux2_28-builder:rocm${{matrix.rocm_version}}
build-docker-cpu-manylinux_2_28:
environment: ${{ (github.ref == 'refs/heads/main' || startsWith(github.event.ref, 'refs/tags/v')) && 'docker-build' || '' }}
needs: get-label-type
runs-on: "${{ needs.get-label-type.outputs.label-type }}linux.9xlarge.ephemeral"
env:
GPU_ARCH_TYPE: cpu-manylinux_2_28
steps:
- name: Checkout PyTorch
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
with:
submodules: false
- name: Calculate docker image
if: env.WITH_PUSH == 'false'
uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
with:
docker-image-name: manylinux2_28-builder-cpu
docker-build-dir: .ci/docker/manywheel
always-rebuild: true
push: true
- name: Authenticate if WITH_PUSH
if: env.WITH_PUSH == 'true'
env:
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
DOCKER_ID: ${{ secrets.DOCKER_ID }}
run: |
if [[ "${WITH_PUSH}" == true ]]; then
echo "${DOCKER_TOKEN}" | docker login -u "${DOCKER_ID}" --password-stdin
fi
- name: Build Docker Image
if: env.WITH_PUSH == 'true'
uses: nick-fields/retry@v3.0.0
with:
shell: bash
timeout_minutes: 90
max_attempts: 3
retry_wait_seconds: 90
command: |
.ci/docker/manywheel/build.sh manylinux2_28-builder:cpu
build-docker-cpu-aarch64:
environment: ${{ (github.ref == 'refs/heads/main' || startsWith(github.event.ref, 'refs/tags/v')) && 'docker-build' || '' }}
needs: get-label-type
runs-on: "${{ needs.get-label-type.outputs.label-type }}linux.arm64.2xlarge.ephemeral"
env:
GPU_ARCH_TYPE: cpu-aarch64
steps:
- name: Checkout PyTorch
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
with:
submodules: false
- name: Calculate docker image
if: env.WITH_PUSH == 'false'
uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
with:
docker-image-name: manylinuxaarch64-builder-cpu-aarch64
docker-build-dir: .ci/docker/manywheel
always-rebuild: true
push: true
- name: Authenticate if WITH_PUSH
if: env.WITH_PUSH == 'true'
env:
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
DOCKER_ID: ${{ secrets.DOCKER_ID }}
run: |
if [[ "${WITH_PUSH}" == true ]]; then
echo "${DOCKER_TOKEN}" | docker login -u "${DOCKER_ID}" --password-stdin
fi
- name: Build Docker Image
if: env.WITH_PUSH == 'true'
uses: nick-fields/retry@v3.0.0
with:
shell: bash
timeout_minutes: 90
max_attempts: 3
retry_wait_seconds: 90
command: |
.ci/docker/manywheel/build.sh manylinuxaarch64-builder:cpu-aarch64
build-docker-cpu-aarch64-2_28:
environment: ${{ (github.ref == 'refs/heads/main' || startsWith(github.event.ref, 'refs/tags/v')) && 'docker-build' || '' }}
needs: get-label-type
runs-on: "${{ needs.get-label-type.outputs.label-type }}linux.arm64.2xlarge.ephemeral"
env:
GPU_ARCH_TYPE: cpu-aarch64-2_28
steps:
- name: Checkout PyTorch
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
with:
submodules: false
- name: Calculate docker image
if: env.WITH_PUSH == 'false'
uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
with:
docker-image-name: manylinux2_28_aarch64-builder-cpu-aarch64
docker-build-dir: .ci/docker/manywheel
always-rebuild: true
push: true
- name: Authenticate if WITH_PUSH
if: env.WITH_PUSH == 'true'
env:
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
DOCKER_ID: ${{ secrets.DOCKER_ID }}
run: |
if [[ "${WITH_PUSH}" == true ]]; then
echo "${DOCKER_TOKEN}" | docker login -u "${DOCKER_ID}" --password-stdin
fi
- name: Build Docker Image
if: env.WITH_PUSH == 'true'
env:
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
DOCKER_ID: ${{ secrets.DOCKER_ID }}
uses: nick-fields/retry@v3.0.0
with:
shell: bash
timeout_minutes: 90
max_attempts: 3
retry_wait_seconds: 90
command: |
.ci/docker/manywheel/build.sh manylinux2_28_aarch64-builder:cpu-aarch64
build-docker-cpu-cxx11-abi:
environment: ${{ (github.ref == 'refs/heads/main' || startsWith(github.event.ref, 'refs/tags/v')) && 'docker-build' || '' }}
needs: get-label-type
runs-on: "${{ needs.get-label-type.outputs.label-type }}linux.9xlarge.ephemeral"
env:
GPU_ARCH_TYPE: cpu-cxx11-abi
steps:
- name: Checkout PyTorch
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
with:
submodules: false
- name: Calculate docker image
if: env.WITH_PUSH == 'false'
uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
with:
docker-image-name: manylinuxcxx11-abi-builder-cpu-cxx11-abi
docker-build-dir: .ci/docker/manywheel
always-rebuild: true
push: true
- name: Authenticate if WITH_PUSH
if: env.WITH_PUSH == 'true'
env:
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
DOCKER_ID: ${{ secrets.DOCKER_ID }}
run: |
if [[ "${WITH_PUSH}" == true ]]; then
echo "${DOCKER_TOKEN}" | docker login -u "${DOCKER_ID}" --password-stdin
fi
- name: Build Docker Image
if: env.WITH_PUSH == 'true'
uses: nick-fields/retry@v3.0.0
with:
shell: bash
timeout_minutes: 90
max_attempts: 3
retry_wait_seconds: 90
command: |
.ci/docker/manywheel/build.sh manylinuxcxx11-abi-builder:cpu-cxx11-abi
build-docker-xpu:
environment: ${{ (github.ref == 'refs/heads/main' || startsWith(github.event.ref, 'refs/tags/v')) && 'docker-build' || '' }}
needs: get-label-type
runs-on: "${{ needs.get-label-type.outputs.label-type }}linux.9xlarge.ephemeral"
env:
GPU_ARCH_TYPE: xpu
steps:
- name: Checkout PyTorch
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
with:
submodules: false
- name: Calculate docker image
if: env.WITH_PUSH == 'false'
uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
with:
docker-image-name: manylinux2_28-builder-xpu
docker-build-dir: .ci/docker/manywheel
always-rebuild: true
push: true
- name: Authenticate if WITH_PUSH
if: env.WITH_PUSH == 'true'
env:
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
DOCKER_ID: ${{ secrets.DOCKER_ID }}
run: |
if [[ "${WITH_PUSH}" == true ]]; then
echo "${DOCKER_TOKEN}" | docker login -u "${DOCKER_ID}" --password-stdin
fi
- name: Build Docker Image
if: env.WITH_PUSH == 'true'
uses: nick-fields/retry@v3.0.0
with:
shell: bash
timeout_minutes: 90
max_attempts: 3
retry_wait_seconds: 90
command: |
.ci/docker/manywheel/build.sh manylinux2_28-builder:xpu
9 .github/workflows/build-triton-wheel.yml vendored
@ -12,8 +12,6 @@ on:
- .github/workflows/build-triton-wheel.yml
- .github/scripts/build_triton_wheel.py
- .github/ci_commit_pins/triton.txt
- .github/scripts/windows/install_vs2022.ps1
- .github/scripts/windows/build_triton.bat
- .ci/docker/ci_commit_pins/triton.txt
- .ci/docker/ci_commit_pins/triton-xpu.txt
workflow_dispatch:
@ -22,8 +20,6 @@ on:
- .github/workflows/build-triton-wheel.yml
- .github/scripts/build_triton_wheel.py
- .github/ci_commit_pins/triton.txt
- .github/scripts/windows/install_vs2022.ps1
- .github/scripts/windows/build_triton.bat
- .ci/docker/ci_commit_pins/triton.txt
- .ci/docker/ci_commit_pins/triton-xpu.txt

@ -54,7 +50,7 @@ jobs:
docker-image: ["pytorch/manylinux2_28-builder:cpu"]
include:
- device: "rocm"
rocm_version: "6.4"
rocm_version: "6.3"
runs_on: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge"
- device: "cuda"
rocm_version: ""
@ -138,7 +134,7 @@ jobs:
fi

docker exec -t "${container_name}" yum install -y zlib-devel zip
docker exec -t "${container_name}" "${PYTHON_EXECUTABLE}" -m pip install -U setuptools==78.1.0 pybind11==2.13.1 auditwheel wheel
docker exec -t "${container_name}" "${PYTHON_EXECUTABLE}" -m pip install -U setuptools==67.4.0 pybind11==2.13.1 auditwheel wheel

if [[ ("${{ matrix.device }}" == "cuda" || "${{ matrix.device }}" == "rocm" || "${{ matrix.device }}" == "aarch64" ) ]]; then
# With this install, it gets clang 16.0.6.
@ -248,6 +244,7 @@ jobs:
.github/scripts/windows/build_triton.bat
mkdir -p "${RUNNER_TEMP}/artifacts/"
mv ./*.whl "${RUNNER_TEMP}/artifacts/"

- uses: actions/upload-artifact@v4.4.0
with:
name: pytorch-triton-wheel-${{ matrix.py_vers }}-${{ matrix.device }}
2 .github/workflows/docker-builds.yml vendored
@ -79,7 +79,7 @@ jobs:
]
include:
- docker-image-name: pytorch-linux-jammy-aarch64-py3.10-gcc11
runner: linux.arm64.m7g.4xlarge
runner: linux.arm64.2xlarge
- docker-image-name: pytorch-linux-jammy-aarch64-py3.10-gcc11-inductor-benchmarks
runner: linux.arm64.m7g.4xlarge
timeout-minutes: 600
184 .github/workflows/generated-linux-binary-libtorch-nightly.yml generated vendored
@ -301,6 +301,98 @@ jobs:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
uses: ./.github/workflows/_binary-upload.yml
|
||||
|
||||
libtorch-rocm6_2_4-shared-with-deps-release-build:
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
uses: ./.github/workflows/_binary-build-linux.yml
|
||||
needs: get-label-type
|
||||
with:
|
||||
PYTORCH_ROOT: /pytorch
|
||||
PACKAGE_TYPE: libtorch
|
||||
# TODO: This is a legacy variable that we eventually want to get rid of in
|
||||
# favor of GPU_ARCH_VERSION
|
||||
DESIRED_CUDA: rocm6.2.4
|
||||
GPU_ARCH_VERSION: 6.2.4
|
||||
GPU_ARCH_TYPE: rocm
|
||||
DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.2.4-main
|
||||
LIBTORCH_CONFIG: release
|
||||
LIBTORCH_VARIANT: shared-with-deps
|
||||
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
|
||||
build_name: libtorch-rocm6_2_4-shared-with-deps-release
|
||||
build_environment: linux-binary-libtorch
|
||||
secrets:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
libtorch-rocm6_2_4-shared-with-deps-release-test: # Testing
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
needs:
|
||||
- libtorch-rocm6_2_4-shared-with-deps-release-build
|
||||
- get-label-type
|
||||
runs-on: linux.rocm.gpu
|
||||
timeout-minutes: 240
|
||||
env:
|
||||
PYTORCH_ROOT: /pytorch
|
||||
PACKAGE_TYPE: libtorch
|
||||
# TODO: This is a legacy variable that we eventually want to get rid of in
|
||||
# favor of GPU_ARCH_VERSION
|
||||
DESIRED_CUDA: rocm6.2.4
|
||||
GPU_ARCH_VERSION: 6.2.4
|
||||
GPU_ARCH_TYPE: rocm
|
||||
SKIP_ALL_TESTS: 1
|
||||
DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.2.4-main
|
||||
LIBTORCH_CONFIG: release
|
||||
LIBTORCH_VARIANT: shared-with-deps
|
||||
steps:
|
||||
- name: Setup ROCm
|
||||
uses: ./.github/actions/setup-rocm
|
||||
- uses: actions/download-artifact@v4.1.7
|
||||
name: Download Build Artifacts
|
||||
with:
|
||||
name: libtorch-rocm6_2_4-shared-with-deps-release
|
||||
path: "${{ runner.temp }}/artifacts/"
|
||||
- name: Checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
show-progress: false
|
||||
- name: Clean PyTorch checkout
|
||||
run: |
|
||||
# Remove any artifacts from the previous checkouts
|
||||
git clean -fxd
|
||||
working-directory: pytorch
|
||||
- name: ROCm set GPU_FLAG
|
||||
run: |
|
||||
echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri --group-add video --group-add daemon" >> "${GITHUB_ENV}"
|
||||
- name: Pull Docker image
|
||||
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
|
||||
with:
|
||||
docker-image: pytorch/libtorch-cxx11-builder:rocm6.2.4-main
|
||||
- name: Test Pytorch binary
|
||||
uses: ./pytorch/.github/actions/test-pytorch-binary
|
||||
- name: Teardown ROCm
|
||||
uses: ./.github/actions/teardown-rocm
|
||||
libtorch-rocm6_2_4-shared-with-deps-release-upload: # Uploading
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
permissions:
|
||||
id-token: write
|
||||
contents: read
|
||||
needs: libtorch-rocm6_2_4-shared-with-deps-release-test
|
||||
with:
|
||||
PYTORCH_ROOT: /pytorch
|
||||
PACKAGE_TYPE: libtorch
|
||||
# TODO: This is a legacy variable that we eventually want to get rid of in
|
||||
# favor of GPU_ARCH_VERSION
|
||||
DESIRED_CUDA: rocm6.2.4
|
||||
GPU_ARCH_VERSION: 6.2.4
|
||||
GPU_ARCH_TYPE: rocm
|
||||
DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.2.4-main
|
||||
LIBTORCH_CONFIG: release
|
||||
LIBTORCH_VARIANT: shared-with-deps
|
||||
build_name: libtorch-rocm6_2_4-shared-with-deps-release
|
||||
secrets:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
uses: ./.github/workflows/_binary-upload.yml
|
||||
|
||||
libtorch-rocm6_3-shared-with-deps-release-build:
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
uses: ./.github/workflows/_binary-build-linux.yml
|
||||
@ -392,95 +484,3 @@ jobs:
|
||||
secrets:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
uses: ./.github/workflows/_binary-upload.yml
|
||||
|
||||
libtorch-rocm6_4-shared-with-deps-release-build:
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
uses: ./.github/workflows/_binary-build-linux.yml
|
||||
needs: get-label-type
|
||||
with:
|
||||
PYTORCH_ROOT: /pytorch
|
||||
PACKAGE_TYPE: libtorch
|
||||
# TODO: This is a legacy variable that we eventually want to get rid of in
|
||||
# favor of GPU_ARCH_VERSION
|
||||
DESIRED_CUDA: rocm6.4
|
||||
GPU_ARCH_VERSION: 6.4
|
||||
GPU_ARCH_TYPE: rocm
|
||||
DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.4-main
|
||||
LIBTORCH_CONFIG: release
|
||||
LIBTORCH_VARIANT: shared-with-deps
|
||||
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
|
||||
build_name: libtorch-rocm6_4-shared-with-deps-release
|
||||
build_environment: linux-binary-libtorch
|
||||
secrets:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
libtorch-rocm6_4-shared-with-deps-release-test: # Testing
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
needs:
|
||||
- libtorch-rocm6_4-shared-with-deps-release-build
|
||||
- get-label-type
|
||||
runs-on: linux.rocm.gpu
|
||||
timeout-minutes: 240
|
||||
env:
|
||||
PYTORCH_ROOT: /pytorch
|
||||
PACKAGE_TYPE: libtorch
|
||||
# TODO: This is a legacy variable that we eventually want to get rid of in
|
||||
# favor of GPU_ARCH_VERSION
|
||||
DESIRED_CUDA: rocm6.4
|
||||
GPU_ARCH_VERSION: 6.4
|
||||
GPU_ARCH_TYPE: rocm
|
||||
SKIP_ALL_TESTS: 1
|
||||
DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.4-main
|
||||
LIBTORCH_CONFIG: release
|
||||
LIBTORCH_VARIANT: shared-with-deps
|
||||
steps:
|
||||
- name: Setup ROCm
|
||||
uses: ./.github/actions/setup-rocm
|
||||
- uses: actions/download-artifact@v4.1.7
|
||||
name: Download Build Artifacts
|
||||
with:
|
||||
name: libtorch-rocm6_4-shared-with-deps-release
|
||||
path: "${{ runner.temp }}/artifacts/"
|
||||
- name: Checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
show-progress: false
|
||||
- name: Clean PyTorch checkout
|
||||
run: |
|
||||
# Remove any artifacts from the previous checkouts
|
||||
git clean -fxd
|
||||
working-directory: pytorch
|
||||
- name: ROCm set GPU_FLAG
|
||||
run: |
|
||||
echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri --group-add video --group-add daemon" >> "${GITHUB_ENV}"
|
||||
- name: Pull Docker image
|
||||
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
|
||||
with:
|
||||
docker-image: pytorch/libtorch-cxx11-builder:rocm6.4-main
|
||||
- name: Test Pytorch binary
|
||||
uses: ./pytorch/.github/actions/test-pytorch-binary
|
||||
- name: Teardown ROCm
|
||||
uses: ./.github/actions/teardown-rocm
|
||||
libtorch-rocm6_4-shared-with-deps-release-upload: # Uploading
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
permissions:
|
||||
id-token: write
|
||||
contents: read
|
||||
needs: libtorch-rocm6_4-shared-with-deps-release-test
|
||||
with:
|
||||
PYTORCH_ROOT: /pytorch
|
||||
PACKAGE_TYPE: libtorch
|
||||
# TODO: This is a legacy variable that we eventually want to get rid of in
|
||||
# favor of GPU_ARCH_VERSION
|
||||
DESIRED_CUDA: rocm6.4
|
||||
GPU_ARCH_VERSION: 6.4
|
||||
GPU_ARCH_TYPE: rocm
|
||||
DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.4-main
|
||||
LIBTORCH_CONFIG: release
|
||||
LIBTORCH_VARIANT: shared-with-deps
|
||||
build_name: libtorch-rocm6_4-shared-with-deps-release
|
||||
secrets:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
uses: ./.github/workflows/_binary-upload.yml
|
||||
|
||||
1104 .github/workflows/generated-linux-binary-manywheel-nightly.yml generated vendored
File diff suppressed because it is too large
30 .github/workflows/generated-linux-s390x-binary-manywheel-nightly.yml generated vendored
@ -55,7 +55,7 @@ jobs:
|
||||
# favor of GPU_ARCH_VERSION
|
||||
DESIRED_CUDA: cpu
|
||||
GPU_ARCH_TYPE: cpu-s390x
|
||||
DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x
|
||||
DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
|
||||
use_split_build: False
|
||||
DESIRED_PYTHON: "3.9"
|
||||
runs_on: linux.s390x
|
||||
@ -79,7 +79,7 @@ jobs:
|
||||
# favor of GPU_ARCH_VERSION
|
||||
DESIRED_CUDA: cpu
|
||||
GPU_ARCH_TYPE: cpu-s390x
|
||||
DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x
|
||||
DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
|
||||
use_split_build: False
|
||||
DESIRED_PYTHON: "3.9"
|
||||
build_name: manywheel-py3_9-cpu-s390x
|
||||
@ -101,7 +101,7 @@ jobs:
|
||||
# favor of GPU_ARCH_VERSION
|
||||
DESIRED_CUDA: cpu
|
||||
GPU_ARCH_TYPE: cpu-s390x
|
||||
DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x
|
||||
DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
|
||||
use_split_build: False
|
||||
DESIRED_PYTHON: "3.9"
|
||||
build_name: manywheel-py3_9-cpu-s390x
|
||||
@ -120,7 +120,7 @@ jobs:
|
||||
# favor of GPU_ARCH_VERSION
|
||||
DESIRED_CUDA: cpu
|
||||
GPU_ARCH_TYPE: cpu-s390x
|
||||
DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x
|
||||
DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
|
||||
use_split_build: False
|
||||
DESIRED_PYTHON: "3.10"
|
||||
runs_on: linux.s390x
|
||||
@ -144,7 +144,7 @@ jobs:
|
||||
# favor of GPU_ARCH_VERSION
|
||||
DESIRED_CUDA: cpu
|
||||
GPU_ARCH_TYPE: cpu-s390x
|
||||
DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x
|
||||
DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
|
||||
use_split_build: False
|
||||
DESIRED_PYTHON: "3.10"
|
||||
build_name: manywheel-py3_10-cpu-s390x
|
||||
@ -166,7 +166,7 @@ jobs:
|
||||
# favor of GPU_ARCH_VERSION
|
||||
DESIRED_CUDA: cpu
|
||||
GPU_ARCH_TYPE: cpu-s390x
|
||||
DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x
|
||||
DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
|
||||
use_split_build: False
|
||||
DESIRED_PYTHON: "3.10"
|
||||
build_name: manywheel-py3_10-cpu-s390x
|
||||
@ -185,7 +185,7 @@ jobs:
|
||||
# favor of GPU_ARCH_VERSION
|
||||
DESIRED_CUDA: cpu
|
||||
GPU_ARCH_TYPE: cpu-s390x
|
||||
DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x
|
||||
DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
|
||||
use_split_build: False
|
||||
DESIRED_PYTHON: "3.11"
|
||||
runs_on: linux.s390x
|
||||
@ -209,7 +209,7 @@ jobs:
|
||||
# favor of GPU_ARCH_VERSION
|
||||
DESIRED_CUDA: cpu
|
||||
GPU_ARCH_TYPE: cpu-s390x
|
||||
DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x
|
||||
DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
|
||||
use_split_build: False
|
||||
DESIRED_PYTHON: "3.11"
|
||||
build_name: manywheel-py3_11-cpu-s390x
|
||||
@ -231,7 +231,7 @@ jobs:
|
||||
# favor of GPU_ARCH_VERSION
|
||||
DESIRED_CUDA: cpu
|
||||
GPU_ARCH_TYPE: cpu-s390x
|
||||
DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x
|
||||
DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
|
||||
use_split_build: False
|
||||
DESIRED_PYTHON: "3.11"
|
||||
build_name: manywheel-py3_11-cpu-s390x
|
||||
@ -250,7 +250,7 @@ jobs:
|
||||
# favor of GPU_ARCH_VERSION
|
||||
DESIRED_CUDA: cpu
|
||||
GPU_ARCH_TYPE: cpu-s390x
|
||||
DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x
|
||||
DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
|
||||
use_split_build: False
|
||||
DESIRED_PYTHON: "3.12"
|
||||
runs_on: linux.s390x
|
||||
@ -274,7 +274,7 @@ jobs:
|
||||
# favor of GPU_ARCH_VERSION
|
||||
DESIRED_CUDA: cpu
|
||||
GPU_ARCH_TYPE: cpu-s390x
|
||||
DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x
|
||||
DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
|
||||
use_split_build: False
|
||||
DESIRED_PYTHON: "3.12"
|
||||
build_name: manywheel-py3_12-cpu-s390x
|
||||
@ -296,7 +296,7 @@ jobs:
|
||||
# favor of GPU_ARCH_VERSION
|
||||
DESIRED_CUDA: cpu
|
||||
GPU_ARCH_TYPE: cpu-s390x
|
||||
DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x
|
||||
DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
|
||||
use_split_build: False
|
||||
DESIRED_PYTHON: "3.12"
|
||||
build_name: manywheel-py3_12-cpu-s390x
|
||||
@ -315,7 +315,7 @@ jobs:
|
||||
# favor of GPU_ARCH_VERSION
|
||||
DESIRED_CUDA: cpu
|
||||
GPU_ARCH_TYPE: cpu-s390x
|
||||
DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x
|
||||
DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
|
||||
use_split_build: False
|
||||
DESIRED_PYTHON: "3.13"
|
||||
runs_on: linux.s390x
|
||||
@ -339,7 +339,7 @@ jobs:
|
||||
# favor of GPU_ARCH_VERSION
|
||||
DESIRED_CUDA: cpu
|
||||
GPU_ARCH_TYPE: cpu-s390x
|
||||
DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x
|
||||
DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
|
||||
use_split_build: False
|
||||
DESIRED_PYTHON: "3.13"
|
||||
build_name: manywheel-py3_13-cpu-s390x
|
||||
@ -361,7 +361,7 @@ jobs:
|
||||
# favor of GPU_ARCH_VERSION
|
||||
DESIRED_CUDA: cpu
|
||||
GPU_ARCH_TYPE: cpu-s390x
|
||||
DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x
|
||||
DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
|
||||
use_split_build: False
|
||||
DESIRED_PYTHON: "3.13"
|
||||
build_name: manywheel-py3_13-cpu-s390x
|
||||
|
||||
@ -1,12 +1,11 @@
|
||||
# @generated DO NOT EDIT MANUALLY
|
||||
|
||||
# Template is at: .github/templates/windows_binary_build_workflow.yml.j2
|
||||
# Template is at: .github/templates/windows_arm64_binary_build_workflow.yml.j2
|
||||
# Generation script: .github/scripts/generate_ci_workflows.py
|
||||
name: windows-arm64-binary-libtorch-debug
|
||||
name: windows-arm64-binary-libtorch
|
||||
|
||||
on:
|
||||
push:
|
||||
# NOTE: Meta Employees can trigger new nightlies using: https://fburl.com/trigger_pytorch_nightly_build
|
||||
branches:
|
||||
- nightly
|
||||
tags:
|
||||
@ -18,24 +17,18 @@ on:
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
# Needed for conda builds
|
||||
ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
|
||||
AWS_DEFAULT_REGION: us-east-1
|
||||
BUILD_ENVIRONMENT: windows-arm64-binary-libtorch-debug
|
||||
BUILD_ENVIRONMENT: windows-arm64-binary-libtorch
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||
SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
|
||||
SKIP_ALL_TESTS: 1
|
||||
OS: windows-arm64
|
||||
PYTORCH_ROOT: /pytorch
|
||||
DOWNLOADS_DIR: c:\temp\downloads
|
||||
DEPENDENCIES_DIR: c:\temp\dependencies
|
||||
ENABLE_APL: 1
|
||||
ENABLE_OPENBLAS: 0
|
||||
MSVC_VERSION : 14.42
|
||||
concurrency:
|
||||
group: windows-arm64-binary-libtorch-debug-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
|
||||
cancel-in-progress: true
|
||||
AWS_DEFAULT_REGION: us-east-1
|
||||
|
||||
jobs:
|
||||
get-label-type:
|
||||
@ -51,7 +44,7 @@ jobs:
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
needs: get-label-type
|
||||
runs-on: "windows-11-arm64"
|
||||
timeout-minutes: 300
|
||||
timeout-minutes: 240
|
||||
env:
|
||||
PYTORCH_ROOT: ${{ github.workspace }}/pytorch
|
||||
PACKAGE_TYPE: libtorch
|
||||
@ -66,6 +59,9 @@ jobs:
|
||||
# without this value pip does not get installed for some reason
|
||||
DESIRED_PYTHON: "3.9"
|
||||
steps:
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
- name: Populate binary env
|
||||
shell: cmd
|
||||
run: |
|
||||
@ -121,11 +117,11 @@ jobs:
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
"${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
|
||||
"pytorch/.circleci/scripts/binary_populate_env.sh"
|
||||
- name: Build PyTorch binary
|
||||
shell: bash
|
||||
run: |
|
||||
"${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
|
||||
"pytorch/.circleci/scripts/binary_windows_arm64_build.sh"
|
||||
- uses: actions/upload-artifact@v4.4.0
|
||||
if: always()
|
||||
with:
|
||||
@ -139,7 +135,7 @@ jobs:
|
||||
- libtorch-cpu-shared-with-deps-debug-build
|
||||
- get-label-type
|
||||
runs-on: "windows-11-arm64"
|
||||
timeout-minutes: 300
|
||||
timeout-minutes: 240
|
||||
env:
|
||||
PYTORCH_ROOT: ${{ github.workspace }}/pytorch
|
||||
PACKAGE_TYPE: libtorch
|
||||
@ -154,17 +150,25 @@ jobs:
|
||||
# without this value pip does not get installed for some reason
|
||||
DESIRED_PYTHON: "3.9"
|
||||
steps:
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
- name: Populate binary env
|
||||
shell: cmd
|
||||
run: |
|
||||
echo BINARY_ENV_FILE=%RUNNER_TEMP%/env>> %GITHUB_ENV%
|
||||
echo PYTORCH_FINAL_PACKAGE_DIR=%RUNNER_TEMP%/artifacts>> %GITHUB_ENV%
|
||||
echo WIN_PACKAGE_WORK_DIR=%RUNNER_TEMP%>> %GITHUB_ENV%
|
||||
- uses: actions/download-artifact@v4.1.7
|
||||
name: Download Build Artifacts
|
||||
with:
|
||||
name: libtorch-cpu-shared-with-deps-debug
|
||||
path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
|
||||
- name: Git checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
path: "pytorch"
|
||||
- name: Populate binary env
|
||||
- name: Bootstrap Git
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_git.bat"
|
||||
@ -193,19 +197,14 @@ jobs:
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_rust.bat"
|
||||
- uses: actions/download-artifact@v4.1.7
|
||||
name: Download Build Artifacts
|
||||
with:
|
||||
name: libtorch-cpu-shared-with-deps-debug
|
||||
path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
"${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
|
||||
"pytorch/.circleci/scripts/binary_populate_env.sh"
|
||||
- name: Test PyTorch binary
|
||||
shell: bash
|
||||
run: |
|
||||
"${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
|
||||
"pytorch/.circleci/scripts/binary_windows_arm64_test.sh"
|
||||
libtorch-cpu-shared-with-deps-debug-upload: # Uploading
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
permissions:
|
||||
41 .github/workflows/generated-windows-arm64-binary-libtorch-release-nightly.yml generated vendored
@ -1,12 +1,11 @@
|
||||
# @generated DO NOT EDIT MANUALLY
|
||||
|
||||
# Template is at: .github/templates/windows_binary_build_workflow.yml.j2
|
||||
# Template is at: .github/templates/windows_arm64_binary_build_workflow.yml.j2
|
||||
# Generation script: .github/scripts/generate_ci_workflows.py
|
||||
name: windows-arm64-binary-libtorch-release
|
||||
|
||||
on:
|
||||
push:
|
||||
# NOTE: Meta Employees can trigger new nightlies using: https://fburl.com/trigger_pytorch_nightly_build
|
||||
branches:
|
||||
- nightly
|
||||
tags:
|
||||
@ -18,24 +17,18 @@ on:
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
# Needed for conda builds
|
||||
ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
|
||||
AWS_DEFAULT_REGION: us-east-1
|
||||
BUILD_ENVIRONMENT: windows-arm64-binary-libtorch-release
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||
SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
|
||||
SKIP_ALL_TESTS: 1
|
||||
OS: windows-arm64
|
||||
PYTORCH_ROOT: /pytorch
|
||||
DOWNLOADS_DIR: c:\temp\downloads
|
||||
DEPENDENCIES_DIR: c:\temp\dependencies
|
||||
ENABLE_APL: 1
|
||||
ENABLE_OPENBLAS: 0
|
||||
MSVC_VERSION : 14.42
|
||||
concurrency:
|
||||
group: windows-arm64-binary-libtorch-release-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
|
||||
cancel-in-progress: true
|
||||
AWS_DEFAULT_REGION: us-east-1
|
||||
|
||||
jobs:
|
||||
get-label-type:
|
||||
@ -51,7 +44,7 @@ jobs:
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
needs: get-label-type
|
||||
runs-on: "windows-11-arm64"
|
||||
timeout-minutes: 300
|
||||
timeout-minutes: 240
|
||||
env:
|
||||
PYTORCH_ROOT: ${{ github.workspace }}/pytorch
|
||||
PACKAGE_TYPE: libtorch
|
||||
@ -66,6 +59,9 @@ jobs:
|
||||
# without this value pip does not get installed for some reason
|
||||
DESIRED_PYTHON: "3.9"
|
||||
steps:
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
- name: Populate binary env
|
||||
shell: cmd
|
||||
run: |
|
||||
@ -121,11 +117,11 @@ jobs:
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
"${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
|
||||
"pytorch/.circleci/scripts/binary_populate_env.sh"
|
||||
- name: Build PyTorch binary
|
||||
shell: bash
|
||||
run: |
|
||||
"${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
|
||||
"pytorch/.circleci/scripts/binary_windows_arm64_build.sh"
|
||||
- uses: actions/upload-artifact@v4.4.0
|
||||
if: always()
|
||||
with:
|
||||
@ -139,7 +135,7 @@ jobs:
|
||||
- libtorch-cpu-shared-with-deps-release-build
|
||||
- get-label-type
|
||||
runs-on: "windows-11-arm64"
|
||||
timeout-minutes: 300
|
||||
timeout-minutes: 240
|
||||
env:
|
||||
PYTORCH_ROOT: ${{ github.workspace }}/pytorch
|
||||
PACKAGE_TYPE: libtorch
|
||||
@ -154,17 +150,25 @@ jobs:
|
||||
# without this value pip does not get installed for some reason
|
||||
DESIRED_PYTHON: "3.9"
|
||||
steps:
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
- name: Populate binary env
|
||||
shell: cmd
|
||||
run: |
|
||||
echo BINARY_ENV_FILE=%RUNNER_TEMP%/env>> %GITHUB_ENV%
|
||||
echo PYTORCH_FINAL_PACKAGE_DIR=%RUNNER_TEMP%/artifacts>> %GITHUB_ENV%
|
||||
echo WIN_PACKAGE_WORK_DIR=%RUNNER_TEMP%>> %GITHUB_ENV%
|
||||
- uses: actions/download-artifact@v4.1.7
|
||||
name: Download Build Artifacts
|
||||
with:
|
||||
name: libtorch-cpu-shared-with-deps-release
|
||||
path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
|
||||
- name: Git checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
path: "pytorch"
|
||||
- name: Populate binary env
|
||||
- name: Bootstrap Git
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_git.bat"
|
||||
@ -193,19 +197,14 @@ jobs:
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_rust.bat"
|
||||
- uses: actions/download-artifact@v4.1.7
|
||||
name: Download Build Artifacts
|
||||
with:
|
||||
name: libtorch-cpu-shared-with-deps-release
|
||||
path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
"${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
|
||||
"pytorch/.circleci/scripts/binary_populate_env.sh"
|
||||
- name: Test PyTorch binary
|
||||
shell: bash
|
||||
run: |
|
||||
"${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
|
||||
"pytorch/.circleci/scripts/binary_windows_arm64_test.sh"
|
||||
libtorch-cpu-shared-with-deps-release-upload: # Uploading
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
permissions:
|
||||
|
||||
41 .github/workflows/generated-windows-arm64-binary-wheel-nightly.yml generated vendored
@ -1,12 +1,11 @@
|
||||
# @generated DO NOT EDIT MANUALLY
|
||||
|
||||
# Template is at: .github/templates/windows_binary_build_workflow.yml.j2
|
||||
# Template is at: .github/templates/windows_arm64_binary_build_workflow.yml.j2
|
||||
# Generation script: .github/scripts/generate_ci_workflows.py
|
||||
name: windows-arm64-binary-wheel
|
||||
|
||||
on:
|
||||
push:
|
||||
# NOTE: Meta Employees can trigger new nightlies using: https://fburl.com/trigger_pytorch_nightly_build
|
||||
branches:
|
||||
- nightly
|
||||
tags:
|
||||
@ -18,24 +17,18 @@ on:
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
# Needed for conda builds
|
||||
ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
|
||||
AWS_DEFAULT_REGION: us-east-1
|
||||
BUILD_ENVIRONMENT: windows-arm64-binary-wheel
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||
SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
|
||||
SKIP_ALL_TESTS: 1
|
||||
OS: windows-arm64
|
||||
PYTORCH_ROOT: /pytorch
|
||||
DOWNLOADS_DIR: c:\temp\downloads
|
||||
DEPENDENCIES_DIR: c:\temp\dependencies
|
||||
ENABLE_APL: 1
|
||||
ENABLE_OPENBLAS: 0
|
||||
MSVC_VERSION : 14.42
|
||||
concurrency:
|
||||
group: windows-arm64-binary-wheel-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
|
||||
cancel-in-progress: true
|
||||
AWS_DEFAULT_REGION: us-east-1
|
||||
|
||||
jobs:
|
||||
get-label-type:
|
||||
@ -51,7 +44,7 @@ jobs:
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
needs: get-label-type
|
||||
runs-on: "windows-11-arm64"
|
||||
timeout-minutes: 300
|
||||
timeout-minutes: 240
|
||||
env:
|
||||
PYTORCH_ROOT: ${{ github.workspace }}/pytorch
|
||||
PACKAGE_TYPE: wheel
|
||||
@ -63,6 +56,9 @@ jobs:
|
||||
DESIRED_PYTHON: "3.12"
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.5.1.17; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
steps:
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
- name: Populate binary env
|
||||
shell: cmd
|
||||
run: |
|
||||
@ -118,11 +114,11 @@ jobs:
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
"${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
|
||||
"pytorch/.circleci/scripts/binary_populate_env.sh"
|
||||
- name: Build PyTorch binary
|
||||
shell: bash
|
||||
run: |
|
||||
"${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
|
||||
"pytorch/.circleci/scripts/binary_windows_arm64_build.sh"
|
||||
- uses: actions/upload-artifact@v4.4.0
|
||||
if: always()
|
||||
with:
|
||||
@ -136,7 +132,7 @@ jobs:
|
||||
- wheel-py3_12-cpu-build
|
||||
- get-label-type
|
||||
runs-on: "windows-11-arm64"
|
||||
timeout-minutes: 300
|
||||
timeout-minutes: 240
|
||||
env:
|
||||
PYTORCH_ROOT: ${{ github.workspace }}/pytorch
|
||||
PACKAGE_TYPE: wheel
|
||||
@ -147,17 +143,25 @@ jobs:
|
||||
SKIP_ALL_TESTS: 1
|
||||
DESIRED_PYTHON: "3.12"
|
||||
steps:
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
- name: Populate binary env
|
||||
shell: cmd
|
||||
run: |
|
||||
echo BINARY_ENV_FILE=%RUNNER_TEMP%/env>> %GITHUB_ENV%
|
||||
echo PYTORCH_FINAL_PACKAGE_DIR=%RUNNER_TEMP%/artifacts>> %GITHUB_ENV%
|
||||
echo WIN_PACKAGE_WORK_DIR=%RUNNER_TEMP%>> %GITHUB_ENV%
|
||||
- uses: actions/download-artifact@v4.1.7
|
||||
name: Download Build Artifacts
|
||||
with:
|
||||
name: wheel-py3_12-cpu
|
||||
path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
|
||||
- name: Git checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
path: "pytorch"
|
||||
- name: Populate binary env
|
||||
- name: Bootstrap Git
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_git.bat"
|
||||
@ -186,19 +190,14 @@ jobs:
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_rust.bat"
|
||||
- uses: actions/download-artifact@v4.1.7
|
||||
name: Download Build Artifacts
|
||||
with:
|
||||
name: wheel-py3_12-cpu
|
||||
path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
"${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
|
||||
"pytorch/.circleci/scripts/binary_populate_env.sh"
|
||||
- name: Test PyTorch binary
|
||||
shell: bash
|
||||
run: |
|
||||
"${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
|
||||
"pytorch/.circleci/scripts/binary_windows_arm64_test.sh"
|
||||
wheel-py3_12-cpu-upload: # Uploading
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
permissions:
|
||||
|
||||
44 .github/workflows/generated-windows-binary-libtorch-debug-main.yml generated vendored
@ -19,7 +19,6 @@ env:
|
||||
PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||
SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
|
||||
SKIP_ALL_TESTS: 1
|
||||
OS: windows
|
||||
concurrency:
|
||||
group: windows-binary-libtorch-debug-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
|
||||
cancel-in-progress: true
|
||||
@ -53,15 +52,6 @@ jobs:
|
||||
# without this value pip does not get installed for some reason
|
||||
DESIRED_PYTHON: "3.9"
|
||||
steps:
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
|
||||
echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
|
||||
echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
|
||||
- name: Display EC2 information
|
||||
shell: bash
|
||||
run: |
|
||||
@ -106,6 +96,15 @@ jobs:
|
||||
# Let's both exclude the path and disable Windows Defender completely just to be sure
|
||||
# that it doesn't interfere
|
||||
Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
|
||||
echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
|
||||
echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
|
||||
- name: Checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
@ -146,7 +145,6 @@ jobs:
|
||||
if: always()
|
||||
run: |
|
||||
.github\scripts\kill_active_ssh_sessions.ps1
|
||||
|
||||
libtorch-cpu-shared-with-deps-debug-test: # Testing
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
needs:
|
||||
@ -212,18 +210,6 @@ jobs:
|
||||
# Let's both exclude the path and disable Windows Defender completely just to be sure
|
||||
# that it doesn't interfere
|
||||
Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
|
||||
- name: Checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
show-progress: false
|
||||
- name: Clean PyTorch checkout
|
||||
run: |
|
||||
# Remove any artifacts from the previous checkouts
|
||||
git clean -fxd
|
||||
working-directory: pytorch
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
@ -238,6 +224,18 @@ jobs:
|
||||
with:
|
||||
name: libtorch-cpu-shared-with-deps-debug
|
||||
path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
|
||||
- name: Checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
show-progress: false
|
||||
- name: Clean PyTorch checkout
|
||||
run: |
|
||||
# Remove any artifacts from the previous checkouts
|
||||
git clean -fxd
|
||||
working-directory: pytorch
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
|
||||
173 .github/workflows/generated-windows-binary-libtorch-debug-nightly.yml generated vendored
@ -26,7 +26,6 @@ env:
|
||||
PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||
SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
|
||||
SKIP_ALL_TESTS: 1
|
||||
OS: windows
|
||||
concurrency:
|
||||
group: windows-binary-libtorch-debug-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
|
||||
cancel-in-progress: true
|
||||
@ -60,15 +59,6 @@ jobs:
|
||||
# without this value pip does not get installed for some reason
|
||||
DESIRED_PYTHON: "3.9"
|
||||
steps:
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
|
||||
echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
|
||||
echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
|
||||
- name: Display EC2 information
|
||||
shell: bash
|
||||
run: |
|
||||
@ -113,6 +103,15 @@ jobs:
|
||||
# Let's both exclude the path and disable Windows Defender completely just to be sure
|
||||
# that it doesn't interfere
|
||||
Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
|
||||
echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
|
||||
echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
|
||||
- name: Checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
@ -153,7 +152,6 @@ jobs:
|
||||
if: always()
|
||||
run: |
|
||||
.github\scripts\kill_active_ssh_sessions.ps1
|
||||
|
||||
libtorch-cpu-shared-with-deps-debug-test: # Testing
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
needs:
|
||||
@ -219,18 +217,6 @@ jobs:
|
||||
# Let's both exclude the path and disable Windows Defender completely just to be sure
|
||||
# that it doesn't interfere
|
||||
Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
|
||||
- name: Checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
show-progress: false
|
||||
- name: Clean PyTorch checkout
|
||||
run: |
|
||||
# Remove any artifacts from the previous checkouts
|
||||
git clean -fxd
|
||||
working-directory: pytorch
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
@ -245,6 +231,18 @@ jobs:
|
||||
with:
|
||||
name: libtorch-cpu-shared-with-deps-debug
|
||||
path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
|
||||
- name: Checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
show-progress: false
|
||||
- name: Clean PyTorch checkout
|
||||
run: |
|
||||
# Remove any artifacts from the previous checkouts
|
||||
git clean -fxd
|
||||
working-directory: pytorch
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
@ -308,15 +306,6 @@ jobs:
|
||||
# without this value pip does not get installed for some reason
|
||||
DESIRED_PYTHON: "3.9"
|
||||
steps:
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
|
||||
echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
|
||||
echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
|
||||
- name: Display EC2 information
|
||||
shell: bash
|
||||
run: |
|
||||
@ -361,6 +350,15 @@ jobs:
|
||||
# Let's both exclude the path and disable Windows Defender completely just to be sure
|
||||
# that it doesn't interfere
|
||||
Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
|
||||
echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
|
||||
echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
|
||||
- name: Checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
@ -401,7 +399,6 @@ jobs:
|
||||
if: always()
|
||||
run: |
|
||||
.github\scripts\kill_active_ssh_sessions.ps1
|
||||
|
||||
libtorch-cuda11_8-shared-with-deps-debug-test: # Testing
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
needs:
|
||||
@ -468,18 +465,6 @@ jobs:
|
||||
# Let's both exclude the path and disable Windows Defender completely just to be sure
|
||||
# that it doesn't interfere
|
||||
Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
|
||||
- name: Checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
show-progress: false
|
||||
- name: Clean PyTorch checkout
|
||||
run: |
|
||||
# Remove any artifacts from the previous checkouts
|
||||
git clean -fxd
|
||||
working-directory: pytorch
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
@ -494,6 +479,18 @@ jobs:
|
||||
with:
|
||||
name: libtorch-cuda11_8-shared-with-deps-debug
|
||||
path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
|
||||
- name: Checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
show-progress: false
|
||||
- name: Clean PyTorch checkout
|
||||
run: |
|
||||
# Remove any artifacts from the previous checkouts
|
||||
git clean -fxd
|
||||
working-directory: pytorch
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
@ -558,15 +555,6 @@ jobs:
|
||||
# without this value pip does not get installed for some reason
|
||||
DESIRED_PYTHON: "3.9"
|
||||
steps:
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
|
||||
echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
|
||||
echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
|
||||
- name: Display EC2 information
|
||||
shell: bash
|
||||
run: |
|
||||
@ -611,6 +599,15 @@ jobs:
|
||||
# Let's both exclude the path and disable Windows Defender completely just to be sure
|
||||
# that it doesn't interfere
|
||||
Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
|
||||
echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
|
||||
echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
|
||||
- name: Checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
@ -651,7 +648,6 @@ jobs:
|
||||
if: always()
|
||||
run: |
|
||||
.github\scripts\kill_active_ssh_sessions.ps1
|
||||
|
||||
libtorch-cuda12_6-shared-with-deps-debug-test: # Testing
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
needs:
|
||||
@ -718,18 +714,6 @@ jobs:
|
||||
# Let's both exclude the path and disable Windows Defender completely just to be sure
|
||||
# that it doesn't interfere
|
||||
Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
|
||||
- name: Checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
show-progress: false
|
||||
- name: Clean PyTorch checkout
|
||||
run: |
|
||||
# Remove any artifacts from the previous checkouts
|
||||
git clean -fxd
|
||||
working-directory: pytorch
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
@ -744,6 +728,18 @@ jobs:
|
||||
with:
|
||||
name: libtorch-cuda12_6-shared-with-deps-debug
|
||||
path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
|
||||
- name: Checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
show-progress: false
|
||||
- name: Clean PyTorch checkout
|
||||
run: |
|
||||
# Remove any artifacts from the previous checkouts
|
||||
git clean -fxd
|
||||
working-directory: pytorch
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
@ -808,15 +804,6 @@ jobs:
|
||||
# without this value pip does not get installed for some reason
|
||||
DESIRED_PYTHON: "3.9"
|
||||
steps:
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
|
||||
echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
|
||||
echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
|
||||
- name: Display EC2 information
|
||||
shell: bash
|
||||
run: |
|
||||
@ -861,6 +848,15 @@ jobs:
|
||||
# Let's both exclude the path and disable Windows Defender completely just to be sure
|
||||
# that it doesn't interfere
|
||||
Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
|
||||
echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
|
||||
echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
|
||||
- name: Checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
@ -901,7 +897,6 @@ jobs:
|
||||
if: always()
|
||||
run: |
|
||||
.github\scripts\kill_active_ssh_sessions.ps1
|
||||
|
||||
libtorch-cuda12_8-shared-with-deps-debug-test: # Testing
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
needs:
|
||||
@ -968,18 +963,6 @@ jobs:
|
||||
# Let's both exclude the path and disable Windows Defender completely just to be sure
|
||||
# that it doesn't interfere
|
||||
Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
|
||||
- name: Checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
show-progress: false
|
||||
- name: Clean PyTorch checkout
|
||||
run: |
|
||||
# Remove any artifacts from the previous checkouts
|
||||
git clean -fxd
|
||||
working-directory: pytorch
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
@ -994,6 +977,18 @@ jobs:
|
||||
with:
|
||||
name: libtorch-cuda12_8-shared-with-deps-debug
|
||||
path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
|
||||
- name: Checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
show-progress: false
|
||||
- name: Clean PyTorch checkout
|
||||
run: |
|
||||
# Remove any artifacts from the previous checkouts
|
||||
git clean -fxd
|
||||
working-directory: pytorch
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
|
||||
44
.github/workflows/generated-windows-binary-libtorch-release-main.yml
generated
vendored
44
.github/workflows/generated-windows-binary-libtorch-release-main.yml
generated
vendored
@ -19,7 +19,6 @@ env:
|
||||
PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||
SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
|
||||
SKIP_ALL_TESTS: 1
|
||||
OS: windows
|
||||
concurrency:
|
||||
group: windows-binary-libtorch-release-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
|
||||
cancel-in-progress: true
|
||||
@ -53,15 +52,6 @@ jobs:
|
||||
# without this value pip does not get installed for some reason
|
||||
DESIRED_PYTHON: "3.9"
|
||||
steps:
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
|
||||
echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
|
||||
echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
|
||||
- name: Display EC2 information
|
||||
shell: bash
|
||||
run: |
|
||||
@ -106,6 +96,15 @@ jobs:
|
||||
# Let's both exclude the path and disable Windows Defender completely just to be sure
|
||||
# that it doesn't interfere
|
||||
Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
|
||||
echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
|
||||
echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
|
||||
- name: Checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
@ -146,7 +145,6 @@ jobs:
|
||||
if: always()
|
||||
run: |
|
||||
.github\scripts\kill_active_ssh_sessions.ps1
|
||||
|
||||
libtorch-cpu-shared-with-deps-release-test: # Testing
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
needs:
|
||||
@ -212,18 +210,6 @@ jobs:
|
||||
# Let's both exclude the path and disable Windows Defender completely just to be sure
|
||||
# that it doesn't interfere
|
||||
Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
|
||||
- name: Checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
show-progress: false
|
||||
- name: Clean PyTorch checkout
|
||||
run: |
|
||||
# Remove any artifacts from the previous checkouts
|
||||
git clean -fxd
|
||||
working-directory: pytorch
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
@ -238,6 +224,18 @@ jobs:
|
||||
with:
|
||||
name: libtorch-cpu-shared-with-deps-release
|
||||
path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
|
||||
- name: Checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
show-progress: false
|
||||
- name: Clean PyTorch checkout
|
||||
run: |
|
||||
# Remove any artifacts from the previous checkouts
|
||||
git clean -fxd
|
||||
working-directory: pytorch
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
|
||||
173
.github/workflows/generated-windows-binary-libtorch-release-nightly.yml
generated
vendored
173
.github/workflows/generated-windows-binary-libtorch-release-nightly.yml
generated
vendored
@ -26,7 +26,6 @@ env:
|
||||
PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||
SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
|
||||
SKIP_ALL_TESTS: 1
|
||||
OS: windows
|
||||
concurrency:
|
||||
group: windows-binary-libtorch-release-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
|
||||
cancel-in-progress: true
|
||||
@ -60,15 +59,6 @@ jobs:
|
||||
# without this value pip does not get installed for some reason
|
||||
DESIRED_PYTHON: "3.9"
|
||||
steps:
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
|
||||
echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
|
||||
echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
|
||||
- name: Display EC2 information
|
||||
shell: bash
|
||||
run: |
|
||||
@ -113,6 +103,15 @@ jobs:
|
||||
# Let's both exclude the path and disable Windows Defender completely just to be sure
|
||||
# that it doesn't interfere
|
||||
Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
|
||||
echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
|
||||
echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
|
||||
- name: Checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
@ -153,7 +152,6 @@ jobs:
|
||||
if: always()
|
||||
run: |
|
||||
.github\scripts\kill_active_ssh_sessions.ps1
|
||||
|
||||
libtorch-cpu-shared-with-deps-release-test: # Testing
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
needs:
|
||||
@ -219,18 +217,6 @@ jobs:
|
||||
# Let's both exclude the path and disable Windows Defender completely just to be sure
|
||||
# that it doesn't interfere
|
||||
Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
|
||||
- name: Checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
show-progress: false
|
||||
- name: Clean PyTorch checkout
|
||||
run: |
|
||||
# Remove any artifacts from the previous checkouts
|
||||
git clean -fxd
|
||||
working-directory: pytorch
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
@ -245,6 +231,18 @@ jobs:
|
||||
with:
|
||||
name: libtorch-cpu-shared-with-deps-release
|
||||
path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
|
||||
- name: Checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
show-progress: false
|
||||
- name: Clean PyTorch checkout
|
||||
run: |
|
||||
# Remove any artifacts from the previous checkouts
|
||||
git clean -fxd
|
||||
working-directory: pytorch
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
@ -308,15 +306,6 @@ jobs:
|
||||
# without this value pip does not get installed for some reason
|
||||
DESIRED_PYTHON: "3.9"
|
||||
steps:
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
|
||||
echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
|
||||
echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
|
||||
- name: Display EC2 information
|
||||
shell: bash
|
||||
run: |
|
||||
@ -361,6 +350,15 @@ jobs:
|
||||
# Let's both exclude the path and disable Windows Defender completely just to be sure
|
||||
# that it doesn't interfere
|
||||
Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
|
||||
echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
|
||||
echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
|
||||
- name: Checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
@ -401,7 +399,6 @@ jobs:
|
||||
if: always()
|
||||
run: |
|
||||
.github\scripts\kill_active_ssh_sessions.ps1
|
||||
|
||||
libtorch-cuda11_8-shared-with-deps-release-test: # Testing
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
needs:
|
||||
@ -468,18 +465,6 @@ jobs:
|
||||
# Let's both exclude the path and disable Windows Defender completely just to be sure
|
||||
# that it doesn't interfere
|
||||
Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
|
||||
- name: Checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
show-progress: false
|
||||
- name: Clean PyTorch checkout
|
||||
run: |
|
||||
# Remove any artifacts from the previous checkouts
|
||||
git clean -fxd
|
||||
working-directory: pytorch
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
@ -494,6 +479,18 @@ jobs:
|
||||
with:
|
||||
name: libtorch-cuda11_8-shared-with-deps-release
|
||||
path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
|
||||
- name: Checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
show-progress: false
|
||||
- name: Clean PyTorch checkout
|
||||
run: |
|
||||
# Remove any artifacts from the previous checkouts
|
||||
git clean -fxd
|
||||
working-directory: pytorch
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
@ -558,15 +555,6 @@ jobs:
|
||||
# without this value pip does not get installed for some reason
|
||||
DESIRED_PYTHON: "3.9"
|
||||
steps:
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
|
||||
echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
|
||||
echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
|
||||
- name: Display EC2 information
|
||||
shell: bash
|
||||
run: |
|
||||
@ -611,6 +599,15 @@ jobs:
|
||||
# Let's both exclude the path and disable Windows Defender completely just to be sure
|
||||
# that it doesn't interfere
|
||||
Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
|
||||
echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
|
||||
echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
|
||||
- name: Checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
@ -651,7 +648,6 @@ jobs:
|
||||
if: always()
|
||||
run: |
|
||||
.github\scripts\kill_active_ssh_sessions.ps1
|
||||
|
||||
libtorch-cuda12_6-shared-with-deps-release-test: # Testing
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
needs:
|
||||
@ -718,18 +714,6 @@ jobs:
|
||||
# Let's both exclude the path and disable Windows Defender completely just to be sure
|
||||
# that it doesn't interfere
|
||||
Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
|
||||
- name: Checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
show-progress: false
|
||||
- name: Clean PyTorch checkout
|
||||
run: |
|
||||
# Remove any artifacts from the previous checkouts
|
||||
git clean -fxd
|
||||
working-directory: pytorch
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
@ -744,6 +728,18 @@ jobs:
|
||||
with:
|
||||
name: libtorch-cuda12_6-shared-with-deps-release
|
||||
path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
|
||||
- name: Checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
show-progress: false
|
||||
- name: Clean PyTorch checkout
|
||||
run: |
|
||||
# Remove any artifacts from the previous checkouts
|
||||
git clean -fxd
|
||||
working-directory: pytorch
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
@ -808,15 +804,6 @@ jobs:
|
||||
# without this value pip does not get installed for some reason
|
||||
DESIRED_PYTHON: "3.9"
|
||||
steps:
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
|
||||
echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
|
||||
echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
|
||||
- name: Display EC2 information
|
||||
shell: bash
|
||||
run: |
|
||||
@ -861,6 +848,15 @@ jobs:
|
||||
# Let's both exclude the path and disable Windows Defender completely just to be sure
|
||||
# that it doesn't interfere
|
||||
Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
|
||||
echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
|
||||
echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
|
||||
- name: Checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
@ -901,7 +897,6 @@ jobs:
|
||||
if: always()
|
||||
run: |
|
||||
.github\scripts\kill_active_ssh_sessions.ps1
|
||||
|
||||
libtorch-cuda12_8-shared-with-deps-release-test: # Testing
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
needs:
|
||||
@ -968,18 +963,6 @@ jobs:
|
||||
# Let's both exclude the path and disable Windows Defender completely just to be sure
|
||||
# that it doesn't interfere
|
||||
Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
|
||||
- name: Checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
show-progress: false
|
||||
- name: Clean PyTorch checkout
|
||||
run: |
|
||||
# Remove any artifacts from the previous checkouts
|
||||
git clean -fxd
|
||||
working-directory: pytorch
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
@ -994,6 +977,18 @@ jobs:
|
||||
with:
|
||||
name: libtorch-cuda12_8-shared-with-deps-release
|
||||
path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
|
||||
- name: Checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
show-progress: false
|
||||
- name: Clean PyTorch checkout
|
||||
run: |
|
||||
# Remove any artifacts from the previous checkouts
|
||||
git clean -fxd
|
||||
working-directory: pytorch
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
|
||||
1291
.github/workflows/generated-windows-binary-wheel-nightly.yml
generated
vendored
1291
.github/workflows/generated-windows-binary-wheel-nightly.yml
generated
vendored
File diff suppressed because it is too large
Load Diff
3
.github/workflows/inductor-nightly.yml
vendored
3
.github/workflows/inductor-nightly.yml
vendored
@ -4,9 +4,6 @@ on:
|
||||
pull_request:
|
||||
paths:
|
||||
- .github/workflows/inductor-nightly.yml
|
||||
- benchmarks/dynamo/ci_expected_accuracy/dynamic_cpu_max_autotune_inductor_amp_freezing_huggingface_inference.csv
|
||||
- benchmarks/dynamo/ci_expected_accuracy/dynamic_cpu_max_autotune_inductor_amp_freezing_timm_inference.csv
|
||||
- benchmarks/dynamo/ci_expected_accuracy/dynamic_cpu_max_autotune_inductor_amp_freezing_torchbench_inference.csv
|
||||
workflow_dispatch:
|
||||
schedule:
|
||||
# Run every day at 7:00 AM UTC
|
||||
|
||||
@ -1,4 +1,5 @@
|
||||
name: inductor-perf-nightly-macos
|
||||
name: perf-nightly-macos
|
||||
# Technically not an inductor test, but uses it as a template for tracking macos performance
|
||||
|
||||
on:
|
||||
schedule:
|
||||
@ -20,13 +21,9 @@ on:
|
||||
required: false
|
||||
type: string
|
||||
default: torchbench_perf_mps
|
||||
pull_request:
|
||||
paths:
|
||||
- .github/workflows/inductor-perf-test-nightly-macos.yml
|
||||
- .ci/pytorch/macos-test.sh
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
|
||||
cancel-in-progress: true
|
||||
|
||||
permissions: read-all
|
||||
@ -38,14 +35,14 @@ jobs:
|
||||
uses: ./.github/workflows/_mac-build.yml
|
||||
with:
|
||||
sync-tag: macos-perf-py3-arm64-build
|
||||
build-environment: macos-py3-arm64-distributed
|
||||
build-environment: macos-py3-arm64
|
||||
runner-type: macos-m1-stable
|
||||
build-generates-artifacts: true
|
||||
# To match the one pre-installed in the m1 runners
|
||||
python-version: 3.9.12
|
||||
test-matrix: |
|
||||
{ include: [
|
||||
{ config: "perf_smoketest", shard: 1, num_shards: 1, runner: "macos-m2-15" },
|
||||
{ config: "perf_smoketest", shard: 1, num_shards: 1, runner: "macos-m1-14" },
|
||||
]}
|
||||
secrets: inherit
|
||||
|
||||
@ -54,7 +51,7 @@ jobs:
|
||||
uses: ./.github/workflows/_mac-test.yml
|
||||
needs: macos-perf-py3-arm64-build
|
||||
with:
|
||||
build-environment: macos-py3-arm64-distributed
|
||||
build-environment: macos-py3-arm64
|
||||
# Same as the build job
|
||||
python-version: 3.9.12
|
||||
test-matrix: ${{ needs.macos-perf-py3-arm64-build.outputs.test-matrix }}
|
||||
|
||||
@ -78,12 +78,12 @@ jobs:
|
||||
curr_branch: ${{ github.head_ref || github.ref_name }}
|
||||
curr_ref_type: ${{ github.ref_type }}
|
||||
|
||||
linux-focal-rocm-py3_10-inductor-benchmark-build:
|
||||
linux-focal-rocm6_3-py3_10-inductor-benchmark-build:
|
||||
if: github.repository_owner == 'pytorch'
|
||||
name: rocm-py3_10-inductor-benchmark-build
|
||||
name: rocm6_3-py3_10-inductor-benchmark-build
|
||||
uses: ./.github/workflows/_linux-build.yml
|
||||
with:
|
||||
build-environment: linux-focal-rocm-py3_10
|
||||
build-environment: linux-focal-rocm6_3-py3_10
|
||||
docker-image-name: pytorch-linux-focal-rocm-n-py3
|
||||
test-matrix: |
|
||||
{ include: [
|
||||
@ -102,18 +102,18 @@ jobs:
|
||||
]}
|
||||
secrets: inherit
|
||||
|
||||
linux-focal-rocm-py3_10-inductor-benchmark-test:
|
||||
linux-focal-rocm6_3-py3_10-inductor-benchmark-test:
|
||||
permissions:
|
||||
id-token: write
|
||||
contents: read
|
||||
name: rocm-py3_10-inductor-benchmark-test
|
||||
name: rocm6_3-py3_10-inductor-benchmark-test
|
||||
uses: ./.github/workflows/_rocm-test.yml
|
||||
needs: linux-focal-rocm-py3_10-inductor-benchmark-build
|
||||
needs: linux-focal-rocm6_3-py3_10-inductor-benchmark-build
|
||||
with:
|
||||
build-environment: linux-focal-rocm-py3_10
|
||||
build-environment: linux-focal-rocm6_3-py3_10
|
||||
dashboard-tag: training-true-inference-true-default-true-dynamic-true-cudagraphs-true-cppwrapper-true-aotinductor-true-freezing_cudagraphs-true-cudagraphs_low_precision-true
|
||||
docker-image: ${{ needs.linux-focal-rocm-py3_10-inductor-benchmark-build.outputs.docker-image }}
|
||||
test-matrix: ${{ needs.linux-focal-rocm-py3_10-inductor-benchmark-build.outputs.test-matrix }}
|
||||
docker-image: ${{ needs.linux-focal-rocm6_3-py3_10-inductor-benchmark-build.outputs.docker-image }}
|
||||
test-matrix: ${{ needs.linux-focal-rocm6_3-py3_10-inductor-benchmark-build.outputs.test-matrix }}
|
||||
timeout-minutes: 720
|
||||
# Disable monitor in perf tests for more investigation
|
||||
disable-monitor: true
|
||||
|
||||
18
.github/workflows/inductor-periodic.yml
vendored
18
.github/workflows/inductor-periodic.yml
vendored
@ -67,12 +67,12 @@ jobs:
|
||||
test-matrix: ${{ needs.linux-focal-cuda12_6-py3_10-gcc9-periodic-dynamo-benchmarks-build.outputs.test-matrix }}
|
||||
secrets: inherit
|
||||
|
||||
linux-focal-rocm-py3_10-periodic-dynamo-benchmarks-build:
|
||||
linux-focal-rocm6_3-py3_10-periodic-dynamo-benchmarks-build:
|
||||
if: github.repository_owner == 'pytorch'
|
||||
name: rocm-py3_10-periodic-dynamo-benchmarks
|
||||
name: rocm6_3-py3_10-periodic-dynamo-benchmarks
|
||||
uses: ./.github/workflows/_linux-build.yml
|
||||
with:
|
||||
build-environment: linux-focal-rocm-py3_10
|
||||
build-environment: linux-focal-rocm6_3-py3_10
|
||||
docker-image-name: pytorch-linux-focal-rocm-n-py3
|
||||
sync-tag: rocm-build
|
||||
test-matrix: |
|
||||
@ -95,17 +95,17 @@ jobs:
|
||||
]}
|
||||
secrets: inherit
|
||||
|
||||
linux-focal-rocm-py3_10-periodic-dynamo-benchmarks-test:
|
||||
linux-focal-rocm6_3-py3_10-periodic-dynamo-benchmarks-test:
|
||||
permissions:
|
||||
id-token: write
|
||||
contents: read
|
||||
name: rocm-py3_10-periodic-dynamo-benchmarks
|
||||
name: rocm6_3-py3_10-periodic-dynamo-benchmarks
|
||||
uses: ./.github/workflows/_rocm-test.yml
|
||||
needs: linux-focal-rocm-py3_10-periodic-dynamo-benchmarks-build
|
||||
needs: linux-focal-rocm6_3-py3_10-periodic-dynamo-benchmarks-build
|
||||
with:
|
||||
build-environment: linux-focal-rocm-py3_10
|
||||
docker-image: ${{ needs.linux-focal-rocm-py3_10-periodic-dynamo-benchmarks-build.outputs.docker-image }}
|
||||
test-matrix: ${{ needs.linux-focal-rocm-py3_10-periodic-dynamo-benchmarks-build.outputs.test-matrix }}
|
||||
build-environment: linux-focal-rocm6_3-py3_10
|
||||
docker-image: ${{ needs.linux-focal-rocm6_3-py3_10-periodic-dynamo-benchmarks-build.outputs.docker-image }}
|
||||
test-matrix: ${{ needs.linux-focal-rocm6_3-py3_10-periodic-dynamo-benchmarks-build.outputs.test-matrix }}
|
||||
secrets: inherit
|
||||
|
||||
linux-focal-cuda12_6-py3_10-gcc9-inductor-build-gcp:
|
||||
|
||||
18
.github/workflows/inductor-rocm-mi300.yml
vendored
18
.github/workflows/inductor-rocm-mi300.yml
vendored
@ -36,13 +36,13 @@ jobs:
|
||||
curr_branch: ${{ github.head_ref || github.ref_name }}
|
||||
curr_ref_type: ${{ github.ref_type }}
|
||||
|
||||
linux-focal-rocm-py3_10-inductor-build:
|
||||
name: rocm-py3.10-inductor
|
||||
linux-focal-rocm6_3-py3_10-inductor-build:
|
||||
name: rocm6.3-py3.10-inductor
|
||||
uses: ./.github/workflows/_linux-build.yml
|
||||
needs: get-label-type
|
||||
with:
|
||||
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
|
||||
build-environment: linux-focal-rocm-py3.10
|
||||
build-environment: linux-focal-rocm6.3-py3.10
|
||||
docker-image-name: pytorch-linux-focal-rocm-n-py3
|
||||
test-matrix: |
|
||||
{ include: [
|
||||
@ -51,15 +51,15 @@ jobs:
|
||||
]}
|
||||
secrets: inherit
|
||||
|
||||
linux-focal-rocm-py3_10-inductor-test:
|
||||
linux-focal-rocm6_3-py3_10-inductor-test:
|
||||
permissions:
|
||||
id-token: write
|
||||
contents: read
|
||||
name: rocm-py3.10-inductor
|
||||
name: rocm6.3-py3.10-inductor
|
||||
uses: ./.github/workflows/_rocm-test.yml
|
||||
needs: linux-focal-rocm-py3_10-inductor-build
|
||||
needs: linux-focal-rocm6_3-py3_10-inductor-build
|
||||
with:
|
||||
build-environment: linux-focal-rocm-py3.10
|
||||
docker-image: ${{ needs.linux-focal-rocm-py3_10-inductor-build.outputs.docker-image }}
|
||||
test-matrix: ${{ needs.linux-focal-rocm-py3_10-inductor-build.outputs.test-matrix }}
|
||||
build-environment: linux-focal-rocm6.3-py3.10
|
||||
docker-image: ${{ needs.linux-focal-rocm6_3-py3_10-inductor-build.outputs.docker-image }}
|
||||
test-matrix: ${{ needs.linux-focal-rocm6_3-py3_10-inductor-build.outputs.test-matrix }}
|
||||
secrets: inherit
|
||||
|
||||
18
.github/workflows/inductor-rocm.yml
vendored
18
.github/workflows/inductor-rocm.yml
vendored
@ -29,13 +29,13 @@ jobs:
|
||||
curr_branch: ${{ github.head_ref || github.ref_name }}
|
||||
curr_ref_type: ${{ github.ref_type }}
|
||||
|
||||
linux-focal-rocm-py3_10-inductor-build:
|
||||
name: rocm-py3.10-inductor
|
||||
linux-focal-rocm6_3-py3_10-inductor-build:
|
||||
name: rocm6.3-py3.10-inductor
|
||||
uses: ./.github/workflows/_linux-build.yml
|
||||
needs: get-label-type
|
||||
with:
|
||||
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
|
||||
build-environment: linux-focal-rocm-py3.10
|
||||
build-environment: linux-focal-rocm6.3-py3.10
|
||||
docker-image-name: pytorch-linux-focal-rocm-n-py3
|
||||
test-matrix: |
|
||||
{ include: [
|
||||
@ -44,15 +44,15 @@ jobs:
|
||||
]}
|
||||
secrets: inherit
|
||||
|
||||
linux-focal-rocm-py3_10-inductor-test:
|
||||
linux-focal-rocm6_3-py3_10-inductor-test:
|
||||
permissions:
|
||||
id-token: write
|
||||
contents: read
|
||||
name: rocm-py3.10-inductor
|
||||
name: rocm6.3-py3.10-inductor
|
||||
uses: ./.github/workflows/_rocm-test.yml
|
||||
needs: linux-focal-rocm-py3_10-inductor-build
|
||||
needs: linux-focal-rocm6_3-py3_10-inductor-build
|
||||
with:
|
||||
build-environment: linux-focal-rocm-py3.10
|
||||
docker-image: ${{ needs.linux-focal-rocm-py3_10-inductor-build.outputs.docker-image }}
|
||||
test-matrix: ${{ needs.linux-focal-rocm-py3_10-inductor-build.outputs.test-matrix }}
|
||||
build-environment: linux-focal-rocm6.3-py3.10
|
||||
docker-image: ${{ needs.linux-focal-rocm6_3-py3_10-inductor-build.outputs.docker-image }}
|
||||
test-matrix: ${{ needs.linux-focal-rocm6_3-py3_10-inductor-build.outputs.test-matrix }}
|
||||
secrets: inherit
|
||||
|
||||
30
.github/workflows/inductor-unittest.yml
vendored
30
.github/workflows/inductor-unittest.yml
vendored
@ -36,11 +36,11 @@ jobs:
|
||||
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
|
||||
test-matrix: |
|
||||
{ include: [
|
||||
{ config: "inductor", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" },
|
||||
{ config: "inductor", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" },
|
||||
{ config: "inductor_distributed", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.12xlarge.nvidia.gpu" },
|
||||
{ config: "inductor_cpp_wrapper", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" },
|
||||
{ config: "inductor_cpp_wrapper", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" },
|
||||
{ config: "inductor", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
|
||||
{ config: "inductor", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
|
||||
{ config: "inductor_distributed", shard: 1, num_shards: 1, runner: "linux.g5.12xlarge.nvidia.gpu" },
|
||||
{ config: "inductor_cpp_wrapper", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
|
||||
{ config: "inductor_cpp_wrapper", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
|
||||
]}
|
||||
secrets: inherit
|
||||
|
||||
@ -65,8 +65,8 @@ jobs:
|
||||
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
|
||||
test-matrix: |
|
||||
{ include: [
|
||||
{ config: "inductor", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" },
|
||||
{ config: "inductor", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" },
|
||||
{ config: "inductor", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
|
||||
{ config: "inductor", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
|
||||
]}
|
||||
secrets: inherit
|
||||
|
||||
@ -90,7 +90,7 @@ jobs:
|
||||
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
|
||||
test-matrix: |
|
||||
{ include: [
|
||||
{ config: "inductor-halide", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.12xlarge" },
|
||||
{ config: "inductor-halide", shard: 1, num_shards: 1, runner: "linux.12xlarge" },
|
||||
]}
|
||||
secrets: inherit
|
||||
|
||||
@ -114,7 +114,7 @@ jobs:
|
||||
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
|
||||
test-matrix: |
|
||||
{ include: [
|
||||
{ config: "inductor-triton-cpu", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.12xlarge" },
|
||||
{ config: "inductor-triton-cpu", shard: 1, num_shards: 1, runner: "linux.12xlarge" },
|
||||
]}
|
||||
secrets: inherit
|
||||
|
||||
@ -138,10 +138,10 @@ jobs:
|
||||
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
|
||||
test-matrix: |
|
||||
{ include: [
|
||||
{ config: "inductor_amx", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.8xlarge.amx" },
|
||||
{ config: "inductor_amx", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.8xlarge.amx" },
|
||||
{ config: "inductor_avx2", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.10xlarge.avx2" },
|
||||
{ config: "inductor_avx2", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.10xlarge.avx2" },
|
||||
{ config: "inductor_amx", shard: 1, num_shards: 2, runner: "linux.8xlarge.amx" },
|
||||
{ config: "inductor_amx", shard: 2, num_shards: 2, runner: "linux.8xlarge.amx" },
|
||||
{ config: "inductor_avx2", shard: 1, num_shards: 2, runner: "linux.10xlarge.avx2" },
|
||||
{ config: "inductor_avx2", shard: 2, num_shards: 2, runner: "linux.10xlarge.avx2" },
|
||||
]}
|
||||
secrets: inherit
|
||||
|
||||
@ -165,8 +165,8 @@ jobs:
|
||||
cuda-arch-list: '8.6'
|
||||
test-matrix: |
|
||||
{ include: [
|
||||
{ config: "inductor", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" },
|
||||
{ config: "inductor", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" },
|
||||
{ config: "inductor", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
|
||||
{ config: "inductor", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
|
||||
]}
|
||||
secrets: inherit
|
||||
|
||||
|
||||
26
.github/workflows/inductor.yml
vendored
26
.github/workflows/inductor.yml
vendored
@ -53,11 +53,11 @@ jobs:
|
||||
sync-tag: linux-focal-cuda12_6-py3_10-gcc9-inductor-build
|
||||
test-matrix: |
|
||||
{ include: [
|
||||
{ config: "inductor_huggingface", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" },
|
||||
{ config: "inductor_timm", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" },
|
||||
{ config: "inductor_timm", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" },
|
||||
{ config: "inductor_torchbench", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" },
|
||||
{ config: "inductor_torchbench", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" },
|
||||
{ config: "inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
|
||||
{ config: "inductor_timm", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
|
||||
{ config: "inductor_timm", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
|
||||
{ config: "inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
|
||||
{ config: "inductor_torchbench", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
|
||||
]}
|
||||
secrets: inherit
|
||||
|
||||
@ -82,14 +82,14 @@ jobs:
|
||||
sync-tag: linux-jammy-cpu-py3_9-gcc11-inductor-build
|
||||
test-matrix: |
|
||||
{ include: [
|
||||
{ config: "cpu_inductor_torchbench", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.8xlarge.amx" },
|
||||
{ config: "cpu_inductor_torchbench", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.8xlarge.amx" },
|
||||
{ config: "dynamic_cpu_inductor_huggingface", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.8xlarge.amx" },
|
||||
{ config: "dynamic_cpu_inductor_timm", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.8xlarge.amx" },
|
||||
{ config: "dynamic_cpu_inductor_timm", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.8xlarge.amx" },
|
||||
{ config: "dynamic_cpu_inductor_torchbench", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.8xlarge.amx" },
|
||||
{ config: "dynamic_cpu_inductor_torchbench", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.8xlarge.amx" },
|
||||
{ config: "inductor_torchbench_cpu_smoketest_perf", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.24xl.spr-metal" },
|
||||
{ config: "cpu_inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.8xlarge.amx" },
|
||||
{ config: "cpu_inductor_torchbench", shard: 2, num_shards: 2, runner: "linux.8xlarge.amx" },
|
||||
{ config: "dynamic_cpu_inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.8xlarge.amx" },
|
||||
{ config: "dynamic_cpu_inductor_timm", shard: 1, num_shards: 2, runner: "linux.8xlarge.amx" },
|
||||
{ config: "dynamic_cpu_inductor_timm", shard: 2, num_shards: 2, runner: "linux.8xlarge.amx" },
|
||||
{ config: "dynamic_cpu_inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.8xlarge.amx" },
|
||||
{ config: "dynamic_cpu_inductor_torchbench", shard: 2, num_shards: 2, runner: "linux.8xlarge.amx" },
|
||||
{ config: "inductor_torchbench_cpu_smoketest_perf", shard: 1, num_shards: 1, runner: "linux.24xl.spr-metal" },
|
||||
]}
|
||||
secrets: inherit
|
||||
|
||||
|
||||
6
.github/workflows/lint.yml
vendored
6
.github/workflows/lint.yml
vendored
@ -233,8 +233,10 @@ jobs:
|
||||
runner: linux.24_04.4x
|
||||
- test_type: without_torch
|
||||
runner: linux.24_04.4x
|
||||
# NOTE: The oldest supported version of python for 24.04 is 3.8
|
||||
# so this cannot be updated if we want to keep this test at 3.6
|
||||
- test_type: older_python_version
|
||||
runner: linux.24_04.4x
|
||||
runner: linux.20_04.4x
|
||||
steps:
|
||||
# [see note: pytorch repo ref]
|
||||
# deep clone (fetch-depth 0) required, to allow us to use git log
|
||||
@ -254,7 +256,7 @@ jobs:
|
||||
if: matrix.test_type == 'older_python_version'
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: 3.8
|
||||
python-version: 3.6
|
||||
architecture: x64
|
||||
check-latest: false
|
||||
cache: pip
|
||||
|
||||
14
.github/workflows/linux-aarch64.yml
vendored
14
.github/workflows/linux-aarch64.yml
vendored
@ -37,13 +37,13 @@ jobs:
|
||||
runner: linux.arm64.2xlarge
|
||||
test-matrix: |
|
||||
{ include: [
|
||||
{ config: "default", shard: 1, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.arm64.2xlarge" },
|
||||
{ config: "default", shard: 2, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.arm64.2xlarge" },
|
||||
{ config: "default", shard: 3, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.arm64.2xlarge" },
|
||||
{ config: "default", shard: 4, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.arm64.2xlarge" },
|
||||
{ config: "default", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.arm64.m7g.4xlarge" },
|
||||
{ config: "default", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.arm64.m7g.4xlarge" },
|
||||
{ config: "default", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.arm64.m7g.4xlarge" },
|
||||
{ config: "default", shard: 1, num_shards: 4, runner: "linux.arm64.2xlarge" },
|
||||
{ config: "default", shard: 2, num_shards: 4, runner: "linux.arm64.2xlarge" },
|
||||
{ config: "default", shard: 3, num_shards: 4, runner: "linux.arm64.2xlarge" },
|
||||
{ config: "default", shard: 4, num_shards: 4, runner: "linux.arm64.2xlarge" },
|
||||
{ config: "default", shard: 1, num_shards: 3, runner: "linux.arm64.m7g.4xlarge" },
|
||||
{ config: "default", shard: 2, num_shards: 3, runner: "linux.arm64.m7g.4xlarge" },
|
||||
{ config: "default", shard: 3, num_shards: 3, runner: "linux.arm64.m7g.4xlarge" },
|
||||
]}
|
||||
secrets: inherit
|
||||
|
||||
|
||||
3
.github/workflows/operator_benchmark.yml
vendored
3
.github/workflows/operator_benchmark.yml
vendored
@ -11,9 +11,6 @@ on:
|
||||
type: string
|
||||
default: 'short'
|
||||
description: tag filter for operator benchmarks, options from long, short, all
|
||||
schedule:
|
||||
# Run at 07:00 UTC every Sunday
|
||||
- cron: 0 7 * * 0
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
|
||||
|
||||
81
.github/workflows/periodic-rocm-mi300.yml
vendored
81
.github/workflows/periodic-rocm-mi300.yml
vendored
@ -1,81 +0,0 @@
|
||||
name: periodic-rocm-mi300
|
||||
|
||||
on:
|
||||
schedule:
|
||||
# We have several schedules so jobs can check github.event.schedule to activate only for a fraction of the runs.
|
||||
# Also run less frequently on weekends.
|
||||
- cron: 45 0,8,16 * * 1-5
|
||||
- cron: 45 4 * * 0,6
|
||||
- cron: 45 4,12,20 * * 1-5
|
||||
- cron: 45 12 * * 0,6
|
||||
- cron: 29 8 * * * # about 1:29am PDT, for mem leak check and rerun disabled tests
|
||||
push:
|
||||
tags:
|
||||
- ciflow/periodic-rocm-mi300/*
|
||||
branches:
|
||||
- release/*
|
||||
workflow_dispatch:
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}-${{ github.event.schedule }}
|
||||
cancel-in-progress: true
|
||||
|
||||
permissions: read-all
|
||||
|
||||
jobs:
|
||||
llm-td:
|
||||
if: github.repository_owner == 'pytorch'
|
||||
name: before-test
|
||||
uses: ./.github/workflows/llm_td_retrieval.yml
|
||||
permissions:
|
||||
id-token: write
|
||||
contents: read
|
||||
|
||||
target-determination:
|
||||
name: before-test
|
||||
uses: ./.github/workflows/target_determination.yml
|
||||
needs: llm-td
|
||||
permissions:
|
||||
id-token: write
|
||||
contents: read
|
||||
|
||||
get-label-type:
|
||||
name: get-label-type
|
||||
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
|
||||
if: (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch'
|
||||
with:
|
||||
triggering_actor: ${{ github.triggering_actor }}
|
||||
issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
|
||||
curr_branch: ${{ github.head_ref || github.ref_name }}
|
||||
curr_ref_type: ${{ github.ref_type }}
|
||||
|
||||
linux-focal-rocm-py3_10-build:
|
||||
name: linux-focal-rocm-py3.10
|
||||
uses: ./.github/workflows/_linux-build.yml
|
||||
needs: get-label-type
|
||||
with:
|
||||
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
|
||||
build-environment: linux-focal-rocm-py3.10
|
||||
docker-image-name: pytorch-linux-focal-rocm-n-py3
|
||||
test-matrix: |
|
||||
{ include: [
|
||||
{ config: "distributed", shard: 1, num_shards: 3, runner: "linux.rocm.gpu.mi300.4", owners: ["module:rocm", "oncall:distributed"] },
|
||||
{ config: "distributed", shard: 2, num_shards: 3, runner: "linux.rocm.gpu.mi300.4", owners: ["module:rocm", "oncall:distributed"] },
|
||||
{ config: "distributed", shard: 3, num_shards: 3, runner: "linux.rocm.gpu.mi300.4", owners: ["module:rocm", "oncall:distributed"] },
|
||||
]}
|
||||
secrets: inherit
|
||||
|
||||
linux-focal-rocm-py3_10-test:
|
||||
permissions:
|
||||
id-token: write
|
||||
contents: read
|
||||
name: linux-focal-rocm-py3.10
|
||||
uses: ./.github/workflows/_rocm-test.yml
|
||||
needs:
|
||||
- linux-focal-rocm-py3_10-build
|
||||
- target-determination
|
||||
with:
|
||||
build-environment: linux-focal-rocm-py3.10
|
||||
docker-image: ${{ needs.linux-focal-rocm-py3_10-build.outputs.docker-image }}
|
||||
test-matrix: ${{ needs.linux-focal-rocm-py3_10-build.outputs.test-matrix }}
|
||||
secrets: inherit
|
||||
54
.github/workflows/periodic.yml
vendored
54
.github/workflows/periodic.yml
vendored
@ -59,9 +59,8 @@ jobs:
|
||||
docker-image-name: pytorch-linux-focal-cuda12.6-cudnn9-py3-gcc11
|
||||
test-matrix: |
|
||||
{ include: [
|
||||
{ config: "nogpu_AVX512", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
|
||||
{ config: "nogpu_AVX512", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
|
||||
{ config: "nogpu_AVX512", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
|
||||
{ config: "nogpu_AVX512", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
|
||||
{ config: "nogpu_AVX512", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
|
||||
{ config: "nogpu_NO_AVX2", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
|
||||
{ config: "nogpu_NO_AVX2", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
|
||||
{ config: "jit_legacy", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu" },
|
||||
@ -140,13 +139,13 @@ jobs:
|
||||
test-matrix: ${{ needs.linux-focal-cuda11_8-py3_10-gcc9-debug-build.outputs.test-matrix }}
|
||||
secrets: inherit
|
||||
|
||||
linux-focal-rocm-py3_10-build:
|
||||
name: linux-focal-rocm-py3.10
|
||||
linux-focal-rocm6_3-py3_10-build:
|
||||
name: linux-focal-rocm6.3-py3.10
|
||||
uses: ./.github/workflows/_linux-build.yml
|
||||
needs: get-label-type
|
||||
with:
|
||||
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
|
||||
build-environment: linux-focal-rocm-py3.10
|
||||
build-environment: linux-focal-rocm6.3-py3.10
|
||||
docker-image-name: pytorch-linux-focal-rocm-n-py3
|
||||
test-matrix: |
|
||||
{ include: [
|
||||
@ -156,19 +155,19 @@ jobs:
|
||||
]}
|
||||
secrets: inherit
|
||||
|
||||
linux-focal-rocm-py3_10-test:
|
||||
linux-focal-rocm6_3-py3_10-test:
|
||||
permissions:
|
||||
id-token: write
|
||||
contents: read
|
||||
name: linux-focal-rocm-py3.10
|
||||
name: linux-focal-rocm6.3-py3.10
|
||||
uses: ./.github/workflows/_rocm-test.yml
|
||||
needs:
|
||||
- linux-focal-rocm-py3_10-build
|
||||
- linux-focal-rocm6_3-py3_10-build
|
||||
- target-determination
|
||||
with:
|
||||
build-environment: linux-focal-rocm-py3.10
|
||||
docker-image: ${{ needs.linux-focal-rocm-py3_10-build.outputs.docker-image }}
|
||||
test-matrix: ${{ needs.linux-focal-rocm-py3_10-build.outputs.test-matrix }}
|
||||
build-environment: linux-focal-rocm6.3-py3.10
|
||||
docker-image: ${{ needs.linux-focal-rocm6_3-py3_10-build.outputs.docker-image }}
|
||||
test-matrix: ${{ needs.linux-focal-rocm6_3-py3_10-build.outputs.test-matrix }}
|
||||
secrets: inherit
|
||||
|
||||
linux-focal-cuda12_6-py3-gcc11-slow-gradcheck-build:
|
||||
@ -182,14 +181,14 @@ jobs:
|
||||
cuda-arch-list: 8.6
|
||||
test-matrix: |
|
||||
{ include: [
|
||||
{ config: "default", shard: 1, num_shards: 8, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu", owners: ["module:slowgradcheck"] },
|
||||
{ config: "default", shard: 2, num_shards: 8, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu", owners: ["module:slowgradcheck"] },
|
||||
{ config: "default", shard: 3, num_shards: 8, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu", owners: ["module:slowgradcheck"] },
|
||||
{ config: "default", shard: 4, num_shards: 8, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu", owners: ["module:slowgradcheck"] },
|
||||
{ config: "default", shard: 5, num_shards: 8, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu", owners: ["module:slowgradcheck"] },
|
||||
{ config: "default", shard: 6, num_shards: 8, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu", owners: ["module:slowgradcheck"] },
|
||||
{ config: "default", shard: 7, num_shards: 8, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu", owners: ["module:slowgradcheck"] },
|
||||
{ config: "default", shard: 8, num_shards: 8, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu", owners: ["module:slowgradcheck"] },
|
||||
{ config: "default", shard: 1, num_shards: 8, runner: "linux.g5.4xlarge.nvidia.gpu", owners: ["module:slowgradcheck"] },
|
||||
{ config: "default", shard: 2, num_shards: 8, runner: "linux.g5.4xlarge.nvidia.gpu", owners: ["module:slowgradcheck"] },
|
||||
{ config: "default", shard: 3, num_shards: 8, runner: "linux.g5.4xlarge.nvidia.gpu", owners: ["module:slowgradcheck"] },
|
||||
{ config: "default", shard: 4, num_shards: 8, runner: "linux.g5.4xlarge.nvidia.gpu", owners: ["module:slowgradcheck"] },
|
||||
{ config: "default", shard: 5, num_shards: 8, runner: "linux.g5.4xlarge.nvidia.gpu", owners: ["module:slowgradcheck"] },
|
||||
{ config: "default", shard: 6, num_shards: 8, runner: "linux.g5.4xlarge.nvidia.gpu", owners: ["module:slowgradcheck"] },
|
||||
{ config: "default", shard: 7, num_shards: 8, runner: "linux.g5.4xlarge.nvidia.gpu", owners: ["module:slowgradcheck"] },
|
||||
{ config: "default", shard: 8, num_shards: 8, runner: "linux.g5.4xlarge.nvidia.gpu", owners: ["module:slowgradcheck"] },
|
||||
]}
|
||||
secrets: inherit
|
||||
|
||||
@ -205,3 +204,18 @@ jobs:
|
||||
test-matrix: ${{ needs.linux-focal-cuda12_6-py3-gcc11-slow-gradcheck-build.outputs.test-matrix }}
|
||||
timeout-minutes: 300
|
||||
secrets: inherit
|
||||
|
||||
linux-focal-cuda12_6-py3_10-gcc11-bazel-test:
|
||||
name: linux-focal-cuda12.6-py3.10-gcc11-bazel-test
|
||||
uses: ./.github/workflows/_bazel-build-test.yml
|
||||
needs: get-label-type
|
||||
with:
|
||||
runner: "${{ needs.get-label-type.outputs.label-type }}linux.large"
|
||||
build-environment: linux-focal-cuda12.6-py3.10-gcc11-bazel-test
|
||||
docker-image-name: pytorch-linux-focal-cuda12.6-cudnn9-py3-gcc11
|
||||
cuda-version: "12.6"
|
||||
test-matrix: |
|
||||
{ include: [
|
||||
{ config: "default", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu" },
|
||||
]}
|
||||
secrets: inherit
|
||||
|
||||
11
.github/workflows/pull.yml
vendored
11
.github/workflows/pull.yml
vendored
@ -184,7 +184,7 @@ jobs:
|
||||
uses: ./.github/workflows/_linux-build.yml
|
||||
needs: get-label-type
|
||||
with:
|
||||
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
|
||||
runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge"
|
||||
build-environment: linux-focal-py3.9-clang10
|
||||
docker-image-name: pytorch-linux-focal-py3.9-clang10
|
||||
test-matrix: |
|
||||
@ -385,9 +385,6 @@ jobs:
|
||||
name: linux-focal-cpu-py3.10-gcc11-bazel-test
|
||||
uses: ./.github/workflows/_bazel-build-test.yml
|
||||
needs: get-label-type
|
||||
permissions:
|
||||
id-token: write
|
||||
contents: read
|
||||
with:
|
||||
runner: "${{ needs.get-label-type.outputs.label-type }}linux.large"
|
||||
build-environment: linux-focal-cuda12.6-py3.10-gcc11-bazel-test
|
||||
@ -414,15 +411,15 @@ jobs:
|
||||
]}
|
||||
secrets: inherit
|
||||
|
||||
linux-focal-rocm-py3_10-build:
|
||||
linux-focal-rocm6_3-py3_10-build:
|
||||
# don't run build twice on main
|
||||
if: github.event_name == 'pull_request'
|
||||
name: linux-focal-rocm-py3.10
|
||||
name: linux-focal-rocm6.3-py3.10
|
||||
uses: ./.github/workflows/_linux-build.yml
|
||||
needs: get-label-type
|
||||
with:
|
||||
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
|
||||
build-environment: linux-focal-rocm-py3.10
|
||||
build-environment: linux-focal-rocm6.3-py3.10
|
||||
docker-image-name: pytorch-linux-focal-rocm-n-py3
|
||||
sync-tag: rocm-build
|
||||
test-matrix: |
|
||||
|
||||
18
.github/workflows/rocm-mi300.yml
vendored
18
.github/workflows/rocm-mi300.yml
vendored
@ -36,14 +36,14 @@ jobs:
curr_branch: ${{ github.head_ref || github.ref_name }}
curr_ref_type: ${{ github.ref_type }}

linux-focal-rocm-py3_10-build:
linux-focal-rocm6_3-py3_10-build:
if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }}
name: linux-focal-rocm-py3.10
name: linux-focal-rocm6.3-py3.10
uses: ./.github/workflows/_linux-build.yml
needs: get-label-type
with:
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
build-environment: linux-focal-rocm-py3.10
build-environment: linux-focal-rocm6.3-py3.10
docker-image-name: pytorch-linux-focal-rocm-n-py3
sync-tag: rocm-build
test-matrix: |
@ -57,17 +57,17 @@ jobs:
]}
secrets: inherit

linux-focal-rocm-py3_10-test:
linux-focal-rocm6_3-py3_10-test:
permissions:
id-token: write
contents: read
name: linux-focal-rocm-py3.10
name: linux-focal-rocm6.3-py3.10
uses: ./.github/workflows/_rocm-test.yml
needs:
- linux-focal-rocm-py3_10-build
- linux-focal-rocm6_3-py3_10-build
- target-determination
with:
build-environment: linux-focal-rocm-py3.10
docker-image: ${{ needs.linux-focal-rocm-py3_10-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-focal-rocm-py3_10-build.outputs.test-matrix }}
build-environment: linux-focal-rocm6.3-py3.10
docker-image: ${{ needs.linux-focal-rocm6_3-py3_10-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-focal-rocm6_3-py3_10-build.outputs.test-matrix }}
secrets: inherit
.github/workflows/rocm.yml (18 changed lines)
@ -26,12 +26,12 @@ jobs:
id-token: write
contents: read

linux-focal-rocm-py3_10-build:
linux-focal-rocm6_3-py3_10-build:
if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }}
name: linux-focal-rocm-py3.10
name: linux-focal-rocm6.3-py3.10
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-focal-rocm-py3.10
build-environment: linux-focal-rocm6.3-py3.10
docker-image-name: pytorch-linux-focal-rocm-n-py3
sync-tag: rocm-build
test-matrix: |
@ -45,17 +45,17 @@ jobs:
]}
secrets: inherit

linux-focal-rocm-py3_10-test:
linux-focal-rocm6_3-py3_10-test:
permissions:
id-token: write
contents: read
name: linux-focal-rocm-py3.10
name: linux-focal-rocm6.3-py3.10
uses: ./.github/workflows/_rocm-test.yml
needs:
- linux-focal-rocm-py3_10-build
- linux-focal-rocm6_3-py3_10-build
- target-determination
with:
build-environment: linux-focal-rocm-py3.10
docker-image: ${{ needs.linux-focal-rocm-py3_10-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-focal-rocm-py3_10-build.outputs.test-matrix }}
build-environment: linux-focal-rocm6.3-py3.10
docker-image: ${{ needs.linux-focal-rocm6_3-py3_10-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-focal-rocm6_3-py3_10-build.outputs.test-matrix }}
secrets: inherit
.github/workflows/s390.yml (2 changed lines)
@ -21,6 +21,6 @@ jobs:
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-s390x-binary-manywheel
docker-image-name: pytorch/manylinuxs390x-builder:cpu-s390x
docker-image-name: pytorch/manylinuxs390x-builder:cpu-s390x-main
runner: linux.s390x
secrets: inherit
.github/workflows/s390x-periodic.yml (6 changed lines)
@ -42,7 +42,7 @@ jobs:
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-s390x-binary-manywheel
docker-image-name: pytorch/manylinuxs390x-builder:cpu-s390x
docker-image-name: pytorch/manylinuxs390x-builder:cpu-s390x-main
runner: linux.s390x
test-matrix: |
{ include: [
@ -70,8 +70,8 @@ jobs:
- target-determination
with:
build-environment: linux-s390x-binary-manywheel
docker-image: pytorch/manylinuxs390x-builder:cpu-s390x
docker-image: pytorch/manylinuxs390x-builder:cpu-s390x-main
test-matrix: ${{ needs.linux-manylinux-2_28-py3-cpu-s390x-build.outputs.test-matrix }}
timeout-minutes: 600
timeout-minutes: 480
use-gha: "yes"
secrets: inherit
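The s390x hunks make two small operational changes: the manylinux builder image is now addressed by its -main tag, and the periodic test budget drops from 600 to 480 minutes. Note that timeout-minutes here sits under with:, so it is forwarded as an input to the reusable test workflow rather than set as the usual job-level key. A minimal sketch of how a callee might consume such an input (the input name and default are assumptions; the real workflow may differ):

on:
  workflow_call:
    inputs:
      timeout-minutes:
        required: false
        type: number
        default: 240

jobs:
  test:
    # illustrative self-hosted runner label
    runs-on: linux.s390x
    timeout-minutes: ${{ inputs.timeout-minutes }}
    steps:
      - run: echo "running s390x tests"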