Mirror of https://github.com/pytorch/pytorch.git (synced 2025-11-03 07:24:58 +08:00)

Compare commits: cublasltre...mlazos/dyn (1 commit)

Commit: 66f30ce6dd
@@ -27,7 +27,6 @@ if [ "$DESIRED_CUDA" = "cpu" ]; then
USE_PRIORITIZED_TEXT_FOR_LD=1 python /pytorch/.ci/aarch64_linux/aarch64_wheel_ci_build.py --enable-mkldnn
else
echo "BASE_CUDA_VERSION is set to: $DESIRED_CUDA"
export USE_SYSTEM_NCCL=1
#USE_PRIORITIZED_TEXT_FOR_LD for enable linker script optimization https://github.com/pytorch/pytorch/pull/121975/files
USE_PRIORITIZED_TEXT_FOR_LD=1 python /pytorch/.ci/aarch64_linux/aarch64_wheel_ci_build.py --enable-mkldnn --enable-cuda
fi

@@ -31,47 +31,33 @@ def build_ArmComputeLibrary() -> None:
        "build=native",
    ]
    acl_install_dir = "/acl"
    acl_checkout_dir = os.getenv("ACL_SOURCE_DIR", "ComputeLibrary")
    if os.path.isdir(acl_install_dir):
        shutil.rmtree(acl_install_dir)
    if not os.path.isdir(acl_checkout_dir) or not len(os.listdir(acl_checkout_dir)):
        check_call(
            [
                "git",
                "clone",
                "https://github.com/ARM-software/ComputeLibrary.git",
                "-b",
                "v25.02",
                "--depth",
                "1",
                "--shallow-submodules",
            ]
        )
    acl_checkout_dir = "ComputeLibrary"
    os.makedirs(acl_install_dir)
    check_call(
        [
            "git",
            "clone",
            "https://github.com/ARM-software/ComputeLibrary.git",
            "-b",
            "v25.02",
            "--depth",
            "1",
            "--shallow-submodules",
        ]
    )

    check_call(
        ["scons", "Werror=1", f"-j{os.cpu_count()}"] + acl_build_flags,
        ["scons", "Werror=1", "-j8", f"build_dir=/{acl_install_dir}/build"]
        + acl_build_flags,
        cwd=acl_checkout_dir,
    )
    for d in ["arm_compute", "include", "utils", "support", "src", "build"]:
    for d in ["arm_compute", "include", "utils", "support", "src"]:
        shutil.copytree(f"{acl_checkout_dir}/{d}", f"{acl_install_dir}/{d}")


def replace_tag(filename) -> None:
    with open(filename) as f:
        lines = f.readlines()
    for i, line in enumerate(lines):
        if line.startswith("Tag:"):
            lines[i] = line.replace("-linux_", "-manylinux_2_28_")
            print(f"Updated tag from {line} to {lines[i]}")
            break

    with open(filename, "w") as f:
        f.writelines(lines)


def package_cuda_wheel(wheel_path, desired_cuda) -> None:
def update_wheel(wheel_path, desired_cuda) -> None:
    """
    Package the cuda wheel libraries
    Update the cuda wheel libraries
    """
    folder = os.path.dirname(wheel_path)
    wheelname = os.path.basename(wheel_path)
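
For context, a minimal sketch of the tag rewrite that replace_tag() performs on a wheel's WHEEL metadata; the sample tag below is illustrative, not taken from the diff:

# Rewrites the platform tag so the wheel advertises manylinux_2_28 compatibility.
sample = "Tag: cp310-cp310-linux_aarch64\n"  # hypothetical input line
updated = sample.replace("-linux_", "-manylinux_2_28_")
print(updated)  # Tag: cp310-cp310-manylinux_2_28_aarch64
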
@@ -102,19 +88,30 @@ def package_cuda_wheel(wheel_path, desired_cuda) -> None:
        "/usr/lib64/libgfortran.so.5",
        "/acl/build/libarm_compute.so",
        "/acl/build/libarm_compute_graph.so",
        "/usr/local/lib/libnvpl_lapack_lp64_gomp.so.0",
        "/usr/local/lib/libnvpl_blas_lp64_gomp.so.0",
        "/usr/local/lib/libnvpl_lapack_core.so.0",
        "/usr/local/lib/libnvpl_blas_core.so.0",
    ]

    if "128" in desired_cuda:
    if enable_cuda:
        libs_to_copy += [
            "/usr/local/cuda/lib64/libnvrtc-builtins.so.12.8",
            "/usr/local/cuda/lib64/libcufile.so.0",
            "/usr/local/cuda/lib64/libcufile_rdma.so.1",
            "/usr/local/lib/libnvpl_lapack_lp64_gomp.so.0",
            "/usr/local/lib/libnvpl_blas_lp64_gomp.so.0",
            "/usr/local/lib/libnvpl_lapack_core.so.0",
            "/usr/local/lib/libnvpl_blas_core.so.0",
        ]
        if "126" in desired_cuda:
            libs_to_copy += [
                "/usr/local/cuda/lib64/libnvrtc-builtins.so.12.6",
                "/usr/local/cuda/lib64/libcufile.so.0",
                "/usr/local/cuda/lib64/libcufile_rdma.so.1",
            ]
        elif "128" in desired_cuda:
            libs_to_copy += [
                "/usr/local/cuda/lib64/libnvrtc-builtins.so.12.8",
                "/usr/local/cuda/lib64/libcufile.so.0",
                "/usr/local/cuda/lib64/libcufile_rdma.so.1",
            ]
    else:
        libs_to_copy += [
            "/opt/OpenBLAS/lib/libopenblas.so.0",
        ]

    # Copy libraries to unzipped_folder/a/lib
    for lib_path in libs_to_copy:
        lib_name = os.path.basename(lib_path)
@@ -123,13 +120,6 @@ def package_cuda_wheel(wheel_path, desired_cuda) -> None:
            f"cd {folder}/tmp/torch/lib/; "
            f"patchelf --set-rpath '$ORIGIN' --force-rpath {folder}/tmp/torch/lib/{lib_name}"
        )

    # Make sure the wheel is tagged with manylinux_2_28
    for f in os.scandir(f"{folder}/tmp/"):
        if f.is_dir() and f.name.endswith(".dist-info"):
            replace_tag(f"{f.path}/WHEEL")
            break

    os.mkdir(f"{folder}/cuda_wheel")
    os.system(f"cd {folder}/tmp/; zip -r {folder}/cuda_wheel/{wheelname} *")
    shutil.move(
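
A minimal sketch of the per-library rpath rewrite shown above, assuming patchelf is on PATH and using a hypothetical library path:

import subprocess

lib = "torch/lib/libexample.so"  # hypothetical path for illustration
# '$ORIGIN' makes the dynamic loader search next to the library itself,
# so the dependencies copied into torch/lib are found before system copies.
subprocess.check_call(
    ["patchelf", "--set-rpath", "$ORIGIN", "--force-rpath", lib]
)
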
@@ -204,10 +194,8 @@ if __name__ == "__main__":
    ).decode()

    print("Building PyTorch wheel")
    build_vars = "CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000 "
    # MAX_JOB=5 is not required for CPU backend (see commit 465d98b)
    if enable_cuda:
        build_vars = "MAX_JOBS=5 " + build_vars
    build_vars = "MAX_JOBS=5 CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000 "
    os.system("cd /pytorch; python setup.py clean")

    override_package_version = os.getenv("OVERRIDE_PACKAGE_VERSION")
    desired_cuda = os.getenv("DESIRED_CUDA")
@@ -254,6 +242,6 @@ if __name__ == "__main__":
        print("Updating Cuda Dependency")
        filename = os.listdir("/pytorch/dist/")
        wheel_path = f"/pytorch/dist/{filename[0]}"
        package_cuda_wheel(wheel_path, desired_cuda)
        update_wheel(wheel_path, desired_cuda)
    pytorch_wheel_name = complete_wheel("/pytorch/")
    print(f"Build Complete. Created {pytorch_wheel_name}..")

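A small sketch of how the build environment string in the hunk above is assembled; the DESIRED_CUDA check used here is an assumption for illustration only:

import os

enable_cuda = os.getenv("DESIRED_CUDA", "cpu") != "cpu"  # assumed convention
build_vars = "CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000 "
if enable_cuda:
    # CUDA builds cap parallelism; the CPU backend does not need MAX_JOBS=5.
    build_vars = "MAX_JOBS=5 " + build_vars
print(build_vars)
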
@@ -1,7 +1,7 @@
ARG CUDA_VERSION=12.4
ARG BASE_TARGET=cuda${CUDA_VERSION}
ARG ROCM_IMAGE=rocm/dev-almalinux-8:6.3-complete
FROM amd64/almalinux:8.10-20250519 as base
FROM amd64/almalinux:8 as base

ENV LC_ALL en_US.UTF-8
ENV LANG en_US.UTF-8
@@ -11,8 +11,6 @@ ARG DEVTOOLSET_VERSION=11

RUN yum -y update
RUN yum -y install epel-release
# install glibc-langpack-en make sure en_US.UTF-8 locale is available
RUN yum -y install glibc-langpack-en
RUN yum install -y sudo wget curl perl util-linux xz bzip2 git patch which perl zlib-devel openssl-devel yum-utils autoconf automake make gcc-toolset-${DEVTOOLSET_VERSION}-toolchain
# Just add everything as a safe.directory for git since these will be used in multiple places with git
RUN git config --global --add safe.directory '*'
@@ -78,7 +76,7 @@ RUN bash ./install_mnist.sh
FROM base as all_cuda
COPY --from=cuda11.8 /usr/local/cuda-11.8 /usr/local/cuda-11.8
COPY --from=cuda12.6 /usr/local/cuda-12.6 /usr/local/cuda-12.6
COPY --from=cuda12.8 /usr/local/cuda-12.8 /usr/local/cuda-12.8
COPY --from=cuda12.4 /usr/local/cuda-12.8 /usr/local/cuda-12.8

# Final step
FROM ${BASE_TARGET} as final

@@ -50,21 +50,30 @@ if [[ "$image" == *xla* ]]; then
exit 0
fi

if [[ "$image" == *-jammy* ]]; then
if [[ "$image" == *-focal* ]]; then
UBUNTU_VERSION=20.04
elif [[ "$image" == *-jammy* ]]; then
UBUNTU_VERSION=22.04
elif [[ "$image" == *ubuntu* ]]; then
extract_version_from_image_name ubuntu UBUNTU_VERSION
elif [[ "$image" == *centos* ]]; then
extract_version_from_image_name centos CENTOS_VERSION
fi

if [ -n "${UBUNTU_VERSION}" ]; then
OS="ubuntu"
elif [ -n "${CENTOS_VERSION}" ]; then
OS="centos"
else
echo "Unable to derive operating system base..."
exit 1
fi

DOCKERFILE="${OS}/Dockerfile"
if [[ "$image" == *rocm* ]]; then
# When using ubuntu - 22.04, start from Ubuntu docker image, instead of nvidia/cuda docker image.
if [[ "$image" == *cuda* && "$UBUNTU_VERSION" != "22.04" ]]; then
DOCKERFILE="${OS}-cuda/Dockerfile"
elif [[ "$image" == *rocm* ]]; then
DOCKERFILE="${OS}-rocm/Dockerfile"
elif [[ "$image" == *xpu* ]]; then
DOCKERFILE="${OS}-xpu/Dockerfile"
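
Illustrative only: the distro detection above amounts to a substring-to-version lookup, roughly like this sketch (mapping values taken from the script; the helper function is not part of it):

def ubuntu_version_for(image: str):
    # Mirrors the -focal / -jammy branches of the shell logic above.
    if "-focal" in image:
        return "20.04"
    if "-jammy" in image:
        return "22.04"
    return None  # fall back to extract_version_from_image_name in the script

print(ubuntu_version_for("pytorch-linux-jammy-py3.9-gcc9"))  # 22.04
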
@@ -89,7 +98,7 @@ tag=$(echo $image | awk -F':' '{print $2}')
# configuration, so we hardcode everything here rather than do it
# from scratch
case "$tag" in
pytorch-linux-jammy-cuda12.6-cudnn9-py3-gcc11)
pytorch-linux-focal-cuda12.6-cudnn9-py3-gcc11)
CUDA_VERSION=12.6.3
CUDNN_VERSION=9
ANACONDA_PYTHON_VERSION=3.10
@@ -100,8 +109,8 @@ case "$tag" in
UCC_COMMIT=${_UCC_COMMIT}
TRITON=yes
;;
pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc9-inductor-benchmarks)
CUDA_VERSION=12.8.1
pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9-inductor-benchmarks)
CUDA_VERSION=12.4.1
CUDNN_VERSION=9
ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=9
@@ -112,8 +121,8 @@ case "$tag" in
TRITON=yes
INDUCTOR_BENCHMARKS=yes
;;
pytorch-linux-jammy-cuda12.8-cudnn9-py3.12-gcc9-inductor-benchmarks)
CUDA_VERSION=12.8.1
pytorch-linux-focal-cuda12.4-cudnn9-py3.12-gcc9-inductor-benchmarks)
CUDA_VERSION=12.4.1
CUDNN_VERSION=9
ANACONDA_PYTHON_VERSION=3.12
GCC_VERSION=9
@@ -124,8 +133,8 @@ case "$tag" in
TRITON=yes
INDUCTOR_BENCHMARKS=yes
;;
pytorch-linux-jammy-cuda12.8-cudnn9-py3.13-gcc9-inductor-benchmarks)
CUDA_VERSION=12.8.1
pytorch-linux-focal-cuda12.4-cudnn9-py3.13-gcc9-inductor-benchmarks)
CUDA_VERSION=12.4.1
CUDNN_VERSION=9
ANACONDA_PYTHON_VERSION=3.13
GCC_VERSION=9
@@ -136,7 +145,7 @@ case "$tag" in
TRITON=yes
INDUCTOR_BENCHMARKS=yes
;;
pytorch-linux-jammy-cuda12.6-cudnn9-py3-gcc9)
pytorch-linux-focal-cuda12.6-cudnn9-py3-gcc9)
CUDA_VERSION=12.6.3
CUDNN_VERSION=9
ANACONDA_PYTHON_VERSION=3.10
@@ -147,8 +156,8 @@ case "$tag" in
UCC_COMMIT=${_UCC_COMMIT}
TRITON=yes
;;
pytorch-linux-jammy-cuda12.6-cudnn9-py3-gcc9-inductor-benchmarks)
CUDA_VERSION=12.6
pytorch-linux-focal-cuda12.6-cudnn9-py3-gcc9-inductor-benchmarks)
CUDA_VERSION=12.6.3
CUDNN_VERSION=9
ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=9
@@ -159,8 +168,8 @@ case "$tag" in
TRITON=yes
INDUCTOR_BENCHMARKS=yes
;;
pytorch-linux-jammy-cuda12.6-cudnn9-py3.12-gcc9-inductor-benchmarks)
CUDA_VERSION=12.6
pytorch-linux-focal-cuda12.6-cudnn9-py3.12-gcc9-inductor-benchmarks)
CUDA_VERSION=12.6.3
CUDNN_VERSION=9
ANACONDA_PYTHON_VERSION=3.12
GCC_VERSION=9
@@ -171,8 +180,8 @@ case "$tag" in
TRITON=yes
INDUCTOR_BENCHMARKS=yes
;;
pytorch-linux-jammy-cuda12.6-cudnn9-py3.13-gcc9-inductor-benchmarks)
CUDA_VERSION=12.6
pytorch-linux-focal-cuda12.6-cudnn9-py3.13-gcc9-inductor-benchmarks)
CUDA_VERSION=12.6.3
CUDNN_VERSION=9
ANACONDA_PYTHON_VERSION=3.13
GCC_VERSION=9
@@ -183,8 +192,8 @@ case "$tag" in
TRITON=yes
INDUCTOR_BENCHMARKS=yes
;;
pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc9)
CUDA_VERSION=12.8.1
pytorch-linux-focal-cuda11.8-cudnn9-py3-gcc9)
CUDA_VERSION=11.8.0
CUDNN_VERSION=9
ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=9
@@ -194,25 +203,25 @@ case "$tag" in
UCC_COMMIT=${_UCC_COMMIT}
TRITON=yes
;;
pytorch-linux-jammy-py3-clang12-onnx)
pytorch-linux-focal-py3-clang10-onnx)
ANACONDA_PYTHON_VERSION=3.9
CLANG_VERSION=12
CLANG_VERSION=10
VISION=yes
ONNX=yes
;;
pytorch-linux-jammy-py3.9-clang12)
pytorch-linux-focal-py3.9-clang10)
ANACONDA_PYTHON_VERSION=3.9
CLANG_VERSION=12
CLANG_VERSION=10
VISION=yes
TRITON=yes
;;
pytorch-linux-jammy-py3.11-clang12)
pytorch-linux-focal-py3.11-clang10)
ANACONDA_PYTHON_VERSION=3.11
CLANG_VERSION=12
CLANG_VERSION=10
VISION=yes
TRITON=yes
;;
pytorch-linux-jammy-py3.9-gcc9)
pytorch-linux-focal-py3.9-gcc9)
ANACONDA_PYTHON_VERSION=3.9
GCC_VERSION=9
VISION=yes
@@ -267,9 +276,9 @@ case "$tag" in
DOCS=yes
INDUCTOR_BENCHMARKS=yes
;;
pytorch-linux-jammy-cuda12.8-cudnn9-py3.9-clang12)
pytorch-linux-jammy-cuda11.8-cudnn9-py3.9-clang12)
ANACONDA_PYTHON_VERSION=3.9
CUDA_VERSION=12.8.1
CUDA_VERSION=11.8
CUDNN_VERSION=9
CLANG_VERSION=12
VISION=yes
@@ -318,15 +327,15 @@ case "$tag" in
GCC_VERSION=11
TRITON_CPU=yes
;;
pytorch-linux-jammy-linter)
pytorch-linux-focal-linter)
# TODO: Use 3.9 here because of this issue https://github.com/python/mypy/issues/13627.
# We will need to update mypy version eventually, but that's for another day. The task
# would be to upgrade mypy to 1.0.0 with Python 3.11
PYTHON_VERSION=3.9
;;
pytorch-linux-jammy-cuda12.8-cudnn9-py3.9-linter)
pytorch-linux-jammy-cuda11.8-cudnn9-py3.9-linter)
PYTHON_VERSION=3.9
CUDA_VERSION=12.8.1
CUDA_VERSION=11.8
;;
pytorch-linux-jammy-aarch64-py3.10-gcc11)
ANACONDA_PYTHON_VERSION=3.10
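
For readers skimming the case statement above, it is effectively a lookup from image tag to a build configuration; a hedged sketch (two entries copied from the arms above, the dict itself is illustrative, not part of the script):

CONFIGS = {
    "pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc9": {
        "CUDA_VERSION": "12.8.1",
        "CUDNN_VERSION": "9",
        "ANACONDA_PYTHON_VERSION": "3.10",
        "GCC_VERSION": "9",
    },
    "pytorch-linux-jammy-py3.9-clang12": {
        "ANACONDA_PYTHON_VERSION": "3.9",
        "CLANG_VERSION": "12",
        "VISION": "yes",
        "TRITON": "yes",
    },
}
print(CONFIGS["pytorch-linux-jammy-py3.9-clang12"]["CLANG_VERSION"])  # 12
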
@@ -385,6 +394,14 @@ esac

tmp_tag=$(basename "$(mktemp -u)" | tr '[:upper:]' '[:lower:]')

#when using cudnn version 8 install it separately from cuda
if [[ "$image" == *cuda* && ${OS} == "ubuntu" ]]; then
IMAGE_NAME="nvidia/cuda:${CUDA_VERSION}-cudnn${CUDNN_VERSION}-devel-ubuntu${UBUNTU_VERSION}"
if [[ ${CUDNN_VERSION} == 9 ]]; then
IMAGE_NAME="nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}"
fi
fi

no_cache_flag=""
progress_flag=""
# Do not use cache and progress=plain when in CI
@@ -401,6 +418,7 @@ docker build \
--build-arg "LLVMDEV=${LLVMDEV:-}" \
--build-arg "VISION=${VISION:-}" \
--build-arg "UBUNTU_VERSION=${UBUNTU_VERSION}" \
--build-arg "CENTOS_VERSION=${CENTOS_VERSION}" \
--build-arg "DEVTOOLSET_VERSION=${DEVTOOLSET_VERSION}" \
--build-arg "GLIBC_VERSION=${GLIBC_VERSION}" \
--build-arg "CLANG_VERSION=${CLANG_VERSION}" \

@@ -1 +1 @@
f50bfa92602b45dca884a9e511e5d9ddbe8ba314
b173722085b3f555d6ba4533d6bbaddfd7c71144

@@ -1 +1 @@
b0e26b7359c147b8aa0af686c20510fb9b15990a
0bcc8265e677e5321606a3311bf71470f14456a8

@@ -1 +1 @@
c8757738a7418249896224430ce84888e8ecdd79
96316ce50fade7e209553aba4898cd9b82aab83b

@@ -36,6 +36,8 @@ install_ubuntu() {
# See https://github.com/pytorch/pytorch/issues/144768
if [[ "$UBUNTU_VERSION" == "20.04"* && "$CUDA_VERSION" == "11.8"* ]]; then
maybe_libnccl_dev="libnccl2=2.15.5-1+cuda11.8 libnccl-dev=2.15.5-1+cuda11.8 --allow-downgrades --allow-change-held-packages"
elif [[ "$UBUNTU_VERSION" == "20.04"* && "$CUDA_VERSION" == "12.4"* ]]; then
maybe_libnccl_dev="libnccl2=2.26.2-1+cuda12.4 libnccl-dev=2.26.2-1+cuda12.4 --allow-downgrades --allow-change-held-packages"
else
maybe_libnccl_dev=""
fi

@@ -54,6 +54,20 @@ function install_118 {
ldconfig
}

function install_124 {
CUDNN_VERSION=9.1.0.70
echo "Installing CUDA 12.4.1 and cuDNN ${CUDNN_VERSION} and NCCL and cuSparseLt-0.6.2"
install_cuda 12.4.1 cuda_12.4.1_550.54.15_linux

install_cudnn 12 $CUDNN_VERSION

CUDA_VERSION=12.4 bash install_nccl.sh

CUDA_VERSION=12.4 bash install_cusparselt.sh

ldconfig
}

function install_126 {
CUDNN_VERSION=9.5.1.17
echo "Installing CUDA 12.6.3 and cuDNN ${CUDNN_VERSION} and NCCL and cuSparseLt-0.6.3"
@@ -99,6 +113,40 @@ function prune_118 {
rm -rf $CUDA_BASE/libnvvp $CUDA_BASE/nsightee_plugins $CUDA_BASE/nsight-compute-2022.3.0 $CUDA_BASE/nsight-systems-2022.4.2/
}

function prune_124 {
echo "Pruning CUDA 12.4"
#####################################################################################
# CUDA 12.4 prune static libs
#####################################################################################
export NVPRUNE="/usr/local/cuda-12.4/bin/nvprune"
export CUDA_LIB_DIR="/usr/local/cuda-12.4/lib64"

export GENCODE="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90"
export GENCODE_CUDNN="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90"

if [[ -n "$OVERRIDE_GENCODE" ]]; then
export GENCODE=$OVERRIDE_GENCODE
fi
if [[ -n "$OVERRIDE_GENCODE_CUDNN" ]]; then
export GENCODE_CUDNN=$OVERRIDE_GENCODE_CUDNN
fi

# all CUDA libs except CuDNN and CuBLAS
ls $CUDA_LIB_DIR/ | grep "\.a" | grep -v "culibos" | grep -v "cudart" | grep -v "cudnn" | grep -v "cublas" | grep -v "metis" \
| xargs -I {} bash -c \
"echo {} && $NVPRUNE $GENCODE $CUDA_LIB_DIR/{} -o $CUDA_LIB_DIR/{}"

# prune CuDNN and CuBLAS
$NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublas_static.a -o $CUDA_LIB_DIR/libcublas_static.a
$NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublasLt_static.a -o $CUDA_LIB_DIR/libcublasLt_static.a

#####################################################################################
# CUDA 12.4 prune visual tools
#####################################################################################
export CUDA_BASE="/usr/local/cuda-12.4/"
rm -rf $CUDA_BASE/libnvvp $CUDA_BASE/nsightee_plugins $CUDA_BASE/nsight-compute-2024.1.0 $CUDA_BASE/nsight-systems-2023.4.4/
}

function prune_126 {
echo "Pruning CUDA 12.6"
#####################################################################################
@@ -135,9 +183,9 @@ function prune_126 {

function install_128 {
CUDNN_VERSION=9.8.0.87
echo "Installing CUDA 12.8.1 and cuDNN ${CUDNN_VERSION} and NCCL and cuSparseLt-0.6.3"
# install CUDA 12.8.1 in the same container
install_cuda 12.8.1 cuda_12.8.1_570.124.06_linux
echo "Installing CUDA 12.8.0 and cuDNN ${CUDNN_VERSION} and NCCL and cuSparseLt-0.6.3"
# install CUDA 12.8.0 in the same container
install_cuda 12.8.0 cuda_12.8.0_570.86.10_linux

# cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
install_cudnn 12 $CUDNN_VERSION
@@ -155,9 +203,11 @@ do
case "$1" in
11.8) install_118; prune_118
;;
12.6|12.6.*) install_126; prune_126
12.4) install_124; prune_124
;;
12.8|12.8.*) install_128;
12.6) install_126; prune_126
;;
12.8) install_128;
;;
*) echo "bad argument $1"; exit 1
;;

@@ -8,6 +8,8 @@ if [[ -n "${CUDNN_VERSION}" ]]; then
CUDNN_NAME="cudnn-linux-x86_64-9.8.0.87_cuda12-archive"
elif [[ ${CUDA_VERSION:0:4} == "12.6" ]]; then
CUDNN_NAME="cudnn-linux-x86_64-9.5.1.17_cuda12-archive"
elif [[ ${CUDA_VERSION:0:2} == "12" ]]; then
CUDNN_NAME="cudnn-linux-x86_64-9.1.0.70_cuda12-archive"
elif [[ ${CUDA_VERSION:0:2} == "11" ]]; then
CUDNN_NAME="cudnn-linux-x86_64-9.1.0.70_cuda11-archive"
else

@@ -13,6 +13,14 @@ if [[ ${CUDA_VERSION:0:4} =~ ^12\.[5-8]$ ]]; then
fi
CUSPARSELT_NAME="libcusparse_lt-linux-${arch_path}-0.6.3.2-archive"
curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-${arch_path}/${CUSPARSELT_NAME}.tar.xz
elif [[ ${CUDA_VERSION:0:4} == "12.4" ]]; then
arch_path='sbsa'
export TARGETARCH=${TARGETARCH:-$(uname -m)}
if [ ${TARGETARCH} = 'amd64' ] || [ "${TARGETARCH}" = 'x86_64' ]; then
arch_path='x86_64'
fi
CUSPARSELT_NAME="libcusparse_lt-linux-${arch_path}-0.6.2.3-archive"
curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-${arch_path}/${CUSPARSELT_NAME}.tar.xz
elif [[ ${CUDA_VERSION:0:4} == "11.8" ]]; then
CUSPARSELT_NAME="libcusparse_lt-linux-x86_64-0.4.0.7-archive"
curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-x86_64/${CUSPARSELT_NAME}.tar.xz

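A minimal sketch of the cuSPARSELt archive-name construction above; version and arch values are copied from the hunk, and the snippet itself is illustrative rather than part of the installer:

import os
import platform

# Mirror the TARGETARCH handling: default to sbsa, switch to x86_64 on amd64 hosts.
targetarch = os.getenv("TARGETARCH", platform.machine())
arch_path = "x86_64" if targetarch in ("amd64", "x86_64") else "sbsa"
name = f"libcusparse_lt-linux-{arch_path}-0.6.2.3-archive"
url = (
    "https://developer.download.nvidia.com/compute/cusparselt/redist/"
    f"libcusparse_lt/linux-{arch_path}/{name}.tar.xz"
)
print(url)
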
@@ -8,6 +8,16 @@ retry () {
"$@" || (sleep 10 && "$@") || (sleep 20 && "$@") || (sleep 40 && "$@")
}

# A bunch of custom pip dependencies for ONNX
pip_install \
beartype==0.15.0 \
filelock==3.9.0 \
flatbuffers==2.0 \
mock==5.0.1 \
ninja==1.10.2 \
networkx==2.5 \
numpy==1.24.2

# ONNXRuntime should be installed before installing
# onnx-weekly. Otherwise, onnx-weekly could be
# overwritten by onnx.
@@ -19,8 +29,12 @@ pip_install \
transformers==4.36.2

pip_install coloredlogs packaging

pip_install onnxruntime==1.18.1
pip_install onnxscript==0.3.0
pip_install onnx==1.17.0
pip_install onnxscript==0.2.2 --no-deps
# required by onnxscript
pip_install ml_dtypes

# Cache the transformers model to be used later by ONNX tests. We need to run the transformers
# package to download the model. By default, the model is cached at ~/.cache/huggingface/hub/

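The caching step described in the comment above can be sketched as follows; the model name is a placeholder, not the one the script actually downloads:

from transformers import AutoModel, AutoTokenizer

model_id = "hf-internal-testing/tiny-random-gpt2"  # hypothetical example model
# Downloading once here populates ~/.cache/huggingface/hub/ so later ONNX tests
# can reuse the cached weights without hitting the network.
AutoTokenizer.from_pretrained(model_id)
AutoModel.from_pretrained(model_id)
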
@@ -4,7 +4,8 @@
set -ex

cd /
git clone https://github.com/OpenMathLib/OpenBLAS.git -b "${OPENBLAS_VERSION:-v0.3.29}" --depth 1 --shallow-submodules
git clone https://github.com/OpenMathLib/OpenBLAS.git -b v0.3.29 --depth 1 --shallow-submodules


OPENBLAS_BUILD_FLAGS="
NUM_THREADS=128

@@ -51,12 +51,7 @@ as_jenkins git clone --recursive ${TRITON_REPO} triton
cd triton
as_jenkins git checkout ${TRITON_PINNED_COMMIT}
as_jenkins git submodule update --init --recursive

# Old versions of python have setup.py in ./python; newer versions have it in ./
if [ ! -f setup.py ]; then
cd python
fi

cd python
pip_install pybind11==2.13.6

# TODO: remove patch setup.py once we have a proper fix for https://github.com/triton-lang/triton/issues/4527

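A tiny sketch of the "setup.py moved" check described in the comment above (directory names are assumed for illustration):

import os

# Newer Triton checkouts keep setup.py at the repo root; older ones keep it under ./python.
build_dir = "triton" if os.path.isfile("triton/setup.py") else "triton/python"
print(f"building from {build_dir}")
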
@ -58,7 +58,6 @@ RUN git config --global --add safe.directory "*"
|
||||
|
||||
FROM base as openblas
|
||||
# Install openblas
|
||||
ARG OPENBLAS_VERSION
|
||||
ADD ./common/install_openblas.sh install_openblas.sh
|
||||
RUN bash ./install_openblas.sh && rm install_openblas.sh
|
||||
|
||||
|
||||
@ -5,9 +5,7 @@ ENV LC_ALL=C.UTF-8
|
||||
ENV LANG=C.UTF-8
|
||||
ENV LANGUAGE=C.UTF-8
|
||||
|
||||
# there is a bugfix in gcc >= 14 for precompiled headers and s390x vectorization interaction.
|
||||
# with earlier gcc versions test/inductor/test_cpu_cpp_wrapper.py will fail.
|
||||
ARG DEVTOOLSET_VERSION=14
|
||||
ARG DEVTOOLSET_VERSION=13
|
||||
# Installed needed OS packages. This is to support all
|
||||
# the binary builds (torch, vision, audio, text, data)
|
||||
RUN yum -y install epel-release
|
||||
@ -60,8 +58,7 @@ RUN yum install -y \
|
||||
libxslt-devel \
|
||||
libxml2-devel \
|
||||
openssl-devel \
|
||||
valgrind \
|
||||
ninja-build
|
||||
valgrind
|
||||
|
||||
ENV PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH
|
||||
ENV LD_LIBRARY_PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib64:/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib:$LD_LIBRARY_PATH
|
||||
@ -106,6 +103,9 @@ CMD ["/bin/bash"]
|
||||
# install test dependencies:
|
||||
# - grpcio requires system openssl, bundled crypto fails to build
|
||||
RUN dnf install -y \
|
||||
protobuf-devel \
|
||||
protobuf-c-devel \
|
||||
protobuf-lite-devel \
|
||||
hdf5-devel \
|
||||
python3-h5py \
|
||||
git
|
||||
@ -129,9 +129,6 @@ RUN pip3 install flatbuffers && \
|
||||
git clone https://github.com/microsoft/onnxruntime && \
|
||||
cd onnxruntime && git checkout v1.21.0 && \
|
||||
git submodule update --init --recursive && \
|
||||
./build.sh --config Release --parallel 0 --enable_pybind \
|
||||
--build_wheel --enable_training --enable_training_apis \
|
||||
--enable_training_ops --skip_tests --allow_running_as_root \
|
||||
--compile_no_warning_as_error && \
|
||||
./build.sh --config Release --parallel 0 --enable_pybind --build_wheel --enable_training --enable_training_apis --enable_training_ops --skip_tests --allow_running_as_root && \
|
||||
pip3 install ./build/Linux/Release/dist/onnxruntime_training-*.whl && \
|
||||
cd .. && /bin/rm -rf ./onnxruntime
|
||||
|
||||
@ -27,7 +27,6 @@ fi
|
||||
|
||||
MANY_LINUX_VERSION=${MANY_LINUX_VERSION:-}
|
||||
DOCKERFILE_SUFFIX=${DOCKERFILE_SUFFIX:-}
|
||||
OPENBLAS_VERSION=${OPENBLAS_VERSION:-}
|
||||
|
||||
case ${image} in
|
||||
manylinux2_28-builder:cpu)
|
||||
@ -41,7 +40,6 @@ case ${image} in
|
||||
GPU_IMAGE=arm64v8/almalinux:8
|
||||
DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=13 --build-arg NINJA_VERSION=1.12.1"
|
||||
MANY_LINUX_VERSION="2_28_aarch64"
|
||||
OPENBLAS_VERSION="v0.3.29"
|
||||
;;
|
||||
manylinuxcxx11-abi-builder:cpu-cxx11-abi)
|
||||
TARGET=final
|
||||
@ -111,7 +109,6 @@ tmp_tag=$(basename "$(mktemp -u)" | tr '[:upper:]' '[:lower:]')
|
||||
DOCKER_BUILDKIT=1 docker build \
|
||||
${DOCKER_GPU_BUILD_ARG} \
|
||||
--build-arg "GPU_IMAGE=${GPU_IMAGE}" \
|
||||
--build-arg "OPENBLAS_VERSION=${OPENBLAS_VERSION}" \
|
||||
--target "${TARGET}" \
|
||||
-t "${tmp_tag}" \
|
||||
$@ \
|
||||
|
||||
@ -41,11 +41,14 @@ fbscribelogger==0.1.7
|
||||
#Pinned versions: 0.1.6
|
||||
#test that import:
|
||||
|
||||
flatbuffers==24.12.23
|
||||
flatbuffers==2.0 ; platform_machine != "s390x"
|
||||
#Description: cross platform serialization library
|
||||
#Pinned versions: 24.12.23
|
||||
#Pinned versions: 2.0
|
||||
#test that import:
|
||||
|
||||
flatbuffers ; platform_machine == "s390x"
|
||||
#Description: cross platform serialization library; Newer version is required on s390x for new python version
|
||||
|
||||
hypothesis==5.35.1
|
||||
# Pin hypothesis to avoid flakiness: https://github.com/pytorch/pytorch/issues/31136
|
||||
#Description: advanced library for generating parametrized tests
|
||||
@ -90,7 +93,7 @@ librosa>=0.6.2 ; python_version < "3.11"
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
mypy==1.15.0
|
||||
mypy==1.14.0
|
||||
# Pin MyPy version because new errors are likely to appear with each release
|
||||
#Description: linter
|
||||
#Pinned versions: 1.14.0
|
||||
@ -163,10 +166,10 @@ pillow==11.0.0
|
||||
#Pinned versions: 10.3.0
|
||||
#test that import:
|
||||
|
||||
protobuf==5.29.4
|
||||
#Description: Google's data interchange format
|
||||
#Pinned versions: 5.29.4
|
||||
#test that import: test_tensorboard.py, test/onnx/*
|
||||
protobuf==3.20.2
|
||||
#Description: Google’s data interchange format
|
||||
#Pinned versions: 3.20.1
|
||||
#test that import: test_tensorboard.py
|
||||
|
||||
psutil
|
||||
#Description: information on running processes and system utilization
|
||||
@ -334,12 +337,12 @@ sympy==1.13.3
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
onnx==1.18.0
|
||||
#Description: Required by onnx tests, and mypy and test_public_bindings.py when checking torch.onnx._internal
|
||||
onnx==1.17.0
|
||||
#Description: Required by mypy and test_public_bindings.py when checking torch.onnx._internal
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
onnxscript==0.2.6
|
||||
onnxscript==0.2.2
|
||||
#Description: Required by mypy and test_public_bindings.py when checking torch.onnx._internal
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
@ -15,10 +15,6 @@ sphinxext-opengraph==0.9.1
|
||||
#Description: This is used to generate PyTorch docs
|
||||
#Pinned versions: 0.9.1
|
||||
|
||||
sphinx_sitemap==2.6.0
|
||||
#Description: This is used to generate sitemap for PyTorch docs
|
||||
#Pinned versions: 2.6.0
|
||||
|
||||
matplotlib==3.5.3
|
||||
#Description: This is used to generate PyTorch docs
|
||||
#Pinned versions: 3.5.3
|
||||
|
||||
@ -1 +1 @@
|
||||
3.3.1
|
||||
3.3.0
|
||||
|
||||
.ci/docker/ubuntu-cuda/Dockerfile (new file, 170 lines)
@@ -0,0 +1,170 @@
|
||||
ARG UBUNTU_VERSION
|
||||
ARG CUDA_VERSION
|
||||
ARG IMAGE_NAME
|
||||
|
||||
FROM ${IMAGE_NAME} as base
|
||||
|
||||
ARG UBUNTU_VERSION
|
||||
ARG CUDA_VERSION
|
||||
|
||||
ENV DEBIAN_FRONTEND noninteractive
|
||||
|
||||
# Install common dependencies (so that this step can be cached separately)
|
||||
COPY ./common/install_base.sh install_base.sh
|
||||
RUN bash ./install_base.sh && rm install_base.sh
|
||||
|
||||
# Install user
|
||||
COPY ./common/install_user.sh install_user.sh
|
||||
RUN bash ./install_user.sh && rm install_user.sh
|
||||
|
||||
# Install katex
|
||||
ARG KATEX
|
||||
COPY ./common/install_docs_reqs.sh install_docs_reqs.sh
|
||||
RUN bash ./install_docs_reqs.sh && rm install_docs_reqs.sh
|
||||
|
||||
# Install conda and other packages (e.g., numpy, pytest)
|
||||
ARG ANACONDA_PYTHON_VERSION
|
||||
ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION
|
||||
ENV PATH /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/bin:/opt/conda/bin:$PATH
|
||||
COPY requirements-ci.txt /opt/conda/requirements-ci.txt
|
||||
COPY ./common/install_conda.sh install_conda.sh
|
||||
COPY ./common/common_utils.sh common_utils.sh
|
||||
COPY ./common/install_magma_conda.sh install_magma_conda.sh
|
||||
RUN bash ./install_conda.sh && rm install_conda.sh install_magma_conda.sh common_utils.sh /opt/conda/requirements-ci.txt
|
||||
|
||||
# Install gcc
|
||||
ARG GCC_VERSION
|
||||
COPY ./common/install_gcc.sh install_gcc.sh
|
||||
RUN bash ./install_gcc.sh && rm install_gcc.sh
|
||||
|
||||
# Install clang
|
||||
ARG CLANG_VERSION
|
||||
COPY ./common/install_clang.sh install_clang.sh
|
||||
RUN bash ./install_clang.sh && rm install_clang.sh
|
||||
|
||||
# (optional) Install vision packages like OpenCV
|
||||
ARG VISION
|
||||
COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./
|
||||
RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi
|
||||
RUN rm install_vision.sh cache_vision_models.sh common_utils.sh
|
||||
ENV INSTALLED_VISION ${VISION}
|
||||
|
||||
# (optional) Install UCC
|
||||
ARG UCX_COMMIT
|
||||
ARG UCC_COMMIT
|
||||
ENV UCX_COMMIT $UCX_COMMIT
|
||||
ENV UCC_COMMIT $UCC_COMMIT
|
||||
ENV UCX_HOME /usr
|
||||
ENV UCC_HOME /usr
|
||||
ADD ./common/install_ucc.sh install_ucc.sh
|
||||
RUN if [ -n "${UCX_COMMIT}" ] && [ -n "${UCC_COMMIT}" ]; then bash ./install_ucc.sh; fi
|
||||
RUN rm install_ucc.sh
|
||||
|
||||
COPY ./common/install_openssl.sh install_openssl.sh
|
||||
ENV OPENSSL_ROOT_DIR /opt/openssl
|
||||
RUN bash ./install_openssl.sh
|
||||
ENV OPENSSL_DIR /opt/openssl
|
||||
|
||||
ARG INDUCTOR_BENCHMARKS
|
||||
ARG ANACONDA_PYTHON_VERSION
|
||||
ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION
|
||||
COPY ./common/install_inductor_benchmark_deps.sh install_inductor_benchmark_deps.sh
|
||||
COPY ./common/common_utils.sh common_utils.sh
|
||||
COPY ci_commit_pins/huggingface.txt huggingface.txt
|
||||
COPY ci_commit_pins/timm.txt timm.txt
|
||||
RUN if [ -n "${INDUCTOR_BENCHMARKS}" ]; then bash ./install_inductor_benchmark_deps.sh; fi
|
||||
RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface.txt
|
||||
|
||||
ARG TRITON
|
||||
|
||||
FROM base as triton-builder
|
||||
# Install triton, this needs to be done before sccache because the latter will
|
||||
# try to reach out to S3, which docker build runners don't have access
|
||||
COPY ./common/install_triton.sh install_triton.sh
|
||||
COPY ./common/common_utils.sh common_utils.sh
|
||||
COPY ci_commit_pins/triton.txt triton.txt
|
||||
COPY triton_version.txt triton_version.txt
|
||||
RUN bash ./install_triton.sh
|
||||
|
||||
FROM base as final
|
||||
COPY --from=triton-builder /opt/triton /opt/triton
|
||||
RUN if [ -n "${TRITON}" ]; then pip install /opt/triton/*.whl; chown -R jenkins:jenkins /opt/conda; fi
|
||||
RUN rm -rf /opt/triton
|
||||
|
||||
ARG HALIDE
|
||||
# Build and install halide
|
||||
COPY ./common/install_halide.sh install_halide.sh
|
||||
COPY ./common/common_utils.sh common_utils.sh
|
||||
COPY ci_commit_pins/halide.txt halide.txt
|
||||
RUN if [ -n "${HALIDE}" ]; then bash ./install_halide.sh; fi
|
||||
RUN rm install_halide.sh common_utils.sh halide.txt
|
||||
|
||||
# Install ccache/sccache (do this last, so we get priority in PATH)
|
||||
COPY ./common/install_cache.sh install_cache.sh
|
||||
ENV PATH /opt/cache/bin:$PATH
|
||||
# See https://github.com/pytorch/pytorch/issues/82174
|
||||
# TODO(sdym@fb.com):
|
||||
# check if this is needed after full off Xenial migration
|
||||
ENV CARGO_NET_GIT_FETCH_WITH_CLI true
|
||||
RUN bash ./install_cache.sh && rm install_cache.sh
|
||||
ENV CMAKE_CUDA_COMPILER_LAUNCHER=/opt/cache/bin/sccache
|
||||
|
||||
# Add jni.h for java host build
|
||||
COPY ./common/install_jni.sh install_jni.sh
|
||||
COPY ./java/jni.h jni.h
|
||||
RUN bash ./install_jni.sh && rm install_jni.sh
|
||||
|
||||
# Install Open MPI for CUDA
|
||||
COPY ./common/install_openmpi.sh install_openmpi.sh
|
||||
RUN if [ -n "${CUDA_VERSION}" ]; then bash install_openmpi.sh; fi
|
||||
RUN rm install_openmpi.sh
|
||||
|
||||
# Include BUILD_ENVIRONMENT environment variable in image
|
||||
ARG BUILD_ENVIRONMENT
|
||||
ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT}
|
||||
|
||||
# AWS specific CUDA build guidance
|
||||
ENV TORCH_CUDA_ARCH_LIST Maxwell
|
||||
ENV TORCH_NVCC_FLAGS "-Xfatbin -compress-all"
|
||||
ENV CUDA_PATH /usr/local/cuda
|
||||
|
||||
# Install LLVM dev version (Defined in the pytorch/builder github repository)
|
||||
COPY --from=pytorch/llvm:9.0.1 /opt/llvm /opt/llvm
|
||||
|
||||
# Install CUDNN
|
||||
ARG CUDNN_VERSION
|
||||
ARG CUDA_VERSION
|
||||
COPY ./common/install_cudnn.sh install_cudnn.sh
|
||||
RUN if [ -n "${CUDNN_VERSION}" ]; then bash install_cudnn.sh; fi
|
||||
RUN rm install_cudnn.sh
|
||||
|
||||
# Install CUSPARSELT
|
||||
ARG CUDA_VERSION
|
||||
COPY ./common/install_cusparselt.sh install_cusparselt.sh
|
||||
RUN bash install_cusparselt.sh
|
||||
RUN rm install_cusparselt.sh
|
||||
|
||||
# Install NCCL
|
||||
ARG CUDA_VERSION
|
||||
COPY ./common/install_nccl.sh install_nccl.sh
|
||||
COPY ./ci_commit_pins/nccl-cu* /ci_commit_pins/
|
||||
RUN bash install_nccl.sh
|
||||
RUN rm install_nccl.sh /ci_commit_pins/nccl-cu*
|
||||
ENV USE_SYSTEM_NCCL=1
|
||||
ENV NCCL_INCLUDE_DIR="/usr/local/cuda/include/"
|
||||
ENV NCCL_LIB_DIR="/usr/local/cuda/lib64/"
|
||||
|
||||
# Install CUDSS
|
||||
ARG CUDA_VERSION
|
||||
COPY ./common/install_cudss.sh install_cudss.sh
|
||||
RUN bash install_cudss.sh
|
||||
RUN rm install_cudss.sh
|
||||
|
||||
# Delete /usr/local/cuda-11.X/cuda-11.X symlinks
|
||||
RUN if [ -h /usr/local/cuda-11.6/cuda-11.6 ]; then rm /usr/local/cuda-11.6/cuda-11.6; fi
|
||||
RUN if [ -h /usr/local/cuda-11.7/cuda-11.7 ]; then rm /usr/local/cuda-11.7/cuda-11.7; fi
|
||||
RUN if [ -h /usr/local/cuda-12.1/cuda-12.1 ]; then rm /usr/local/cuda-12.1/cuda-12.1; fi
|
||||
RUN if [ -h /usr/local/cuda-12.4/cuda-12.4 ]; then rm /usr/local/cuda-12.4/cuda-12.4; fi
|
||||
|
||||
USER jenkins
|
||||
CMD ["bash"]
|
||||
@ -18,10 +18,12 @@ retry () {
|
||||
$* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*)
|
||||
}
|
||||
|
||||
PLATFORM=""
|
||||
PLATFORM="manylinux2014_x86_64"
|
||||
# TODO move this into the Docker images
|
||||
OS_NAME=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
|
||||
if [[ "$OS_NAME" == *"AlmaLinux"* ]]; then
|
||||
if [[ "$OS_NAME" == *"CentOS Linux"* ]]; then
|
||||
retry yum install -q -y zip openssl
|
||||
elif [[ "$OS_NAME" == *"AlmaLinux"* ]]; then
|
||||
retry yum install -q -y zip openssl
|
||||
PLATFORM="manylinux_2_28_x86_64"
|
||||
elif [[ "$OS_NAME" == *"Red Hat Enterprise Linux"* ]]; then
|
||||
@ -34,9 +36,6 @@ elif [[ "$OS_NAME" == *"Ubuntu"* ]]; then
|
||||
|
||||
retry apt-get update
|
||||
retry apt-get -y install zip openssl
|
||||
else
|
||||
echo "Unknown OS: '$OS_NAME'"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# We use the package name to test the package by passing this to 'pip install'
|
||||
@ -80,6 +79,8 @@ if [[ -e /opt/openssl ]]; then
|
||||
export CMAKE_INCLUDE_PATH="/opt/openssl/include":$CMAKE_INCLUDE_PATH
|
||||
fi
|
||||
|
||||
|
||||
|
||||
mkdir -p /tmp/$WHEELHOUSE_DIR
|
||||
|
||||
export PATCHELF_BIN=/usr/local/bin/patchelf
|
||||
|
||||
@ -15,9 +15,6 @@ export INSTALL_TEST=0 # dont install test binaries into site-packages
|
||||
export USE_CUPTI_SO=0
|
||||
export USE_CUSPARSELT=${USE_CUSPARSELT:-1} # Enable if not disabled by libtorch build
|
||||
export USE_CUFILE=${USE_CUFILE:-1}
|
||||
export USE_SYSTEM_NCCL=1
|
||||
export NCCL_INCLUDE_DIR="/usr/local/cuda/include/"
|
||||
export NCCL_LIB_DIR="/usr/local/cuda/lib64/"
|
||||
|
||||
# Keep an array of cmake variables to add to
|
||||
if [[ -z "$CMAKE_ARGS" ]]; then
|
||||
@@ -39,8 +36,10 @@ if [[ -n "$DESIRED_CUDA" ]]; then
if [[ ${DESIRED_CUDA} =~ ^[0-9]+\.[0-9]+$ ]]; then
CUDA_VERSION=${DESIRED_CUDA}
else
# cu126, cu128 etc...
if [[ ${#DESIRED_CUDA} -eq 5 ]]; then
# cu90, cu92, cu100, cu101
if [[ ${#DESIRED_CUDA} -eq 4 ]]; then
CUDA_VERSION="${DESIRED_CUDA:2:1}.${DESIRED_CUDA:3:1}"
elif [[ ${#DESIRED_CUDA} -eq 5 ]]; then
CUDA_VERSION="${DESIRED_CUDA:2:2}.${DESIRED_CUDA:4:1}"
fi
fi
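
Illustrative Python equivalent of the DESIRED_CUDA parsing above (same slicing rules; the helper itself is not part of the script):

def cuda_version_from(desired_cuda: str) -> str:
    # "cu90" -> "9.0" (4 chars); "cu126"/"cu128" -> "12.6"/"12.8" (5 chars).
    if len(desired_cuda) == 4:
        return f"{desired_cuda[2]}.{desired_cuda[3]}"
    if len(desired_cuda) == 5:
        return f"{desired_cuda[2:4]}.{desired_cuda[4]}"
    return desired_cuda  # already in dotted form, e.g. "12.8"

print(cuda_version_from("cu126"))  # 12.6
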
@ -62,6 +61,10 @@ case ${CUDA_VERSION} in
|
||||
TORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST};9.0"
|
||||
EXTRA_CAFFE2_CMAKE_FLAGS+=("-DATEN_NO_TEST=ON")
|
||||
;;
|
||||
12.4)
|
||||
TORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST};9.0"
|
||||
EXTRA_CAFFE2_CMAKE_FLAGS+=("-DATEN_NO_TEST=ON")
|
||||
;;
|
||||
11.8)
|
||||
TORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST};3.7;9.0"
|
||||
EXTRA_CAFFE2_CMAKE_FLAGS+=("-DATEN_NO_TEST=ON")
|
||||
@ -88,15 +91,14 @@ fi
|
||||
mkdir -p "$PYTORCH_FINAL_PACKAGE_DIR" || true
|
||||
|
||||
OS_NAME=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
|
||||
if [[ "$OS_NAME" == *"AlmaLinux"* ]]; then
|
||||
if [[ "$OS_NAME" == *"CentOS Linux"* ]]; then
|
||||
LIBGOMP_PATH="/usr/lib64/libgomp.so.1"
|
||||
elif [[ "$OS_NAME" == *"AlmaLinux"* ]]; then
|
||||
LIBGOMP_PATH="/usr/lib64/libgomp.so.1"
|
||||
elif [[ "$OS_NAME" == *"Red Hat Enterprise Linux"* ]]; then
|
||||
LIBGOMP_PATH="/usr/lib64/libgomp.so.1"
|
||||
elif [[ "$OS_NAME" == *"Ubuntu"* ]]; then
|
||||
LIBGOMP_PATH="/usr/lib/x86_64-linux-gnu/libgomp.so.1"
|
||||
else
|
||||
echo "Unknown OS: '$OS_NAME'"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
DEPS_LIST=(
|
||||
@ -106,8 +108,26 @@ DEPS_SONAME=(
|
||||
"libgomp.so.1"
|
||||
)
|
||||
|
||||
# CUDA 11.8 have to ship the libcusparseLt.so.0 with the binary
|
||||
# since nvidia-cusparselt-cu11 is not available in PYPI
|
||||
if [[ $USE_CUSPARSELT == "1" && $CUDA_VERSION == "11.8" ]]; then
|
||||
DEPS_SONAME+=(
|
||||
"libcusparseLt.so.0"
|
||||
)
|
||||
DEPS_LIST+=(
|
||||
"/usr/local/cuda/lib64/libcusparseLt.so.0"
|
||||
)
|
||||
fi
|
||||
|
||||
# CUDA_VERSION 12.6, 12.8
|
||||
|
||||
# Turn USE_CUFILE off for CUDA 11.8, 12.4 since nvidia-cufile-cu11 and 1.9.0.20 are
|
||||
# not available in PYPI
|
||||
if [[ $CUDA_VERSION == "11.8" || $CUDA_VERSION == "12.4" ]]; then
|
||||
export USE_CUFILE=0
|
||||
fi
|
||||
|
||||
|
||||
# CUDA_VERSION 12.4, 12.6, 12.8
|
||||
if [[ $CUDA_VERSION == 12* ]]; then
|
||||
export USE_STATIC_CUDNN=0
|
||||
# Try parallelizing nvcc as well
|
||||
@ -131,8 +151,6 @@ if [[ $CUDA_VERSION == 12* ]]; then
|
||||
"/usr/local/cuda/lib64/libnvToolsExt.so.1"
|
||||
"/usr/local/cuda/lib64/libnvrtc.so.12"
|
||||
"/usr/local/cuda/lib64/libnvrtc-builtins.so"
|
||||
"/usr/local/cuda/lib64/libcufile.so.0"
|
||||
"/usr/local/cuda/lib64/libcufile_rdma.so.1"
|
||||
)
|
||||
DEPS_SONAME+=(
|
||||
"libcudnn_adv.so.9"
|
||||
@ -150,9 +168,17 @@ if [[ $CUDA_VERSION == 12* ]]; then
|
||||
"libnvToolsExt.so.1"
|
||||
"libnvrtc.so.12"
|
||||
"libnvrtc-builtins.so"
|
||||
"libcufile.so.0"
|
||||
"libcufile_rdma.so.1"
|
||||
)
|
||||
if [[ $USE_CUFILE == 1 ]]; then
|
||||
DEPS_LIST+=(
|
||||
"/usr/local/cuda/lib64/libcufile.so.0"
|
||||
"/usr/local/cuda/lib64/libcufile_rdma.so.1"
|
||||
)
|
||||
DEPS_SONAME+=(
|
||||
"libcufile.so.0"
|
||||
"libcufile_rdma.so.1"
|
||||
)
|
||||
fi
|
||||
else
|
||||
echo "Using nvidia libs from pypi."
|
||||
CUDA_RPATHS=(
|
||||
@ -167,40 +193,32 @@ if [[ $CUDA_VERSION == 12* ]]; then
|
||||
'$ORIGIN/../../nvidia/cusparse/lib'
|
||||
'$ORIGIN/../../cusparselt/lib'
|
||||
'$ORIGIN/../../nvidia/nccl/lib'
|
||||
'$ORIGIN/../../nvidia/nvshmem/lib'
|
||||
'$ORIGIN/../../nvidia/nvtx/lib'
|
||||
'$ORIGIN/../../nvidia/cufile/lib'
|
||||
)
|
||||
if [[ $USE_CUFILE == 1 ]]; then
|
||||
CUDA_RPATHS+=(
|
||||
'$ORIGIN/../../nvidia/cufile/lib'
|
||||
)
|
||||
fi
|
||||
CUDA_RPATHS=$(IFS=: ; echo "${CUDA_RPATHS[*]}")
|
||||
export C_SO_RPATH=$CUDA_RPATHS':$ORIGIN:$ORIGIN/lib'
|
||||
export LIB_SO_RPATH=$CUDA_RPATHS':$ORIGIN'
|
||||
export FORCE_RPATH="--force-rpath"
|
||||
export USE_STATIC_NCCL=0
|
||||
export USE_SYSTEM_NCCL=1
|
||||
export ATEN_STATIC_CUDA=0
|
||||
export USE_CUDA_STATIC_LINK=0
|
||||
export USE_CUPTI_SO=1
|
||||
export NCCL_INCLUDE_DIR="/usr/local/cuda/include/"
|
||||
export NCCL_LIB_DIR="/usr/local/cuda/lib64/"
|
||||
fi
|
||||
elif [[ $CUDA_VERSION == "11.8" ]]; then
|
||||
export USE_STATIC_CUDNN=0
|
||||
# Turn USE_CUFILE off for CUDA 11.8 since nvidia-cufile-cu11 and 1.9.0.20 are
|
||||
# not available in PYPI
|
||||
export USE_CUFILE=0
|
||||
# Try parallelizing nvcc as well
|
||||
export TORCH_NVCC_FLAGS="-Xfatbin -compress-all --threads 2"
|
||||
# Bundle ptxas into the wheel, see https://github.com/pytorch/pytorch/pull/119750
|
||||
export BUILD_BUNDLE_PTXAS=1
|
||||
|
||||
# CUDA 11.8 have to ship the libcusparseLt.so.0 with the binary
|
||||
# since nvidia-cusparselt-cu11 is not available in PYPI
|
||||
if [[ $USE_CUSPARSELT == "1" ]]; then
|
||||
DEPS_SONAME+=(
|
||||
"libcusparseLt.so.0"
|
||||
)
|
||||
DEPS_LIST+=(
|
||||
"/usr/local/cuda/lib64/libcusparseLt.so.0"
|
||||
)
|
||||
fi
|
||||
|
||||
if [[ -z "$PYTORCH_EXTRA_INSTALL_REQUIREMENTS" ]]; then
|
||||
echo "Bundling with cudnn and cublas."
|
||||
DEPS_LIST+=(
|
||||
@ -255,9 +273,12 @@ elif [[ $CUDA_VERSION == "11.8" ]]; then
|
||||
export LIB_SO_RPATH=$CUDA_RPATHS':$ORIGIN'
|
||||
export FORCE_RPATH="--force-rpath"
|
||||
export USE_STATIC_NCCL=0
|
||||
export USE_SYSTEM_NCCL=1
|
||||
export ATEN_STATIC_CUDA=0
|
||||
export USE_CUDA_STATIC_LINK=0
|
||||
export USE_CUPTI_SO=1
|
||||
export NCCL_INCLUDE_DIR="/usr/local/cuda/include/"
|
||||
export NCCL_LIB_DIR="/usr/local/cuda/lib64/"
|
||||
fi
|
||||
else
|
||||
echo "Unknown cuda version $CUDA_VERSION"
|
||||
|
||||
@ -22,7 +22,9 @@ retry () {
|
||||
|
||||
# TODO move this into the Docker images
|
||||
OS_NAME=`awk -F= '/^NAME/{print $2}' /etc/os-release`
|
||||
if [[ "$OS_NAME" == *"AlmaLinux"* ]]; then
|
||||
if [[ "$OS_NAME" == *"CentOS Linux"* ]]; then
|
||||
retry yum install -q -y zip openssl
|
||||
elif [[ "$OS_NAME" == *"AlmaLinux"* ]]; then
|
||||
retry yum install -q -y zip openssl
|
||||
elif [[ "$OS_NAME" == *"Red Hat Enterprise Linux"* ]]; then
|
||||
retry dnf install -q -y zip openssl
|
||||
@ -33,9 +35,6 @@ elif [[ "$OS_NAME" == *"Ubuntu"* ]]; then
|
||||
sed -i 's/.*nvidia.*/# &/' $(find /etc/apt/ -type f -name "*.list")
|
||||
retry apt-get update
|
||||
retry apt-get -y install zip openssl
|
||||
else
|
||||
echo "Unknown OS: '$OS_NAME'"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Version: setup.py uses $PYTORCH_BUILD_VERSION.post$PYTORCH_BUILD_NUMBER if
|
||||
|
||||
@ -95,7 +95,6 @@ ROCM_SO_FILES=(
|
||||
"libroctracer64.so"
|
||||
"libroctx64.so"
|
||||
"libhipblaslt.so"
|
||||
"libhipsparselt.so"
|
||||
"libhiprtc.so"
|
||||
)
|
||||
|
||||
@ -187,28 +186,20 @@ do
|
||||
OS_SO_FILES[${#OS_SO_FILES[@]}]=$file_name # Append lib to array
|
||||
done
|
||||
|
||||
ARCH=$(echo $PYTORCH_ROCM_ARCH | sed 's/;/|/g') # Replace ; seperated arch list to bar for grep
|
||||
|
||||
# rocBLAS library files
|
||||
ROCBLAS_LIB_SRC=$ROCM_HOME/lib/rocblas/library
|
||||
ROCBLAS_LIB_DST=lib/rocblas/library
|
||||
ROCBLAS_ARCH_SPECIFIC_FILES=$(ls $ROCBLAS_LIB_SRC | grep -E $ARCH)
|
||||
ROCBLAS_OTHER_FILES=$(ls $ROCBLAS_LIB_SRC | grep -v gfx)
|
||||
ROCBLAS_LIB_FILES=($ROCBLAS_ARCH_SPECIFIC_FILES $OTHER_FILES)
|
||||
ARCH=$(echo $PYTORCH_ROCM_ARCH | sed 's/;/|/g') # Replace ; seperated arch list to bar for grep
|
||||
ARCH_SPECIFIC_FILES=$(ls $ROCBLAS_LIB_SRC | grep -E $ARCH)
|
||||
OTHER_FILES=$(ls $ROCBLAS_LIB_SRC | grep -v gfx)
|
||||
ROCBLAS_LIB_FILES=($ARCH_SPECIFIC_FILES $OTHER_FILES)
|
||||
|
||||
# hipblaslt library files
|
||||
HIPBLASLT_LIB_SRC=$ROCM_HOME/lib/hipblaslt/library
|
||||
HIPBLASLT_LIB_DST=lib/hipblaslt/library
|
||||
HIPBLASLT_ARCH_SPECIFIC_FILES=$(ls $HIPBLASLT_LIB_SRC | grep -E $ARCH)
|
||||
HIPBLASLT_OTHER_FILES=$(ls $HIPBLASLT_LIB_SRC | grep -v gfx)
|
||||
HIPBLASLT_LIB_FILES=($HIPBLASLT_ARCH_SPECIFIC_FILES $HIPBLASLT_OTHER_FILES)
|
||||
|
||||
# hipsparselt library files
|
||||
HIPSPARSELT_LIB_SRC=$ROCM_HOME/lib/hipsparselt/library
|
||||
HIPSPARSELT_LIB_DST=lib/hipsparselt/library
|
||||
HIPSPARSELT_ARCH_SPECIFIC_FILES=$(ls $HIPSPARSELT_LIB_SRC | grep -E $ARCH)
|
||||
#HIPSPARSELT_OTHER_FILES=$(ls $HIPSPARSELT_LIB_SRC | grep -v gfx)
|
||||
HIPSPARSELT_LIB_FILES=($HIPSPARSELT_ARCH_SPECIFIC_FILES $HIPSPARSELT_OTHER_FILES)
|
||||
ARCH_SPECIFIC_FILES=$(ls $HIPBLASLT_LIB_SRC | grep -E $ARCH)
|
||||
OTHER_FILES=$(ls $HIPBLASLT_LIB_SRC | grep -v gfx)
|
||||
HIPBLASLT_LIB_FILES=($ARCH_SPECIFIC_FILES $OTHER_FILES)
|
||||
|
||||
# ROCm library files
|
||||
ROCM_SO_PATHS=()
|
||||
@ -243,14 +234,12 @@ DEPS_SONAME=(
|
||||
DEPS_AUX_SRCLIST=(
|
||||
"${ROCBLAS_LIB_FILES[@]/#/$ROCBLAS_LIB_SRC/}"
|
||||
"${HIPBLASLT_LIB_FILES[@]/#/$HIPBLASLT_LIB_SRC/}"
|
||||
"${HIPSPARSELT_LIB_FILES[@]/#/$HIPSPARSELT_LIB_SRC/}"
|
||||
"/opt/amdgpu/share/libdrm/amdgpu.ids"
|
||||
)
|
||||
|
||||
DEPS_AUX_DSTLIST=(
|
||||
"${ROCBLAS_LIB_FILES[@]/#/$ROCBLAS_LIB_DST/}"
|
||||
"${HIPBLASLT_LIB_FILES[@]/#/$HIPBLASLT_LIB_DST/}"
|
||||
"${HIPSPARSELT_LIB_FILES[@]/#/$HIPSPARSELT_LIB_DST/}"
|
||||
"share/libdrm/amdgpu.ids"
|
||||
)
|
||||
|
||||
|
||||
@ -52,6 +52,12 @@ fi
|
||||
export USE_LLVM=/opt/llvm
|
||||
export LLVM_DIR=/opt/llvm/lib/cmake/llvm
|
||||
|
||||
if [[ "$BUILD_ENVIRONMENT" == *executorch* ]]; then
|
||||
# To build test_edge_op_registration
|
||||
export BUILD_EXECUTORCH=ON
|
||||
export USE_CUDA=0
|
||||
fi
|
||||
|
||||
if ! which conda; then
|
||||
# In ROCm CIs, we are doing cross compilation on build machines with
|
||||
# intel cpu and later run tests on machines with amd cpu.
|
||||
|
||||
@ -40,7 +40,7 @@ if [[ ${BUILD_ENVIRONMENT} == *"distributed"* ]]; then
|
||||
else
|
||||
# Explicitly set USE_DISTRIBUTED=0 to align with the default build config on mac. This also serves as the sole CI config that tests
|
||||
# that building with USE_DISTRIBUTED=0 works at all. See https://github.com/pytorch/pytorch/issues/86448
|
||||
USE_DISTRIBUTED=0 USE_OPENMP=1 MACOSX_DEPLOYMENT_TARGET=11.0 WERROR=1 BUILD_TEST=OFF USE_PYTORCH_METAL=1 python setup.py bdist_wheel --plat-name macosx_11_0_arm64
|
||||
USE_DISTRIBUTED=0 USE_OPENMP=1 MACOSX_DEPLOYMENT_TARGET=11.0 WERROR=1 BUILD_TEST=OFF USE_PYTORCH_METAL=1 python setup.py bdist_wheel
|
||||
fi
|
||||
if which sccache > /dev/null; then
|
||||
print_sccache_stats
|
||||
|
||||
@ -20,4 +20,14 @@ print_cmake_info() {
|
||||
CONDA_INSTALLATION_DIR=$(dirname "$CMAKE_EXEC")
|
||||
# Print all libraries under cmake rpath for debugging
|
||||
ls -la "$CONDA_INSTALLATION_DIR/../lib"
|
||||
|
||||
export CMAKE_EXEC
|
||||
# Explicitly add conda env lib folder to cmake rpath to address the flaky issue
|
||||
# where cmake dependencies couldn't be found. This seems to point to how conda
|
||||
# links $CMAKE_EXEC to its package cache when cloning a new environment
|
||||
install_name_tool -add_rpath @executable_path/../lib "${CMAKE_EXEC}" || true
|
||||
# Adding the rpath will invalidate cmake signature, so signing it again here
|
||||
# to trust the executable. EXC_BAD_ACCESS (SIGKILL (Code Signature Invalid))
|
||||
# with an exit code 137 otherwise
|
||||
codesign -f -s - "${CMAKE_EXEC}" || true
|
||||
}
|
||||
|
||||
@ -165,7 +165,6 @@ test_jit_hooks() {
|
||||
torchbench_setup_macos() {
|
||||
git clone --recursive https://github.com/pytorch/vision torchvision
|
||||
git clone --recursive https://github.com/pytorch/audio torchaudio
|
||||
brew install jpeg-turbo libpng
|
||||
|
||||
pushd torchvision
|
||||
git fetch
|
||||
@ -180,8 +179,7 @@ torchbench_setup_macos() {
|
||||
git checkout "$(cat ../.github/ci_commit_pins/audio.txt)"
|
||||
git submodule update --init --recursive
|
||||
python setup.py clean
|
||||
#TODO: Remove me, when figure out how to make TorchAudio find brew installed openmp
|
||||
USE_OPENMP=0 python setup.py develop
|
||||
python setup.py develop
|
||||
popd
|
||||
|
||||
# Shellcheck doesn't like it when you pass no arguments to a function that can take args. See https://www.shellcheck.net/wiki/SC2120
|
||||
@ -189,8 +187,9 @@ torchbench_setup_macos() {
|
||||
checkout_install_torchbench
|
||||
}
|
||||
|
||||
pip_benchmark_deps() {
|
||||
python -mpip install --no-input astunparse requests cython scikit-learn
|
||||
conda_benchmark_deps() {
|
||||
conda install -y astunparse numpy scipy ninja pyyaml setuptools cmake typing-extensions requests protobuf numba cython scikit-learn
|
||||
conda install -y -c conda-forge librosa
|
||||
}
|
||||
|
||||
|
||||
@ -198,7 +197,7 @@ test_torchbench_perf() {
|
||||
print_cmake_info
|
||||
|
||||
echo "Launching torchbench setup"
|
||||
pip_benchmark_deps
|
||||
conda_benchmark_deps
|
||||
torchbench_setup_macos
|
||||
|
||||
TEST_REPORTS_DIR=$(pwd)/test/test-reports
|
||||
@ -225,7 +224,7 @@ test_torchbench_smoketest() {
|
||||
print_cmake_info
|
||||
|
||||
echo "Launching torchbench setup"
|
||||
pip_benchmark_deps
|
||||
conda_benchmark_deps
|
||||
# shellcheck disable=SC2119,SC2120
|
||||
torchbench_setup_macos
|
||||
|
||||
@ -233,8 +232,8 @@ test_torchbench_smoketest() {
|
||||
mkdir -p "$TEST_REPORTS_DIR"
|
||||
|
||||
local device=mps
|
||||
local models=(hf_T5 llama BERT_pytorch dcgan hf_GPT2 yolov3 resnet152 sam pytorch_unet stable_diffusion_text_encoder speech_transformer Super_SloMo doctr_det_predictor doctr_reco_predictor)
|
||||
local hf_models=(GoogleFnet YituTechConvBert Speech2Text2ForCausalLM)
|
||||
local models=(hf_T5 llama BERT_pytorch dcgan hf_GPT2 yolov3 resnet152 sam pytorch_unet stable_diffusion_text_encoder speech_transformer Super_SloMo)
|
||||
local hf_models=(GoogleFnet YituTechConvBert)
|
||||
|
||||
for backend in eager inductor; do
|
||||
|
||||
@ -259,10 +258,10 @@ test_torchbench_smoketest() {
|
||||
if [ "$backend" == "inductor" ]; then
|
||||
PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/huggingface.py \
|
||||
--performance --only "$model" --backend "$backend" --inference --devices "$device" "$dtype_arg" \
|
||||
--output "$TEST_REPORTS_DIR/inductor_${backend}_huggingface_${dtype}_inference_${device}_performance.csv" || true
|
||||
--output "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_inference_${device}_performance.csv" || true
|
||||
PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/huggingface.py \
|
||||
--accuracy --only "$model" --backend "$backend" --inference --devices "$device" "$dtype_arg" \
|
||||
--output "$TEST_REPORTS_DIR/inductor_${backend}_huggingface_${dtype}_inference_${device}_accuracy.csv" || true
|
||||
--output "$TEST_REPORTS_DIR/inductor_${backend}_torchbench_${dtype}_inference_${device}_accuracy.csv" || true
|
||||
fi
|
||||
done
|
||||
done
|
||||
@ -290,7 +289,7 @@ test_hf_perf() {
|
||||
print_cmake_info
|
||||
TEST_REPORTS_DIR=$(pwd)/test/test-reports
|
||||
mkdir -p "$TEST_REPORTS_DIR"
|
||||
pip_benchmark_deps
|
||||
conda_benchmark_deps
|
||||
torchbench_setup_macos
|
||||
|
||||
echo "Launching HuggingFace training perf run"
|
||||
@ -306,7 +305,7 @@ test_timm_perf() {
|
||||
print_cmake_info
|
||||
TEST_REPORTS_DIR=$(pwd)/test/test-reports
|
||||
mkdir -p "$TEST_REPORTS_DIR"
|
||||
pip_benchmark_deps
|
||||
conda_benchmark_deps
|
||||
torchbench_setup_macos
|
||||
|
||||
echo "Launching timm training perf run"
|
||||
|
||||
@ -46,9 +46,6 @@ def get_gomp_thread():
|
||||
|
||||
# use the default gomp path of AlmaLinux OS
|
||||
libgomp_path = "/usr/lib64/libgomp.so.1"
|
||||
# if it does not exist, try Ubuntu path
|
||||
if not os.path.exists(libgomp_path):
|
||||
libgomp_path = f"/usr/lib/{os.uname().machine}-linux-gnu/libgomp.so.1"
|
||||
|
||||
os.environ["GOMP_CPU_AFFINITY"] = "0-3"
|
||||
|
||||
|
||||
@ -324,12 +324,6 @@ test_python_smoke() {
|
||||
assert_git_not_dirty
|
||||
}
|
||||
|
||||
test_h100_distributed() {
|
||||
# Distributed tests at H100
|
||||
time python test/run_test.py --include distributed/_composable/test_composability/test_pp_composability.py $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
|
||||
assert_git_not_dirty
|
||||
}
|
||||
|
||||
test_lazy_tensor_meta_reference_disabled() {
|
||||
export TORCH_DISABLE_FUNCTIONALIZATION_META_REFERENCE=1
|
||||
echo "Testing lazy tensor operations without meta reference"
|
||||
@ -826,7 +820,16 @@ test_inductor_torchbench_smoketest_perf() {
|
||||
done
|
||||
}
|
||||
|
||||
test_inductor_get_core_number() {
|
||||
if [[ "${TEST_CONFIG}" == *aarch64* ]]; then
|
||||
echo "$(($(lscpu | grep 'Cluster(s):' | awk '{print $2}') * $(lscpu | grep 'Core(s) per cluster:' | awk '{print $4}')))"
|
||||
else
|
||||
echo "$(($(lscpu | grep 'Socket(s):' | awk '{print $2}') * $(lscpu | grep 'Core(s) per socket:' | awk '{print $4}')))"
|
||||
fi
|
||||
}
|
||||
|
||||
test_inductor_set_cpu_affinity(){
|
||||
#set jemalloc
|
||||
JEMALLOC_LIB="$(find /usr/lib -name libjemalloc.so.2)"
|
||||
export LD_PRELOAD="$JEMALLOC_LIB":"$LD_PRELOAD"
|
||||
export MALLOC_CONF="oversize_threshold:1,background_thread:true,metadata_thp:auto,dirty_decay_ms:-1,muzzy_decay_ms:-1"
|
||||
@@ -838,23 +841,14 @@ test_inductor_set_cpu_affinity(){
export KMP_AFFINITY=granularity=fine,compact,1,0
export KMP_BLOCKTIME=1
fi

# Use nproc here instead of lscpu because it takes into account cgroups slice
cpus=$(nproc)
thread_per_core=$(lscpu | grep 'Thread(s) per core:' | awk '{print $4}')
cores=$((cpus / thread_per_core))

# Set number of cores to 16 on aarch64 for performance runs
cores=$(test_inductor_get_core_number)
# Set number of cores to 16 on Aarch64 for performance runs.
if [[ "${TEST_CONFIG}" == *aarch64* && $cores -gt 16 ]]; then
cores=16
fi
export OMP_NUM_THREADS=$cores

# Handle cgroups slice start and end CPU
start_cpu=$(python -c 'import os; print(min(os.sched_getaffinity(0)))')
# Leaving one physical CPU for other tasks
end_cpu=$(($(python -c 'import os; print(max(os.sched_getaffinity(0)))') - thread_per_core))
export TASKSET="taskset -c $start_cpu-$end_cpu"
end_core=$((cores-1))
export TASKSET="taskset -c 0-$end_core"
}

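A hedged Python sketch of the affinity math used above: derive the allowed CPU range from the cgroup-visible set and reserve one physical core (the threads-per-core value here is an assumption; the script reads it from lscpu):

import os

allowed = sorted(os.sched_getaffinity(0))   # CPUs visible inside the cgroup slice
threads_per_core = 2                        # assumption for illustration
start_cpu = allowed[0]
end_cpu = allowed[-1] - threads_per_core    # leave one physical CPU for other tasks
print(f"taskset -c {start_cpu}-{end_cpu}")
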
test_inductor_torchbench_cpu_smoketest_perf(){
|
||||
@@ -1526,7 +1520,7 @@ test_executorch() {
test_linux_aarch64() {
  python test/run_test.py --include test_modules test_mkldnn test_mkldnn_fusion test_openmp test_torch test_dynamic_shapes \
    test_transformers test_multiprocessing test_numpy_interop test_autograd test_binary_ufuncs test_complex test_spectral_ops \
    test_foreach test_reductions test_unary_ufuncs test_tensor_creation_ops test_ops test_cpp_extensions_open_device_registration \
    test_foreach test_reductions test_unary_ufuncs test_tensor_creation_ops test_ops \
    --shard "$SHARD_NUMBER" "$NUM_TEST_SHARDS" --verbose

  # Dynamo tests
@@ -1560,8 +1554,7 @@ test_operator_benchmark() {

  cd "${TEST_DIR}"/benchmarks/operator_benchmark
  $TASKSET python -m benchmark_all_test --device "$1" --tag-filter "$2" \
    --output-csv "${TEST_REPORTS_DIR}/operator_benchmark_eager_float32_cpu.csv" \
    --output-json-for-dashboard "${TEST_REPORTS_DIR}/operator_benchmark_eager_float32_cpu.json" \
    --output-dir "${TEST_REPORTS_DIR}/operator_benchmark_eager_float32_cpu.csv"

  pip_install pandas
  python check_perf_csv.py \
@@ -1646,7 +1639,7 @@ elif [[ "${TEST_CONFIG}" == *torchbench* ]]; then
    install_torchaudio cuda
  fi
  install_torchvision
  TORCH_CUDA_ARCH_LIST="8.0;8.6" install_torchao
  TORCH_CUDA_ARCH_LIST="8.0;8.6" pip_install git+https://github.com/pytorch/ao.git
  id=$((SHARD_NUMBER-1))
  # https://github.com/opencv/opencv-python/issues/885
  pip_install opencv-python==4.8.0.74
@@ -1731,8 +1724,6 @@ elif [[ "${BUILD_ENVIRONMENT}" == *xpu* ]]; then
  test_xpu_bin
elif [[ "${TEST_CONFIG}" == smoke ]]; then
  test_python_smoke
elif [[ "${TEST_CONFIG}" == h100_distributed ]]; then
  test_h100_distributed
else
  install_torchvision
  install_monkeytype

@@ -38,7 +38,7 @@ if [[ "$BUILD_ENVIRONMENT" == *cuda* ]]; then
fi

# TODO: Move both of them to Windows AMI
python -m pip install pytest-rerunfailures==10.3 pytest-cpp==2.3.0 tensorboard==2.13.0 protobuf==5.29.4 pytest-subtests==0.13.1
python -m pip install pytest-rerunfailures==10.3 pytest-cpp==2.3.0 tensorboard==2.13.0 pytest-subtests==0.13.1

# Install Z3 optional dependency for Windows builds.
python -m pip install z3-solver==4.12.2.0

@@ -7,7 +7,7 @@ if not exist "%DOWNLOADS_DIR%" mkdir %DOWNLOADS_DIR%
if not exist "%DEPENDENCIES_DIR%" mkdir %DEPENDENCIES_DIR%

:: activate visual studio
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" arm64
call "%DEPENDENCIES_DIR%\VSBuildTools\VC\Auxiliary\Build\vcvarsall.bat" arm64
where cl.exe

cd %DEPENDENCIES_DIR%

@@ -7,7 +7,7 @@ if not exist "%DOWNLOADS_DIR%" mkdir %DOWNLOADS_DIR%
if not exist "%DEPENDENCIES_DIR%" mkdir %DEPENDENCIES_DIR%

:: activate visual studio
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" arm64
call "%DEPENDENCIES_DIR%\VSBuildTools\VC\Auxiliary\Build\vcvarsall.bat" arm64
where cl.exe

:: Clone OpenBLAS

@@ -2,7 +2,7 @@
cd %PYTORCH_ROOT%

:: activate visual studio
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" arm64
call "%DEPENDENCIES_DIR%\VSBuildTools\VC\Auxiliary\Build\vcvarsall.bat" arm64
where cl.exe

:: create virtual environment

@@ -21,7 +21,7 @@ if %ENABLE_APL% == 1 (
)

:: activate visual studio
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" arm64
call "%DEPENDENCIES_DIR%\VSBuildTools\VC\Auxiliary\Build\vcvarsall.bat" arm64
where cl.exe

:: change to source directory

@@ -21,7 +21,7 @@ if %ENABLE_APL% == 1 (
)

:: activate visual studio
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" arm64
call "%DEPENDENCIES_DIR%\VSBuildTools\VC\Auxiliary\Build\vcvarsall.bat" arm64
where cl.exe

:: change to source directory

@@ -33,7 +33,7 @@ pushd tmp
set VC_VERSION_LOWER=14
set VC_VERSION_UPPER=36

call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" arm64
call "%DEPENDENCIES_DIR%\VSBuildTools\VC\Auxiliary\Build\vcvarsall.bat" arm64

set install_root=%CD%
set INCLUDE=%INCLUDE%;%install_root%\include;%install_root%\include\torch\csrc\api\include

59
.ci/pytorch/windows/cuda124.bat
Normal file
@@ -0,0 +1,59 @@
@echo off

set MODULE_NAME=pytorch

IF NOT EXIST "setup.py" IF NOT EXIST "%MODULE_NAME%" (
  call internal\clone.bat
  cd %~dp0
) ELSE (
  call internal\clean.bat
)
IF ERRORLEVEL 1 goto :eof

call internal\check_deps.bat
IF ERRORLEVEL 1 goto :eof

REM Check for optional components

set USE_CUDA=
set CMAKE_GENERATOR=Visual Studio 15 2017 Win64

IF "%NVTOOLSEXT_PATH%"=="" (
  IF EXIST "C:\Program Files\NVIDIA Corporation\NvToolsExt\lib\x64\nvToolsExt64_1.lib" (
    set NVTOOLSEXT_PATH=C:\Program Files\NVIDIA Corporation\NvToolsExt
  ) ELSE (
    echo NVTX ^(Visual Studio Extension ^for CUDA^) ^not installed, failing
    exit /b 1
  )
)

IF "%CUDA_PATH_V124%"=="" (
  IF EXIST "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\bin\nvcc.exe" (
    set "CUDA_PATH_V124=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4"
  ) ELSE (
    echo CUDA 12.4 not found, failing
    exit /b 1
  )
)

IF "%BUILD_VISION%" == "" (
  set TORCH_CUDA_ARCH_LIST=6.1;7.0;7.5;8.0;8.6;9.0
  set TORCH_NVCC_FLAGS=-Xfatbin -compress-all
) ELSE (
  set NVCC_FLAGS=-D__CUDA_NO_HALF_OPERATORS__ --expt-relaxed-constexpr -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=compute_80 -gencode=arch=compute_86,code=compute_86 -gencode=arch=compute_90,code=compute_90
)

set "CUDA_PATH=%CUDA_PATH_V124%"
set "PATH=%CUDA_PATH_V124%\bin;%PATH%"

:optcheck

call internal\check_opts.bat
IF ERRORLEVEL 1 goto :eof

if exist "%NIGHTLIES_PYTORCH_ROOT%" cd %NIGHTLIES_PYTORCH_ROOT%\..
call %~dp0\internal\copy.bat
IF ERRORLEVEL 1 goto :eof

call %~dp0\internal\setup.bat
IF ERRORLEVEL 1 goto :eof
@@ -58,6 +58,33 @@ xcopy /Y "%SRC_DIR%\temp_build\zlib\dll_x64\*.dll" "C:\Windows\System32"

goto cuda_common

:cuda124

set CUDA_INSTALL_EXE=cuda_12.4.0_551.61_windows.exe
if not exist "%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%" (
  curl -k -L "https://ossci-windows.s3.amazonaws.com/%CUDA_INSTALL_EXE%" --output "%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%" & REM @lint-ignore
  if errorlevel 1 exit /b 1
  set "CUDA_SETUP_FILE=%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%"
  set "ARGS=cuda_profiler_api_12.4 thrust_12.4 nvcc_12.4 cuobjdump_12.4 nvprune_12.4 nvprof_12.4 cupti_12.4 cublas_12.4 cublas_dev_12.4 cudart_12.4 cufft_12.4 cufft_dev_12.4 curand_12.4 curand_dev_12.4 cusolver_12.4 cusolver_dev_12.4 cusparse_12.4 cusparse_dev_12.4 npp_12.4 npp_dev_12.4 nvrtc_12.4 nvrtc_dev_12.4 nvml_dev_12.4 nvjitlink_12.4 nvtx_12.4"
)

set CUDNN_FOLDER=cudnn-windows-x86_64-9.5.0.50_cuda12-archive
set CUDNN_LIB_FOLDER="lib"
set "CUDNN_INSTALL_ZIP=%CUDNN_FOLDER%.zip"
if not exist "%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%" (
  curl -k -L "http://s3.amazonaws.com/ossci-windows/%CUDNN_INSTALL_ZIP%" --output "%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%" & REM @lint-ignore
  if errorlevel 1 exit /b 1
  set "CUDNN_SETUP_FILE=%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%"
)

@REM cuDNN 8.3+ required zlib to be installed on the path
echo Installing ZLIB dlls
curl -k -L "http://s3.amazonaws.com/ossci-windows/zlib123dllx64.zip" --output "%SRC_DIR%\temp_build\zlib123dllx64.zip"
7z x "%SRC_DIR%\temp_build\zlib123dllx64.zip" -o"%SRC_DIR%\temp_build\zlib"
xcopy /Y "%SRC_DIR%\temp_build\zlib\dll_x64\*.dll" "C:\Windows\System32"

goto cuda_common

:cuda126

set CUDA_INSTALL_EXE=cuda_12.6.2_560.94_windows.exe

@@ -3,8 +3,6 @@ if "%VC_YEAR%" == "2022" powershell windows/internal/vs2022_install.ps1

set VC_VERSION_LOWER=17
set VC_VERSION_UPPER=18
:: Please don't delete VS2019 as an alternative, in case some Windows compiler issue.
:: Reference: https://github.com/pytorch/pytorch/issues/145702#issuecomment-2858693930
if "%VC_YEAR%" == "2019" (
  set VC_VERSION_LOWER=16
  set VC_VERSION_UPPER=17

@@ -9,7 +9,7 @@ if [[ "$OS" != "windows-arm64" ]]; then
  export USE_SCCACHE=1
  export SCCACHE_BUCKET=ossci-compiler-cache
  export SCCACHE_IGNORE_SERVER_IO_ERROR=1
  export VC_YEAR=2022
  export VC_YEAR=2019
fi

if [[ "$DESIRED_CUDA" == 'xpu' ]]; then

@@ -4,7 +4,7 @@ set -eux -o pipefail
source "${BINARY_ENV_FILE:-/c/w/env}"

export CUDA_VERSION="${DESIRED_CUDA/cu/}"
export VC_YEAR=2022
export VC_YEAR=2019

if [[ "$DESIRED_CUDA" == 'xpu' ]]; then
  export VC_YEAR=2022

5
.github/ISSUE_TEMPLATE/bug-report.yml
vendored
@@ -12,9 +12,7 @@ body:
      description: |
        Please provide a clear and concise description of what the bug is.

        If relevant, add a minimal example so that we can reproduce the error by running the code. It is very important for the snippet to be as succinct (minimal) as possible, so please take time to trim down any irrelevant code to help us debug efficiently.
        Your example should be fully self-contained and not rely on any artifact that should be downloaded.
        For example:
        If relevant, add a minimal example so that we can reproduce the error by running the code. It is very important for the snippet to be as succinct (minimal) as possible, so please take time to trim down any irrelevant code to help us debug efficiently. We are going to copy-paste your code and we expect to get the same result as you did: avoid any external data, and include the relevant imports, etc. For example:

        ```python
        # All necessary imports at the beginning
@@ -28,7 +26,6 @@ body:
        If the code is too long (hopefully, it isn't), feel free to put it in a public gist and link it in the issue: https://gist.github.com.

        Please also paste or describe the results you observe instead of the expected results. If you observe an error, please paste the error message including the **full** traceback of the exception. It may be relevant to wrap error messages in ```` ```triple quotes blocks``` ````.
        If your issue is related to numerical accuracy or reproducibility, please read the [numerical accuracy](https://docs.pytorch.org/docs/stable/notes/numerical_accuracy.html) and [reproducibility](https://docs.pytorch.org/docs/stable/notes/randomness.html) notes. If the difference is not expected as described in these documents, please provide appropriate justification on why one result is wrong and the other is correct.
      placeholder: |
        A clear and concise description of what the bug is.

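To illustrate the kind of self-contained snippet the template above asks for, a minimal sketch (the operation and shapes are made up for illustration) could look like:

```python
# All necessary imports at the beginning
import torch

# A succinct, deterministic reproducing example with no external data
torch.manual_seed(0)
x = torch.rand(5, 3)
y = torch.rand(3, 5)
print(x @ y)  # paste the observed output and describe the expected one
```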
2
.github/ISSUE_TEMPLATE/disable-ci-jobs.md
vendored
@@ -5,7 +5,7 @@ title: "DISABLED [WORKFLOW_NAME] / [PLATFORM_NAME] / [JOB_NAME]"
labels: "module: ci"
---

> For example, DISABLED pull / win-vs2022-cpu-py3 / test (default). Once
> For example, DISABLED pull / win-vs2019-cpu-py3 / test (default). Once
> created, the job will be disabled within 15 minutes. You can check the
> list of disabled jobs at https://ossci-metrics.s3.amazonaws.com/disabled-jobs.json

111
.github/ISSUE_TEMPLATE/release-feature-request.yml
vendored
@ -1,111 +0,0 @@
|
||||
name: 🚀 Release highlight for proposed Feature
|
||||
description: Submit a Release highlight for proposed Feature
|
||||
labels: ["release-feature-request"]
|
||||
|
||||
body:
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Release highlight for proposed Feature
|
||||
description: >
|
||||
Example: “A torch.special module, analogous to SciPy's special module.”
|
||||
- type: input
|
||||
id: contact
|
||||
attributes:
|
||||
label: Point(s) of contact
|
||||
description: How can we get in touch with you if we need more info?
|
||||
placeholder: ex. github username
|
||||
validations:
|
||||
required: false
|
||||
- type: dropdown
|
||||
attributes:
|
||||
label: Release Mode (pytorch/pytorch features only)
|
||||
description: |
|
||||
If "out-of-tree", please include the GH repo name
|
||||
options:
|
||||
- In-tree
|
||||
- Out-of-tree
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Out-Of-Tree Repo
|
||||
description: >
|
||||
please include the GH repo name
|
||||
validations:
|
||||
required: false
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Description and value to the user
|
||||
description: >
|
||||
Please provide a brief description of the feature and how it will benefit the user.
|
||||
validations:
|
||||
required: false
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Link to design doc, GitHub issues, past submissions, etc
|
||||
validations:
|
||||
required: false
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: What feedback adopters have provided
|
||||
description: >
|
||||
Please list users/teams that have tried the feature and provided feedback. If that feedback motivated material changes (API, doc, etc..), a quick overview of the changes and the status (planned, in progress, implemented) would be helpful as well.
|
||||
validations:
|
||||
required: false
|
||||
- type: dropdown
|
||||
attributes:
|
||||
label: Plan for documentations / tutorials
|
||||
description: |
|
||||
Select One of the following options
|
||||
options:
|
||||
- Tutorial exists
|
||||
- Will submit a PR to pytorch/tutorials
|
||||
- Will submit a PR to a repo
|
||||
- Tutorial is not needed
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Additional context for tutorials
|
||||
description: >
|
||||
Please provide a link for existing tutorial or link to a repo or context for why tutorial is not needed.
|
||||
validations:
|
||||
required: false
|
||||
- type: dropdown
|
||||
attributes:
|
||||
label: Marketing/Blog Coverage
|
||||
description: |
|
||||
Are you requesting feature Inclusion in the release blogs?
|
||||
options:
|
||||
- "Yes"
|
||||
- "No"
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Are you requesting other marketing assistance with this feature?
|
||||
description: >
|
||||
E.g. supplementary blogs, social media amplification, etc.
|
||||
validations:
|
||||
required: false
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Release Version
|
||||
description: >
|
||||
Please include release version for marketing coverage.
|
||||
validations:
|
||||
required: false
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: OS / Platform / Compute Coverage
|
||||
description: >
|
||||
Please list the platforms supported by the proposed feature. If the feature supports all the platforms, write "all". Goal of this section is to clearly share if this feature works in all PyTorch configurations or is it limited to only certain platforms/configurations (e.g. CPU only, GPU only, Linux only, etc...)
|
||||
validations:
|
||||
required: false
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Testing Support (CI, test cases, etc..)
|
||||
description: >
|
||||
Please provide an overview of test coverage. This includes unit testing and integration testing, but if E2E validation testing has been done to show that the feature works for a certain set of use cases or models please mention that as well.
|
||||
validations:
|
||||
required: false
|
||||
1
.github/actionlint.yaml
vendored
@@ -45,7 +45,6 @@ self-hosted-runner:
  - windows.g5.4xlarge.nvidia.gpu
  # Windows ARM64 runners
  - windows-11-arm64
  - windows-11-arm64-preview
  # Organization-wide AMD-hosted runners
  # MI2xx runners
  - linux.rocm.gpu

38
.github/actions/reuse-old-whl/action.yml
vendored
@ -1,38 +0,0 @@
|
||||
name: Reuse old wheel if possible
|
||||
|
||||
description:
|
||||
Reuse old wheel if possible
|
||||
|
||||
inputs:
|
||||
build-environment:
|
||||
description: Build environment
|
||||
required: true
|
||||
run-id:
|
||||
description: Workflow run ID
|
||||
required: true
|
||||
github-token:
|
||||
description: GitHub token
|
||||
required: true
|
||||
|
||||
outputs:
|
||||
reuse:
|
||||
description: Whether the wheel is reused or not
|
||||
value: ${{ steps.check-file-changes.outputs.reuse }}
|
||||
|
||||
runs:
|
||||
using: composite
|
||||
|
||||
steps:
|
||||
# Check out pytorch with fetch depth 0
|
||||
- name: Check file changes
|
||||
id: check-file-changes
|
||||
shell: bash
|
||||
continue-on-error: true
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ inputs.github-token }}
|
||||
run: |
|
||||
set -x
|
||||
python3 ${GITHUB_ACTION_PATH}/reuse_old_whl.py \
|
||||
--build-environment "${{ inputs.build-environment }}" \
|
||||
--run-id "${{ inputs.run-id }}" \
|
||||
--github-ref "${{ github.ref }}"
|
||||
351
.github/actions/reuse-old-whl/reuse_old_whl.py
vendored
@ -1,351 +0,0 @@
|
||||
import argparse
|
||||
import os
|
||||
import subprocess
|
||||
from functools import lru_cache
|
||||
from pathlib import Path
|
||||
from typing import Any, cast, Optional, Union
|
||||
|
||||
import requests
|
||||
|
||||
|
||||
FORCE_REBUILD_LABEL = "ci-force-rebuild"
|
||||
|
||||
|
||||
@lru_cache
|
||||
def get_merge_base() -> str:
|
||||
merge_base = subprocess.check_output(
|
||||
["git", "merge-base", "HEAD", "origin/main"],
|
||||
text=True,
|
||||
stderr=subprocess.DEVNULL,
|
||||
).strip()
|
||||
# Remove this when we turn this off for the main branch
|
||||
if merge_base == get_head_sha():
|
||||
print("Merge base is the same as HEAD, using HEAD^")
|
||||
merge_base = subprocess.check_output(
|
||||
["git", "rev-parse", "HEAD^"],
|
||||
text=True,
|
||||
stderr=subprocess.DEVNULL,
|
||||
).strip()
|
||||
print(f"Merge base: {merge_base}")
|
||||
return merge_base
|
||||
|
||||
|
||||
@lru_cache
|
||||
def get_head_sha() -> str:
|
||||
sha = subprocess.check_output(
|
||||
["git", "rev-parse", "HEAD"],
|
||||
text=True,
|
||||
stderr=subprocess.DEVNULL,
|
||||
).strip()
|
||||
return sha
|
||||
|
||||
|
||||
def is_main_branch() -> bool:
|
||||
return False
|
||||
# Testing on main branch for now
|
||||
# print(
|
||||
# f"Checking if we are on main branch: merge base {get_merge_base()}, head {get_head_sha()}"
|
||||
# )
|
||||
# return get_merge_base() == get_head_sha()
|
||||
|
||||
|
||||
def query_github_api(url: str) -> Any:
|
||||
headers = {
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
"Authorization": f"Bearer {os.environ['GITHUB_TOKEN']}",
|
||||
}
|
||||
response = requests.get(url, headers=headers)
|
||||
return response.json()
|
||||
|
||||
|
||||
@lru_cache
|
||||
def check_labels_for_pr() -> bool:
|
||||
# Check if the current commit is part of a PR and if it has the
|
||||
# FORCE_REBUILD_LABEL
|
||||
head_sha = get_head_sha()
|
||||
url = f"https://api.github.com/repos/pytorch/pytorch/commits/{head_sha}/pulls"
|
||||
response = query_github_api(url)
|
||||
|
||||
print(
|
||||
f"Found {len(response)} PRs for commit {head_sha}: {[pr['number'] for pr in response]}"
|
||||
)
|
||||
for pr in response:
|
||||
labels = pr.get("labels", [])
|
||||
for label in labels:
|
||||
if label["name"] == FORCE_REBUILD_LABEL:
|
||||
print(f"Found label {FORCE_REBUILD_LABEL} in PR {pr['number']}.")
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def check_issue_open() -> bool:
|
||||
# Check if issue #153759 is open. This is the config issue for quickly
|
||||
# forcing everyone to build
|
||||
url = "https://api.github.com/repos/pytorch/pytorch/issues/153759"
|
||||
response = query_github_api(url)
|
||||
if response.get("state") == "open":
|
||||
print("Issue #153759 is open.")
|
||||
return True
|
||||
else:
|
||||
print("Issue #153759 is not open.")
|
||||
return False
|
||||
|
||||
|
||||
def get_workflow_id(run_id: str) -> Optional[str]:
|
||||
# Get the workflow ID that corresponds to the file for the run ID
|
||||
url = f"https://api.github.com/repos/pytorch/pytorch/actions/runs/{run_id}"
|
||||
response = query_github_api(url)
|
||||
if "workflow_id" in response:
|
||||
print(f"Found workflow ID for run ID {run_id}: {response['workflow_id']}")
|
||||
return cast(str, response["workflow_id"])
|
||||
else:
|
||||
print("No workflow ID found.")
|
||||
return None
|
||||
|
||||
|
||||
def ok_changed_file(file: str) -> bool:
|
||||
# Return true if the file is in the list of allowed files to be changed to
|
||||
# reuse the old whl
|
||||
if (
|
||||
file.startswith("torch/")
|
||||
and file.endswith(".py")
|
||||
and not file.startswith("torch/csrc/")
|
||||
):
|
||||
return True
|
||||
if file.startswith("test/") and file.endswith(".py"):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def check_changed_files(sha: str) -> bool:
|
||||
# Return true if all the changed files are in the list of allowed files to
|
||||
# be changed to reuse the old whl
|
||||
|
||||
# Removing any files is not allowed since rysnc will not remove files
|
||||
removed_files = (
|
||||
subprocess.check_output(
|
||||
["git", "diff", "--name-only", sha, "HEAD", "--diff-filter=D"],
|
||||
text=True,
|
||||
stderr=subprocess.DEVNULL,
|
||||
)
|
||||
.strip()
|
||||
.split()
|
||||
)
|
||||
if removed_files:
|
||||
print(
|
||||
f"Removed files between {sha} and HEAD: {removed_files}, cannot reuse old whl"
|
||||
)
|
||||
return False
|
||||
|
||||
changed_files = (
|
||||
subprocess.check_output(
|
||||
["git", "diff", "--name-only", sha, "HEAD"],
|
||||
text=True,
|
||||
stderr=subprocess.DEVNULL,
|
||||
)
|
||||
.strip()
|
||||
.split()
|
||||
)
|
||||
print(f"Checking changed files between {sha} and HEAD:")
|
||||
for file in changed_files:
|
||||
if not ok_changed_file(file):
|
||||
print(f" File {file} is not allowed to be changed.")
|
||||
return False
|
||||
else:
|
||||
print(f" File {file} is allowed to be changed.")
|
||||
return True
|
||||
|
||||
|
||||
def find_old_whl(workflow_id: str, build_environment: str, sha: str) -> bool:
|
||||
# Find the old whl on s3 and download it to artifacts.zip
|
||||
if build_environment is None:
|
||||
print("BUILD_ENVIRONMENT is not set.")
|
||||
return False
|
||||
print(f"SHA: {sha}, workflow_id: {workflow_id}")
|
||||
|
||||
workflow_runs = query_github_api(
|
||||
f"https://api.github.com/repos/pytorch/pytorch/actions/workflows/{workflow_id}/runs?head_sha={sha}&branch=main&per_page=100"
|
||||
)
|
||||
if workflow_runs.get("total_count", 0) == 0:
|
||||
print("No workflow runs found.")
|
||||
return False
|
||||
for run in workflow_runs.get("workflow_runs", []):
|
||||
# Look in s3 for the old whl
|
||||
run_id = run["id"]
|
||||
try:
|
||||
url = f"https://gha-artifacts.s3.amazonaws.com/pytorch/pytorch/{run_id}/{build_environment}/artifacts.zip"
|
||||
print(f"Checking for old whl at {url}")
|
||||
response = requests.get(
|
||||
url,
|
||||
)
|
||||
if response.status_code == 200:
|
||||
with open("artifacts.zip", "wb") as f:
|
||||
f.write(response.content)
|
||||
print(f"Found old whl file from s3: {url}")
|
||||
return True
|
||||
except requests.RequestException as e:
|
||||
print(f"Error checking for old whl: {e}")
|
||||
continue
|
||||
return False
|
||||
|
||||
|
||||
def unzip_artifact_and_replace_files() -> None:
|
||||
# Unzip the artifact and replace files
|
||||
subprocess.check_output(
|
||||
["unzip", "-o", "artifacts.zip", "-d", "artifacts"],
|
||||
)
|
||||
os.remove("artifacts.zip")
|
||||
|
||||
head_sha = get_head_sha()
|
||||
|
||||
# Rename wheel into zip
|
||||
wheel_path = Path("artifacts/dist").glob("*.whl")
|
||||
for path in wheel_path:
|
||||
# Should be of the form torch-2.0.0+git1234567-cp37-etc.whl
|
||||
# Should usually be the merge base sha but for the ones that didn't do
|
||||
# the replacement, it won't be. Can probably change it to just be merge
|
||||
# base later
|
||||
old_version = f"+git{path.stem.split('+')[1].split('-')[0][3:]}"
|
||||
new_version = f"+git{head_sha[:7]}"
|
||||
|
||||
def rename_to_new_version(file: Union[str, Path]) -> None:
|
||||
# Rename file with old_version to new_version
|
||||
subprocess.check_output(
|
||||
["mv", file, str(file).replace(old_version, new_version)]
|
||||
)
|
||||
|
||||
def change_content_to_new_version(file: Union[str, Path]) -> None:
|
||||
# Check if is a file
|
||||
if os.path.isdir(file):
|
||||
return
|
||||
# Replace the old version in the file with the new version
|
||||
with open(file) as f:
|
||||
content = f.read()
|
||||
content = content.replace(old_version, new_version)
|
||||
with open(file, "w") as f:
|
||||
f.write(content)
|
||||
|
||||
zip_path = path.with_suffix(".zip")
|
||||
os.rename(path, zip_path)
|
||||
old_stem = zip_path.stem
|
||||
# Unzip the wheel
|
||||
subprocess.check_output(
|
||||
["unzip", "-o", zip_path, "-d", f"artifacts/dist/{old_stem}"],
|
||||
)
|
||||
|
||||
# Remove the old wheel (which is now a zip file)
|
||||
os.remove(zip_path)
|
||||
|
||||
# Copy python files into the artifact
|
||||
subprocess.check_output(
|
||||
["rsync", "-avz", "torch", f"artifacts/dist/{old_stem}"],
|
||||
)
|
||||
|
||||
change_content_to_new_version(f"artifacts/dist/{old_stem}/torch/version.py")
|
||||
|
||||
for file in Path(f"artifacts/dist/{old_stem}").glob(
|
||||
"*.dist-info/**",
|
||||
):
|
||||
change_content_to_new_version(file)
|
||||
|
||||
rename_to_new_version(f"artifacts/dist/{old_stem}")
|
||||
new_stem = old_stem.replace(old_version, new_version)
|
||||
|
||||
for file in Path(f"artifacts/dist/{new_stem}").glob(
|
||||
"*.dist-info",
|
||||
):
|
||||
rename_to_new_version(file)
|
||||
|
||||
# Zip the wheel back
|
||||
subprocess.check_output(
|
||||
["zip", "-r", f"{new_stem}.zip", "."],
|
||||
cwd=f"artifacts/dist/{new_stem}",
|
||||
)
|
||||
|
||||
subprocess.check_output(
|
||||
[
|
||||
"mv",
|
||||
f"artifacts/dist/{new_stem}/{new_stem}.zip",
|
||||
f"artifacts/dist/{new_stem}.whl",
|
||||
],
|
||||
)
|
||||
|
||||
# Remove the extracted folder
|
||||
subprocess.check_output(
|
||||
["rm", "-rf", f"artifacts/dist/{new_stem}"],
|
||||
)
|
||||
|
||||
# Rezip the artifact
|
||||
subprocess.check_output(["zip", "-r", "artifacts.zip", "."], cwd="artifacts")
|
||||
subprocess.check_output(
|
||||
["mv", "artifacts/artifacts.zip", "."],
|
||||
)
|
||||
return None
|
||||
|
||||
|
||||
def set_output() -> None:
|
||||
# Disable for now so we can monitor first
|
||||
# pass
|
||||
if os.getenv("GITHUB_OUTPUT"):
|
||||
with open(str(os.getenv("GITHUB_OUTPUT")), "a") as env:
|
||||
print("reuse=true", file=env)
|
||||
else:
|
||||
print("::set-output name=reuse::true")
|
||||
|
||||
|
||||
def parse_args() -> argparse.Namespace:
|
||||
parser = argparse.ArgumentParser(description="Check for old whl files.")
|
||||
parser.add_argument("--run-id", type=str, required=True, help="Workflow ID")
|
||||
parser.add_argument(
|
||||
"--build-environment", type=str, required=True, help="Build environment"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--github-ref",
|
||||
type=str,
|
||||
)
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
def can_reuse_whl(args: argparse.Namespace) -> bool:
|
||||
# if is_main_branch() or (
|
||||
# args.github_ref
|
||||
# and any(
|
||||
# args.github_ref.startswith(x)
|
||||
# for x in ["refs/heads/release", "refs/tags/v", "refs/heads/main"]
|
||||
# )
|
||||
# ):
|
||||
# print("On main branch or release branch, rebuild whl")
|
||||
# return False
|
||||
|
||||
if check_labels_for_pr():
|
||||
print(f"Found {FORCE_REBUILD_LABEL} label on PR, rebuild whl")
|
||||
return False
|
||||
|
||||
if check_issue_open():
|
||||
print("Issue #153759 is open, rebuild whl")
|
||||
return False
|
||||
|
||||
if not check_changed_files(get_merge_base()):
|
||||
print("Cannot use old whl due to the changed files, rebuild whl")
|
||||
return False
|
||||
|
||||
workflow_id = get_workflow_id(args.run_id)
|
||||
if workflow_id is None:
|
||||
print("No workflow ID found, rebuild whl")
|
||||
return False
|
||||
|
||||
if not find_old_whl(workflow_id, args.build_environment, get_merge_base()):
|
||||
print("No old whl found, rebuild whl")
|
||||
# TODO: go backwards from merge base to find more runs
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
args = parse_args()
|
||||
|
||||
if can_reuse_whl(args):
|
||||
print("Reusing old whl")
|
||||
unzip_artifact_and_replace_files()
|
||||
set_output()
|
||||
4
.github/actions/setup-xpu/action.yml
vendored
@@ -29,13 +29,13 @@ runs:
    if: always()
    shell: bash
    run: |
      timeout 30 xpu-smi discovery || true
      xpu-smi discovery

  - name: Runner health check GPU count
    if: always()
    shell: bash
    run: |
      ngpu=$(timeout 30 xpu-smi discovery | grep -c -E 'Device Name')
      ngpu=$(xpu-smi discovery | grep -c -E 'Device Name')
      msg="Please file an issue on pytorch/pytorch reporting the faulty runner. Include a link to the runner logs so the runner can be identified"
      if [[ $ngpu -eq 0 ]]; then
        echo "Error: Failed to detect any GPUs on the runner"

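The GPU-count health check above only counts `Device Name` lines printed by `xpu-smi discovery` under a 30-second timeout; a rough Python equivalent (assuming `xpu-smi` is available on the runner's PATH) is:

```python
import subprocess

# Equivalent of `timeout 30 xpu-smi discovery` in the workflow step above.
try:
    out = subprocess.run(
        ["xpu-smi", "discovery"], capture_output=True, text=True, timeout=30
    ).stdout
except (FileNotFoundError, subprocess.TimeoutExpired):
    out = ""

ngpu = out.count("Device Name")
if ngpu == 0:
    raise SystemExit("Error: Failed to detect any GPUs on the runner")
print(f"Detected {ngpu} XPU device(s)")
```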
@ -1,6 +1,6 @@
|
||||
name: upload-utilization-stats
|
||||
|
||||
description: Upload utilization stats to artifacts.
|
||||
description: Upload utilization stats to artifacts
|
||||
|
||||
inputs:
|
||||
workflow_run_id:
|
||||
@ -23,17 +23,6 @@ inputs:
|
||||
type: string
|
||||
description: 'the job name of the test'
|
||||
required: True
|
||||
local_path:
|
||||
type: string
|
||||
description: 'the local path to the utilization stats file'
|
||||
required: False
|
||||
default: ''
|
||||
artifact_prefix:
|
||||
type: string
|
||||
description: |
|
||||
'the prefix of the raw utilization data, for data stored in zip file, this is the prefix of the parent zip file'
|
||||
default: ""
|
||||
required: False
|
||||
|
||||
runs:
|
||||
using: composite
|
||||
@ -46,8 +35,6 @@ runs:
|
||||
echo "workflow_Name: ${{inputs.workflow_name}}"
|
||||
echo "job_id: ${{inputs.job_id}}"
|
||||
echo "job_name: ${{inputs.job_name}}"
|
||||
echo "artifact_prefix: ${{inputs.artifact_prefix}}"
|
||||
python3 --version
|
||||
- uses: nick-fields/retry@v3.0.0
|
||||
name: Setup dependencies
|
||||
with:
|
||||
@ -66,6 +53,4 @@ runs:
|
||||
--workflow-name "${{inputs.workflow_name}}" \
|
||||
--workflow-run-attempt "${{inputs.workflow_attempt}}" \
|
||||
--job-id "${{inputs.job_id}}" \
|
||||
--job-name "${{inputs.job_name}}" \
|
||||
--local-path "${{inputs.local_path}}" \
|
||||
--artifact-prefix "${{inputs.artifact_prefix}}"
|
||||
--job-name "${{inputs.job_name}}"
|
||||
|
||||
2
.github/ci_commit_pins/audio.txt
vendored
@@ -1 +1 @@
4cb7f57d31b0b288696f09b89e890e5fac092eed
ea5de17755d657508c84c4dce8970b614008adcf

2
.github/ci_commit_pins/torchbench.txt
vendored
@@ -1 +1 @@
e03a63be43e33596f7f0a43b0f530353785e4a59
373ffb19dc470f4423a3176a4133f8f4b3cdb5bd

2
.github/ci_commit_pins/vision.txt
vendored
@@ -1 +1 @@
966da7e46f65d6d49df3e31214470a4fe5cc8e66
d23a6e1664d20707c11781299611436e1f0c104f

2
.github/ci_commit_pins/xla.txt
vendored
@@ -1 +1 @@
edc1a882d872dd7f1362e4312fd045a1d81b3355
8d9e34b352af09c81ff8df448fd27f9c4aae1382

7
.github/merge_rules.yaml
vendored
@ -123,8 +123,6 @@
|
||||
- torch/*docs.py
|
||||
approved_by:
|
||||
- svekars
|
||||
- sekyondaMeta
|
||||
- AlannaBurke
|
||||
mandatory_checks_name:
|
||||
- EasyCLA
|
||||
- Lint
|
||||
@ -400,10 +398,7 @@
|
||||
- torch/_inductor/codegen/cpp_micro_gemm.py
|
||||
- torch/_inductor/codegen/cpp_template_kernel.py
|
||||
- torch/_inductor/codegen/cpp_template.py
|
||||
- torch/_inductor/codegen/cpp_bmm_template.py
|
||||
- torch/_inductor/codegen/cpp_gemm_template.py
|
||||
- torch/_inductor/codegen/cpp_grouped_gemm_template.py
|
||||
- torch/_inductor/codegen/cpp_flex_attention_template.py
|
||||
- torch/csrc/inductor/cpp_prefix.h
|
||||
- test/inductor/test_mkldnn_pattern_matcher.py
|
||||
- test/inductor/test_cpu_repro.py
|
||||
@ -411,7 +406,6 @@
|
||||
- test/inductor/test_cpu_select_algorithm.py
|
||||
- aten/src/ATen/cpu/**
|
||||
- aten/src/ATen/native/quantized/cpu/**
|
||||
- aten/src/ATen/test/vec_test_all_types.*
|
||||
- test/quantization/core/test_quantized_op.py
|
||||
- torch/ao/quantization/quantizer/x86_inductor_quantizer.py
|
||||
- test/quantization/pt2e/test_x86inductor_quantizer.py
|
||||
@ -419,7 +413,6 @@
|
||||
- leslie-fang-intel
|
||||
- jgong5
|
||||
- EikanWang
|
||||
- CaoE
|
||||
mandatory_checks_name:
|
||||
- EasyCLA
|
||||
- Lint
|
||||
|
||||
1
.github/pytorch-probot.yml
vendored
@@ -28,7 +28,6 @@ ciflow_push_tags:
- ciflow/op-benchmark
- ciflow/pull
- ciflow/h100
- ciflow/h100-distributed
retryable_workflows:
- pull
- trunk

10
.github/requirements/README.md
vendored
@@ -11,6 +11,16 @@ jobs, but it also allows them to be cached properly to improve CI
reliability.

The list of support files are as follows:

* Conda:
  * conda-env-iOS. This is used by iOS build and test jobs to setup the
    conda environment
  * conda-env-macOS-ARM64. This is used by MacOS (m1, arm64) build and
    test jobs to setup the conda environment
  * conda-env-Linux-X64. This is used by Linux buck build and test jobs
    to setup the conda environment
* Pip:
  * pip-requirements-iOS.txt. This is used by iOS build and test jobs to
    setup the pip environment
  * pip-requirements-macOS.txt. This is used by MacOS build and test jobs to
    setup the pip environment

8
.github/requirements/conda-env-Linux-X64.txt
vendored
Normal file
@@ -0,0 +1,8 @@
cmake=3.22.*
mkl=2022.1.0
mkl-include=2022.1.0
ninja=1.10.2
numpy=1.23.3
pyyaml=6.0
setuptools=72.1.0
typing-extensions=4.11.0
7
.github/requirements/conda-env-iOS.txt
vendored
Normal file
@@ -0,0 +1,7 @@
blas=1.0
cmake=3.22.1
ninja=1.10.2
numpy=1.23.3
pyyaml=6.0
setuptools=72.1.0
typing-extensions=4.11.0
24
.github/requirements/conda-env-macOS-ARM64
vendored
@ -1,6 +1,22 @@
|
||||
numpy=1.22.3
|
||||
pyyaml=6.0
|
||||
setuptools=72.1.0
|
||||
cmake=3.22.*
|
||||
typing-extensions=4.11.0
|
||||
dataclasses=0.8
|
||||
pip=22.2.2
|
||||
pillow=10.0.1
|
||||
pkg-config=0.29.2
|
||||
wheel=0.37.1
|
||||
# NB: This is intentionally held back because anaconda main doesn't
|
||||
# have updated expecttest, but you don't /need/ the updated version
|
||||
# to run the tests. In the meantime I need to figure out how to
|
||||
# cajole anaconda into updating, or get the package from pypi instead...
|
||||
expecttest=0.1.3
|
||||
|
||||
# Not pinning certifi so that we can always get the latest certificates
|
||||
certifi
|
||||
pip=23.2.1
|
||||
pkg-config=0.29.2
|
||||
setuptools=72.1.0
|
||||
wheel=0.37.1
|
||||
|
||||
# Cross-compiling arm64 from x86-64 picks up 1.40.0 while testing on arm64
|
||||
# itself only has up to 1.39.0 from upstream conda. Both work though
|
||||
libuv>=1.39.0,<=1.40.0
|
||||
|
||||
4
.github/requirements/pip-requirements-iOS.txt
vendored
Normal file
@@ -0,0 +1,4 @@
# iOS simulator requirements
coremltools==5.0b5
protobuf==3.20.2
optree==0.13.0
39
.github/requirements/pip-requirements-macOS.txt
vendored
@ -1,34 +1,33 @@
|
||||
boto3==1.35.42
|
||||
cmake==3.25.*
|
||||
hypothesis==6.56.4
|
||||
expecttest==0.3.0
|
||||
fbscribelogger==0.1.7
|
||||
filelock==3.6.0
|
||||
hypothesis==6.56.4
|
||||
librosa>=0.6.2
|
||||
mpmath==1.3.0
|
||||
networkx==2.8.7
|
||||
ninja==1.10.2.4
|
||||
numba==0.59.0
|
||||
numpy==1.26.4
|
||||
# Use numba-0.49.1 or older on Intel Macs, but 0.56.0 on M1 machines, as older numba is not available
|
||||
numba==0.56.0; platform_machine == "arm64"
|
||||
numba<=0.49.1; platform_machine != "arm64"
|
||||
opt-einsum>=3.3
|
||||
optree==0.13.0
|
||||
packaging==23.1
|
||||
parameterized==0.8.1
|
||||
pillow==10.3.0
|
||||
protobuf==5.29.4
|
||||
psutil==5.9.1
|
||||
nvidia-ml-py==11.525.84
|
||||
packaging==23.1
|
||||
pygments==2.15.0
|
||||
pytest-cpp==2.3.0
|
||||
pytest-flakefinder==1.1.0
|
||||
pytest-rerunfailures==10.3
|
||||
pytest-subtests==0.13.1
|
||||
pytest-xdist==3.3.1
|
||||
pytest==7.3.2
|
||||
pyyaml==6.0.2
|
||||
scipy==1.12.0
|
||||
pytest-xdist==3.3.1
|
||||
pytest-rerunfailures==10.3
|
||||
pytest-flakefinder==1.1.0
|
||||
pytest-subtests==0.13.1
|
||||
scipy==1.10.1
|
||||
sympy==1.13.3
|
||||
tensorboard==2.13.0
|
||||
typing-extensions==4.12.2
|
||||
unittest-xml-reporting<=3.2.0,>=2.0.0
|
||||
xdoctest==1.1.0
|
||||
filelock==3.6.0
|
||||
pytest-cpp==2.3.0
|
||||
z3-solver==4.12.2.0
|
||||
tensorboard==2.13.0
|
||||
optree==0.13.0
|
||||
# NB: test_hparams_* from test_tensorboard is failing with protobuf 5.26.0 in
|
||||
# which the stringify metadata is wrong when escaping double quote
|
||||
protobuf==3.20.2
|
||||
parameterized==0.8.1
|
||||
|
||||
13
.github/scripts/build_triton_wheel.py
vendored
@ -65,7 +65,6 @@ def build_triton(
|
||||
with TemporaryDirectory() as tmpdir:
|
||||
triton_basedir = Path(tmpdir) / "triton"
|
||||
triton_pythondir = triton_basedir / "python"
|
||||
|
||||
triton_repo = "https://github.com/openai/triton"
|
||||
if device == "rocm":
|
||||
triton_pkg_name = "pytorch-triton-rocm"
|
||||
@ -102,19 +101,11 @@ def build_triton(
|
||||
)
|
||||
print("ROCm libraries setup for triton installation...")
|
||||
|
||||
# old triton versions have setup.py in the python/ dir,
|
||||
# new versions have it in the root dir.
|
||||
triton_setupdir = (
|
||||
triton_basedir
|
||||
if (triton_basedir / "setup.py").exists()
|
||||
else triton_pythondir
|
||||
)
|
||||
|
||||
check_call(
|
||||
[sys.executable, "setup.py", "bdist_wheel"], cwd=triton_setupdir, env=env
|
||||
[sys.executable, "setup.py", "bdist_wheel"], cwd=triton_pythondir, env=env
|
||||
)
|
||||
|
||||
whl_path = next(iter((triton_setupdir / "dist").glob("*.whl")))
|
||||
whl_path = next(iter((triton_pythondir / "dist").glob("*.whl")))
|
||||
shutil.copy(whl_path, Path.cwd())
|
||||
|
||||
if device == "rocm":
|
||||
|
||||
4
.github/scripts/docathon-label-sync.py
vendored
@@ -28,12 +28,12 @@ def main() -> None:
    issue = repo.get_issue(issue_number)
    issue_labels = issue.labels
    docathon_label_present = any(
        label.name == "docathon-h1-2025" for label in issue_labels
        label.name == "docathon-h1-2024" for label in issue_labels
    )

    # if the issue has a docathon label, add all labels from the issue to the PR.
    if not docathon_label_present:
        print("The 'docathon-h1-2025' label is not present in the issue.")
        print("The 'docathon-h1-2024' label is not present in the issue.")
        return
    pull_request_labels = pull_request.get_labels()
    pull_request_label_names = [label.name for label in pull_request_labels]

2
.github/scripts/filter_test_configs.py
vendored
@@ -80,7 +80,7 @@ def parse_args() -> Any:
    parser.add_argument(
        "--job-name",
        type=str,
        help="the name of the current job, i.e. linux-jammy-py3.8-gcc7 / build",
        help="the name of the current job, i.e. linux-focal-py3.8-gcc7 / build",
    )
    parser.add_argument("--pr-number", type=str, help="the pull request number")
    parser.add_argument("--tag", type=str, help="the associated tag if it exists")

26
.github/scripts/generate_binary_build_matrix.py
vendored
@ -21,7 +21,7 @@ CUDA_STABLE = "12.6"
|
||||
CUDA_ARCHES_FULL_VERSION = {
|
||||
"11.8": "11.8.0",
|
||||
"12.6": "12.6.3",
|
||||
"12.8": "12.8.1",
|
||||
"12.8": "12.8.0",
|
||||
}
|
||||
CUDA_ARCHES_CUDNN_VERSION = {
|
||||
"11.8": "9",
|
||||
@ -67,27 +67,25 @@ PYTORCH_EXTRA_INSTALL_REQUIREMENTS = {
|
||||
"nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' and platform_machine == 'x86_64' | "
|
||||
"nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | "
|
||||
"nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | "
|
||||
"nvidia-nvshmem-cu12==3.2.5; platform_system == 'Linux' and platform_machine == 'x86_64' | "
|
||||
"nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | "
|
||||
"nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' and platform_machine == 'x86_64' | "
|
||||
"nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux' and platform_machine == 'x86_64'"
|
||||
),
|
||||
"12.8": (
|
||||
"nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | "
|
||||
"nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | "
|
||||
"nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | "
|
||||
"nvidia-cuda-nvrtc-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | "
|
||||
"nvidia-cuda-runtime-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | "
|
||||
"nvidia-cuda-cupti-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | "
|
||||
"nvidia-cudnn-cu12==9.8.0.87; platform_system == 'Linux' and platform_machine == 'x86_64' | "
|
||||
"nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | "
|
||||
"nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' and platform_machine == 'x86_64' | "
|
||||
"nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' and platform_machine == 'x86_64' | "
|
||||
"nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' and platform_machine == 'x86_64' | "
|
||||
"nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | "
|
||||
"nvidia-cublas-cu12==12.8.3.14; platform_system == 'Linux' and platform_machine == 'x86_64' | "
|
||||
"nvidia-cufft-cu12==11.3.3.41; platform_system == 'Linux' and platform_machine == 'x86_64' | "
|
||||
"nvidia-curand-cu12==10.3.9.55; platform_system == 'Linux' and platform_machine == 'x86_64' | "
|
||||
"nvidia-cusolver-cu12==11.7.2.55; platform_system == 'Linux' and platform_machine == 'x86_64' | "
|
||||
"nvidia-cusparse-cu12==12.5.7.53; platform_system == 'Linux' and platform_machine == 'x86_64' | "
|
||||
"nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | "
|
||||
"nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | "
|
||||
"nvidia-nvshmem-cu12==3.2.5; platform_system == 'Linux' and platform_machine == 'x86_64' | "
|
||||
"nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | "
|
||||
"nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | "
|
||||
"nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux' and platform_machine == 'x86_64'"
|
||||
"nvidia-nvtx-cu12==12.8.55; platform_system == 'Linux' and platform_machine == 'x86_64' | "
|
||||
"nvidia-nvjitlink-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | "
|
||||
"nvidia-cufile-cu12==1.13.0.11; platform_system == 'Linux' and platform_machine == 'x86_64'"
|
||||
),
|
||||
"xpu": (
|
||||
"intel-cmplr-lib-rt==2025.1.1 | "
|
||||
|
||||
6
.github/scripts/lintrunner.sh
vendored
@ -31,9 +31,6 @@ python3 -m tools.pyi.gen_pyi \
|
||||
--deprecated-functions-path "tools/autograd/deprecated.yaml"
|
||||
python3 torch/utils/data/datapipes/gen_pyi.py
|
||||
|
||||
# Also check generated pyi files
|
||||
find torch -name '*.pyi' -exec git add --force -- "{}" +
|
||||
|
||||
RC=0
|
||||
# Run lintrunner on all files
|
||||
if ! lintrunner --force-color --tee-json=lint.json ${ADDITIONAL_LINTRUNNER_ARGS} 2> /dev/null; then
|
||||
@ -44,9 +41,6 @@ if ! lintrunner --force-color --tee-json=lint.json ${ADDITIONAL_LINTRUNNER_ARGS}
|
||||
RC=1
|
||||
fi
|
||||
|
||||
# Unstage temporally added pyi files
|
||||
find torch -name '*.pyi' -exec git restore --staged -- "{}" +
|
||||
|
||||
# Use jq to massage the JSON lint output into GitHub Actions workflow commands.
|
||||
jq --raw-output \
|
||||
'"::\(if .severity == "advice" or .severity == "disabled" then "warning" else .severity end) file=\(.path),line=\(.line),col=\(.char),title=\(.code) \(.name)::" + (.description | gsub("\\n"; "%0A"))' \
|
||||
|
||||
@ -27,9 +27,6 @@ unset ACCESS_TOKEN
|
||||
# it does one job, stops and unregisters
|
||||
registration_token=$(jq --raw-output .token "$token_file")
|
||||
|
||||
# workaround for https://gitlab.com/qemu-project/qemu/-/issues/2600
|
||||
export DOTNET_EnableWriteXorExecute=0
|
||||
|
||||
./config.sh \
|
||||
--unattended \
|
||||
--ephemeral \
|
||||
@ -47,5 +44,8 @@ rm -f "$token_file"
|
||||
# and it doesn't work for non-root user
|
||||
source venv/bin/activate
|
||||
|
||||
# workaround for https://gitlab.com/qemu-project/qemu/-/issues/2600
|
||||
export DOTNET_EnableWriteXorExecute=0
|
||||
|
||||
# Run one job.
|
||||
./run.sh
|
||||
|
||||
6
.github/scripts/trymerge.py
vendored
@ -939,12 +939,6 @@ class GitHubPR:
|
||||
summary=None,
|
||||
)
|
||||
|
||||
# Making an exception for Apply lint auggestions/autoformat because the
|
||||
# bot adds a merged label -> triggers workflow -> sometimes needs
|
||||
# approval -> is read as failure, which results in a blocked merge, but
|
||||
# this workflow doesn't provide mergability info
|
||||
self.conclusions.pop("Apply lint suggestions", None)
|
||||
|
||||
return self.conclusions
|
||||
|
||||
def get_authors(self) -> dict[str, str]:
|
||||
|
||||
@ -76,7 +76,7 @@ jobs:
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
needs: get-label-type
|
||||
{%- if os == "windows-arm64" %}
|
||||
runs-on: "windows-11-arm64-preview"
|
||||
runs-on: "windows-11-arm64"
|
||||
{%- else %}
|
||||
{%- if branches == "nightly" %}
|
||||
runs-on: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge"
|
||||
@ -102,12 +102,23 @@ jobs:
|
||||
run: |
|
||||
mkdir "%NIGHTLIES_PYTORCH_ROOT%"
|
||||
mkdir "%PYTORCH_FINAL_PACKAGE_DIR%"
|
||||
- name: Enable long paths
|
||||
- name: Git checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
path: "pytorch"
|
||||
- name: Bootstrap Build Tools
|
||||
shell: cmd
|
||||
run: |
|
||||
git config --system --get core.longpaths || echo "core.longpaths is not set, setting it now"
|
||||
git config --system core.longpaths true
|
||||
- name: Git checkout PyTorch
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_buildtools.bat"
|
||||
- name: Bootstrap Git
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_git.bat"
|
||||
- name: Remove Pytorch folder
|
||||
shell: cmd
|
||||
run: |
|
||||
rmdir /s /q "pytorch"
|
||||
- name: Git checkout PyTorch - recursive
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
path: "pytorch"
|
||||
@ -161,7 +172,7 @@ jobs:
|
||||
- !{{ config["build_name"] }}-build
|
||||
- get-label-type
|
||||
{%- if os == "windows-arm64" %}
|
||||
runs-on: "windows-11-arm64-preview"
|
||||
runs-on: "windows-11-arm64"
|
||||
{%- else %}
|
||||
{%- if config["gpu_arch_type"] == "cuda" %}
|
||||
{%- if branches == "nightly" %}
|
||||
@ -187,11 +198,18 @@ jobs:
|
||||
echo BINARY_ENV_FILE=%RUNNER_TEMP%/env>> %GITHUB_ENV%
|
||||
echo PYTORCH_FINAL_PACKAGE_DIR=%RUNNER_TEMP%/artifacts>> %GITHUB_ENV%
|
||||
echo WIN_PACKAGE_WORK_DIR=%RUNNER_TEMP%>> %GITHUB_ENV%
|
||||
- name: Enable long paths
|
||||
- name: Git checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
path: "pytorch"
|
||||
- name: Populate binary env
|
||||
shell: cmd
|
||||
run: |
|
||||
git config --system --get core.longpaths || echo "core.longpaths is not set, setting it now"
|
||||
git config --system core.longpaths true
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_git.bat"
|
||||
- name: Remove Pytorch folder
|
||||
shell: cmd
|
||||
run: |
|
||||
rmdir /s /q "pytorch"
|
||||
- name: Git checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
@ -205,6 +223,10 @@ jobs:
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_python.bat"
|
||||
- name: Bootstrap Build Tools
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_buildtools.bat"
|
||||
- name: Bootstrap Rust
|
||||
shell: cmd
|
||||
run: |
|
||||
|
||||
30
.github/workflows/_link_check.yml
vendored
@ -7,25 +7,29 @@ on:
|
||||
ref:
|
||||
type: string
|
||||
required: true
|
||||
run-url-lint:
|
||||
type: boolean
|
||||
required: false
|
||||
default: false
|
||||
|
||||
jobs:
|
||||
lint-urls:
|
||||
if: ${{ github.event_name != 'pull_request' || !contains(github.event.pull_request.labels.*.name, 'skip-url-lint') }}
|
||||
if: ${{ inputs.run-url-lint }}
|
||||
uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
|
||||
with:
|
||||
timeout: 120
|
||||
runner: ${{ inputs.runner }}linux.2xlarge
|
||||
docker-image: ci-image:pytorch-linux-jammy-linter
|
||||
docker-image: pytorch-linux-focal-linter
|
||||
fetch-depth: 0
|
||||
submodules: false
|
||||
ref: ${{ inputs.ref }}
|
||||
script: |
|
||||
./scripts/lint_urls.sh $(
|
||||
if [ "${{ github.event_name }}" = "pull_request" ]; then
|
||||
echo "${{ github.event.pull_request.base.sha }}" "${{ github.event.pull_request.head.sha }}"
|
||||
else
|
||||
echo "${{ github.event.before }}" "${{ github.sha }}"
|
||||
fi
|
||||
{ [ "${{ github.event_name }}" = "pull_request" ] \
|
||||
&& git diff --name-only "${{ github.event.pull_request.base.sha }}...${{ github.event.pull_request.head.sha }}"; } \
|
||||
|| \
|
||||
{ [ "${{ github.event_name }}" = "push" ] \
|
||||
&& git diff --name-only "${{ github.event.before }}...${{ github.sha }}"; }
|
||||
) || {
|
||||
echo
|
||||
echo "URL lint failed."
|
||||
@ -40,17 +44,17 @@ jobs:
|
||||
with:
|
||||
timeout: 60
|
||||
runner: ${{ inputs.runner }}linux.2xlarge
|
||||
docker-image: ci-image:pytorch-linux-jammy-linter
|
||||
docker-image: pytorch-linux-focal-linter
|
||||
fetch-depth: 0
|
||||
submodules: false
|
||||
ref: ${{ inputs.ref }}
|
||||
script: |
|
||||
./scripts/lint_xrefs.sh $(
|
||||
if [ "${{ github.event_name }}" = "pull_request" ]; then
|
||||
echo "${{ github.event.pull_request.base.sha }}" "${{ github.event.pull_request.head.sha }}"
|
||||
else
|
||||
echo "${{ github.event.before }}" "${{ github.sha }}"
|
||||
fi
|
||||
{ [ "${{ github.event_name }}" = "pull_request" ] \
|
||||
&& git diff --name-only "${{ github.event.pull_request.base.sha }}...${{ github.event.pull_request.head.sha }}"; } \
|
||||
|| \
|
||||
{ [ "${{ github.event_name }}" = "push" ] \
|
||||
&& git diff --name-only "${{ github.event.before }}...${{ github.sha }}"; }
|
||||
) || {
|
||||
echo
|
||||
echo "Xref lint failed."
|
||||
|
||||
110
.github/workflows/_linux-build.yml
vendored
@ -74,32 +74,6 @@ on:
|
||||
Overwrite the number of jobs to use for the build
|
||||
required: false
|
||||
type: string
|
||||
disable-monitor:
|
||||
description: |
|
||||
Disable utilization monitoring for build job
|
||||
required: false
|
||||
type: boolean
|
||||
default: false
|
||||
monitor-log-interval:
|
||||
description: |
|
||||
Set the interval for the monitor script to log utilization.
|
||||
required: false
|
||||
type: number
|
||||
default: 5
|
||||
monitor-data-collect-interval:
|
||||
description: |
|
||||
Set the interval for the monitor script to collect data.
|
||||
required: false
|
||||
type: number
|
||||
default: 1
|
||||
|
||||
allow-reuse-old-whl:
|
||||
description: |
|
||||
If set, the build try to pull an old wheel from s3 that was built on a
|
||||
commit with no cpp changes from this commit
|
||||
required: false
|
||||
type: boolean
|
||||
default: false
|
||||
|
||||
secrets:
|
||||
HUGGING_FACE_HUB_TOKEN:
|
||||
@ -158,15 +132,6 @@ jobs:
|
||||
role-session-name: gha-linux-build
|
||||
aws-region: us-east-1
|
||||
|
||||
- name: Check if can use old whl build
|
||||
id: use-old-whl
|
||||
uses: ./.github/actions/reuse-old-whl
|
||||
if: ${{ inputs.allow-reuse-old-whl }}
|
||||
with:
|
||||
build-environment: ${{ inputs.build-environment }}
|
||||
run-id: ${{ github.run_id }}
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Calculate docker image
|
||||
id: calculate-docker-image
|
||||
uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
|
||||
@ -176,7 +141,7 @@ jobs:
|
||||
|
||||
- name: Use following to pull public copy of the image
|
||||
id: print-ghcr-mirror
|
||||
if: inputs.build-environment != 'linux-s390x-binary-manywheel' && steps.use-old-whl.outputs.reuse != 'true'
|
||||
if: inputs.build-environment != 'linux-s390x-binary-manywheel'
|
||||
env:
|
||||
ECR_DOCKER_IMAGE: ${{ steps.calculate-docker-image.outputs.docker-image }}
|
||||
shell: bash
|
||||
@ -186,7 +151,7 @@ jobs:
|
||||
|
||||
- name: Pull docker image
|
||||
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
|
||||
if: inputs.build-environment != 'linux-s390x-binary-manywheel' && steps.use-old-whl.outputs.reuse != 'true'
|
||||
if: inputs.build-environment != 'linux-s390x-binary-manywheel'
|
||||
with:
|
||||
docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
|
||||
|
||||
@ -211,38 +176,17 @@ jobs:
|
||||
selected-test-configs: ${{ inputs.selected-test-configs }}
|
||||
job-name: ${{ steps.get-job-id.outputs.job-name }}
|
||||
|
||||
- name: Start monitoring script
|
||||
id: monitor-script
|
||||
if: ${{ !inputs.disable-monitor }}
|
||||
shell: bash
|
||||
continue-on-error: true
|
||||
env:
|
||||
JOB_ID: ${{ steps.get-job-id.outputs.job-id }}
|
||||
JOB_NAME: ${{ steps.get-job-id.outputs.job-name }}
|
||||
WORKFLOW_NAME: ${{ github.workflow }}
|
||||
WORKFLOW_RUN_ID: ${{github.run_id}}
|
||||
MONITOR_LOG_INTERVAL: ${{ inputs.monitor-log-interval }}
|
||||
MONITOR_DATA_COLLECT_INTERVAL: ${{ inputs.monitor-data-collect-interval }}
|
||||
run: |
|
||||
mkdir -p ../../usage_logs
|
||||
python3 -m pip install psutil==5.9.1 dataclasses_json==0.6.7
|
||||
python3 -m tools.stats.monitor \
|
||||
--log-interval "$MONITOR_LOG_INTERVAL" \
|
||||
--data-collect-interval "$MONITOR_DATA_COLLECT_INTERVAL" \
|
||||
> "../../usage_logs/usage_log_build_${JOB_ID}.txt" 2>&1 &
|
||||
echo "monitor-script-pid=${!}" >> "${GITHUB_OUTPUT}"
|
||||

- name: Download pytest cache
uses: ./.github/actions/pytest-cache-download
continue-on-error: true
if: inputs.build-environment != 'linux-s390x-binary-manywheel' && steps.use-old-whl.outputs.reuse != 'true'
if: inputs.build-environment != 'linux-s390x-binary-manywheel'
with:
cache_dir: .pytest_cache
job_identifier: ${{ github.workflow }}_${{ inputs.build-environment }}
s3_bucket: ${{ inputs.s3-bucket }}

- name: Build
if: (steps.filter.outputs.is-test-matrix-empty == 'False' || inputs.test-matrix == '') && steps.use-old-whl.outputs.reuse != 'true'
if: steps.filter.outputs.is-test-matrix-empty == 'False' || inputs.test-matrix == ''
id: build
env:
BUILD_ENVIRONMENT: ${{ inputs.build-environment }}
@@ -336,23 +280,14 @@ jobs:
END_TIME=$(date +%s)
echo "build_time=$((END_TIME - START_TIME))" >> "$GITHUB_OUTPUT"
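The build_time output above is plain epoch-second arithmetic; a sketch of the full pattern, assuming START_TIME is captured the same way earlier in the step (that part is off-screen here):

    START_TIME=$(date +%s)   # assumed to be set before the build commands
    # ... build commands ...
    END_TIME=$(date +%s)
    echo "build_time=$((END_TIME - START_TIME))" >> "$GITHUB_OUTPUT"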
- name: Stop monitoring script
if: ${{ always() && steps.monitor-script.outputs.monitor-script-pid }}
shell: bash
continue-on-error: true
env:
MONITOR_SCRIPT_PID: ${{ steps.monitor-script.outputs.monitor-script-pid }}
run: |
kill "$MONITOR_SCRIPT_PID"

- name: Archive artifacts into zip
if: inputs.build-generates-artifacts && steps.build.outcome != 'skipped' && steps.use-old-whl.outputs.reuse != 'true'
if: inputs.build-generates-artifacts && steps.build.outcome != 'skipped'
run: |
zip -1 -r artifacts.zip dist/ build/custom_test_artifacts build/lib build/bin .additional_ci_files

- name: Store PyTorch Build Artifacts on S3
uses: seemethere/upload-artifact-s3@baba72d0712b404f646cebe0730933554ebce96a # v5.1.0
if: inputs.build-generates-artifacts && (steps.build.outcome != 'skipped' || steps.use-old-whl.outputs.reuse == 'true') && inputs.build-environment != 'linux-s390x-binary-manywheel'
if: inputs.build-generates-artifacts && steps.build.outcome != 'skipped' && inputs.build-environment != 'linux-s390x-binary-manywheel'
with:
name: ${{ inputs.build-environment }}
retention-days: 14
@@ -362,32 +297,13 @@ jobs:

- name: Store PyTorch Build Artifacts for s390x
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
if: inputs.build-generates-artifacts && (steps.build.outcome != 'skipped' || steps.use-old-whl.outputs.reuse == 'true') && inputs.build-environment == 'linux-s390x-binary-manywheel'
if: inputs.build-generates-artifacts && steps.build.outcome != 'skipped' && inputs.build-environment == 'linux-s390x-binary-manywheel'
with:
name: ${{ inputs.build-environment }}
retention-days: 14
if-no-files-found: error
path: artifacts.zip

- name: copy logs
shell: bash
if: ${{ always() && steps.build.outcome != 'skipped' && !inputs.disable-monitor && inputs.build-environment != 'linux-s390x-binary-manywheel'}}
continue-on-error: true
run: |
rm -f ./usage_logs
mkdir -p ./usage_logs
cp ../../usage_logs/usage_log_build_*.txt ./usage_logs/

- name: Upload raw usage log to s3
if: ${{ always() && steps.build.outcome != 'skipped' && !inputs.disable-monitor && inputs.build-environment != 'linux-s390x-binary-manywheel'}}
uses: seemethere/upload-artifact-s3@v5
with:
s3-prefix: |
${{ github.repository }}/${{ github.run_id }}/${{ github.run_attempt }}/artifact
retention-days: 14
if-no-files-found: warn
path: usage_logs/usage_log_build_*.txt

- name: Upload sccache stats
if: steps.build.outcome != 'skipped' && inputs.build-environment != 'linux-s390x-binary-manywheel'
uses: ./.github/actions/upload-sccache-stats
@@ -395,18 +311,6 @@ jobs:
github-token: ${{ secrets.GITHUB_TOKEN }}
build-time: ${{ steps.build.outputs.build_time }}

- name: Upload utilization stats
if: ${{ always() && steps.build.outcome != 'skipped' && !inputs.disable-monitor && inputs.build-environment != 'linux-s390x-binary-manywheel' }}
continue-on-error: true
uses: ./.github/actions/upload-utilization-stats
with:
job_id: ${{ steps.get-job-id.outputs.job-id }}
job_name: ${{ steps.get-job-id.outputs.job-name }}
workflow_name: ${{ github.workflow }}
workflow_run_id: ${{github.run_id}}
workflow_attempt: ${{github.run_attempt}}
artifact_prefix: usage_log_build_${{ steps.get-job-id.outputs.job-id }}

- name: Teardown Linux
uses: pytorch/test-infra/.github/actions/teardown-linux@main
if: always() && inputs.build-environment != 'linux-s390x-binary-manywheel'

.github/workflows/_linux-test.yml (vendored, 4 changes)
@@ -376,7 +376,7 @@ jobs:
- name: Upload pytest cache if tests failed
uses: ./.github/actions/pytest-cache-upload
continue-on-error: true
if: failure() && steps.test.conclusion && steps.test.conclusion == 'failure' && inputs.build-environment != 'linux-s390x-binary-manywheel'
if: failure() && steps.test.conclusion && steps.test.conclusion == 'failure'
with:
cache_dir: .pytest_cache
shard: ${{ matrix.shard }}
@@ -431,7 +431,7 @@ jobs:
path: ./**/core.[1-9]*

- name: Upload utilization stats
if: ${{ always() && steps.test.conclusion && steps.test.conclusion != 'skipped' && !inputs.disable-monitor && inputs.build-environment != 'linux-s390x-binary-manywheel' }}
if: ${{ always() && steps.test.conclusion && steps.test.conclusion != 'skipped' && !inputs.disable-monitor }}
continue-on-error: true
uses: ./.github/actions/upload-utilization-stats
with:
.github/workflows/_mac-build.yml (vendored, 7 changes)
@@ -30,7 +30,7 @@ on:
python-version:
required: false
type: string
default: "3.12"
default: "3.9"
description: |
The python version to be used. Will be 3.9 by default
test-matrix:
@@ -85,9 +85,8 @@ jobs:
uses: pytorch/test-infra/.github/actions/setup-miniconda@main
with:
python-version: ${{ inputs.python-version }}
environment-file: .github/requirements/conda-env-macOS-ARM64
pip-requirements-file: .github/requirements/pip-requirements-macOS.txt
default-packages: ""
environment-file: .github/requirements/conda-env-${{ runner.os }}-${{ runner.arch }}
pip-requirements-file: .github/requirements/pip-requirements-${{ runner.os }}.txt

- name: Install sccache (only for non-forked PRs, and pushes to trunk)
uses: nick-fields/retry@7152eba30c6575329ac0576536151aca5a72780e # v3.0.0
.github/workflows/_mac-test.yml (vendored, 8 changes)
@@ -21,7 +21,7 @@ on:
python-version:
required: false
type: string
default: "3.12"
default: "3.9"
description: |
The python version to be used. Will be 3.9 by default
timeout-minutes:
@@ -144,9 +144,8 @@ jobs:
uses: pytorch/test-infra/.github/actions/setup-miniconda@main
with:
python-version: ${{ inputs.python-version }}
environment-file: .github/requirements/conda-env-macOS-ARM64
pip-requirements-file: .github/requirements/pip-requirements-macOS.txt
default-packages: ""
environment-file: .github/requirements/conda-env-${{ runner.os }}-${{ runner.arch }}
pip-requirements-file: .github/requirements/pip-requirements-${{ runner.os }}.txt

- name: Parse ref
id: parse-ref
@@ -279,7 +278,6 @@ jobs:
workflow_name: ${{ github.workflow }}
workflow_run_id: ${{github.run_id}}
workflow_attempt: ${{github.run_attempt}}
local_path: usage_log.txt

- name: Clean up disk space
if: always()
.github/workflows/_win-build.yml (vendored, 4 changes)
@@ -23,7 +23,7 @@ on:
vc-year:
required: false
type: string
default: "2022"
default: "2019"
description: The Visual Studio year to use for building.
build-with-debug:
required: false
@@ -98,7 +98,7 @@ jobs:
To start build locally, change working folder to \actions-runner\_work\pytorch\pytorch,
Activate miniconda and Visual Studio environment, by running:
call C:\Jenkins\Miniconda3\Scripts\activate.bat C:\Jenkins\Miniconda3
call "C:\Program Files (x86)\Microsoft Visual Studio\2022\BuildTools\VC\Auxiliary\Build\vcvarsall.bat" x64
call "C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Auxiliary\Build\vcvarsall.bat" x64

# [see note: pytorch repo ref]
- name: Checkout PyTorch
.github/workflows/_win-test.yml (vendored, 6 changes)
@@ -91,7 +91,7 @@ jobs:
To start tests locally, change working folder to \actions-runner\_work\pytorch\pytorch\test,
Activate miniconda and Visual Studio environment and set PYTHON_PATH, by running:
call C:\Jenkins\Miniconda3\Scripts\activate.bat C:\Jenkins\Miniconda3
call "C:\Program Files (x86)\Microsoft Visual Studio\2022\BuildTools\VC\Auxiliary\Build\vcvarsall.bat" x64
call "C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Auxiliary\Build\vcvarsall.bat" x64
set PYTHONPATH=C:\actions-runner\_work\pytorch\pytorch\build\win_tmp\build

# [see note: pytorch repo ref]
@@ -191,8 +191,8 @@ jobs:
NO_TD: ${{ steps.keep-going.outputs.ci-no-td }}
VC_PRODUCT: "BuildTools"
VC_VERSION: ""
VS_VERSION: "17.4.1"
VC_YEAR: "2022"
VS_VERSION: "16.8.6"
VC_YEAR: "2019"
AWS_DEFAULT_REGION: us-east-1
PR_NUMBER: ${{ github.event.pull_request.number }}
GITHUB_REPOSITORY: ${{ github.repository }}
.github/workflows/assigntome-docathon.yml (vendored, 6 changes)
@@ -28,14 +28,14 @@ jobs:
repo: context.repo.repo,
issue_number: issueNumber
});
const hasLabel = issue.labels.some(label => label.name === 'docathon-h1-2025');
const hasLabel = issue.labels.some(label => label.name === 'docathon-h1-2024');
if (hasLabel) {
if (issue.assignee !== null) {
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: issueNumber,
body: "The issue is already assigned. Please pick an opened and unnasigned issue with the [docathon-h1-2025 label](https://github.com/pytorch/pytorch/issues?q=is%3Aopen+is%3Aissue+label%3Adocathon-h1-2025)."
body: "The issue is already assigned. Please pick an opened and unnasigned issue with the [docathon-h1-2024 label](https://github.com/pytorch/pytorch/issues?q=is%3Aopen+is%3Aissue+label%3Adocathon-h1-2024)."
});
} else {
await github.rest.issues.addAssignees({
@@ -46,7 +46,7 @@ jobs:
});
}
} else {
const commmentMessage = "This issue does not have the correct label. Please pick an opened and unnasigned issue with the [docathon-h1-2025 label](https://github.com/pytorch/pytorch/issues?q=is%3Aopen+is%3Aissue+label%3Adocathon-h1-2025)."
const commmentMessage = "This issue does not have the correct label. Please pick an opened and unnasigned issue with the [docathon-h1-2024 label](https://github.com/pytorch/pytorch/issues?q=is%3Aopen+is%3Aissue+label%3Adocathon-h1-2024)."
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
.github/workflows/build-libtorch-images.yml (vendored, 1 change)
@@ -50,6 +50,7 @@ jobs:
include: [
{ tag: "cuda12.8" },
{ tag: "cuda12.6" },
{ tag: "cuda12.4" },
{ tag: "cuda11.8" },
{ tag: "rocm6.3" },
{ tag: "rocm6.4" },
.github/workflows/build-magma-windows.yml (vendored, 5 changes)
@@ -19,15 +19,14 @@ concurrency:
jobs:
build-windows-magma:
if: github.repository_owner == 'pytorch'
runs-on: windows-2022
runs-on: windows-2019
strategy:
matrix:
cuda_version: ["128", "126"]
cuda_version: ["128", "126", "118"]
config: ["Release", "Debug"]
env:
CUDA_VERSION: ${{ matrix.cuda_version }}
CONFIG: ${{ matrix.config }}
VC_YEAR: "2022"
steps:
- name: Checkout pytorch/pytorch
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
.github/workflows/build-manywheel-images-s390x.yml
@@ -3,9 +3,19 @@ name: Build manywheel docker images for s390x
on:
workflow_dispatch:
push:
branches:
- main
- release/*
tags:
- ciflow/s390/*
# NOTE: Binary build pipelines should only get triggered on release candidate or nightly builds
# Release candidate tags look like: v1.11.0-rc1
- v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
paths:
- .ci/docker/**
- .github/workflows/build-manywheel-images-s390x.yml
pull_request:
paths:
- .ci/docker/**
- .github/workflows/build-manywheel-images-s390x.yml

.github/workflows/build-manywheel-images.yml (vendored, 1 change)
@@ -49,6 +49,7 @@ jobs:
include: [
{ name: "manylinux2_28-builder", tag: "cuda12.8", runner: "linux.9xlarge.ephemeral" },
{ name: "manylinux2_28-builder", tag: "cuda12.6", runner: "linux.9xlarge.ephemeral" },
{ name: "manylinux2_28-builder", tag: "cuda12.4", runner: "linux.9xlarge.ephemeral" },
{ name: "manylinux2_28-builder", tag: "cuda11.8", runner: "linux.9xlarge.ephemeral" },
{ name: "manylinuxaarch64-builder", tag: "cuda12.8", runner: "linux.arm64.2xlarge.ephemeral" },
{ name: "manylinux2_28-builder", tag: "rocm6.3", runner: "linux.9xlarge.ephemeral" },
.github/workflows/build-triton-wheel.yml (vendored, 9 changes)
@@ -139,15 +139,6 @@ jobs:

docker exec -t "${container_name}" yum install -y zlib-devel zip
docker exec -t "${container_name}" "${PYTHON_EXECUTABLE}" -m pip install -U setuptools==78.1.0 pybind11==2.13.1 auditwheel wheel
set +e
docker exec -t "${container_name}" command -v pip
has_pip=$?
set -e
if [ $has_pip -eq 0 ] ; then
docker exec -t "${container_name}" pip install -U cmake --force-reinstall
else
docker exec -t "${container_name}" "${PYTHON_EXECUTABLE}" -m pip install -U cmake --force-reinstall
fi

if [[ ("${{ matrix.device }}" == "cuda" || "${{ matrix.device }}" == "rocm" || "${{ matrix.device }}" == "aarch64" ) ]]; then
# With this install, it gets clang 16.0.6.
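The pip probe in the hunk above works by temporarily disabling exit-on-error: command -v returns non-zero when the command is missing, which would otherwise kill a script running under set -e. A minimal sketch of the same pattern outside the Docker wrapper:

    set -e                  # script otherwise exits on any failing command
    set +e                  # temporarily allow a non-zero status
    command -v pip
    has_pip=$?
    set -e
    if [ "$has_pip" -eq 0 ]; then
        pip install -U cmake --force-reinstall
    else
        python3 -m pip install -U cmake --force-reinstall
    fi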
.github/workflows/docker-builds.yml (vendored, 44 changes)
@@ -49,21 +49,21 @@ jobs:
matrix:
runner: [linux.12xlarge]
docker-image-name: [
pytorch-linux-jammy-cuda12.6-cudnn9-py3-gcc11,
pytorch-linux-jammy-cuda12.6-cudnn9-py3-gcc9-inductor-benchmarks,
pytorch-linux-jammy-cuda12.6-cudnn9-py3.12-gcc9-inductor-benchmarks,
pytorch-linux-jammy-cuda12.6-cudnn9-py3.13-gcc9-inductor-benchmarks,
pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc9-inductor-benchmarks,
pytorch-linux-jammy-cuda12.8-cudnn9-py3.12-gcc9-inductor-benchmarks,
pytorch-linux-jammy-cuda12.8-cudnn9-py3.13-gcc9-inductor-benchmarks,
pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc9,
pytorch-linux-jammy-py3.9-clang12,
pytorch-linux-jammy-py3.11-clang12,
pytorch-linux-jammy-py3.12-clang12,
pytorch-linux-jammy-py3.13-clang12,
pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9-inductor-benchmarks,
pytorch-linux-focal-cuda12.4-cudnn9-py3.12-gcc9-inductor-benchmarks,
pytorch-linux-focal-cuda12.4-cudnn9-py3.13-gcc9-inductor-benchmarks,
pytorch-linux-focal-cuda12.6-cudnn9-py3-gcc11,
pytorch-linux-focal-cuda12.6-cudnn9-py3-gcc9-inductor-benchmarks,
pytorch-linux-focal-cuda12.6-cudnn9-py3.12-gcc9-inductor-benchmarks,
pytorch-linux-focal-cuda12.6-cudnn9-py3.13-gcc9-inductor-benchmarks,
pytorch-linux-focal-cuda11.8-cudnn9-py3-gcc9,
pytorch-linux-focal-py3.9-clang10,
pytorch-linux-focal-py3.11-clang10,
pytorch-linux-focal-py3.12-clang10,
pytorch-linux-focal-py3.13-clang10,
pytorch-linux-jammy-rocm-n-1-py3,
pytorch-linux-jammy-rocm-n-py3,
pytorch-linux-jammy-cuda12.8-cudnn9-py3.9-clang12,
pytorch-linux-jammy-cuda11.8-cudnn9-py3.9-clang12,
pytorch-linux-jammy-py3.9-gcc11,
pytorch-linux-jammy-py3.9-gcc11-inductor-benchmarks,
pytorch-linux-jammy-py3.12-halide,
@@ -71,9 +71,9 @@ jobs:
pytorch-linux-jammy-xpu-2025.1-py3,
pytorch-linux-jammy-py3-clang15-asan,
pytorch-linux-jammy-py3-clang18-asan,
pytorch-linux-jammy-py3-clang12-onnx,
pytorch-linux-jammy-linter,
pytorch-linux-jammy-cuda12.8-cudnn9-py3.9-linter,
pytorch-linux-focal-py3-clang10-onnx,
pytorch-linux-focal-linter,
pytorch-linux-jammy-cuda11.8-cudnn9-py3.9-linter,
pytorch-linux-jammy-py3-clang12-executorch,
pytorch-linux-jammy-py3.12-triton-cpu
]
@@ -110,6 +110,18 @@ jobs:
always-rebuild: true
push: true

- name: Push docker image to old name
shell: bash
env:
ECR_DOCKER_IMAGE: ${{ steps.build-docker-image.outputs.docker-image }}
run: |
# This can be deleted after people have rebased their PRs/the new name has been in main for a while
set -euox pipefail
docker_image_name="308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/${{ matrix.docker-image-name }}"
foldersha=${ECR_DOCKER_IMAGE##*-}
docker tag "${ECR_DOCKER_IMAGE}" "${docker_image_name}:${foldersha}"
docker push "${docker_image_name}:${foldersha}"
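The "Push docker image to old name" step derives the tag with shell suffix extraction: ${ECR_DOCKER_IMAGE##*-} removes the longest prefix ending in "-", leaving everything after the last dash. A small sketch with a made-up image reference:

    ECR_DOCKER_IMAGE="308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/ci-image:pytorch-linux-jammy-py3.9-gcc11-0123abc"   # illustrative value only
    foldersha=${ECR_DOCKER_IMAGE##*-}   # yields "0123abc"
    echo "$foldersha"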
- name: Pull docker image
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
with:

.github/workflows/generated-linux-aarch64-binary-manywheel-nightly.yml (generated, vendored, 12 changes)
@@ -136,7 +136,7 @@ jobs:
ALPINE_IMAGE: "arm64v8/alpine"
build_name: manywheel-py3_9-cuda-aarch64-12_8
build_environment: linux-aarch64-binary-manywheel
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.8.0.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.2.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux' and platform_machine == 'x86_64'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.8.0.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.3.14; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.41; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.2.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.7.53; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.0.11; platform_system == 'Linux' and platform_machine == 'x86_64'
timeout-minutes: 420
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -252,7 +252,7 @@ jobs:
ALPINE_IMAGE: "arm64v8/alpine"
build_name: manywheel-py3_10-cuda-aarch64-12_8
build_environment: linux-aarch64-binary-manywheel
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.8.0.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.2.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux' and platform_machine == 'x86_64'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.8.0.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.3.14; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.41; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.2.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.7.53; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.0.11; platform_system == 'Linux' and platform_machine == 'x86_64'
timeout-minutes: 420
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -368,7 +368,7 @@ jobs:
ALPINE_IMAGE: "arm64v8/alpine"
build_name: manywheel-py3_11-cuda-aarch64-12_8
build_environment: linux-aarch64-binary-manywheel
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.8.0.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.2.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux' and platform_machine == 'x86_64'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.8.0.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.3.14; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.41; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.2.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.7.53; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.0.11; platform_system == 'Linux' and platform_machine == 'x86_64'
timeout-minutes: 420
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -484,7 +484,7 @@ jobs:
ALPINE_IMAGE: "arm64v8/alpine"
build_name: manywheel-py3_12-cuda-aarch64-12_8
build_environment: linux-aarch64-binary-manywheel
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.8.0.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.2.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux' and platform_machine == 'x86_64'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.8.0.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.3.14; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.41; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.2.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.7.53; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.0.11; platform_system == 'Linux' and platform_machine == 'x86_64'
timeout-minutes: 420
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -600,7 +600,7 @@ jobs:
ALPINE_IMAGE: "arm64v8/alpine"
build_name: manywheel-py3_13-cuda-aarch64-12_8
build_environment: linux-aarch64-binary-manywheel
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.8.0.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.2.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux' and platform_machine == 'x86_64'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.8.0.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.3.14; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.41; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.2.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.7.53; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.0.11; platform_system == 'Linux' and platform_machine == 'x86_64'
timeout-minutes: 420
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -716,7 +716,7 @@ jobs:
ALPINE_IMAGE: "arm64v8/alpine"
build_name: manywheel-py3_13t-cuda-aarch64-12_8
build_environment: linux-aarch64-binary-manywheel
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.8.0.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.2.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux' and platform_machine == 'x86_64'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.8.0.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.3.14; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.41; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.2.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.7.53; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.0.11; platform_system == 'Linux' and platform_machine == 'x86_64'
timeout-minutes: 420
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}

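The PYTORCH_EXTRA_INSTALL_REQUIREMENTS values in the hunks above are long but regular: each segment is a pinned PEP 508 requirement with an environment marker, and the segments appear to be joined with " | " so the build tooling can split them again later. Taken on its own, one segment is an ordinary pip requirement, for example:

    pip install "nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64'"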
.github/workflows/generated-linux-binary-manywheel-main.yml (generated, vendored, 4 changes)
@@ -108,7 +108,7 @@ jobs:
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
build_name: manywheel-py3_9-cuda12_6
build_environment: linux-binary-manywheel
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.5.1.17; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.2.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux' and platform_machine == 'x86_64'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.5.1.17; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux' and platform_machine == 'x86_64'
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
manywheel-py3_9-cuda12_6-test: # Testing
@@ -155,7 +155,7 @@ jobs:
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
build_name: manywheel-py3_9-cuda12_8
build_environment: linux-binary-manywheel
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.8.0.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.2.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux' and platform_machine == 'x86_64'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.8.0.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.3.14; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.41; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.2.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.7.53; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.0.11; platform_system == 'Linux' and platform_machine == 'x86_64'
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
manywheel-py3_9-cuda12_8-test: # Testing

.github/workflows/generated-linux-binary-manywheel-nightly.yml (generated, vendored, 24 changes)
@@ -200,7 +200,7 @@ jobs:
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
build_name: manywheel-py3_9-cuda12_6
build_environment: linux-binary-manywheel
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.5.1.17; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.2.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux' and platform_machine == 'x86_64'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.5.1.17; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux' and platform_machine == 'x86_64'
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
manywheel-py3_9-cuda12_6-test: # Testing
@@ -269,7 +269,7 @@ jobs:
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
build_name: manywheel-py3_9-cuda12_8
build_environment: linux-binary-manywheel
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.8.0.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.2.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux' and platform_machine == 'x86_64'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.8.0.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.3.14; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.41; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.2.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.7.53; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.0.11; platform_system == 'Linux' and platform_machine == 'x86_64'
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
manywheel-py3_9-cuda12_8-test: # Testing
@@ -813,7 +813,7 @@ jobs:
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
build_name: manywheel-py3_10-cuda12_6
build_environment: linux-binary-manywheel
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.5.1.17; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.2.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux' and platform_machine == 'x86_64'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.5.1.17; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux' and platform_machine == 'x86_64'
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
manywheel-py3_10-cuda12_6-test: # Testing
@@ -882,7 +882,7 @@ jobs:
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
build_name: manywheel-py3_10-cuda12_8
build_environment: linux-binary-manywheel
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.8.0.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.2.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux' and platform_machine == 'x86_64'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.8.0.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.3.14; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.41; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.2.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.7.53; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.0.11; platform_system == 'Linux' and platform_machine == 'x86_64'
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
manywheel-py3_10-cuda12_8-test: # Testing
@@ -1426,7 +1426,7 @@ jobs:
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
build_name: manywheel-py3_11-cuda12_6
build_environment: linux-binary-manywheel
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.5.1.17; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.2.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.5.1.17; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
secrets:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
manywheel-py3_11-cuda12_6-test: # Testing
|
||||
@ -1563,7 +1563,7 @@ jobs:
|
||||
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
|
||||
build_name: manywheel-py3_11-cuda12_8
|
||||
build_environment: linux-binary-manywheel
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.8.0.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.2.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.8.0.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.3.14; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.41; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.2.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.7.53; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.0.11; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
secrets:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
manywheel-py3_11-cuda12_8-test: # Testing
|
||||
@ -2107,7 +2107,7 @@ jobs:
|
||||
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
|
||||
build_name: manywheel-py3_12-cuda12_6
|
||||
build_environment: linux-binary-manywheel
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.5.1.17; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.2.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.5.1.17; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
secrets:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
manywheel-py3_12-cuda12_6-test: # Testing
|
||||
@ -2176,7 +2176,7 @@ jobs:
|
||||
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
|
||||
build_name: manywheel-py3_12-cuda12_8
|
||||
build_environment: linux-binary-manywheel
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.8.0.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.2.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.8.0.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.3.14; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.41; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.2.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.7.53; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.0.11; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
secrets:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
manywheel-py3_12-cuda12_8-test: # Testing
|
||||
@ -2720,7 +2720,7 @@ jobs:
|
||||
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
|
||||
build_name: manywheel-py3_13-cuda12_6
|
||||
build_environment: linux-binary-manywheel
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.5.1.17; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.2.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.5.1.17; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
secrets:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
manywheel-py3_13-cuda12_6-test: # Testing
|
||||
@ -2789,7 +2789,7 @@ jobs:
|
||||
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
|
||||
build_name: manywheel-py3_13-cuda12_8
|
||||
build_environment: linux-binary-manywheel
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.8.0.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.2.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.8.0.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.3.14; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.41; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.2.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.7.53; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.0.11; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
secrets:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
manywheel-py3_13-cuda12_8-test: # Testing
|
||||
@ -3333,7 +3333,7 @@ jobs:
|
||||
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
|
||||
build_name: manywheel-py3_13t-cuda12_6
|
||||
build_environment: linux-binary-manywheel
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.5.1.17; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.2.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.5.1.17; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
secrets:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
manywheel-py3_13t-cuda12_6-test: # Testing
|
||||
@ -3402,7 +3402,7 @@ jobs:
|
||||
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
|
||||
build_name: manywheel-py3_13t-cuda12_8
|
||||
build_environment: linux-binary-manywheel
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.8.0.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvshmem-cu12==3.2.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.8.57; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.8.0.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.8.3.14; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.3.3.41; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.9.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.7.2.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.5.7.53; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.26.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.8.55; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.8.61; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufile-cu12==1.13.0.11; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
secrets:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
manywheel-py3_13t-cuda12_8-test: # Testing
40
.github/workflows/generated-windows-arm64-binary-libtorch-debug-nightly.yml
generated
vendored
@ -50,7 +50,7 @@ jobs:
|
||||
libtorch-cpu-shared-with-deps-debug-build:
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
needs: get-label-type
|
||||
runs-on: "windows-11-arm64-preview"
|
||||
runs-on: "windows-11-arm64"
|
||||
timeout-minutes: 300
|
||||
env:
|
||||
PYTORCH_ROOT: ${{ github.workspace }}/pytorch
|
||||
@ -77,12 +77,23 @@ jobs:
|
||||
run: |
|
||||
mkdir "%NIGHTLIES_PYTORCH_ROOT%"
|
||||
mkdir "%PYTORCH_FINAL_PACKAGE_DIR%"
|
||||
- name: Enable long paths
|
||||
- name: Git checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
path: "pytorch"
|
||||
- name: Bootstrap Build Tools
|
||||
shell: cmd
|
||||
run: |
|
||||
git config --system --get core.longpaths || echo "core.longpaths is not set, setting it now"
|
||||
git config --system core.longpaths true
|
||||
- name: Git checkout PyTorch
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_buildtools.bat"
|
||||
- name: Bootstrap Git
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_git.bat"
|
||||
- name: Remove Pytorch folder
|
||||
shell: cmd
|
||||
run: |
|
||||
rmdir /s /q "pytorch"
|
||||
- name: Git checkout PyTorch - recursive
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
path: "pytorch"
|
||||
@ -127,7 +138,7 @@ jobs:
|
||||
needs:
|
||||
- libtorch-cpu-shared-with-deps-debug-build
|
||||
- get-label-type
|
||||
runs-on: "windows-11-arm64-preview"
|
||||
runs-on: "windows-11-arm64"
|
||||
timeout-minutes: 300
|
||||
env:
|
||||
PYTORCH_ROOT: ${{ github.workspace }}/pytorch
|
||||
@ -149,11 +160,18 @@ jobs:
|
||||
echo BINARY_ENV_FILE=%RUNNER_TEMP%/env>> %GITHUB_ENV%
|
||||
echo PYTORCH_FINAL_PACKAGE_DIR=%RUNNER_TEMP%/artifacts>> %GITHUB_ENV%
|
||||
echo WIN_PACKAGE_WORK_DIR=%RUNNER_TEMP%>> %GITHUB_ENV%
|
||||
- name: Enable long paths
|
||||
- name: Git checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
path: "pytorch"
|
||||
- name: Populate binary env
|
||||
shell: cmd
|
||||
run: |
|
||||
git config --system --get core.longpaths || echo "core.longpaths is not set, setting it now"
|
||||
git config --system core.longpaths true
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_git.bat"
|
||||
- name: Remove Pytorch folder
|
||||
shell: cmd
|
||||
run: |
|
||||
rmdir /s /q "pytorch"
|
||||
- name: Git checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
@ -167,6 +185,10 @@ jobs:
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_python.bat"
|
||||
- name: Bootstrap Build Tools
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_buildtools.bat"
|
||||
- name: Bootstrap Rust
|
||||
shell: cmd
|
||||
run: |
40
.github/workflows/generated-windows-arm64-binary-libtorch-release-nightly.yml
generated
vendored
@ -50,7 +50,7 @@ jobs:
|
||||
libtorch-cpu-shared-with-deps-release-build:
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
needs: get-label-type
|
||||
runs-on: "windows-11-arm64-preview"
|
||||
runs-on: "windows-11-arm64"
|
||||
timeout-minutes: 300
|
||||
env:
|
||||
PYTORCH_ROOT: ${{ github.workspace }}/pytorch
|
||||
@ -77,12 +77,23 @@ jobs:
|
||||
run: |
|
||||
mkdir "%NIGHTLIES_PYTORCH_ROOT%"
|
||||
mkdir "%PYTORCH_FINAL_PACKAGE_DIR%"
|
||||
- name: Enable long paths
|
||||
- name: Git checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
path: "pytorch"
|
||||
- name: Bootstrap Build Tools
|
||||
shell: cmd
|
||||
run: |
|
||||
git config --system --get core.longpaths || echo "core.longpaths is not set, setting it now"
|
||||
git config --system core.longpaths true
|
||||
- name: Git checkout PyTorch
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_buildtools.bat"
|
||||
- name: Bootstrap Git
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_git.bat"
|
||||
- name: Remove Pytorch folder
|
||||
shell: cmd
|
||||
run: |
|
||||
rmdir /s /q "pytorch"
|
||||
- name: Git checkout PyTorch - recursive
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
path: "pytorch"
|
||||
@ -127,7 +138,7 @@ jobs:
|
||||
needs:
|
||||
- libtorch-cpu-shared-with-deps-release-build
|
||||
- get-label-type
|
||||
runs-on: "windows-11-arm64-preview"
|
||||
runs-on: "windows-11-arm64"
|
||||
timeout-minutes: 300
|
||||
env:
|
||||
PYTORCH_ROOT: ${{ github.workspace }}/pytorch
|
||||
@ -149,11 +160,18 @@ jobs:
|
||||
echo BINARY_ENV_FILE=%RUNNER_TEMP%/env>> %GITHUB_ENV%
|
||||
echo PYTORCH_FINAL_PACKAGE_DIR=%RUNNER_TEMP%/artifacts>> %GITHUB_ENV%
|
||||
echo WIN_PACKAGE_WORK_DIR=%RUNNER_TEMP%>> %GITHUB_ENV%
|
||||
- name: Enable long paths
|
||||
- name: Git checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
path: "pytorch"
|
||||
- name: Populate binary env
|
||||
shell: cmd
|
||||
run: |
|
||||
git config --system --get core.longpaths || echo "core.longpaths is not set, setting it now"
|
||||
git config --system core.longpaths true
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_git.bat"
|
||||
- name: Remove Pytorch folder
|
||||
shell: cmd
|
||||
run: |
|
||||
rmdir /s /q "pytorch"
|
||||
- name: Git checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
@ -167,6 +185,10 @@ jobs:
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_python.bat"
|
||||
- name: Bootstrap Build Tools
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_buildtools.bat"
|
||||
- name: Bootstrap Rust
|
||||
shell: cmd
|
||||
run: |
120
.github/workflows/generated-windows-arm64-binary-wheel-nightly.yml
generated
vendored
@ -50,7 +50,7 @@ jobs:
|
||||
wheel-py3_11-cpu-build:
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
needs: get-label-type
|
||||
runs-on: "windows-11-arm64-preview"
|
||||
runs-on: "windows-11-arm64"
|
||||
timeout-minutes: 300
|
||||
env:
|
||||
PYTORCH_ROOT: ${{ github.workspace }}/pytorch
|
||||
@ -73,12 +73,23 @@ jobs:
|
||||
run: |
|
||||
mkdir "%NIGHTLIES_PYTORCH_ROOT%"
|
||||
mkdir "%PYTORCH_FINAL_PACKAGE_DIR%"
|
||||
- name: Enable long paths
|
||||
- name: Git checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
path: "pytorch"
|
||||
- name: Bootstrap Build Tools
|
||||
shell: cmd
|
||||
run: |
|
||||
git config --system --get core.longpaths || echo "core.longpaths is not set, setting it now"
|
||||
git config --system core.longpaths true
|
||||
- name: Git checkout PyTorch
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_buildtools.bat"
|
||||
- name: Bootstrap Git
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_git.bat"
|
||||
- name: Remove Pytorch folder
|
||||
shell: cmd
|
||||
run: |
|
||||
rmdir /s /q "pytorch"
|
||||
- name: Git checkout PyTorch - recursive
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
path: "pytorch"
|
||||
@ -123,7 +134,7 @@ jobs:
|
||||
needs:
|
||||
- wheel-py3_11-cpu-build
|
||||
- get-label-type
|
||||
runs-on: "windows-11-arm64-preview"
|
||||
runs-on: "windows-11-arm64"
|
||||
timeout-minutes: 300
|
||||
env:
|
||||
PYTORCH_ROOT: ${{ github.workspace }}/pytorch
|
||||
@ -141,11 +152,18 @@ jobs:
|
||||
echo BINARY_ENV_FILE=%RUNNER_TEMP%/env>> %GITHUB_ENV%
|
||||
echo PYTORCH_FINAL_PACKAGE_DIR=%RUNNER_TEMP%/artifacts>> %GITHUB_ENV%
|
||||
echo WIN_PACKAGE_WORK_DIR=%RUNNER_TEMP%>> %GITHUB_ENV%
|
||||
- name: Enable long paths
|
||||
- name: Git checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
path: "pytorch"
|
||||
- name: Populate binary env
|
||||
shell: cmd
|
||||
run: |
|
||||
git config --system --get core.longpaths || echo "core.longpaths is not set, setting it now"
|
||||
git config --system core.longpaths true
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_git.bat"
|
||||
- name: Remove Pytorch folder
|
||||
shell: cmd
|
||||
run: |
|
||||
rmdir /s /q "pytorch"
|
||||
- name: Git checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
@ -159,6 +177,10 @@ jobs:
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_python.bat"
|
||||
- name: Bootstrap Build Tools
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_buildtools.bat"
|
||||
- name: Bootstrap Rust
|
||||
shell: cmd
|
||||
run: |
|
||||
@ -197,7 +219,7 @@ jobs:
|
||||
wheel-py3_12-cpu-build:
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
needs: get-label-type
|
||||
runs-on: "windows-11-arm64-preview"
|
||||
runs-on: "windows-11-arm64"
|
||||
timeout-minutes: 300
|
||||
env:
|
||||
PYTORCH_ROOT: ${{ github.workspace }}/pytorch
|
||||
@ -220,12 +242,23 @@ jobs:
|
||||
run: |
|
||||
mkdir "%NIGHTLIES_PYTORCH_ROOT%"
|
||||
mkdir "%PYTORCH_FINAL_PACKAGE_DIR%"
|
||||
- name: Enable long paths
|
||||
- name: Git checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
path: "pytorch"
|
||||
- name: Bootstrap Build Tools
|
||||
shell: cmd
|
||||
run: |
|
||||
git config --system --get core.longpaths || echo "core.longpaths is not set, setting it now"
|
||||
git config --system core.longpaths true
|
||||
- name: Git checkout PyTorch
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_buildtools.bat"
|
||||
- name: Bootstrap Git
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_git.bat"
|
||||
- name: Remove Pytorch folder
|
||||
shell: cmd
|
||||
run: |
|
||||
rmdir /s /q "pytorch"
|
||||
- name: Git checkout PyTorch - recursive
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
path: "pytorch"
|
||||
@ -270,7 +303,7 @@ jobs:
|
||||
needs:
|
||||
- wheel-py3_12-cpu-build
|
||||
- get-label-type
|
||||
runs-on: "windows-11-arm64-preview"
|
||||
runs-on: "windows-11-arm64"
|
||||
timeout-minutes: 300
|
||||
env:
|
||||
PYTORCH_ROOT: ${{ github.workspace }}/pytorch
|
||||
@ -288,11 +321,18 @@ jobs:
|
||||
echo BINARY_ENV_FILE=%RUNNER_TEMP%/env>> %GITHUB_ENV%
|
||||
echo PYTORCH_FINAL_PACKAGE_DIR=%RUNNER_TEMP%/artifacts>> %GITHUB_ENV%
|
||||
echo WIN_PACKAGE_WORK_DIR=%RUNNER_TEMP%>> %GITHUB_ENV%
|
||||
- name: Enable long paths
|
||||
- name: Git checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
path: "pytorch"
|
||||
- name: Populate binary env
|
||||
shell: cmd
|
||||
run: |
|
||||
git config --system --get core.longpaths || echo "core.longpaths is not set, setting it now"
|
||||
git config --system core.longpaths true
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_git.bat"
|
||||
- name: Remove Pytorch folder
|
||||
shell: cmd
|
||||
run: |
|
||||
rmdir /s /q "pytorch"
|
||||
- name: Git checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
@ -306,6 +346,10 @@ jobs:
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_python.bat"
|
||||
- name: Bootstrap Build Tools
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_buildtools.bat"
|
||||
- name: Bootstrap Rust
|
||||
shell: cmd
|
||||
run: |
|
||||
@ -344,7 +388,7 @@ jobs:
|
||||
wheel-py3_13-cpu-build:
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
needs: get-label-type
|
||||
runs-on: "windows-11-arm64-preview"
|
||||
runs-on: "windows-11-arm64"
|
||||
timeout-minutes: 300
|
||||
env:
|
||||
PYTORCH_ROOT: ${{ github.workspace }}/pytorch
|
||||
@ -367,12 +411,23 @@ jobs:
|
||||
run: |
|
||||
mkdir "%NIGHTLIES_PYTORCH_ROOT%"
|
||||
mkdir "%PYTORCH_FINAL_PACKAGE_DIR%"
|
||||
- name: Enable long paths
|
||||
- name: Git checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
path: "pytorch"
|
||||
- name: Bootstrap Build Tools
|
||||
shell: cmd
|
||||
run: |
|
||||
git config --system --get core.longpaths || echo "core.longpaths is not set, setting it now"
|
||||
git config --system core.longpaths true
|
||||
- name: Git checkout PyTorch
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_buildtools.bat"
|
||||
- name: Bootstrap Git
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_git.bat"
|
||||
- name: Remove Pytorch folder
|
||||
shell: cmd
|
||||
run: |
|
||||
rmdir /s /q "pytorch"
|
||||
- name: Git checkout PyTorch - recursive
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
path: "pytorch"
|
||||
@ -417,7 +472,7 @@ jobs:
|
||||
needs:
|
||||
- wheel-py3_13-cpu-build
|
||||
- get-label-type
|
||||
runs-on: "windows-11-arm64-preview"
|
||||
runs-on: "windows-11-arm64"
|
||||
timeout-minutes: 300
|
||||
env:
|
||||
PYTORCH_ROOT: ${{ github.workspace }}/pytorch
|
||||
@ -435,11 +490,18 @@ jobs:
|
||||
echo BINARY_ENV_FILE=%RUNNER_TEMP%/env>> %GITHUB_ENV%
|
||||
echo PYTORCH_FINAL_PACKAGE_DIR=%RUNNER_TEMP%/artifacts>> %GITHUB_ENV%
|
||||
echo WIN_PACKAGE_WORK_DIR=%RUNNER_TEMP%>> %GITHUB_ENV%
|
||||
- name: Enable long paths
|
||||
- name: Git checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
path: "pytorch"
|
||||
- name: Populate binary env
|
||||
shell: cmd
|
||||
run: |
|
||||
git config --system --get core.longpaths || echo "core.longpaths is not set, setting it now"
|
||||
git config --system core.longpaths true
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_git.bat"
|
||||
- name: Remove Pytorch folder
|
||||
shell: cmd
|
||||
run: |
|
||||
rmdir /s /q "pytorch"
|
||||
- name: Git checkout PyTorch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
@ -453,6 +515,10 @@ jobs:
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_python.bat"
|
||||
- name: Bootstrap Build Tools
|
||||
shell: cmd
|
||||
run: |
|
||||
"pytorch/.ci/pytorch/windows/arm64/bootstrap_buildtools.bat"
|
||||
- name: Bootstrap Rust
|
||||
shell: cmd
|
||||
run: |
53
.github/workflows/h100-distributed.yml
vendored
@ -1,53 +0,0 @@
|
||||
name: Limited CI for distributed tests on H100
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
paths:
|
||||
- .github/workflows/h100-distributed.yml
|
||||
workflow_dispatch:
|
||||
push:
|
||||
tags:
|
||||
- ciflow/h100-distributed/*
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
|
||||
get-label-type:
|
||||
if: github.repository_owner == 'pytorch'
|
||||
name: get-label-type
|
||||
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
|
||||
with:
|
||||
triggering_actor: ${{ github.triggering_actor }}
|
||||
issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
|
||||
curr_branch: ${{ github.head_ref || github.ref_name }}
|
||||
curr_ref_type: ${{ github.ref_type }}
|
||||
|
||||
linux-focal-cuda12_6-py3_10-gcc11-sm90-build:
|
||||
name: linux-focal-cuda12.6-py3.10-gcc11-sm90
|
||||
uses: ./.github/workflows/_linux-build.yml
|
||||
needs: get-label-type
|
||||
with:
|
||||
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
|
||||
runner: "linux.12xlarge"
|
||||
build-environment: linux-focal-cuda12.6-py3.10-gcc11-sm90
|
||||
docker-image-name: ci-image:pytorch-linux-focal-cuda12.6-cudnn9-py3-gcc11
|
||||
cuda-arch-list: '9.0'
|
||||
test-matrix: |
|
||||
{ include: [
|
||||
{ config: "h100_distributed", shard: 1, num_shards: 1, runner: "linux.aws.h100.8" },
|
||||
]}
|
||||
secrets: inherit
|
||||
|
||||
linux-focal-cuda12_6-py3_10-gcc11-sm90-test:
|
||||
name: linux-focal-cuda12.6-py3.10-gcc11-sm90
|
||||
uses: ./.github/workflows/_linux-test.yml
|
||||
needs:
|
||||
- linux-focal-cuda12_6-py3_10-gcc11-sm90-build
|
||||
with:
|
||||
build-environment: linux-focal-cuda12.6-py3.10-gcc11-sm90
|
||||
docker-image: ${{ needs.linux-focal-cuda12_6-py3_10-gcc11-sm90-build.outputs.docker-image }}
|
||||
test-matrix: ${{ needs.linux-focal-cuda12_6-py3_10-gcc11-sm90-build.outputs.test-matrix }}
|
||||
secrets: inherit
|
||||
20
.github/workflows/inductor-micro-benchmark.yml
vendored
@ -27,15 +27,15 @@ jobs:
|
||||
curr_ref_type: ${{ github.ref_type }}
|
||||
opt_out_experiments: lf
|
||||
|
||||
build:
|
||||
name: cuda12.8-py3.10-gcc9-sm80
|
||||
linux-focal-cuda12_6-py3_10-gcc9-inductor-micro-benchmark-build:
|
||||
name: cuda12.6-py3.10-gcc9-sm80
|
||||
uses: ./.github/workflows/_linux-build.yml
|
||||
needs:
|
||||
- get-default-label-prefix
|
||||
with:
|
||||
runner_prefix: "${{ needs.get-default-label-prefix.outputs.label-type }}"
|
||||
build-environment: linux-jammy-cuda12.8-py3.10-gcc9-sm80
|
||||
docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc9-inductor-benchmarks
|
||||
build-environment: linux-focal-cuda12.6-py3.10-gcc9-sm80
|
||||
docker-image-name: ci-image:pytorch-linux-focal-cuda12.6-cudnn9-py3-gcc9-inductor-benchmarks
|
||||
cuda-arch-list: '8.0'
|
||||
test-matrix: |
|
||||
{ include: [
|
||||
@ -43,13 +43,13 @@ jobs:
|
||||
]}
|
||||
secrets: inherit
|
||||
|
||||
test:
|
||||
name: cuda12.8-py3.10-gcc9-sm80
|
||||
linux-focal-cuda12_6-py3_10-gcc9-inductor-micro-benchmark-test:
|
||||
name: cuda12.6-py3.10-gcc9-sm80
|
||||
uses: ./.github/workflows/_linux-test.yml
|
||||
needs: build
|
||||
needs: linux-focal-cuda12_6-py3_10-gcc9-inductor-micro-benchmark-build
|
||||
with:
|
||||
build-environment: linux-jammy-cuda12.8-py3.10-gcc9-sm80
|
||||
docker-image: ${{ needs.build.outputs.docker-image }}
|
||||
test-matrix: ${{ needs.build.outputs.test-matrix }}
|
||||
build-environment: linux-focal-cuda12.6-py3.10-gcc9-sm80
|
||||
docker-image: ${{ needs.linux-focal-cuda12_6-py3_10-gcc9-inductor-micro-benchmark-build.outputs.docker-image }}
|
||||
test-matrix: ${{ needs.linux-focal-cuda12_6-py3_10-gcc9-inductor-micro-benchmark-build.outputs.test-matrix }}
|
||||
timeout-minutes: 720
|
||||
secrets: inherit
20
.github/workflows/inductor-perf-compare.yml
vendored
@ -24,15 +24,15 @@ jobs:
|
||||
curr_ref_type: ${{ github.ref_type }}
|
||||
opt_out_experiments: lf
|
||||
|
||||
build:
|
||||
name: cuda12.8-py3.10-gcc9-sm80
|
||||
linux-focal-cuda12_6-py3_10-gcc9-inductor-build:
|
||||
name: cuda12.6-py3.10-gcc9-sm80
|
||||
uses: ./.github/workflows/_linux-build.yml
|
||||
needs:
|
||||
- get-default-label-prefix
|
||||
with:
|
||||
runner_prefix: "${{ needs.get-default-label-prefix.outputs.label-type }}"
|
||||
build-environment: linux-jammy-cuda12.8-py3.10-gcc9-sm80
|
||||
docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc9-inductor-benchmarks
|
||||
build-environment: linux-focal-cuda12.6-py3.10-gcc9-sm80
|
||||
docker-image-name: ci-image:pytorch-linux-focal-cuda12.6-cudnn9-py3-gcc9-inductor-benchmarks
|
||||
cuda-arch-list: '8.0'
|
||||
test-matrix: |
|
||||
{ include: [
|
||||
@ -43,14 +43,14 @@ jobs:
|
||||
]}
|
||||
secrets: inherit
|
||||
|
||||
test:
|
||||
name: cuda12.8-py3.10-gcc9-sm80
|
||||
linux-focal-cuda12_6-py3_10-gcc9-inductor-test:
|
||||
name: cuda12.6-py3.10-gcc9-sm80
|
||||
uses: ./.github/workflows/_linux-test.yml
|
||||
needs: build
|
||||
needs: linux-focal-cuda12_6-py3_10-gcc9-inductor-build
|
||||
with:
|
||||
build-environment: linux-jammy-cuda12.8-py3.10-gcc9-sm80
|
||||
docker-image: ${{ needs.build.outputs.docker-image }}
|
||||
test-matrix: ${{ needs.build.outputs.test-matrix }}
|
||||
build-environment: linux-focal-cuda12.6-py3.10-gcc9-sm80
|
||||
docker-image: ${{ needs.linux-focal-cuda12_6-py3_10-gcc9-inductor-build.outputs.docker-image }}
|
||||
test-matrix: ${{ needs.linux-focal-cuda12_6-py3_10-gcc9-inductor-build.outputs.test-matrix }}
|
||||
# disable monitor in perf tests for more investigation
|
||||
disable-monitor: false
|
||||
monitor-log-interval: 15
|
||||
|
||||
@ -79,13 +79,13 @@ jobs:
|
||||
|
||||
# NB: Keep this in sync with trunk.yml
|
||||
build:
|
||||
name: cuda12.8-py3.10-gcc9-sm90
|
||||
name: cuda12.6-py3.10-gcc9-sm90
|
||||
uses: ./.github/workflows/_linux-build.yml
|
||||
needs: get-label-type
|
||||
with:
|
||||
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
|
||||
build-environment: linux-jammy-cuda12.8-py3.10-gcc9-sm90
|
||||
docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc9-inductor-benchmarks
|
||||
build-environment: linux-focal-cuda12.6-py3.10-gcc9-sm90
|
||||
docker-image-name: ci-image:pytorch-linux-focal-cuda12.6-cudnn9-py3-gcc9-inductor-benchmarks
|
||||
cuda-arch-list: '9.0'
|
||||
test-matrix: |
|
||||
{ include: [
|
||||
@ -111,12 +111,12 @@ jobs:
|
||||
secrets: inherit
|
||||
|
||||
test-nightly:
|
||||
name: cuda12.8-py3.10-gcc9-sm90
|
||||
name: cuda12.6-py3.10-gcc9-sm90
|
||||
uses: ./.github/workflows/_linux-test.yml
|
||||
needs: build
|
||||
if: github.event.schedule == '0 7 * * 1-6'
|
||||
with:
|
||||
build-environment: linux-jammy-cuda12.8-py3.10-gcc9-sm90
|
||||
build-environment: linux-focal-cuda12.6-py3.10-gcc9-sm90
|
||||
dashboard-tag: training-true-inference-true-default-true-dynamic-true-cudagraphs-true-cppwrapper-true-aotinductor-true-freezing_cudagraphs-true-cudagraphs_low_precision-true
|
||||
docker-image: ${{ needs.build.outputs.docker-image }}
|
||||
test-matrix: ${{ needs.build.outputs.test-matrix }}
|
||||
@ -128,12 +128,12 @@ jobs:
|
||||
secrets: inherit
|
||||
|
||||
test-weekly:
|
||||
name: cuda12.8-py3.10-gcc9-sm90
|
||||
name: cuda12.6-py3.10-gcc9-sm90
|
||||
uses: ./.github/workflows/_linux-test.yml
|
||||
needs: build
|
||||
if: github.event.schedule == '0 7 * * 0'
|
||||
with:
|
||||
build-environment: linux-jammy-cuda12.8-py3.10-gcc9-sm90
|
||||
build-environment: linux-focal-cuda12.6-py3.10-gcc9-sm90
|
||||
dashboard-tag: training-true-inference-true-default-true-dynamic-true-cudagraphs-true-cppwrapper-true-aotinductor-true-freezing_cudagraphs-true-maxautotune-true-freeze_autotune_cudagraphs-true-cudagraphs_low_precision-true
|
||||
docker-image: ${{ needs.build.outputs.docker-image }}
|
||||
test-matrix: ${{ needs.build.outputs.test-matrix }}
|
||||
@ -145,12 +145,12 @@ jobs:
|
||||
secrets: inherit
|
||||
|
||||
test:
|
||||
name: cuda12.8-py3.10-gcc9-sm90
|
||||
name: cuda12.6-py3.10-gcc9-sm90
|
||||
uses: ./.github/workflows/_linux-test.yml
|
||||
needs: build
|
||||
if: github.event_name == 'workflow_dispatch'
|
||||
with:
|
||||
build-environment: linux-jammy-cuda12.8-py3.10-gcc9-sm90
|
||||
build-environment: linux-focal-cuda12.6-py3.10-gcc9-sm90
|
||||
dashboard-tag: training-${{ inputs.training }}-inference-${{ inputs.inference }}-default-${{ inputs.default }}-dynamic-${{ inputs.dynamic }}-cudagraphs-${{ inputs.cudagraphs }}-cppwrapper-${{ inputs.cppwrapper }}-aotinductor-${{ inputs.aotinductor }}-maxautotune-${{ inputs.maxautotune }}-freezing_cudagraphs-${{ inputs.freezing_cudagraphs }}-cudagraphs_low_precision-${{ inputs.cudagraphs }}
|
||||
docker-image: ${{ needs.build.outputs.docker-image }}
|
||||
test-matrix: ${{ needs.build.outputs.test-matrix }}
@ -42,7 +42,7 @@ jobs:
|
||||
runner-type: macos-m1-stable
|
||||
build-generates-artifacts: true
|
||||
# To match the one pre-installed in the m1 runners
|
||||
python-version: 3.12.7
|
||||
python-version: 3.9.12
|
||||
test-matrix: |
|
||||
{ include: [
|
||||
{ config: "perf_smoketest", shard: 1, num_shards: 1, runner: "macos-m2-15" },
|
||||
@ -56,10 +56,8 @@ jobs:
|
||||
with:
|
||||
build-environment: macos-py3-arm64-distributed
|
||||
# Same as the build job
|
||||
python-version: 3.12.7
|
||||
python-version: 3.9.12
|
||||
test-matrix: ${{ needs.macos-perf-py3-arm64-build.outputs.test-matrix }}
|
||||
disable-monitor: false
|
||||
monitor-log-interval: 15
|
||||
monitor-data-collect-interval: 4
|
||||
|
||||
# disable monitor in perf tests for more investigation
|
||||
disable-monitor: true
|
||||
secrets: inherit
44
.github/workflows/inductor-perf-test-nightly.yml
vendored
@ -78,14 +78,14 @@ jobs:
|
||||
opt_out_experiments: lf
|
||||
|
||||
# NB: Keep this in sync with trunk.yml
|
||||
build:
|
||||
name: cuda12.8-py3.10-gcc9-sm80
|
||||
linux-focal-cuda12_6-py3_10-gcc9-inductor-build:
|
||||
name: cuda12.6-py3.10-gcc9-sm80
|
||||
uses: ./.github/workflows/_linux-build.yml
|
||||
needs: get-label-type
|
||||
with:
|
||||
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
|
||||
build-environment: linux-jammy-cuda12.8-py3.10-gcc9-sm80
|
||||
docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc9-inductor-benchmarks
|
||||
build-environment: linux-focal-cuda12.6-py3.10-gcc9-sm80
|
||||
docker-image-name: ci-image:pytorch-linux-focal-cuda12.6-cudnn9-py3-gcc9-inductor-benchmarks
|
||||
cuda-arch-list: '8.0'
|
||||
test-matrix: |
|
||||
{ include: [
|
||||
@ -112,32 +112,32 @@ jobs:
|
||||
selected-test-configs: ${{ inputs.benchmark_configs }}
|
||||
secrets: inherit
|
||||
|
||||
test-nightly:
|
||||
name: cuda12.8-py3.10-gcc9-sm80
|
||||
linux-focal-cuda12_6-py3_10-gcc9-inductor-test-nightly:
|
||||
name: cuda12.6-py3.10-gcc9-sm80
|
||||
uses: ./.github/workflows/_linux-test.yml
|
||||
needs: build
|
||||
needs: linux-focal-cuda12_6-py3_10-gcc9-inductor-build
|
||||
if: github.event.schedule == '0 7 * * 1-6'
|
||||
with:
|
||||
build-environment: linux-jammy-cuda12.8-py3.10-gcc9-sm80
|
||||
build-environment: linux-focal-cuda12.6-py3.10-gcc9-sm80
|
||||
dashboard-tag: training-true-inference-true-default-true-dynamic-true-cudagraphs-true-cppwrapper-true-aotinductor-true-freezing_cudagraphs-true-cudagraphs_low_precision-true
|
||||
docker-image: ${{ needs.build.outputs.docker-image }}
|
||||
test-matrix: ${{ needs.build.outputs.test-matrix }}
|
||||
docker-image: ${{ needs.linux-focal-cuda12_6-py3_10-gcc9-inductor-build.outputs.docker-image }}
|
||||
test-matrix: ${{ needs.linux-focal-cuda12_6-py3_10-gcc9-inductor-build.outputs.test-matrix }}
|
||||
timeout-minutes: 720
|
||||
disable-monitor: false
|
||||
monitor-log-interval: 15
|
||||
monitor-data-collect-interval: 4
|
||||
secrets: inherit
|
||||
|
||||
test-weekly:
|
||||
name: cuda12.8-py3.10-gcc9-sm80
|
||||
linux-focal-cuda12_6-py3_10-gcc9-inductor-test-weekly:
|
||||
name: cuda12.6-py3.10-gcc9-sm80
|
||||
uses: ./.github/workflows/_linux-test.yml
|
||||
needs: build
|
||||
needs: linux-focal-cuda12_6-py3_10-gcc9-inductor-build
|
||||
if: github.event.schedule == '0 7 * * 0'
|
||||
with:
|
||||
build-environment: linux-jammy-cuda12.8-py3.10-gcc9-sm80
|
||||
build-environment: linux-focal-cuda12.6-py3.10-gcc9-sm80
|
||||
dashboard-tag: training-true-inference-true-default-true-dynamic-true-cudagraphs-true-cppwrapper-true-aotinductor-true-freezing_cudagraphs-true-maxautotune-true-freeze_autotune_cudagraphs-true-cudagraphs_low_precision-true
|
||||
docker-image: ${{ needs.build.outputs.docker-image }}
|
||||
test-matrix: ${{ needs.build.outputs.test-matrix }}
|
||||
docker-image: ${{ needs.linux-focal-cuda12_6-py3_10-gcc9-inductor-build.outputs.docker-image }}
|
||||
test-matrix: ${{ needs.linux-focal-cuda12_6-py3_10-gcc9-inductor-build.outputs.test-matrix }}
|
||||
timeout-minutes: 1440
|
||||
# disable monitor in perf tests, next step is to enable it
|
||||
disable-monitor: false
|
||||
@ -145,16 +145,16 @@ jobs:
|
||||
monitor-data-collect-interval: 4
|
||||
secrets: inherit
|
||||
|
||||
test:
|
||||
name: cuda12.8-py3.10-gcc9-sm80
|
||||
linux-focal-cuda12_6-py3_10-gcc9-inductor-test:
|
||||
name: cuda12.6-py3.10-gcc9-sm80
|
||||
uses: ./.github/workflows/_linux-test.yml
|
||||
needs: build
|
||||
needs: linux-focal-cuda12_6-py3_10-gcc9-inductor-build
|
||||
if: github.event_name == 'workflow_dispatch'
|
||||
with:
|
||||
build-environment: linux-jammy-cuda12.8-py3.10-gcc9-sm80
|
||||
build-environment: linux-focal-cuda12.6-py3.10-gcc9-sm80
|
||||
dashboard-tag: training-${{ inputs.training }}-inference-${{ inputs.inference }}-default-${{ inputs.default }}-dynamic-${{ inputs.dynamic }}-cudagraphs-${{ inputs.cudagraphs }}-cppwrapper-${{ inputs.cppwrapper }}-aotinductor-${{ inputs.aotinductor }}-maxautotune-${{ inputs.maxautotune }}-freezing_cudagraphs-${{ inputs.freezing_cudagraphs }}-cudagraphs_low_precision-${{ inputs.cudagraphs }}
|
||||
docker-image: ${{ needs.build.outputs.docker-image }}
|
||||
test-matrix: ${{ needs.build.outputs.test-matrix }}
|
||||
docker-image: ${{ needs.linux-focal-cuda12_6-py3_10-gcc9-inductor-build.outputs.docker-image }}
|
||||
test-matrix: ${{ needs.linux-focal-cuda12_6-py3_10-gcc9-inductor-build.outputs.test-matrix }}
|
||||
timeout-minutes: 720
|
||||
disable-monitor: false
|
||||
monitor-log-interval: 15
62
.github/workflows/inductor-periodic.yml
vendored
@ -29,14 +29,14 @@ jobs:
|
||||
curr_ref_type: ${{ github.ref_type }}
|
||||
opt_out_experiments: lf
|
||||
|
||||
linux-jammy-cuda12_8-py3_10-gcc9-periodic-dynamo-benchmarks-build:
|
||||
name: cuda12.8-py3.10-gcc9-sm86-periodic-dynamo-benchmarks
|
||||
linux-focal-cuda12_6-py3_10-gcc9-periodic-dynamo-benchmarks-build:
|
||||
name: cuda12.6-py3.10-gcc9-sm86-periodic-dynamo-benchmarks
|
||||
uses: ./.github/workflows/_linux-build.yml
|
||||
needs: get-default-label-prefix
|
||||
with:
|
||||
runner_prefix: "${{ needs.get-default-label-prefix.outputs.label-type }}"
|
||||
build-environment: linux-jammy-cuda12.8-py3.10-gcc9-sm86
|
||||
docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc9-inductor-benchmarks
|
||||
build-environment: linux-focal-cuda12.6-py3.10-gcc9-sm86
|
||||
docker-image-name: ci-image:pytorch-linux-focal-cuda12.6-cudnn9-py3-gcc9-inductor-benchmarks
|
||||
cuda-arch-list: '8.6'
|
||||
test-matrix: |
|
||||
{ include: [
|
||||
@ -58,14 +58,14 @@ jobs:
|
||||
]}
|
||||
secrets: inherit
|
||||
|
||||
linux-jammy-cuda12_8-py3_10-gcc9-periodic-dynamo-benchmarks-test:
|
||||
name: cuda12.8-py3.10-gcc9-sm86-periodic-dynamo-benchmarks
|
||||
linux-focal-cuda12_6-py3_10-gcc9-periodic-dynamo-benchmarks-test:
|
||||
name: cuda12.6-py3.10-gcc9-sm86-periodic-dynamo-benchmarks
|
||||
uses: ./.github/workflows/_linux-test.yml
|
||||
needs: linux-jammy-cuda12_8-py3_10-gcc9-periodic-dynamo-benchmarks-build
|
||||
needs: linux-focal-cuda12_6-py3_10-gcc9-periodic-dynamo-benchmarks-build
|
||||
with:
|
||||
build-environment: linux-jammy-cuda12.8-py3.10-gcc9-sm86
|
||||
docker-image: ${{ needs.linux-jammy-cuda12_8-py3_10-gcc9-periodic-dynamo-benchmarks-build.outputs.docker-image }}
|
||||
test-matrix: ${{ needs.linux-jammy-cuda12_8-py3_10-gcc9-periodic-dynamo-benchmarks-build.outputs.test-matrix }}
|
||||
build-environment: linux-focal-cuda12.6-py3.10-gcc9-sm86
|
||||
docker-image: ${{ needs.linux-focal-cuda12_6-py3_10-gcc9-periodic-dynamo-benchmarks-build.outputs.docker-image }}
|
||||
test-matrix: ${{ needs.linux-focal-cuda12_6-py3_10-gcc9-periodic-dynamo-benchmarks-build.outputs.test-matrix }}
|
||||
secrets: inherit
|
||||
|
||||
linux-jammy-rocm-py3_10-periodic-dynamo-benchmarks-build:
|
||||
@ -109,15 +109,15 @@ jobs:
|
||||
test-matrix: ${{ needs.linux-jammy-rocm-py3_10-periodic-dynamo-benchmarks-build.outputs.test-matrix }}
|
||||
secrets: inherit
|
||||
|
||||
linux-jammy-cuda12_8-py3_10-gcc9-inductor-smoke-build:
|
||||
name: cuda12.8-py3.10-gcc9-sm80
|
||||
linux-focal-cuda12_6-py3_10-gcc9-inductor-build-gcp:
|
||||
name: cuda12.6-py3.10-gcc9-sm80
|
||||
uses: ./.github/workflows/_linux-build.yml
|
||||
needs:
|
||||
- get-default-label-prefix
|
||||
with:
|
||||
runner_prefix: "${{ needs.get-default-label-prefix.outputs.label-type }}"
|
||||
build-environment: linux-jammy-cuda12.8-py3.10-gcc9-sm80
|
||||
docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc9-inductor-benchmarks
|
||||
build-environment: linux-focal-cuda12.6-py3.10-gcc9-sm80
|
||||
docker-image-name: ci-image:pytorch-linux-focal-cuda12.6-cudnn9-py3-gcc9-inductor-benchmarks
|
||||
cuda-arch-list: '8.0'
|
||||
test-matrix: |
|
||||
{ include: [
|
||||
@ -125,14 +125,14 @@ jobs:
|
||||
]}
|
||||
secrets: inherit
|
||||
|
||||
linux-jammy-cuda12_8-py3_10-gcc9-inductor-smoke-test:
|
||||
name: cuda12.8-py3.10-gcc9-sm80
|
||||
linux-focal-cuda12_6-py3_10-gcc9-inductor-test-gcp:
|
||||
name: cuda12.6-py3.10-gcc9-sm80
|
||||
uses: ./.github/workflows/_linux-test.yml
|
||||
needs: linux-jammy-cuda12_8-py3_10-gcc9-inductor-smoke-build
|
||||
needs: linux-focal-cuda12_6-py3_10-gcc9-inductor-build-gcp
|
||||
with:
|
||||
build-environment: linux-jammy-cuda12.8-py3.10-gcc9-sm80
|
||||
docker-image: ${{ needs.linux-jammy-cuda12_8-py3_10-gcc9-inductor-smoke-build.outputs.docker-image }}
|
||||
test-matrix: ${{ needs.linux-jammy-cuda12_8-py3_10-gcc9-inductor-smoke-build.outputs.test-matrix }}
|
||||
build-environment: linux-focal-cuda12.6-py3.10-gcc9-sm80
|
||||
docker-image: ${{ needs.linux-focal-cuda12_6-py3_10-gcc9-inductor-build-gcp.outputs.docker-image }}
|
||||
test-matrix: ${{ needs.linux-focal-cuda12_6-py3_10-gcc9-inductor-build-gcp.outputs.test-matrix }}
|
||||
secrets: inherit
|
||||
|
||||
linux-jammy-cpu-py3_9-gcc11-periodic-dynamo-benchmarks-build:
|
||||
@ -170,16 +170,16 @@ jobs:
|
||||
secrets: inherit
|
||||
|
||||
|
||||
linux-jammy-cuda12_8-py3_10-gcc9-inductor-build:
|
||||
name: cuda12.8-py3.10-gcc9-sm86
|
||||
linux-focal-cuda12_6-py3_10-gcc9-inductor-build:
|
||||
name: cuda12.6-py3.10-gcc9-sm86
|
||||
uses: ./.github/workflows/_linux-build.yml
|
||||
needs: get-default-label-prefix
|
||||
with:
|
||||
build-environment: linux-jammy-cuda12.8-py3.10-gcc9-sm86
|
||||
docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc9-inductor-benchmarks
|
||||
build-environment: linux-focal-cuda12.6-py3.10-gcc9-sm86
|
||||
docker-image-name: ci-image:pytorch-linux-focal-cuda12.6-cudnn9-py3-gcc9-inductor-benchmarks
|
||||
cuda-arch-list: '8.6'
|
||||
runner_prefix: "${{ needs.get-default-label-prefix.outputs.label-type }}"
|
||||
sync-tag: linux-jammy-cuda12_8-py3_10-gcc9-inductor-build
|
||||
sync-tag: linux-focal-cuda12_6-py3_10-gcc9-inductor-build
|
||||
test-matrix: |
|
||||
{ include: [
|
||||
{ config: "dynamic_inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
|
||||
@ -195,14 +195,14 @@ jobs:
|
||||
]}
|
||||
secrets: inherit
|
||||
|
||||
linux-jammy-cuda12_8-py3_10-gcc9-inductor-test:
|
||||
name: cuda12.8-py3.10-gcc9-sm86
|
||||
linux-focal-cuda12_6-py3_10-gcc9-inductor-test:
|
||||
name: cuda12.6-py3.10-gcc9-sm86
|
||||
uses: ./.github/workflows/_linux-test.yml
|
||||
needs: linux-jammy-cuda12_8-py3_10-gcc9-inductor-build
|
||||
needs: linux-focal-cuda12_6-py3_10-gcc9-inductor-build
|
||||
with:
|
||||
build-environment: linux-jammy-cuda12.8-py3.10-gcc9-sm86
|
||||
docker-image: ${{ needs.linux-jammy-cuda12_8-py3_10-gcc9-inductor-build.outputs.docker-image }}
|
||||
test-matrix: ${{ needs.linux-jammy-cuda12_8-py3_10-gcc9-inductor-build.outputs.test-matrix }}
|
||||
build-environment: linux-focal-cuda12.6-py3.10-gcc9-sm86
|
||||
docker-image: ${{ needs.linux-focal-cuda12_6-py3_10-gcc9-inductor-build.outputs.docker-image }}
|
||||
test-matrix: ${{ needs.linux-focal-cuda12_6-py3_10-gcc9-inductor-build.outputs.test-matrix }}
|
||||
secrets: inherit
|
||||
|
||||
linux-jammy-cpu-py3_9-gcc11-inductor-build:
8
.github/workflows/inductor-rocm-mi300.yml
vendored
@ -38,12 +38,12 @@ jobs:
|
||||
opt_out_experiments: lf
|
||||
|
||||
linux-jammy-rocm-py3_10-inductor-build:
|
||||
name: rocm-py3.10-inductor-mi300
|
||||
name: rocm-py3.10-inductor
|
||||
uses: ./.github/workflows/_linux-build.yml
|
||||
needs: get-label-type
|
||||
with:
|
||||
runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
|
||||
build-environment: linux-jammy-rocm-py3.10-mi300
|
||||
build-environment: linux-jammy-rocm-py3.10
|
||||
docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3
|
||||
test-matrix: |
|
||||
{ include: [
|
||||
@ -56,11 +56,11 @@ jobs:
|
||||
permissions:
|
||||
id-token: write
|
||||
contents: read
|
||||
name: rocm-py3.10-inductor-mi300
|
||||
name: rocm-py3.10-inductor
|
||||
uses: ./.github/workflows/_rocm-test.yml
|
||||
needs: linux-jammy-rocm-py3_10-inductor-build
|
||||
with:
|
||||
build-environment: linux-jammy-rocm-py3.10-mi300
|
||||
build-environment: linux-jammy-rocm-py3.10
|
||||
docker-image: ${{ needs.linux-jammy-rocm-py3_10-inductor-build.outputs.docker-image }}
|
||||
test-matrix: ${{ needs.linux-jammy-rocm-py3_10-inductor-build.outputs.test-matrix }}
|
||||
secrets: inherit
|
||||
|
1
.github/workflows/inductor-rocm.yml
vendored
@ -43,7 +43,6 @@ jobs:
|
||||
{ config: "inductor", shard: 1, num_shards: 2, runner: "linux.rocm.gpu.2" },
|
||||
{ config: "inductor", shard: 2, num_shards: 2, runner: "linux.rocm.gpu.2" },
|
||||
]}
|
||||
allow-reuse-old-whl: true
|
||||
secrets: inherit
|
||||
|
||||
linux-jammy-rocm-py3_10-inductor-test: