Compare commits


1 Commit

Author SHA1 Message Date
638dcd6039 Automated submodule update: tensorpipe 2025-08-15 12:25:50 -07:00
1716 changed files with 44995 additions and 96524 deletions

View File

@@ -1,15 +0,0 @@
version: 1
paths:
  include:
    - "**/*.py"
  exclude:
    - ".*"
    - ".*/**"
    - "**/.*/**"
    - "**/.*"
    - "**/_*/**"
    - "**/_*.py"
    - "**/test/**"
    - "**/benchmarks/**"
    - "**/test_*.py"
    - "**/*_test.py"

View File

@@ -3,18 +3,8 @@ set -eux -o pipefail
 GPU_ARCH_VERSION=${GPU_ARCH_VERSION:-}
-# Set CUDA architecture lists to match x86 build_cuda.sh
-if [[ "$GPU_ARCH_VERSION" == *"12.6"* ]]; then
-    export TORCH_CUDA_ARCH_LIST="8.0;9.0"
-elif [[ "$GPU_ARCH_VERSION" == *"12.8"* ]]; then
+if [[ "$GPU_ARCH_VERSION" == *"12.9"* ]]; then
     export TORCH_CUDA_ARCH_LIST="8.0;9.0;10.0;12.0"
-elif [[ "$GPU_ARCH_VERSION" == *"13.0"* ]]; then
-    export TORCH_CUDA_ARCH_LIST="8.0;9.0;10.0;11.0;12.0+PTX"
-fi
-# Compress the fatbin with -compress-mode=size for CUDA 13
-if [[ "$DESIRED_CUDA" == *"13"* ]]; then
-    export TORCH_NVCC_FLAGS="-compress-mode=size"
 fi
 SCRIPTPATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"

@@ -28,7 +18,7 @@ cd /
 # on the mounted pytorch repo
 git config --global --add safe.directory /pytorch
 pip install -r /pytorch/requirements.txt
-pip install auditwheel==6.2.0 wheel
+pip install auditwheel==6.2.0
 if [ "$DESIRED_CUDA" = "cpu" ]; then
     echo "BASE_CUDA_VERSION is not set. Building cpu wheel."
     #USE_PRIORITIZED_TEXT_FOR_LD for enable linker script optimization https://github.com/pytorch/pytorch/pull/121975/files

@@ -36,16 +26,6 @@ if [ "$DESIRED_CUDA" = "cpu" ]; then
 else
     echo "BASE_CUDA_VERSION is set to: $DESIRED_CUDA"
     export USE_SYSTEM_NCCL=1
-    # Check if we should use NVIDIA libs from PyPI (similar to x86 build_cuda.sh logic)
-    if [[ -z "$PYTORCH_EXTRA_INSTALL_REQUIREMENTS" ]]; then
-        echo "Bundling CUDA libraries with wheel for aarch64."
-    else
-        echo "Using nvidia libs from pypi for aarch64."
-        echo "Updated PYTORCH_EXTRA_INSTALL_REQUIREMENTS for aarch64: $PYTORCH_EXTRA_INSTALL_REQUIREMENTS"
-        export USE_NVIDIA_PYPI_LIBS=1
-    fi
     #USE_PRIORITIZED_TEXT_FOR_LD for enable linker script optimization https://github.com/pytorch/pytorch/pull/121975/files
     USE_PRIORITIZED_TEXT_FOR_LD=1 python /pytorch/.ci/aarch64_linux/aarch64_wheel_ci_build.py --enable-mkldnn --enable-cuda
 fi
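
For reference, the removed block maps a CUDA version substring in GPU_ARCH_VERSION to a semicolon-separated TORCH_CUDA_ARCH_LIST. A minimal Python sketch of the same mapping, useful for reasoning about which gencode list a given build selects (the function name is made up, not part of the CI scripts):

def arch_list_for(gpu_arch_version: str) -> str:
    # Mapping copied from the removed shell block; 13.0 adds 11.0 and +PTX.
    if "12.6" in gpu_arch_version:
        return "8.0;9.0"
    if "12.8" in gpu_arch_version:
        return "8.0;9.0;10.0;12.0"
    if "13.0" in gpu_arch_version:
        return "8.0;9.0;10.0;11.0;12.0+PTX"
    return ""

assert arch_list_for("cuda12.8") == "8.0;9.0;10.0;12.0"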

View File

@@ -69,137 +69,29 @@ def replace_tag(filename) -> None:
     f.writelines(lines)

-def patch_library_rpath(
-    folder: str,
-    lib_name: str,
-    use_nvidia_pypi_libs: bool = False,
-    desired_cuda: str = "",
-) -> None:
-    """Apply patchelf to set RPATH for a library in torch/lib"""
-    lib_path = f"{folder}/tmp/torch/lib/{lib_name}"
-    if use_nvidia_pypi_libs:
-        # For PyPI NVIDIA libraries, construct CUDA RPATH
-        cuda_rpaths = [
-            "$ORIGIN/../../nvidia/cudnn/lib",
-            "$ORIGIN/../../nvidia/nvshmem/lib",
-            "$ORIGIN/../../nvidia/nccl/lib",
-            "$ORIGIN/../../nvidia/cusparselt/lib",
-        ]
-        if "130" in desired_cuda:
-            cuda_rpaths.append("$ORIGIN/../../nvidia/cu13/lib")
-        else:
-            cuda_rpaths.extend(
-                [
-                    "$ORIGIN/../../nvidia/cublas/lib",
-                    "$ORIGIN/../../nvidia/cuda_cupti/lib",
-                    "$ORIGIN/../../nvidia/cuda_nvrtc/lib",
-                    "$ORIGIN/../../nvidia/cuda_runtime/lib",
-                    "$ORIGIN/../../nvidia/cufft/lib",
-                    "$ORIGIN/../../nvidia/curand/lib",
-                    "$ORIGIN/../../nvidia/cusolver/lib",
-                    "$ORIGIN/../../nvidia/cusparse/lib",
-                    "$ORIGIN/../../nvidia/nvtx/lib",
-                    "$ORIGIN/../../nvidia/cufile/lib",
-                ]
-            )
-        # Add $ORIGIN for local torch libs
-        rpath = ":".join(cuda_rpaths) + ":$ORIGIN"
-    else:
-        # For bundled libraries, just use $ORIGIN
-        rpath = "$ORIGIN"
-    if os.path.exists(lib_path):
-        os.system(
-            f"cd {folder}/tmp/torch/lib/; "
-            f"patchelf --set-rpath '{rpath}' --force-rpath {lib_name}"
-        )
-
-def copy_and_patch_library(
-    src_path: str,
-    folder: str,
-    use_nvidia_pypi_libs: bool = False,
-    desired_cuda: str = "",
-) -> None:
-    """Copy a library to torch/lib and patch its RPATH"""
-    if os.path.exists(src_path):
-        lib_name = os.path.basename(src_path)
-        shutil.copy2(src_path, f"{folder}/tmp/torch/lib/{lib_name}")
-        patch_library_rpath(folder, lib_name, use_nvidia_pypi_libs, desired_cuda)
-
 def package_cuda_wheel(wheel_path, desired_cuda) -> None:
     """
     Package the cuda wheel libraries
     """
     folder = os.path.dirname(wheel_path)
+    wheelname = os.path.basename(wheel_path)
     os.mkdir(f"{folder}/tmp")
     os.system(f"unzip {wheel_path} -d {folder}/tmp")
-    # Delete original wheel since it will be repackaged
-    os.system(f"rm {wheel_path}")
-    # Check if we should use PyPI NVIDIA libraries or bundle system libraries
-    use_nvidia_pypi_libs = os.getenv("USE_NVIDIA_PYPI_LIBS", "0") == "1"
-    if use_nvidia_pypi_libs:
-        print("Using nvidia libs from pypi - skipping CUDA library bundling")
-        # For PyPI approach, we don't bundle CUDA libraries - they come from PyPI packages
-        # We only need to bundle non-NVIDIA libraries
-        minimal_libs_to_copy = [
-            "/lib64/libgomp.so.1",
-            "/usr/lib64/libgfortran.so.5",
-            "/acl/build/libarm_compute.so",
-            "/acl/build/libarm_compute_graph.so",
-            "/usr/local/lib/libnvpl_lapack_lp64_gomp.so.0",
-            "/usr/local/lib/libnvpl_blas_lp64_gomp.so.0",
-            "/usr/local/lib/libnvpl_lapack_core.so.0",
-            "/usr/local/lib/libnvpl_blas_core.so.0",
-        ]
-        # Copy minimal libraries to unzipped_folder/torch/lib
-        for lib_path in minimal_libs_to_copy:
-            copy_and_patch_library(lib_path, folder, use_nvidia_pypi_libs, desired_cuda)
-        # Patch torch libraries used for searching libraries
-        torch_libs_to_patch = [
-            "libtorch.so",
-            "libtorch_cpu.so",
-            "libtorch_cuda.so",
-            "libtorch_cuda_linalg.so",
-            "libtorch_global_deps.so",
-            "libtorch_python.so",
-            "libtorch_nvshmem.so",
-            "libc10.so",
-            "libc10_cuda.so",
-            "libcaffe2_nvrtc.so",
-            "libshm.so",
-        ]
-        for lib_name in torch_libs_to_patch:
-            patch_library_rpath(folder, lib_name, use_nvidia_pypi_libs, desired_cuda)
-    else:
-        print("Bundling CUDA libraries with wheel")
-        # Original logic for bundling system CUDA libraries
-        # Common libraries for all CUDA versions
-        common_libs = [
-        # Non-NVIDIA system libraries
-        "/lib64/libgomp.so.1",
-        "/usr/lib64/libgfortran.so.5",
-        "/acl/build/libarm_compute.so",
-        "/acl/build/libarm_compute_graph.so",
-        # Common CUDA libraries (same for all versions)
-        "/usr/local/lib/libnvpl_lapack_lp64_gomp.so.0",
-        "/usr/local/lib/libnvpl_blas_lp64_gomp.so.0",
-        "/usr/local/lib/libnvpl_lapack_core.so.0",
-        "/usr/local/lib/libnvpl_blas_core.so.0",
+    libs_to_copy = [
+        "/usr/local/cuda/extras/CUPTI/lib64/libcupti.so.12",
         "/usr/local/cuda/extras/CUPTI/lib64/libnvperf_host.so",
         "/usr/local/cuda/lib64/libcudnn.so.9",
+        "/usr/local/cuda/lib64/libcublas.so.12",
+        "/usr/local/cuda/lib64/libcublasLt.so.12",
+        "/usr/local/cuda/lib64/libcudart.so.12",
+        "/usr/local/cuda/lib64/libcufft.so.11",
+        "/usr/local/cuda/lib64/libcusparse.so.12",
         "/usr/local/cuda/lib64/libcusparseLt.so.0",
+        "/usr/local/cuda/lib64/libcusolver.so.11",
         "/usr/local/cuda/lib64/libcurand.so.10",
         "/usr/local/cuda/lib64/libnccl.so.2",
-        "/usr/local/cuda/lib64/libnvshmem_host.so.3",
+        "/usr/local/cuda/lib64/libnvJitLink.so.12",
+        "/usr/local/cuda/lib64/libnvrtc.so.12",
         "/usr/local/cuda/lib64/libcudnn_adv.so.9",
         "/usr/local/cuda/lib64/libcudnn_cnn.so.9",
         "/usr/local/cuda/lib64/libcudnn_graph.so.9",

@@ -207,48 +99,31 @@ def package_cuda_wheel(wheel_path, desired_cuda) -> None:
         "/usr/local/cuda/lib64/libcudnn_engines_runtime_compiled.so.9",
         "/usr/local/cuda/lib64/libcudnn_engines_precompiled.so.9",
         "/usr/local/cuda/lib64/libcudnn_heuristic.so.9",
+        "/lib64/libgomp.so.1",
+        "/usr/lib64/libgfortran.so.5",
+        "/acl/build/libarm_compute.so",
+        "/acl/build/libarm_compute_graph.so",
+        "/usr/local/lib/libnvpl_lapack_lp64_gomp.so.0",
+        "/usr/local/lib/libnvpl_blas_lp64_gomp.so.0",
+        "/usr/local/lib/libnvpl_lapack_core.so.0",
+        "/usr/local/lib/libnvpl_blas_core.so.0",
+    ]
+    if "129" in desired_cuda:
+        libs_to_copy += [
+            "/usr/local/cuda/lib64/libnvrtc-builtins.so.12.9",
             "/usr/local/cuda/lib64/libcufile.so.0",
             "/usr/local/cuda/lib64/libcufile_rdma.so.1",
-            "/usr/local/cuda/lib64/libcusparse.so.12",
         ]
-        # CUDA version-specific libraries
-        if "13" in desired_cuda:
-            minor_version = desired_cuda[-1]
-            version_specific_libs = [
-                "/usr/local/cuda/extras/CUPTI/lib64/libcupti.so.13",
-                "/usr/local/cuda/lib64/libcublas.so.13",
-                "/usr/local/cuda/lib64/libcublasLt.so.13",
-                "/usr/local/cuda/lib64/libcudart.so.13",
-                "/usr/local/cuda/lib64/libcufft.so.12",
-                "/usr/local/cuda/lib64/libcusolver.so.12",
-                "/usr/local/cuda/lib64/libnvJitLink.so.13",
-                "/usr/local/cuda/lib64/libnvrtc.so.13",
-                f"/usr/local/cuda/lib64/libnvrtc-builtins.so.13.{minor_version}",
-            ]
-        elif "12" in desired_cuda:
-            # Get the last character for libnvrtc-builtins version (e.g., "129" -> "9")
-            minor_version = desired_cuda[-1]
-            version_specific_libs = [
-                "/usr/local/cuda/extras/CUPTI/lib64/libcupti.so.12",
-                "/usr/local/cuda/lib64/libcublas.so.12",
-                "/usr/local/cuda/lib64/libcublasLt.so.12",
-                "/usr/local/cuda/lib64/libcudart.so.12",
-                "/usr/local/cuda/lib64/libcufft.so.11",
-                "/usr/local/cuda/lib64/libcusolver.so.11",
-                "/usr/local/cuda/lib64/libnvJitLink.so.12",
-                "/usr/local/cuda/lib64/libnvrtc.so.12",
-                f"/usr/local/cuda/lib64/libnvrtc-builtins.so.12.{minor_version}",
-            ]
-        else:
-            raise ValueError(f"Unsupported CUDA version: {desired_cuda}.")
-        # Combine all libraries
-        libs_to_copy = common_libs + version_specific_libs
-        # Copy libraries to unzipped_folder/torch/lib
+    # Copy libraries to unzipped_folder/a/lib
     for lib_path in libs_to_copy:
-        copy_and_patch_library(lib_path, folder, use_nvidia_pypi_libs, desired_cuda)
+        lib_name = os.path.basename(lib_path)
+        shutil.copy2(lib_path, f"{folder}/tmp/torch/lib/{lib_name}")
+        os.system(
+            f"cd {folder}/tmp/torch/lib/; "
+            f"patchelf --set-rpath '$ORIGIN' --force-rpath {folder}/tmp/torch/lib/{lib_name}"
+        )

     # Make sure the wheel is tagged with manylinux_2_28
     for f in os.scandir(f"{folder}/tmp/"):

@@ -256,8 +131,14 @@ def package_cuda_wheel(wheel_path, desired_cuda) -> None:
             replace_tag(f"{f.path}/WHEEL")
             break

-    os.system(f"wheel pack {folder}/tmp/ -d {folder}")
-    os.system(f"rm -rf {folder}/tmp/")
+    os.mkdir(f"{folder}/cuda_wheel")
+    os.system(f"cd {folder}/tmp/; zip -r {folder}/cuda_wheel/{wheelname} *")
+    shutil.move(
+        f"{folder}/cuda_wheel/{wheelname}",
+        f"{folder}/{wheelname}",
+        copy_function=shutil.copy2,
+    )
+    os.system(f"rm -rf {folder}/tmp/ {folder}/cuda_wheel/")

 def complete_wheel(folder: str) -> str:

@@ -280,7 +161,14 @@ def complete_wheel(folder: str) -> str:
             f"/{folder}/dist/{repaired_wheel_name}",
         )
     else:
-        repaired_wheel_name = list_dir(f"/{folder}/dist")[0]
+        repaired_wheel_name = wheel_name.replace(
+            "linux_aarch64", "manylinux_2_28_aarch64"
+        )
+        print(f"Renaming {wheel_name} wheel to {repaired_wheel_name}")
+        os.rename(
+            f"/{folder}/dist/{wheel_name}",
+            f"/{folder}/dist/{repaired_wheel_name}",
+        )

     print(f"Copying {repaired_wheel_name} to artifacts")
     shutil.copy2(

@@ -321,16 +209,8 @@ if __name__ == "__main__":
     # MAX_JOB=5 is not required for CPU backend (see commit 465d98b)
     if enable_cuda:
         build_vars += "MAX_JOBS=5 "
-        # Handle PyPI NVIDIA libraries vs bundled libraries
-        use_nvidia_pypi_libs = os.getenv("USE_NVIDIA_PYPI_LIBS", "0") == "1"
-        if use_nvidia_pypi_libs:
-            print("Configuring build for PyPI NVIDIA libraries")
-            # Configure for dynamic linking (matching x86 logic)
-            build_vars += "ATEN_STATIC_CUDA=0 USE_CUDA_STATIC_LINK=0 USE_CUPTI_SO=1 "
-        else:
-            print("Configuring build for bundled NVIDIA libraries")
-            # Keep existing static linking approach - already configured above
+        # nvshmem is broken for aarch64 see https://github.com/pytorch/pytorch/issues/160425
+        build_vars += "USE_NVSHMEM=OFF "

     override_package_version = os.getenv("OVERRIDE_PACKAGE_VERSION")
     desired_cuda = os.getenv("DESIRED_CUDA")
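
Both sides of this file rely on patchelf to rewrite each bundled library's RPATH so siblings resolve via $ORIGIN. A small sketch of how the resulting value can be inspected after repacking, assuming patchelf is on PATH (the helper name and example path are illustrative, not part of the script):

import subprocess

def print_rpath(lib_path: str) -> str:
    # patchelf --print-rpath reports the DT_RPATH/DT_RUNPATH entry of an ELF file
    result = subprocess.run(
        ["patchelf", "--print-rpath", lib_path],
        check=True, capture_output=True, text=True,
    )
    return result.stdout.strip()

# A bundled library is expected to resolve siblings via $ORIGIN, e.g.:
# print_rpath("dist/tmp/torch/lib/libnccl.so.2") -> "$ORIGIN"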

View File

@@ -64,10 +64,6 @@ FROM cuda as cuda12.9
 RUN bash ./install_cuda.sh 12.9
 ENV DESIRED_CUDA=12.9

-FROM cuda as cuda13.0
-RUN bash ./install_cuda.sh 13.0
-ENV DESIRED_CUDA=13.0
-
 FROM ${ROCM_IMAGE} as rocm
 ENV PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
 ADD ./common/install_mkl.sh install_mkl.sh

@@ -83,7 +79,6 @@ FROM base as all_cuda
 COPY --from=cuda12.6 /usr/local/cuda-12.6 /usr/local/cuda-12.6
 COPY --from=cuda12.8 /usr/local/cuda-12.8 /usr/local/cuda-12.8
 COPY --from=cuda12.9 /usr/local/cuda-12.9 /usr/local/cuda-12.9
-COPY --from=cuda13.0 /usr/local/cuda-13.0 /usr/local/cuda-13.0

 # Final step
 FROM ${BASE_TARGET} as final

View File

@@ -81,8 +81,8 @@ elif [[ "$image" == *riscv* ]]; then
     DOCKERFILE="ubuntu-cross-riscv/Dockerfile"
 fi

-_UCX_COMMIT=7836b165abdbe468a2f607e7254011c07d788152
-_UCC_COMMIT=430e241bf5d38cbc73fc7a6b89155397232e3f96
+_UCX_COMMIT=7bb2722ff2187a0cad557ae4a6afa090569f83fb
+_UCC_COMMIT=20eae37090a4ce1b32bcce6144ccad0b49943e0b
 if [[ "$image" == *rocm* ]]; then
     _UCX_COMMIT=cc312eaa4655c0cc5c2bcd796db938f90563bcf6
     _UCC_COMMIT=0c0fc21559835044ab107199e334f7157d6a0d3d

@@ -114,19 +114,31 @@ case "$tag" in
        UCC_COMMIT=${_UCC_COMMIT}
        TRITON=yes
        ;;
-    pytorch-linux-jammy-cuda13.0-cudnn9-py3-gcc11)
-       CUDA_VERSION=13.0.0
+    pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc9-inductor-benchmarks)
+       CUDA_VERSION=12.8.1
        ANACONDA_PYTHON_VERSION=3.10
-       GCC_VERSION=11
+       GCC_VERSION=9
        VISION=yes
        KATEX=yes
        UCX_COMMIT=${_UCX_COMMIT}
        UCC_COMMIT=${_UCC_COMMIT}
        TRITON=yes
+       INDUCTOR_BENCHMARKS=yes
        ;;
-    pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc9-inductor-benchmarks)
+    pytorch-linux-jammy-cuda12.8-cudnn9-py3.12-gcc9-inductor-benchmarks)
        CUDA_VERSION=12.8.1
-       ANACONDA_PYTHON_VERSION=3.10
+       ANACONDA_PYTHON_VERSION=3.12
+       GCC_VERSION=9
+       VISION=yes
+       KATEX=yes
+       UCX_COMMIT=${_UCX_COMMIT}
+       UCC_COMMIT=${_UCC_COMMIT}
+       TRITON=yes
+       INDUCTOR_BENCHMARKS=yes
+       ;;
+    pytorch-linux-jammy-cuda12.8-cudnn9-py3.13-gcc9-inductor-benchmarks)
+       CUDA_VERSION=12.8.1
+       ANACONDA_PYTHON_VERSION=3.13
        GCC_VERSION=9
        VISION=yes
        KATEX=yes

@@ -156,13 +168,13 @@ case "$tag" in
        TRITON=yes
        ;;
     pytorch-linux-jammy-py3-clang12-onnx)
-       ANACONDA_PYTHON_VERSION=3.10
+       ANACONDA_PYTHON_VERSION=3.9
        CLANG_VERSION=12
        VISION=yes
        ONNX=yes
        ;;
-    pytorch-linux-jammy-py3.10-clang12)
-       ANACONDA_PYTHON_VERSION=3.10
+    pytorch-linux-jammy-py3.9-clang12)
+       ANACONDA_PYTHON_VERSION=3.9
        CLANG_VERSION=12
        VISION=yes
        TRITON=yes

@@ -197,24 +209,24 @@ case "$tag" in
        UCC_COMMIT=${_UCC_COMMIT}
        PYTORCH_ROCM_ARCH="gfx90a;gfx942;gfx950"
        ;;
-    pytorch-linux-jammy-xpu-n-1-py3)
-       ANACONDA_PYTHON_VERSION=3.10
+    pytorch-linux-jammy-xpu-2025.0-py3)
+       ANACONDA_PYTHON_VERSION=3.9
+       GCC_VERSION=11
+       VISION=yes
+       XPU_VERSION=2025.0
+       NINJA_VERSION=1.9.0
+       TRITON=yes
+       ;;
+    pytorch-linux-jammy-xpu-2025.1-py3)
+       ANACONDA_PYTHON_VERSION=3.9
        GCC_VERSION=11
        VISION=yes
        XPU_VERSION=2025.1
        NINJA_VERSION=1.9.0
        TRITON=yes
        ;;
-    pytorch-linux-jammy-xpu-n-py3)
-       ANACONDA_PYTHON_VERSION=3.10
-       GCC_VERSION=11
-       VISION=yes
-       XPU_VERSION=2025.2
-       NINJA_VERSION=1.9.0
-       TRITON=yes
-       ;;
-    pytorch-linux-jammy-py3-gcc11-inductor-benchmarks)
-       ANACONDA_PYTHON_VERSION=3.10
+    pytorch-linux-jammy-py3.9-gcc11-inductor-benchmarks)
+       ANACONDA_PYTHON_VERSION=3.9
        GCC_VERSION=11
        VISION=yes
        KATEX=yes

@@ -222,8 +234,8 @@ case "$tag" in
        DOCS=yes
        INDUCTOR_BENCHMARKS=yes
        ;;
-    pytorch-linux-jammy-cuda12.8-cudnn9-py3.10-clang12)
-       ANACONDA_PYTHON_VERSION=3.10
+    pytorch-linux-jammy-cuda12.8-cudnn9-py3.9-clang12)
+       ANACONDA_PYTHON_VERSION=3.9
        CUDA_VERSION=12.8.1
        CLANG_VERSION=12
        VISION=yes

@@ -234,8 +246,8 @@ case "$tag" in
        CLANG_VERSION=18
        VISION=yes
        ;;
-    pytorch-linux-jammy-py3.10-gcc11)
-       ANACONDA_PYTHON_VERSION=3.10
+    pytorch-linux-jammy-py3.9-gcc11)
+       ANACONDA_PYTHON_VERSION=3.9
        GCC_VERSION=11
        VISION=yes
        KATEX=yes

@@ -276,6 +288,7 @@ case "$tag" in
        GCC_VERSION=11
        ACL=yes
        VISION=yes
+       CONDA_CMAKE=yes
        OPENBLAS=yes
        # snadampal: skipping llvm src build install because the current version
        # from pytorch/llvm:9.0.1 is x86 specific

@@ -286,6 +299,7 @@ case "$tag" in
        GCC_VERSION=11
        ACL=yes
        VISION=yes
+       CONDA_CMAKE=yes
        OPENBLAS=yes
        # snadampal: skipping llvm src build install because the current version
        # from pytorch/llvm:9.0.1 is x86 specific

View File

@@ -56,13 +56,9 @@ ENV INSTALLED_VISION ${VISION}

 # Install rocm
 ARG ROCM_VERSION
-RUN mkdir ci_commit_pins
-COPY ./common/common_utils.sh common_utils.sh
-COPY ./ci_commit_pins/rocm-composable-kernel.txt ci_commit_pins/rocm-composable-kernel.txt
 COPY ./common/install_rocm.sh install_rocm.sh
 RUN bash ./install_rocm.sh
-RUN rm install_rocm.sh common_utils.sh
-RUN rm -r ci_commit_pins
+RUN rm install_rocm.sh
 COPY ./common/install_rocm_magma.sh install_rocm_magma.sh
 RUN bash ./install_rocm_magma.sh ${ROCM_VERSION}
 RUN rm install_rocm_magma.sh

View File

@@ -1,2 +0,0 @@
transformers==4.54.0
soxr==0.5.0

View File

@@ -0,0 +1 @@
v4.54.0

View File

@@ -1 +0,0 @@
v2.27.7-1

View File

@@ -1 +0,0 @@
7fe50dc3da2069d6645d9deb8c017a876472a977

View File

@@ -1 +1 @@
-74a23feff57432129df84d8099e622773cf77925
+e03a63be43e33596f7f0a43b0f530353785e4a59

View File

@@ -1 +1 @@
-1b0418a9a454b2b93ab8d71f40e59d2297157fae
+0958dc9b2bb815e428f721f9da599dab0dc1c5d7

View File

@@ -1 +1 @@
-5ae38bdb0dc066c5823e34dc9797afb9de42c866
+f7888497a1eb9e98d4c07537f0d0bcfe180d1363

View File

@@ -83,9 +83,9 @@ function build_cpython {
         py_suffix=${py_ver::-1}
         py_folder=$py_suffix
     fi
-    # Update to rc2 due to https://github.com/python/cpython/commit/c72699086fe4
+    # Only b3 is available now
     if [ "$py_suffix" == "3.14.0" ]; then
-        py_suffix="3.14.0rc2"
+        py_suffix="3.14.0b3"
     fi
     wget -q $PYTHON_DOWNLOAD_URL/$py_folder/Python-$py_suffix.tgz -O Python-$py_ver.tgz
     do_cpython_build $py_ver Python-$py_suffix

View File

@@ -10,7 +10,7 @@ else
    arch_path='sbsa'
 fi

-NVSHMEM_VERSION=3.3.24
+NVSHMEM_VERSION=3.3.9

 function install_cuda {
    version=$1

@@ -62,16 +62,14 @@ function install_nvshmem {
    mkdir -p "${tmpdir}" && cd "${tmpdir}"

    # nvSHMEM license: https://docs.nvidia.com/nvshmem/api/sla.html
-   # This pattern is a lie as it is not consistent across versions, for 3.3.9 it was cuda_ver-arch-nvshhem-ver
-   filename="libnvshmem-linux-${arch_path}-${nvshmem_version}_cuda${cuda_major_version}-archive"
-   suffix=".tar.xz"
-   url="https://developer.download.nvidia.com/compute/nvshmem/redist/libnvshmem/linux-${arch_path}/${filename}${suffix}"
+   filename="libnvshmem_cuda${cuda_major_version}-linux-${arch_path}-${nvshmem_version}"
+   url="https://developer.download.nvidia.com/compute/redist/nvshmem/${nvshmem_version}/builds/cuda${cuda_major_version}/txz/agnostic/${dl_arch}/${filename}.tar.gz"

    # download, unpack, install
    wget -q "${url}"
-   tar xf "${filename}${suffix}"
-   cp -a "${filename}/include/"* /usr/local/cuda/include/
-   cp -a "${filename}/lib/"* /usr/local/cuda/lib64/
+   tar xf "${filename}.tar.gz"
+   cp -a "libnvshmem/include/"* /usr/local/cuda/include/
+   cp -a "libnvshmem/lib/"* /usr/local/cuda/lib64/

    # cleanup
    cd ..

@@ -128,6 +126,74 @@ function install_129 {
    ldconfig
 }

+function prune_124 {
+   echo "Pruning CUDA 12.4"
+   #####################################################################################
+   # CUDA 12.4 prune static libs
+   #####################################################################################
+   export NVPRUNE="/usr/local/cuda-12.4/bin/nvprune"
+   export CUDA_LIB_DIR="/usr/local/cuda-12.4/lib64"
+
+   export GENCODE="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90"
+   export GENCODE_CUDNN="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90"
+
+   if [[ -n "$OVERRIDE_GENCODE" ]]; then
+       export GENCODE=$OVERRIDE_GENCODE
+   fi
+   if [[ -n "$OVERRIDE_GENCODE_CUDNN" ]]; then
+       export GENCODE_CUDNN=$OVERRIDE_GENCODE_CUDNN
+   fi
+
+   # all CUDA libs except CuDNN and CuBLAS
+   ls $CUDA_LIB_DIR/ | grep "\.a" | grep -v "culibos" | grep -v "cudart" | grep -v "cudnn" | grep -v "cublas" | grep -v "metis" \
+       | xargs -I {} bash -c \
+       "echo {} && $NVPRUNE $GENCODE $CUDA_LIB_DIR/{} -o $CUDA_LIB_DIR/{}"
+
+   # prune CuDNN and CuBLAS
+   $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublas_static.a -o $CUDA_LIB_DIR/libcublas_static.a
+   $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublasLt_static.a -o $CUDA_LIB_DIR/libcublasLt_static.a
+
+   #####################################################################################
+   # CUDA 12.4 prune visual tools
+   #####################################################################################
+   export CUDA_BASE="/usr/local/cuda-12.4/"
+   rm -rf $CUDA_BASE/libnvvp $CUDA_BASE/nsightee_plugins $CUDA_BASE/nsight-compute-2024.1.0 $CUDA_BASE/nsight-systems-2023.4.4/
+}
+
+function prune_126 {
+   echo "Pruning CUDA 12.6"
+   #####################################################################################
+   # CUDA 12.6 prune static libs
+   #####################################################################################
+   export NVPRUNE="/usr/local/cuda-12.6/bin/nvprune"
+   export CUDA_LIB_DIR="/usr/local/cuda-12.6/lib64"
+
+   export GENCODE="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90"
+   export GENCODE_CUDNN="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90"
+
+   if [[ -n "$OVERRIDE_GENCODE" ]]; then
+       export GENCODE=$OVERRIDE_GENCODE
+   fi
+   if [[ -n "$OVERRIDE_GENCODE_CUDNN" ]]; then
+       export GENCODE_CUDNN=$OVERRIDE_GENCODE_CUDNN
+   fi
+
+   # all CUDA libs except CuDNN and CuBLAS
+   ls $CUDA_LIB_DIR/ | grep "\.a" | grep -v "culibos" | grep -v "cudart" | grep -v "cudnn" | grep -v "cublas" | grep -v "metis" \
+       | xargs -I {} bash -c \
+       "echo {} && $NVPRUNE $GENCODE $CUDA_LIB_DIR/{} -o $CUDA_LIB_DIR/{}"
+
+   # prune CuDNN and CuBLAS
+   $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublas_static.a -o $CUDA_LIB_DIR/libcublas_static.a
+   $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublasLt_static.a -o $CUDA_LIB_DIR/libcublasLt_static.a
+
+   #####################################################################################
+   # CUDA 12.6 prune visual tools
+   #####################################################################################
+   export CUDA_BASE="/usr/local/cuda-12.6/"
+   rm -rf $CUDA_BASE/libnvvp $CUDA_BASE/nsightee_plugins $CUDA_BASE/nsight-compute-2024.3.2 $CUDA_BASE/nsight-systems-2024.5.1/
+}
+
 function install_128 {
    CUDNN_VERSION=9.8.0.87
    echo "Installing CUDA 12.8.1 and cuDNN ${CUDNN_VERSION} and NVSHMEM and NCCL and cuSparseLt-0.7.1"

@@ -146,38 +212,18 @@ function install_128 {
    ldconfig
 }

-function install_130 {
-   CUDNN_VERSION=9.13.0.50
-   echo "Installing CUDA 13.0 and cuDNN ${CUDNN_VERSION} and NVSHMEM and NCCL and cuSparseLt-0.7.1"
-   # install CUDA 13.0 in the same container
-   install_cuda 13.0.0 cuda_13.0.0_580.65.06_linux
-   # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
-   install_cudnn 13 $CUDNN_VERSION
-   install_nvshmem 13 $NVSHMEM_VERSION
-   CUDA_VERSION=13.0 bash install_nccl.sh
-   CUDA_VERSION=13.0 bash install_cusparselt.sh
-   ldconfig
-}
-
 # idiomatic parameter and option handling in sh
 while test $# -gt 0
 do
    case "$1" in
-   12.4) install_124;
+   12.4) install_124; prune_124
        ;;
-   12.6|12.6.*) install_126;
+   12.6|12.6.*) install_126; prune_126
        ;;
    12.8|12.8.*) install_128;
        ;;
    12.9|12.9.*) install_129;
        ;;
-   13.0|13.0.*) install_130;
-       ;;
    *) echo "bad argument $1"; exit 1
        ;;
    esac
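
The prune_124/prune_126 functions select every static library under lib64 except a short exclusion list before handing each one to nvprune. The same filter restated as a Python sketch for clarity (the function name is illustrative; the grep chain matches ".a" anywhere in the name, so the sketch does too):

import os

EXCLUDED = ("culibos", "cudart", "cudnn", "cublas", "metis")

def static_libs_to_prune(cuda_lib_dir: str) -> list[str]:
    # Mirrors: ls $CUDA_LIB_DIR/ | grep "\.a" | grep -v ... (exclusions above)
    return [
        name for name in sorted(os.listdir(cuda_lib_dir))
        if ".a" in name and not any(token in name for token in EXCLUDED)
    ]

# static_libs_to_prune("/usr/local/cuda-12.6/lib64")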

View File

@@ -5,15 +5,7 @@ set -ex
 # cuSPARSELt license: https://docs.nvidia.com/cuda/cusparselt/license.html
 mkdir tmp_cusparselt && cd tmp_cusparselt

-if [[ ${CUDA_VERSION:0:4} =~ "13" ]]; then
-    arch_path='sbsa'
-    export TARGETARCH=${TARGETARCH:-$(uname -m)}
-    if [ ${TARGETARCH} = 'amd64' ] || [ "${TARGETARCH}" = 'x86_64' ]; then
-        arch_path='x86_64'
-    fi
-    CUSPARSELT_NAME="libcusparse_lt-linux-${arch_path}-0.8.0.4_cuda13-archive"
-    curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-${arch_path}/${CUSPARSELT_NAME}.tar.xz
-elif [[ ${CUDA_VERSION:0:4} =~ ^12\.[5-9]$ ]]; then
+if [[ ${CUDA_VERSION:0:4} =~ ^12\.[5-9]$ ]]; then
     arch_path='sbsa'
     export TARGETARCH=${TARGETARCH:-$(uname -m)}
     if [ ${TARGETARCH} = 'amd64' ] || [ "${TARGETARCH}" = 'x86_64' ]; then

View File

@@ -5,7 +5,9 @@ set -ex
 source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh"

 function install_huggingface() {
-  pip_install -r huggingface-requirements.txt
+  local version
+  commit=$(get_pinned_commit huggingface)
+  pip_install "git+https://github.com/huggingface/transformers@${commit}"
 }

 function install_timm() {

@@ -24,6 +26,9 @@ function install_torchbench() {
   python install.py --continue_on_fail

+  # soxr comes from https://github.com/huggingface/transformers/pull/39429
+  pip install transformers==4.54.0 soxr==0.5.0
+
   echo "Print all dependencies after TorchBench is installed"
   python -mpip freeze
   popd

View File

@@ -7,8 +7,6 @@ if [[ ${CUDA_VERSION:0:2} == "11" ]]; then
   NCCL_VERSION=$(cat ci_commit_pins/nccl-cu11.txt)
 elif [[ ${CUDA_VERSION:0:2} == "12" ]]; then
   NCCL_VERSION=$(cat ci_commit_pins/nccl-cu12.txt)
-elif [[ ${CUDA_VERSION:0:2} == "13" ]]; then
-  NCCL_VERSION=$(cat ci_commit_pins/nccl-cu13.txt)
 else
   echo "Unexpected CUDA_VERSION ${CUDA_VERSION}"
   exit 1

View File

@@ -19,8 +19,8 @@ pip_install \
   transformers==4.36.2

 pip_install coloredlogs packaging
-pip_install onnxruntime==1.22.1
-pip_install onnxscript==0.4.0
+pip_install onnxruntime==1.18.1
+pip_install onnxscript==0.3.1

 # Cache the transformers model to be used later by ONNX tests. We need to run the transformers
 # package to download the model. By default, the model is cached at ~/.cache/huggingface/hub/

View File

@@ -2,11 +2,6 @@

 set -ex

-# for pip_install function
-source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh"
-
-ROCM_COMPOSABLE_KERNEL_VERSION="$(cat $(dirname $0)/../ci_commit_pins/rocm-composable-kernel.txt)"
-
 ver() {
     printf "%3d%03d%03d%03d" $(echo "$1" | tr '.' ' ');
 }

@@ -118,8 +113,6 @@ EOF
         rm -rf HIP clr
     fi

-    pip_install "git+https://github.com/rocm/composable_kernel@$ROCM_COMPOSABLE_KERNEL_VERSION"
-
     # Cleanup
     apt-get autoclean && apt-get clean
     rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

@@ -183,8 +176,6 @@ install_centos() {
         sqlite3 $kdb "PRAGMA journal_mode=off; PRAGMA VACUUM;"
     done

-    pip_install "git+https://github.com/rocm/composable_kernel@$ROCM_COMPOSABLE_KERNEL_VERSION"
-
     # Cleanup
     yum clean all
     rm -rf /var/cache/yum

View File

@@ -57,7 +57,7 @@ if [ ! -f setup.py ]; then
   cd python
 fi

-pip_install pybind11==3.0.1
+pip_install pybind11==2.13.6

 # TODO: remove patch setup.py once we have a proper fix for https://github.com/triton-lang/triton/issues/4527
 as_jenkins sed -i -e 's/https:\/\/tritonlang.blob.core.windows.net\/llvm-builds/https:\/\/oaitriton.blob.core.windows.net\/public\/llvm-builds/g' setup.py

View File

@@ -44,12 +44,8 @@ function install_ucc() {
   ./autogen.sh

-  if [[ -n "$CUDA_VERSION" && $CUDA_VERSION == 13* ]]; then
-    NVCC_GENCODE="-gencode=arch=compute_86,code=compute_86"
-  else
   # We only run distributed tests on Tesla M60 and A10G
   NVCC_GENCODE="-gencode=arch=compute_52,code=sm_52 -gencode=arch=compute_86,code=compute_86"
-  fi

   if [[ -n "$ROCM_VERSION" ]]; then
     if [[ -n "$PYTORCH_ROCM_ARCH" ]]; then

View File

@@ -65,15 +65,11 @@ function install_ubuntu() {

 function install_rhel() {
     . /etc/os-release
-    if [[ "${ID}" == "rhel" ]]; then
-        if [[ ! " 8.8 8.9 9.0 9.2 9.3 " =~ " ${VERSION_ID} " ]]; then
+    if [[ ! " 8.8 8.10 9.0 9.2 9.3 " =~ " ${VERSION_ID} " ]]; then
         echo "RHEL version ${VERSION_ID} not supported"
         exit
     fi
-    elif [[ "${ID}" == "almalinux" ]]; then
-        # Workaround for almalinux8 which used by quay.io/pypa/manylinux_2_28_x86_64
-        VERSION_ID="8.8"
-    fi

     dnf install -y 'dnf-command(config-manager)'
     # To add the online network package repository for the GPU Driver

@@ -150,11 +146,11 @@ if [[ "${XPU_DRIVER_TYPE,,}" == "lts" ]]; then
     XPU_DRIVER_VERSION="/lts/2350"
 fi

-# Default use Intel® oneAPI Deep Learning Essentials 2025.1
-if [[ "$XPU_VERSION" == "2025.2" ]]; then
-    XPU_PACKAGES="intel-deep-learning-essentials-2025.2"
-else
+# Default use Intel® oneAPI Deep Learning Essentials 2025.0
+if [[ "$XPU_VERSION" == "2025.1" ]]; then
     XPU_PACKAGES="intel-deep-learning-essentials-2025.1"
+else
+    XPU_PACKAGES="intel-deep-learning-essentials-2025.0"
 fi

 # The installation depends on the base OS

View File

@@ -69,19 +69,6 @@ RUN bash ./install_cuda.sh 12.9
 RUN bash ./install_magma.sh 12.9
 RUN ln -sf /usr/local/cuda-12.9 /usr/local/cuda

-FROM cuda as cuda13.0
-RUN bash ./install_cuda.sh 13.0
-RUN bash ./install_magma.sh 13.0
-RUN ln -sf /usr/local/cuda-13.0 /usr/local/cuda
-
-# Install libibverbs for libtorch and copy to CUDA directory
-RUN apt-get update -y && \
-    apt-get install -y libibverbs-dev librdmacm-dev && \
-    cp /usr/lib/x86_64-linux-gnu/libmlx5.so* /usr/local/cuda/lib64/ && \
-    cp /usr/lib/x86_64-linux-gnu/librdmacm.so* /usr/local/cuda/lib64/ && \
-    cp /usr/lib/x86_64-linux-gnu/libibverbs.so* /usr/local/cuda/lib64/ && \
-    cp /usr/lib/x86_64-linux-gnu/libnl* /usr/local/cuda/lib64/
-
 FROM cpu as rocm
 ARG ROCM_VERSION
 ARG PYTORCH_ROCM_ARCH

View File

@@ -175,6 +175,6 @@ ENV XPU_DRIVER_TYPE ROLLING
 RUN python3 -m pip install --upgrade pip && \
     python3 -mpip install cmake==3.28.4
 ADD ./common/install_xpu.sh install_xpu.sh
-ENV XPU_VERSION 2025.2
+ENV XPU_VERSION 2025.1
 RUN bash ./install_xpu.sh && rm install_xpu.sh
 RUN pushd /opt/_internal && tar -xJf static-libs-for-embedding-only.tar.xz && popd

View File

@@ -67,12 +67,6 @@ case ${image} in
        DOCKER_GPU_BUILD_ARG="--build-arg BASE_CUDA_VERSION=${GPU_ARCH_VERSION} --build-arg DEVTOOLSET_VERSION=13"
        MANY_LINUX_VERSION="2_28"
        ;;
-    manylinux2_28-builder:cuda13*)
-       TARGET=cuda_final
-       GPU_IMAGE=amd64/almalinux:8
-       DOCKER_GPU_BUILD_ARG="--build-arg BASE_CUDA_VERSION=${GPU_ARCH_VERSION} --build-arg DEVTOOLSET_VERSION=13"
-       MANY_LINUX_VERSION="2_28"
-       ;;
     manylinuxaarch64-builder:cuda*)
        TARGET=cuda_final
        GPU_IMAGE=amd64/almalinux:8

View File

@@ -263,6 +263,11 @@ scipy==1.14.1 ; python_version >= "3.12"
 #Pinned versions:
 #test that import:

+tb-nightly==2.13.0a20230426
+#Description: TensorBoard
+#Pinned versions:
+#test that import:
+
 # needed by torchgen utils
 typing-extensions>=4.10.0
 #Description: type hints for python

@@ -339,7 +344,7 @@ onnx==1.18.0
 #Pinned versions:
 #test that import:

-onnxscript==0.4.0
+onnxscript==0.3.1
 #Description: Required by mypy and test_public_bindings.py when checking torch.onnx._internal
 #Pinned versions:
 #test that import:

@@ -379,7 +384,7 @@ dataclasses_json==0.6.7
 cmake==4.0.0
 #Description: required for building

-tlparse==0.4.0
+tlparse==0.3.30
 #Description: required for log parsing

 cuda-bindings>=12.0,<13.0 ; platform_machine != "s390x"

View File

@@ -1,7 +1,7 @@
 sphinx==5.3.0
 #Description: This is used to generate PyTorch docs
 #Pinned versions: 5.3.0
--e git+https://github.com/pytorch/pytorch_sphinx_theme.git@1657ad2fc1acdc98aa719eebecbb0128a7c13ce4#egg=pytorch_sphinx_theme2
+-e git+https://github.com/pytorch/pytorch_sphinx_theme.git@722b7e6f9ca512fcc526ad07d62b3d28c50bb6cd#egg=pytorch_sphinx_theme2

 # TODO: sphinxcontrib.katex 0.9.0 adds a local KaTeX server to speed up pre-rendering
 # but it doesn't seem to work and hangs around idly. The initial thought that it is probably

View File

@@ -1 +1 @@
-3.5.0
+3.4.0

View File

@@ -1 +1 @@
-3.5.0
+3.4.0

View File

@@ -52,13 +52,9 @@ ENV INSTALLED_VISION ${VISION}

 # Install rocm
 ARG ROCM_VERSION
-RUN mkdir ci_commit_pins
-COPY ./common/common_utils.sh common_utils.sh
-COPY ./ci_commit_pins/rocm-composable-kernel.txt ci_commit_pins/rocm-composable-kernel.txt
 COPY ./common/install_rocm.sh install_rocm.sh
 RUN bash ./install_rocm.sh
-RUN rm install_rocm.sh common_utils.sh
-RUN rm -r ci_commit_pins
+RUN rm install_rocm.sh
 COPY ./common/install_rocm_magma.sh install_rocm_magma.sh
 RUN bash ./install_rocm_magma.sh ${ROCM_VERSION}
 RUN rm install_rocm_magma.sh

@@ -100,11 +96,11 @@ ARG ANACONDA_PYTHON_VERSION
 ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION
 COPY ./common/install_inductor_benchmark_deps.sh install_inductor_benchmark_deps.sh
 COPY ./common/common_utils.sh common_utils.sh
-COPY ci_commit_pins/huggingface-requirements.txt huggingface-requirements.txt
+COPY ci_commit_pins/huggingface.txt huggingface.txt
 COPY ci_commit_pins/timm.txt timm.txt
 COPY ci_commit_pins/torchbench.txt torchbench.txt
 RUN if [ -n "${INDUCTOR_BENCHMARKS}" ]; then bash ./install_inductor_benchmark_deps.sh; fi
-RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface-requirements.txt torchbench.txt
+RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface.txt torchbench.txt

 # (optional) Install non-default Ninja version
 ARG NINJA_VERSION
View File

@@ -56,10 +56,10 @@ RUN rm install_openssl.sh
 ARG INDUCTOR_BENCHMARKS
 COPY ./common/install_inductor_benchmark_deps.sh install_inductor_benchmark_deps.sh
 COPY ./common/common_utils.sh common_utils.sh
-COPY ci_commit_pins/huggingface-requirements.txt huggingface-requirements.txt
+COPY ci_commit_pins/huggingface.txt huggingface.txt
 COPY ci_commit_pins/timm.txt timm.txt
 RUN if [ -n "${INDUCTOR_BENCHMARKS}" ]; then bash ./install_inductor_benchmark_deps.sh; fi
-RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface-requirements.txt
+RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface.txt

 # Install XPU Dependencies
 ARG XPU_VERSION

View File

@@ -66,7 +66,6 @@ ENV NCCL_LIB_DIR="/usr/local/cuda/lib64/"
 # (optional) Install UCC
 ARG UCX_COMMIT
 ARG UCC_COMMIT
-ARG CUDA_VERSION
 ENV UCX_COMMIT $UCX_COMMIT
 ENV UCC_COMMIT $UCC_COMMIT
 ENV UCX_HOME /usr

@@ -97,11 +96,11 @@ RUN rm install_openssl.sh
 ARG INDUCTOR_BENCHMARKS
 COPY ./common/install_inductor_benchmark_deps.sh install_inductor_benchmark_deps.sh
 COPY ./common/common_utils.sh common_utils.sh
-COPY ci_commit_pins/huggingface-requirements.txt huggingface-requirements.txt
+COPY ci_commit_pins/huggingface.txt huggingface.txt
 COPY ci_commit_pins/timm.txt timm.txt
 COPY ci_commit_pins/torchbench.txt torchbench.txt
 RUN if [ -n "${INDUCTOR_BENCHMARKS}" ]; then bash ./install_inductor_benchmark_deps.sh; fi
-RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface-requirements.txt torchbench.txt
+RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface.txt torchbench.txt

 ARG TRITON
 ARG TRITON_CPU

@@ -182,6 +181,7 @@ COPY --from=pytorch/llvm:9.0.1 /opt/llvm /opt/llvm
 RUN if [ -n "${SKIP_LLVM_SRC_BUILD_INSTALL}" ]; then set -eu; rm -rf /opt/llvm; fi

 # AWS specific CUDA build guidance
+ENV TORCH_CUDA_ARCH_LIST Maxwell
 ENV TORCH_NVCC_FLAGS "-Xfatbin -compress-all"
 ENV CUDA_PATH /usr/local/cuda

View File

@@ -7,4 +7,4 @@ set -ex

 SCRIPTPATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

-USE_NVSHMEM=0 USE_CUSPARSELT=0 BUILD_PYTHONLESS=1 DESIRED_PYTHON="3.9" ${SCRIPTPATH}/../manywheel/build.sh
+USE_CUSPARSELT=0 BUILD_PYTHONLESS=1 DESIRED_PYTHON="3.9" ${SCRIPTPATH}/../manywheel/build.sh

View File

@@ -2,7 +2,7 @@ import argparse
 import logging

 from cli.lib.common.cli_helper import register_targets, RichHelp, TargetSpec
-from cli.lib.core.vllm.vllm_build import VllmBuildRunner
+from cli.lib.core.vllm import VllmBuildRunner

 logger = logging.getLogger(__name__)

View File

@@ -1,143 +0,0 @@
from __future__ import annotations

import logging
import os
import textwrap
from pathlib import Path
from typing import TYPE_CHECKING

from cli.lib.common.utils import get_wheels
from jinja2 import Template

if TYPE_CHECKING:
    from collections.abc import Iterable, Mapping

logger = logging.getLogger(__name__)

_TPL_CONTENT = Template(
    textwrap.dedent("""\
        ## {{ title }}

        ```{{ lang }}
        {{ content }}
        ```
    """)
)

_TPL_LIST_ITEMS = Template(
    textwrap.dedent("""\
        ## {{ title }}
        {% for it in items %}
        - {{ it.pkg }}: {{ it.relpath }}
        {% else %}
        _(no item found)_
        {% endfor %}
    """)
)

_TPL_TABLE = Template(
    textwrap.dedent("""\
        {%- if rows %}
        | {{ cols | join(' | ') }} |
        |{%- for _ in cols %} --- |{%- endfor %}
        {%- for r in rows %}
        | {%- for c in cols %} {{ r.get(c, "") }} |{%- endfor %}
        {%- endfor %}
        {%- else %}
        _(no data)_
        {%- endif %}
    """)
)


def gh_summary_path() -> Path | None:
    """Return the Path to the GitHub step summary file, or None if not set."""
    p = os.environ.get("GITHUB_STEP_SUMMARY")
    return Path(p) if p else None


def write_gh_step_summary(md: str, *, append_content: bool = True) -> bool:
    """
    Write Markdown content to the GitHub Step Summary file if GITHUB_STEP_SUMMARY is set.
    append_content: default true, if True, append to the end of the file, else overwrite the whole file

    Returns:
        True if written successfully (in GitHub Actions environment),
        False if skipped (e.g., running locally where the variable is not set).
    """
    sp = gh_summary_path()
    if not sp:
        logger.info("[gh-summary] GITHUB_STEP_SUMMARY not set, skipping write.")
        return False

    md_clean = textwrap.dedent(md).strip() + "\n"

    mode = "a" if append_content else "w"
    with sp.open(mode, encoding="utf-8") as f:
        f.write(md_clean)
    return True


def md_heading(text: str, level: int = 2) -> str:
    """Generate a Markdown heading string with the given level (1-6)."""
    return f"{'#' * max(1, min(level, 6))} {text}\n"


def md_details(summary: str, content: str) -> str:
    """Generate a collapsible <details> block with a summary and inner content."""
    return f"<details>\n<summary>{summary}</summary>\n\n{content}\n\n</details>\n"


def summarize_content_from_file(
    output_dir: Path,
    freeze_file: str,
    title: str = "Content from file",
    code_lang: str = "",  # e.g. "text" or "ini"
) -> bool:
    f = Path(output_dir) / freeze_file
    if not f.exists():
        return False
    content = f.read_text(encoding="utf-8").strip()
    md = render_content(content, title=title, lang=code_lang)
    return write_gh_step_summary(md)


def summarize_wheels(path: Path, title: str = "Wheels", max_depth: int = 3):
    items = get_wheels(path, max_depth=max_depth)
    if not items:
        return False
    md = render_list(items, title=title)
    return write_gh_step_summary(md)


def md_kv_table(rows: Iterable[Mapping[str, str | int | float]]) -> str:
    """
    Render a list of dicts as a Markdown table using Jinja template.
    """
    rows = list(rows)
    cols = list({k for r in rows for k in r.keys()})
    md = _TPL_TABLE.render(cols=cols, rows=rows).strip() + "\n"
    return md


def render_list(
    items: Iterable[str],
    *,
    title: str = "List",
) -> str:
    tpl = _TPL_LIST_ITEMS
    md = tpl.render(title=title, items=items)
    return md


def render_content(
    content: str,
    *,
    title: str = "Content",
    lang: str = "text",
) -> str:
    tpl = _TPL_CONTENT
    md = tpl.render(title=title, content=content, lang=lang)
    return md
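
Since the file removed above is self-contained, a short usage sketch may help review it. The import path matches how the module is referenced elsewhere in this diff; the wheel name below is invented:

from cli.lib.common.gh_summary import md_heading, md_kv_table, write_gh_step_summary

rows = [{"pkg": "torch", "relpath": "torch-2.9.0.dev-cp312-linux_aarch64.whl"}]
# Appends a heading plus a one-row Markdown table to $GITHUB_STEP_SUMMARY;
# returns False (and skips the write) when the variable is not set.
write_gh_step_summary(md_heading("Wheel artifacts") + md_kv_table(rows))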

View File

@@ -45,7 +45,7 @@ def clone_external_repo(target: str, repo: str, dst: str = "", update_submodules
         # Checkout pinned commit
         commit = get_post_build_pinned_commit(target)
-        logger.info("Checking out pinned %s commit %s", target, commit)
+        logger.info("Checking out pinned commit %s", commit)
         r.git.checkout(commit)

         # Update submodules if requested

@@ -55,7 +55,7 @@ def clone_external_repo(target: str, repo: str, dst: str = "", update_submodules
             sm.update(init=True, recursive=True, progress=PrintProgress())

         logger.info("Successfully cloned %s", target)
-        return r, commit
+        return r

     except GitCommandError as e:
         logger.error("Git operation failed: %s", e)

View File

@@ -1,71 +0,0 @@
import glob
import logging
import shlex
import shutil
import sys
from collections.abc import Iterable
from importlib.metadata import PackageNotFoundError, version  # noqa: UP035
from typing import Optional, Union

from cli.lib.common.utils import run_command

logger = logging.getLogger(__name__)


def pip_install_packages(
    packages: Iterable[str] = (),
    env=None,
    *,
    requirements: Optional[str] = None,
    constraints: Optional[str] = None,
    prefer_uv: bool = False,
) -> None:
    use_uv = prefer_uv and shutil.which("uv") is not None
    base = (
        [sys.executable, "-m", "uv", "pip", "install"]
        if use_uv
        else [sys.executable, "-m", "pip", "install"]
    )
    cmd = base[:]
    if requirements:
        cmd += ["-r", requirements]
    if constraints:
        cmd += ["-c", constraints]
    cmd += list(packages)
    logger.info("pip installing packages: %s", " ".join(map(shlex.quote, cmd)))
    run_command(" ".join(map(shlex.quote, cmd)), env=env)


def pip_install_first_match(pattern: str, extras: Optional[str] = None, pref_uv=False):
    wheel = first_matching_pkg(pattern)
    target = f"{wheel}[{extras}]" if extras else wheel
    logger.info("Installing %s...", target)
    pip_install_packages([target], prefer_uv=pref_uv)


def run_python(args: Union[str, list[str]], env=None):
    """
    Run the python in the current environment.
    """
    if isinstance(args, str):
        args = shlex.split(args)
    cmd = [sys.executable] + args
    run_command(" ".join(map(shlex.quote, cmd)), env=env)


def pkg_exists(name: str) -> bool:
    try:
        pkg_version = version(name)
        logger.info("%s already exist with version: %s", name, pkg_version)
        return True
    except PackageNotFoundError:
        logger.info("%s is not installed", name)
        return False


def first_matching_pkg(pattern: str) -> str:
    matches = sorted(glob.glob(pattern))
    if not matches:
        raise FileNotFoundError(f"No wheel matching: {pattern}")
    return matches[0]
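
As with the previous file, the removed helpers are easiest to judge with a usage sketch (the file names and the extras group below are examples):

from cli.lib.common.pip_helper import (
    pip_install_first_match,
    pip_install_packages,
    run_python,
)

# Prefers `uv pip install` when uv is on PATH, else falls back to plain pip:
pip_install_packages(["ninja"], requirements="requirements.txt", prefer_uv=True)
# Installs the first wheel matching the glob, optionally with an extras group:
pip_install_first_match("dist/torch-*.whl", extras="opt-einsum")
run_python("-c 'import torch; print(torch.__version__)'")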

View File

@@ -7,8 +7,6 @@ import os
 import shlex
 import subprocess
 import sys
-from contextlib import contextmanager
-from pathlib import Path
 from typing import Optional

@@ -79,61 +77,3 @@ def str2bool(value: Optional[str]) -> bool:
     if value in false_value_set:
         return False
     raise ValueError(f"Invalid string value for boolean conversion: {value}")
-
-
-@contextmanager
-def temp_environ(updates: dict[str, str]):
-    """
-    Temporarily set environment variables and restore them after the block.
-    Args:
-        updates: Dict of environment variables to set.
-    """
-    missing = object()
-    old: dict[str, str | object] = {k: os.environ.get(k, missing) for k in updates}
-    try:
-        os.environ.update(updates)
-        yield
-    finally:
-        for k, v in old.items():
-            if v is missing:
-                os.environ.pop(k, None)
-            else:
-                os.environ[k] = v  # type: ignore[arg-type]
-
-
-@contextmanager
-def working_directory(path: str):
-    """
-    Temporarily change the working directory inside a context.
-    """
-    if not path:
-        # No-op context
-        yield
-        return
-    prev_cwd = os.getcwd()
-    try:
-        os.chdir(path)
-        yield
-    finally:
-        os.chdir(prev_cwd)
-
-
-def get_wheels(
-    output_dir: Path,
-    max_depth: Optional[int] = None,
-) -> list[str]:
-    """Return a list of wheels found in the given output directory."""
-    root = Path(output_dir)
-    if not root.exists():
-        return []
-    items = []
-    for dirpath, _, filenames in os.walk(root):
-        depth = Path(dirpath).relative_to(root).parts
-        if max_depth is not None and len(depth) > max_depth:
-            continue
-        for fname in sorted(filenames):
-            if fname.endswith(".whl"):
-                pkg = fname.split("-")[0]
-                relpath = str((Path(dirpath) / fname).relative_to(root))
-                items.append({"pkg": pkg, "relpath": relpath})
-    return items
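
A usage sketch for the two removed context managers (the values are examples); both restore the prior state even if the body raises:

from cli.lib.common.utils import temp_environ, working_directory

with temp_environ({"VLLM_WORKER_MULTIPROC_METHOD": "spawn"}):
    with working_directory("external/vllm"):
        ...  # the env var and cwd apply only inside these blocks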

View File

@@ -13,11 +13,7 @@ from cli.lib.common.envs_helper import (
     env_str_field,
     with_params_help,
 )
-from cli.lib.common.gh_summary import (
-    gh_summary_path,
-    summarize_content_from_file,
-    summarize_wheels,
-)
+from cli.lib.common.git_helper import clone_external_repo
 from cli.lib.common.path_helper import (
     copy,
     ensure_dir_exists,

@@ -26,7 +22,6 @@ from cli.lib.common.path_helper import (
     is_path_exist,
 )
 from cli.lib.common.utils import run_command
-from cli.lib.core.vllm.lib import clone_vllm, summarize_build_info

 logger = logging.getLogger(__name__)

@@ -66,11 +61,6 @@ class VllmBuildParameters:
         "DOCKERFILE_PATH", ".github/ci_configs/vllm/Dockerfile.tmp_vllm"
     )

-    # the cleaning script to remove torch dependencies from pip
-    cleaning_script: Path = env_path_field(
-        "cleaning_script", ".github/ci_configs/vllm/use_existing_torch.py"
-    )
-
     # OUTPUT_DIR: where docker buildx (local exporter) will write artifacts
     output_dir: Path = env_path_field("OUTPUT_DIR", "external/vllm")

@@ -162,45 +152,18 @@ class VllmBuildRunner(BaseRunner):
         3. run docker build
         """
         inputs = VllmBuildParameters()
-        logger.info("Running vllm build with inputs: %s", inputs)
-        vllm_commit = clone_vllm()
-        self.cp_torch_cleaning_script(inputs)
+        clone_vllm()
         self.cp_dockerfile_if_exist(inputs)

         # cp torch wheels from root direct to vllm workspace if exist
         self.cp_torch_whls_if_exist(inputs)

-        # make sure the output dir to store the build artifacts exist
-        ensure_dir_exists(Path(inputs.output_dir))
+        ensure_dir_exists(inputs.output_dir)

         cmd = self._generate_docker_build_cmd(inputs)
         logger.info("Running docker build: \n %s", cmd)

-        try:
         run_command(cmd, cwd="vllm", env=os.environ.copy())
-        finally:
-            self.genearte_vllm_build_summary(vllm_commit, inputs)
-
-    def genearte_vllm_build_summary(
-        self, vllm_commit: str, inputs: VllmBuildParameters
-    ):
-        if not gh_summary_path():
-            return logger.info("Skipping, not detect GH Summary env var....")
-
-        logger.info("Generate GH Summary ...")
-        # summarize vllm build info
-        summarize_build_info(vllm_commit)
-
-        # summarize vllm build artifacts
-        vllm_artifact_dir = inputs.output_dir / "wheels"
-        summarize_content_from_file(
-            vllm_artifact_dir,
-            "build_summary.txt",
-            title="Vllm build env pip package summary",
-        )
-        summarize_wheels(
-            inputs.torch_whls_path, max_depth=3, title="Torch Wheels Artifacts"
-        )
-        summarize_wheels(vllm_artifact_dir, max_depth=3, title="Vllm Wheels Artifacts")

     def cp_torch_whls_if_exist(self, inputs: VllmBuildParameters) -> str:
         if not inputs.use_torch_whl:

@@ -211,11 +174,6 @@ class VllmBuildRunner(BaseRunner):
         copy(inputs.torch_whls_path, tmp_dir)
         return tmp_dir

-    def cp_torch_cleaning_script(self, inputs: VllmBuildParameters):
-        script = get_path(inputs.cleaning_script, resolve=True)
-        vllm_script = Path(f"./{self.work_directory}/use_existing_torch.py")
-        copy(script, vllm_script)
-
     def cp_dockerfile_if_exist(self, inputs: VllmBuildParameters):
         if not inputs.use_local_dockerfile:
             logger.info("using vllm default dockerfile.torch_nightly for build")

@@ -294,3 +252,12 @@ class VllmBuildRunner(BaseRunner):
             --progress=plain .
         """
         ).strip()
+
+
+def clone_vllm():
+    clone_external_repo(
+        target="vllm",
+        repo="https://github.com/vllm-project/vllm.git",
+        dst="vllm",
+        update_submodules=True,
+    )

View File

@ -1,296 +0,0 @@
import logging
import os
import textwrap
from typing import Any
from cli.lib.common.gh_summary import write_gh_step_summary
from cli.lib.common.git_helper import clone_external_repo
from cli.lib.common.pip_helper import pip_install_packages
from cli.lib.common.utils import run_command, temp_environ, working_directory
from jinja2 import Template
logger = logging.getLogger(__name__)
_TPL_VLLM_INFO = Template(
textwrap.dedent("""\
## Vllm against Pytorch CI Test Summary
**Vllm Commit**: [{{ vllm_commit }}](https://github.com/vllm-project/vllm/commit/{{ vllm_commit }})
{%- if torch_sha %}
**Pytorch Commit**: [{{ torch_sha }}](https://github.com/pytorch/pytorch/commit/{{ torch_sha }})
{%- endif %}
""")
)
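For reference, rendering the template with illustrative (hypothetical) SHAs produces the following markdown:

print(_TPL_VLLM_INFO.render(vllm_commit="abc1234", torch_sha="def5678").strip())
# ## Vllm against Pytorch CI Test Summary
# **Vllm Commit**: [abc1234](https://github.com/vllm-project/vllm/commit/abc1234)
# **Pytorch Commit**: [def5678](https://github.com/pytorch/pytorch/commit/def5678)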
def sample_vllm_test_library():
"""
Simple sample to unblock the vllm ci development, which mimics
https://github.com/vllm-project/vllm/blob/main/.buildkite/test-pipeline.yaml
see run_test_plan for more details
"""
# TODO(elainewy): Read from yaml file to handle the env and tests for vllm
return {
"vllm_basic_correctness_test": {
"title": "Basic Correctness Test",
"id": "vllm_basic_correctness_test",
"env_vars": {
"VLLM_WORKER_MULTIPROC_METHOD": "spawn",
},
"steps": [
"pytest -v -s basic_correctness/test_cumem.py",
"pytest -v -s basic_correctness/test_basic_correctness.py",
"pytest -v -s basic_correctness/test_cpu_offload.py",
"VLLM_TEST_ENABLE_ARTIFICIAL_PREEMPT=1 pytest -v -s basic_correctness/test_preemption.py",
],
},
"vllm_basic_models_test": {
"title": "Basic models test",
"id": "vllm_basic_models_test",
"steps": [
"pytest -v -s models/test_transformers.py",
"pytest -v -s models/test_registry.py",
"pytest -v -s models/test_utils.py",
"pytest -v -s models/test_vision.py",
"pytest -v -s models/test_initialization.py",
],
},
"vllm_entrypoints_test": {
"title": "Entrypoints Test ",
"id": "vllm_entrypoints_test",
"env_vars": {
"VLLM_WORKER_MULTIPROC_METHOD": "spawn",
},
"steps": [
" ".join(
[
"pytest",
"-v",
"-s",
"entrypoints/llm",
"--ignore=entrypoints/llm/test_lazy_outlines.py",
"--ignore=entrypoints/llm/test_generate.py",
"--ignore=entrypoints/llm/test_generate_multiple_loras.py",
"--ignore=entrypoints/llm/test_collective_rpc.py",
]
),
"pytest -v -s entrypoints/llm/test_lazy_outlines.py",
"pytest -v -s entrypoints/llm/test_generate.py ",
"VLLM_USE_V1=0 pytest -v -s entrypoints/offline_mode",
],
},
"vllm_regression_test": {
"title": "Regression Test",
"id": "vllm_regression_test",
"package_install": ["modelscope"],
"steps": [
"pytest -v -s test_regression.py",
],
},
"vllm_lora_tp_test_distributed": {
"title": "LoRA TP Test (Distributed)",
"id": "vllm_lora_tp_test_distributed",
"env_vars": {
"VLLM_WORKER_MULTIPROC_METHOD": "spawn",
},
"num_gpus": 4,
"steps": [
"pytest -v -s -x lora/test_chatglm3_tp.py",
"pytest -v -s -x lora/test_llama_tp.py",
"pytest -v -s -x lora/test_llm_with_multi_loras.py",
],
},
"vllm_distributed_test_28_failure_test": {
"title": "Distributed Tests (2 GPUs) pytorch 2.8 release failure",
"id": "vllm_distributed_test_28_failure_test",
"env_vars": {
"VLLM_WORKER_MULTIPROC_METHOD": "spawn",
},
"num_gpus": 4,
"steps": [
"pytest -v -s distributed/test_sequence_parallel.py",
],
},
"vllm_lora_28_failure_test": {
"title": "LoRA pytorch 2.8 failure test",
"id": "vllm_lora_28_failure_test",
"steps": ["pytest -v lora/test_quant_model.py"],
},
"vllm_multi_model_processor_test": {
"title": "Multi-Modal Processor Test",
"id": "vllm_multi_model_processor_test",
"package_install": ["git+https://github.com/TIGER-AI-Lab/Mantis.git"],
"steps": [
"pytest -v -s models/multimodal/processing --ignore models/multimodal/processing/test_tensor_schema.py",
],
},
"vllm_multi_model_test_28_failure_test": {
"title": "Multi-Model Test (Failed 2.8 release)",
"id": "vllm_multi_model_test_28_failure_test",
"package_install": ["git+https://github.com/TIGER-AI-Lab/Mantis.git"],
"steps": [
"pytest -v -s models/multimodal/generation/test_voxtral.py",
"pytest -v -s models/multimodal/pooling",
],
},
"vllm_pytorch_compilation_unit_tests": {
"title": "PyTorch Compilation Unit Tests",
"id": "vllm_pytorch_compilation_unit_tests",
"steps": [
"pytest -v -s compile/test_pass_manager.py",
"pytest -v -s compile/test_fusion.py",
"pytest -v -s compile/test_fusion_attn.py",
"pytest -v -s compile/test_silu_mul_quant_fusion.py",
"pytest -v -s compile/test_sequence_parallelism.py",
"pytest -v -s compile/test_async_tp.py",
"pytest -v -s compile/test_fusion_all_reduce.py",
"pytest -v -s compile/test_decorator.py",
],
},
"vllm_languagde_model_test_extended_generation_28_failure_test": {
"title": "Language Models Test (Extended Generation) 2.8 release failure",
"id": "vllm_languagde_model_test_extended_generation_28_failure_test",
"package_install": [
"--no-build-isolation",
"git+https://github.com/Dao-AILab/causal-conv1d@v1.5.0.post8",
],
"steps": [
"pytest -v -s models/language/generation/test_mistral.py",
],
},
"vllm_distributed_test_2_gpu_28_failure_test": {
"title": "Distributed Tests (2 GPUs) pytorch 2.8 release failure",
"id": "vllm_distributed_test_2_gpu_28_failure_test",
"env_vars": {
"VLLM_WORKER_MULTIPROC_METHOD": "spawn",
},
"num_gpus": 4,
"steps": [
"pytest -v -s distributed/test_sequence_parallel.py",
],
},
# TODO(elainewy): need to add g6 with 4 gpus to run this test
"vllm_lora_test": {
"title": "LoRA Test %N",
"id": "lora_test",
"parallelism": 4,
"steps": [
"echo '[checking] list sharded lora tests:'",
" ".join(
[
"pytest -q --collect-only lora",
"--shard-id=$$BUILDKITE_PARALLEL_JOB",
"--num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT",
"--ignore=lora/test_chatglm3_tp.py --ignore=lora/test_llama_tp.py",
]
),
"echo '[checking] Done. list lora tests'",
" ".join(
[
"pytest -v -s lora --shard-id=$$BUILDKITE_PARALLEL_JOB",
"--num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT",
"--ignore=lora/test_chatglm3_tp.py --ignore=lora/test_llama_tp.py",
]
),
],
},
}
def check_parallelism(tests: Any, title: str, shard_id: int = 0, num_shards: int = 0):
"""
a method to check whether the test plan runs with parallelism (sharding) or not.
"""
parallelism = int(tests.get("parallelism", "0"))
is_parallel = parallelism and parallelism > 1
if not is_parallel:
return False
if shard_id > num_shards:
raise RuntimeError(
f"Test {title} expects {num_shards} shards, but invalid {shard_id} is provided"
)
if num_shards != parallelism:
raise RuntimeError(
f"Test {title} expects {parallelism} shards, but invalid {num_shards} is provided"
)
return True
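For instance, the sharded LoRA entry above declares parallelism 4, so the check passes only when the shard arguments line up; a minimal illustration (values are hypothetical):

tests = {"parallelism": 4, "steps": []}
check_parallelism(tests, "LoRA Test %N", shard_id=2, num_shards=4)    # True
check_parallelism({"steps": []}, "Basic suite", 0, 0)                 # False: not a parallel plan
# check_parallelism(tests, "LoRA Test %N", shard_id=2, num_shards=2)  # raises: expects 4 shards, got 2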
def run_test_plan(
test_plan: str,
test_target: str,
tests_map: dict[str, Any],
shard_id: int = 0,
num_shards: int = 0,
):
"""
a method to run a list of tests based on the test plan.
"""
logger.info("run %s tests.....", test_target)
if test_plan not in tests_map:
raise RuntimeError(
f"test {test_plan} not found, please add it to test plan pool"
)
tests = tests_map[test_plan]
pkgs = tests.get("package_install", [])
title = tests.get("title", "unknown test")
is_parallel = check_parallelism(tests, title, shard_id, num_shards)
if is_parallel:
title = title.replace("%N", f"{shard_id}/{num_shards}")
logger.info("Running tests: %s", title)
if pkgs:
logger.info("Installing packages: %s", pkgs)
pip_install_packages(packages=pkgs, prefer_uv=True)
with (
working_directory(tests.get("working_directory", "tests")),
temp_environ(tests.get("env_vars", {})),
):
failures = []
for step in tests["steps"]:
logger.info("Running step: %s", step)
if is_parallel:
step = replace_buildkite_placeholders(step, shard_id, num_shards)
logger.info("Running parallel step: %s", step)
code = run_command(cmd=step, check=False, use_shell=True)
if code != 0:
failures.append(step)
logger.info("Finish running step: %s", step)
if failures:
logger.error("Failed tests: %s", failures)
raise RuntimeError(f"{len(failures)} pytest runs failed: {failures}")
logger.info("Done. All tests passed")
def clone_vllm(dst: str = "vllm"):
_, commit = clone_external_repo(
target="vllm",
repo="https://github.com/vllm-project/vllm.git",
dst=dst,
update_submodules=True,
)
return commit
def replace_buildkite_placeholders(step: str, shard_id: int, num_shards: int) -> str:
mapping = {
"$$BUILDKITE_PARALLEL_JOB_COUNT": str(num_shards),
"$$BUILDKITE_PARALLEL_JOB": str(shard_id),
}
for k in sorted(mapping, key=len, reverse=True):
step = step.replace(k, mapping[k])
return step
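For example, with shard 2 of 4 the Buildkite-style placeholders in a collected step expand as follows:

step = "pytest -v -s lora --shard-id=$$BUILDKITE_PARALLEL_JOB --num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT"
print(replace_buildkite_placeholders(step, shard_id=2, num_shards=4))
# pytest -v -s lora --shard-id=2 --num-shards=4

Replacing the longest key first matters here, since $$BUILDKITE_PARALLEL_JOB is a prefix of $$BUILDKITE_PARALLEL_JOB_COUNT.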
def summarize_build_info(vllm_commit: str) -> bool:
torch_sha = os.getenv("GITHUB_SHA")
md = (
_TPL_VLLM_INFO.render(vllm_commit=vllm_commit, torch_sha=torch_sha).strip()
+ "\n"
)
return write_gh_step_summary(md)
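The write_gh_step_summary helper is imported from cli.lib.common.gh_summary and is not shown here; a minimal sketch consistent with its use (returning a truthy value on success), assuming it appends to the standard GITHUB_STEP_SUMMARY file of GitHub Actions:

import os

def write_gh_step_summary(md: str) -> bool:
    # hypothetical sketch: append markdown to the GitHub Actions step summary file
    path = os.getenv("GITHUB_STEP_SUMMARY")
    if not path:
        return False
    with open(path, "a", encoding="utf-8") as f:
        f.write(md)
    return True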

View File

@ -1,280 +0,0 @@
import logging
import os
import re
import subprocess
import sys
from collections.abc import Iterable
from dataclasses import dataclass
from enum import Enum
from pathlib import Path
from typing import Any
from cli.lib.common.cli_helper import BaseRunner
from cli.lib.common.envs_helper import env_path_field, env_str_field, get_env
from cli.lib.common.path_helper import copy, get_path, remove_dir
from cli.lib.common.pip_helper import (
pip_install_first_match,
pip_install_packages,
pkg_exists,
run_python,
)
from cli.lib.common.utils import run_command, working_directory
from cli.lib.core.vllm.lib import clone_vllm, run_test_plan, sample_vllm_test_library
logger = logging.getLogger(__name__)
@dataclass
class VllmTestParameters:
"""
Parameters defining the vllm external test input
!!!DO NOT ADD SECRETS IN THIS CLASS!!!
you can put an environment variable name in VllmTestParameters if it's not the same as the secret one;
fetch secrets directly from env variables at runtime
"""
torch_whls_path: Path = env_path_field("WHEELS_PATH", "./dist")
vllm_whls_path: Path = env_path_field(
"VLLM_WHEELS_PATH", "./dist/external/vllm/wheels"
)
torch_cuda_arch_list: str = env_str_field("TORCH_CUDA_ARCH_LIST", "8.9")
cleaning_script: Path = env_path_field(
"cleaning_script", ".github/ci_configs/vllm/use_existing_torch.py"
)
def __post_init__(self):
if not self.torch_whls_path.exists():
raise ValueError("missing torch_whls_path")
if not self.vllm_whls_path.exists():
raise ValueError("missing vllm_whls_path")
class TestInputType(Enum):
TEST_PLAN = "test_plan"
UNKNOWN = "unknown"
class VllmTestRunner(BaseRunner):
def __init__(self, args: Any):
self.work_directory = "vllm"
self.test_plan = ""
self.test_type = TestInputType.UNKNOWN
self.shard_id = args.shard_id
self.num_shards = args.num_shards
if args.test_plan:
self.test_plan = args.test_plan
self.test_type = TestInputType.TEST_PLAN
# Matches the structure of the artifacts.zip from the torch build
self.TORCH_WHL_PATH_REGEX = "torch*.whl"
self.TORCH_WHL_EXTRA = "opt-einsum"
self.TORCH_ADDITIONAL_WHLS_REGEX = [
"vision/torchvision*.whl",
"audio/torchaudio*.whl",
]
# Match the structure of the artifacts.zip from vllm external build
self.VLLM_TEST_WHLS_REGEX = [
"xformers/*.whl",
"vllm/vllm*.whl",
"flashinfer-python/flashinfer*.whl",
]
def prepare(self):
"""
prepare the test environment for vllm. This includes cloning the vllm repo, installing all wheels and test dependencies, and setting env vars
"""
params = VllmTestParameters()
logger.info("Display VllmTestParameters %s", params)
self._set_envs(params)
clone_vllm(dst=self.work_directory)
self.cp_torch_cleaning_script(params)
with working_directory(self.work_directory):
remove_dir(Path("vllm"))
self._install_wheels(params)
self._install_dependencies()
# verify the torch packages are not overridden by test dependencies
check_versions()
def run(self):
"""
main function to run vllm test
"""
self.prepare()
try:
with working_directory(self.work_directory):
if self.test_type == TestInputType.TEST_PLAN:
if self.num_shards > 1:
run_test_plan(
self.test_plan,
"vllm",
sample_vllm_test_library(),
self.shard_id,
self.num_shards,
)
else:
run_test_plan(
self.test_plan, "vllm", sample_vllm_test_library()
)
else:
raise ValueError(f"Unknown test type {self.test_type}")
finally:
# double check the torch packages are not overridden by other packages
check_versions()
def cp_torch_cleaning_script(self, params: VllmTestParameters):
script = get_path(params.cleaning_script, resolve=True)
vllm_script = Path(f"./{self.work_directory}/use_existing_torch.py")
copy(script, vllm_script)
def _install_wheels(self, params: VllmTestParameters):
logger.info("Running vllm test with inputs: %s", params)
if not pkg_exists("torch"):
# install torch from local whls if it's not installed yet.
torch_p = f"{str(params.torch_whls_path)}/{self.TORCH_WHL_PATH_REGEX}"
pip_install_first_match(torch_p, self.TORCH_WHL_EXTRA)
torch_whls_path = [
f"{str(params.torch_whls_path)}/{whl_path}"
for whl_path in self.TORCH_ADDITIONAL_WHLS_REGEX
]
for torch_whl in torch_whls_path:
pip_install_first_match(torch_whl)
logger.info("Done. Installed torch and other torch-related wheels ")
logger.info("Installing vllm wheels")
vllm_whls_path = [
f"{str(params.vllm_whls_path)}/{whl_path}"
for whl_path in self.VLLM_TEST_WHLS_REGEX
]
for vllm_whl in vllm_whls_path:
pip_install_first_match(vllm_whl)
logger.info("Done. Installed vllm wheels")
def _install_test_dependencies(self):
"""
This method replaces torch dependencies with local torch wheel info in the
requirements/test.in file from the vllm repo, then generates test.txt at
runtime
"""
logger.info("generate test.txt from requirements/test.in with local torch whls")
preprocess_test_in()
copy("requirements/test.txt", "snapshot_constraint.txt")
run_command(
f"{sys.executable} -m uv pip compile requirements/test.in "
"-o test.txt "
"--index-strategy unsafe-best-match "
"--constraint snapshot_constraint.txt "
"--torch-backend cu128"
)
pip_install_packages(requirements="test.txt", prefer_uv=True)
logger.info("Done. installed requirements for test dependencies")
def _install_dependencies(self):
pip_install_packages(packages=["-e", "tests/vllm_test_utils"], prefer_uv=True)
pip_install_packages(packages=["hf_transfer"], prefer_uv=True)
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
# using script from vllm repo to remove all torch packages from requirements txt
run_python("use_existing_torch.py")
# install common packages
for requirements in ["requirements/common.txt", "requirements/build.txt"]:
pip_install_packages(
requirements=requirements,
prefer_uv=True,
)
# install test packages
self._install_test_dependencies()
def _set_envs(self, inputs: VllmTestParameters):
os.environ["TORCH_CUDA_ARCH_LIST"] = inputs.torch_cuda_arch_list
if not validate_cuda(get_env("TORCH_CUDA_ARCH_LIST")):
logger.warning(
"Missing supported TORCH_CUDA_ARCH_LIST. "
"Currently support TORCH_CUDA_ARCH_LIST env var "
"with supported arch [8.0, 8.9, 9.0]"
)
os.environ["HF_TOKEN"] = os.getenv("VLLM_TEST_HUGGING_FACE_TOKEN", "")
if not get_env("HF_TOKEN"):
raise ValueError(
"missing required HF_TOKEN, please set VLLM_TEST_HUGGING_FACE_TOKEN env var"
)
if not get_env("TORCH_CUDA_ARCH_LIST"):
raise ValueError(
"missing required TORCH_CUDA_ARCH_LIST, please set TORCH_CUDA_ARCH_LIST env var"
)
def preprocess_test_in(
target_file: str = "requirements/test.in", additional_packages: Iterable[str] = ()
):
"""
This modifies target_file in place in the vllm work directory.
It removes torch and other unwanted packages from target_file and replaces them with
local torch wheel entries in the format "$WHEEL_PACKAGE_NAME @ file://<LOCAL_PATH>"
"""
additional_package_to_move = list(additional_packages or ())
pkgs_to_remove = [
"torch",
"torchvision",
"torchaudio",
"xformers",
"mamba_ssm",
] + additional_package_to_move
# Read current requirements
target_path = Path(target_file)
lines = target_path.read_text().splitlines()
pkgs_to_add = []
# Remove lines starting with the package names (==, @, >=) — case-insensitive
pattern = re.compile(rf"^({'|'.join(pkgs_to_remove)})\s*(==|@|>=)", re.IGNORECASE)
kept_lines = [line for line in lines if not pattern.match(line)]
# Get local installed torch/vision/audio from pip freeze
# This is hacky, but it works
pip_freeze = subprocess.check_output(["pip", "freeze"], text=True)
header_lines = [
line
for line in pip_freeze.splitlines()
if re.match(
r"^(torch|torchvision|torchaudio)\s*@\s*file://", line, re.IGNORECASE
)
]
# Write back: header_lines + blank + kept_lines
out_lines = header_lines + [""] + kept_lines
if pkgs_to_add:
out_lines += [""] + pkgs_to_add
out = "\n".join(out_lines) + "\n"
target_path.write_text(out)
logger.info("[INFO] Updated %s", target_file)
def validate_cuda(value: str) -> bool:
VALID_VALUES = {"8.0", "8.9", "9.0"}
return all(v in VALID_VALUES for v in value.split())
def check_versions():
"""
check installed packages version
"""
logger.info("Double check installed packages")
patterns = ["torch", "xformers", "torchvision", "torchaudio", "vllm"]
for pkg in patterns:
pkg_exists(pkg)
logger.info("Done. checked installed packages")

View File

@ -5,7 +5,6 @@ import logging
from cli.build_cli.register_build import register_build_commands from cli.build_cli.register_build import register_build_commands
from cli.lib.common.logger import setup_logging from cli.lib.common.logger import setup_logging
from cli.test_cli.register_test import register_test_commands
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -21,7 +20,6 @@ def main():
# registers second-level subcommands # registers second-level subcommands
register_build_commands(subparsers) register_build_commands(subparsers)
register_test_commands(subparsers)
# parse args after all options are registered # parse args after all options are registered
args = parser.parse_args() args = parser.parse_args()

View File

@ -1,62 +0,0 @@
import argparse
import logging
from cli.lib.common.cli_helper import register_targets, RichHelp, TargetSpec
from cli.lib.core.vllm.vllm_test import VllmTestRunner
logger = logging.getLogger(__name__)
# Maps targets to their argparse configuration and runner
# it adds a new target to the path: python -m cli.run test external {target} with the test runner
_TARGETS: dict[str, TargetSpec] = {
"vllm": {
"runner": VllmTestRunner,
"help": "test vLLM with pytorch main",
}
# add yours ...
}
def common_args(parser: argparse.ArgumentParser) -> None:
"""
Add common CLI arguments to the given parser.
"""
parser.add_argument(
"--shard-id",
type=int,
default=1,
help="a shard id to run, e.g. '0,1,2,3'",
)
parser.add_argument(
"--num-shards",
type=int,
default=1,
help="a number of shards to run, e.g. '4'",
)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
"-tp",
"--test-plan",
type=str,
help="a pre-defined test plan to run, e.g. 'basic_correctness_test'",
)
def register_test_commands(subparsers: argparse._SubParsersAction) -> None:
test_parser = subparsers.add_parser(
"test",
help="test related commands",
formatter_class=RichHelp,
)
test_subparsers = test_parser.add_subparsers(dest="test_command", required=True)
overview = "\n".join(
f" {name:12} {spec.get('help', '')}" for name, spec in _TARGETS.items()
)
external_parser = test_subparsers.add_parser(
"external",
help="Test external targets",
description="Test third-party targets.\n\nAvailable targets:\n" + overview,
formatter_class=RichHelp,
)
register_targets(external_parser, _TARGETS, common_args=common_args)

View File

@ -6,7 +6,6 @@ dependencies = [
"GitPython==3.1.45", "GitPython==3.1.45",
"docker==7.1.0", "docker==7.1.0",
"pytest==7.3.2", "pytest==7.3.2",
"uv==0.8.6"
] ]
[tool.setuptools] [tool.setuptools]

View File

@ -1,185 +0,0 @@
# tests/test_run_test_plan.py
import importlib
from contextlib import nullcontext
from types import SimpleNamespace
from unittest.mock import MagicMock
import pytest
MOD = "cli.lib.core.vllm.lib"
# We import inside tests so the MOD override above applies everywhere
run_test_plan_import_path = f"{MOD}.run_test_plan"
def _get_cmd(c):
# Support both kwargs and positional args
return c.kwargs.get("cmd", c.args[0] if c.args else None)
def _get_check(c):
if "check" in c.kwargs:
return c.kwargs["check"]
# If positional, assume second arg is 'check' when present; default False
return c.args[1] if len(c.args) > 1 else False
@pytest.fixture
def patch_module(monkeypatch):
"""
Patch helpers ('pip_install_packages', 'temp_environ', 'working_directory',
'run_command', 'logger') inside the target module and expose them.
"""
module = importlib.import_module(MOD)
# Create fakes/mocks
pip_install_packages = MagicMock(name="pip_install_packages")
run_command = MagicMock(name="run_command", return_value=0)
# temp_environ / working_directory: record calls but act as context managers
temp_calls: list[dict] = []
workdir_calls: list[str] = []
def fake_working_directory(path: str):
workdir_calls.append(path)
return nullcontext()
def fake_temp_env(env_map: dict[str, str]):
temp_calls.append(env_map)
return nullcontext()
logger = SimpleNamespace(
info=MagicMock(name="logger.info"),
error=MagicMock(name="logger.error"),
)
# Apply patches (raise if attribute doesn't exist)
monkeypatch.setattr(
module, "pip_install_packages", pip_install_packages, raising=True
)
monkeypatch.setattr(module, "run_command", run_command, raising=True)
monkeypatch.setattr(
module, "working_directory", fake_working_directory, raising=True
)
monkeypatch.setattr(module, "temp_environ", fake_temp_env, raising=True)
monkeypatch.setattr(module, "logger", logger, raising=True)
return SimpleNamespace(
module=module,
run_test_plan=module.run_test_plan, # expose to avoid getattr("constant") (Ruff B009)
pip_install_packages=pip_install_packages,
run_command=run_command,
temp_calls=temp_calls,
workdir_calls=workdir_calls,
logger=logger,
)
def test_success_runs_all_steps_and_uses_env_and_workdir(monkeypatch, patch_module):
run_test_plan = patch_module.run_test_plan
tests_map = {
"basic": {
"title": "Basic suite",
"package_install": [],
"working_directory": "tests",
"env_vars": {"GLOBAL_FLAG": "1"},
"steps": [
"export A=x && pytest -q",
"export B=y && pytest -q tests/unit",
],
}
}
# One exit code per step (export + two pytest)
patch_module.run_command.side_effect = [0, 0, 0]
run_test_plan("basic", "cpu", tests_map)
calls = patch_module.run_command.call_args_list
cmds = [_get_cmd(c) for c in calls]
checks = [_get_check(c) for c in calls]
assert cmds == [
"export A=x && pytest -q",
"export B=y && pytest -q tests/unit",
]
assert all(chk is False for chk in checks)
assert patch_module.workdir_calls == ["tests"]
assert patch_module.temp_calls == [{"GLOBAL_FLAG": "1"}]
def test_installs_packages_when_present(monkeypatch, patch_module):
run_test_plan = patch_module.module.run_test_plan
tests_map = {
"with_pkgs": {
"title": "Needs deps",
"package_install": ["timm==1.0.0", "flash-attn"],
"steps": ["pytest -q"],
}
}
patch_module.run_command.return_value = 0
run_test_plan("with_pkgs", "gpu", tests_map)
patch_module.pip_install_packages.assert_called_once_with(
packages=["timm==1.0.0", "flash-attn"],
prefer_uv=True,
)
def test_raises_on_missing_plan(patch_module):
run_test_plan = patch_module.module.run_test_plan
with pytest.raises(RuntimeError) as ei:
run_test_plan("nope", "cpu", tests_map={})
assert "test nope not found" in str(ei.value)
def test_aggregates_failures_and_raises(monkeypatch, patch_module):
run_test_plan = patch_module.module.run_test_plan
tests_map = {
"mix": {
"title": "Some pass some fail",
"steps": [
"pytest test_a.py", # 0 → pass
"pytest test_b.py", # 1 → fail
"pytest test_c.py", # 2 → fail
],
}
}
# Simulate pass, fail, fail
patch_module.run_command.side_effect = [0, 1, 2]
with pytest.raises(RuntimeError) as ei:
run_test_plan("mix", "cpu", tests_map)
msg = str(ei.value)
assert "2 pytest runs failed" in msg
# Ensure logger captured failed tests list
patch_module.logger.error.assert_called_once()
# And we attempted all three commands
assert patch_module.run_command.call_count == 3
def test_custom_working_directory_used(patch_module):
run_test_plan = patch_module.module.run_test_plan
tests_map = {
"customwd": {
"title": "Custom wd",
"working_directory": "examples/ci",
"steps": ["pytest -q"],
}
}
patch_module.run_command.return_value = 0
run_test_plan("customwd", "cpu", tests_map)
assert patch_module.workdir_calls == ["examples/ci"]

View File

@ -1,143 +0,0 @@
import os
import tempfile
import unittest
from pathlib import Path
from cli.lib.common.utils import temp_environ, working_directory
class EnvIsolatedTestCase(unittest.TestCase):
"""Base class that snapshots os.environ and CWD for isolation."""
def setUp(self):
self._env_backup = dict(os.environ)
# Snapshot/repair CWD if it's gone
try:
self._cwd_backup = os.getcwd()
except FileNotFoundError:
# If CWD no longer exists, switch to a safe place and record that
self._cwd_backup = tempfile.gettempdir()
os.chdir(self._cwd_backup)
# Create a temporary directory for the test to run in
self._temp_dir = tempfile.mkdtemp()
os.chdir(self._temp_dir)
def tearDown(self):
import shutil
# Restore cwd first (before cleaning up temp dir)
try:
os.chdir(self._cwd_backup)
except OSError:
os.chdir(tempfile.gettempdir())
# Clean up temporary directory
try:
shutil.rmtree(self._temp_dir, ignore_errors=True)
except Exception:
pass # Ignore cleanup errors
# Restore env
to_del = set(os.environ.keys()) - set(self._env_backup.keys())
for k in to_del:
os.environ.pop(k, None)
for k, v in self._env_backup.items():
os.environ[k] = v
class TestTempEnviron(EnvIsolatedTestCase):
def test_sets_and_restores_new_var(self):
var = "TEST_TMP_ENV_NEW"
self.assertNotIn(var, os.environ)
with temp_environ({var: "123"}):
self.assertEqual(os.environ[var], "123")
self.assertNotIn(var, os.environ) # removed after exit
def test_overwrites_and_restores_existing_var(self):
var = "TEST_TMP_ENV_OVERWRITE"
os.environ[var] = "orig"
with temp_environ({var: "override"}):
self.assertEqual(os.environ[var], "override")
self.assertEqual(os.environ[var], "orig") # restored
def test_multiple_vars_and_missing_cleanup(self):
v1, v2 = "TEST_ENV_V1", "TEST_ENV_V2"
os.environ.pop(v1, None)
os.environ[v2] = "keep"
with temp_environ({v1: "a", v2: "b"}):
self.assertEqual(os.environ[v1], "a")
self.assertEqual(os.environ[v2], "b")
self.assertNotIn(v1, os.environ) # newly-added -> removed
self.assertEqual(os.environ[v2], "keep") # pre-existing -> restored
def test_restores_even_on_exception(self):
var = "TEST_TMP_ENV_EXCEPTION"
self.assertNotIn(var, os.environ)
with self.assertRaises(RuntimeError):
with temp_environ({var: "x"}):
self.assertEqual(os.environ[var], "x")
raise RuntimeError("boom")
self.assertNotIn(var, os.environ) # removed after exception
class TestWorkingDirectory(EnvIsolatedTestCase):
def test_changes_and_restores(self):
start = Path.cwd()
with tempfile.TemporaryDirectory() as td:
target = Path(td) / "wd"
target.mkdir()
with working_directory(str(target)):
self.assertEqual(Path.cwd().resolve(), target.resolve())
self.assertEqual(Path.cwd(), start)
def test_noop_when_empty_path(self):
start = Path.cwd()
with working_directory(""):
self.assertEqual(Path.cwd(), start)
self.assertEqual(Path.cwd(), start)
def test_restores_on_exception(self):
start = Path.cwd()
with tempfile.TemporaryDirectory() as td:
target = Path(td) / "wd_exc"
target.mkdir()
with self.assertRaises(ValueError):
with working_directory(str(target)):
# Normalize both sides to handle /var -> /private/var
self.assertEqual(Path.cwd().resolve(), target.resolve())
raise ValueError("boom")
self.assertEqual(Path.cwd().resolve(), start.resolve())
def test_raises_for_missing_dir(self):
start = Path.cwd()
with tempfile.TemporaryDirectory() as td:
missing = Path(td) / "does_not_exist"
with self.assertRaises(FileNotFoundError):
# os.chdir should raise before yielding
with working_directory(str(missing)):
pass
self.assertEqual(Path.cwd(), start)
if __name__ == "__main__":
unittest.main(verbosity=2)
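The suite above pins down the expected contract of the two helpers; a minimal sketch of context managers that would satisfy it (the real implementations in cli.lib.common.utils may differ):

import os
from contextlib import contextmanager

@contextmanager
def temp_environ(env: dict):
    # snapshot only the touched keys; restore or remove them on exit, even on error
    saved = {k: os.environ.get(k) for k in env}
    os.environ.update(env)
    try:
        yield
    finally:
        for k, v in saved.items():
            if v is None:
                os.environ.pop(k, None)
            else:
                os.environ[k] = v

@contextmanager
def working_directory(path: str):
    # empty path is a no-op; os.chdir raises before yielding for missing dirs
    prev = os.getcwd()
    if path:
        os.chdir(path)
    try:
        yield
    finally:
        os.chdir(prev)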

View File

@ -4,15 +4,12 @@ import unittest
from pathlib import Path from pathlib import Path
from unittest.mock import MagicMock, patch from unittest.mock import MagicMock, patch
import cli.lib.core.vllm.vllm_build as vllm_build import cli.lib.core.vllm as vllm
_VLLM_BUILD_MODULE = "cli.lib.core.vllm.vllm_build"
class TestVllmBuildParameters(unittest.TestCase): class TestVllmBuildParameters(unittest.TestCase):
@patch(f"{_VLLM_BUILD_MODULE}.local_image_exists", return_value=True) @patch("cli.lib.core.vllm.local_image_exists", return_value=True)
@patch(f"{_VLLM_BUILD_MODULE}.is_path_exist", return_value=True) @patch("cli.lib.core.vllm.is_path_exist", return_value=True)
@patch( @patch(
"cli.lib.common.envs_helper.env_path_optional", "cli.lib.common.envs_helper.env_path_optional",
side_effect=lambda name, default=None, resolve=True: { side_effect=lambda name, default=None, resolve=True: {
@ -37,13 +34,13 @@ class TestVllmBuildParameters(unittest.TestCase):
def test_params_success_normalizes_and_validates( def test_params_success_normalizes_and_validates(
self, mock_env_path, mock_is_path, mock_local_img self, mock_env_path, mock_is_path, mock_local_img
): ):
params = vllm_build.VllmBuildParameters() params = vllm.VllmBuildParameters()
self.assertEqual(params.torch_whls_path, Path("/abs/dist")) self.assertEqual(params.torch_whls_path, Path("/abs/dist"))
self.assertEqual(params.dockerfile_path, Path("/abs/vllm/Dockerfile")) self.assertEqual(params.dockerfile_path, Path("/abs/vllm/Dockerfile"))
self.assertEqual(params.output_dir, Path("/abs/shared")) self.assertEqual(params.output_dir, Path("/abs/shared"))
self.assertEqual(params.base_image, "my/image:tag") self.assertEqual(params.base_image, "my/image:tag")
@patch(f"{_VLLM_BUILD_MODULE}.is_path_exist", return_value=False) @patch("cli.lib.core.vllm.is_path_exist", return_value=False)
@patch.dict( @patch.dict(
os.environ, {"USE_TORCH_WHEEL": "1", "TORCH_WHEELS_PATH": "dist"}, clear=True os.environ, {"USE_TORCH_WHEEL": "1", "TORCH_WHEELS_PATH": "dist"}, clear=True
) )
@ -51,14 +48,14 @@ class TestVllmBuildParameters(unittest.TestCase):
with tempfile.TemporaryDirectory() as td: with tempfile.TemporaryDirectory() as td:
os.chdir(td) os.chdir(td)
with self.assertRaises(ValueError) as cm: with self.assertRaises(ValueError) as cm:
vllm_build.VllmBuildParameters( vllm.VllmBuildParameters(
use_local_base_image=False, use_local_base_image=False,
use_local_dockerfile=False, use_local_dockerfile=False,
) )
err = cm.exception err = cm.exception
self.assertIn("TORCH_WHEELS_PATH", str(err)) self.assertIn("TORCH_WHEELS_PATH", str(err))
@patch(f"{_VLLM_BUILD_MODULE}.local_image_exists", return_value=False) @patch("cli.lib.core.vllm.local_image_exists", return_value=False)
@patch.dict( @patch.dict(
os.environ, {"USE_LOCAL_BASE_IMAGE": "1", "BASE_IMAGE": "img:tag"}, clear=True os.environ, {"USE_LOCAL_BASE_IMAGE": "1", "BASE_IMAGE": "img:tag"}, clear=True
) )
@ -66,14 +63,14 @@ class TestVllmBuildParameters(unittest.TestCase):
with tempfile.TemporaryDirectory() as td: with tempfile.TemporaryDirectory() as td:
os.chdir(td) os.chdir(td)
with self.assertRaises(ValueError) as cm: with self.assertRaises(ValueError) as cm:
vllm_build.VllmBuildParameters( vllm.VllmBuildParameters(
use_torch_whl=False, use_torch_whl=False,
use_local_dockerfile=False, use_local_dockerfile=False,
) )
err = cm.exception err = cm.exception
self.assertIn("BASE_IMAGE", str(err)) self.assertIn("BASE_IMAGE", str(err))
@patch(f"{_VLLM_BUILD_MODULE}.is_path_exist", return_value=False) @patch("cli.lib.core.vllm.is_path_exist", return_value=False)
@patch.dict( @patch.dict(
os.environ, os.environ,
{"USE_LOCAL_DOCKERFILE": "1", "DOCKERFILE_PATH": "Dockerfile"}, {"USE_LOCAL_DOCKERFILE": "1", "DOCKERFILE_PATH": "Dockerfile"},
@ -83,14 +80,14 @@ class TestVllmBuildParameters(unittest.TestCase):
with tempfile.TemporaryDirectory() as td: with tempfile.TemporaryDirectory() as td:
os.chdir(td) os.chdir(td)
with self.assertRaises(ValueError) as cm: with self.assertRaises(ValueError) as cm:
vllm_build.VllmBuildParameters( vllm.VllmBuildParameters(
use_torch_whl=False, use_torch_whl=False,
use_local_base_image=False, use_local_base_image=False,
) )
err = cm.exception err = cm.exception
self.assertIn("DOCKERFILE_PATH", str(err)) self.assertIn("DOCKERFILE_PATH", str(err))
@patch(f"{_VLLM_BUILD_MODULE}.is_path_exist", return_value=False) @patch("cli.lib.core.vllm.is_path_exist", return_value=False)
@patch.dict( @patch.dict(
os.environ, os.environ,
{"OUTPUT_DIR": ""}, {"OUTPUT_DIR": ""},
@ -98,13 +95,14 @@ class TestVllmBuildParameters(unittest.TestCase):
) )
def test_params_missing_output_dir(self, _is_path): def test_params_missing_output_dir(self, _is_path):
with self.assertRaises(FileNotFoundError): with self.assertRaises(FileNotFoundError):
vllm_build.VllmBuildParameters() vllm.VllmBuildParameters()
class TestBuildCmdAndRun(unittest.TestCase): class TestBuildCmdAndRun(unittest.TestCase):
@patch(f"{_VLLM_BUILD_MODULE}.local_image_exists", return_value=True) @patch("cli.lib.core.vllm.local_image_exists", return_value=True)
def test_generate_docker_build_cmd_includes_bits(self, _exists): def test_generate_docker_build_cmd_includes_bits(self, _exists):
runner = vllm_build.VllmBuildRunner() runner = vllm.VllmBuildRunner()
# Craft inputs that simulate a prepared build
inputs = MagicMock() inputs = MagicMock()
inputs.output_dir = Path("/abs/out") inputs.output_dir = Path("/abs/out")
inputs.use_local_base_image = True inputs.use_local_base_image = True
@ -120,7 +118,7 @@ class TestBuildCmdAndRun(unittest.TestCase):
inputs.tag_name = "vllm-wheels" inputs.tag_name = "vllm-wheels"
cmd = runner._generate_docker_build_cmd(inputs) cmd = runner._generate_docker_build_cmd(inputs)
squashed = " ".join(cmd.split()) squashed = " ".join(cmd.split()) # normalize whitespace for matching
self.assertIn("--output type=local,dest=/abs/out", squashed) self.assertIn("--output type=local,dest=/abs/out", squashed)
self.assertIn("-f docker/Dockerfile.nightly_torch", squashed) self.assertIn("-f docker/Dockerfile.nightly_torch", squashed)
@ -138,17 +136,18 @@ class TestBuildCmdAndRun(unittest.TestCase):
self.assertIn("--target export-wheels", squashed) self.assertIn("--target export-wheels", squashed)
self.assertIn("-t vllm-wheels", squashed) self.assertIn("-t vllm-wheels", squashed)
@patch(f"{_VLLM_BUILD_MODULE}.run_command") @patch("cli.lib.core.vllm.run_command")
@patch(f"{_VLLM_BUILD_MODULE}.ensure_dir_exists") @patch("cli.lib.core.vllm.ensure_dir_exists")
@patch(f"{_VLLM_BUILD_MODULE}.clone_vllm") @patch("cli.lib.core.vllm.clone_vllm")
@patch.object( @patch.object(
vllm_build.VllmBuildRunner, vllm.VllmBuildRunner,
"_generate_docker_build_cmd", "_generate_docker_build_cmd",
return_value="docker buildx ...", return_value="docker buildx ...",
) )
@patch.dict( @patch.dict(
os.environ, os.environ,
{ {
# Make __post_init__ validations pass cheaply
"USE_TORCH_WHEEL": "0", "USE_TORCH_WHEEL": "0",
"USE_LOCAL_BASE_IMAGE": "0", "USE_LOCAL_BASE_IMAGE": "0",
"USE_LOCAL_DOCKERFILE": "0", "USE_LOCAL_DOCKERFILE": "0",
@ -159,18 +158,24 @@ class TestBuildCmdAndRun(unittest.TestCase):
def test_run_calls_clone_prepare_and_build( def test_run_calls_clone_prepare_and_build(
self, mock_gen, mock_clone, mock_ensure, mock_run self, mock_gen, mock_clone, mock_ensure, mock_run
): ):
# Stub parameters instance so we avoid FS/Docker accesses in run()
params = MagicMock() params = MagicMock()
params.output_dir = Path("shared") params.output_dir = Path("shared")
params.use_local_dockerfile = False params.use_local_dockerfile = False
params.use_torch_whl = False params.use_torch_whl = False
with patch(f"{_VLLM_BUILD_MODULE}.VllmBuildParameters", return_value=params): with patch("cli.lib.core.vllm.VllmBuildParameters", return_value=params):
runner = vllm_build.VllmBuildRunner() runner = vllm.VllmBuildRunner()
runner.run() runner.run()
mock_clone.assert_called_once() mock_clone.assert_called_once()
mock_ensure.assert_called_once_with(Path("shared")) mock_ensure.assert_called_once_with(Path("shared"))
mock_gen.assert_called_once_with(params) mock_gen.assert_called_once_with(params)
mock_run.assert_called_once() mock_run.assert_called_once()
# ensure we run in vllm workdir
_, kwargs = mock_run.call_args _, kwargs = mock_run.call_args
assert kwargs.get("cwd") == "vllm" assert kwargs.get("cwd") == "vllm"
if __name__ == "__main__":
unittest.main()

View File

@ -16,7 +16,6 @@ DOCKER_RUN = set -eou pipefail; ${DOCKER_CMD} run --rm -i \
magma/build_magma.sh magma/build_magma.sh
.PHONY: all .PHONY: all
all: magma-cuda130
all: magma-cuda129 all: magma-cuda129
all: magma-cuda128 all: magma-cuda128
all: magma-cuda126 all: magma-cuda126
@ -26,12 +25,6 @@ clean:
$(RM) -r magma-* $(RM) -r magma-*
$(RM) -r output $(RM) -r output
.PHONY: magma-cuda130
magma-cuda130: DESIRED_CUDA := 13.0
magma-cuda130: CUDA_ARCH_LIST := -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90 -gencode arch=compute_100,code=sm_100 -gencode arch=compute_120,code=sm_120
magma-cuda130:
$(DOCKER_RUN)
.PHONY: magma-cuda129 .PHONY: magma-cuda129
magma-cuda129: DESIRED_CUDA := 12.9 magma-cuda129: DESIRED_CUDA := 12.9
magma-cuda129: CUDA_ARCH_LIST += -gencode arch=compute_100,code=sm_100 -gencode arch=compute_120,code=sm_120 magma-cuda129: CUDA_ARCH_LIST += -gencode arch=compute_100,code=sm_100 -gencode arch=compute_120,code=sm_120

View File

@ -28,7 +28,6 @@ pushd ${PACKAGE_DIR}/magma-${MAGMA_VERSION}
patch < ${PACKAGE_FILES}/CMake.patch patch < ${PACKAGE_FILES}/CMake.patch
patch < ${PACKAGE_FILES}/cmakelists.patch patch < ${PACKAGE_FILES}/cmakelists.patch
patch -p0 < ${PACKAGE_FILES}/thread_queue.patch patch -p0 < ${PACKAGE_FILES}/thread_queue.patch
patch -p1 < ${PACKAGE_FILES}/cuda13.patch
patch -p1 < ${PACKAGE_FILES}/getrf_shfl.patch patch -p1 < ${PACKAGE_FILES}/getrf_shfl.patch
patch -p1 < ${PACKAGE_FILES}/getrf_nbparam.patch patch -p1 < ${PACKAGE_FILES}/getrf_nbparam.patch
# The build.sh script expects to be executed from the sources root folder # The build.sh script expects to be executed from the sources root folder
@ -38,7 +37,6 @@ popd
# Package recipe, license and tarball # Package recipe, license and tarball
# Folder and package name are backward compatible for the build workflow # Folder and package name are backward compatible for the build workflow
cp ${PACKAGE_FILES}/build.sh ${PACKAGE_RECIPE}/build.sh cp ${PACKAGE_FILES}/build.sh ${PACKAGE_RECIPE}/build.sh
cp ${PACKAGE_FILES}/cuda13.patch ${PACKAGE_RECIPE}/cuda13.patch
cp ${PACKAGE_FILES}/thread_queue.patch ${PACKAGE_RECIPE}/thread_queue.patch cp ${PACKAGE_FILES}/thread_queue.patch ${PACKAGE_RECIPE}/thread_queue.patch
cp ${PACKAGE_FILES}/cmakelists.patch ${PACKAGE_RECIPE}/cmakelists.patch cp ${PACKAGE_FILES}/cmakelists.patch ${PACKAGE_RECIPE}/cmakelists.patch
cp ${PACKAGE_FILES}/getrf_shfl.patch ${PACKAGE_RECIPE}/getrf_shfl.patch cp ${PACKAGE_FILES}/getrf_shfl.patch ${PACKAGE_RECIPE}/getrf_shfl.patch

View File

@ -1,26 +0,0 @@
diff --git a/interface_cuda/interface.cpp b/interface_cuda/interface.cpp
index 73fed1b20..e77519bfe 100644
--- a/interface_cuda/interface.cpp
+++ b/interface_cuda/interface.cpp
@@ -438,14 +438,20 @@ magma_print_environment()
cudaDeviceProp prop;
err = cudaGetDeviceProperties( &prop, dev );
check_error( err );
+ #ifdef MAGMA_HAVE_CUDA
+#if CUDA_VERSION < 13000
printf( "%% device %d: %s, %.1f MHz clock, %.1f MiB memory, capability %d.%d\n",
dev,
prop.name,
prop.clockRate / 1000.,
+#else
+ printf( "%% device %d: %s, ??? MHz clock, %.1f MiB memory, capability %d.%d\n",
+ dev,
+ prop.name,
+#endif
prop.totalGlobalMem / (1024.*1024.),
prop.major,
prop.minor );
- #ifdef MAGMA_HAVE_CUDA
int arch = prop.major*100 + prop.minor*10;
if ( arch < MAGMA_CUDA_ARCH_MIN ) {
printf("\n"

View File

@ -66,9 +66,6 @@ case ${CUDA_VERSION} in
TORCH_CUDA_ARCH_LIST="7.5;8.0;9.0;10.0;12.0+PTX" TORCH_CUDA_ARCH_LIST="7.5;8.0;9.0;10.0;12.0+PTX"
fi fi
;; ;;
13.0)
TORCH_CUDA_ARCH_LIST="7.5;8.0;8.6;9.0;10.0;12.0+PTX"
;;
12.6) 12.6)
TORCH_CUDA_ARCH_LIST="5.0;6.0;7.0;7.5;8.0;8.6;9.0" TORCH_CUDA_ARCH_LIST="5.0;6.0;7.0;7.5;8.0;8.6;9.0"
;; ;;
@ -113,18 +110,13 @@ DEPS_SONAME=(
) )
# CUDA_VERSION 12.*, 13.* # CUDA_VERSION 12.6, 12.8, 12.9
if [[ $CUDA_VERSION == 12* || $CUDA_VERSION == 13* ]]; then if [[ $CUDA_VERSION == 12* ]]; then
export USE_STATIC_CUDNN=0 export USE_STATIC_CUDNN=0
# Try parallelizing nvcc as well # Try parallelizing nvcc as well
TORCH_NVCC_FLAGS="-Xfatbin -compress-all --threads 2" export TORCH_NVCC_FLAGS="-Xfatbin -compress-all --threads 2"
# Compress the fatbin with -compress-mode=size for CUDA 13
if [[ $CUDA_VERSION == 13* ]]; then
export TORCH_NVCC_FLAGS="$TORCH_NVCC_FLAGS -compress-mode=size"
fi
if [[ -z "$PYTORCH_EXTRA_INSTALL_REQUIREMENTS" ]]; then if [[ -z "$PYTORCH_EXTRA_INSTALL_REQUIREMENTS" ]]; then
echo "Bundling with cudnn and cublas." echo "Bundling with cudnn and cublas."
DEPS_LIST+=( DEPS_LIST+=(
"/usr/local/cuda/lib64/libcudnn_adv.so.9" "/usr/local/cuda/lib64/libcudnn_adv.so.9"
"/usr/local/cuda/lib64/libcudnn_cnn.so.9" "/usr/local/cuda/lib64/libcudnn_cnn.so.9"
@ -134,11 +126,16 @@ if [[ $CUDA_VERSION == 12* || $CUDA_VERSION == 13* ]]; then
"/usr/local/cuda/lib64/libcudnn_engines_precompiled.so.9" "/usr/local/cuda/lib64/libcudnn_engines_precompiled.so.9"
"/usr/local/cuda/lib64/libcudnn_heuristic.so.9" "/usr/local/cuda/lib64/libcudnn_heuristic.so.9"
"/usr/local/cuda/lib64/libcudnn.so.9" "/usr/local/cuda/lib64/libcudnn.so.9"
"/usr/local/cuda/lib64/libcublas.so.12"
"/usr/local/cuda/lib64/libcublasLt.so.12"
"/usr/local/cuda/lib64/libcusparseLt.so.0" "/usr/local/cuda/lib64/libcusparseLt.so.0"
"/usr/local/cuda/lib64/libcudart.so.12"
"/usr/local/cuda/lib64/libnvrtc.so.12"
"/usr/local/cuda/lib64/libnvrtc-builtins.so" "/usr/local/cuda/lib64/libnvrtc-builtins.so"
"/usr/local/cuda/lib64/libcufile.so.0" "/usr/local/cuda/lib64/libcufile.so.0"
"/usr/local/cuda/lib64/libcufile_rdma.so.1" "/usr/local/cuda/lib64/libcufile_rdma.so.1"
"/usr/local/cuda/lib64/libnvshmem_host.so.3" "/usr/local/cuda/lib64/libnvshem_host.so.3"
"/usr/local/cuda/extras/CUPTI/lib64/libcupti.so.12"
"/usr/local/cuda/extras/CUPTI/lib64/libnvperf_host.so" "/usr/local/cuda/extras/CUPTI/lib64/libnvperf_host.so"
) )
DEPS_SONAME+=( DEPS_SONAME+=(
@ -150,83 +147,42 @@ if [[ $CUDA_VERSION == 12* || $CUDA_VERSION == 13* ]]; then
"libcudnn_engines_precompiled.so.9" "libcudnn_engines_precompiled.so.9"
"libcudnn_heuristic.so.9" "libcudnn_heuristic.so.9"
"libcudnn.so.9" "libcudnn.so.9"
"libcublas.so.12"
"libcublasLt.so.12"
"libcusparseLt.so.0" "libcusparseLt.so.0"
"libcudart.so.12"
"libnvrtc.so.12"
"libnvrtc-builtins.so" "libnvrtc-builtins.so"
"libnvshmem_host.so.3" "libnvshmem_host.so.3"
"libcufile.so.0" "libcufile.so.0"
"libcufile_rdma.so.1" "libcufile_rdma.so.1"
"libcupti.so.12"
"libnvperf_host.so" "libnvperf_host.so"
) )
# Add libnvToolsExt only if CUDA version is not 12.9 # Add libnvToolsExt only if CUDA version is not 12.9
if [[ $CUDA_VERSION == 13* ]]; then if [[ $CUDA_VERSION != 12.9* ]]; then
DEPS_LIST+=( DEPS_LIST+=("/usr/local/cuda/lib64/libnvToolsExt.so.1")
"/usr/local/cuda/lib64/libcublas.so.13" DEPS_SONAME+=("libnvToolsExt.so.1")
"/usr/local/cuda/lib64/libcublasLt.so.13"
"/usr/local/cuda/lib64/libcudart.so.13"
"/usr/local/cuda/lib64/libnvrtc.so.13"
"/usr/local/cuda/extras/CUPTI/lib64/libcupti.so.13"
"/usr/local/cuda/lib64/libibverbs.so.1"
"/usr/local/cuda/lib64/librdmacm.so.1"
"/usr/local/cuda/lib64/libmlx5.so.1"
"/usr/local/cuda/lib64/libnl-3.so.200"
"/usr/local/cuda/lib64/libnl-route-3.so.200")
DEPS_SONAME+=(
"libcublas.so.13"
"libcublasLt.so.13"
"libcudart.so.13"
"libnvrtc.so.13"
"libcupti.so.13"
"libibverbs.so.1"
"librdmacm.so.1"
"libmlx5.so.1"
"libnl-3.so.200"
"libnl-route-3.so.200")
export USE_CUPTI_SO=1
export ATEN_STATIC_CUDA=0
export USE_CUDA_STATIC_LINK=0
export USE_CUFILE=0
else
DEPS_LIST+=(
"/usr/local/cuda/lib64/libnvToolsExt.so.1"
"/usr/local/cuda/lib64/libcublas.so.12"
"/usr/local/cuda/lib64/libcublasLt.so.12"
"/usr/local/cuda/lib64/libcudart.so.12"
"/usr/local/cuda/lib64/libnvrtc.so.12"
"/usr/local/cuda/extras/CUPTI/lib64/libcupti.so.12")
DEPS_SONAME+=(
"libnvToolsExt.so.1"
"libcublas.so.12"
"libcublasLt.so.12"
"libcudart.so.12"
"libnvrtc.so.12"
"libcupti.so.12")
fi fi
else else
echo "Using nvidia libs from pypi." echo "Using nvidia libs from pypi."
CUDA_RPATHS=( CUDA_RPATHS=(
'$ORIGIN/../../nvidia/cudnn/lib'
'$ORIGIN/../../nvidia/nvshmem/lib'
'$ORIGIN/../../nvidia/nccl/lib'
'$ORIGIN/../../nvidia/cusparselt/lib'
)
if [[ $CUDA_VERSION == 13* ]]; then
CUDA_RPATHS+=('$ORIGIN/../../nvidia/cu13/lib')
else
CUDA_RPATHS+=(
'$ORIGIN/../../nvidia/cublas/lib' '$ORIGIN/../../nvidia/cublas/lib'
'$ORIGIN/../../nvidia/cuda_cupti/lib' '$ORIGIN/../../nvidia/cuda_cupti/lib'
'$ORIGIN/../../nvidia/cuda_nvrtc/lib' '$ORIGIN/../../nvidia/cuda_nvrtc/lib'
'$ORIGIN/../../nvidia/cuda_runtime/lib' '$ORIGIN/../../nvidia/cuda_runtime/lib'
'$ORIGIN/../../nvidia/cudnn/lib'
'$ORIGIN/../../nvidia/cufft/lib' '$ORIGIN/../../nvidia/cufft/lib'
'$ORIGIN/../../nvidia/curand/lib' '$ORIGIN/../../nvidia/curand/lib'
'$ORIGIN/../../nvidia/cusolver/lib' '$ORIGIN/../../nvidia/cusolver/lib'
'$ORIGIN/../../nvidia/cusparse/lib' '$ORIGIN/../../nvidia/cusparse/lib'
'$ORIGIN/../../nvidia/cusparselt/lib'
'$ORIGIN/../../cusparselt/lib' '$ORIGIN/../../cusparselt/lib'
'$ORIGIN/../../nvidia/nccl/lib'
'$ORIGIN/../../nvidia/nvshmem/lib'
'$ORIGIN/../../nvidia/nvtx/lib' '$ORIGIN/../../nvidia/nvtx/lib'
'$ORIGIN/../../nvidia/cufile/lib' '$ORIGIN/../../nvidia/cufile/lib'
) )
fi
CUDA_RPATHS=$(IFS=: ; echo "${CUDA_RPATHS[*]}") CUDA_RPATHS=$(IFS=: ; echo "${CUDA_RPATHS[*]}")
export C_SO_RPATH=$CUDA_RPATHS':$ORIGIN:$ORIGIN/lib' export C_SO_RPATH=$CUDA_RPATHS':$ORIGIN:$ORIGIN/lib'
export LIB_SO_RPATH=$CUDA_RPATHS':$ORIGIN' export LIB_SO_RPATH=$CUDA_RPATHS':$ORIGIN'

View File

@ -25,7 +25,6 @@ source /opt/intel/oneapi/mpi/latest/env/vars.sh
export USE_STATIC_MKL=1 export USE_STATIC_MKL=1
export USE_ONEMKL=1 export USE_ONEMKL=1
export USE_XCCL=1 export USE_XCCL=1
export USE_MPI=0
WHEELHOUSE_DIR="wheelhousexpu" WHEELHOUSE_DIR="wheelhousexpu"
LIBTORCH_HOUSE_DIR="libtorch_housexpu" LIBTORCH_HOUSE_DIR="libtorch_housexpu"

View File

@ -173,7 +173,6 @@ if [[ "$BUILD_ENVIRONMENT" == *xpu* ]]; then
source /opt/intel/oneapi/mpi/latest/env/vars.sh source /opt/intel/oneapi/mpi/latest/env/vars.sh
# Enable XCCL build # Enable XCCL build
export USE_XCCL=1 export USE_XCCL=1
export USE_MPI=0
# XPU kineto feature dependencies are not fully ready, disable kineto build as temp WA # XPU kineto feature dependencies are not fully ready, disable kineto build as temp WA
export USE_KINETO=0 export USE_KINETO=0
export TORCH_XPU_ARCH_LIST=pvc export TORCH_XPU_ARCH_LIST=pvc
@ -195,16 +194,8 @@ fi
# We only build FlashAttention files for CUDA 8.0+, and they require large amounts of # We only build FlashAttention files for CUDA 8.0+, and they require large amounts of
# memory to build and will OOM # memory to build and will OOM
if [[ "$BUILD_ENVIRONMENT" == *cuda* ]] && echo "${TORCH_CUDA_ARCH_LIST}" | tr ' ' '\n' | sed 's/$/>= 8.0/' | bc | grep -q 1; then if [[ "$BUILD_ENVIRONMENT" == *cuda* ]] && echo "${TORCH_CUDA_ARCH_LIST}" | tr ' ' '\n' | sed 's/$/>= 8.0/' | bc | grep -q 1; then
J=2 # default to 2 jobs export BUILD_CUSTOM_STEP="ninja -C build flash_attention -j 2"
case "$RUNNER" in
linux.12xlarge.memory|linux.24xlarge.memory)
J=24
;;
esac
echo "Building FlashAttention with job limit $J"
export BUILD_CUSTOM_STEP="ninja -C build flash_attention -j ${J}"
fi fi
if [[ "${BUILD_ENVIRONMENT}" == *clang* ]]; then if [[ "${BUILD_ENVIRONMENT}" == *clang* ]]; then

View File

@ -300,3 +300,24 @@ except RuntimeError as e:
exit 1 exit 1
fi fi
fi fi
###############################################################################
# Check for C++ ABI compatibility to GCC-11 - GCC 13
###############################################################################
if [[ "$(uname)" == 'Linux' && "$PACKAGE_TYPE" == 'manywheel' ]]; then
pushd /tmp
# Per https://gcc.gnu.org/onlinedocs/gcc/C_002b_002b-Dialect-Options.html
# gcc-11 is ABI16, gcc-13 is ABI18, gcc-14 is ABI19
# gcc 11 - CUDA 11.8, xpu, rocm
# gcc 13 - CUDA 12.6, 12.8 and cpu
# Please see issue for reference: https://github.com/pytorch/pytorch/issues/152426
if [[ "$(uname -m)" == "s390x" ]]; then
cxx_abi="19"
elif [[ "$DESIRED_CUDA" != 'xpu' && "$DESIRED_CUDA" != 'rocm'* ]]; then
cxx_abi="18"
else
cxx_abi="16"
fi
python -c "import torch; exit(0 if torch._C._PYBIND11_BUILD_ABI == '_cxxabi10${cxx_abi}' else 1)"
popd
fi
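To see which tag a given wheel carries, a quick manual probe of the same attribute the gate inspects (output depends on how the wheel was built):

import torch
# '_cxxabi1016' -> gcc-11, '_cxxabi1018' -> gcc-13, '_cxxabi1019' -> gcc-14, per the mapping above
print(torch._C._PYBIND11_BUILD_ABI)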

View File

@ -149,19 +149,6 @@ function get_pinned_commit() {
cat .github/ci_commit_pins/"${1}".txt cat .github/ci_commit_pins/"${1}".txt
} }
function detect_cuda_arch() {
if [[ "${BUILD_ENVIRONMENT}" == *cuda* ]]; then
if command -v nvidia-smi; then
TORCH_CUDA_ARCH_LIST=$(nvidia-smi --query-gpu=compute_cap --format=csv | tail -n 1)
elif [[ "${TEST_CONFIG}" == *nogpu* ]]; then
# There won't be nvidia-smi in nogpu tests, so just set TORCH_CUDA_ARCH_LIST to the default
# minimum supported value here
TORCH_CUDA_ARCH_LIST=8.0
fi
export TORCH_CUDA_ARCH_LIST
fi
}
function install_torchaudio() { function install_torchaudio() {
local commit local commit
commit=$(get_pinned_commit audio) commit=$(get_pinned_commit audio)
@ -258,19 +245,11 @@ function install_torchrec_and_fbgemm() {
git clone --recursive https://github.com/pytorch/fbgemm git clone --recursive https://github.com/pytorch/fbgemm
pushd fbgemm/fbgemm_gpu pushd fbgemm/fbgemm_gpu
git checkout "${fbgemm_commit}" --recurse-submodules git checkout "${fbgemm_commit}" --recurse-submodules
# until the fbgemm_commit includes the tbb patch python setup.py bdist_wheel \
patch <<'EOF' --build-variant=rocm \
--- a/FbgemmGpu.cmake -DHIP_ROOT_DIR="${ROCM_PATH}" \
+++ b/FbgemmGpu.cmake -DCMAKE_C_FLAGS="-DTORCH_USE_HIP_DSA" \
@@ -184,5 +184,6 @@ gpu_cpp_library( -DCMAKE_CXX_FLAGS="-DTORCH_USE_HIP_DSA"
fbgemm_gpu_tbe_cache
fbgemm_gpu_tbe_optimizers
fbgemm_gpu_tbe_utils
+ tbb
DESTINATION
fbgemm_gpu)
EOF
python setup.py bdist_wheel --build-variant=rocm
popd popd
# Save the wheel before cleaning up # Save the wheel before cleaning up

View File

@ -35,10 +35,11 @@ fi
print_cmake_info print_cmake_info
if [[ ${BUILD_ENVIRONMENT} == *"distributed"* ]]; then if [[ ${BUILD_ENVIRONMENT} == *"distributed"* ]]; then
USE_OPENMP=1 WERROR=1 python setup.py bdist_wheel # Needed for inductor benchmarks, as lots of HF networks make `torch.distributed` calls
USE_DISTRIBUTED=1 USE_OPENMP=1 WERROR=1 python setup.py bdist_wheel
else else
# NB: we always build with distributed; USE_DISTRIBUTED turns off all # Explicitly set USE_DISTRIBUTED=0 to align with the default build config on mac. This also serves as the sole CI config that tests
# backends (specifically the gloo backend), so test that this case works too # that building with USE_DISTRIBUTED=0 works at all. See https://github.com/pytorch/pytorch/issues/86448
USE_DISTRIBUTED=0 USE_OPENMP=1 MACOSX_DEPLOYMENT_TARGET=11.0 WERROR=1 BUILD_TEST=OFF USE_PYTORCH_METAL=1 python setup.py bdist_wheel --plat-name macosx_11_0_arm64 USE_DISTRIBUTED=0 USE_OPENMP=1 MACOSX_DEPLOYMENT_TARGET=11.0 WERROR=1 BUILD_TEST=OFF USE_PYTORCH_METAL=1 python setup.py bdist_wheel --plat-name macosx_11_0_arm64
fi fi
if which sccache > /dev/null; then if which sccache > /dev/null; then

View File

@ -13,13 +13,9 @@ if [[ ! $(python -c "import torch; print(int(torch.backends.openmp.is_available(
fi fi
popd popd
python -mpip install -r requirements.txt
# enable debug asserts in serialization # enable debug asserts in serialization
export TORCH_SERIALIZATION_DEBUG=1 export TORCH_SERIALIZATION_DEBUG=1
python -mpip install --no-input -r requirements.txt
setup_test_python() { setup_test_python() {
# The CircleCI worker hostname doesn't resolve to an address. # The CircleCI worker hostname doesn't resolve to an address.
# This environment variable makes ProcessGroupGloo default to # This environment variable makes ProcessGroupGloo default to
@ -178,15 +174,17 @@ checkout_install_torchbench() {
# to install and test other models # to install and test other models
python install.py --continue_on_fail python install.py --continue_on_fail
fi fi
popd
pip install -r .ci/docker/ci_commit_pins/huggingface-requirements.txt # soxr comes from https://github.com/huggingface/transformers/pull/39429
pip install transformers==4.54.0 soxr==0.5.0
# https://github.com/pytorch/pytorch/issues/160689 to remove torchao because # https://github.com/pytorch/pytorch/issues/160689 to remove torchao because
# its current version 0.12.0 doesn't work with transformers 4.54.0 # its current version 0.12.0 doesn't work with transformers 4.54.0
pip uninstall -y torchao pip uninstall -y torchao
echo "Print all dependencies after TorchBench is installed" echo "Print all dependencies after TorchBench is installed"
python -mpip freeze python -mpip freeze
popd
} }
torchbench_setup_macos() { torchbench_setup_macos() {
@ -199,7 +197,7 @@ torchbench_setup_macos() {
git checkout "$(cat ../.github/ci_commit_pins/vision.txt)" git checkout "$(cat ../.github/ci_commit_pins/vision.txt)"
git submodule update --init --recursive git submodule update --init --recursive
python setup.py clean python setup.py clean
python -m pip install -e . -v --no-build-isolation python setup.py develop
popd popd
pushd torchaudio pushd torchaudio
@ -208,7 +206,7 @@ torchbench_setup_macos() {
git submodule update --init --recursive git submodule update --init --recursive
python setup.py clean python setup.py clean
#TODO: Remove me, when figure out how to make TorchAudio find brew installed openmp #TODO: Remove me, when figure out how to make TorchAudio find brew installed openmp
USE_OPENMP=0 python -m pip install -e . -v --no-build-isolation USE_OPENMP=0 python setup.py develop
popd popd
checkout_install_torchbench checkout_install_torchbench
@ -306,47 +304,6 @@ test_torchbench_smoketest() {
fi fi
done done
echo "Pytorch benchmark on mps device completed"
}
test_aoti_torchbench_smoketest() {
print_cmake_info
echo "Launching AOTInductor torchbench setup"
pip_benchmark_deps
# shellcheck disable=SC2119,SC2120
torchbench_setup_macos
TEST_REPORTS_DIR=$(pwd)/test/test-reports
mkdir -p "$TEST_REPORTS_DIR"
local device=mps
local dtypes=(undefined float16 bfloat16 notset)
local dtype=${dtypes[$1]}
local models=(hf_T5 llama BERT_pytorch dcgan hf_GPT2 yolov3 resnet152 sam sam_fast pytorch_unet stable_diffusion_text_encoder speech_transformer Super_SloMo doctr_det_predictor doctr_reco_predictor timm_resnet timm_vovnet vgg16)
echo "Launching torchbench inference performance run for AOT Inductor and dtype ${dtype}"
local dtype_arg="--${dtype}"
if [ "$dtype" == notset ]; then
dtype_arg="--float32"
fi
touch "$TEST_REPORTS_DIR/aot_inductor_torchbench_${dtype}_inference_${device}_performance.csv"
for model in "${models[@]}"; do
PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/torchbench.py \
--performance --only "$model" --export-aot-inductor --inference --devices "$device" "$dtype_arg" \
--output "$TEST_REPORTS_DIR/aot_inductor_torchbench_${dtype}_inference_${device}_performance.csv" || true
PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/torchbench.py \
--accuracy --only "$model" --export-aot-inductor --inference --devices "$device" "$dtype_arg" \
--output "$TEST_REPORTS_DIR/aot_inductor_torchbench_${dtype}_inference_${device}_accuracy.csv" || true
done
echo "Launching HuggingFace inference performance run for AOT Inductor and dtype ${dtype}"
PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/huggingface.py \
--performance --export-aot-inductor --inference --devices "$device" "$dtype_arg" \
--output "$TEST_REPORTS_DIR/aot_inductor_huggingface_${dtype}_inference_${device}_performance.csv" || true
PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/huggingface.py \
--accuracy --export-aot-inductor --inference --devices "$device" "$dtype_arg" \
--output "$TEST_REPORTS_DIR/aot_inductor_huggingface_${dtype}_inference_${device}_accuracy.csv" || true
echo "Pytorch benchmark on mps device completed" echo "Pytorch benchmark on mps device completed"
} }
@ -395,8 +352,6 @@ elif [[ $TEST_CONFIG == *"perf_timm"* ]]; then
test_timm_perf test_timm_perf
elif [[ $TEST_CONFIG == *"perf_smoketest"* ]]; then elif [[ $TEST_CONFIG == *"perf_smoketest"* ]]; then
test_torchbench_smoketest "${SHARD_NUMBER}" test_torchbench_smoketest "${SHARD_NUMBER}"
elif [[ $TEST_CONFIG == *"aot_inductor_perf_smoketest"* ]]; then
test_aoti_torchbench_smoketest "${SHARD_NUMBER}"
elif [[ $TEST_CONFIG == *"mps"* ]]; then elif [[ $TEST_CONFIG == *"mps"* ]]; then
test_python_mps test_python_mps
elif [[ $NUM_TEST_SHARDS -gt 1 ]]; then elif [[ $NUM_TEST_SHARDS -gt 1 ]]; then

View File

@ -45,7 +45,6 @@ if [[ "${SHARD_NUMBER:-2}" == "2" ]]; then
# DTensor tests # DTensor tests
time python test/run_test.py --verbose -i distributed/tensor/test_random_ops time python test/run_test.py --verbose -i distributed/tensor/test_random_ops
time python test/run_test.py --verbose -i distributed/tensor/test_dtensor_compile time python test/run_test.py --verbose -i distributed/tensor/test_dtensor_compile
time python test/run_test.py --verbose -i distributed/tensor/test_utils.py
# DeviceMesh test # DeviceMesh test
time python test/run_test.py --verbose -i distributed/test_device_mesh time python test/run_test.py --verbose -i distributed/test_device_mesh

View File

@@ -386,8 +386,8 @@ def smoke_test_compile(device: str = "cpu") -> None:
 def smoke_test_nvshmem() -> None:
-    if not torch.cuda.is_available() or target_os == "windows":
-        print("Windows platform or CUDA is not available, skipping NVSHMEM test")
+    if not torch.cuda.is_available():
+        print("CUDA is not available, skipping NVSHMEM test")
         return
     # Check if NVSHMEM is compiled in current build
@@ -396,9 +396,7 @@ def smoke_test_nvshmem() -> None:
     except ImportError:
         # Not built with NVSHMEM support.
         # torch is not compiled with NVSHMEM prior to 2.9
-        from torch.torch_version import TorchVersion
-        if TorchVersion(torch.__version__) < (2, 9):
+        if torch.__version__ < "2.9":
             return
         else:
             # After 2.9: NVSHMEM is expected to be compiled in current build
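Note the difference between the two sides of this hunk: TorchVersion(torch.__version__) < (2, 9) compares release components numerically, while the plain string comparison torch.__version__ < "2.9" is lexicographic and misorders two-digit minor versions. A hedged sketch, assuming a torch build that ships torch.torch_version:

from torch.torch_version import TorchVersion

# As strings, "2.10.0" sorts before "2.9" -- wrong for version ordering:
assert "2.10.0" < "2.9"
# TorchVersion compares numerically, including against plain tuples:
assert TorchVersion("2.10.0") > (2, 9)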

View File

@@ -91,7 +91,6 @@ if [[ "$BUILD_ENVIRONMENT" == *clang9* || "$BUILD_ENVIRONMENT" == *xpu* ]]; then
   export VALGRIND=OFF
 fi
-detect_cuda_arch
 if [[ "$BUILD_ENVIRONMENT" == *s390x* ]]; then
   # There are additional warnings on s390x, maybe due to newer gcc.
@@ -496,14 +495,6 @@ test_inductor_cpp_wrapper_shard() {
     -k 'take' \
     --shard "$1" "$NUM_TEST_SHARDS" \
     --verbose
-  if [[ "${BUILD_ENVIRONMENT}" == *xpu* ]]; then
-    python test/run_test.py \
-      --include inductor/test_mkldnn_pattern_matcher \
-      -k 'xpu' \
-      --shard "$1" "$NUM_TEST_SHARDS" \
-      --verbose
-  fi
 }
 # "Global" flags for inductor benchmarking controlled by TEST_CONFIG
@@ -1638,10 +1629,6 @@ elif [[ "${TEST_CONFIG}" == *xla* ]]; then
   install_torchvision
   build_xla
   test_xla
-elif [[ "$TEST_CONFIG" == *vllm* ]]; then
-  echo "vLLM CI uses TORCH_CUDA_ARCH_LIST: $TORCH_CUDA_ARCH_LIST"
-  (cd .ci/lumen_cli && python -m pip install -e .)
-  python -m cli.run test external vllm --test-plan "$TEST_CONFIG" --shard-id "$SHARD_NUMBER" --num-shards "$NUM_TEST_SHARDS"
 elif [[ "${TEST_CONFIG}" == *executorch* ]]; then
   test_executorch
 elif [[ "$TEST_CONFIG" == 'jit_legacy' ]]; then
@@ -1721,6 +1708,11 @@ elif [[ "${TEST_CONFIG}" == *inductor_cpp_wrapper* ]]; then
 elif [[ "${TEST_CONFIG}" == *inductor* ]]; then
   install_torchvision
   test_inductor_shard "${SHARD_NUMBER}"
+  if [[ "${SHARD_NUMBER}" == 1 ]]; then
+    if [[ "${BUILD_ENVIRONMENT}" != linux-jammy-py3.9-gcc11-build ]]; then
+      test_inductor_distributed
+    fi
+  fi
 elif [[ "${TEST_CONFIG}" == *einops* ]]; then
   test_einops
 elif [[ "${TEST_CONFIG}" == *dynamo_wrapped* ]]; then

View File

@@ -44,7 +44,7 @@ python -m pip install pytest-rerunfailures==10.3 pytest-cpp==2.3.0 tensorboard==
 python -m pip install z3-solver==4.15.1.0
 # Install tlparse for test\dynamo\test_structured_trace.py UTs.
-python -m pip install tlparse==0.4.0
+python -m pip install tlparse==0.3.30
 # Install parameterized
 python -m pip install parameterized==0.8.1

View File

@@ -1,59 +0,0 @@
-@echo off
-set MODULE_NAME=pytorch
-IF NOT EXIST "setup.py" IF NOT EXIST "%MODULE_NAME%" (
-    call internal\clone.bat
-    cd %~dp0
-) ELSE (
-    call internal\clean.bat
-)
-IF ERRORLEVEL 1 goto :eof
-call internal\check_deps.bat
-IF ERRORLEVEL 1 goto :eof
-REM Check for optional components
-set USE_CUDA=
-set CMAKE_GENERATOR=Visual Studio 15 2017 Win64
-IF "%NVTOOLSEXT_PATH%"=="" (
-    IF EXIST "C:\Program Files\NVIDIA Corporation\NvToolsExt\lib\x64\nvToolsExt64_1.lib" (
-        set NVTOOLSEXT_PATH=C:\Program Files\NVIDIA Corporation\NvToolsExt
-    ) ELSE (
-        echo NVTX ^(Visual Studio Extension ^for CUDA^) ^not installed, failing
-        exit /b 1
-    )
-)
-IF "%CUDA_PATH_V130%"=="" (
-    IF EXIST "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.0\bin\nvcc.exe" (
-        set "CUDA_PATH_V130=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.0"
-    ) ELSE (
-        echo CUDA 13.0 not found, failing
-        exit /b 1
-    )
-)
-IF "%BUILD_VISION%" == "" (
-    set TORCH_CUDA_ARCH_LIST=7.5;8.0;8.6;9.0;10.0;12.0
-    set TORCH_NVCC_FLAGS=-Xfatbin -compress-all
-) ELSE (
-    set NVCC_FLAGS=-D__CUDA_NO_HALF_OPERATORS__ --expt-relaxed-constexpr -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=compute_80 -gencode=arch=compute_86,code=compute_86 -gencode=arch=compute_90,code=compute_90 -gencode=arch=compute_100,code=compute_100 -gencode=arch=compute_120,code=compute_120
-)
-set "CUDA_PATH=%CUDA_PATH_V130%"
-set "PATH=%CUDA_PATH_V130%\bin;%PATH%"
-:optcheck
-call internal\check_opts.bat
-IF ERRORLEVEL 1 goto :eof
-if exist "%NIGHTLIES_PYTORCH_ROOT%" cd %NIGHTLIES_PYTORCH_ROOT%\..
-call %~dp0\internal\copy.bat
-IF ERRORLEVEL 1 goto :eof
-call %~dp0\internal\setup.bat
-IF ERRORLEVEL 1 goto :eof

View File

@@ -1,20 +1,12 @@
-if %CUDA_VERSION% geq 130 (
-    set "dll_path=bin\x64"
-) else (
-    set "dll_path=bin"
-)
-copy "%CUDA_PATH%\%dll_path%\cusparse*64_*.dll*" pytorch\torch\lib
-copy "%CUDA_PATH%\%dll_path%\cublas*64_*.dll*" pytorch\torch\lib
-copy "%CUDA_PATH%\%dll_path%\cudart*64_*.dll*" pytorch\torch\lib
-copy "%CUDA_PATH%\%dll_path%\curand*64_*.dll*" pytorch\torch\lib
-copy "%CUDA_PATH%\%dll_path%\cufft*64_*.dll*" pytorch\torch\lib
-copy "%CUDA_PATH%\%dll_path%\cusolver*64_*.dll*" pytorch\torch\lib
-copy "%CUDA_PATH%\%dll_path%\nvrtc*64_*.dll*" pytorch\torch\lib
-copy "%CUDA_PATH%\%dll_path%\nvJitLink_*.dll*" pytorch\torch\lib
+copy "%CUDA_PATH%\bin\cusparse*64_*.dll*" pytorch\torch\lib
+copy "%CUDA_PATH%\bin\cublas*64_*.dll*" pytorch\torch\lib
+copy "%CUDA_PATH%\bin\cudart*64_*.dll*" pytorch\torch\lib
+copy "%CUDA_PATH%\bin\curand*64_*.dll*" pytorch\torch\lib
+copy "%CUDA_PATH%\bin\cufft*64_*.dll*" pytorch\torch\lib
+copy "%CUDA_PATH%\bin\cusolver*64_*.dll*" pytorch\torch\lib
+copy "%CUDA_PATH%\bin\nvrtc*64_*.dll*" pytorch\torch\lib
 copy "%CUDA_PATH%\bin\cudnn*64_*.dll*" pytorch\torch\lib
 copy "%CUDA_PATH%\extras\CUPTI\lib64\cupti64_*.dll*" pytorch\torch\lib
 copy "%CUDA_PATH%\extras\CUPTI\lib64\nvperf_host*.dll*" pytorch\torch\lib
@@ -28,3 +20,8 @@ copy "%libuv_ROOT%\bin\uv.dll" pytorch\torch\lib
 if exist "C:\Windows\System32\zlibwapi.dll" (
     copy "C:\Windows\System32\zlibwapi.dll" pytorch\torch\lib
 )
+::copy nvJitLink dll is requires for cuda 12+
+if exist "%CUDA_PATH%\bin\nvJitLink_*.dll*" (
+    copy "%CUDA_PATH%\bin\nvJitLink_*.dll*" pytorch\torch\lib
+)

View File

@@ -26,7 +26,6 @@ if exist "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION_STR%
 if %CUDA_VER% EQU 126 goto cuda126
 if %CUDA_VER% EQU 128 goto cuda128
 if %CUDA_VER% EQU 129 goto cuda129
-if %CUDA_VER% EQU 130 goto cuda130
 echo CUDA %CUDA_VERSION_STR% is not supported
 exit /b 1
@@ -114,33 +113,6 @@ xcopy /Y "%SRC_DIR%\temp_build\zlib\dll_x64\*.dll" "C:\Windows\System32"
 goto cuda_common
-:cuda130
-set CUDA_INSTALL_EXE=cuda_13.0.0_windows.exe
-if not exist "%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%" (
-    curl -k -L "https://ossci-windows.s3.amazonaws.com/%CUDA_INSTALL_EXE%" --output "%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%" & REM @lint-ignore
-    if errorlevel 1 exit /b 1
-    set "CUDA_SETUP_FILE=%SRC_DIR%\temp_build\%CUDA_INSTALL_EXE%"
-    set "ARGS="
-)
-set CUDNN_FOLDER=cudnn-windows-x86_64-9.12.0.46_cuda13-archive
-set CUDNN_LIB_FOLDER="lib"
-set "CUDNN_INSTALL_ZIP=%CUDNN_FOLDER%.zip"
-if not exist "%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%" (
-    curl -k -L "http://s3.amazonaws.com/ossci-windows/%CUDNN_INSTALL_ZIP%" --output "%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%" & REM @lint-ignore
-    if errorlevel 1 exit /b 1
-    set "CUDNN_SETUP_FILE=%SRC_DIR%\temp_build\%CUDNN_INSTALL_ZIP%"
-)
-@REM cuDNN 8.3+ required zlib to be installed on the path
-echo Installing ZLIB dlls
-curl -k -L "http://s3.amazonaws.com/ossci-windows/zlib123dllx64.zip" --output "%SRC_DIR%\temp_build\zlib123dllx64.zip"
-7z x "%SRC_DIR%\temp_build\zlib123dllx64.zip" -o"%SRC_DIR%\temp_build\zlib"
-xcopy /Y "%SRC_DIR%\temp_build\zlib\dll_x64\*.dll" "C:\Windows\System32"
-goto cuda_common
 :cuda_common
 :: NOTE: We only install CUDA if we don't have it installed already.
 :: With GHA runners these should be pre-installed as part of our AMI process

View File

@@ -1,9 +1,9 @@
-set WIN_DRIVER_VN=580.88
-set "DRIVER_DOWNLOAD_LINK=https://ossci-windows.s3.amazonaws.com/%WIN_DRIVER_VN%-data-center-tesla-desktop-win10-win11-64bit-dch-international.exe" & REM @lint-ignore
-curl --retry 3 -kL %DRIVER_DOWNLOAD_LINK% --output %WIN_DRIVER_VN%-data-center-tesla-desktop-win10-win11-64bit-dch-international.exe
+set WIN_DRIVER_VN=528.89
+set "DRIVER_DOWNLOAD_LINK=https://ossci-windows.s3.amazonaws.com/%WIN_DRIVER_VN%-data-center-tesla-desktop-winserver-2016-2019-2022-dch-international.exe" & REM @lint-ignore
+curl --retry 3 -kL %DRIVER_DOWNLOAD_LINK% --output %WIN_DRIVER_VN%-data-center-tesla-desktop-winserver-2016-2019-2022-dch-international.exe
 if errorlevel 1 exit /b 1
-start /wait %WIN_DRIVER_VN%-data-center-tesla-desktop-win10-win11-64bit-dch-international.exe -s -noreboot
+start /wait %WIN_DRIVER_VN%-data-center-tesla-desktop-winserver-2016-2019-2022-dch-international.exe -s -noreboot
 if errorlevel 1 exit /b 1
-del %WIN_DRIVER_VN%-data-center-tesla-desktop-win10-win11-64bit-dch-international.exe || ver > NUL
+del %WIN_DRIVER_VN%-data-center-tesla-desktop-winserver-2016-2019-2022-dch-international.exe || ver > NUL

View File

@@ -1,22 +1,12 @@
 set ADDITIONAL_OPTIONS=""
 set PYTHON_EXEC="python"
 if "%DESIRED_PYTHON%" == "3.13t" (
     echo Python version is set to 3.13t
     set "PYTHON_INSTALLER_URL=https://www.python.org/ftp/python/3.13.0/python-3.13.0-amd64.exe"
     set ADDITIONAL_OPTIONS="Include_freethreaded=1"
     set PYTHON_EXEC="python3.13t"
-) else if "%DESIRED_PYTHON%"=="3.14" (
-    echo Python version is set to 3.14 or 3.14t
-    set "PYTHON_INSTALLER_URL=https://www.python.org/ftp/python/3.14.0/python-3.14.0rc1-amd64.exe"
-) else if "%DESIRED_PYTHON%"=="3.14t" (
-    echo Python version is set to 3.14 or 3.14t
-    set "PYTHON_INSTALLER_URL=https://www.python.org/ftp/python/3.14.0/python-3.14.0rc1-amd64.exe"
-    set ADDITIONAL_OPTIONS="Include_freethreaded=1"
-    set PYTHON_EXEC="python3.14t"
 ) else (
-    echo Python version is set to %DESIRED_PYTHON%
+    echo DESIRED_PYTHON not defined, Python version is set to %DESIRED_PYTHON%
     set "PYTHON_INSTALLER_URL=https://www.python.org/ftp/python/%DESIRED_PYTHON%.0/python-%DESIRED_PYTHON%.0-amd64.exe" %= @lint-ignore =%
 )

View File

@@ -13,9 +13,9 @@ if not exist "%SRC_DIR%\temp_build" mkdir "%SRC_DIR%\temp_build"
 :xpu_bundle_install_start
 set XPU_BUNDLE_PARENT_DIR=C:\Program Files (x86)\Intel\oneAPI
-set XPU_BUNDLE_URL=https://registrationcenter-download.intel.com/akdlm/IRC_NAS/75d4eb97-914a-4a95-852c-7b9733d80f74/intel-deep-learning-essentials-2025.1.3.8_offline.exe
+set XPU_BUNDLE_URL=https://registrationcenter-download.intel.com/akdlm/IRC_NAS/9d6d6c17-ca2d-4735-9331-99447e4a1280/intel-deep-learning-essentials-2025.0.1.28_offline.exe
 set XPU_BUNDLE_PRODUCT_NAME=intel.oneapi.win.deep-learning-essentials.product
-set XPU_BUNDLE_VERSION=2025.1.3+5
+set XPU_BUNDLE_VERSION=2025.0.1+20
 set XPU_BUNDLE_INSTALLED=0
 set XPU_BUNDLE_UNINSTALL=0
 set XPU_EXTRA_URL=NULL
@@ -24,9 +24,9 @@ set XPU_EXTRA_VERSION=2025.0.1+1226
 set XPU_EXTRA_INSTALLED=0
 set XPU_EXTRA_UNINSTALL=0
-if not [%XPU_VERSION%]==[] if [%XPU_VERSION%]==[2025.2] (
-    set XPU_BUNDLE_URL=https://registrationcenter-download.intel.com/akdlm/IRC_NAS/24751ead-ddc5-4479-b9e6-f9fe2ff8b9f2/intel-deep-learning-essentials-2025.2.1.25_offline.exe
-    set XPU_BUNDLE_VERSION=2025.2.1+20
+if not [%XPU_VERSION%]==[] if [%XPU_VERSION%]==[2025.1] (
+    set XPU_BUNDLE_URL=https://registrationcenter-download.intel.com/akdlm/IRC_NAS/75d4eb97-914a-4a95-852c-7b9733d80f74/intel-deep-learning-essentials-2025.1.3.8_offline.exe
+    set XPU_BUNDLE_VERSION=2025.1.3+5
 )
 :: Check if XPU bundle is target version or already installed
@@ -90,3 +90,14 @@ if errorlevel 1 exit /b 1
 del xpu_extra.exe
 :xpu_install_end
+if not "%XPU_ENABLE_KINETO%"=="1" goto install_end
+:: Install Level Zero SDK
+set XPU_EXTRA_LZ_URL=https://github.com/oneapi-src/level-zero/releases/download/v1.14.0/level-zero-sdk_1.14.0.zip
+curl -k -L %XPU_EXTRA_LZ_URL% --output "%SRC_DIR%\temp_build\level_zero_sdk.zip"
+echo "Installing level zero SDK..."
+7z x "%SRC_DIR%\temp_build\level_zero_sdk.zip" -o"%SRC_DIR%\temp_build\level_zero"
+set "INCLUDE=%SRC_DIR%\temp_build\level_zero\include;%INCLUDE%"
+del "%SRC_DIR%\temp_build\level_zero_sdk.zip"
+:install_end

View File

@@ -7,8 +7,6 @@ call "internal\install_python.bat"
 %PYTHON_EXEC% --version
 set "PATH=%CD%\Python\Lib\site-packages\cmake\data\bin;%CD%\Python\Scripts;%CD%\Python;%PATH%"
-if "%DESIRED_PYTHON%" == "3.14t" %PYTHON_EXEC% -m pip install numpy==2.3.2 cmake
-if "%DESIRED_PYTHON%" == "3.14" %PYTHON_EXEC% -m pip install numpy==2.3.2 cmake
 if "%DESIRED_PYTHON%" == "3.13t" %PYTHON_EXEC% -m pip install numpy==2.2.1 cmake
 if "%DESIRED_PYTHON%" == "3.13" %PYTHON_EXEC% -m pip install numpy==2.1.2 cmake
 if "%DESIRED_PYTHON%" == "3.12" %PYTHON_EXEC% -m pip install numpy==2.0.2 cmake

View File

@@ -85,7 +85,7 @@ mkdir -p "$PYTORCH_FINAL_PACKAGE_DIR" || true
 # Create an isolated directory to store this builds pytorch checkout and conda
 # installation
 if [[ -z "$MAC_PACKAGE_WORK_DIR" ]]; then
-    MAC_PACKAGE_WORK_DIR="$(pwd)/tmp_wheel_${DESIRED_PYTHON}_$(date +%H%M%S)"
+    MAC_PACKAGE_WORK_DIR="$(pwd)/tmp_wheel_conda_${DESIRED_PYTHON}_$(date +%H%M%S)"
 fi
 mkdir -p "$MAC_PACKAGE_WORK_DIR" || true
 if [[ -n ${GITHUB_ACTIONS} ]]; then
@@ -96,11 +96,11 @@ fi
 whl_tmp_dir="${MAC_PACKAGE_WORK_DIR}/dist"
 mkdir -p "$whl_tmp_dir"
-mac_version='macosx-11_0-arm64'
+mac_version='macosx_11_0_arm64'
 libtorch_arch='arm64'
 # Create a consistent wheel package name to rename the wheel to
-wheel_filename_new="${TORCH_PACKAGE_NAME}-${build_version}${build_number_prefix}-cp${python_nodot}-none-${mac_version//[-,]/_}.whl"
+wheel_filename_new="${TORCH_PACKAGE_NAME}-${build_version}${build_number_prefix}-cp${python_nodot}-none-${mac_version}.whl"
 ###########################################################
@@ -124,61 +124,91 @@ popd
 export TH_BINARY_BUILD=1
 export INSTALL_TEST=0 # dont install test binaries into site-packages
-export MACOSX_DEPLOYMENT_TARGET=11.0
+export MACOSX_DEPLOYMENT_TARGET=10.15
+export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"}
+SETUPTOOLS_PINNED_VERSION="==70.1.0"
+PYYAML_PINNED_VERSION="=5.3"
 EXTRA_CONDA_INSTALL_FLAGS=""
 CONDA_ENV_CREATE_FLAGS=""
 RENAME_WHEEL=true
 case $desired_python in
     3.14t)
         echo "Using 3.14 deps"
-        mac_version='macosx-11.0-arm64'
-        NUMPY_PINNED_VERSION="==2.1.0"
+        SETUPTOOLS_PINNED_VERSION=">=70.1.0"
+        PYYAML_PINNED_VERSION=">=6.0.1"
+        NUMPY_PINNED_VERSION="=2.1.0"
+        CONDA_ENV_CREATE_FLAGS="python-freethreading"
+        EXTRA_CONDA_INSTALL_FLAGS="-c conda-forge/label/python_rc -c conda-forge"
+        desired_python="3.14.0rc1"
         RENAME_WHEEL=false
         ;;
     3.14)
         echo "Using 3.14t deps"
-        mac_version='macosx-11.0-arm64'
-        NUMPY_PINNED_VERSION="==2.1.0"
+        SETUPTOOLS_PINNED_VERSION=">=70.1.0"
+        PYYAML_PINNED_VERSION=">=6.0.1"
+        NUMPY_PINNED_VERSION="=2.1.0"
+        EXTRA_CONDA_INSTALL_FLAGS="-c conda-forge/label/python_rc -c conda-forge"
+        desired_python="3.14.0rc1"
         RENAME_WHEEL=false
         ;;
     3.13t)
        echo "Using 3.13 deps"
-        NUMPY_PINNED_VERSION="==2.1.0"
+        SETUPTOOLS_PINNED_VERSION=">=70.1.0"
+        PYYAML_PINNED_VERSION=">=6.0.1"
+        NUMPY_PINNED_VERSION="=2.1.0"
+        CONDA_ENV_CREATE_FLAGS="python-freethreading"
+        EXTRA_CONDA_INSTALL_FLAGS="-c conda-forge"
+        desired_python="3.13"
        RENAME_WHEEL=false
        ;;
    3.13)
        echo "Using 3.13 deps"
-        NUMPY_PINNED_VERSION="==2.1.0"
+        SETUPTOOLS_PINNED_VERSION=">=70.1.0"
+        PYYAML_PINNED_VERSION=">=6.0.1"
+        NUMPY_PINNED_VERSION="=2.1.0"
        ;;
    3.12)
        echo "Using 3.12 deps"
-        NUMPY_PINNED_VERSION="==2.0.2"
+        SETUPTOOLS_PINNED_VERSION=">=70.1.0"
+        PYYAML_PINNED_VERSION=">=6.0.1"
+        NUMPY_PINNED_VERSION="=2.0.2"
        ;;
    3.11)
        echo "Using 3.11 deps"
-        NUMPY_PINNED_VERSION="==2.0.2"
+        SETUPTOOLS_PINNED_VERSION=">=70.1.0"
+        PYYAML_PINNED_VERSION=">=5.3"
+        NUMPY_PINNED_VERSION="=2.0.2"
        ;;
    3.10)
        echo "Using 3.10 deps"
-        NUMPY_PINNED_VERSION="==2.0.2"
+        SETUPTOOLS_PINNED_VERSION=">=70.1.0"
+        PYYAML_PINNED_VERSION=">=5.3"
+        NUMPY_PINNED_VERSION="=2.0.2"
+        ;;
+    3.9)
+        echo "Using 3.9 deps"
+        SETUPTOOLS_PINNED_VERSION=">=70.1.0"
+        PYYAML_PINNED_VERSION=">=5.3"
+        NUMPY_PINNED_VERSION="=2.0.2"
        ;;
    *)
-        echo "Unsupported version $desired_python"
-        exit 1
+        echo "Using default deps"
+        NUMPY_PINNED_VERSION="=1.11.3"
        ;;
 esac
-PINNED_PACKAGES=(
-    "numpy${NUMPY_PINNED_VERSION}"
-)
-python -mvenv ~/${desired_python}-build
-source ~/${desired_python}-build/bin/activate
-retry pip install "${PINNED_PACKAGES[@]}" -r "${pytorch_rootdir}/requirements.txt"
+# Install into a fresh env
+tmp_env_name="wheel_py$python_nodot"
+conda create ${EXTRA_CONDA_INSTALL_FLAGS} -yn "$tmp_env_name" python="$desired_python" ${CONDA_ENV_CREATE_FLAGS}
+source activate "$tmp_env_name"
+retry pip install -r "${pytorch_rootdir}/requirements-build.txt"
+pip install "numpy=${NUMPY_PINNED_VERSION}" "pyyaml${PYYAML_PINNED_VERSION}" requests ninja "setuptools${SETUPTOOLS_PINNED_VERSION}" typing-extensions
+retry pip install -r "${pytorch_rootdir}/requirements.txt" || true
 retry brew install libomp
-# For USE_DISTRIBUTED=1 on macOS, this enables gloo, which needs libuv, which
-# is build as part of tensorpipe submodule
+# For USE_DISTRIBUTED=1 on macOS, need libuv, which is build as part of tensorpipe submodule
 export USE_DISTRIBUTED=1
 export USE_MKLDNN=OFF
@@ -188,7 +218,7 @@ export BUILD_TEST=OFF
 pushd "$pytorch_rootdir"
 echo "Calling setup.py bdist_wheel at $(date)"
-_PYTHON_HOST_PLATFORM=${mac_version} ARCHFLAGS="-arch arm64" python setup.py bdist_wheel -d "$whl_tmp_dir" --plat-name "${mac_version//[-.]/_}"
+python setup.py bdist_wheel -d "$whl_tmp_dir"
 echo "Finished setup.py bdist_wheel at $(date)"

View File

@@ -75,8 +75,8 @@ TRITON_VERSION=$(cat $PYTORCH_ROOT/.ci/docker/triton_version.txt)
 # Here PYTORCH_EXTRA_INSTALL_REQUIREMENTS is already set for the all the wheel builds hence append TRITON_CONSTRAINT
 TRITON_CONSTRAINT="platform_system == 'Linux' and platform_machine == 'x86_64'"
-# CUDA 12.9/13.0 builds have triton for Linux and Linux aarch64 binaries.
-if [[ "$DESIRED_CUDA" == "cu129" ]] || [[ "$DESIRED_CUDA" == "cu130" ]]; then
+# CUDA 12.9 builds have triton for Linux and Linux aarch64 binaries.
+if [[ "$DESIRED_CUDA" == "cu129" ]]; then
     TRITON_CONSTRAINT="platform_system == 'Linux'"
 fi
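Both TRITON_CONSTRAINT strings are standard PEP 508 environment markers; dropping the platform_machine == 'x86_64' clause is what lets aarch64 Linux wheels pick up the triton dependency. A hedged sketch of how such markers evaluate, using the third-party packaging library (an assumption; the script itself never imports it):

from packaging.markers import Marker

x86_only = Marker("platform_system == 'Linux' and platform_machine == 'x86_64'")
any_linux = Marker("platform_system == 'Linux'")

# On an aarch64 Linux host the first marker evaluates False and the second
# True, so only the looser constraint installs triton there.
print(x86_only.evaluate(), any_linux.evaluate())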

View File

@@ -51,12 +51,16 @@ s3_upload() {
     s3_upload_dir="${s3_root_dir}/${UPLOAD_SUBFOLDER}/"
   fi
   (
+    cache_control_flag=""
+    if [[ "${UPLOAD_CHANNEL}" = "test" ]]; then
+      cache_control_flag="--cache-control='no-cache,no-store,must-revalidate'"
+    fi
     for pkg in ${PKG_DIR}/*.${extension}; do
       (
         set -x
         shm_id=$(sha256sum "${pkg}" | awk '{print $1}')
         ${AWS_S3_CP} --no-progress --acl public-read "${pkg}" "${s3_upload_dir}" \
-          --metadata "checksum-sha256=${shm_id}"
+          --metadata "checksum-sha256=${shm_id}" ${cache_control_flag}
       )
     done
   )
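The checksum-sha256 metadata attached above is simply the hex digest that sha256sum prints. A minimal Python equivalent for producing the same value (the file name is hypothetical):

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream the file so multi-GB wheels never need to fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            digest.update(chunk)
    return digest.hexdigest()  # matches column 1 of `sha256sum <file>`

print(sha256_of("torch-2.8.0-cp312-none-linux_x86_64.whl"))  # hypothetical package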

View File

@@ -15,7 +15,8 @@ fi
 if [[ "$DESIRED_CUDA" == 'xpu' ]]; then
   export VC_YEAR=2022
   export USE_SCCACHE=0
-  export XPU_VERSION=2025.2
+  export XPU_VERSION=2025.1
+  export XPU_ENABLE_KINETO=1
 fi
 echo "Free space on filesystem before build:"

View File

@@ -8,7 +8,7 @@ export VC_YEAR=2022
 if [[ "$DESIRED_CUDA" == 'xpu' ]]; then
   export VC_YEAR=2022
-  export XPU_VERSION=2025.2
+  export XPU_VERSION=2025.1
 fi
 pushd "$PYTORCH_ROOT/.ci/pytorch/"

View File

@@ -48,7 +48,6 @@ per-file-ignores =
     torch/__init__.py: F401,TOR901
     torch/_custom_op/impl.py: TOR901
     torch/_export/serde/upgrade.py: TOR901
-    torch/_functorch/predispatch.py: TOR901
     torch/_functorch/vmap.py: TOR901
     torch/_inductor/test_operators.py: TOR901
     torch/_library/abstract_impl.py: TOR901
@@ -73,7 +72,7 @@ exclude =
     ./docs/src,
     ./functorch/docs,
     ./functorch/examples,
-    ./functorch/docs/source/tutorials,
+    ./functorch/notebooks,
     ./scripts,
     ./test/generated_type_hints_smoketest.py,
     ./third_party,

View File

@@ -12,9 +12,7 @@ self-hosted-runner:
   - linux.9xlarge.ephemeral
   - am2.linux.9xlarge.ephemeral
   - linux.12xlarge
-  - linux.12xlarge.memory
   - linux.24xlarge
-  - linux.24xlarge.memory
   - linux.24xlarge.ephemeral
   - linux.24xlarge.amd
   - linux.arm64.2xlarge

View File

@@ -4,11 +4,6 @@ name: Build External packages
 description: build external packages for PyTorch
 inputs:
-  cuda-version:
-    description: CUDA version to use
-    type: string
-    required: true
-    default: '12.8.1'
   cuda-arch-list:
     description: TORCH_CUDA_ARCH_LIST (e.g., "8.0;8.9;9.0")
     type: string
@@ -49,12 +44,10 @@ runs:
       env:
         SCCACHE_BUCKET: ossci-compiler-cache-circleci-v2
         SCCACHE_REGION: us-east-1
-        CUDA_VERSION: ${{ inputs.cuda-version }}
         TORCH_CUDA_ARCH_LIST: ${{ inputs.cuda-arch-list }}
         BASE_IMAGE: ${{ inputs.docker-image }}
         BUILD_TARGETS: ${{ inputs.build-targets }}
-        PARENT_OUTPUT_DIR: ${{ inputs.output-dir }}
-        TORCH_WHEELS_PATH: ${{ inputs.torch-wheel-dir }}
+        PARENT_OUTPUT_DIR: ${{ inputs.output-dir}}
       shell: bash
       run: |
         set -euo pipefail
@@ -75,6 +68,7 @@ runs:
           export OUTPUT_DIR
           echo "Building external package: $target in directory $OUTPUT_DIR"
           python3 -m cli.run build external "$target"
         done
         END_TIME=$(date +%s)

View File

@@ -57,21 +57,6 @@ runs:
           submodules: ${{ inputs.submodules }}
           show-progress: false
-      - name: Clean submodules post checkout
-        id: clean-submodules
-        if: ${{ steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'false' }}
-        shell: bash
-        env:
-          NO_SUDO: ${{ inputs.no-sudo }}
-        run: |
-          cd "${GITHUB_WORKSPACE}"
-          # Clean stale submodule dirs
-          if [ -z "${NO_SUDO}" ]; then
-            sudo git submodule foreach --recursive git clean -ffdx
-          else
-            git submodule foreach --recursive git clean -ffdx
-          fi
       - name: Clean workspace (try again)
         if: ${{ steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'false' &&
           (steps.first-clean.outcome != 'success' || steps.first-checkout-attempt.outcome != 'success') }}

View File

@@ -1 +1 @@
-caba63f0fa29ef9e3d566699f32f11c07c8bda4e
+f92ceca80df7a36194468665d62b0f791b1826c5

View File

@@ -1 +1 @@
-08ae0af1395c8d8471f4025deb6af9aef90b342f
+7f1de94a4c2d14f59ad4ca84538c36084ea6b2c8

View File

@@ -1 +1 @@
-f510715882304796a96e33028b4f6de1b026c2c7
+0ca2393b47e72c4424a49aa3b32c7c5d0e378a72

View File

@@ -1 +1 @@
-6c5478ff7c3d50dd1e3047d72ec5909bea474073
+095faec1e7b6cc47220181e74ae9cde2605f9b00

View File

@@ -12,46 +12,54 @@ ARG BUILD_BASE_IMAGE=torch-nightly-base
 # by default, it uses devel-ubuntu22.04 official image.
 ARG FINAL_BASE_IMAGE=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04
-# The logic is copied from https://github.com/vllm-project/vllm/blob/main/docker/Dockerfile
-ARG GET_PIP_URL="https://bootstrap.pypa.io/get-pip.py"
 #################### TORCH NIGHTLY BASE IMAGE ####################
 # A base image for building vLLM with devel ubuntu 22.04, this is mainly used to build vllm in vllm builtkite ci
-FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04 as torch-nightly-base
-ARG CUDA_VERSION
-ARG PYTHON_VERSION
-ARG GET_PIP_URL
+From nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04 as torch-nightly-base
+ARG CUDA_VERSION=12.8.1
+ARG PYTHON_VERSION=3.12
+ARG TARGETPLATFORM
+ENV DEBIAN_FRONTEND=noninteractive
+RUN PYTHON_VERSION_STR=$(echo ${PYTHON_VERSION} | sed 's/\.//g') && \
+    echo "export PYTHON_VERSION_STR=${PYTHON_VERSION_STR}" >> /etc/environment
-# Install Python and other dependencies
-RUN apt-get update -y \
-    && apt-get install -y ccache software-properties-common git curl wget sudo vim \
-    && add-apt-repository -y ppa:deadsnakes/ppa \
-    && apt-get update -y \
-    && apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv \
-    && update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1 \
-    && update-alternatives --set python3 /usr/bin/python${PYTHON_VERSION} \
-    && ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config \
-    && curl -sS ${GET_PIP_URL} | python${PYTHON_VERSION} \
+# Install Python and other dependencies if it does not existed
+RUN if ! command -v python3 >/dev/null || ! python3 --version | grep -q "${PYTHON_VERSION}"; then \
+        echo "Installing Python ${PYTHON_VERSION}..." && \
+        echo 'tzdata tzdata/Areas select America' | debconf-set-selections && \
+        echo 'tzdata tzdata/Zones/America select Los_Angeles' | debconf-set-selections && \
+        apt-get update -y && \
+        apt-get install -y ccache software-properties-common git curl sudo && \
+        for i in 1 2 3; do \
+            add-apt-repository -y ppa:deadsnakes/ppa && break || \
+            { echo "Attempt $i failed, retrying in 5s..."; sleep 5; }; \
+        done && \
+        apt-get update -y && \
+        apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv && \
+        update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1 && \
+        update-alternatives --set python3 /usr/bin/python${PYTHON_VERSION} && \
+        ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config && \
+        curl -sS https://bootstrap.pypa.io/get-pip.py | python${PYTHON_VERSION}; \
+    else \
+        echo "Python ${PYTHON_VERSION} already present, skipping setup."; \
+    fi \
     && python3 --version && python3 -m pip --version
 # Upgrade to GCC 10 to avoid https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92519
 # as it was causing spam when compiling the CUTLASS kernels
 # Ensure gcc >= 10 to avoid CUTLASS issues (bug 92519)
 RUN current_gcc_version=$(gcc -dumpversion | cut -f1 -d.) && \
-    if command -v apt-get >/dev/null; then \
     if [ "$current_gcc_version" -lt 10 ]; then \
         echo "GCC version is $current_gcc_version, installing gcc-10..."; \
-        apt-get update \
-        && apt-get install -y gcc-10 g++-10 \
-        && update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 100 \
-        && update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-10 100; \
+        apt-get update && \
+        apt-get install -y gcc-10 g++-10 && \
+        update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 100 && \
+        update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-10 100; \
     else \
         echo "GCC version is $current_gcc_version, no need to install gcc-10."; \
-    fi \
-    fi \
-    && gcc --version && g++ --version
+    fi && \
+    gcc --version && g++ --version
 # install uv for faster pip installs
 RUN --mount=type=cache,target=/root/.cache/uv \
@@ -59,8 +67,6 @@ RUN --mount=type=cache,target=/root/.cache/uv \
 ENV UV_HTTP_TIMEOUT=500
 ENV UV_INDEX_STRATEGY="unsafe-best-match"
-# Use copy mode to avoid hardlink failures with Docker cache mounts
-ENV UV_LINK_MODE=copy
 #################### TORCH NIGHTLY BASE IMAGE ####################
@@ -71,21 +77,6 @@ ENV UV_LINK_MODE=copy
 FROM ${BUILD_BASE_IMAGE} AS base
 USER root
-ARG CUDA_VERSION
-ARG PYTHON_VERSION
-# TODO (huydhn): Only work with PyTorch manylinux builder
-ENV PATH="/opt/python/cp312-cp312/bin:${PATH}"
-# Install some system dependencies and double check python version
-RUN if command -v apt-get >/dev/null; then \
-        apt-get update -y \
-        && apt-get install -y ccache software-properties-common git curl wget sudo vim; \
-    else \
-        dnf install -y git curl wget sudo vim; \
-    fi \
-    && python3 --version && python3 -m pip --version
 # Workaround for https://github.com/openai/triton/issues/2507 and
 # https://github.com/pytorch/pytorch/issues/107960 -- hopefully
 # this won't be needed for future versions of this docker image
@@ -99,8 +90,6 @@ RUN --mount=type=cache,target=/root/.cache/uv \
     fi
 ENV UV_HTTP_TIMEOUT=500
 ENV UV_INDEX_STRATEGY="unsafe-best-match"
-# Use copy mode to avoid hardlink failures with Docker cache mounts
-ENV UV_LINK_MODE=copy
 WORKDIR /workspace
@@ -123,17 +112,18 @@ ARG PINNED_TORCH_VERSION
 RUN --mount=type=bind,source=${TORCH_WHEELS_PATH},target=/dist \
     --mount=type=cache,target=/root/.cache/uv \
     if [ -n "$TORCH_WHEELS_PATH" ] && [ "$TORCH_WHEELS_PATH" != "./requirements" ] && [ -d "/dist" ] && ls /dist/torch*.whl >/dev/null 2>&1; then \
-        echo "[INFO] Installing torch wheels to build vllm"; \
         torch_whl=$(find /dist -maxdepth 1 -name 'torch-*.whl' -print -quit); \
-        vision_whl=$(find /dist -name 'torchvision*.whl' | head -n1 | xargs); \
-        audio_whl=$(find /dist -name 'torchaudio*.whl' | head -n1 | xargs); \
-        uv pip install --system "${torch_whl}[opt-einsum]" "${vision_whl}" "${audio_whl}" /dist/*.whl; \
+        vision_whl=$(find /dist/vision -name 'torchvision*.whl' | head -n1 | xargs); \
+        audio_whl=$(find /dist/audio -name 'torchaudio*.whl' | head -n1 | xargs); \
+        uv pip install --system "${torch_whl}[opt-einsum]"; \
+        uv pip install --system "${vision_whl}"; \
+        uv pip install --system "${audio_whl}"; \
     elif [ -n "$PINNED_TORCH_VERSION" ]; then \
-        echo "[INFO] Installing pinned torch nightly version to build vllm: $PINNED_TORCH_VERSION"; \
-        uv pip install --system "$PINNED_TORCH_VERSION" --index-url https://download.pytorch.org/whl/nightly/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.'); \
+        echo "[INFO] Installing pinned torch nightly version: $PINNED_TORCH_VERSION"; \
+        uv pip install --system "$PINNED_TORCH_VERSION" --index-url https://download.pytorch.org/whl/nightly/cu128; \
     else \
-        echo "[INFO] Installing torch nightly with latest one to build vllm"; \
-        uv pip install --system torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.'); \
+        echo "[INFO] Installing torch nightly with latest one"; \
+        uv pip install --system torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu128; \
     fi
 # Install numba 0.61.2 for cuda environment
@@ -142,25 +132,19 @@ RUN --mount=type=cache,target=/root/.cache/uv \
 # Install common dependencies from vllm common.txt
 RUN --mount=type=cache,target=/root/.cache/uv \
     uv pip install --system -r requirements/common.txt
 # Must put before installing xformers, so it can install the correct version of xfomrers.
-ARG xformers_cuda_arch_list='7.5;8.0+PTX;9.0a'
-ENV TORCH_CUDA_ARCH_LIST=${xformers_cuda_arch_list}
+ARG torch_cuda_arch_list='8.0;8.6;8.9;9.0'
+ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list}
 ARG max_jobs=16
 ENV MAX_JOBS=${max_jobs}
-RUN echo ${TORCH_CUDA_ARCH_LIST}
-RUN echo ${MAX_JOBS}
-RUN pip freeze | grep -E 'ninja'
 # Build xformers with cuda and torch nightly/wheel
 # following official xformers guidance: https://github.com/facebookresearch/xformers#build
-# sha for https://github.com/facebookresearch/xformers/tree/v0.0.32.post2
-ARG XFORMERS_COMMIT=5d4b92a5e5a9c6c6d4878283f47d82e17995b468
+ARG XFORMERS_COMMIT=f2de641ef670510cadab099ce6954031f52f191c
 ENV CCACHE_DIR=/root/.cache/ccache
 RUN --mount=type=cache,target=/root/.cache/ccache \
     --mount=type=cache,target=/root/.cache/uv \
     echo 'git clone xformers...' \
@@ -173,15 +157,14 @@ RUN --mount=type=cache,target=/root/.cache/ccache \
     && python3 setup.py bdist_wheel --dist-dir=../xformers-dist --verbose \
     && cd .. \
     && rm -rf xformers
 RUN --mount=type=cache,target=/root/.cache/uv \
     uv pip install --system xformers-dist/*.whl --verbose
 # Build can take a long time, and the torch nightly version fetched from url can be different in next docker stage.
 # track the nightly torch version used in the build, when we set up runtime environment we can make sure the version is the same
 RUN uv pip freeze | grep -i '^torch\|^torchvision\|^torchaudio' > torch_build_versions.txt
 RUN cat torch_build_versions.txt
 RUN pip freeze | grep -E 'torch|xformers|torchvision|torchaudio'
 #################### BASE BUILD IMAGE ####################
@@ -192,6 +175,9 @@ RUN pip freeze | grep -E 'torch|xformers|torchvision|torchaudio'
 FROM base AS build
 ARG TARGETPLATFORM
+ENV UV_HTTP_TIMEOUT=500
+ENV UV_INDEX_STRATEGY="unsafe-best-match"
 COPY . .
 RUN python3 use_existing_torch.py
@@ -206,7 +192,7 @@ RUN --mount=type=bind,source=.git,target=.git \
 # Max jobs used by Ninja to build extensions
 ARG max_jobs=16
 ENV MAX_JOBS=${max_jobs}
-ARG nvcc_threads=4
+ARG nvcc_threads=2
 ENV NVCC_THREADS=$nvcc_threads
 ARG torch_cuda_arch_list='8.0;8.6;8.9;9.0'
 ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list}
@@ -230,14 +216,11 @@ RUN --mount=type=cache,target=/root/.cache/uv \
     && export SCCACHE_S3_NO_CREDENTIALS=${SCCACHE_S3_NO_CREDENTIALS} \
     && export SCCACHE_IDLE_TIMEOUT=0 \
     && export CMAKE_BUILD_TYPE=Release \
-    && export VLLM_DOCKER_BUILD_CONTEXT=1 \
     && sccache --show-stats \
     && python3 setup.py bdist_wheel --dist-dir=vllm-dist --py-limited-api=cp38 \
     && sccache --show-stats; \
     fi
-ARG vllm_target_device="cuda"
-ENV VLLM_TARGET_DEVICE=${vllm_target_device}
 ENV CCACHE_DIR=/root/.cache/ccache
 RUN --mount=type=cache,target=/root/.cache/ccache \
     --mount=type=cache,target=/root/.cache/uv \
@@ -246,13 +229,12 @@ RUN --mount=type=cache,target=/root/.cache/ccache \
     # Clean any existing CMake artifacts
     rm -rf .deps && \
     mkdir -p .deps && \
-    export VLLM_DOCKER_BUILD_CONTEXT=1 && \
     python3 setup.py bdist_wheel --dist-dir=vllm-dist --py-limited-api=cp38; \
     fi
-RUN echo "[INFO] Listing current directory:" && \
+RUN echo "[DEBUG] Listing current directory:" && \
     ls -al && \
-    echo "[INFO] Showing torch_build_versions.txt content:" && \
+    echo "[DEBUG] Showing torch_build_versions.txt content:" && \
     cat torch_build_versions.txt
 #################### WHEEL BUILD IMAGE ####################
@@ -262,40 +244,42 @@ RUN echo "[INFO] Listing current directory:" && \
 # Setup clean environment for vLLM for test and api server using ubuntu22.04 with AOT flashinfer
 FROM ${FINAL_BASE_IMAGE} AS vllm-base
 USER root
-ARG CUDA_VERSION
-ARG PYTHON_VERSION
-ARG GET_PIP_URL
-# TODO (huydhn): Only work with PyTorch manylinux builder
-ENV PATH="/opt/python/cp312-cp312/bin:${PATH}"
 # prepare for environment starts
 WORKDIR /workspace
+RUN PYTHON_VERSION_STR=$(echo ${PYTHON_VERSION} | sed 's/\.//g') && \
+    echo "export PYTHON_VERSION_STR=${PYTHON_VERSION_STR}" >> /etc/environment
-# Install Python and other dependencies
-RUN if command -v apt-get >/dev/null; then \
-        apt-get update -y \
-        && apt-get install -y ccache software-properties-common git curl wget sudo vim \
-        && add-apt-repository -y ppa:deadsnakes/ppa \
-        && apt-get update -y \
-        && apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv \
-        && update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1 \
-        && update-alternatives --set python3 /usr/bin/python${PYTHON_VERSION} \
-        && ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config \
-        && curl -sS ${GET_PIP_URL} | python${PYTHON_VERSION}; \
-    else \
-        dnf install -y git curl wget sudo vim; \
-    fi \
+# Install Python and other dependencies if it does not existed
+RUN if ! command -v python3 >/dev/null || ! python3 --version | grep -q "${PYTHON_VERSION}"; then \
+        echo "Installing Python ${PYTHON_VERSION}..." && \
+        echo 'tzdata tzdata/Areas select America' | debconf-set-selections && \
+        echo 'tzdata tzdata/Zones/America select Los_Angeles' | debconf-set-selections && \
+        apt-get update -y && \
+        apt-get install -y ccache software-properties-common git curl sudo && \
+        for i in 1 2 3; do \
+            add-apt-repository -y ppa:deadsnakes/ppa && break || \
+            { echo "Attempt $i failed, retrying in 5s..."; sleep 5; }; \
+        done && \
+        apt-get update -y && \
+        apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv && \
+        update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1 && \
+        update-alternatives --set python3 /usr/bin/python${PYTHON_VERSION} && \
+        ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config && \
+        curl -sS https://bootstrap.pypa.io/get-pip.py | python${PYTHON_VERSION}; \
+    else \
+        echo "Python ${PYTHON_VERSION} already present, skipping setup."; \
+    fi \
     && python3 --version && python3 -m pip --version
 # Get the torch versions, and whls used in previous stagtes for consistency
 COPY --from=base /workspace/torch_build_versions.txt ./torch_build_versions.txt
 COPY --from=base /workspace/xformers-dist /wheels/xformers
 COPY --from=build /workspace/vllm-dist /wheels/vllm
-RUN echo "[INFO] Listing current directory before torch install step:" && \
+RUN echo "[DEBUG] Listing current directory before torch install step:" && \
     ls -al && \
-    echo "[INFO] Showing torch_build_versions.txt content:" && \
+    echo "[DEBUG] Showing torch_build_versions.txt content:" && \
     cat torch_build_versions.txt
 # Workaround for https://github.com/openai/triton/issues/2507 and
@@ -304,6 +288,7 @@ RUN echo "[INFO] Listing current directory before torch install step:" && \
 # or future versions of triton.
 RUN ldconfig /usr/local/cuda-$(echo $CUDA_VERSION | cut -d. -f1,2)/compat/
+# Install uv for faster pip installs if not existed
 RUN --mount=type=cache,target=/root/.cache/uv \
     if ! python3 -m uv --version > /dev/null 2>&1; then \
@@ -311,8 +296,6 @@ RUN --mount=type=cache,target=/root/.cache/uv \
     fi
 ENV UV_HTTP_TIMEOUT=500
 ENV UV_INDEX_STRATEGY="unsafe-best-match"
-# Use copy mode to avoid hardlink failures with Docker cache mounts
-ENV UV_LINK_MODE=copy
 # Default mount file as placeholder, this just avoid the mount error
 ARG TORCH_WHEELS_PATH="./requirements"
@@ -323,13 +306,15 @@ RUN --mount=type=bind,source=${TORCH_WHEELS_PATH},target=/dist \
     --mount=type=cache,target=/root/.cache/uv \
     if [ -n "$TORCH_WHEELS_PATH" ] && [ "$TORCH_WHEELS_PATH" != "./requirements" ] && [ -d "/dist" ] && ls /dist/torch*.whl >/dev/null 2>&1; then \
         torch_whl=$(find /dist -maxdepth 1 -name 'torch-*.whl' -print -quit); \
-        vision_whl=$(find /dist -name 'torchvision*.whl' | head -n1 | xargs); \
-        audio_whl=$(find /dist -name 'torchaudio*.whl' | head -n1 | xargs); \
-        echo "[INFO] Use wheels to build : '${torch_whl}' '${audio_whl}' '${vision_whl}'"; \
-        uv pip install --system "${torch_whl}[opt-einsum]" "${vision_whl}" "${audio_whl}" /dist/*.whl; \
+        vision_whl=$(find /dist/vision -name 'torchvision*.whl' | head -n1 | xargs); \
+        audio_whl=$(find /dist/audio -name 'torchaudio*.whl' | head -n1 | xargs); \
+        echo "Found: '${torch_whl}' '${audio_whl}' '${vision_whl}'"; \
+        uv pip install --system "${torch_whl}[opt-einsum]"; \
+        uv pip install --system "${vision_whl}"; \
+        uv pip install --system "${audio_whl}"; \
     else \
         echo "[INFO] Installing torch versions from torch_build_versions.txt"; \
-        uv pip install --system $(cat torch_build_versions.txt | xargs) --index-url https://download.pytorch.org/whl/nightly/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.'); \
+        uv pip install --system $(cat torch_build_versions.txt | xargs) --index-url https://download.pytorch.org/whl/nightly/cu128; \
     fi
 # Install the vllm wheel from previous stage
@@ -340,8 +325,9 @@ RUN --mount=type=cache,target=/root/.cache/uv \
 RUN --mount=type=cache,target=/root/.cache/uv \
     uv pip install --system /wheels/xformers/*.whl --verbose
 # Build flashinfer from source.
-ARG torch_cuda_arch_list='8.0;8.9;9.0a;10.0a;12.0'
+ARG torch_cuda_arch_list='8.0;8.9;9.0a'
 # install package for build flashinfer
 # see issue: https://github.com/flashinfer-ai/flashinfer/issues/738
@@ -352,7 +338,7 @@ ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list}
 # Build flashinfer for torch nightly from source around 10 mins
 ARG FLASHINFER_GIT_REPO="https://github.com/flashinfer-ai/flashinfer.git"
 # Keep this in sync with https://github.com/vllm-project/vllm/blob/main/requirements/cuda.txt
-ARG FLASHINFER_GIT_REF="v0.2.14.post1"
+ARG FLASHINFER_GIT_REF="v0.2.9rc2"
 RUN --mount=type=cache,target=/root/.cache/uv \
     git clone --depth 1 --recursive --shallow-submodules \
     --branch ${FLASHINFER_GIT_REF} \
@@ -370,7 +356,6 @@ RUN --mount=type=cache,target=/root/.cache/uv \
 # Logging to confirm the torch versions
 RUN pip freeze | grep -E 'torch|xformers|vllm|flashinfer'
-RUN uv pip freeze | grep -i '^torch\|^torchvision\|^torchaudio\|^xformers\|^vllm\|^flashinfer' > build_summary.txt
 ################### VLLM INSTALLED IMAGE ####################
@@ -379,8 +364,6 @@ FROM vllm-base as test
 ENV UV_HTTP_TIMEOUT=500
 ENV UV_INDEX_STRATEGY="unsafe-best-match"
-# Use copy mode to avoid hardlink failures with Docker cache mounts
-ENV UV_LINK_MODE=copy
 COPY tests/ tests/
 COPY examples examples
@@ -409,6 +392,11 @@ RUN --mount=type=cache,target=/root/.cache/uv \
 RUN --mount=type=cache,target=/root/.cache/uv \
     uv pip install --system -r requirements/nightly_torch_test.txt
+# Workaround for #17068
+# pinned commit for v2.2.4
+RUN --mount=type=cache,target=/root/.cache/uv \
+    uv pip install --system --no-build-isolation "git+https://github.com/state-spaces/mamba@95d8aba8a8c75aedcaa6143713b11e745e7cd0d9#egg=mamba-ssm"
 # Logging to confirm the torch versions
 RUN pip freeze | grep -E 'torch|xformers|vllm|flashinfer'
@@ -423,5 +411,4 @@ FROM scratch as export-wheels
 # Just copy the wheels we prepared in previous stages
 COPY --from=base /workspace/xformers-dist /wheels/xformers
 COPY --from=build /workspace/vllm-dist /wheels/vllm
-COPY --from=vllm-base /workspace/build_summary.txt /wheels/build_summary.txt
 COPY --from=vllm-base /workspace/wheels/flashinfer /wheels/flashinfer-python

View File

@@ -1,17 +0,0 @@
-import glob
-
-requires_files = glob.glob("requirements/*.txt")
-requires_files += ["pyproject.toml"]
-for file in requires_files:
-    print(f">>> cleaning {file}")
-    with open(file) as f:
-        lines = f.readlines()
-    if "torch" in "".join(lines).lower():
-        print("removed:")
-        with open(file, "w") as f:
-            for line in lines:
-                if "torch" not in line.lower():
-                    f.write(line)
-    print(f"<<< done cleaning {file}")
-    print()
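The deleted helper's core is the case-insensitive filter in its inner loop: any requirements line mentioning torch is dropped before the file is rewritten. A self-contained demo of that rule (sample lines are made up):

lines = ["numpy==2.0.2\n", "torch==2.8.0\n", "torchvision\n", "ninja\n"]
kept = [line for line in lines if "torch" not in line.lower()]
assert kept == ["numpy==2.0.2\n", "ninja\n"]  # torch entries removed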

View File

@@ -1,24 +0,0 @@
-version: 2
-updates:
-  # Update to the latest transformers version with dependabot
-  - package-ecosystem: "pip"
-    directory: "/.ci/docker/ci_commit_pins"
-    schedule:
-      interval: "daily"
-    target-branch: "main"
-    allow:
-      - dependency-name: "transformers"
-    ignore:
-      - dependency-name: "*"
-        update-types: ["version-update:semver-patch"]
-    commit-message:
-      prefix: "[Dependabot] Update"
-      include: "scope"
-    labels:
-      - "dependencies"
-      - "open source"
-      - "python"
-      - "topic: not user facing"
-      - "module: ci"
-      - "module: inductor"
-      - "ciflow/inductor"

View File

@@ -27,7 +27,6 @@ ciflow_push_tags:
 - ciflow/trunk
 - ciflow/unstable
 - ciflow/xpu
-- ciflow/vllm
 - ciflow/torchbench
 - ciflow/op-benchmark
 - ciflow/pull

View File

@@ -15,7 +15,7 @@ optree==0.13.0
 packaging==23.1
 parameterized==0.8.1
 pillow==10.3.0
-protobuf==5.29.5
+protobuf==5.29.4
 psutil==5.9.8
 pygments==2.15.0
 pytest-cpp==2.3.0
@@ -26,9 +26,9 @@ pytest-xdist==3.3.1
 pytest==7.3.2
 pyyaml==6.0.2
 scipy==1.12.0
-setuptools==78.1.1
+setuptools==72.1.0
 sympy==1.13.3
-tlparse==0.4.0
+tlparse==0.3.30
 tensorboard==2.13.0
 typing-extensions==4.12.2
 unittest-xml-reporting<=3.2.0,>=2.0.0

View File

@@ -84,7 +84,6 @@ def build_triton(
             ["git", "checkout", f"release/{ver}.{rev}.x"], cwd=triton_basedir
         )
     else:
-        check_call(["git", "fetch", "origin", commit_hash], cwd=triton_basedir)
         check_call(["git", "checkout", commit_hash], cwd=triton_basedir)
     # change built wheel name and version
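The removed `git fetch origin <sha>` matters for shallow or stale clones: `git checkout <sha>` only succeeds if the commit object is already local. A hedged, self-contained sketch of the old two-step behavior (path and hash are illustrative):

from subprocess import check_call

triton_basedir = "/tmp/triton"  # illustrative clone location
commit_hash = "0123456789abcdef0123456789abcdef01234567"  # illustrative pin

# Fetch the exact commit first so the detached checkout works even when
# the clone is shallow or the pinned commit is not on a fetched branch.
check_call(["git", "fetch", "origin", commit_hash], cwd=triton_basedir)
check_call(["git", "checkout", commit_hash], cwd=triton_basedir)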

View File

@@ -39,9 +39,7 @@ def main() -> None:
     pull_request_label_names = [label.name for label in pull_request_labels]
     issue_label_names = [label.name for label in issue_labels]
     labels_to_add = [
-        label
-        for label in issue_label_names
-        if label not in pull_request_label_names and label != "actionable"
+        label for label in issue_label_names if label not in pull_request_label_names
     ]
     if not labels_to_add:
         print("The pull request already has the same labels.")

View File

@ -16,17 +16,17 @@ from typing import Optional
# NOTE: Please also update the CUDA sources in `PIP_SOURCES` in tools/nightly.py when changing this # NOTE: Please also update the CUDA sources in `PIP_SOURCES` in tools/nightly.py when changing this
CUDA_ARCHES = ["12.6", "12.8", "13.0"] CUDA_ARCHES = ["12.6", "12.8", "12.9"]
CUDA_STABLE = "12.8" CUDA_STABLE = "12.8"
CUDA_ARCHES_FULL_VERSION = { CUDA_ARCHES_FULL_VERSION = {
"12.6": "12.6.3", "12.6": "12.6.3",
"12.8": "12.8.1", "12.8": "12.8.1",
"13.0": "13.0.0", "12.9": "12.9.1",
} }
CUDA_ARCHES_CUDNN_VERSION = { CUDA_ARCHES_CUDNN_VERSION = {
"12.6": "9", "12.6": "9",
"12.8": "9", "12.8": "9",
"13.0": "9", "12.9": "9",
} }
# NOTE: Please also update the ROCm sources in `PIP_SOURCES` in tools/nightly.py when changing this # NOTE: Please also update the ROCm sources in `PIP_SOURCES` in tools/nightly.py when changing this
@ -38,82 +38,82 @@ CPU_AARCH64_ARCH = ["cpu-aarch64"]
CPU_S390X_ARCH = ["cpu-s390x"] CPU_S390X_ARCH = ["cpu-s390x"]
CUDA_AARCH64_ARCHES = ["12.6-aarch64", "12.8-aarch64", "13.0-aarch64"] CUDA_AARCH64_ARCHES = ["12.9-aarch64"]
PYTORCH_EXTRA_INSTALL_REQUIREMENTS = { PYTORCH_EXTRA_INSTALL_REQUIREMENTS = {
"12.6": ( "12.6": (
"nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | " "nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | " "nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | " "nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | " "nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | " "nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | " "nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | " "nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | " "nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | " "nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | " "nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | " "nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | " "nvidia-nvshmem-cu12==3.3.9; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | " "nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | " "nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'" "nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux' and platform_machine == 'x86_64'"
), ),
"12.8": ( "12.8": (
"nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | " "nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | " "nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | " "nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | " "nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | " "nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | " "nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | " "nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | " "nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | " "nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | " "nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' | " "nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | " "nvidia-nvshmem-cu12==3.3.9; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | " "nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | " "nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'" "nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux' and platform_machine == 'x86_64'"
), ),
"13.0": ( "12.9": (
"nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | " "nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | " "nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | " "nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | " "nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cublas==13.0.0.19; platform_system == 'Linux' | " "nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cufft==12.0.0.15; platform_system == 'Linux' | " "nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-curand==10.4.0.35; platform_system == 'Linux' | " "nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | " "nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | " "nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | " "nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' | " "nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | " "nvidia-nvshmem-cu12==3.3.9; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nvtx==13.0.39; platform_system == 'Linux' | " "nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | " "nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cufile==1.15.0.42; platform_system == 'Linux'" "nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64'"
), ),
"xpu": ( "xpu": (
"intel-cmplr-lib-rt==2025.2.1 | " "intel-cmplr-lib-rt==2025.1.1 | "
"intel-cmplr-lib-ur==2025.2.1 | " "intel-cmplr-lib-ur==2025.1.1 | "
"intel-cmplr-lic-rt==2025.2.1 | " "intel-cmplr-lic-rt==2025.1.1 | "
"intel-sycl-rt==2025.2.1 | " "intel-sycl-rt==2025.1.1 | "
"oneccl-devel==2021.16.1; platform_system == 'Linux' and platform_machine == 'x86_64' | " "oneccl-devel==2021.15.2; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"oneccl==2021.16.1; platform_system == 'Linux' and platform_machine == 'x86_64' | " "oneccl==2021.15.2; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"impi-rt==2021.16.1; platform_system == 'Linux' and platform_machine == 'x86_64' | " "impi-rt==2021.15.0; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"onemkl-sycl-blas==2025.2.0 | " "onemkl-sycl-blas==2025.1.0 | "
"onemkl-sycl-dft==2025.2.0 | " "onemkl-sycl-dft==2025.1.0 | "
"onemkl-sycl-lapack==2025.2.0 | " "onemkl-sycl-lapack==2025.1.0 | "
"onemkl-sycl-rng==2025.2.0 | " "onemkl-sycl-rng==2025.1.0 | "
"onemkl-sycl-sparse==2025.2.0 | " "onemkl-sycl-sparse==2025.1.0 | "
"dpcpp-cpp-rt==2025.2.1 | " "dpcpp-cpp-rt==2025.1.1 | "
"intel-opencl-rt==2025.2.1 | " "intel-opencl-rt==2025.1.1 | "
"mkl==2025.2.0 | " "mkl==2025.1.0 | "
"intel-openmp==2025.2.1 | " "intel-openmp==2025.1.1 | "
"tbb==2022.2.0 | " "tbb==2022.1.0 | "
"tcmlib==1.4.0 | " "tcmlib==1.3.0 | "
"umf==0.11.0 | " "umf==0.10.0 | "
"intel-pti==0.13.1" "intel-pti==0.12.3"
), ),
} }
@ -124,7 +124,9 @@ def get_nccl_wheel_version(arch_version: str) -> str:
requirements = map( requirements = map(
str.strip, re.split("[;|]", PYTORCH_EXTRA_INSTALL_REQUIREMENTS[arch_version]) str.strip, re.split("[;|]", PYTORCH_EXTRA_INSTALL_REQUIREMENTS[arch_version])
) )
return next(x for x in requirements if x.startswith("nvidia-nccl")).split("==")[1] return next(x for x in requirements if x.startswith("nvidia-nccl-cu")).split("==")[
1
]
def read_nccl_pin(arch_version: str) -> str: def read_nccl_pin(arch_version: str) -> str:
@@ -191,7 +193,7 @@ LIBTORCH_CONTAINER_IMAGES: dict[str, str] = {
     "cpu": "libtorch-cxx11-builder:cpu",
 }

-FULL_PYTHON_VERSIONS = ["3.10", "3.11", "3.12", "3.13", "3.13t", "3.14", "3.14t"]
+FULL_PYTHON_VERSIONS = ["3.9", "3.10", "3.11", "3.12", "3.13", "3.13t", "3.14", "3.14t"]


 def translate_desired_cuda(gpu_arch_type: str, gpu_arch_version: str) -> str:
@@ -309,20 +311,19 @@ def generate_wheels_matrix(
                 else arch_version
             )

-            # TODO: Enable python 3.14 for rest
-            if os not in [
-                "linux",
-                "linux-aarch64",
-                "linux-s390x",
-                "macos-arm64",
-                "windows",
-            ] and (python_version == "3.14" or python_version == "3.14t"):
+            # TODO: Enable python 3.13t on cpu-s390x
+            if gpu_arch_type == "cpu-s390x" and python_version == "3.13t":
+                continue
+            # TODO: Enable python 3.14 on non linux OSes
+            if os not in ["linux", "macos-arm64"] and (
+                python_version == "3.14" or python_version == "3.14t"
+            ):
                 continue

             # cuda linux wheels require PYTORCH_EXTRA_INSTALL_REQUIREMENTS to install
             if (
-                arch_version in ["13.0", "12.8", "12.6"]
+                arch_version in ["12.9", "12.8", "12.6"]
                 and os == "linux"
                 or arch_version in CUDA_AARCH64_ARCHES
             ):
@@ -355,6 +356,29 @@ def generate_wheels_matrix(
                     ),  # include special case for aarch64 build, remove the -aarch64 postfix
                 }
             )
+            # Special build building to use on Colab. Python 3.11 for 12.6 CUDA
+            if python_version == "3.11" and arch_version == CUDA_STABLE:
+                ret.append(
+                    {
+                        "python_version": python_version,
+                        "gpu_arch_type": gpu_arch_type,
+                        "gpu_arch_version": gpu_arch_version,
+                        "desired_cuda": translate_desired_cuda(
+                            gpu_arch_type, gpu_arch_version
+                        ),
+                        "container_image": WHEEL_CONTAINER_IMAGES[
+                            arch_version
+                        ].split(":")[0],
+                        "container_image_tag_prefix": WHEEL_CONTAINER_IMAGES[
+                            arch_version
+                        ].split(":")[1],
+                        "package_type": package_type,
+                        "pytorch_extra_install_requirements": "",
+                        "build_name": f"{package_type}-py{python_version}-{gpu_arch_type}{gpu_arch_version}-full".replace(  # noqa: B950
+                            ".", "_"
+                        ),
+                    }
+                )
         else:
             ret.append(
                 {
@@ -385,6 +409,6 @@ def generate_wheels_matrix(
     return ret


-validate_nccl_dep_consistency("13.0")
+validate_nccl_dep_consistency("12.9")
 validate_nccl_dep_consistency("12.8")
 validate_nccl_dep_consistency("12.6")

View File

@@ -1,91 +0,0 @@
#!/usr/bin/env bash
set -eux
torch_version=$(unzip -p torch-* '**/METADATA' | grep '^Version: ' | cut -d' ' -f2)
nightly=$(echo ${torch_version} | cut -d'.' -f4)
# Copied from .ci/manywheel/build_common.sh
make_wheel_record() {
fpath=$1
if echo $fpath | grep RECORD >/dev/null 2>&1; then
echo "$fpath,,"
else
fhash=$(openssl dgst -sha256 -binary $fpath | openssl base64 | sed -e 's/+/-/g' | sed -e 's/\//_/g' | sed -e 's/=//g')
fsize=$(ls -nl $fpath | awk '{print $5}')
echo "$fpath,sha256=$fhash,$fsize"
fi
}
change_wheel_version() {
local package=$1
local wheel=$2
local f_version=$3
local t_version=$4
# Extract the wheel
${PYTHON_EXECUTABLE} -mwheel unpack $wheel
mv "${package}-${f_version}" "${package}-${t_version}"
# Change the version from f_version to t_version in the dist-info dir
pushd "${package}-${t_version}"
mv "${package}-${f_version}.dist-info" "${package}-${t_version}.dist-info"
pushd "${package}-${t_version}.dist-info"
sed -i "s/${package}-${f_version}.dist-info/${package}-${t_version}.dist-info/g" RECORD
# Update the version in METADATA and its SHA256 hash
sed -i "s/Version: ${f_version}/Version: ${t_version}/g" METADATA
# then add PyTorch nightly dependency of vLLM
if [[ "${package}" == vllm ]] || [[ "${package}" == xformers ]]; then
sed -i "/License-File/a\Requires-Dist: torch==${torch_version}" METADATA
fi
sed -i '/METADATA,sha256/d' RECORD
popd
make_wheel_record "${package}-${t_version}.dist-info/METADATA" >> "${package}-${t_version}.dist-info/RECORD"
popd
# Repack the wheel
${PYTHON_EXECUTABLE} -mwheel pack "${package}-${t_version}"
# Clean up
rm -rf "${package}-${t_version}"
}
repackage_wheel() {
local package=$1
pushd $package
local orig_wheel=$(find . -name *${package//-/_}*)
local orig_version=$(unzip -p $orig_wheel '**/METADATA' | grep '^Version: ' | cut -d' ' -f2)
local version=""
if [[ "${package}" == vllm ]]; then
# Copied from vllm/.buildkite/scripts/upload-wheels.sh
version=1.0.0
else
version=$(echo $orig_version | tr '.+' '.' | cut -d'.' -f1-3)
fi
local nightly_version=$version.$nightly
# Use nightly version
change_wheel_version ${package//-/_} $orig_wheel $orig_version $nightly_version
# Clean up
rm "${orig_wheel}"
auditwheel repair --plat $PLATFORM *.whl \
--exclude libc10* --exclude libtorch* --exclude libcu* --exclude libnv*
local repair_wheel=$(find wheelhouse -name *${PLATFORM}*)
local repair_wheel=$(basename ${repair_wheel})
popd
cp ${package}/wheelhouse/${repair_wheel} .
rm -rf $package
}
pushd externals/vllm/wheels
for package in xformers flashinfer-python vllm; do
repackage_wheel $package
done
popd
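
(For context: make_wheel_record above emits PEP 376-style RECORD entries, i.e. urlsafe base64 of the sha256 digest with '=' padding stripped, plus the file size. A Python sketch of the same computation, for illustration only.)

    import base64
    import hashlib
    import os

    def wheel_record_line(fpath: str) -> str:
        # RECORD files themselves are listed without hash or size.
        if "RECORD" in fpath:
            return f"{fpath},,"
        with open(fpath, "rb") as f:
            digest = hashlib.sha256(f.read()).digest()
        # Same normalization as the openssl/sed pipeline: '+'->'-', '/'->'_', drop '='.
        fhash = base64.urlsafe_b64encode(digest).rstrip(b"=").decode()
        return f"{fpath},sha256={fhash},{os.path.getsize(fpath)}"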

View File

@@ -27,7 +27,6 @@ from trymerge import (
     get_drci_classifications,
     gh_get_team_members,
     GitHubPR,
-    iter_issue_timeline_until_comment,
     JobCheckState,
     main as trymerge_main,
     MandatoryChecksMissingError,
@@ -35,8 +34,6 @@ from trymerge import (
     RE_GHSTACK_DESC,
     read_merge_rules,
     remove_job_name_suffix,
-    sha_from_committed_event,
-    sha_from_force_push_after,
     validate_revert,
 )
@ -127,7 +124,7 @@ def mock_parse_args(revert: bool = False, force: bool = False) -> Any:
self.force = force self.force = force
self.pr_num = 76123 self.pr_num = 76123
self.dry_run = True self.dry_run = True
self.comment_id = 12345 # Set to non-zero value self.comment_id = 0
self.reason = "this is for testing" self.reason = "this is for testing"
self.ignore_current = False self.ignore_current = False
self.check_mergeability = False self.check_mergeability = False
@@ -155,9 +152,9 @@
 def mock_merge(
     pr: GitHubPR,
     repo: GitRepo,
-    comment_id: int,
     dry_run: bool = False,
     skip_mandatory_checks: bool = False,
+    comment_id: Optional[int] = None,
     timeout_minutes: int = 400,
     stale_pr_days: int = 3,
     ignore_current: bool = False,
@@ -473,9 +470,9 @@ class TestTryMerge(TestCase):
         mock_merge.assert_called_once_with(
             mock.ANY,
             mock.ANY,
-            comment_id=mock.ANY,
             dry_run=mock.ANY,
             skip_mandatory_checks=True,
+            comment_id=mock.ANY,
             ignore_current=False,
         )
@@ -488,9 +485,9 @@
         mock_merge.assert_called_once_with(
             mock.ANY,
             mock.ANY,
-            comment_id=mock.ANY,
             dry_run=mock.ANY,
             skip_mandatory_checks=False,
+            comment_id=mock.ANY,
             ignore_current=False,
         )
@ -1141,176 +1138,5 @@ Pull Request resolved: https://github.com/pytorch/pytorch/pull/154394"""
) )
@mock.patch("trymerge.gh_graphql", side_effect=mocked_gh_graphql)
@mock.patch("trymerge.gh_fetch_merge_base", return_value="")
@mock.patch(
"trymerge.get_drci_classifications", side_effect=mocked_drci_classifications
)
class TestTimelineFunctions(TestCase):
"""Tests for the new timeline-related functions"""
def test_sha_from_committed_event(self, *args: Any) -> None:
"""Test extracting SHA from committed event"""
# Based on actual GitHub API format - committed events have "sha" at top level
event = {
"event": "committed",
"sha": "fb21ce932ded6670c918804a0d9151b773770a7c",
}
self.assertEqual(
sha_from_committed_event(event), "fb21ce932ded6670c918804a0d9151b773770a7c"
)
# Test with missing SHA
event_no_sha = {"event": "committed"}
self.assertIsNone(sha_from_committed_event(event_no_sha))
def test_sha_from_force_push_after(self, *args: Any) -> None:
"""Test extracting SHA from force push event"""
# NOTE: The current function doesn't handle the actual GitHub API format
# Real force push events have "commit_id" at top level, but this function
# looks for "after", "after_commit", "after_sha", or "head_sha" fields
# Test with the legacy format the current function handles
event_legacy = {
"event": "head_ref_force_pushed",
"after": {"sha": "ef22bcbc54bb0f787e1e4ffd3d83df18fc407f5e"},
}
self.assertEqual(
sha_from_force_push_after(event_legacy),
"ef22bcbc54bb0f787e1e4ffd3d83df18fc407f5e",
)
# Test with current GitHub API format (should return None with current implementation)
event_real_api = {
"event": "head_ref_force_pushed",
"commit_id": "ef22bcbc54bb0f787e1e4ffd3d83df18fc407f5e",
}
self.assertEqual(
sha_from_force_push_after(event_real_api),
"ef22bcbc54bb0f787e1e4ffd3d83df18fc407f5e",
) # Current function doesn't handle commit_id
# Test with missing SHA
event_no_sha = {"event": "head_ref_force_pushed"}
self.assertIsNone(sha_from_force_push_after(event_no_sha))
@mock.patch("trymerge.gh_fetch_json_list")
def test_iter_issue_timeline_until_comment(
self, mock_gh_fetch_json_list: Any, *args: Any
) -> None:
"""Test timeline iteration until target comment"""
# Mock timeline data based on actual GitHub API format
timeline_data = [
{"event": "commented", "id": 100, "body": "first comment"},
{"event": "committed", "sha": "fb21ce932ded6670c918804a0d9151b773770a7c"},
{"event": "commented", "id": 200, "body": "target comment"},
{"event": "commented", "id": 300, "body": "after target"},
]
mock_gh_fetch_json_list.return_value = timeline_data
# Test iteration stops at target comment
events = list(iter_issue_timeline_until_comment("pytorch", "pytorch", 123, 200))
self.assertEqual(len(events), 3) # Should stop at target comment
self.assertEqual(events[0]["event"], "commented")
self.assertEqual(events[0]["id"], 100)
self.assertEqual(events[1]["event"], "committed")
self.assertEqual(events[1]["sha"], "fb21ce932ded6670c918804a0d9151b773770a7c")
self.assertEqual(events[2]["event"], "commented")
self.assertEqual(events[2]["id"], 200)
@mock.patch("trymerge.gh_fetch_json_list")
def test_iter_issue_timeline_until_comment_not_found(
self, mock_gh_fetch_json_list: Any, *args: Any
) -> None:
"""Test timeline iteration when target comment is not found"""
# Mock empty timeline
mock_gh_fetch_json_list.return_value = []
events = list(iter_issue_timeline_until_comment("pytorch", "pytorch", 123, 999))
self.assertEqual(len(events), 0)
@mock.patch("trymerge.iter_issue_timeline_until_comment")
def test_get_commit_sha_at_comment_commit_after_comment(
self, mock_iter_timeline: Any, *args: Any
) -> None:
"""Test get_commit_sha_at_comment returns correct SHA after comment"""
mock_iter_timeline.return_value = [
{"event": "committed", "sha": "commit1"},
{"event": "committed", "sha": "commit2"},
{"event": "commented", "id": 100},
{"event": "head_ref_force_pushed", "after": {"sha": "commit3"}},
]
pr = GitHubPR("pytorch", "pytorch", 77700)
sha = pr.get_commit_sha_at_comment(100)
self.assertEqual(sha, "commit2")
@mock.patch("trymerge.iter_issue_timeline_until_comment")
def test_get_commit_sha_at_comment_force_push_before_comment(
self, mock_iter_timeline: Any, *args: Any
) -> None:
mock_iter_timeline.return_value = [
{"event": "committed", "sha": "commit1"},
{"event": "committed", "sha": "commit2"},
{"event": "head_ref_force_pushed", "commit_id": "commit3"},
{"event": "commented", "id": 100},
]
pr = GitHubPR("pytorch", "pytorch", 77700)
sha = pr.get_commit_sha_at_comment(100)
self.assertEqual(sha, "commit3")
@mock.patch("trymerge.iter_issue_timeline_until_comment")
def test_get_commit_sha_at_comment_force_push_before_comment_legacy_mode(
self, mock_iter_timeline: Any, *args: Any
) -> None:
mock_iter_timeline.return_value = [
{"event": "committed", "sha": "commit1"},
{"event": "committed", "sha": "commit2"},
{"event": "head_ref_force_pushed", "after": {"sha": "commit3"}},
{"event": "commented", "id": 100},
]
pr = GitHubPR("pytorch", "pytorch", 77700)
sha = pr.get_commit_sha_at_comment(100)
self.assertEqual(sha, "commit3")
@mock.patch("trymerge.iter_issue_timeline_until_comment")
def test_get_commit_sha_at_comment_multiple_comments(
self, mock_iter_timeline: Any, *args: Any
) -> None:
mock_iter_timeline.return_value = [
{"event": "committed", "sha": "commit1"},
{"event": "commented", "id": 100},
{"event": "committed", "sha": "commit2"},
{"event": "commented", "id": 200},
{"event": "head_ref_force_pushed", "after": {"sha": "commit3"}},
{"event": "commented", "id": 300},
]
pr = GitHubPR("pytorch", "pytorch", 77700)
sha = pr.get_commit_sha_at_comment(200)
self.assertEqual(sha, "commit2")
sha = pr.get_commit_sha_at_comment(300)
self.assertEqual(sha, "commit3")
@mock.patch("trymerge.iter_issue_timeline_until_comment")
def test_get_commit_sha_at_comment_no_events(
self, mock_iter_timeline: Any, *args: Any
) -> None:
mock_iter_timeline.return_value = [
{"event": "commented", "id": 100},
{"event": "labeled", "label": {"name": "test"}},
]
pr = GitHubPR("pytorch", "pytorch", 77700)
sha = pr.get_commit_sha_at_comment(100)
self.assertIsNone(sha)
@mock.patch("trymerge.iter_issue_timeline_until_comment")
def test_get_commit_sha_at_comment_exception(
self, mock_iter_timeline: Any, *args: Any
) -> None:
mock_iter_timeline.side_effect = Exception("API error")
pr = GitHubPR("pytorch", "pytorch", 77700)
sha = pr.get_commit_sha_at_comment(100)
self.assertIsNone(sha)
if __name__ == "__main__": if __name__ == "__main__":
main() main()

View File

@@ -450,63 +450,6 @@ HAS_NO_CONNECTED_DIFF_TITLE = (
 IGNORABLE_FAILED_CHECKS_THESHOLD = 10
def iter_issue_timeline_until_comment(
org: str, repo: str, issue_number: int, target_comment_id: int, max_pages: int = 200
) -> Any:
"""
Yield timeline entries in order until (and including) the entry whose id == target_comment_id
for a 'commented' event. Stops once the target comment is encountered.
"""
page = 1
while page <= max_pages:
url = (
f"https://api.github.com/repos/{org}/{repo}/issues/{issue_number}/timeline"
)
params = {"per_page": 100, "page": page}
batch = gh_fetch_json_list(url, params)
if not batch:
return
for ev in batch:
# The target is the issue comment row with event == "commented" and id == issue_comment_id
if ev.get("event") == "commented" and ev.get("id") == target_comment_id:
yield ev # nothing in the timeline after this matters, so stop early
return
yield ev
if len(batch) < 100:
return
page += 1
# If we got here without finding the comment, then we either hit a bug or some github PR
# has a _really_ long timeline.
# The max # of pages found on any pytorch/pytorch PR at the time of this change was 41
raise RuntimeError(
f"Could not find a merge commit in the first {max_pages} pages of the timeline at url {url}."
f"This is most likely a bug, please report it to the @pytorch/pytorch-dev-infra team."
)
def sha_from_committed_event(ev: dict[str, Any]) -> Optional[str]:
"""Extract SHA from committed event in timeline"""
return ev.get("sha")
def sha_from_force_push_after(ev: dict[str, Any]) -> Optional[str]:
"""Extract SHA from force push event in timeline"""
# The current GitHub API format
commit_id = ev.get("commit_id")
if commit_id:
return str(commit_id)
# Legacy format
after = ev.get("after") or ev.get("after_commit") or {}
if isinstance(after, dict):
return after.get("sha") or after.get("oid")
return ev.get("after_sha") or ev.get("head_sha")
def gh_get_pr_info(org: str, proj: str, pr_no: int) -> Any: def gh_get_pr_info(org: str, proj: str, pr_no: int) -> Any:
rc = gh_graphql(GH_GET_PR_INFO_QUERY, name=proj, owner=org, number=pr_no) rc = gh_graphql(GH_GET_PR_INFO_QUERY, name=proj, owner=org, number=pr_no)
return rc["data"]["repository"]["pullRequest"] return rc["data"]["repository"]["pullRequest"]
@@ -794,24 +737,16 @@ class GitHubPR:
     def last_commit(self) -> Any:
         return self.info["commits"]["nodes"][-1]["commit"]

-    def last_commit_sha(self, default: Optional[str] = None) -> str:
-        # for commits, the oid is the sha
-        if default is None:
-            return str(self.last_commit()["oid"])
-        return str(self.last_commit().get("oid", default))
-
     def get_merge_base(self) -> str:
         if self.merge_base:
             return self.merge_base

-        last_commit_sha = self.last_commit_sha()
+        last_commit_oid = self.last_commit()["oid"]
         # NB: We could use self.base_ref() here for regular PR, however, that doesn't
         # work for ghstack where the base is the custom branch, i.e. gh/USER/ID/base,
         # so let's just use main instead
         self.merge_base = gh_fetch_merge_base(
-            self.org, self.project, last_commit_sha, self.default_branch()
+            self.org, self.project, last_commit_oid, self.default_branch()
         )

         # Fallback to baseRefOid if the API call fails, i.e. rate limit. Note that baseRefOid
@@ -900,44 +835,6 @@ class GitHubPR:
     def get_commit_count(self) -> int:
         return int(self.info["commits_with_authors"]["totalCount"])
def get_commit_sha_at_comment(self, comment_id: int) -> Optional[str]:
"""
Get the PR head commit SHA that was present when a specific comment was posted.
This ensures we only merge the state of the PR at the time the merge command was issued,
not any subsequent commits that may have been pushed after.
Returns None if no head-changing events found before the comment or if the comment was not found.
"""
head = None
try:
for event in iter_issue_timeline_until_comment(
self.org, self.project, self.pr_num, comment_id
):
etype = event.get("event")
if etype == "committed":
sha = sha_from_committed_event(event)
if sha:
head = sha
print(f"Timeline: Found commit event for SHA {sha}")
elif etype == "head_ref_force_pushed":
sha = sha_from_force_push_after(event)
if sha:
head = sha
print(f"Timeline: Found force push event for SHA {sha}")
elif etype == "commented":
if event.get("id") == comment_id:
print(f"Timeline: Found final comment with sha {sha}")
return head
except Exception as e:
print(
f"Warning: Failed to reconstruct timeline for comment {comment_id}: {e}"
)
return None
print(f"Did not find comment with id {comment_id} in the PR timeline")
return None
def get_pr_creator_login(self) -> str: def get_pr_creator_login(self) -> str:
return cast(str, self.info["author"]["login"]) return cast(str, self.info["author"]["login"])
@@ -1254,7 +1151,7 @@ class GitHubPR:
         *,
         skip_mandatory_checks: bool = False,
         dry_run: bool = False,
-        comment_id: int,
+        comment_id: Optional[int] = None,
         ignore_current_checks: Optional[list[str]] = None,
     ) -> None:
         # Raises exception if matching rule is not found
@@ -1270,7 +1167,7 @@ class GitHubPR:
             skip_internal_checks=can_skip_internal_checks(self, comment_id),
             ignore_current_checks=ignore_current_checks,
         )
-        additional_merged_prs = self.merge_changes_locally(
+        additional_merged_prs = self.merge_changes(
             repo, skip_mandatory_checks, comment_id
         )
@@ -1299,7 +1196,7 @@
                 broken_trunk_checks=ignorable_checks.get("BROKEN_TRUNK", []),
                 flaky_checks=ignorable_checks.get("FLAKY", []),
                 unstable_checks=ignorable_checks.get("UNSTABLE", []),
-                last_commit_sha=self.last_commit_sha(default=""),
+                last_commit_sha=self.last_commit().get("oid", ""),
                 merge_base_sha=self.get_merge_base(),
                 merge_commit_sha=merge_commit_sha,
                 is_failed=False,
@@ -1320,7 +1217,7 @@
             dry_run=dry_run,
         )

-    def merge_changes_locally(
+    def merge_changes(
         self,
         repo: GitRepo,
         skip_mandatory_checks: bool = False,
@@ -1329,63 +1226,33 @@
         skip_all_rule_checks: bool = False,
     ) -> list["GitHubPR"]:
         """
-        :param skip_all_rule_checks: If true, skips all rule checks on ghstack PRs, useful for dry-running merge locally
+        :param skip_all_rule_checks: If true, skips all rule checks, useful for dry-running merge locally
         """
         branch_to_merge_into = self.default_branch() if branch is None else branch
         if repo.current_branch() != branch_to_merge_into:
             repo.checkout(branch_to_merge_into)
-
-        # It's okay to skip the commit SHA check for ghstack PRs since
-        # authoring requires write access to the repo.
-        if self.is_ghstack_pr():
-            return self.merge_ghstack_into(
-                repo,
-                skip_mandatory_checks,
-                comment_id=comment_id,
-                skip_all_rule_checks=skip_all_rule_checks,
-            )
-
-        msg = self.gen_commit_message()
-        pr_branch_name = f"__pull-request-{self.pr_num}__init__"
-
-        # Determine which commit SHA to merge
-        commit_to_merge = None
-        if not comment_id:
-            raise ValueError("Must provide --comment-id when merging regular PRs")
-
-        # Get the commit SHA that was present when the comment was made
-        commit_to_merge = self.get_commit_sha_at_comment(comment_id)
-        if not commit_to_merge:
-            raise RuntimeError(
-                f"Could not find commit that was pushed before comment {comment_id}"
-            )
-
-        # Validate that this commit is the latest commit on the PR
-        latest_commit = self.last_commit_sha()
-        if commit_to_merge != latest_commit:
-            raise RuntimeError(
-                f"Commit {commit_to_merge} was HEAD when comment {comment_id} was posted "
-                f"but now the latest commit on the PR is {latest_commit}. "
-                f"Please re-issue the merge command to merge the latest commit."
-            )
-
-        print(f"Merging commit {commit_to_merge} locally")
-
-        repo.fetch(commit_to_merge, pr_branch_name)
-        repo._run_git("merge", "--squash", pr_branch_name)
-        repo._run_git("commit", f'--author="{self.get_author()}"', "-m", msg)
-
-        # Did the PR change since we started the merge?
-        pulled_sha = repo.show_ref(pr_branch_name)
-        latest_pr_status = GitHubPR(self.org, self.project, self.pr_num)
-        if (
-            pulled_sha != latest_pr_status.last_commit_sha()
-            or pulled_sha != commit_to_merge
-        ):
-            raise RuntimeError(
-                "PR has been updated since CI checks last passed. Please rerun the merge command."
-            )
-        return []
+        if not self.is_ghstack_pr():
+            msg = self.gen_commit_message()
+            pr_branch_name = f"__pull-request-{self.pr_num}__init__"
+            repo.fetch(self.last_commit()["oid"], pr_branch_name)
+            repo._run_git("merge", "--squash", pr_branch_name)
+            repo._run_git("commit", f'--author="{self.get_author()}"', "-m", msg)
+
+            # Did the PR change since we started the merge?
+            pulled_sha = repo.show_ref(pr_branch_name)
+            latest_pr_status = GitHubPR(self.org, self.project, self.pr_num)
+            if pulled_sha != latest_pr_status.last_commit()["oid"]:
+                raise RuntimeError(
+                    "PR has been updated since CI checks last passed. Please rerun the merge command."
+                )
+            return []
+        else:
+            return self.merge_ghstack_into(
+                repo,
+                skip_mandatory_checks,
+                comment_id=comment_id,
+                skip_all_rule_checks=skip_all_rule_checks,
+            )


 class MergeRuleFailedError(RuntimeError):
@@ -1591,7 +1458,7 @@ def find_matching_merge_rule(
     pending_checks = []
     failed_checks = []

-    hud_link = f"https://hud.pytorch.org/{pr.org}/{pr.project}/commit/{pr.last_commit_sha()}"
+    hud_link = f"https://hud.pytorch.org/{pr.org}/{pr.project}/commit/{pr.last_commit()['oid']}"
     if len(failed_checks) > 0:
         if reject_reason_score < 30000:
             reject_reason_score = 30000
@@ -2289,14 +2156,14 @@ def categorize_checks(
 def merge(
     pr: GitHubPR,
     repo: GitRepo,
-    comment_id: int,
     dry_run: bool = False,
     skip_mandatory_checks: bool = False,
+    comment_id: Optional[int] = None,
     timeout_minutes: int = 400,
     stale_pr_days: int = 3,
     ignore_current: bool = False,
 ) -> None:
-    initial_commit_sha = pr.last_commit_sha()
+    initial_commit_sha = pr.last_commit()["oid"]
     pr_link = f"https://github.com/{pr.org}/{pr.project}/pull/{pr.pr_num}"
     print(f"Attempting merge of {initial_commit_sha} ({pr_link})")
@@ -2367,7 +2234,7 @@ def merge(
             f"Attempting merge of https://github.com/{pr.org}/{pr.project}/pull/{pr.pr_num} ({elapsed_time / 60} minutes elapsed)"
         )
         pr = GitHubPR(pr.org, pr.project, pr.pr_num)
-        if initial_commit_sha != pr.last_commit_sha():
+        if initial_commit_sha != pr.last_commit()["oid"]:
             raise RuntimeError(
                 "New commits were pushed while merging. Please rerun the merge command."
             )
@ -2534,7 +2401,7 @@ def main() -> None:
if args.check_mergeability: if args.check_mergeability:
if pr.is_ghstack_pr(): if pr.is_ghstack_pr():
get_ghstack_prs(repo, pr) # raises error if out of sync get_ghstack_prs(repo, pr) # raises error if out of sync
pr.merge_changes_locally( pr.merge_changes(
repo, repo,
skip_mandatory_checks=True, skip_mandatory_checks=True,
skip_all_rule_checks=True, skip_all_rule_checks=True,
@ -2549,18 +2416,12 @@ def main() -> None:
gh_post_pr_comment(org, project, args.pr_num, message, dry_run=args.dry_run) gh_post_pr_comment(org, project, args.pr_num, message, dry_run=args.dry_run)
return return
try: try:
# Ensure comment id is set, else fail
if not args.comment_id:
raise ValueError(
"Comment ID is required for merging PRs, please provide it using --comment-id"
)
merge( merge(
pr, pr,
repo, repo,
comment_id=args.comment_id,
dry_run=args.dry_run, dry_run=args.dry_run,
skip_mandatory_checks=args.force, skip_mandatory_checks=args.force,
comment_id=args.comment_id,
ignore_current=args.ignore_current, ignore_current=args.ignore_current,
) )
except Exception as e: except Exception as e:
@ -2582,7 +2443,7 @@ def main() -> None:
broken_trunk_checks=[], broken_trunk_checks=[],
flaky_checks=[], flaky_checks=[],
unstable_checks=[], unstable_checks=[],
last_commit_sha=pr.last_commit_sha(default=""), last_commit_sha=pr.last_commit().get("oid", ""),
merge_base_sha=pr.get_merge_base(), merge_base_sha=pr.get_merge_base(),
is_failed=True, is_failed=True,
skip_mandatory_checks=args.force, skip_mandatory_checks=args.force,

View File

@@ -35,9 +35,6 @@ cd magma
 mkdir build && cd build
 set GPU_TARGET=All

-if "%CUVER_NODOT%" == "130" (
-  set CUDA_ARCH_LIST=-gencode=arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90 -gencode arch=compute_100,code=sm_100 -gencode arch=compute_120,code=sm_120
-)
-
 if "%CUVER_NODOT%" == "129" (
   set CUDA_ARCH_LIST=-gencode=arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90 -gencode arch=compute_100,code=sm_100 -gencode arch=compute_120,code=sm_120
 )

View File

@@ -1,12 +1,18 @@
 @echo on

-set DESIRED_PYTHON=%PY_VERS%
-call .ci/pytorch/windows/internal/install_python.bat
+set PYTHON_PREFIX=%PY_VERS:.=%
+set PYTHON_PREFIX=py%PYTHON_PREFIX:;=;py%
+call .ci/pytorch/win-test-helpers/installation-helpers/activate_miniconda3.bat
+
+:: Create a new conda environment
+if "%PY_VERS%" == "3.13t" (
+    call conda create -n %PYTHON_PREFIX% -y -c=conda-forge python-freethreading python=3.13
+) else (
+    call conda create -n %PYTHON_PREFIX% -y -c=conda-forge python=%PY_VERS%
+)

 :: Fix cmake version for issue https://github.com/pytorch/pytorch/issues/150480
-%PYTHON_EXEC% -m pip install wheel pybind11 certifi cython cmake==3.31.6 setuptools==72.1.0 ninja==1.11.1.4
+call conda run -n %PYTHON_PREFIX% pip install wheel pybind11 certifi cython cmake==3.31.6 setuptools==72.1.0 ninja==1.11.1.4

 dir "%VC_INSTALL_PATH%"

 call "%VC_INSTALL_PATH%\VC\Auxiliary\Build\vcvarsall.bat" x64
-%PYTHON_EXEC% .github/scripts/build_triton_wheel.py --device=%BUILD_DEVICE% %RELEASE%
+call conda run -n %PYTHON_PREFIX% python .github/scripts/build_triton_wheel.py --device=%BUILD_DEVICE% %RELEASE%

View File

@@ -4,7 +4,7 @@
 {%- set download_artifact_action = "actions/download-artifact@v4.1.7" -%}
 {%- set timeout_minutes = 240 -%}
-{%- set timeout_minutes_windows_binary = 360 -%}
+{%- set timeout_minutes_windows_binary = 300 -%}

 {%- macro concurrency(build_environment) -%}
 concurrency:

View File

@@ -114,12 +114,12 @@ jobs:
       ALPINE_IMAGE: "docker.io/s390x/alpine"
 {%- elif config["gpu_arch_type"] == "rocm" %}
       runs_on: linux.rocm.gpu
-{%- elif config["gpu_arch_type"] == "cuda" and config["gpu_arch_version"] in ["12.6"] %}
+{%- elif config["gpu_arch_type"] == "cuda" and config["gpu_arch_version"] in ["12.8", "12.9"] %}
       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
-      runs_on: linux.4xlarge.nvidia.gpu # 12.6 build can use maxwell (sm_50) runner
+      runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8 and 12.9 build need sm_70+ runner
 {%- elif config["gpu_arch_type"] == "cuda" %}
       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
-      runs_on: linux.g4dn.4xlarge.nvidia.gpu # 12.8+ builds need sm_70+ runner
+      runs_on: linux.4xlarge.nvidia.gpu # for other cuda versions, we use 4xlarge runner
 {%- else %}
       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
       runs_on: linux.4xlarge
@@ -135,7 +135,7 @@ jobs:
       contents: read
     steps:
       - name: Setup XPU
-        uses: pytorch/pytorch/.github/actions/setup-xpu@main
+        uses: ./.github/actions/setup-xpu
       - name: configure aws credentials
         id: aws_creds
         uses: aws-actions/configure-aws-credentials@v4

View File

@@ -22,16 +22,6 @@ name: !{{ build_environment }}
   echo "MAC_PACKAGE_WORK_DIR=${RUNNER_TEMP}" >> "${GITHUB_ENV}"
 {%- endmacro %}

-{%- macro setup_python(py_ver) -%}
-  - name: Setup Python
-    uses: actions/setup-python@v6
-    with:
-      # TODO: Removeme once 3.14 is out
-      # .4 version is min minor for 3.10, and also no-gil version of 3.13 needs at least 3.13.3
-      python-version: "!{{ (py_ver.strip('t') + '.4') if '3.14' not in py_ver else '3.14.0-rc.2' }}"
-      freethreaded: !{{ "true" if py_ver.endswith('t') else "false" }}
-{%- endmacro %}
-
 on:
 # TODO: Migrate to new ciflow trigger, reference https://github.com/pytorch/pytorch/pull/70321
   push:
@@ -71,13 +61,28 @@ jobs:
 {%- endif %}
     steps:
       !{{ set_runner_specific_vars() }}
-      !{{ setup_python(config.get("python_version", "3.10")) }}
+      - name: Install conda and dependencies
+        run: |
+          # Install conda, setup-miniconda messes with the path that messes with the ruby stuff we do later on
+          curl --retry 3 --retry-all-errors -o "${RUNNER_TEMP}/conda.sh" "https://repo.anaconda.com/miniconda/Miniconda3-py310_23.5.2-0-MacOSX-$(uname -m).sh"
+          chmod +x "${RUNNER_TEMP}/conda.sh"
+          /bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
+          echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
+          if [ -d "/Applications/Xcode_14.3.1.app" ]; then
+            echo "DEVELOPER_DIR=/Applications/Xcode_14.3.1.app/Contents/Developer" >> "${GITHUB_ENV}"
+          elif [ -d "/Applications/Xcode_13.3.1.app" ]; then
+            echo "DEVELOPER_DIR=/Applications/Xcode_13.3.1.app/Contents/Developer" >> "${GITHUB_ENV}"
+          fi
       !{{ common.checkout(deep_clone=False, directory="pytorch") }}
       - name: Populate binary env
         run: |
+          # shellcheck disable=SC1091
+          source "${RUNNER_TEMP}/anaconda/bin/activate"
           "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
       - name: Build PyTorch binary
         run: |
+          # shellcheck disable=SC1091
+          source "${RUNNER_TEMP}/anaconda/bin/activate"
           set -eux -o pipefail
           # shellcheck disable=SC1090
           source "${BINARY_ENV_FILE:-/Users/distiller/project/env}"
@@ -94,6 +99,8 @@ jobs:
 {%- if config["package_type"] == "wheel" %}
       - name: Test PyTorch wheel
         run: |
+          # shellcheck disable=SC1091
+          source "${RUNNER_TEMP}/anaconda/bin/activate"
           set -eux -o pipefail
           # shellcheck disable=SC1090
           source "${BINARY_ENV_FILE:-/Users/distiller/project/env}"
@@ -104,9 +111,33 @@ jobs:
           SMOKE_TEST_PARAMS=""

+          EXTRA_CONDA_INSTALL_FLAGS=""
+          CONDA_ENV_CREATE_FLAGS=""
+          # shellcheck disable=SC2153
+          case $DESIRED_PYTHON in
+            3.14t)
+              CONDA_ENV_CREATE_FLAGS="python-freethreading"
+              EXTRA_CONDA_INSTALL_FLAGS="-c conda-forge/label/python_rc -c conda-forge"
+              desired_python="3.14.0rc1"
+              ;;
+            3.14)
+              EXTRA_CONDA_INSTALL_FLAGS="-c conda-forge/label/python_rc -c conda-forge"
+              desired_python="3.14.0rc1"
+              ;;
+            3.13t)
+              CONDA_ENV_CREATE_FLAGS="python-freethreading"
+              EXTRA_CONDA_INSTALL_FLAGS="-c conda-forge"
+              desired_python="3.13"
+              ;;
+            *)
+              # shellcheck disable=SC2153
+              desired_python=${DESIRED_PYTHON}
+              ;;
+          esac
+
           # shellcheck disable=SC2086
-          python -mvenv test_venv
-          source test_venv/bin/activate
+          conda create -yn "test_conda_env" python="$desired_python" ${CONDA_ENV_CREATE_FLAGS} ${EXTRA_CONDA_INSTALL_FLAGS}
+          conda activate test_conda_env
           pip install "$PYTORCH_FINAL_PACKAGE_DIR"/*.whl numpy -v
           # shellcheck disable=SC2086
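
(The case block above maps DESIRED_PYTHON to a conda python spec plus channel flags; a Python transcription for illustration only, assuming the same inputs.)

    def conda_python_spec(desired_python: str) -> tuple[str, str, str]:
        # Returns (python version, env-create extras, channel flags),
        # transcribing the shell case block above.
        if desired_python == "3.14t":
            return "3.14.0rc1", "python-freethreading", "-c conda-forge/label/python_rc -c conda-forge"
        if desired_python == "3.14":
            return "3.14.0rc1", "", "-c conda-forge/label/python_rc -c conda-forge"
        if desired_python == "3.13t":
            return "3.13", "python-freethreading", "-c conda-forge"
        return desired_python, "", ""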

View File

@@ -15,7 +15,7 @@
 # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: !{{ config["desired_cuda"] }}
 {%- if config["gpu_arch_version"] %}
-      GPU_ARCH_VERSION: "!{{ config["gpu_arch_version"] }}"
+      GPU_ARCH_VERSION: !{{ config["gpu_arch_version"] }}
 {%- endif %}
       GPU_ARCH_TYPE: !{{ config["gpu_arch_type"] }}
 {%- if include_skip_tests %}
{%- if include_skip_tests %} {%- if include_skip_tests %}
@@ -33,7 +33,7 @@
 {%- if is_windows %}
       # This is a dummy value for libtorch to work correctly with our batch scripts
       # without this value pip does not get installed for some reason
-      DESIRED_PYTHON: "3.10"
+      DESIRED_PYTHON: "3.9"
 {%- endif %}
 {%- else %}

View File

@@ -187,8 +187,6 @@ jobs:
     - name: Install nvidia driver, nvidia-docker runtime, set GPU_FLAG
       uses: pytorch/test-infra/.github/actions/setup-nvidia@main
-      with:
-        driver-version: ${{ startsWith(inputs.GPU_ARCH_VERSION, '13') && '580.65.06' || '570.133.07' }}
       if: ${{ inputs.GPU_ARCH_TYPE == 'cuda' && steps.filter.outputs.is-test-matrix-empty == 'False' }}

     - name: configure aws credentials
