Compare commits


1 Commit

SHA1: aa7e793a8e
Message: Update (base update) [ghstack-poisoned]
Date: 2025-08-27 19:17:52 -07:00
2281 changed files with 54883 additions and 113989 deletions

View File

@@ -3,20 +3,8 @@ set -eux -o pipefail
 GPU_ARCH_VERSION=${GPU_ARCH_VERSION:-}

-# Set CUDA architecture lists to match x86 build_cuda.sh
-if [[ "$GPU_ARCH_VERSION" == *"12.6"* ]]; then
-    export TORCH_CUDA_ARCH_LIST="8.0;9.0"
-elif [[ "$GPU_ARCH_VERSION" == *"12.8"* ]]; then
+if [[ "$GPU_ARCH_VERSION" == *"12.9"* ]]; then
     export TORCH_CUDA_ARCH_LIST="8.0;9.0;10.0;12.0"
-elif [[ "$GPU_ARCH_VERSION" == *"13.0"* ]]; then
-    export TORCH_CUDA_ARCH_LIST="8.0;9.0;10.0;11.0;12.0+PTX"
-fi
-
-# Compress the fatbin with -compress-mode=size for CUDA 13
-if [[ "$DESIRED_CUDA" == *"13"* ]]; then
-    export TORCH_NVCC_FLAGS="-compress-mode=size"
-    # Bundle ptxas into the cu13 wheel, see https://github.com/pytorch/pytorch/issues/163801
-    export BUILD_BUNDLE_PTXAS=1
 fi

 SCRIPTPATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
@@ -30,22 +18,14 @@ cd /
 # on the mounted pytorch repo
 git config --global --add safe.directory /pytorch
 pip install -r /pytorch/requirements.txt
-pip install auditwheel==6.2.0 wheel
+pip install auditwheel==6.2.0
 if [ "$DESIRED_CUDA" = "cpu" ]; then
     echo "BASE_CUDA_VERSION is not set. Building cpu wheel."
-    python /pytorch/.ci/aarch64_linux/aarch64_wheel_ci_build.py --enable-mkldnn
+    #USE_PRIORITIZED_TEXT_FOR_LD for enable linker script optimization https://github.com/pytorch/pytorch/pull/121975/files
+    USE_PRIORITIZED_TEXT_FOR_LD=1 python /pytorch/.ci/aarch64_linux/aarch64_wheel_ci_build.py --enable-mkldnn
 else
     echo "BASE_CUDA_VERSION is set to: $DESIRED_CUDA"
     export USE_SYSTEM_NCCL=1
-    # Check if we should use NVIDIA libs from PyPI (similar to x86 build_cuda.sh logic)
-    if [[ -z "$PYTORCH_EXTRA_INSTALL_REQUIREMENTS" ]]; then
-        echo "Bundling CUDA libraries with wheel for aarch64."
-    else
-        echo "Using nvidia libs from pypi for aarch64."
-        echo "Updated PYTORCH_EXTRA_INSTALL_REQUIREMENTS for aarch64: $PYTORCH_EXTRA_INSTALL_REQUIREMENTS"
-        export USE_NVIDIA_PYPI_LIBS=1
-    fi
-
-    python /pytorch/.ci/aarch64_linux/aarch64_wheel_ci_build.py --enable-mkldnn --enable-cuda
+    #USE_PRIORITIZED_TEXT_FOR_LD for enable linker script optimization https://github.com/pytorch/pytorch/pull/121975/files
+    USE_PRIORITIZED_TEXT_FOR_LD=1 python /pytorch/.ci/aarch64_linux/aarch64_wheel_ci_build.py --enable-mkldnn --enable-cuda
 fi
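
A rough Python rendering of the wheel-flavor branch removed on the `-` side above, for readers skimming the shell: an empty PYTORCH_EXTRA_INSTALL_REQUIREMENTS means "bundle CUDA libraries into the wheel", anything else means "depend on NVIDIA's PyPI wheels". The env-var names come from the diff; the function itself is illustrative.

```python
import os

def cuda_wheel_flavor() -> str:
    if not os.getenv("PYTORCH_EXTRA_INSTALL_REQUIREMENTS", ""):
        return "bundled"  # ship CUDA shared libraries inside the wheel
    os.environ["USE_NVIDIA_PYPI_LIBS"] = "1"
    return "pypi"  # depend on nvidia-* wheels at install time instead

print(cuda_wheel_flavor())
```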

View File

@@ -69,186 +69,62 @@ def replace_tag(filename) -> None:
     f.writelines(lines)


-def patch_library_rpath(
-    folder: str,
-    lib_name: str,
-    use_nvidia_pypi_libs: bool = False,
-    desired_cuda: str = "",
-) -> None:
-    """Apply patchelf to set RPATH for a library in torch/lib"""
-    lib_path = f"{folder}/tmp/torch/lib/{lib_name}"
-
-    if use_nvidia_pypi_libs:
-        # For PyPI NVIDIA libraries, construct CUDA RPATH
-        cuda_rpaths = [
-            "$ORIGIN/../../nvidia/cudnn/lib",
-            "$ORIGIN/../../nvidia/nvshmem/lib",
-            "$ORIGIN/../../nvidia/nccl/lib",
-            "$ORIGIN/../../nvidia/cusparselt/lib",
-        ]
-
-        if "130" in desired_cuda:
-            cuda_rpaths.append("$ORIGIN/../../nvidia/cu13/lib")
-        else:
-            cuda_rpaths.extend(
-                [
-                    "$ORIGIN/../../nvidia/cublas/lib",
-                    "$ORIGIN/../../nvidia/cuda_cupti/lib",
-                    "$ORIGIN/../../nvidia/cuda_nvrtc/lib",
-                    "$ORIGIN/../../nvidia/cuda_runtime/lib",
-                    "$ORIGIN/../../nvidia/cufft/lib",
-                    "$ORIGIN/../../nvidia/curand/lib",
-                    "$ORIGIN/../../nvidia/cusolver/lib",
-                    "$ORIGIN/../../nvidia/cusparse/lib",
-                    "$ORIGIN/../../nvidia/nvtx/lib",
-                    "$ORIGIN/../../nvidia/cufile/lib",
-                ]
-            )
-
-        # Add $ORIGIN for local torch libs
-        rpath = ":".join(cuda_rpaths) + ":$ORIGIN"
-    else:
-        # For bundled libraries, just use $ORIGIN
-        rpath = "$ORIGIN"
-
-    if os.path.exists(lib_path):
-        os.system(
-            f"cd {folder}/tmp/torch/lib/; "
-            f"patchelf --set-rpath '{rpath}' --force-rpath {lib_name}"
-        )
-
-
-def copy_and_patch_library(
-    src_path: str,
-    folder: str,
-    use_nvidia_pypi_libs: bool = False,
-    desired_cuda: str = "",
-) -> None:
-    """Copy a library to torch/lib and patch its RPATH"""
-    if os.path.exists(src_path):
-        lib_name = os.path.basename(src_path)
-        shutil.copy2(src_path, f"{folder}/tmp/torch/lib/{lib_name}")
-        patch_library_rpath(folder, lib_name, use_nvidia_pypi_libs, desired_cuda)
-
-
 def package_cuda_wheel(wheel_path, desired_cuda) -> None:
     """
     Package the cuda wheel libraries
     """
     folder = os.path.dirname(wheel_path)
+    wheelname = os.path.basename(wheel_path)
     os.mkdir(f"{folder}/tmp")
     os.system(f"unzip {wheel_path} -d {folder}/tmp")
-    # Delete original wheel since it will be repackaged
-    os.system(f"rm {wheel_path}")
+    libs_to_copy = [
+        "/usr/local/cuda/extras/CUPTI/lib64/libcupti.so.12",
+        "/usr/local/cuda/extras/CUPTI/lib64/libnvperf_host.so",
+        "/usr/local/cuda/lib64/libcudnn.so.9",
+        "/usr/local/cuda/lib64/libcublas.so.12",
+        "/usr/local/cuda/lib64/libcublasLt.so.12",
+        "/usr/local/cuda/lib64/libcudart.so.12",
+        "/usr/local/cuda/lib64/libcufft.so.11",
+        "/usr/local/cuda/lib64/libcusparse.so.12",
+        "/usr/local/cuda/lib64/libcusparseLt.so.0",
+        "/usr/local/cuda/lib64/libcusolver.so.11",
+        "/usr/local/cuda/lib64/libcurand.so.10",
+        "/usr/local/cuda/lib64/libnccl.so.2",
+        "/usr/local/cuda/lib64/libnvJitLink.so.12",
+        "/usr/local/cuda/lib64/libnvrtc.so.12",
+        "/usr/local/cuda/lib64/libnvshmem_host.so.3",
+        "/usr/local/cuda/lib64/libcudnn_adv.so.9",
+        "/usr/local/cuda/lib64/libcudnn_cnn.so.9",
+        "/usr/local/cuda/lib64/libcudnn_graph.so.9",
+        "/usr/local/cuda/lib64/libcudnn_ops.so.9",
+        "/usr/local/cuda/lib64/libcudnn_engines_runtime_compiled.so.9",
+        "/usr/local/cuda/lib64/libcudnn_engines_precompiled.so.9",
+        "/usr/local/cuda/lib64/libcudnn_heuristic.so.9",
+        "/lib64/libgomp.so.1",
+        "/usr/lib64/libgfortran.so.5",
+        "/acl/build/libarm_compute.so",
+        "/acl/build/libarm_compute_graph.so",
+        "/usr/local/lib/libnvpl_lapack_lp64_gomp.so.0",
+        "/usr/local/lib/libnvpl_blas_lp64_gomp.so.0",
+        "/usr/local/lib/libnvpl_lapack_core.so.0",
+        "/usr/local/lib/libnvpl_blas_core.so.0",
+    ]

-    # Check if we should use PyPI NVIDIA libraries or bundle system libraries
-    use_nvidia_pypi_libs = os.getenv("USE_NVIDIA_PYPI_LIBS", "0") == "1"
-
-    if use_nvidia_pypi_libs:
-        print("Using nvidia libs from pypi - skipping CUDA library bundling")
-        # For PyPI approach, we don't bundle CUDA libraries - they come from PyPI packages
-        # We only need to bundle non-NVIDIA libraries
-        minimal_libs_to_copy = [
-            "/lib64/libgomp.so.1",
-            "/usr/lib64/libgfortran.so.5",
-            "/acl/build/libarm_compute.so",
-            "/acl/build/libarm_compute_graph.so",
-            "/usr/local/lib/libnvpl_lapack_lp64_gomp.so.0",
-            "/usr/local/lib/libnvpl_blas_lp64_gomp.so.0",
-            "/usr/local/lib/libnvpl_lapack_core.so.0",
-            "/usr/local/lib/libnvpl_blas_core.so.0",
-        ]
-
-        # Copy minimal libraries to unzipped_folder/torch/lib
-        for lib_path in minimal_libs_to_copy:
-            copy_and_patch_library(lib_path, folder, use_nvidia_pypi_libs, desired_cuda)
-
-        # Patch torch libraries used for searching libraries
-        torch_libs_to_patch = [
-            "libtorch.so",
-            "libtorch_cpu.so",
-            "libtorch_cuda.so",
-            "libtorch_cuda_linalg.so",
-            "libtorch_global_deps.so",
-            "libtorch_python.so",
-            "libtorch_nvshmem.so",
-            "libc10.so",
-            "libc10_cuda.so",
-            "libcaffe2_nvrtc.so",
-            "libshm.so",
-        ]
-        for lib_name in torch_libs_to_patch:
-            patch_library_rpath(folder, lib_name, use_nvidia_pypi_libs, desired_cuda)
-    else:
-        print("Bundling CUDA libraries with wheel")
-        # Original logic for bundling system CUDA libraries
-        # Common libraries for all CUDA versions
-        common_libs = [
-            # Non-NVIDIA system libraries
-            "/lib64/libgomp.so.1",
-            "/usr/lib64/libgfortran.so.5",
-            "/acl/build/libarm_compute.so",
-            "/acl/build/libarm_compute_graph.so",
-            # Common CUDA libraries (same for all versions)
-            "/usr/local/lib/libnvpl_lapack_lp64_gomp.so.0",
-            "/usr/local/lib/libnvpl_blas_lp64_gomp.so.0",
-            "/usr/local/lib/libnvpl_lapack_core.so.0",
-            "/usr/local/lib/libnvpl_blas_core.so.0",
-            "/usr/local/cuda/extras/CUPTI/lib64/libnvperf_host.so",
-            "/usr/local/cuda/lib64/libcudnn.so.9",
-            "/usr/local/cuda/lib64/libcusparseLt.so.0",
-            "/usr/local/cuda/lib64/libcurand.so.10",
-            "/usr/local/cuda/lib64/libnccl.so.2",
-            "/usr/local/cuda/lib64/libnvshmem_host.so.3",
-            "/usr/local/cuda/lib64/libcudnn_adv.so.9",
-            "/usr/local/cuda/lib64/libcudnn_cnn.so.9",
-            "/usr/local/cuda/lib64/libcudnn_graph.so.9",
-            "/usr/local/cuda/lib64/libcudnn_ops.so.9",
-            "/usr/local/cuda/lib64/libcudnn_engines_runtime_compiled.so.9",
-            "/usr/local/cuda/lib64/libcudnn_engines_precompiled.so.9",
-            "/usr/local/cuda/lib64/libcudnn_heuristic.so.9",
+    if "129" in desired_cuda:
+        libs_to_copy += [
+            "/usr/local/cuda/lib64/libnvrtc-builtins.so.12.9",
             "/usr/local/cuda/lib64/libcufile.so.0",
             "/usr/local/cuda/lib64/libcufile_rdma.so.1",
-            "/usr/local/cuda/lib64/libcusparse.so.12",
         ]

-    # CUDA version-specific libraries
-    if "13" in desired_cuda:
-        minor_version = desired_cuda[-1]
-        version_specific_libs = [
-            "/usr/local/cuda/extras/CUPTI/lib64/libcupti.so.13",
-            "/usr/local/cuda/lib64/libcublas.so.13",
-            "/usr/local/cuda/lib64/libcublasLt.so.13",
-            "/usr/local/cuda/lib64/libcudart.so.13",
-            "/usr/local/cuda/lib64/libcufft.so.12",
-            "/usr/local/cuda/lib64/libcusolver.so.12",
-            "/usr/local/cuda/lib64/libnvJitLink.so.13",
-            "/usr/local/cuda/lib64/libnvrtc.so.13",
-            f"/usr/local/cuda/lib64/libnvrtc-builtins.so.13.{minor_version}",
-        ]
-    elif "12" in desired_cuda:
-        # Get the last character for libnvrtc-builtins version (e.g., "129" -> "9")
-        minor_version = desired_cuda[-1]
-        version_specific_libs = [
-            "/usr/local/cuda/extras/CUPTI/lib64/libcupti.so.12",
-            "/usr/local/cuda/lib64/libcublas.so.12",
-            "/usr/local/cuda/lib64/libcublasLt.so.12",
-            "/usr/local/cuda/lib64/libcudart.so.12",
-            "/usr/local/cuda/lib64/libcufft.so.11",
-            "/usr/local/cuda/lib64/libcusolver.so.11",
-            "/usr/local/cuda/lib64/libnvJitLink.so.12",
-            "/usr/local/cuda/lib64/libnvrtc.so.12",
-            f"/usr/local/cuda/lib64/libnvrtc-builtins.so.12.{minor_version}",
-        ]
-    else:
-        raise ValueError(f"Unsupported CUDA version: {desired_cuda}.")
-
-    # Combine all libraries
-    libs_to_copy = common_libs + version_specific_libs
-
-    # Copy libraries to unzipped_folder/torch/lib
-    for lib_path in libs_to_copy:
-        copy_and_patch_library(lib_path, folder, use_nvidia_pypi_libs, desired_cuda)
+    # Copy libraries to unzipped_folder/a/lib
+    for lib_path in libs_to_copy:
+        lib_name = os.path.basename(lib_path)
+        shutil.copy2(lib_path, f"{folder}/tmp/torch/lib/{lib_name}")
+        os.system(
+            f"cd {folder}/tmp/torch/lib/; "
+            f"patchelf --set-rpath '$ORIGIN' --force-rpath {folder}/tmp/torch/lib/{lib_name}"
+        )

     # Make sure the wheel is tagged with manylinux_2_28
     for f in os.scandir(f"{folder}/tmp/"):
@@ -256,8 +132,14 @@ def package_cuda_wheel(wheel_path, desired_cuda) -> None:
             replace_tag(f"{f.path}/WHEEL")
             break

-    os.system(f"wheel pack {folder}/tmp/ -d {folder}")
-    os.system(f"rm -rf {folder}/tmp/")
+    os.mkdir(f"{folder}/cuda_wheel")
+    os.system(f"cd {folder}/tmp/; zip -r {folder}/cuda_wheel/{wheelname} *")
+    shutil.move(
+        f"{folder}/cuda_wheel/{wheelname}",
+        f"{folder}/{wheelname}",
+        copy_function=shutil.copy2,
+    )
+    os.system(f"rm -rf {folder}/tmp/ {folder}/cuda_wheel/")
@@ -280,7 +162,14 @@ def complete_wheel(folder: str) -> str:
             f"/{folder}/dist/{repaired_wheel_name}",
         )
     else:
-        repaired_wheel_name = list_dir(f"/{folder}/dist")[0]
+        repaired_wheel_name = wheel_name.replace(
+            "linux_aarch64", "manylinux_2_28_aarch64"
+        )
+        print(f"Renaming {wheel_name} wheel to {repaired_wheel_name}")
+        os.rename(
+            f"/{folder}/dist/{wheel_name}",
+            f"/{folder}/dist/{repaired_wheel_name}",
+        )

     print(f"Copying {repaired_wheel_name} to artifacts")
     shutil.copy2(
@@ -317,21 +206,11 @@ if __name__ == "__main__":
     ).decode()

     print("Building PyTorch wheel")
-    build_vars = ""
+    build_vars = "CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000 "
     # MAX_JOB=5 is not required for CPU backend (see commit 465d98b)
     if enable_cuda:
         build_vars += "MAX_JOBS=5 "
-
-        # Handle PyPI NVIDIA libraries vs bundled libraries
-        use_nvidia_pypi_libs = os.getenv("USE_NVIDIA_PYPI_LIBS", "0") == "1"
-        if use_nvidia_pypi_libs:
-            print("Configuring build for PyPI NVIDIA libraries")
-            # Configure for dynamic linking (matching x86 logic)
-            build_vars += "ATEN_STATIC_CUDA=0 USE_CUDA_STATIC_LINK=0 USE_CUPTI_SO=1 "
-        else:
-            print("Configuring build for bundled NVIDIA libraries")
-            # Keep existing static linking approach - already configured above

     override_package_version = os.getenv("OVERRIDE_PACKAGE_VERSION")
     desired_cuda = os.getenv("DESIRED_CUDA")
     if override_package_version is not None:
@@ -372,7 +251,7 @@ if __name__ == "__main__":
     else:
         print("build pytorch without mkldnn backend")

-    os.system(f"cd /pytorch; {build_vars} python3 -m build --wheel --no-isolation")
+    os.system(f"cd /pytorch; {build_vars} python3 setup.py bdist_wheel")
     if enable_cuda:
         print("Updating Cuda Dependency")
         filename = os.listdir("/pytorch/dist/")

View File

@@ -241,7 +241,7 @@ def wait_for_connection(addr, port, timeout=15, attempt_cnt=5):
         try:
             with socket.create_connection((addr, port), timeout=timeout):
                 return
-        except (ConnectionRefusedError, TimeoutError):  # noqa: PERF203
+        except (ConnectionRefusedError, socket.timeout):  # noqa: PERF203
             if i == attempt_cnt - 1:
                 raise
             time.sleep(timeout)
@@ -442,7 +442,7 @@ def build_torchvision(
     if host.using_docker():
         build_vars += " CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000"

-    host.run_cmd(f"cd vision && {build_vars} python3 -m build --wheel --no-isolation")
+    host.run_cmd(f"cd vision && {build_vars} python3 setup.py bdist_wheel")
     vision_wheel_name = host.list_dir("vision/dist")[0]
     embed_libgomp(host, use_conda, os.path.join("vision", "dist", vision_wheel_name))
@@ -497,7 +497,7 @@ def build_torchdata(
     if host.using_docker():
         build_vars += " CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000"

-    host.run_cmd(f"cd data && {build_vars} python3 -m build --wheel --no-isolation")
+    host.run_cmd(f"cd data && {build_vars} python3 setup.py bdist_wheel")
     wheel_name = host.list_dir("data/dist")[0]
     embed_libgomp(host, use_conda, os.path.join("data", "dist", wheel_name))
@@ -553,7 +553,7 @@ def build_torchtext(
     if host.using_docker():
         build_vars += " CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000"

-    host.run_cmd(f"cd text && {build_vars} python3 -m build --wheel --no-isolation")
+    host.run_cmd(f"cd text && {build_vars} python3 setup.py bdist_wheel")
     wheel_name = host.list_dir("text/dist")[0]
     embed_libgomp(host, use_conda, os.path.join("text", "dist", wheel_name))
@@ -614,7 +614,7 @@ def build_torchaudio(
     host.run_cmd(
         f"cd audio && export FFMPEG_ROOT=$(pwd)/third_party/ffmpeg && export USE_FFMPEG=1 \
         && ./packaging/ffmpeg/build.sh \
-        && {build_vars} python3 -m build --wheel --no-isolation"
+        && {build_vars} python3 setup.py bdist_wheel"
     )
     wheel_name = host.list_dir("audio/dist")[0]
@@ -726,7 +726,7 @@ def start_build(
     print("Building PyTorch wheel")
     build_opts = ""
     if pytorch_build_number is not None:
-        build_opts += f" -C--build-option=--build-number={pytorch_build_number}"
+        build_opts += f" --build-number {pytorch_build_number}"
     # Breakpad build fails on aarch64
     build_vars = "USE_BREAKPAD=0 "
     if branch == "nightly":
@@ -747,8 +747,7 @@ def start_build(
         print("build pytorch with mkldnn+acl backend")
         build_vars += " USE_MKLDNN=ON USE_MKLDNN_ACL=ON"
         host.run_cmd(
-            f"cd $HOME/pytorch && export ACL_ROOT_DIR=$HOME/ComputeLibrary && "
-            f"{build_vars} python3 -m build --wheel --no-isolation{build_opts}"
+            f"cd $HOME/pytorch && export ACL_ROOT_DIR=$HOME/ComputeLibrary && {build_vars} python3 setup.py bdist_wheel{build_opts}"
         )
         print("Repair the wheel")
         pytorch_wheel_name = host.list_dir("pytorch/dist")[0]
@@ -764,7 +763,7 @@ def start_build(
     else:
         print("build pytorch without mkldnn backend")
         host.run_cmd(
-            f"cd pytorch && {build_vars} python3 -m build --wheel --no-isolation{build_opts}"
+            f"cd pytorch && {build_vars} python3 setup.py bdist_wheel{build_opts}"
         )
     print("Deleting build folder")
@@ -1005,7 +1004,7 @@ if __name__ == "__main__":
         install_condaforge_python(host, args.python_version)
         sys.exit(0)

-    python_version = args.python_version if args.python_version is not None else "3.10"
+    python_version = args.python_version if args.python_version is not None else "3.9"

     if args.use_torch_from_pypi:
         configure_system(host, compiler=args.compiler, python_version=python_version)
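
Context for the swap repeated throughout this file: the `-` side builds wheels with the PEP 517 frontend (`python3 -m build --wheel --no-isolation`), the `+` side with the legacy setuptools entry point (`python3 setup.py bdist_wheel`). A hedged sketch of both invocations as subprocess calls; the `build_vars` dict here is illustrative, not the script's actual plumbing:

```python
import os
import subprocess

def build_wheel(repo_dir: str, build_vars: dict[str, str], pep517: bool) -> None:
    env = {**os.environ, **build_vars}
    if pep517:
        # PEP 517 frontend ("-" side of this diff)
        cmd = ["python3", "-m", "build", "--wheel", "--no-isolation"]
    else:
        # legacy setuptools entry point ("+" side)
        cmd = ["python3", "setup.py", "bdist_wheel"]
    subprocess.run(cmd, cwd=repo_dir, env=env, check=True)
```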

View File

@@ -69,8 +69,7 @@ RUN bash ./install_cuda.sh 13.0
 ENV DESIRED_CUDA=13.0

 FROM ${ROCM_IMAGE} as rocm
-ARG PYTORCH_ROCM_ARCH
-ENV PYTORCH_ROCM_ARCH ${PYTORCH_ROCM_ARCH}
+ENV PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
 ADD ./common/install_mkl.sh install_mkl.sh
 RUN bash ./install_mkl.sh && rm install_mkl.sh
 ENV MKLROOT /opt/intel

View File

@@ -36,12 +36,6 @@ case ${DOCKER_TAG_PREFIX} in
         ;;
     rocm*)
         BASE_TARGET=rocm
-        PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
-        # add gfx950 conditionally starting in ROCm 7.0
-        if [[ "$ROCM_VERSION" == *"7.0"* ]]; then
-            PYTORCH_ROCM_ARCH="${PYTORCH_ROCM_ARCH};gfx950"
-        fi
-        EXTRA_BUILD_ARGS="${EXTRA_BUILD_ARGS} --build-arg PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH}"
         ;;
     *)
         echo "ERROR: Unknown docker tag ${DOCKER_TAG_PREFIX}"

View File

@ -81,11 +81,11 @@ elif [[ "$image" == *riscv* ]]; then
DOCKERFILE="ubuntu-cross-riscv/Dockerfile" DOCKERFILE="ubuntu-cross-riscv/Dockerfile"
fi fi
_UCX_COMMIT=7836b165abdbe468a2f607e7254011c07d788152 _UCX_COMMIT=7bb2722ff2187a0cad557ae4a6afa090569f83fb
_UCC_COMMIT=430e241bf5d38cbc73fc7a6b89155397232e3f96 _UCC_COMMIT=20eae37090a4ce1b32bcce6144ccad0b49943e0b
if [[ "$image" == *rocm* ]]; then if [[ "$image" == *rocm* ]]; then
_UCX_COMMIT=29831d319e6be55cb8c768ca61de335c934ca39e _UCX_COMMIT=cc312eaa4655c0cc5c2bcd796db938f90563bcf6
_UCC_COMMIT=9f4b242cbbd8b1462cbc732eb29316cdfa124b77 _UCC_COMMIT=0c0fc21559835044ab107199e334f7157d6a0d3d
fi fi
tag=$(echo $image | awk -F':' '{print $2}') tag=$(echo $image | awk -F':' '{print $2}')
@ -114,19 +114,31 @@ case "$tag" in
UCC_COMMIT=${_UCC_COMMIT} UCC_COMMIT=${_UCC_COMMIT}
TRITON=yes TRITON=yes
;; ;;
pytorch-linux-jammy-cuda13.0-cudnn9-py3-gcc11) pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc9-inductor-benchmarks)
CUDA_VERSION=13.0.0 CUDA_VERSION=12.8.1
ANACONDA_PYTHON_VERSION=3.10 ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=11 GCC_VERSION=9
VISION=yes VISION=yes
KATEX=yes KATEX=yes
UCX_COMMIT=${_UCX_COMMIT} UCX_COMMIT=${_UCX_COMMIT}
UCC_COMMIT=${_UCC_COMMIT} UCC_COMMIT=${_UCC_COMMIT}
TRITON=yes TRITON=yes
INDUCTOR_BENCHMARKS=yes
;; ;;
pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc9-inductor-benchmarks) pytorch-linux-jammy-cuda12.8-cudnn9-py3.12-gcc9-inductor-benchmarks)
CUDA_VERSION=12.8.1 CUDA_VERSION=12.8.1
ANACONDA_PYTHON_VERSION=3.10 ANACONDA_PYTHON_VERSION=3.12
GCC_VERSION=9
VISION=yes
KATEX=yes
UCX_COMMIT=${_UCX_COMMIT}
UCC_COMMIT=${_UCC_COMMIT}
TRITON=yes
INDUCTOR_BENCHMARKS=yes
;;
pytorch-linux-jammy-cuda12.8-cudnn9-py3.13-gcc9-inductor-benchmarks)
CUDA_VERSION=12.8.1
ANACONDA_PYTHON_VERSION=3.13
GCC_VERSION=9 GCC_VERSION=9
VISION=yes VISION=yes
KATEX=yes KATEX=yes
@ -175,6 +187,20 @@ case "$tag" in
fi fi
GCC_VERSION=11 GCC_VERSION=11
VISION=yes VISION=yes
ROCM_VERSION=6.4
NINJA_VERSION=1.9.0
TRITON=yes
KATEX=yes
UCX_COMMIT=${_UCX_COMMIT}
UCC_COMMIT=${_UCC_COMMIT}
if [[ $tag =~ "benchmarks" ]]; then
INDUCTOR_BENCHMARKS=yes
fi
;;
pytorch-linux-noble-rocm-alpha-py3)
ANACONDA_PYTHON_VERSION=3.12
GCC_VERSION=11
VISION=yes
ROCM_VERSION=7.0 ROCM_VERSION=7.0
NINJA_VERSION=1.9.0 NINJA_VERSION=1.9.0
TRITON=yes TRITON=yes
@ -182,28 +208,25 @@ case "$tag" in
UCX_COMMIT=${_UCX_COMMIT} UCX_COMMIT=${_UCX_COMMIT}
UCC_COMMIT=${_UCC_COMMIT} UCC_COMMIT=${_UCC_COMMIT}
PYTORCH_ROCM_ARCH="gfx90a;gfx942;gfx950" PYTORCH_ROCM_ARCH="gfx90a;gfx942;gfx950"
if [[ $tag =~ "benchmarks" ]]; then
INDUCTOR_BENCHMARKS=yes
fi
;; ;;
pytorch-linux-jammy-xpu-n-1-py3) pytorch-linux-jammy-xpu-2025.0-py3)
ANACONDA_PYTHON_VERSION=3.10 ANACONDA_PYTHON_VERSION=3.9
GCC_VERSION=11
VISION=yes
XPU_VERSION=2025.0
NINJA_VERSION=1.9.0
TRITON=yes
;;
pytorch-linux-jammy-xpu-2025.1-py3)
ANACONDA_PYTHON_VERSION=3.9
GCC_VERSION=11 GCC_VERSION=11
VISION=yes VISION=yes
XPU_VERSION=2025.1 XPU_VERSION=2025.1
NINJA_VERSION=1.9.0 NINJA_VERSION=1.9.0
TRITON=yes TRITON=yes
;; ;;
pytorch-linux-jammy-xpu-n-py3) pytorch-linux-jammy-py3.9-gcc11-inductor-benchmarks)
ANACONDA_PYTHON_VERSION=3.10 ANACONDA_PYTHON_VERSION=3.9
GCC_VERSION=11
VISION=yes
XPU_VERSION=2025.2
NINJA_VERSION=1.9.0
TRITON=yes
;;
pytorch-linux-jammy-py3-gcc11-inductor-benchmarks)
ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=11 GCC_VERSION=11
VISION=yes VISION=yes
KATEX=yes KATEX=yes
@ -251,10 +274,13 @@ case "$tag" in
TRITON_CPU=yes TRITON_CPU=yes
;; ;;
pytorch-linux-jammy-linter) pytorch-linux-jammy-linter)
PYTHON_VERSION=3.10 # TODO: Use 3.9 here because of this issue https://github.com/python/mypy/issues/13627.
# We will need to update mypy version eventually, but that's for another day. The task
# would be to upgrade mypy to 1.0.0 with Python 3.11
PYTHON_VERSION=3.9
;; ;;
pytorch-linux-jammy-cuda12.8-cudnn9-py3.10-linter) pytorch-linux-jammy-cuda12.8-cudnn9-py3.9-linter)
PYTHON_VERSION=3.10 PYTHON_VERSION=3.9
CUDA_VERSION=12.8.1 CUDA_VERSION=12.8.1
;; ;;
pytorch-linux-jammy-aarch64-py3.10-gcc11) pytorch-linux-jammy-aarch64-py3.10-gcc11)
@ -441,3 +467,12 @@ elif [ "$HAS_TRITON" = "yes" ]; then
echo "expecting triton to not be installed, but it is" echo "expecting triton to not be installed, but it is"
exit 1 exit 1
fi fi
# Sanity check cmake version. Executorch reinstalls cmake and I'm not sure if
# they support 4.0.0 yet, so exclude them from this check.
CMAKE_VERSION=$(drun cmake --version)
if [[ "$EXECUTORCH" != *yes* && "$CMAKE_VERSION" != *4.* ]]; then
echo "CMake version is not 4.0.0:"
drun cmake --version
exit 1
fi
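
The case statement this file revolves around maps a Docker image tag to a bundle of build settings. The same dispatch in miniature, as a Python sketch; the tags and values are abbreviated from the diff, and `config_for()` is a made-up helper:

```python
CONFIGS: dict[str, dict] = {
    "pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc9-inductor-benchmarks": {
        "CUDA_VERSION": "12.8.1",
        "ANACONDA_PYTHON_VERSION": "3.10",
        "GCC_VERSION": "9",
        "TRITON": "yes",
        "INDUCTOR_BENCHMARKS": "yes",
    },
    "pytorch-linux-jammy-linter": {"PYTHON_VERSION": "3.9"},
}

def config_for(tag: str) -> dict:
    if tag not in CONFIGS:
        raise SystemExit(f"Invalid tag: {tag}")  # mirrors the *) error arm
    return CONFIGS[tag]

print(config_for("pytorch-linux-jammy-linter"))
```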

View File

@@ -56,13 +56,9 @@ ENV INSTALLED_VISION ${VISION}

 # Install rocm
 ARG ROCM_VERSION
-RUN mkdir ci_commit_pins
-COPY ./common/common_utils.sh common_utils.sh
-COPY ./ci_commit_pins/rocm-composable-kernel.txt ci_commit_pins/rocm-composable-kernel.txt
 COPY ./common/install_rocm.sh install_rocm.sh
 RUN bash ./install_rocm.sh
-RUN rm install_rocm.sh common_utils.sh
-RUN rm -r ci_commit_pins
+RUN rm install_rocm.sh
 COPY ./common/install_rocm_magma.sh install_rocm_magma.sh
 RUN bash ./install_rocm_magma.sh ${ROCM_VERSION}
 RUN rm install_rocm_magma.sh

View File

@@ -1 +1 @@
-e0dda9059d082537cee36be6c5e4fe3b18c880c0
+56392aa978594cc155fa8af48cd949f5b5f1823a

View File

@@ -1,2 +1,2 @@
-transformers==4.56.0
+transformers==4.54.0
 soxr==0.5.0

View File

@@ -1 +1 @@
-v2.28.3-1
+v2.27.5-1

View File

@@ -1 +1 @@
-v2.28.3-1
+v2.27.7-1

View File

@@ -1 +0,0 @@
-7fe50dc3da2069d6645d9deb8c017a876472a977

View File

@@ -1 +1 @@
-74a23feff57432129df84d8099e622773cf77925
+e03a63be43e33596f7f0a43b0f530353785e4a59

View File

@@ -1 +1 @@
-1b0418a9a454b2b93ab8d71f40e59d2297157fae
+a6572fb0be5b9b0a19b0641a0ce05810fa04e44c

View File

@@ -1 +1 @@
-bbb06c0334a6772b92d24bde54956e675c8c6604
+f7888497a1eb9e98d4c07537f0d0bcfe180d1363

View File

@@ -83,9 +83,9 @@ function build_cpython {
         py_suffix=${py_ver::-1}
         py_folder=$py_suffix
     fi
-    # Update to rc2 due to https://github.com/python/cpython/commit/c72699086fe4
+    # Only b3 is available now
     if [ "$py_suffix" == "3.14.0" ]; then
-        py_suffix="3.14.0rc2"
+        py_suffix="3.14.0b3"
     fi
     wget -q $PYTHON_DOWNLOAD_URL/$py_folder/Python-$py_suffix.tgz -O Python-$py_ver.tgz
     do_cpython_build $py_ver Python-$py_suffix

View File

@@ -147,7 +147,7 @@ function install_128 {
 }

 function install_130 {
-    CUDNN_VERSION=9.13.0.50
+    CUDNN_VERSION=9.12.0.46
     echo "Installing CUDA 13.0 and cuDNN ${CUDNN_VERSION} and NVSHMEM and NCCL and cuSparseLt-0.7.1"
     # install CUDA 13.0 in the same container
     install_cuda 13.0.0 cuda_13.0.0_580.65.06_linux

View File

@@ -42,27 +42,22 @@ install_pip_dependencies() {
   # A workaround, ExecuTorch has moved to numpy 2.0 which is not compatible with the current
   # numba and scipy version used in PyTorch CI
   conda_run pip uninstall -y numba scipy
-  # Yaspin is needed for running CI test (get_benchmark_analysis_data.py)
-  pip_install yaspin==3.1.0
   popd
 }

 setup_executorch() {
-  pushd executorch
   export PYTHON_EXECUTABLE=python
-  export CMAKE_ARGS="-DEXECUTORCH_BUILD_PYBIND=ON -DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON -DEXECUTORCH_BUILD_TESTS=ON"
+  export CMAKE_ARGS="-DEXECUTORCH_BUILD_PYBIND=ON -DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON"

   as_jenkins .ci/scripts/setup-linux.sh --build-tool cmake || true
-  popd
 }

-if [ $# -eq 0 ]; then
-  clone_executorch
-  install_buck2
-  install_conda_dependencies
-  install_pip_dependencies
-  pushd executorch
-  setup_executorch
-  popd
-else
-  "$@"
-fi
+clone_executorch
+install_buck2
+install_conda_dependencies
+install_pip_dependencies
+setup_executorch
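
The `-` side above drops a common CI dispatch idiom: run the whole pipeline when the script is invoked with no arguments, or run just one named step via `"$@"` otherwise. A Python rendering of the same idea, with illustrative step names:

```python
import sys

def clone_executorch(): print("clone")
def install_buck2(): print("buck2")
def setup_executorch(): print("setup")

PIPELINE = [clone_executorch, install_buck2, setup_executorch]

if __name__ == "__main__":
    if len(sys.argv) == 1:
        for step in PIPELINE:      # bash: if [ $# -eq 0 ]; then run everything
            step()
    else:
        globals()[sys.argv[1]]()   # bash: "$@" runs a single named function
```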

View File

@@ -2,11 +2,6 @@

 set -ex

-# for pip_install function
-source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh"
-
-ROCM_COMPOSABLE_KERNEL_VERSION="$(cat $(dirname $0)/../ci_commit_pins/rocm-composable-kernel.txt)"
-
 ver() {
     printf "%3d%03d%03d%03d" $(echo "$1" | tr '.' ' ');
 }
@@ -42,6 +37,12 @@ EOF
     rocm_baseurl="http://repo.radeon.com/rocm/apt/${ROCM_VERSION}"
     amdgpu_baseurl="https://repo.radeon.com/amdgpu/${ROCM_VERSION}/ubuntu"
+    # Special case for ROCM_VERSION == 7.0
+    if [[ $(ver "$ROCM_VERSION") -eq $(ver 7.0) ]]; then
+        rocm_baseurl="https://repo.radeon.com/rocm/apt/7.0_alpha2"
+        amdgpu_baseurl="https://repo.radeon.com/amdgpu/30.10_alpha2/ubuntu"
+    fi
     # Add amdgpu repository
     UBUNTU_VERSION_NAME=`cat /etc/os-release | grep UBUNTU_CODENAME | awk -F= '{print $2}'`
     echo "deb [arch=amd64] ${amdgpu_baseurl} ${UBUNTU_VERSION_NAME} main" > /etc/apt/sources.list.d/amdgpu.list
@@ -112,8 +113,6 @@ EOF
         rm -rf HIP clr
     fi

-    pip_install "git+https://github.com/rocm/composable_kernel@$ROCM_COMPOSABLE_KERNEL_VERSION"
-
     # Cleanup
     apt-get autoclean && apt-get clean
     rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
@@ -177,8 +176,6 @@ install_centos() {
         sqlite3 $kdb "PRAGMA journal_mode=off; PRAGMA VACUUM;"
     done

-    pip_install "git+https://github.com/rocm/composable_kernel@$ROCM_COMPOSABLE_KERNEL_VERSION"
-
     # Cleanup
     yum clean all
     rm -rf /var/cache/yum
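
For reference, the bash `ver()` helper kept in this file packs a dotted version into a fixed-width integer so plain `-eq`/`-lt` comparisons work, which is why `$(ver "$ROCM_VERSION") -eq $(ver 7.0)` matches "7.0" and "7.0.0" alike. An equivalent sketch in Python (padding details approximated):

```python
def ver(version: str) -> int:
    parts = (version.split(".") + ["0"] * 4)[:4]
    return int("".join(f"{int(p):03d}" for p in parts))

assert ver("7.0") == ver("7.0.0")   # what substring matching would get wrong
assert ver("7.0") > ver("6.4.2")
```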

View File

@@ -12,8 +12,8 @@ function do_install() {
     rocm_version_nodot=${rocm_version//./}

-    # https://github.com/icl-utk-edu/magma/pull/65
-    MAGMA_VERSION=d6e4117bc88e73f06d26c6c2e14f064e8fc3d1ec
+    # Version 2.7.2 + ROCm related updates
+    MAGMA_VERSION=a1625ff4d9bc362906bd01f805dbbe12612953f6
     magma_archive="magma-rocm${rocm_version_nodot}-${MAGMA_VERSION}-1.tar.bz2"

     rocm_dir="/opt/rocm"

View File

@@ -57,7 +57,7 @@ if [ ! -f setup.py ]; then
   cd python
 fi

-pip_install pybind11==3.0.1
+pip_install pybind11==2.13.6

 # TODO: remove patch setup.py once we have a proper fix for https://github.com/triton-lang/triton/issues/4527
 as_jenkins sed -i -e 's/https:\/\/tritonlang.blob.core.windows.net\/llvm-builds/https:\/\/oaitriton.blob.core.windows.net\/public\/llvm-builds/g' setup.py
@@ -66,15 +66,15 @@ if [ -n "${UBUNTU_VERSION}" ] && [ -n "${GCC_VERSION}" ] && [[ "${GCC_VERSION}"
   # Triton needs at least gcc-9 to build
   apt-get install -y g++-9

-  CXX=g++-9 conda_run python -m build --wheel --no-isolation
+  CXX=g++-9 conda_run python setup.py bdist_wheel
 elif [ -n "${UBUNTU_VERSION}" ] && [ -n "${CLANG_VERSION}" ]; then
   # Triton needs <filesystem> which surprisingly is not available with clang-9 toolchain
   add-apt-repository -y ppa:ubuntu-toolchain-r/test
   apt-get install -y g++-9

-  CXX=g++-9 conda_run python -m build --wheel --no-isolation
+  CXX=g++-9 conda_run python setup.py bdist_wheel
 else
-  conda_run python -m build --wheel --no-isolation
+  conda_run python setup.py bdist_wheel
 fi

 # Copy the wheel to /opt for multi stage docker builds

View File

@@ -44,12 +44,8 @@ function install_ucc() {
   ./autogen.sh

-  if [[ -n "$CUDA_VERSION" && $CUDA_VERSION == 13* ]]; then
-    NVCC_GENCODE="-gencode=arch=compute_86,code=compute_86"
-  else
-    # We only run distributed tests on Tesla M60 and A10G
-    NVCC_GENCODE="-gencode=arch=compute_52,code=sm_52 -gencode=arch=compute_86,code=compute_86"
-  fi
+  # We only run distributed tests on Tesla M60 and A10G
+  NVCC_GENCODE="-gencode=arch=compute_52,code=sm_52 -gencode=arch=compute_86,code=compute_86"

   if [[ -n "$ROCM_VERSION" ]]; then
     if [[ -n "$PYTORCH_ROCM_ARCH" ]]; then

View File

@@ -65,14 +65,10 @@ function install_ubuntu() {

 function install_rhel() {
     . /etc/os-release
-    if [[ "${ID}" == "rhel" ]]; then
-        if [[ ! " 8.8 8.9 9.0 9.2 9.3 " =~ " ${VERSION_ID} " ]]; then
+    if [[ ! " 8.8 8.10 9.0 9.2 9.3 " =~ " ${VERSION_ID} " ]]; then
         echo "RHEL version ${VERSION_ID} not supported"
         exit
-        fi
-    elif [[ "${ID}" == "almalinux" ]]; then
-        # Workaround for almalinux8 which used by quay.io/pypa/manylinux_2_28_x86_64
-        VERSION_ID="8.8"
     fi

     dnf install -y 'dnf-command(config-manager)'
@@ -150,11 +146,11 @@ if [[ "${XPU_DRIVER_TYPE,,}" == "lts" ]]; then
     XPU_DRIVER_VERSION="/lts/2350"
 fi

-# Default use Intel® oneAPI Deep Learning Essentials 2025.1
-if [[ "$XPU_VERSION" == "2025.2" ]]; then
-    XPU_PACKAGES="intel-deep-learning-essentials-2025.2"
-else
+# Default use Intel® oneAPI Deep Learning Essentials 2025.0
+if [[ "$XPU_VERSION" == "2025.1" ]]; then
     XPU_PACKAGES="intel-deep-learning-essentials-2025.1"
+else
+    XPU_PACKAGES="intel-deep-learning-essentials-2025.0"
 fi

 # The installation depends on the base OS

View File

@@ -74,14 +74,6 @@ RUN bash ./install_cuda.sh 13.0
 RUN bash ./install_magma.sh 13.0
 RUN ln -sf /usr/local/cuda-13.0 /usr/local/cuda

-# Install libibverbs for libtorch and copy to CUDA directory
-RUN apt-get update -y && \
-    apt-get install -y libibverbs-dev librdmacm-dev && \
-    cp /usr/lib/x86_64-linux-gnu/libmlx5.so* /usr/local/cuda/lib64/ && \
-    cp /usr/lib/x86_64-linux-gnu/librdmacm.so* /usr/local/cuda/lib64/ && \
-    cp /usr/lib/x86_64-linux-gnu/libibverbs.so* /usr/local/cuda/lib64/ && \
-    cp /usr/lib/x86_64-linux-gnu/libnl* /usr/local/cuda/lib64/
-
 FROM cpu as rocm
 ARG ROCM_VERSION
 ARG PYTORCH_ROCM_ARCH

View File

@@ -40,16 +40,12 @@ case ${DOCKER_TAG_PREFIX} in
         ;;
     rocm*)
         # we want the patch version of 6.4 instead
-        if [[ "$GPU_ARCH_VERSION" == *"6.4"* ]]; then
+        if [[ $(ver $GPU_ARCH_VERSION) -eq $(ver 6.4) ]]; then
             GPU_ARCH_VERSION="${GPU_ARCH_VERSION}.2"
         fi
         BASE_TARGET=rocm
         GPU_IMAGE=rocm/dev-ubuntu-22.04:${GPU_ARCH_VERSION}-complete
         PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
-        # add gfx950 conditionally starting in ROCm 7.0
-        if [[ "$GPU_ARCH_VERSION" == *"7.0"* ]]; then
-            PYTORCH_ROCM_ARCH="${PYTORCH_ROCM_ARCH};gfx950"
-        fi
         DOCKER_GPU_BUILD_ARG="--build-arg PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH} --build-arg ROCM_VERSION=${GPU_ARCH_VERSION}"
         ;;
     *)

View File

@@ -175,6 +175,6 @@ ENV XPU_DRIVER_TYPE ROLLING
 RUN python3 -m pip install --upgrade pip && \
     python3 -mpip install cmake==3.28.4
 ADD ./common/install_xpu.sh install_xpu.sh
-ENV XPU_VERSION 2025.2
+ENV XPU_VERSION 2025.1
 RUN bash ./install_xpu.sh && rm install_xpu.sh
 RUN pushd /opt/_internal && tar -xJf static-libs-for-embedding-only.tar.xz && popd

View File

@@ -0,0 +1,71 @@
+FROM centos:8 as base
+
+ENV LC_ALL en_US.UTF-8
+ENV LANG en_US.UTF-8
+ENV LANGUAGE en_US.UTF-8
+ENV PATH /opt/rh/gcc-toolset-11/root/bin/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+
+# change to a valid repo
+RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-*.repo
+# enable to install ninja-build
+RUN sed -i 's|enabled=0|enabled=1|g' /etc/yum.repos.d/CentOS-Linux-PowerTools.repo
+
+RUN yum -y update
+RUN yum install -y wget curl perl util-linux xz bzip2 git patch which zlib-devel sudo
+RUN yum install -y autoconf automake make cmake gdb gcc-toolset-11-gcc-c++
+
+FROM base as openssl
+ADD ./common/install_openssl.sh install_openssl.sh
+RUN bash ./install_openssl.sh && rm install_openssl.sh
+
+# Install python
+FROM base as python
+RUN yum install -y openssl-devel zlib-devel bzip2-devel ncurses-devel sqlite-devel readline-devel tk-devel gdbm-devel libpcap-devel xz-devel libffi-devel
+ADD common/install_cpython.sh install_cpython.sh
+RUN bash ./install_cpython.sh && rm install_cpython.sh
+
+FROM base as conda
+ADD ./common/install_conda_docker.sh install_conda.sh
+RUN bash ./install_conda.sh && rm install_conda.sh
+RUN /opt/conda/bin/conda install -y cmake
+
+FROM base as intel
+# Install MKL
+COPY --from=python /opt/python /opt/python
+COPY --from=python /opt/_internal /opt/_internal
+COPY --from=conda /opt/conda /opt/conda
+ENV PATH=/opt/conda/bin:$PATH
+ADD ./common/install_mkl.sh install_mkl.sh
+RUN bash ./install_mkl.sh && rm install_mkl.sh
+
+FROM base as patchelf
+ADD ./common/install_patchelf.sh install_patchelf.sh
+RUN bash ./install_patchelf.sh && rm install_patchelf.sh
+RUN cp $(which patchelf) /patchelf
+
+FROM base as jni
+ADD ./common/install_jni.sh install_jni.sh
+ADD ./java/jni.h jni.h
+RUN bash ./install_jni.sh && rm install_jni.sh
+
+FROM base as libpng
+ADD ./common/install_libpng.sh install_libpng.sh
+RUN bash ./install_libpng.sh && rm install_libpng.sh
+
+FROM base as final
+COPY --from=openssl /opt/openssl /opt/openssl
+COPY --from=python /opt/python /opt/python
+COPY --from=python /opt/_internal /opt/_internal
+COPY --from=intel /opt/intel /opt/intel
+COPY --from=conda /opt/conda /opt/conda
+COPY --from=patchelf /usr/local/bin/patchelf /usr/local/bin/patchelf
+COPY --from=jni /usr/local/include/jni.h /usr/local/include/jni.h
+COPY --from=libpng /usr/local/bin/png* /usr/local/bin/
+COPY --from=libpng /usr/local/bin/libpng* /usr/local/bin/
+COPY --from=libpng /usr/local/include/png* /usr/local/include/
+COPY --from=libpng /usr/local/include/libpng* /usr/local/include/
+COPY --from=libpng /usr/local/lib/libpng* /usr/local/lib/
+COPY --from=libpng /usr/local/lib/pkgconfig /usr/local/lib/pkgconfig
+RUN yum install -y ninja-build

View File

@@ -43,6 +43,12 @@ case ${image} in
         MANY_LINUX_VERSION="2_28_aarch64"
         OPENBLAS_VERSION="v0.3.30"
         ;;
+    manylinuxcxx11-abi-builder:cpu-cxx11-abi)
+        TARGET=final
+        GPU_IMAGE=""
+        DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=9"
+        MANY_LINUX_VERSION="cxx11-abi"
+        ;;
     manylinuxs390x-builder:cpu-s390x)
         TARGET=final
        GPU_IMAGE=s390x/almalinux:8
@@ -76,7 +82,7 @@ case ${image} in
         ;;
     manylinux2_28-builder:rocm*)
         # we want the patch version of 6.4 instead
-        if [[ "$GPU_ARCH_VERSION" == *"6.4"* ]]; then
+        if [[ $(ver $GPU_ARCH_VERSION) -eq $(ver 6.4) ]]; then
             GPU_ARCH_VERSION="${GPU_ARCH_VERSION}.2"
         fi
         TARGET=rocm_final
@@ -84,10 +90,6 @@ case ${image} in
         DEVTOOLSET_VERSION="11"
         GPU_IMAGE=rocm/dev-almalinux-8:${GPU_ARCH_VERSION}-complete
         PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
-        # add gfx950 conditionally starting in ROCm 7.0
-        if [[ "$GPU_ARCH_VERSION" == *"7.0"* ]]; then
-            PYTORCH_ROCM_ARCH="${PYTORCH_ROCM_ARCH};gfx950"
-        fi
         DOCKER_GPU_BUILD_ARG="--build-arg ROCM_VERSION=${GPU_ARCH_VERSION} --build-arg PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH} --build-arg DEVTOOLSET_VERSION=${DEVTOOLSET_VERSION}"
         ;;
     manylinux2_28-builder:xpu)

View File

@@ -10,11 +10,6 @@ boto3==1.35.42
 #Pinned versions: 1.19.12, 1.16.34
 #test that import:

-build==1.3.0
-#Description: A simple, correct Python build frontend.
-#Pinned versions: 1.3.0
-#test that import:
-
 click
 #Description: Command Line Interface Creation Kit
 #Pinned versions:
@@ -98,9 +93,8 @@ librosa==0.10.2 ; python_version == "3.12" and platform_machine != "s390x"
 #Pinned versions:
 #test that import:

-mypy==1.16.0 ; platform_system != "Windows"
+mypy==1.16.0
 # Pin MyPy version because new errors are likely to appear with each release
-# Skip on Windows as lots of type annotations are POSIX specific
 #Description: linter
 #Pinned versions: 1.16.0
 #test that import: test_typing.py, test_type_hints.py
@@ -111,12 +105,14 @@ networkx==2.8.8
 #Pinned versions: 2.8.8
 #test that import: functorch

-ninja==1.11.1.4
+ninja==1.11.1.3
 #Description: build system. Used in some tests. Used in build to generate build
 #time tracing information
-#Pinned versions: 1.11.1.4
+#Pinned versions: 1.11.1.3
 #test that import: run_test.py, test_cpp_extensions_aot.py,test_determination.py

+numba==0.49.0 ; python_version < "3.9" and platform_machine != "s390x"
+numba==0.55.2 ; python_version == "3.9" and platform_machine != "s390x"
 numba==0.55.2 ; python_version == "3.10" and platform_machine != "s390x"
 numba==0.60.0 ; python_version == "3.12" and platform_machine != "s390x"
 #Description: Just-In-Time Compiler for Numerical Functions
@@ -137,7 +133,7 @@ numba==0.60.0 ; python_version == "3.12" and platform_machine != "s390x"
 #test_nn.py, test_namedtensor.py, test_linalg.py, test_jit_cuda_fuser.py,
 #test_jit.py, test_indexing.py, test_datapipe.py, test_dataloader.py,
 #test_binary_ufuncs.py
-numpy==1.22.4; python_version == "3.10"
+numpy==1.22.4; python_version == "3.9" or python_version == "3.10"
 numpy==1.26.2; python_version == "3.11" or python_version == "3.12"
 numpy==2.1.2; python_version >= "3.13"
@@ -172,9 +168,9 @@ pillow==11.0.0
 #Pinned versions: 10.3.0
 #test that import:

-protobuf==5.29.5
+protobuf==5.29.4
 #Description: Google's data interchange format
-#Pinned versions: 5.29.5
+#Pinned versions: 5.29.4
 #test that import: test_tensorboard.py, test/onnx/*

 psutil
@@ -329,6 +325,8 @@ pywavelets==1.7.0 ; python_version >= "3.12"
 lxml==5.3.0
 #Description: This is a requirement of unittest-xml-reporting

+# Python-3.9 binaries
+
 PyGithub==2.3.0

 sympy==1.13.3
@@ -378,10 +376,10 @@ dataclasses_json==0.6.7
 #Pinned versions: 0.6.7
 #test that import:

-cmake==3.31.6
+cmake==4.0.0
 #Description: required for building

-tlparse==0.4.0
+tlparse==0.3.30
 #Description: required for log parsing

 cuda-bindings>=12.0,<13.0 ; platform_machine != "s390x"

View File

@@ -1,15 +1,8 @@
 sphinx==5.3.0
 #Description: This is used to generate PyTorch docs
 #Pinned versions: 5.3.0
-
-standard-imghdr==3.13.0; python_version >= "3.13"
-#Description: This is needed by Sphinx, so it needs to be added here.
-# The reasons are as follows:
-# 1) This module has been removed from the Python standard library since Python 3.13(https://peps.python.org/pep-0594/#imghdr);
-# 2) The current version of Sphinx (5.3.0) is not compatible with Python 3.13.
-# Once Sphinx is upgraded to a version compatible with Python 3.13 or later, we can remove this dependency.
-
--e git+https://github.com/pytorch/pytorch_sphinx_theme.git@71e55749be14ceb56e7f8211a9fb649866b87ad4#egg=pytorch_sphinx_theme2
+-e git+https://github.com/pytorch/pytorch_sphinx_theme.git@722b7e6f9ca512fcc526ad07d62b3d28c50bb6cd#egg=pytorch_sphinx_theme2

 # TODO: sphinxcontrib.katex 0.9.0 adds a local KaTeX server to speed up pre-rendering
 # but it doesn't seem to work and hangs around idly. The initial thought that it is probably
 # something related to Docker setup. We can investigate this later.

View File

@@ -1 +1 @@
-3.5.0
+3.4.0

View File

@@ -1 +1 @@
-3.5.0
+3.4.0

View File

@@ -52,13 +52,9 @@ ENV INSTALLED_VISION ${VISION}

 # Install rocm
 ARG ROCM_VERSION
-RUN mkdir ci_commit_pins
-COPY ./common/common_utils.sh common_utils.sh
-COPY ./ci_commit_pins/rocm-composable-kernel.txt ci_commit_pins/rocm-composable-kernel.txt
 COPY ./common/install_rocm.sh install_rocm.sh
 RUN bash ./install_rocm.sh
-RUN rm install_rocm.sh common_utils.sh
-RUN rm -r ci_commit_pins
+RUN rm install_rocm.sh
 COPY ./common/install_rocm_magma.sh install_rocm_magma.sh
 RUN bash ./install_rocm_magma.sh ${ROCM_VERSION}
 RUN rm install_rocm_magma.sh

View File

@ -66,7 +66,6 @@ ENV NCCL_LIB_DIR="/usr/local/cuda/lib64/"
# (optional) Install UCC # (optional) Install UCC
ARG UCX_COMMIT ARG UCX_COMMIT
ARG UCC_COMMIT ARG UCC_COMMIT
ARG CUDA_VERSION
ENV UCX_COMMIT $UCX_COMMIT ENV UCX_COMMIT $UCX_COMMIT
ENV UCC_COMMIT $UCC_COMMIT ENV UCC_COMMIT $UCC_COMMIT
ENV UCX_HOME /usr ENV UCX_HOME /usr

View File

@@ -7,4 +7,4 @@ set -ex

 SCRIPTPATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

-USE_NVSHMEM=0 USE_CUSPARSELT=0 BUILD_PYTHONLESS=1 DESIRED_PYTHON="3.10" ${SCRIPTPATH}/../manywheel/build.sh
+USE_NVSHMEM=0 USE_CUSPARSELT=0 BUILD_PYTHONLESS=1 DESIRED_PYTHON="3.9" ${SCRIPTPATH}/../manywheel/build.sh

View File

@@ -3,6 +3,7 @@ from __future__ import annotations
 import logging
 import os
 import textwrap
+import xml.etree.ElementTree as ET
 from pathlib import Path
 from typing import TYPE_CHECKING
@@ -16,6 +17,23 @@ if TYPE_CHECKING:

 logger = logging.getLogger(__name__)

+# ---- Template (title + per-command failures) ----
+_TPL_FAIL_BY_CMD = Template(
+    textwrap.dedent("""\
+    ## {{ title }}
+    {%- for section in sections if section.failures %}
+
+    ### Test Command: {{ section.label }}
+    {%- for f in section.failures %}
+    - {{ f }}
+    {%- endfor %}
+    {%- endfor %}
+    """)
+)
+
 _TPL_CONTENT = Template(
     textwrap.dedent("""\
     ## {{ title }}
@@ -141,3 +159,65 @@ def render_content(
         tpl = _TPL_CONTENT
     md = tpl.render(title=title, content=content, lang=lang)
     return md
+
+
+def summarize_failures_by_test_command(
+    xml_and_labels: Iterable[tuple[str | Path, str]],
+    *,
+    title: str = "Pytest Failures by Test Command",
+    dedupe_within_command: bool = True,
+) -> bool:
+    """
+    Render a single Markdown block summarizing failures grouped by test command.
+    Returns True if anything was written, False otherwise.
+    """
+    sections: list[dict] = []
+    for xml_path, label in xml_and_labels:
+        xmlp = Path(xml_path)
+        if not xmlp.exists():
+            # optional: your logger
+            # logger.warning("XML %s not found, skipping", xmlp)
+            continue
+        failed = _parse_failed(xmlp)
+        if dedupe_within_command:
+            failed = sorted(set(failed))
+        # collect even if empty; we'll filter in the template render
+        sections.append({"label": label, "failures": failed})
+
+    # If *all* sections are empty or we collected nothing, skip writing.
+    if not sections or all(not s["failures"] for s in sections):
+        return False
+
+    md = _TPL_FAIL_BY_CMD.render(title=title, sections=sections).rstrip() + "\n"
+    return write_gh_step_summary(md)
+
+
+def _to_name_from_testcase(tc: ET.Element) -> str:
+    name = tc.attrib.get("name", "")
+    file_attr = tc.attrib.get("file")
+    if file_attr:
+        return f"{file_attr}:{name}"
+    classname = tc.attrib.get("classname", "")
+    parts = classname.split(".") if classname else []
+    if len(parts) >= 1:
+        mod_parts = parts[:-1] if len(parts) >= 2 else parts
+        mod_path = "/".join(mod_parts) + ".py" if mod_parts else "unknown.py"
+        return f"{mod_path}:{name}"
+    return f"unknown.py:{name or 'unknown_test'}"
+
+
+def _parse_failed(xml_path: Path) -> list[str]:
+    if not xml_path.exists():
+        return []
+    tree = ET.parse(xml_path)
+    root = tree.getroot()
+    failed: list[str] = []
+    for tc in root.iter("testcase"):
+        if any(x.tag in {"failure", "error"} for x in tc):
+            failed.append(_to_name_from_testcase(tc))
+    return failed
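
Illustrative only: how the added helpers map a junit `<testcase>` element to a `path/to/file.py:test_name` failure entry. The XML snippet below is made up; a testcase counts as failed when it contains a `failure` or `error` child:

```python
import xml.etree.ElementTree as ET
from pathlib import Path

xml_text = """<testsuite>
  <testcase file="tests/basic/test_foo.py" name="test_ok"/>
  <testcase file="tests/basic/test_foo.py" name="test_boom">
    <failure message="assert 1 == 2"/>
  </testcase>
</testsuite>"""

path = Path("sample_junit.xml")
path.write_text(xml_text)

root = ET.parse(path).getroot()
failed = [
    f"{tc.attrib['file']}:{tc.attrib['name']}"
    for tc in root.iter("testcase")
    if any(child.tag in {"failure", "error"} for child in tc)
]
print(failed)  # ['tests/basic/test_foo.py:test_boom']
```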

View File

@@ -4,6 +4,7 @@ General Utility helpers for CLI tasks.

 import logging
 import os
+import secrets
 import shlex
 import subprocess
 import sys
@@ -11,6 +12,8 @@ from contextlib import contextmanager
 from pathlib import Path
 from typing import Optional

+from cli.lib.common.path_helper import ensure_dir_exists
+
 logger = logging.getLogger(__name__)
@@ -137,3 +140,42 @@ def get_wheels(
             relpath = str((Path(dirpath) / fname).relative_to(root))
             items.append({"pkg": pkg, "relpath": relpath})
     return items
+
+
+def attach_junitxml_if_pytest(
+    cmd: str,
+    dir: Optional[Path],
+    prefix: str,
+    *,
+    ensure_unique: bool = False,
+    resolve_xml: bool = False,
+) -> tuple[str, Optional[Path]]:
+    """
+    Append --junitxml=<ABS_PATH> to a pytest command string.
+    The XML filename is <prefix>_<random-hex>.xml.
+    - dir: target folder (will be created), if None, skip the junitxml attachment
+    - prefix: filename prefix (e.g., "junit" -> junit_ab12cd34.xml)
+    - ensure_unique: if True, regenerate a hash with 8 characters
+    Returns: (amended_cmd, abs_xml_path)
+    """
+    if "pytest" not in cmd:
+        return cmd, None
+    if dir is None:
+        return cmd, None
+
+    ensure_dir_exists(dir)
+
+    file_name_prefix = f"{prefix}"
+    if ensure_unique:
+        file_name_prefix += f"_{unique_hex(8)}"
+    xml_path = dir / f"{file_name_prefix}_junit_pytest.xml"
+    if resolve_xml:
+        xml_path = xml_path.resolve()
+    return f"{cmd} --junitxml={xml_path.as_posix()}", xml_path
+
+
+def unique_hex(length: int = 8) -> str:
+    """Return a random hex string of `length` characters."""
+    return secrets.token_hex((length + 1) // 2)[:length]
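
Hypothetical in-repo usage of the helper added above; the import path matches the diff, the argument values are illustrative. Non-pytest commands pass through unchanged, pytest commands gain an absolute `--junitxml` path:

```python
from pathlib import Path

from cli.lib.common.utils import attach_junitxml_if_pytest

cmd, xml = attach_junitxml_if_pytest(
    "pytest -v -s basic_correctness/test_cumem.py",
    dir=Path("/tmp/test-reports"),
    prefix="vllm_basic_test_0",
    ensure_unique=True,
)
print(cmd)  # ends with --junitxml=/tmp/test-reports/vllm_basic_test_0_<hex>_junit_pytest.xml
print(xml)  # Path to that report

# A non-pytest step passes through untouched:
assert attach_junitxml_if_pytest("echo hi", dir=Path("/tmp"), prefix="x") == ("echo hi", None)
```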

View File

@@ -1,12 +1,18 @@
 import logging
 import os
 import textwrap
-from typing import Any
+from pathlib import Path
+from typing import Any, Optional

 from cli.lib.common.gh_summary import write_gh_step_summary
 from cli.lib.common.git_helper import clone_external_repo
 from cli.lib.common.pip_helper import pip_install_packages
-from cli.lib.common.utils import run_command, temp_environ, working_directory
+from cli.lib.common.utils import (
+    attach_junitxml_if_pytest,
+    run_command,
+    temp_environ,
+    working_directory,
+)
 from jinja2 import Template

@@ -41,6 +47,7 @@ def sample_vllm_test_library():
             "pytest -v -s basic_correctness/test_cumem.py",
             "pytest -v -s basic_correctness/test_basic_correctness.py",
             "pytest -v -s basic_correctness/test_cpu_offload.py",
+            "VLLM_TEST_ENABLE_ARTIFICIAL_PREEMPT=1 pytest -v -s basic_correctness/test_preemption.py",
         ],
     },
     "vllm_basic_models_test": {

@@ -67,12 +74,16 @@ def sample_vllm_test_library():
                     "-v",
                     "-s",
                     "entrypoints/llm",
+                    "--ignore=entrypoints/llm/test_lazy_outlines.py",
                     "--ignore=entrypoints/llm/test_generate.py",
+                    "--ignore=entrypoints/llm/test_generate_multiple_loras.py",
                     "--ignore=entrypoints/llm/test_collective_rpc.py",
                 ]
             ),
-            "pytest -v -s entrypoints/llm/test_generate.py",
-            "pytest -v -s entrypoints/offline_mode",
+            "pytest -v -s entrypoints/llm/test_lazy_outlines.py",
+            "pytest -v -s entrypoints/llm/test_generate.py ",
+            "pytest -v -s entrypoints/llm/test_generate_multiple_loras.py",
+            "VLLM_USE_V1=0 pytest -v -s entrypoints/offline_mode",
         ],
     },
     "vllm_regression_test": {

@@ -92,24 +103,14 @@ def sample_vllm_test_library():
         "num_gpus": 4,
         "steps": [
             "pytest -v -s -x lora/test_chatglm3_tp.py",
-            "echo $VLLM_WORKER_MULTIPROC_METHOD",
             "pytest -v -s -x lora/test_llama_tp.py",
-            "pytest -v -s -x lora/test_llm_with_multi_loras.py",
+            "pytest -v -s -x lora/test_multi_loras_with_tp.py",
         ],
     },
-    "vllm_distributed_test_28_failure_test": {
-        "title": "Distributed Tests (2 GPUs) pytorch 2.8 release failure",
-        "id": "vllm_distributed_test_28_failure_test",
-        "env_vars": {
-            "VLLM_WORKER_MULTIPROC_METHOD": "spawn",
-        },
-        "num_gpus": 4,
-        "steps": [
-            "pytest -v -s distributed/test_sequence_parallel.py",
-        ],
-    },
-    "vllm_lora_28_failure_test": {
-        "title": "LoRA pytorch 2.8 failure test",
-        "id": "vllm_lora_28_failure_test",
+    "vllm_lora_280_failure_test": {
+        "title": "LoRA 280 failure test",
+        "id": "vllm_lora_280_failure_test",
         "steps": ["pytest -v lora/test_quant_model.py"],
     },
     "vllm_multi_model_processor_test": {

@@ -120,15 +121,6 @@ def sample_vllm_test_library():
             "pytest -v -s models/multimodal/processing --ignore models/multimodal/processing/test_tensor_schema.py",
         ],
     },
-    "vllm_multi_model_test_28_failure_test": {
-        "title": "Multi-Model Test (Failed 2.8 release)",
-        "id": "vllm_multi_model_test_28_failure_test",
-        "package_install": ["git+https://github.com/TIGER-AI-Lab/Mantis.git"],
-        "steps": [
-            "pytest -v -s models/multimodal/generation/test_voxtral.py",
-            "pytest -v -s models/multimodal/pooling",
-        ],
-    },
     "vllm_pytorch_compilation_unit_tests": {
         "title": "PyTorch Compilation Unit Tests",
         "id": "vllm_pytorch_compilation_unit_tests",

@@ -143,28 +135,6 @@ def sample_vllm_test_library():
             "pytest -v -s compile/test_decorator.py",
         ],
     },
-    "vllm_languagde_model_test_extended_generation_28_failure_test": {
-        "title": "Language Models Test (Extended Generation) 2.8 release failure",
-        "id": "vllm_languagde_model_test_extended_generation_28_failure_test",
-        "package_install": [
-            "--no-build-isolation",
-            "git+https://github.com/Dao-AILab/causal-conv1d@v1.5.0.post8",
-        ],
-        "steps": [
-            "pytest -v -s models/language/generation/test_mistral.py",
-        ],
-    },
-    "vllm_distributed_test_2_gpu_28_failure_test": {
-        "title": "Distributed Tests (2 GPUs) pytorch 2.8 release failure",
-        "id": "vllm_distributed_test_2_gpu_28_failure_test",
-        "env_vars": {
-            "VLLM_WORKER_MULTIPROC_METHOD": "spawn",
-        },
-        "num_gpus": 4,
-        "steps": [
-            "pytest -v -s distributed/test_sequence_parallel.py",
-        ],
-    },
     # TODO(elainewy):need to add g6 with 4 gpus to run this test
     "vllm_lora_test": {
         "title": "LoRA Test %N",

@@ -222,6 +192,9 @@ def run_test_plan(
     tests_map: dict[str, Any],
     shard_id: int = 0,
     num_shards: int = 0,
+    *,
+    test_summary_path: Optional[Path] = None,
+    test_summary_result: Optional[list[tuple[str, str]]] = None,
 ):
     """
     a method to run list of tests based on the test plan.

@@ -234,7 +207,6 @@ def run_test_plan(
     tests = tests_map[test_plan]
     pkgs = tests.get("package_install", [])
     title = tests.get("title", "unknown test")
-
     is_parallel = check_parallelism(tests, title, shard_id, num_shards)
     if is_parallel:
         title = title.replace("%N", f"{shard_id}/{num_shards}")

@@ -248,7 +220,15 @@ def run_test_plan(
         temp_environ(tests.get("env_vars", {})),
     ):
         failures = []
-        for step in tests["steps"]:
+        for idx, step in enumerate(tests["steps"]):
+            # generate xml report for each test for test summary if needed
+            step, xml_file_path = attach_junitxml_if_pytest(
+                cmd=step, dir=test_summary_path, prefix=f"{test_plan}_{idx}"
+            )
+            if xml_file_path and xml_file_path.exists() and test_summary_result:
+                test_summary_result.append((title, str(xml_file_path)))
+            else:
+                logger.info("No test report will be generate for %s", step)
             logger.info("Running step: %s", step)
             if is_parallel:
                 step = replace_buildkite_placeholders(step, shard_id, num_shards)
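The enumerate/attach block above threads a per-step JUnit XML path through to the test summary. The helper's definition is not part of this diff; the call site fixes its signature, but its behavior below is an assumption — a minimal sketch only:

from pathlib import Path
from typing import Optional

def attach_junitxml_if_pytest(
    cmd: str, dir: Optional[Path], prefix: str
) -> tuple[str, Optional[Path]]:
    # Sketch: only pytest invocations get a --junitxml report; any other
    # shell step (echo, env exports, ...) passes through unchanged.
    if dir is None or "pytest" not in cmd:
        return cmd, None
    xml_file = dir / f"{prefix}.xml"
    return f"{cmd} --junitxml={xml_file}", xml_file

# e.g. attach_junitxml_if_pytest(
#     cmd="pytest -v lora/test_quant_model.py",
#     dir=Path("tmp_pytest_report"), prefix="vllm_lora_280_failure_test_0")
# -> ("pytest -v lora/test_quant_model.py --junitxml=tmp_pytest_report/vllm_lora_280_failure_test_0.xml", ...)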

View File

@@ -66,11 +66,6 @@ class VllmBuildParameters:
         "DOCKERFILE_PATH", ".github/ci_configs/vllm/Dockerfile.tmp_vllm"
     )

-    # the cleaning script to remove torch dependencies from pip
-    cleaning_script: Path = env_path_field(
-        "cleaning_script", ".github/ci_configs/vllm/use_existing_torch.py"
-    )
-
     # OUTPUT_DIR: where docker buildx (local exporter) will write artifacts
     output_dir: Path = env_path_field("OUTPUT_DIR", "external/vllm")

@@ -165,7 +160,6 @@ class VllmBuildRunner(BaseRunner):
         logger.info("Running vllm build with inputs: %s", inputs)
         vllm_commit = clone_vllm()

-        self.cp_torch_cleaning_script(inputs)
         self.cp_dockerfile_if_exist(inputs)
         # cp torch wheels from root direct to vllm workspace if exist
         self.cp_torch_whls_if_exist(inputs)

@@ -211,11 +205,6 @@ class VllmBuildRunner(BaseRunner):
             copy(inputs.torch_whls_path, tmp_dir)
         return tmp_dir

-    def cp_torch_cleaning_script(self, inputs: VllmBuildParameters):
-        script = get_path(inputs.cleaning_script, resolve=True)
-        vllm_script = Path(f"./{self.work_directory}/use_existing_torch.py")
-        copy(script, vllm_script)
-
     def cp_dockerfile_if_exist(self, inputs: VllmBuildParameters):
         if not inputs.use_local_dockerfile:
             logger.info("using vllm default dockerfile.torch_nightly for build")

View File

@@ -11,15 +11,20 @@ from typing import Any

 from cli.lib.common.cli_helper import BaseRunner
 from cli.lib.common.envs_helper import env_path_field, env_str_field, get_env
-from cli.lib.common.path_helper import copy, get_path, remove_dir
+from cli.lib.common.gh_summary import (
+    gh_summary_path,
+    summarize_failures_by_test_command,
+    write_gh_step_summary,
+)
+from cli.lib.common.path_helper import copy, remove_dir
 from cli.lib.common.pip_helper import (
     pip_install_first_match,
     pip_install_packages,
     pkg_exists,
     run_python,
 )
-from cli.lib.common.utils import run_command, working_directory
-from cli.lib.core.vllm.lib import clone_vllm, run_test_plan, sample_vllm_test_library
+from cli.lib.common.utils import ensure_dir_exists, run_command, working_directory
+from cli.lib.core.vllm.lib import clone_vllm, run_test_plan, sample_vllm_test_library, summarize_build_info

 logger = logging.getLogger(__name__)

@@ -43,10 +48,6 @@ class VllmTestParameters:

     torch_cuda_arch_list: str = env_str_field("TORCH_CUDA_ARCH_LIST", "8.9")

-    cleaning_script: Path = env_path_field(
-        "cleaning_script", ".github/ci_configs/vllm/use_existing_torch.py"
-    )
-
     def __post_init__(self):
         if not self.torch_whls_path.exists():
             raise ValueError("missing torch_whls_path")

@@ -95,21 +96,26 @@ class VllmTestRunner(BaseRunner):
         logger.info("Display VllmTestParameters %s", params)
         self._set_envs(params)

-        clone_vllm(dst=self.work_directory)
-        self.cp_torch_cleaning_script(params)
+        vllm_commit = clone_vllm(dst=self.work_directory)
         with working_directory(self.work_directory):
             remove_dir(Path("vllm"))
             self._install_wheels(params)
             self._install_dependencies()
         # verify the torches are not overridden by test dependencies
         check_versions()
+        return vllm_commit

     def run(self):
         """
         main function to run vllm test
         """
-        self.prepare()
+        vllm_commit = self.prepare()
+        # prepare test summary
+        test_summary_path = Path("tmp_pytest_report").resolve()
+        ensure_dir_exists(test_summary_path)
+        test_summary_result = []
         try:
             with working_directory(self.work_directory):
                 if self.test_type == TestInpuType.TEST_PLAN:

@@ -120,21 +126,24 @@ class VllmTestRunner(BaseRunner):
                         sample_vllm_test_library(),
                         self.shard_id,
                         self.num_shards,
-                    )
-                else:
-                    run_test_plan(
-                        self.test_plan, "vllm", sample_vllm_test_library()
+                        test_summary_path=test_summary_path,
+                        test_summary_result=test_summary_result,
                     )
                 else:
                     raise ValueError(f"Unknown test type {self.test_type}")
+        except Exception as e:
+            logger.error("Failed to run vllm test: %s", e)
         finally:
-            # double check the torches are not overridden by other packages
-            check_versions()
-
-    def cp_torch_cleaning_script(self, params: VllmTestParameters):
-        script = get_path(params.cleaning_script, resolve=True)
-        vllm_script = Path(f"./{self.work_directory}/use_existing_torch.py")
-        copy(script, vllm_script)
+            self.vllm_test_gh_summary(vllm_commit, test_summary_result)
+
+    def vllm_test_gh_summary(
+        self, vllm_commit: str, test_summary_results: list[tuple[str, str]]
+    ):
+        if not gh_summary_path():
+            return logger.info("Skipping, not detect GH Summary env var....")
+        logger.info("Generate GH Summary ...")
+        summarize_build_info(vllm_commit)
+        summarize_failures_by_test_command(test_summary_results)

     def _install_wheels(self, params: VllmTestParameters):
         logger.info("Running vllm test with inputs: %s", params)

View File

@@ -1,11 +1,11 @@
 SHELL=/usr/bin/env bash

 DOCKER_CMD ?= docker
-DESIRED_ROCM ?= 7.0
+DESIRED_ROCM ?= 6.4
 DESIRED_ROCM_SHORT = $(subst .,,$(DESIRED_ROCM))
 PACKAGE_NAME = magma-rocm
 # inherit this from underlying docker image, do not pass this env var to docker
-#PYTORCH_ROCM_ARCH ?= gfx900;gfx906;gfx908;gfx90a;gfx942;gfx950;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201
+#PYTORCH_ROCM_ARCH ?= gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201

 DOCKER_RUN = set -eou pipefail; ${DOCKER_CMD} run --rm -i \
 	-v $(shell git rev-parse --show-toplevel)/.ci:/builder \

@@ -16,7 +16,6 @@ DOCKER_RUN = set -eou pipefail; ${DOCKER_CMD} run --rm -i \
 	magma-rocm/build_magma.sh

 .PHONY: all
-all: magma-rocm70
 all: magma-rocm64
 all: magma-rocm63

@@ -25,11 +24,6 @@ clean:
 	$(RM) -r magma-*
 	$(RM) -r output

-.PHONY: magma-rocm70
-magma-rocm70: DESIRED_ROCM := 7.0
-magma-rocm70:
-	$(DOCKER_RUN)
-
 .PHONY: magma-rocm64
 magma-rocm64: DESIRED_ROCM := 6.4
 magma-rocm64:

View File

@@ -6,8 +6,8 @@ set -eou pipefail
 # The script expects DESIRED_CUDA and PACKAGE_NAME to be set
 ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"

-# https://github.com/icl-utk-edu/magma/pull/65
-MAGMA_VERSION=d6e4117bc88e73f06d26c6c2e14f064e8fc3d1ec
+# Version 2.7.2 + ROCm related updates
+MAGMA_VERSION=a1625ff4d9bc362906bd01f805dbbe12612953f6

 # Folders for the build
 PACKAGE_FILES=${ROOT_DIR}/magma-rocm/package_files # metadata

@@ -20,7 +20,7 @@ mkdir -p ${PACKAGE_DIR} ${PACKAGE_OUTPUT}/linux-64 ${PACKAGE_BUILD} ${PACKAGE_RE
 # Fetch magma sources and verify checksum
 pushd ${PACKAGE_DIR}
-git clone https://github.com/jeffdaily/magma
+git clone https://bitbucket.org/icl/magma.git
 pushd magma
 git checkout ${MAGMA_VERSION}
 popd

View File

@@ -142,7 +142,7 @@ time CMAKE_ARGS=${CMAKE_ARGS[@]} \
     EXTRA_CAFFE2_CMAKE_FLAGS=${EXTRA_CAFFE2_CMAKE_FLAGS[@]} \
     BUILD_LIBTORCH_CPU_WITH_DEBUG=$BUILD_DEBUG_INFO \
     USE_NCCL=${USE_NCCL} USE_RCCL=${USE_RCCL} USE_KINETO=${USE_KINETO} \
-    python -m build --wheel --no-isolation --outdir /tmp/$WHEELHOUSE_DIR
+    python setup.py bdist_wheel -d /tmp/$WHEELHOUSE_DIR
 echo "Finished setup.py bdist at $(date)"

 # Build libtorch packages

View File

@@ -124,7 +124,6 @@ if [[ $CUDA_VERSION == 12* || $CUDA_VERSION == 13* ]]; then
 fi

 if [[ -z "$PYTORCH_EXTRA_INSTALL_REQUIREMENTS" ]]; then
-    echo "Bundling with cudnn and cublas."
     DEPS_LIST+=(
         "/usr/local/cuda/lib64/libcudnn_adv.so.9"
         "/usr/local/cuda/lib64/libcudnn_cnn.so.9"

@@ -134,11 +133,16 @@ if [[ $CUDA_VERSION == 12* || $CUDA_VERSION == 13* ]]; then
         "/usr/local/cuda/lib64/libcudnn_engines_precompiled.so.9"
         "/usr/local/cuda/lib64/libcudnn_heuristic.so.9"
         "/usr/local/cuda/lib64/libcudnn.so.9"
+        "/usr/local/cuda/lib64/libcublas.so.12"
+        "/usr/local/cuda/lib64/libcublasLt.so.12"
         "/usr/local/cuda/lib64/libcusparseLt.so.0"
+        "/usr/local/cuda/lib64/libcudart.so.12"
+        "/usr/local/cuda/lib64/libnvrtc.so.12"
        "/usr/local/cuda/lib64/libnvrtc-builtins.so"
         "/usr/local/cuda/lib64/libcufile.so.0"
         "/usr/local/cuda/lib64/libcufile_rdma.so.1"
         "/usr/local/cuda/lib64/libnvshmem_host.so.3"
+        "/usr/local/cuda/extras/CUPTI/lib64/libcupti.so.12"
         "/usr/local/cuda/extras/CUPTI/lib64/libnvperf_host.so"
     )
     DEPS_SONAME+=(

@@ -150,56 +154,22 @@ if [[ $CUDA_VERSION == 12* || $CUDA_VERSION == 13* ]]; then
         "libcudnn_engines_precompiled.so.9"
         "libcudnn_heuristic.so.9"
         "libcudnn.so.9"
+        "libcublas.so.12"
+        "libcublasLt.so.12"
         "libcusparseLt.so.0"
+        "libcudart.so.12"
+        "libnvrtc.so.12"
         "libnvrtc-builtins.so"
         "libnvshmem_host.so.3"
         "libcufile.so.0"
         "libcufile_rdma.so.1"
+        "libcupti.so.12"
         "libnvperf_host.so"
     )
     # Add libnvToolsExt only if CUDA version is not 12.9
-    if [[ $CUDA_VERSION == 13* ]]; then
-        DEPS_LIST+=(
-            "/usr/local/cuda/lib64/libcublas.so.13"
-            "/usr/local/cuda/lib64/libcublasLt.so.13"
-            "/usr/local/cuda/lib64/libcudart.so.13"
-            "/usr/local/cuda/lib64/libnvrtc.so.13"
-            "/usr/local/cuda/extras/CUPTI/lib64/libcupti.so.13"
-            "/usr/local/cuda/lib64/libibverbs.so.1"
-            "/usr/local/cuda/lib64/librdmacm.so.1"
-            "/usr/local/cuda/lib64/libmlx5.so.1"
-            "/usr/local/cuda/lib64/libnl-3.so.200"
-            "/usr/local/cuda/lib64/libnl-route-3.so.200")
-        DEPS_SONAME+=(
-            "libcublas.so.13"
-            "libcublasLt.so.13"
-            "libcudart.so.13"
-            "libnvrtc.so.13"
-            "libcupti.so.13"
-            "libibverbs.so.1"
-            "librdmacm.so.1"
-            "libmlx5.so.1"
-            "libnl-3.so.200"
-            "libnl-route-3.so.200")
-        export USE_CUPTI_SO=1
-        export ATEN_STATIC_CUDA=0
-        export USE_CUDA_STATIC_LINK=0
-        export USE_CUFILE=0
-    else
-        DEPS_LIST+=(
-            "/usr/local/cuda/lib64/libnvToolsExt.so.1"
-            "/usr/local/cuda/lib64/libcublas.so.12"
-            "/usr/local/cuda/lib64/libcublasLt.so.12"
-            "/usr/local/cuda/lib64/libcudart.so.12"
-            "/usr/local/cuda/lib64/libnvrtc.so.12"
-            "/usr/local/cuda/extras/CUPTI/lib64/libcupti.so.12")
-        DEPS_SONAME+=(
-            "libnvToolsExt.so.1"
-            "libcublas.so.12"
-            "libcublasLt.so.12"
-            "libcudart.so.12"
-            "libnvrtc.so.12"
-            "libcupti.so.12")
+    if [[ $CUDA_VERSION != 12.9* ]]; then
+        DEPS_LIST+=("/usr/local/cuda/lib64/libnvToolsExt.so.1")
+        DEPS_SONAME+=("libnvToolsExt.so.1")
     fi
 else
     echo "Using nvidia libs from pypi."

View File

@@ -104,7 +104,7 @@ if [[ "$DESIRED_CUDA" == *"rocm"* ]]; then
     export ROCclr_DIR=/opt/rocm/rocclr/lib/cmake/rocclr
 fi

-echo "Calling -m pip install . -v --no-build-isolation at $(date)"
+echo "Calling 'python -m pip install .' at $(date)"

 if [[ $LIBTORCH_VARIANT = *"static"* ]]; then
     STATIC_CMAKE_FLAG="-DTORCH_STATIC=1"

View File

@@ -107,10 +107,6 @@ if [[ $ROCM_INT -ge 60200 ]]; then
     ROCM_SO_FILES+=("librocm-core.so")
 fi

-if [[ $ROCM_INT -ge 70000 ]]; then
-    ROCM_SO_FILES+=("librocroller.so")
-fi
-
 OS_NAME=`awk -F= '/^NAME/{print $2}' /etc/os-release`
 if [[ "$OS_NAME" == *"CentOS Linux"* || "$OS_NAME" == *"AlmaLinux"* ]]; then
     LIBGOMP_PATH="/usr/lib64/libgomp.so.1"

View File

@@ -290,13 +290,13 @@ else
         WERROR=1 python setup.py clean

-        WERROR=1 python -m build --wheel --no-isolation
+        WERROR=1 python setup.py bdist_wheel
     else
         python setup.py clean
         if [[ "$BUILD_ENVIRONMENT" == *xla* ]]; then
             source .ci/pytorch/install_cache_xla.sh
         fi
-        python -m build --wheel --no-isolation
+        python setup.py bdist_wheel
     fi
     pip_install_whl "$(echo dist/*.whl)"

View File

@@ -300,3 +300,24 @@ except RuntimeError as e:
     exit 1
 fi
 fi
+
+###############################################################################
+# Check for C++ ABI compatibility to GCC-11 - GCC 13
+###############################################################################
+if [[ "$(uname)" == 'Linux' && "$PACKAGE_TYPE" == 'manywheel' ]]; then
+    pushd /tmp
+    # Per https://gcc.gnu.org/onlinedocs/gcc/C_002b_002b-Dialect-Options.html
+    # gcc-11 is ABI16, gcc-13 is ABI18, gcc-14 is ABI19
+    # gcc 11 - CUDA 11.8, xpu, rocm
+    # gcc 13 - CUDA 12.6, 12.8 and cpu
+    # Please see issue for reference: https://github.com/pytorch/pytorch/issues/152426
+    if [[ "$(uname -m)" == "s390x" ]]; then
+        cxx_abi="19"
+    elif [[ "$DESIRED_CUDA" != 'xpu' && "$DESIRED_CUDA" != 'rocm'* ]]; then
+        cxx_abi="18"
+    else
+        cxx_abi="16"
+    fi
+    python -c "import torch; exit(0 if torch._C._PYBIND11_BUILD_ABI == '_cxxabi10${cxx_abi}' else 1)"
+    popd
+fi
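The check boils down to comparing pybind11's recorded ABI tag against the GCC dialect the wheel is expected to target. A standalone probe of the same attribute, to be run inside the environment holding the built wheel (the expected_abi value here is only an example, not a rule beyond what the comments above state):

# Minimal restatement of the one-liner above. The '_cxxabi10XX' suffix
# encodes GCC's -fabi-version: 16 -> gcc-11, 18 -> gcc-13, 19 -> gcc-14.
import torch

expected_abi = "18"  # e.g. a CUDA 12.8 manywheel build per the table above
actual = torch._C._PYBIND11_BUILD_ABI  # e.g. '_cxxabi1018'
print("pybind11 build ABI:", actual)
raise SystemExit(0 if actual == f"_cxxabi10{expected_abi}" else 1)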

View File

@@ -258,19 +258,11 @@ function install_torchrec_and_fbgemm() {
     git clone --recursive https://github.com/pytorch/fbgemm
     pushd fbgemm/fbgemm_gpu
     git checkout "${fbgemm_commit}" --recurse-submodules
-    # until the fbgemm_commit includes the tbb patch
-    patch <<'EOF'
---- a/FbgemmGpu.cmake
-+++ b/FbgemmGpu.cmake
-@@ -184,5 +184,6 @@ gpu_cpp_library(
-     fbgemm_gpu_tbe_cache
-     fbgemm_gpu_tbe_optimizers
-     fbgemm_gpu_tbe_utils
-+    tbb
-   DESTINATION
-     fbgemm_gpu)
-EOF
-    python setup.py bdist_wheel --build-variant=rocm
+    python setup.py bdist_wheel \
+        --build-variant=rocm \
+        -DHIP_ROOT_DIR="${ROCM_PATH}" \
+        -DCMAKE_C_FLAGS="-DTORCH_USE_HIP_DSA" \
+        -DCMAKE_CXX_FLAGS="-DTORCH_USE_HIP_DSA"
     popd

 # Save the wheel before cleaning up

View File

@@ -58,7 +58,7 @@ time python tools/setup_helpers/generate_code.py \
 # Build the docs
 pushd docs/cpp
-time make VERBOSE=1 html
+time make VERBOSE=1 html -j
 popd

 popd

View File

@@ -0,0 +1,40 @@
#!/bin/bash
# This is where the local pytorch install in the docker image is located
pt_checkout="/var/lib/jenkins/workspace"
source "$pt_checkout/.ci/pytorch/common_utils.sh"
echo "functorch_doc_push_script.sh: Invoked with $*"
set -ex -o pipefail
version=${DOCS_VERSION:-nightly}
echo "version: $version"
# Build functorch docs
pushd $pt_checkout/functorch/docs
make html
popd
git clone https://github.com/pytorch/functorch -b gh-pages --depth 1 functorch_ghpages
pushd functorch_ghpages
if [ "$version" == "main" ]; then
version=nightly
fi
git rm -rf "$version" || true
mv "$pt_checkout/functorch/docs/build/html" "$version"
git add "$version" || true
git status
git config user.email "soumith+bot@pytorch.org"
git config user.name "pytorchbot"
# If there aren't changes, don't make a commit; push is no-op
git commit -m "Generate Python docs from pytorch/pytorch@${GITHUB_SHA}" || true
git status
if [[ "${WITH_PUSH:-}" == true ]]; then
git push -u origin gh-pages
fi
popd

View File

@@ -36,11 +36,11 @@ fi
 print_cmake_info
 if [[ ${BUILD_ENVIRONMENT} == *"distributed"* ]]; then
   # Needed for inductor benchmarks, as lots of HF networks make `torch.distribtued` calls
-  USE_DISTRIBUTED=1 USE_OPENMP=1 WERROR=1 python -m build --wheel --no-isolation
+  USE_DISTRIBUTED=1 USE_OPENMP=1 WERROR=1 python setup.py bdist_wheel
 else
   # Explicitly set USE_DISTRIBUTED=0 to align with the default build config on mac. This also serves as the sole CI config that tests
   # that building with USE_DISTRIBUTED=0 works at all. See https://github.com/pytorch/pytorch/issues/86448
-  USE_DISTRIBUTED=0 USE_OPENMP=1 MACOSX_DEPLOYMENT_TARGET=11.0 WERROR=1 BUILD_TEST=OFF USE_PYTORCH_METAL=1 python -m build --wheel --no-isolation -C--build-option=--plat-name=macosx_11_0_arm64
+  USE_DISTRIBUTED=0 USE_OPENMP=1 MACOSX_DEPLOYMENT_TARGET=11.0 WERROR=1 BUILD_TEST=OFF USE_PYTORCH_METAL=1 python setup.py bdist_wheel --plat-name macosx_11_0_arm64
 fi
 if which sccache > /dev/null; then
   print_sccache_stats

View File

@@ -55,7 +55,7 @@ test_python_shard() {
   setup_test_python

-  time python test/run_test.py --verbose --exclude-jit-executor --exclude-distributed-tests --exclude-quantization-tests --shard "$1" "$NUM_TEST_SHARDS"
+  time python test/run_test.py --verbose --exclude-jit-executor --exclude-distributed-tests --shard "$1" "$NUM_TEST_SHARDS"

   assert_git_not_dirty
 }

@@ -195,7 +195,7 @@ torchbench_setup_macos() {
     git checkout "$(cat ../.github/ci_commit_pins/vision.txt)"
     git submodule update --init --recursive
     python setup.py clean
-    python -m pip install -e . -v --no-build-isolation
+    python setup.py develop
     popd

   pushd torchaudio

@@ -204,7 +204,7 @@ torchbench_setup_macos() {
     git submodule update --init --recursive
     python setup.py clean
     #TODO: Remove me, when figure out how to make TorchAudio find brew installed openmp
-    USE_OPENMP=0 python -m pip install -e . -v --no-build-isolation
+    USE_OPENMP=0 python setup.py develop
     popd

   checkout_install_torchbench

@@ -302,47 +302,6 @@ test_torchbench_smoketest() {
     fi
   done

-  echo "Pytorch benchmark on mps device completed"
-}
-
-test_aoti_torchbench_smoketest() {
-  print_cmake_info
-  echo "Launching AOTInductor torchbench setup"
-  pip_benchmark_deps
-  # shellcheck disable=SC2119,SC2120
-  torchbench_setup_macos
-
-  TEST_REPORTS_DIR=$(pwd)/test/test-reports
-  mkdir -p "$TEST_REPORTS_DIR"
-
-  local device=mps
-  local dtypes=(undefined float16 bfloat16 notset)
-  local dtype=${dtypes[$1]}
-  local models=(hf_T5 llama BERT_pytorch dcgan hf_GPT2 yolov3 resnet152 sam sam_fast pytorch_unet stable_diffusion_text_encoder speech_transformer Super_SloMo doctr_det_predictor doctr_reco_predictor timm_resnet timm_vovnet vgg16)
-
-  echo "Launching torchbench inference performance run for AOT Inductor and dtype ${dtype}"
-  local dtype_arg="--${dtype}"
-  if [ "$dtype" == notset ]; then
-    dtype_arg="--float32"
-  fi
-  touch "$TEST_REPORTS_DIR/aot_inductor_torchbench_${dtype}_inference_${device}_performance.csv"
-  for model in "${models[@]}"; do
-    PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/torchbench.py \
-      --performance --only "$model" --export-aot-inductor --inference --devices "$device" "$dtype_arg" \
-      --output "$TEST_REPORTS_DIR/aot_inductor_torchbench_${dtype}_inference_${device}_performance.csv" || true
-    PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/torchbench.py \
-      --accuracy --only "$model" --export-aot-inductor --inference --devices "$device" "$dtype_arg" \
-      --output "$TEST_REPORTS_DIR/aot_inductor_torchbench_${dtype}_inference_${device}_accuracy.csv" || true
-  done
-
-  echo "Launching HuggingFace inference performance run for AOT Inductor and dtype ${dtype}"
-  PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/huggingface.py \
-    --performance --export-aot-inductor --inference --devices "$device" "$dtype_arg" \
-    --output "$TEST_REPORTS_DIR/aot_inductor_huggingface_${dtype}_inference_${device}_performance.csv" || true
-  PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/huggingface.py \
-    --accuracy --export-aot-inductor --inference --devices "$device" "$dtype_arg" \
-    --output "$TEST_REPORTS_DIR/aot_inductor_huggingface_${dtype}_inference_${device}_accuracy.csv" || true
-
   echo "Pytorch benchmark on mps device completed"
 }

@@ -391,8 +350,6 @@ elif [[ $TEST_CONFIG == *"perf_timm"* ]]; then
   test_timm_perf
 elif [[ $TEST_CONFIG == *"perf_smoketest"* ]]; then
   test_torchbench_smoketest "${SHARD_NUMBER}"
-elif [[ $TEST_CONFIG == *"aot_inductor_perf_smoketest"* ]]; then
-  test_aoti_torchbench_smoketest "${SHARD_NUMBER}"
 elif [[ $TEST_CONFIG == *"mps"* ]]; then
   test_python_mps
 elif [[ $NUM_TEST_SHARDS -gt 1 ]]; then

View File

@@ -26,7 +26,6 @@ if [[ "${SHARD_NUMBER:-2}" == "2" ]]; then
   time python test/run_test.py --verbose -i distributed/test_c10d_spawn_gloo
   time python test/run_test.py --verbose -i distributed/test_c10d_spawn_nccl
   time python test/run_test.py --verbose -i distributed/test_compute_comm_reordering
-  time python test/run_test.py --verbose -i distributed/test_aten_comm_compute_reordering
   time python test/run_test.py --verbose -i distributed/test_store
   time python test/run_test.py --verbose -i distributed/test_symmetric_memory
   time python test/run_test.py --verbose -i distributed/test_pg_wrapper

View File

@@ -1,25 +0,0 @@
From 6e08c9d08e9de59c7af28b720289debbbd384764 Mon Sep 17 00:00:00 2001
From: Michael Wang <13521008+isVoid@users.noreply.github.com>
Date: Tue, 1 Apr 2025 17:28:05 -0700
Subject: [PATCH] Avoid bumping certain driver API to avoid future breakage
(#185)
Co-authored-by: isVoid <isVoid@users.noreply.github.com>
---
numba_cuda/numba/cuda/cudadrv/driver.py | 3 +++
1 file changed, 3 insertions(+)
diff --git a/numba_cuda/numba/cuda/cudadrv/driver.py b/numba_cuda/numba/cuda/cudadrv/driver.py
index 1641bf77..233e9ed7 100644
--- a/numba_cuda/numba/cuda/cudadrv/driver.py
+++ b/numba_cuda/numba/cuda/cudadrv/driver.py
@@ -365,6 +365,9 @@ def _find_api(self, fname):
else:
variants = ('_v2', '')
+ if fname in ("cuCtxGetDevice", "cuCtxSynchronize"):
+ return getattr(self.lib, fname)
+
for variant in variants:
try:
return getattr(self.lib, f'{fname}{variant}')
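The intent of the deleted patch, paraphrased in plain Python: Numba resolves CUDA driver symbols by trying versioned suffixes newest-first, and the patch pins the two context APIs to their unsuffixed entry points so a future _v2 bump in the driver cannot break them. This is a sketch of that logic, not the actual numba source:

def find_api(lib, fname: str, use_v2: bool = True):
    # The patched early-return: keep these two on the stable entry points.
    if fname in ("cuCtxGetDevice", "cuCtxSynchronize"):
        return getattr(lib, fname)
    # Otherwise prefer the newest suffixed variant, falling back to the bare name.
    variants = ("_v2", "") if use_v2 else ("",)
    for variant in variants:
        try:
            return getattr(lib, f"{fname}{variant}")
        except AttributeError:
            continue
    raise AttributeError(fname)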

View File

@@ -386,8 +386,8 @@ def smoke_test_compile(device: str = "cpu") -> None:

 def smoke_test_nvshmem() -> None:
-    if not torch.cuda.is_available() or target_os == "windows":
-        print("Windows platform or CUDA is not available, skipping NVSHMEM test")
+    if not torch.cuda.is_available():
+        print("CUDA is not available, skipping NVSHMEM test")
         return

     # Check if NVSHMEM is compiled in current build

@@ -396,9 +396,7 @@ def smoke_test_nvshmem() -> None:
     except ImportError:
         # Not built with NVSHMEM support.
         # torch is not compiled with NVSHMEM prior to 2.9
-        from torch.torch_version import TorchVersion
-
-        if TorchVersion(torch.__version__) < (2, 9):
+        if torch.__version__ < "2.9":
             return
         else:
             # After 2.9: NVSHMEM is expected to be compiled in current build
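Worth noting about this hunk: the minus side's TorchVersion comparison is numeric, while the plus side's plain string comparison is lexicographic, which misorders two-digit minor versions. A quick illustration:

# Lexicographic string comparison compares character by character,
# so '1' < '9' makes "2.10.0" sort before "2.9" -- wrong for versions.
assert ("2.10.0" < "2.9") is True

# TorchVersion (used on the other side of the hunk) compares numerically;
# this line requires torch to be installed.
from torch.torch_version import TorchVersion
assert (TorchVersion("2.10.0") < (2, 9)) is False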

View File

@@ -32,16 +32,6 @@ if [[ "$BUILD_ENVIRONMENT" != *rocm* && "$BUILD_ENVIRONMENT" != *s390x* && -d /v
   git config --global --add safe.directory /var/lib/jenkins/workspace
 fi

-# Patch numba to avoid CUDA-13 crash, see https://github.com/pytorch/pytorch/issues/162878
-NUMBA_CUDA_DIR=$(python -c "import os;import numba.cuda; print(os.path.dirname(numba.cuda.__file__))" 2>/dev/null || true)
-if [ -n "$NUMBA_CUDA_DIR" ]; then
-  NUMBA_PATCH="$(dirname "$(realpath "${BASH_SOURCE[0]}")")/numba-cuda-13.patch"
-  pushd "$NUMBA_CUDA_DIR"
-  patch -p4 <"$NUMBA_PATCH"
-  popd
-fi
-
 echo "Environment variables:"
 env

@@ -322,29 +312,23 @@ test_python_shard() {
   # modify LD_LIBRARY_PATH to ensure it has the conda env.
   # This set of tests has been shown to be buggy without it for the split-build
-  time python test/run_test.py --exclude-jit-executor --exclude-distributed-tests --exclude-quantization-tests $INCLUDE_CLAUSE --shard "$1" "$NUM_TEST_SHARDS" --verbose $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
+  time python test/run_test.py --exclude-jit-executor --exclude-distributed-tests $INCLUDE_CLAUSE --shard "$1" "$NUM_TEST_SHARDS" --verbose $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running

   assert_git_not_dirty
 }

 test_python() {
   # shellcheck disable=SC2086
-  time python test/run_test.py --exclude-jit-executor --exclude-distributed-tests --exclude-quantization-tests $INCLUDE_CLAUSE --verbose $PYTHON_TEST_EXTRA_OPTION
+  time python test/run_test.py --exclude-jit-executor --exclude-distributed-tests $INCLUDE_CLAUSE --verbose $PYTHON_TEST_EXTRA_OPTION
   assert_git_not_dirty
 }

 test_python_smoke() {
-  # Smoke tests for H100/B200
+  # Smoke tests for H100
   time python test/run_test.py --include test_matmul_cuda inductor/test_fp8 inductor/test_max_autotune $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
   assert_git_not_dirty
 }

-test_python_smoke_b200() {
-  # Targeted smoke tests for B200 - staged approach to avoid too many failures
-  time python test/run_test.py --include test_matmul_cuda inductor/test_fp8 $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
-  assert_git_not_dirty
-}
-
 test_h100_distributed() {
   # Distributed tests at H100
   time python test/run_test.py --include distributed/_composable/test_composability/test_pp_composability.py $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running

@@ -390,7 +374,6 @@ test_dynamo_wrapped_shard() {
     --exclude-distributed-tests \
     --exclude-torch-export-tests \
     --exclude-aot-dispatch-tests \
-    --exclude-quantization-tests \
     --shard "$1" "$NUM_TEST_SHARDS" \
     --verbose \
     --upload-artifacts-while-running

@@ -435,7 +418,7 @@ test_inductor_distributed() {
   # this runs on both single-gpu and multi-gpu instance. It should be smart about skipping tests that aren't supported
   # with if required # gpus aren't available
-  python test/run_test.py --include distributed/test_dynamo_distributed distributed/test_inductor_collectives distributed/test_aten_comm_compute_reordering distributed/test_compute_comm_reordering --verbose
+  python test/run_test.py --include distributed/test_dynamo_distributed distributed/test_inductor_collectives distributed/test_compute_comm_reordering --verbose
   assert_git_not_dirty
 }

@@ -513,14 +496,6 @@ test_inductor_cpp_wrapper_shard() {
     -k 'take' \
     --shard "$1" "$NUM_TEST_SHARDS" \
     --verbose
-
-  if [[ "${BUILD_ENVIRONMENT}" == *xpu* ]]; then
-    python test/run_test.py \
-      --include inductor/test_mkldnn_pattern_matcher \
-      -k 'xpu' \
-      --shard "$1" "$NUM_TEST_SHARDS" \
-      --verbose
-  fi
 }

 # "Global" flags for inductor benchmarking controlled by TEST_CONFIG

@@ -1163,12 +1138,6 @@ test_distributed() {
   fi
 }

-test_quantization() {
-  echo "Testing quantization"
-
-  python test/test_quantization.py
-}
-
 test_rpc() {
   echo "Testing RPC C++ tests"
   # NB: the ending test_rpc must match the current function name for the current

@@ -1415,7 +1384,7 @@ EOF
   pip3 install -r requirements.txt
   # shellcheck source=./common-build.sh
   source "$(dirname "${BASH_SOURCE[0]}")/common-build.sh"
-  python -m build --wheel --no-isolation -C--build-option=--bdist-dir="base_bdist_tmp" --outdir "base_dist"
+  python setup.py bdist_wheel --bdist-dir="base_bdist_tmp" --dist-dir="base_dist"
   python -mpip install base_dist/*.whl
   echo "::endgroup::"

@@ -1563,10 +1532,14 @@ test_executorch() {
   install_torchvision
   install_torchaudio

-  INSTALL_SCRIPT="$(pwd)/.ci/docker/common/install_executorch.sh"
-
   pushd /executorch
-  "${INSTALL_SCRIPT}" setup_executorch
+
+  export PYTHON_EXECUTABLE=python
+  export CMAKE_ARGS="-DEXECUTORCH_BUILD_PYBIND=ON -DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON"
+
+  # NB: We need to rebuild ExecuTorch runner here because it depends on PyTorch
+  # from the PR
+  bash .ci/scripts/setup-linux.sh --build-tool cmake

   echo "Run ExecuTorch unit tests"
   pytest -v -n auto

@@ -1580,14 +1553,17 @@ test_executorch() {

   popd

+  # Test torchgen generated code for Executorch.
+  echo "Testing ExecuTorch op registration"
+  "$BUILD_BIN_DIR"/test_edge_op_registration
+
   assert_git_not_dirty
 }

 test_linux_aarch64() {
   python test/run_test.py --include test_modules test_mkldnn test_mkldnn_fusion test_openmp test_torch test_dynamic_shapes \
         test_transformers test_multiprocessing test_numpy_interop test_autograd test_binary_ufuncs test_complex test_spectral_ops \
-        test_foreach test_reductions test_unary_ufuncs test_tensor_creation_ops test_ops profiler/test_memory_profiler \
-        distributed/elastic/timer/api_test distributed/elastic/timer/local_timer_example distributed/elastic/timer/local_timer_test \
+        test_foreach test_reductions test_unary_ufuncs test_tensor_creation_ops test_ops \
         --shard "$SHARD_NUMBER" "$NUM_TEST_SHARDS" --verbose

   # Dynamo tests

@@ -1617,7 +1593,7 @@ test_operator_benchmark() {
   test_inductor_set_cpu_affinity

   cd benchmarks/operator_benchmark/pt_extension
-  python -m pip install . -v --no-build-isolation
+  python -m pip install .

   cd "${TEST_DIR}"/benchmarks/operator_benchmark
   $TASKSET python -m benchmark_all_test --device "$1" --tag-filter "$2" \

@@ -1630,25 +1606,6 @@ test_operator_benchmark() {
     --expected "expected_ci_operator_benchmark_eager_float32_cpu.csv"
 }

-test_operator_microbenchmark() {
-  TEST_REPORTS_DIR=$(pwd)/test/test-reports
-  mkdir -p "$TEST_REPORTS_DIR"
-  TEST_DIR=$(pwd)
-
-  cd benchmarks/operator_benchmark/pt_extension
-  python -m pip install .
-
-  cd "${TEST_DIR}"/benchmarks/operator_benchmark
-  for OP_BENCHMARK_TESTS in matmul mm addmm bmm; do
-    $TASKSET python -m pt.${OP_BENCHMARK_TESTS}_test --tag-filter long \
-      --output-json-for-dashboard "${TEST_REPORTS_DIR}/operator_microbenchmark_${OP_BENCHMARK_TESTS}_compile.json" \
-      --benchmark-name "PyTorch operator microbenchmark" --use-compile
-    $TASKSET python -m pt.${OP_BENCHMARK_TESTS}_test --tag-filter long \
-      --output-json-for-dashboard "${TEST_REPORTS_DIR}/operator_microbenchmark_${OP_BENCHMARK_TESTS}.json" \
-      --benchmark-name "PyTorch operator microbenchmark"
-  done
-}
-
 if ! [[ "${BUILD_ENVIRONMENT}" == *libtorch* || "${BUILD_ENVIRONMENT}" == *-bazel-* ]]; then
   (cd test && python -c "import torch; print(torch.__config__.show())")

@@ -1681,8 +1638,6 @@ elif [[ "${TEST_CONFIG}" == *executorch* ]]; then
   test_executorch
 elif [[ "$TEST_CONFIG" == 'jit_legacy' ]]; then
   test_python_legacy_jit
-elif [[ "$TEST_CONFIG" == 'quantization' ]]; then
-  test_quantization
 elif [[ "${BUILD_ENVIRONMENT}" == *libtorch* ]]; then
   # TODO: run some C++ tests
   echo "no-op at the moment"

@@ -1705,8 +1660,6 @@ elif [[ "${TEST_CONFIG}" == *operator_benchmark* ]]; then
     test_operator_benchmark cpu ${TEST_MODE}
   fi
-elif [[ "${TEST_CONFIG}" == *operator_microbenchmark* ]]; then
-  test_operator_microbenchmark
 elif [[ "${TEST_CONFIG}" == *inductor_distributed* ]]; then
   test_inductor_distributed
 elif [[ "${TEST_CONFIG}" == *inductor-halide* ]]; then

@@ -1760,6 +1713,11 @@ elif [[ "${TEST_CONFIG}" == *inductor_cpp_wrapper* ]]; then
 elif [[ "${TEST_CONFIG}" == *inductor* ]]; then
   install_torchvision
   test_inductor_shard "${SHARD_NUMBER}"
+  if [[ "${SHARD_NUMBER}" == 1 ]]; then
+    if [[ "${BUILD_ENVIRONMENT}" != linux-jammy-py3.9-gcc11-build ]]; then
+      test_inductor_distributed
+    fi
+  fi
 elif [[ "${TEST_CONFIG}" == *einops* ]]; then
   test_einops
 elif [[ "${TEST_CONFIG}" == *dynamo_wrapped* ]]; then

@@ -1809,14 +1767,10 @@ elif [[ "${BUILD_ENVIRONMENT}" == *xpu* ]]; then
   test_xpu_bin
 elif [[ "${TEST_CONFIG}" == smoke ]]; then
   test_python_smoke
-elif [[ "${TEST_CONFIG}" == smoke_b200 ]]; then
-  test_python_smoke_b200
 elif [[ "${TEST_CONFIG}" == h100_distributed ]]; then
   test_h100_distributed
 elif [[ "${TEST_CONFIG}" == "h100-symm-mem" ]]; then
   test_h100_symm_mem
-elif [[ "${TEST_CONFIG}" == "b200-symm-mem" ]]; then
-  test_h100_symm_mem
 elif [[ "${TEST_CONFIG}" == h100_cutlass_backend ]]; then
   test_h100_cutlass_backend
 else

View File

@@ -70,7 +70,7 @@ sccache --zero-stats
 sccache --show-stats

 # Build the wheel
-python -m build --wheel --no-build-isolation
+python setup.py bdist_wheel
 if ($LASTEXITCODE -ne 0) { exit 1 }

 # Install the wheel locally
View File

@@ -130,14 +130,14 @@ if "%USE_CUDA%"=="1" (

 :: Print all existing environment variable for debugging
 set

-python -m build --wheel --no-isolation
+python setup.py bdist_wheel
 if errorlevel 1 goto fail
 if not errorlevel 0 goto fail
 sccache --show-stats
 python -c "import os, glob; os.system('python -mpip install --no-index --no-deps ' + glob.glob('dist/*.whl')[0])"
 (
   if "%BUILD_ENVIRONMENT%"=="" (
-    echo NOTE: To run `import torch`, please make sure to activate the conda environment by running `call %CONDA_ROOT_DIR%\Scripts\activate.bat %CONDA_ROOT_DIR%\envs\py_tmp` in Command Prompt before running Git Bash.
+    echo NOTE: To run `import torch`, please make sure to activate the conda environment by running `call %CONDA_PARENT_DIR%\Miniconda3\Scripts\activate.bat %CONDA_PARENT_DIR%\Miniconda3` in Command Prompt before running Git Bash.
   ) else (
     copy /Y "dist\*.whl" "%PYTORCH_FINAL_PACKAGE_DIR%"

View File

@@ -3,12 +3,12 @@ if "%BUILD_ENVIRONMENT%"=="" (
 ) else (
     set CONDA_PARENT_DIR=C:\Jenkins
 )
-set CONDA_ROOT_DIR=%CONDA_PARENT_DIR%\Miniconda3

 :: Be conservative here when rolling out the new AMI with conda. This will try
 :: to install conda as before if it couldn't find the conda installation. This
 :: can be removed eventually after we gain enough confidence in the AMI
-if not exist %CONDA_ROOT_DIR% (
+if not exist %CONDA_PARENT_DIR%\Miniconda3 (
     set INSTALL_FRESH_CONDA=1
 )

@@ -17,14 +17,10 @@ if "%INSTALL_FRESH_CONDA%"=="1" (
     if errorlevel 1 exit /b
     if not errorlevel 0 exit /b

-    %TMP_DIR_WIN%\Miniconda3-latest-Windows-x86_64.exe /InstallationType=JustMe /RegisterPython=0 /S /AddToPath=0 /D=%CONDA_ROOT_DIR%
+    %TMP_DIR_WIN%\Miniconda3-latest-Windows-x86_64.exe /InstallationType=JustMe /RegisterPython=0 /S /AddToPath=0 /D=%CONDA_PARENT_DIR%\Miniconda3
     if errorlevel 1 exit /b
     if not errorlevel 0 exit /b
 )

 :: Activate conda so that we can use its commands, i.e. conda, python, pip
-call %CONDA_ROOT_DIR%\Scripts\activate.bat %CONDA_ROOT_DIR%
-:: Activate conda so that we can use its commands, i.e. conda, python, pip
-call conda activate py_tmp
-call pip install -r .ci/docker/requirements-ci.txt
+call %CONDA_PARENT_DIR%\Miniconda3\Scripts\activate.bat %CONDA_PARENT_DIR%\Miniconda3

View File

@@ -14,7 +14,7 @@ if not errorlevel 0 exit /b
 :: build\torch. Rather than changing all these references, making a copy of torch folder
 :: from conda to the current workspace is easier. The workspace will be cleaned up after
 :: the job anyway
-xcopy /s %CONDA_ROOT_DIR%\envs\py_tmp\Lib\site-packages\torch %TMP_DIR_WIN%\build\torch\
+xcopy /s %CONDA_PARENT_DIR%\Miniconda3\Lib\site-packages\torch %TMP_DIR_WIN%\build\torch\

 pushd .
 if "%VC_VERSION%" == "" (

View File

@@ -25,7 +25,7 @@ echo Copying over test times file
 robocopy /E "%PYTORCH_FINAL_PACKAGE_DIR_WIN%\.additional_ci_files" "%PROJECT_DIR_WIN%\.additional_ci_files"

 echo Run nn tests
-python run_test.py --exclude-jit-executor --exclude-distributed-tests --exclude-quantization-tests --shard "%SHARD_NUMBER%" "%NUM_TEST_SHARDS%" --verbose
+python run_test.py --exclude-jit-executor --exclude-distributed-tests --shard "%SHARD_NUMBER%" "%NUM_TEST_SHARDS%" --verbose
 if ERRORLEVEL 1 goto fail

 popd

View File

@@ -38,20 +38,13 @@ if [[ "$BUILD_ENVIRONMENT" == *cuda* ]]; then
 fi

 # TODO: Move both of them to Windows AMI
-python -m pip install tensorboard==2.13.0 protobuf==5.29.4 pytest-subtests==0.13.1
-
-# Copied from https://github.com/pytorch/test-infra/blob/be01a40157c36cd5a48391fdf44a7bc3ebd4c7e3/aws/ami/windows/scripts/Installers/Install-Pip-Dependencies.ps1#L16 with some adjustments
-# pytest-rerunfailures==10.3 as 10.2 fails with INTERNALERROR> pluggy._manager.PluginValidationError: unknown hook 'pytest_configure_node'
-# scipy from 1.6.3 to 1.10
-# expecttest from 0.1.3 to 0.3.0
-# xdoctest from 1.0.2 to 1.3.0
-python -m pip install "future==0.18.2" "hypothesis==5.35.1" "expecttest==0.3.0" "librosa>=0.6.2" "scipy==1.10.1" "psutil==5.9.1" "pynvml==11.4.1" "pillow==9.2.0" "unittest-xml-reporting<=3.2.0,>=2.0.0" "pytest==7.1.3" "pytest-xdist==2.5.0" "pytest-flakefinder==1.1.0" "pytest-rerunfailures==10.3" "pytest-shard==0.1.2" "sympy==1.11.1" "xdoctest==1.3.0" "pygments==2.12.0" "opt-einsum>=3.3" "networkx==2.8.8" "mpmath==1.2.1" "pytest-cpp==2.3.0" "boto3==1.35.42"
+python -m pip install pytest-rerunfailures==10.3 pytest-cpp==2.3.0 tensorboard==2.13.0 protobuf==5.29.4 pytest-subtests==0.13.1

 # Install Z3 optional dependency for Windows builds.
 python -m pip install z3-solver==4.15.1.0

 # Install tlparse for test\dynamo\test_structured_trace.py UTs.
-python -m pip install tlparse==0.4.0
+python -m pip install tlparse==0.3.30

 # Install parameterized
 python -m pip install parameterized==0.8.1

@@ -59,6 +52,9 @@ python -m pip install parameterized==0.8.1
 # Install pulp for testing ilps under torch\distributed\_tools
 python -m pip install pulp==2.9.0

+# Install expecttest to merge https://github.com/pytorch/pytorch/pull/155308
+python -m pip install expecttest==0.3.0
+
 run_tests() {
   # Run nvidia-smi if available
   for path in '/c/Program Files/NVIDIA Corporation/NVSMI/nvidia-smi.exe' /c/Windows/System32/nvidia-smi.exe; do

View File

@@ -48,7 +48,7 @@ sccache --zero-stats
 sccache --show-stats

 :: Call PyTorch build script
-python -m build --wheel --no-isolation --outdir "%PYTORCH_FINAL_PACKAGE_DIR%"
+python setup.py bdist_wheel -d "%PYTORCH_FINAL_PACKAGE_DIR%"

 :: show sccache stats
 sccache --show-stats

View File

@@ -1,20 +1,12 @@
-if %CUDA_VERSION% geq 130 (
-    set "dll_path=bin\x64"
-) else (
-    set "dll_path=bin"
-)
-
-copy "%CUDA_PATH%\%dll_path%\cusparse*64_*.dll*" pytorch\torch\lib
-copy "%CUDA_PATH%\%dll_path%\cublas*64_*.dll*" pytorch\torch\lib
-copy "%CUDA_PATH%\%dll_path%\cudart*64_*.dll*" pytorch\torch\lib
-copy "%CUDA_PATH%\%dll_path%\curand*64_*.dll*" pytorch\torch\lib
-copy "%CUDA_PATH%\%dll_path%\cufft*64_*.dll*" pytorch\torch\lib
-copy "%CUDA_PATH%\%dll_path%\cusolver*64_*.dll*" pytorch\torch\lib
-copy "%CUDA_PATH%\%dll_path%\nvrtc*64_*.dll*" pytorch\torch\lib
-copy "%CUDA_PATH%\%dll_path%\nvJitLink_*.dll*" pytorch\torch\lib
+copy "%CUDA_PATH%\bin\cusparse*64_*.dll*" pytorch\torch\lib
+copy "%CUDA_PATH%\bin\cublas*64_*.dll*" pytorch\torch\lib
+copy "%CUDA_PATH%\bin\cudart*64_*.dll*" pytorch\torch\lib
+copy "%CUDA_PATH%\bin\curand*64_*.dll*" pytorch\torch\lib
+copy "%CUDA_PATH%\bin\cufft*64_*.dll*" pytorch\torch\lib
+copy "%CUDA_PATH%\bin\cusolver*64_*.dll*" pytorch\torch\lib
 copy "%CUDA_PATH%\bin\cudnn*64_*.dll*" pytorch\torch\lib
+copy "%CUDA_PATH%\bin\nvrtc*64_*.dll*" pytorch\torch\lib
 copy "%CUDA_PATH%\extras\CUPTI\lib64\cupti64_*.dll*" pytorch\torch\lib
 copy "%CUDA_PATH%\extras\CUPTI\lib64\nvperf_host*.dll*" pytorch\torch\lib

@@ -28,3 +20,8 @@ copy "%libuv_ROOT%\bin\uv.dll" pytorch\torch\lib
 if exist "C:\Windows\System32\zlibwapi.dll" (
     copy "C:\Windows\System32\zlibwapi.dll" pytorch\torch\lib
 )
+
+::copy nvJitLink dll is requires for cuda 12+
+if exist "%CUDA_PATH%\bin\nvJitLink_*.dll*" (
+    copy "%CUDA_PATH%\bin\nvJitLink_*.dll*" pytorch\torch\lib
+)
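The minus side of the first hunk keys the copy source off the toolkit layout: per its own condition, CUDA 13 Windows toolkits place the x64 runtime DLLs under bin\x64 rather than bin. The selection logic restated as a tiny Python helper (the function name is hypothetical):

def cuda_dll_dir(cuda_version: int) -> str:
    # cuda_version follows the batch script's convention, e.g. 126, 128, 130
    return r"bin\x64" if cuda_version >= 130 else "bin"

assert cuda_dll_dir(128) == "bin"
assert cuda_dll_dir(130) == r"bin\x64"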

View File

@@ -1,9 +1,9 @@
-set WIN_DRIVER_VN=580.88
-set "DRIVER_DOWNLOAD_LINK=https://ossci-windows.s3.amazonaws.com/%WIN_DRIVER_VN%-data-center-tesla-desktop-win10-win11-64bit-dch-international.exe" & REM @lint-ignore
-curl --retry 3 -kL %DRIVER_DOWNLOAD_LINK% --output %WIN_DRIVER_VN%-data-center-tesla-desktop-win10-win11-64bit-dch-international.exe
+set WIN_DRIVER_VN=528.89
+set "DRIVER_DOWNLOAD_LINK=https://ossci-windows.s3.amazonaws.com/%WIN_DRIVER_VN%-data-center-tesla-desktop-winserver-2016-2019-2022-dch-international.exe" & REM @lint-ignore
+curl --retry 3 -kL %DRIVER_DOWNLOAD_LINK% --output %WIN_DRIVER_VN%-data-center-tesla-desktop-winserver-2016-2019-2022-dch-international.exe
 if errorlevel 1 exit /b 1

-start /wait %WIN_DRIVER_VN%-data-center-tesla-desktop-win10-win11-64bit-dch-international.exe -s -noreboot
+start /wait %WIN_DRIVER_VN%-data-center-tesla-desktop-winserver-2016-2019-2022-dch-international.exe -s -noreboot
 if errorlevel 1 exit /b 1

-del %WIN_DRIVER_VN%-data-center-tesla-desktop-win10-win11-64bit-dch-international.exe || ver > NUL
+del %WIN_DRIVER_VN%-data-center-tesla-desktop-winserver-2016-2019-2022-dch-international.exe || ver > NUL

View File

@@ -28,5 +28,5 @@ start /wait "" python-amd64.exe /quiet InstallAllUsers=1 PrependPath=0 Include_t
 if errorlevel 1 exit /b 1

 set "PATH=%CD%\Python\Scripts;%CD%\Python;%PATH%"
-%PYTHON_EXEC% -m pip install --upgrade pip setuptools packaging wheel build
+%PYTHON_EXEC% -m pip install --upgrade pip setuptools packaging wheel
 if errorlevel 1 exit /b 1

View File

@@ -86,7 +86,7 @@ copy /Y "%LIBTORCH_PREFIX%-%PYTORCH_BUILD_VERSION%.zip" "%PYTORCH_FINAL_PACKAGE_
 goto build_end

 :pytorch
-%PYTHON_EXEC% -m build --wheel --no-isolation --outdir "%PYTORCH_FINAL_PACKAGE_DIR%"
+%PYTHON_EXEC% setup.py bdist_wheel -d "%PYTORCH_FINAL_PACKAGE_DIR%"

 :build_end
 IF ERRORLEVEL 1 exit /b 1

View File

@@ -63,7 +63,7 @@ if errorlevel 1 exit /b 1
 call %CONDA_HOME%\condabin\activate.bat testenv
 if errorlevel 1 exit /b 1

-call conda install -y -q -c conda-forge libuv=1.51
+call conda install -y -q -c conda-forge libuv=1.39
 call conda install -y -q intel-openmp

 echo "install and test libtorch"

View File

@@ -13,9 +13,9 @@ if not exist "%SRC_DIR%\temp_build" mkdir "%SRC_DIR%\temp_build"

 :xpu_bundle_install_start

 set XPU_BUNDLE_PARENT_DIR=C:\Program Files (x86)\Intel\oneAPI
-set XPU_BUNDLE_URL=https://registrationcenter-download.intel.com/akdlm/IRC_NAS/75d4eb97-914a-4a95-852c-7b9733d80f74/intel-deep-learning-essentials-2025.1.3.8_offline.exe
+set XPU_BUNDLE_URL=https://registrationcenter-download.intel.com/akdlm/IRC_NAS/9d6d6c17-ca2d-4735-9331-99447e4a1280/intel-deep-learning-essentials-2025.0.1.28_offline.exe
 set XPU_BUNDLE_PRODUCT_NAME=intel.oneapi.win.deep-learning-essentials.product
-set XPU_BUNDLE_VERSION=2025.1.3+5
+set XPU_BUNDLE_VERSION=2025.0.1+20
 set XPU_BUNDLE_INSTALLED=0
 set XPU_BUNDLE_UNINSTALL=0
 set XPU_EXTRA_URL=NULL

@@ -24,9 +24,9 @@ set XPU_EXTRA_VERSION=2025.0.1+1226
 set XPU_EXTRA_INSTALLED=0
 set XPU_EXTRA_UNINSTALL=0

-if not [%XPU_VERSION%]==[] if [%XPU_VERSION%]==[2025.2] (
-    set XPU_BUNDLE_URL=https://registrationcenter-download.intel.com/akdlm/IRC_NAS/24751ead-ddc5-4479-b9e6-f9fe2ff8b9f2/intel-deep-learning-essentials-2025.2.1.25_offline.exe
-    set XPU_BUNDLE_VERSION=2025.2.1+20
+if not [%XPU_VERSION%]==[] if [%XPU_VERSION%]==[2025.1] (
+    set XPU_BUNDLE_URL=https://registrationcenter-download.intel.com/akdlm/IRC_NAS/75d4eb97-914a-4a95-852c-7b9733d80f74/intel-deep-learning-essentials-2025.1.3.8_offline.exe
+    set XPU_BUNDLE_VERSION=2025.1.3+5
 )

 :: Check if XPU bundle is target version or already installed

@@ -90,3 +90,14 @@ if errorlevel 1 exit /b 1
 del xpu_extra.exe

 :xpu_install_end
+
+if not "%XPU_ENABLE_KINETO%"=="1" goto install_end
+:: Install Level Zero SDK
+set XPU_EXTRA_LZ_URL=https://github.com/oneapi-src/level-zero/releases/download/v1.14.0/level-zero-sdk_1.14.0.zip
+curl -k -L %XPU_EXTRA_LZ_URL% --output "%SRC_DIR%\temp_build\level_zero_sdk.zip"
+echo "Installing level zero SDK..."
+7z x "%SRC_DIR%\temp_build\level_zero_sdk.zip" -o"%SRC_DIR%\temp_build\level_zero"
+set "INCLUDE=%SRC_DIR%\temp_build\level_zero\include;%INCLUDE%"
+del "%SRC_DIR%\temp_build\level_zero_sdk.zip"
+
+:install_end

View File

@@ -18,7 +18,7 @@ if "%DESIRED_PYTHON%" == "3.9" %PYTHON_EXEC% -m pip install numpy==2.0.2 cmake

 %PYTHON_EXEC% -m pip install pyyaml
 %PYTHON_EXEC% -m pip install mkl-include mkl-static
-%PYTHON_EXEC% -m pip install boto3 requests ninja typing_extensions setuptools==72.1.0
+%PYTHON_EXEC% -m pip install boto3 ninja typing_extensions setuptools==72.1.0

 where cmake.exe

View File

@@ -85,7 +85,7 @@ mkdir -p "$PYTORCH_FINAL_PACKAGE_DIR" || true

 # Create an isolated directory to store this builds pytorch checkout and conda
 # installation
 if [[ -z "$MAC_PACKAGE_WORK_DIR" ]]; then
-    MAC_PACKAGE_WORK_DIR="$(pwd)/tmp_wheel_${DESIRED_PYTHON}_$(date +%H%M%S)"
+    MAC_PACKAGE_WORK_DIR="$(pwd)/tmp_wheel_conda_${DESIRED_PYTHON}_$(date +%H%M%S)"
 fi
 mkdir -p "$MAC_PACKAGE_WORK_DIR" || true
 if [[ -n ${GITHUB_ACTIONS} ]]; then

@@ -96,11 +96,11 @@ fi
 whl_tmp_dir="${MAC_PACKAGE_WORK_DIR}/dist"
 mkdir -p "$whl_tmp_dir"

-mac_version='macosx-11_0-arm64'
+mac_version='macosx_11_0_arm64'
 libtorch_arch='arm64'

 # Create a consistent wheel package name to rename the wheel to
-wheel_filename_new="${TORCH_PACKAGE_NAME}-${build_version}${build_number_prefix}-cp${python_nodot}-none-${mac_version//[-,]/_}.whl"
+wheel_filename_new="${TORCH_PACKAGE_NAME}-${build_version}${build_number_prefix}-cp${python_nodot}-none-${mac_version}.whl"

 ###########################################################
@ -124,58 +124,93 @@ popd
export TH_BINARY_BUILD=1 export TH_BINARY_BUILD=1
export INSTALL_TEST=0 # don't install test binaries into site-packages export INSTALL_TEST=0 # don't install test binaries into site-packages
export MACOSX_DEPLOYMENT_TARGET=11.0 export MACOSX_DEPLOYMENT_TARGET=10.15
export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"}
SETUPTOOLS_PINNED_VERSION="==70.1.0"
PYYAML_PINNED_VERSION="==5.3"
EXTRA_CONDA_INSTALL_FLAGS="" EXTRA_CONDA_INSTALL_FLAGS=""
CONDA_ENV_CREATE_FLAGS="" CONDA_ENV_CREATE_FLAGS=""
RENAME_WHEEL=true RENAME_WHEEL=true
case $desired_python in case $desired_python in
3.14t) 3.14t)
echo "Using 3.14 deps" echo "Using 3.14 deps"
mac_version='macosx-11.0-arm64' SETUPTOOLS_PINNED_VERSION=">=70.1.0"
PYYAML_PINNED_VERSION=">=6.0.1"
NUMPY_PINNED_VERSION="==2.1.0" NUMPY_PINNED_VERSION="==2.1.0"
CONDA_ENV_CREATE_FLAGS="python-freethreading"
EXTRA_CONDA_INSTALL_FLAGS="-c conda-forge/label/python_rc -c conda-forge"
desired_python="3.14.0rc1"
RENAME_WHEEL=false RENAME_WHEEL=false
;; ;;
3.14) 3.14)
echo "Using 3.14t deps" echo "Using 3.14t deps"
mac_version='macosx-11.0-arm64' SETUPTOOLS_PINNED_VERSION=">=70.1.0"
PYYAML_PINNED_VERSION=">=6.0.1"
NUMPY_PINNED_VERSION="==2.1.0" NUMPY_PINNED_VERSION="==2.1.0"
EXTRA_CONDA_INSTALL_FLAGS="-c conda-forge/label/python_rc -c conda-forge"
desired_python="3.14.0rc1"
RENAME_WHEEL=false RENAME_WHEEL=false
;; ;;
3.13t) 3.13t)
echo "Using 3.13t deps" echo "Using 3.13 deps"
mac_version='macosx-11.0-arm64' SETUPTOOLS_PINNED_VERSION=">=70.1.0"
PYYAML_PINNED_VERSION=">=6.0.1"
NUMPY_PINNED_VERSION="==2.1.0" NUMPY_PINNED_VERSION="==2.1.0"
CONDA_ENV_CREATE_FLAGS="python-freethreading"
EXTRA_CONDA_INSTALL_FLAGS="-c conda-forge"
desired_python="3.13"
RENAME_WHEEL=false RENAME_WHEEL=false
;; ;;
3.13) 3.13)
echo "Using 3.13 deps" echo "Using 3.13 deps"
SETUPTOOLS_PINNED_VERSION=">=70.1.0"
PYYAML_PINNED_VERSION=">=6.0.1"
NUMPY_PINNED_VERSION="==2.1.0" NUMPY_PINNED_VERSION="==2.1.0"
;; ;;
3.12) 3.12)
echo "Using 3.12 deps" echo "Using 3.12 deps"
SETUPTOOLS_PINNED_VERSION=">=70.1.0"
PYYAML_PINNED_VERSION=">=6.0.1"
NUMPY_PINNED_VERSION="==2.0.2" NUMPY_PINNED_VERSION="==2.0.2"
;; ;;
3.11) 3.11)
echo "Using 3.11 deps" echo "Using 3.11 deps"
SETUPTOOLS_PINNED_VERSION=">=70.1.0"
PYYAML_PINNED_VERSION=">=5.3"
NUMPY_PINNED_VERSION="==2.0.2" NUMPY_PINNED_VERSION="==2.0.2"
;; ;;
3.10) 3.10)
echo "Using 3.10 deps" echo "Using 3.10 deps"
SETUPTOOLS_PINNED_VERSION=">=70.1.0"
PYYAML_PINNED_VERSION=">=5.3"
NUMPY_PINNED_VERSION="==2.0.2"
;;
3.9)
echo "Using 3.9 deps"
SETUPTOOLS_PINNED_VERSION=">=70.1.0"
PYYAML_PINNED_VERSION=">=5.3"
NUMPY_PINNED_VERSION="==2.0.2" NUMPY_PINNED_VERSION="==2.0.2"
;; ;;
*) *)
echo "Unsupported version $desired_python" echo "Using default deps"
exit 1 NUMPY_PINNED_VERSION="==1.11.3"
;; ;;
esac esac
# Install into a fresh env
tmp_env_name="wheel_py$python_nodot"
conda create ${EXTRA_CONDA_INSTALL_FLAGS} -yn "$tmp_env_name" python="$desired_python" ${CONDA_ENV_CREATE_FLAGS}
source activate "$tmp_env_name"
PINNED_PACKAGES=( PINNED_PACKAGES=(
"setuptools${SETUPTOOLS_PINNED_VERSION}"
"pyyaml${PYYAML_PINNED_VERSION}"
"numpy${NUMPY_PINNED_VERSION}" "numpy${NUMPY_PINNED_VERSION}"
) )
python -mvenv ~/${desired_python}-build retry pip install "${PINNED_PACKAGES[@]}" -r "${pytorch_rootdir}/requirements-build.txt"
source ~/${desired_python}-build/bin/activate pip install requests ninja typing-extensions
retry pip install "${PINNED_PACKAGES[@]}" -r "${pytorch_rootdir}/requirements.txt" retry pip install -r "${pytorch_rootdir}/requirements.txt" || true
retry brew install libomp retry brew install libomp
# For USE_DISTRIBUTED=1 on macOS, need libuv, which is built as part of the tensorpipe submodule # For USE_DISTRIBUTED=1 on macOS, need libuv, which is built as part of the tensorpipe submodule
@ -186,11 +221,11 @@ export USE_QNNPACK=OFF
export BUILD_TEST=OFF export BUILD_TEST=OFF
pushd "$pytorch_rootdir" pushd "$pytorch_rootdir"
echo "Calling -m build --wheel --no-isolation at $(date)" echo "Calling setup.py bdist_wheel at $(date)"
_PYTHON_HOST_PLATFORM=${mac_version} ARCHFLAGS="-arch arm64" python -m build --wheel --no-isolation --outdir "$whl_tmp_dir" -C--plat-name="${mac_version//[-.]/_}" python setup.py bdist_wheel -d "$whl_tmp_dir"
echo "Finished -m build --wheel --no-isolation at $(date)" echo "Finished setup.py bdist_wheel at $(date)"
if [[ $package_type != 'libtorch' ]]; then if [[ $package_type != 'libtorch' ]]; then
echo "delocating wheel dependencies" echo "delocating wheel dependencies"

View File

@ -75,8 +75,8 @@ TRITON_VERSION=$(cat $PYTORCH_ROOT/.ci/docker/triton_version.txt)
# Here PYTORCH_EXTRA_INSTALL_REQUIREMENTS is already set for all the wheel builds, hence append TRITON_CONSTRAINT # Here PYTORCH_EXTRA_INSTALL_REQUIREMENTS is already set for all the wheel builds, hence append TRITON_CONSTRAINT
TRITON_CONSTRAINT="platform_system == 'Linux' and platform_machine == 'x86_64'" TRITON_CONSTRAINT="platform_system == 'Linux' and platform_machine == 'x86_64'"
# CUDA 12.9/13.0 builds have triton for Linux and Linux aarch64 binaries. # CUDA 12.9 builds have triton for Linux and Linux aarch64 binaries.
if [[ "$DESIRED_CUDA" == "cu129" ]] || [[ "$DESIRED_CUDA" == "cu130" ]]; then if [[ "$DESIRED_CUDA" == "cu129" ]]; then
TRITON_CONSTRAINT="platform_system == 'Linux'" TRITON_CONSTRAINT="platform_system == 'Linux'"
fi fi
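
The constraint above is a PEP 508 environment marker attached to the triton requirement appended to PYTORCH_EXTRA_INSTALL_REQUIREMENTS. A hedged sketch of the resulting requirement string (the TRITON_REQUIREMENT name and the version value are assumptions; only the constraint logic appears in the diff, and the real TRITON_VERSION is read from .ci/docker/triton_version.txt per the hunk header):

TRITON_VERSION=3.1.0   # hypothetical value for illustration
TRITON_CONSTRAINT="platform_system == 'Linux' and platform_machine == 'x86_64'"
if [[ "$DESIRED_CUDA" == "cu129" ]] || [[ "$DESIRED_CUDA" == "cu130" ]]; then
    TRITON_CONSTRAINT="platform_system == 'Linux'"
fi
TRITON_REQUIREMENT="triton==${TRITON_VERSION}; ${TRITON_CONSTRAINT}"
echo "$TRITON_REQUIREMENT"   # e.g. triton==3.1.0; platform_system == 'Linux'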

View File

@ -15,7 +15,8 @@ fi
if [[ "$DESIRED_CUDA" == 'xpu' ]]; then if [[ "$DESIRED_CUDA" == 'xpu' ]]; then
export VC_YEAR=2022 export VC_YEAR=2022
export USE_SCCACHE=0 export USE_SCCACHE=0
export XPU_VERSION=2025.2 export XPU_VERSION=2025.1
export XPU_ENABLE_KINETO=1
fi fi
echo "Free space on filesystem before build:" echo "Free space on filesystem before build:"

View File

@ -8,7 +8,7 @@ export VC_YEAR=2022
if [[ "$DESIRED_CUDA" == 'xpu' ]]; then if [[ "$DESIRED_CUDA" == 'xpu' ]]; then
export VC_YEAR=2022 export VC_YEAR=2022
export XPU_VERSION=2025.2 export XPU_VERSION=2025.1
fi fi
pushd "$PYTORCH_ROOT/.ci/pytorch/" pushd "$PYTORCH_ROOT/.ci/pytorch/"

View File

@ -0,0 +1,47 @@
#!/bin/bash
# =================== The following code **should** be executed inside Docker container ===================
# Install dependencies
sudo apt-get -y update
sudo apt-get -y install expect-dev
# This is where the local pytorch install in the docker image is located
pt_checkout="/var/lib/jenkins/workspace"
source "$pt_checkout/.ci/pytorch/common_utils.sh"
echo "functorch_doc_push_script.sh: Invoked with $*"
set -ex
version=${DOCS_VERSION:-nightly}
echo "version: $version"
# Build functorch docs
pushd $pt_checkout/functorch/docs
pip -q install -r requirements.txt
make html
popd
git clone https://github.com/pytorch/functorch -b gh-pages --depth 1 functorch_ghpages
pushd functorch_ghpages
if [ $version == "main" ]; then
version=nightly
fi
git rm -rf "$version" || true
mv "$pt_checkout/functorch/docs/build/html" "$version"
git add "$version" || true
git status
git config user.email "soumith+bot@pytorch.org"
git config user.name "pytorchbot"
# If there aren't changes, don't make a commit; push is no-op
git commit -m "Generate Python docs from pytorch/pytorch@${GITHUB_SHA}" || true
git status
if [[ "${WITH_PUSH:-}" == true ]]; then
git push -u origin gh-pages
fi
popd
# =================== The above code **should** be executed inside Docker container ===================
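
A hypothetical invocation of the script above inside the CI container; DOCS_VERSION and WITH_PUSH are the only knobs it reads besides GITHUB_SHA (the SHA below is a placeholder):

# Build the functorch docs for the nightly version and push gh-pages
DOCS_VERSION=nightly WITH_PUSH=true GITHUB_SHA=0123abc \
    bash functorch_doc_push_script.sh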

View File

@ -69,8 +69,6 @@ readability-string-compare,
' '
HeaderFilterRegex: '^(aten/|c10/|torch/).*$' HeaderFilterRegex: '^(aten/|c10/|torch/).*$'
WarningsAsErrors: '*' WarningsAsErrors: '*'
LineFilter:
- name: '/usr/include/.*'
CheckOptions: CheckOptions:
cppcoreguidelines-special-member-functions.AllowSoleDefaultDtor: true cppcoreguidelines-special-member-functions.AllowSoleDefaultDtor: true
cppcoreguidelines-special-member-functions.AllowImplicitlyDeletedCopyOrMove: true cppcoreguidelines-special-member-functions.AllowImplicitlyDeletedCopyOrMove: true

View File

@ -73,7 +73,7 @@ exclude =
./docs/src, ./docs/src,
./functorch/docs, ./functorch/docs,
./functorch/examples, ./functorch/examples,
./functorch/docs/source/tutorials, ./functorch/notebooks,
./scripts, ./scripts,
./test/generated_type_hints_smoketest.py, ./test/generated_type_hints_smoketest.py,
./third_party, ./third_party,

View File

@ -1,10 +1,6 @@
--- ---
name: "⚠️ CI SEV" name: "⚠️ CI SEV"
about: Tracking incidents for PyTorch's CI infra. about: Tracking incidents for PyTorch's CI infra.
title: ''
labels: ''
assignees: ''
--- ---
> NOTE: Remember to label this issue with "`ci: sev`" > NOTE: Remember to label this issue with "`ci: sev`"

View File

@ -1,18 +0,0 @@
---
name: DISABLE AUTOREVERT
about: Disables autorevert when open
title: "❌​\U0001F519 [DISABLE AUTOREVERT]"
labels: 'ci: disable-autorevert'
assignees: ''
---
This issue, while open, disables the autorevert functionality.
More details can be found [here](https://github.com/pytorch/test-infra/blob/main/aws/lambda/pytorch-auto-revert/README.md)
## Why are you disabling autorevert?
## Links to any issues/commits/errors that shows the source of problem

View File

@ -1,10 +1,8 @@
--- ---
name: Disable CI jobs (PyTorch Dev Infra only) name: Disable CI jobs (PyTorch Dev Infra only)
about: Use this template to disable CI jobs about: Use this template to disable CI jobs
title: DISABLED [WORKFLOW_NAME] / [PLATFORM_NAME] / [JOB_NAME] title: "DISABLED [WORKFLOW_NAME] / [PLATFORM_NAME] / [JOB_NAME]"
labels: 'module: ci' labels: "module: ci"
assignees: ''
--- ---
> For example, DISABLED pull / win-vs2022-cpu-py3 / test (default). Once > For example, DISABLED pull / win-vs2022-cpu-py3 / test (default). Once

View File

@ -12,19 +12,13 @@ self-hosted-runner:
- linux.9xlarge.ephemeral - linux.9xlarge.ephemeral
- am2.linux.9xlarge.ephemeral - am2.linux.9xlarge.ephemeral
- linux.12xlarge - linux.12xlarge
- linux.12xlarge.memory
- linux.24xlarge - linux.24xlarge
- linux.24xlarge.memory
- linux.24xlarge.ephemeral - linux.24xlarge.ephemeral
- linux.24xlarge.amd - linux.24xlarge.amd
- linux.arm64.2xlarge - linux.arm64.2xlarge
- linux.arm64.2xlarge.ephemeral - linux.arm64.2xlarge.ephemeral
- linux.arm64.m7g.4xlarge - linux.arm64.m7g.4xlarge
- linux.arm64.m7g.4xlarge.ephemeral - linux.arm64.m7g.4xlarge.ephemeral
- linux.arm64.r7g.12xlarge.memory
- linux.aws.h100
- linux.aws.h100.4
- linux.aws.h100.8
- linux.4xlarge.nvidia.gpu - linux.4xlarge.nvidia.gpu
- linux.8xlarge.nvidia.gpu - linux.8xlarge.nvidia.gpu
- linux.16xlarge.nvidia.gpu - linux.16xlarge.nvidia.gpu

View File

@ -4,11 +4,6 @@ name: Build External packages
description: build external packages for PyTorch description: build external packages for PyTorch
inputs: inputs:
cuda-version:
description: CUDA version to use
type: string
required: true
default: '12.8.1'
cuda-arch-list: cuda-arch-list:
description: TORCH_CUDA_ARCH_LIST (e.g., "8.0;8.9;9.0") description: TORCH_CUDA_ARCH_LIST (e.g., "8.0;8.9;9.0")
type: string type: string
@ -49,12 +44,11 @@ runs:
env: env:
SCCACHE_BUCKET: ossci-compiler-cache-circleci-v2 SCCACHE_BUCKET: ossci-compiler-cache-circleci-v2
SCCACHE_REGION: us-east-1 SCCACHE_REGION: us-east-1
CUDA_VERSION: ${{ inputs.cuda-version }}
TORCH_CUDA_ARCH_LIST: ${{ inputs.cuda-arch-list }} TORCH_CUDA_ARCH_LIST: ${{ inputs.cuda-arch-list }}
BASE_IMAGE: ${{ inputs.docker-image }} BASE_IMAGE: ${{ inputs.docker-image }}
BUILD_TARGETS: ${{ inputs.build-targets }} BUILD_TARGETS: ${{ inputs.build-targets }}
PARENT_OUTPUT_DIR: ${{ inputs.output-dir }} PARENT_OUTPUT_DIR: ${{ inputs.output-dir}}
TORCH_WHEELS_PATH: ${{ inputs.torch-wheel-dir }}
shell: bash shell: bash
run: | run: |
set -euo pipefail set -euo pipefail
@ -75,6 +69,7 @@ runs:
export OUTPUT_DIR export OUTPUT_DIR
echo "Building external package: $target in directory $OUTPUT_DIR" echo "Building external package: $target in directory $OUTPUT_DIR"
python3 -m cli.run build external "$target" python3 -m cli.run build external "$target"
done done
END_TIME=$(date +%s) END_TIME=$(date +%s)

View File

@ -57,21 +57,6 @@ runs:
submodules: ${{ inputs.submodules }} submodules: ${{ inputs.submodules }}
show-progress: false show-progress: false
- name: Clean submodules post checkout
id: clean-submodules
if: ${{ steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'false' }}
shell: bash
env:
NO_SUDO: ${{ inputs.no-sudo }}
run: |
cd "${GITHUB_WORKSPACE}"
# Clean stale submodule dirs
if [ -z "${NO_SUDO}" ]; then
sudo git submodule foreach --recursive git clean -ffdx
else
git submodule foreach --recursive git clean -ffdx
fi
- name: Clean workspace (try again) - name: Clean workspace (try again)
if: ${{ steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'false' && if: ${{ steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'false' &&
(steps.first-clean.outcome != 'success' || steps.first-checkout-attempt.outcome != 'success') }} (steps.first-clean.outcome != 'success' || steps.first-checkout-attempt.outcome != 'success') }}

View File

@ -264,7 +264,7 @@ def unzip_artifact_and_replace_files() -> None:
change_content_to_new_version(f"artifacts/dist/{old_stem}/torch/version.py") change_content_to_new_version(f"artifacts/dist/{old_stem}/torch/version.py")
for file in Path(f"artifacts/dist/{old_stem}").glob( for file in Path(f"artifacts/dist/{old_stem}").glob(
"*.dist-info/*", "*.dist-info/**",
): ):
change_content_to_new_version(file) change_content_to_new_version(file)

View File

@ -6,12 +6,6 @@ inputs:
cuda-version: cuda-version:
description: which cuda version to install, 'cpu' for none description: which cuda version to install, 'cpu' for none
required: true required: true
python-version:
required: false
type: string
default: "3.10"
description: |
The python version to be used. Will be 3.10 by default
runs: runs:
using: composite using: composite
@ -44,24 +38,18 @@ runs:
CONDA="C:\Jenkins\Miniconda3\condabin\conda.bat" CONDA="C:\Jenkins\Miniconda3\condabin\conda.bat"
{ {
echo "CONDA=${CONDA}";
echo "CONDA_RUN=${CONDA} run --no-capture-output"; echo "CONDA_RUN=${CONDA} run --no-capture-output";
echo "CONDA_BUILD=${CONDA} run conda-build"; echo "CONDA_BUILD=${CONDA} run conda-build";
echo "CONDA_INSTALL=${CONDA} install"; echo "CONDA_INSTALL=${CONDA} install";
} >> "${GITHUB_ENV}" } >> "${GITHUB_ENV}"
- name: Setup Python3 - name: Setup Python3
env:
PYTHON_VERSION: ${{ inputs.python-version }}
shell: bash shell: bash
run: | run: |
set +e set +e
set -x set -x
# Create new py_tmp env with python-version PYTHON3=$(${CONDA_RUN} which python3)
${CONDA} create -y -n py_tmp python=${PYTHON_VERSION} intel-openmp libuv
PYTHON3=$(${CONDA_RUN} -n py_tmp which python3)
EXIT_CODE=$? EXIT_CODE=$?
if [[ "${EXIT_CODE}" == "0" ]]; then if [[ "${EXIT_CODE}" == "0" ]]; then
@ -74,7 +62,7 @@ runs:
# installation, which is Python 3 based. Its Python defaults to Python 3. Further, there # is also the Miniconda installation that is Python 2 based, and both can be installed if
# is also the Miniconda installation that is Python 2 based, and both can be installed if # is also the Miniconda installation that is Python 2 based, and both can be installed if
# needed. In both cases, Python binary is just called python # needed. In both cases, Python binary is just called python
PYTHON=$(${CONDA_RUN} -n py_tmp which python) PYTHON=$(${CONDA_RUN} which python)
EXIT_CODE=$? EXIT_CODE=$?
if [[ "${EXIT_CODE}" == "0" ]]; then if [[ "${EXIT_CODE}" == "0" ]]; then

View File

@ -1 +1 @@
87ff22e49ed0e92576c4935ccb8c143daac4a3cd 10a5002c6195bd95e34df8fe28ff8a2d55a2a922

View File

@ -1 +1 @@
08ae0af1395c8d8471f4025deb6af9aef90b342f 7f1de94a4c2d14f59ad4ca84538c36084ea6b2c8

View File

@ -1 +1 @@
78a47f87ce259a48f0391fa9ae15add05ea7432b add1adfec742dfb13e614dab3372b5aafd1ff046

View File

@ -1 +1 @@
0fc62aa26a30ed7ca419d285f285cb5ba02c4394 a1c6ee92c85e8b0955c20892ed68f032a6015c09

View File

@ -12,46 +12,54 @@ ARG BUILD_BASE_IMAGE=torch-nightly-base
# by default, it uses devel-ubuntu22.04 official image. # by default, it uses devel-ubuntu22.04 official image.
ARG FINAL_BASE_IMAGE=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04 ARG FINAL_BASE_IMAGE=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04
# The logic is copied from https://github.com/vllm-project/vllm/blob/main/docker/Dockerfile
ARG GET_PIP_URL="https://bootstrap.pypa.io/get-pip.py"
#################### TORCH NIGHTLY BASE IMAGE ####################
#################### TORCH NIGHTLY BASE IMAGE ####################
# A base image for building vLLM with devel ubuntu 22.04; this is mainly used to build vllm in the vllm Buildkite CI # A base image for building vLLM with devel ubuntu 22.04; this is mainly used to build vllm in the vllm Buildkite CI
FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04 as torch-nightly-base From nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04 as torch-nightly-base
ARG CUDA_VERSION=12.8.1
ARG PYTHON_VERSION=3.12
ARG TARGETPLATFORM
ENV DEBIAN_FRONTEND=noninteractive
ARG CUDA_VERSION RUN PYTHON_VERSION_STR=$(echo ${PYTHON_VERSION} | sed 's/\.//g') && \
ARG PYTHON_VERSION echo "export PYTHON_VERSION_STR=${PYTHON_VERSION_STR}" >> /etc/environment
ARG GET_PIP_URL
# Install Python and other dependencies # Install Python and other dependencies if it does not exist
RUN apt-get update -y \ RUN if ! command -v python3 >/dev/null || ! python3 --version | grep -q "${PYTHON_VERSION}"; then \
&& apt-get install -y ccache software-properties-common git curl wget sudo vim \ echo "Installing Python ${PYTHON_VERSION}..." && \
&& add-apt-repository -y ppa:deadsnakes/ppa \ echo 'tzdata tzdata/Areas select America' | debconf-set-selections && \
&& apt-get update -y \ echo 'tzdata tzdata/Zones/America select Los_Angeles' | debconf-set-selections && \
&& apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv \ apt-get update -y && \
&& update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1 \ apt-get install -y ccache software-properties-common git curl sudo && \
&& update-alternatives --set python3 /usr/bin/python${PYTHON_VERSION} \ for i in 1 2 3; do \
&& ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config \ add-apt-repository -y ppa:deadsnakes/ppa && break || \
&& curl -sS ${GET_PIP_URL} | python${PYTHON_VERSION} \ { echo "Attempt $i failed, retrying in 5s..."; sleep 5; }; \
&& python3 --version && python3 -m pip --version done && \
apt-get update -y && \
apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv && \
update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1 && \
update-alternatives --set python3 /usr/bin/python${PYTHON_VERSION} && \
ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config && \
curl -sS https://bootstrap.pypa.io/get-pip.py | python${PYTHON_VERSION}; \
else \
echo "Python ${PYTHON_VERSION} already present, skipping setup."; \
fi \
&& python3 --version && python3 -m pip --version
# Upgrade to GCC 10 to avoid https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92519 # Upgrade to GCC 10 to avoid https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92519
# as it was causing spam when compiling the CUTLASS kernels # as it was causing spam when compiling the CUTLASS kernels
# Ensure gcc >= 10 to avoid CUTLASS issues (bug 92519) # Ensure gcc >= 10 to avoid CUTLASS issues (bug 92519)
RUN current_gcc_version=$(gcc -dumpversion | cut -f1 -d.) && \ RUN current_gcc_version=$(gcc -dumpversion | cut -f1 -d.) && \
if command -v apt-get >/dev/null; then \ if [ "$current_gcc_version" -lt 10 ]; then \
if [ "$current_gcc_version" -lt 10 ]; then \ echo "GCC version is $current_gcc_version, installing gcc-10..."; \
echo "GCC version is $current_gcc_version, installing gcc-10..."; \ apt-get update && \
apt-get update \ apt-get install -y gcc-10 g++-10 && \
&& apt-get install -y gcc-10 g++-10 \ update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 100 && \
&& update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 100 \ update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-10 100; \
&& update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-10 100; \ else \
else \ echo "GCC version is $current_gcc_version, no need to install gcc-10."; \
echo "GCC version is $current_gcc_version, no need to install gcc-10."; \ fi && \
fi \ gcc --version && g++ --version
fi \
&& gcc --version && g++ --version
# install uv for faster pip installs # install uv for faster pip installs
RUN --mount=type=cache,target=/root/.cache/uv \ RUN --mount=type=cache,target=/root/.cache/uv \
@ -71,20 +79,11 @@ ENV UV_LINK_MODE=copy
FROM ${BUILD_BASE_IMAGE} AS base FROM ${BUILD_BASE_IMAGE} AS base
USER root USER root
ARG CUDA_VERSION # Workaround for https://github.com/openai/triton/issues/2507 and
ARG PYTHON_VERSION # https://github.com/pytorch/pytorch/issues/107960 -- hopefully
# this won't be needed for future versions of this docker image
# TODO (huydhn): Only works with PyTorch manylinux builder # or future versions of triton.
ENV PATH="/opt/python/cp312-cp312/bin:${PATH}" RUN ldconfig /usr/local/cuda-$(echo $CUDA_VERSION | cut -d. -f1,2)/compat/
# Install some system dependencies and double check python version
RUN if command -v apt-get >/dev/null; then \
apt-get update -y \
&& apt-get install -y ccache software-properties-common git curl wget sudo vim; \
else \
dnf install -y git curl wget sudo; \
fi \
&& python3 --version && python3 -m pip --version
# Install uv for faster pip installs if not already present # Install uv for faster pip installs if not already present
RUN --mount=type=cache,target=/root/.cache/uv \ RUN --mount=type=cache,target=/root/.cache/uv \
@ -119,15 +118,17 @@ RUN --mount=type=bind,source=${TORCH_WHEELS_PATH},target=/dist \
if [ -n "$TORCH_WHEELS_PATH" ] && [ "$TORCH_WHEELS_PATH" != "./requirements" ] && [ -d "/dist" ] && ls /dist/torch*.whl >/dev/null 2>&1; then \ if [ -n "$TORCH_WHEELS_PATH" ] && [ "$TORCH_WHEELS_PATH" != "./requirements" ] && [ -d "/dist" ] && ls /dist/torch*.whl >/dev/null 2>&1; then \
echo "[INFO] Installing torch wheels to build vllm"; \ echo "[INFO] Installing torch wheels to build vllm"; \
torch_whl=$(find /dist -maxdepth 1 -name 'torch-*.whl' -print -quit); \ torch_whl=$(find /dist -maxdepth 1 -name 'torch-*.whl' -print -quit); \
vision_whl=$(find /dist -name 'torchvision*.whl' | head -n1 | xargs); \ vision_whl=$(find /dist/vision -name 'torchvision*.whl' | head -n1 | xargs); \
audio_whl=$(find /dist -name 'torchaudio*.whl' | head -n1 | xargs); \ audio_whl=$(find /dist/audio -name 'torchaudio*.whl' | head -n1 | xargs); \
uv pip install --system "${torch_whl}[opt-einsum]" "${vision_whl}" "${audio_whl}" /dist/*.whl; \ uv pip install --system "${torch_whl}[opt-einsum]"; \
uv pip install --system "${vision_whl}"; \
uv pip install --system "${audio_whl}"; \
elif [ -n "$PINNED_TORCH_VERSION" ]; then \ elif [ -n "$PINNED_TORCH_VERSION" ]; then \
echo "[INFO] Installing pinned torch nightly version to build vllm: $PINNED_TORCH_VERSION"; \ echo "[INFO] Installing pinned torch nightly version to build vllm: $PINNED_TORCH_VERSION"; \
uv pip install --system "$PINNED_TORCH_VERSION" --index-url https://download.pytorch.org/whl/nightly/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.'); \ uv pip install --system "$PINNED_TORCH_VERSION" --index-url https://download.pytorch.org/whl/nightly/cu128; \
else \ else \
echo "[INFO] Installing torch nightly with latest one to build vllm"; \ echo "[INFO] Installing torch nightly with latest one to build vllm"; \
uv pip install --system torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.'); \ uv pip install --system torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu128; \
fi fi
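
The new side stops hardcoding cu128 and derives the nightly index suffix from CUDA_VERSION. The derivation, shown standalone:

# Major.minor of CUDA_VERSION, dots stripped, prefixed with "cu"
CUDA_VERSION=12.8.1
echo "cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.')"   # cu128
CUDA_VERSION=13.0.0
echo "cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.')"   # cu130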
# Install numba 0.61.2 for cuda environment # Install numba 0.61.2 for cuda environment
@ -136,11 +137,12 @@ RUN --mount=type=cache,target=/root/.cache/uv \
# Install common dependencies from vllm common.txt # Install common dependencies from vllm common.txt
RUN --mount=type=cache,target=/root/.cache/uv \ RUN --mount=type=cache,target=/root/.cache/uv \
uv pip install --system -r requirements/common.txt uv pip install --system -r requirements/common.txt
# Must come before installing xformers, so it can install the correct version of xformers. # Must come before installing xformers, so it can install the correct version of xformers.
ARG xformers_cuda_arch_list='7.5;8.0+PTX;9.0a' ARG exformer_cuda_arch_list='7.5;8.0+PTX;9.0a'
ENV TORCH_CUDA_ARCH_LIST=${xformers_cuda_arch_list} ENV TORCH_CUDA_ARCH_LIST=${exformer_cuda_arch_list}
ARG max_jobs=16 ARG max_jobs=16
ENV MAX_JOBS=${max_jobs} ENV MAX_JOBS=${max_jobs}
@ -151,8 +153,8 @@ RUN pip freeze | grep -E 'ninja'
# Build xformers with cuda and torch nightly/wheel # Build xformers with cuda and torch nightly/wheel
# following official xformers guidance: https://github.com/facebookresearch/xformers#build # following official xformers guidance: https://github.com/facebookresearch/xformers#build
# sha for https://github.com/facebookresearch/xformers/tree/v0.0.32.post2 # sha for https://github.com/facebookresearch/xformers/tree/v0.0.31
ARG XFORMERS_COMMIT=5d4b92a5e5a9c6c6d4878283f47d82e17995b468 ARG XFORMERS_COMMIT=eb0946a363464da96ea40afd1a7f72a907c25497
ENV CCACHE_DIR=/root/.cache/ccache ENV CCACHE_DIR=/root/.cache/ccache
RUN --mount=type=cache,target=/root/.cache/ccache \ RUN --mount=type=cache,target=/root/.cache/ccache \
@ -186,6 +188,11 @@ RUN pip freeze | grep -E 'torch|xformers|torchvision|torchaudio'
FROM base AS build FROM base AS build
ARG TARGETPLATFORM ARG TARGETPLATFORM
ENV UV_HTTP_TIMEOUT=500
ENV UV_INDEX_STRATEGY="unsafe-best-match"
# Use copy mode to avoid hardlink failures with Docker cache mounts
ENV UV_LINK_MODE=copy
COPY . . COPY . .
RUN python3 use_existing_torch.py RUN python3 use_existing_torch.py
@ -214,16 +221,11 @@ ARG SCCACHE_S3_NO_CREDENTIALS=0
RUN --mount=type=cache,target=/root/.cache/uv \ RUN --mount=type=cache,target=/root/.cache/uv \
--mount=type=bind,source=.git,target=.git \ --mount=type=bind,source=.git,target=.git \
if [ "$USE_SCCACHE" = "1" ]; then \ if [ "$USE_SCCACHE" = "1" ]; then \
echo "Installing sccache..."; \ echo "Installing sccache..." \
if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \ && curl -L -o sccache.tar.gz https://github.com/mozilla/sccache/releases/download/v0.8.1/sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz \
SCCACHE_ARCHIVE="sccache-v0.8.1-aarch64-unknown-linux-musl"; \
else \
SCCACHE_ARCHIVE="sccache-v0.8.1-x86_64-unknown-linux-musl"; \
fi; \
curl -L -o sccache.tar.gz "https://github.com/mozilla/sccache/releases/download/v0.8.1/${SCCACHE_ARCHIVE}.tar.gz" \
&& tar -xzf sccache.tar.gz \ && tar -xzf sccache.tar.gz \
&& sudo mv "${SCCACHE_ARCHIVE}"/sccache /usr/bin/sccache \ && sudo mv sccache-v0.8.1-x86_64-unknown-linux-musl/sccache /usr/bin/sccache \
&& rm -rf sccache.tar.gz "${SCCACHE_ARCHIVE}" \ && rm -rf sccache.tar.gz sccache-v0.8.1-x86_64-unknown-linux-musl \
&& export SCCACHE_BUCKET=${SCCACHE_BUCKET_NAME} \ && export SCCACHE_BUCKET=${SCCACHE_BUCKET_NAME} \
&& export SCCACHE_REGION=${SCCACHE_REGION_NAME} \ && export SCCACHE_REGION=${SCCACHE_REGION_NAME} \
&& export SCCACHE_S3_NO_CREDENTIALS=${SCCACHE_S3_NO_CREDENTIALS} \ && export SCCACHE_S3_NO_CREDENTIALS=${SCCACHE_S3_NO_CREDENTIALS} \
@ -249,9 +251,9 @@ RUN --mount=type=cache,target=/root/.cache/ccache \
python3 setup.py bdist_wheel --dist-dir=vllm-dist --py-limited-api=cp38; \ python3 setup.py bdist_wheel --dist-dir=vllm-dist --py-limited-api=cp38; \
fi fi
RUN echo "[INFO] Listing current directory:" && \ RUN echo "[DEBUG] Listing current directory:" && \
ls -al && \ ls -al && \
echo "[INFO] Showing torch_build_versions.txt content:" && \ echo "[DEBUG] Showing torch_build_versions.txt content:" && \
cat torch_build_versions.txt cat torch_build_versions.txt
#################### WHEEL BUILD IMAGE #################### #################### WHEEL BUILD IMAGE ####################
@ -261,42 +263,51 @@ RUN echo "[INFO] Listing current directory:" && \
# Setup clean environment for vLLM for test and api server using ubuntu22.04 with AOT flashinfer # Setup clean environment for vLLM for test and api server using ubuntu22.04 with AOT flashinfer
FROM ${FINAL_BASE_IMAGE} AS vllm-base FROM ${FINAL_BASE_IMAGE} AS vllm-base
USER root USER root
ARG CUDA_VERSION
ARG PYTHON_VERSION
ARG GET_PIP_URL
# TODO (huydhn): Only works with PyTorch manylinux builder
ENV PATH="/opt/python/cp312-cp312/bin:${PATH}"
# environment preparation starts here # environment preparation starts here
WORKDIR /workspace WORKDIR /workspace
# Install Python and other dependencies RUN PYTHON_VERSION_STR=$(echo ${PYTHON_VERSION} | sed 's/\.//g') && \
RUN if command -v apt-get >/dev/null; then \ echo "export PYTHON_VERSION_STR=${PYTHON_VERSION_STR}" >> /etc/environment
apt-get update -y \
&& apt-get install -y ccache software-properties-common git curl wget sudo vim \ # Install Python and other dependencies if it does not existed
&& add-apt-repository -y ppa:deadsnakes/ppa \ RUN if ! command -v python3 >/dev/null || ! python3 --version | grep -q "${PYTHON_VERSION}"; then \
&& apt-get update -y \ echo "Installing Python ${PYTHON_VERSION}..." && \
&& apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv \ echo 'tzdata tzdata/Areas select America' | debconf-set-selections && \
&& update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1 \ echo 'tzdata tzdata/Zones/America select Los_Angeles' | debconf-set-selections && \
&& update-alternatives --set python3 /usr/bin/python${PYTHON_VERSION} \ apt-get update -y && \
&& ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config \ apt-get install -y ccache software-properties-common git curl sudo && \
&& curl -sS ${GET_PIP_URL} | python${PYTHON_VERSION}; \ for i in 1 2 3; do \
else \ add-apt-repository -y ppa:deadsnakes/ppa && break || \
dnf install -y git curl wget sudo; \ { echo "Attempt $i failed, retrying in 5s..."; sleep 5; }; \
fi \ done && \
&& python3 --version && python3 -m pip --version apt-get update -y && \
apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv && \
update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1 && \
update-alternatives --set python3 /usr/bin/python${PYTHON_VERSION} && \
ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config && \
curl -sS https://bootstrap.pypa.io/get-pip.py | python${PYTHON_VERSION}; \
else \
echo "Python ${PYTHON_VERSION} already present, skipping setup."; \
fi \
&& python3 --version && python3 -m pip --version
# Get the torch versions and wheels used in previous stages for consistency # Get the torch versions and wheels used in previous stages for consistency
COPY --from=base /workspace/torch_build_versions.txt ./torch_build_versions.txt COPY --from=base /workspace/torch_build_versions.txt ./torch_build_versions.txt
COPY --from=base /workspace/xformers-dist /wheels/xformers COPY --from=base /workspace/xformers-dist /wheels/xformers
COPY --from=build /workspace/vllm-dist /wheels/vllm COPY --from=build /workspace/vllm-dist /wheels/vllm
RUN echo "[INFO] Listing current directory before torch install step:" && \ RUN echo "[DEBUG] Listing current directory before torch install step:" && \
ls -al && \ ls -al && \
echo "[INFO] Showing torch_build_versions.txt content:" && \ echo "[DEBUG] Showing torch_build_versions.txt content:" && \
cat torch_build_versions.txt cat torch_build_versions.txt
# Workaround for https://github.com/openai/triton/issues/2507 and
# https://github.com/pytorch/pytorch/issues/107960 -- hopefully
# this won't be needed for future versions of this docker image
# or future versions of triton.
RUN ldconfig /usr/local/cuda-$(echo $CUDA_VERSION | cut -d. -f1,2)/compat/
# Install uv for faster pip installs if not already present # Install uv for faster pip installs if not already present
RUN --mount=type=cache,target=/root/.cache/uv \ RUN --mount=type=cache,target=/root/.cache/uv \
if ! python3 -m uv --version > /dev/null 2>&1; then \ if ! python3 -m uv --version > /dev/null 2>&1; then \
@ -316,13 +327,15 @@ RUN --mount=type=bind,source=${TORCH_WHEELS_PATH},target=/dist \
--mount=type=cache,target=/root/.cache/uv \ --mount=type=cache,target=/root/.cache/uv \
if [ -n "$TORCH_WHEELS_PATH" ] && [ "$TORCH_WHEELS_PATH" != "./requirements" ] && [ -d "/dist" ] && ls /dist/torch*.whl >/dev/null 2>&1; then \ if [ -n "$TORCH_WHEELS_PATH" ] && [ "$TORCH_WHEELS_PATH" != "./requirements" ] && [ -d "/dist" ] && ls /dist/torch*.whl >/dev/null 2>&1; then \
torch_whl=$(find /dist -maxdepth 1 -name 'torch-*.whl' -print -quit); \ torch_whl=$(find /dist -maxdepth 1 -name 'torch-*.whl' -print -quit); \
vision_whl=$(find /dist -name 'torchvision*.whl' | head -n1 | xargs); \ vision_whl=$(find /dist/vision -name 'torchvision*.whl' | head -n1 | xargs); \
audio_whl=$(find /dist -name 'torchaudio*.whl' | head -n1 | xargs); \ audio_whl=$(find /dist/audio -name 'torchaudio*.whl' | head -n1 | xargs); \
echo "[INFO] Use wheels to build : '${torch_whl}' '${audio_whl}' '${vision_whl}'"; \ echo "[INFO] Use wheels to build : '${torch_whl}' '${audio_whl}' '${vision_whl}'"; \
uv pip install --system "${torch_whl}[opt-einsum]" "${vision_whl}" "${audio_whl}" /dist/*.whl; \ uv pip install --system "${torch_whl}[opt-einsum]"; \
uv pip install --system "${vision_whl}"; \
uv pip install --system "${audio_whl}"; \
else \ else \
echo "[INFO] Installing torch versions from torch_build_versions.txt"; \ echo "[INFO] Installing torch versions from torch_build_versions.txt"; \
uv pip install --system $(cat torch_build_versions.txt | xargs) --index-url https://download.pytorch.org/whl/nightly/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.'); \ uv pip install --system $(cat torch_build_versions.txt | xargs) --index-url https://download.pytorch.org/whl/nightly/cu128; \
fi fi
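
torch_build_versions.txt carries one pinned requirement per line, and xargs flattens it onto a single install command. A sketch with hypothetical version strings (the real file is produced in an earlier build stage):

# Hypothetical contents; actual pins come from the base stage
cat > torch_build_versions.txt <<'EOF'
torch==2.9.0.dev20250827+cu128
torchvision==0.24.0.dev20250827+cu128
torchaudio==2.8.0.dev20250827+cu128
EOF
# xargs joins the lines so uv receives all three requirements at once
uv pip install --system $(cat torch_build_versions.txt | xargs) \
    --index-url https://download.pytorch.org/whl/nightly/cu128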
# Install the vllm wheel from previous stage # Install the vllm wheel from previous stage
@ -333,8 +346,9 @@ RUN --mount=type=cache,target=/root/.cache/uv \
RUN --mount=type=cache,target=/root/.cache/uv \ RUN --mount=type=cache,target=/root/.cache/uv \
uv pip install --system /wheels/xformers/*.whl --verbose uv pip install --system /wheels/xformers/*.whl --verbose
# Build flashinfer from source. # Build flashinfer from source.
ARG torch_cuda_arch_list='8.0;8.9;9.0a;10.0a;12.0' ARG torch_cuda_arch_list='8.0;8.9;9.0a'
# install packages needed to build flashinfer # install packages needed to build flashinfer
# see issue: https://github.com/flashinfer-ai/flashinfer/issues/738 # see issue: https://github.com/flashinfer-ai/flashinfer/issues/738
@ -402,6 +416,11 @@ RUN --mount=type=cache,target=/root/.cache/uv \
RUN --mount=type=cache,target=/root/.cache/uv \ RUN --mount=type=cache,target=/root/.cache/uv \
uv pip install --system -r requirements/nightly_torch_test.txt uv pip install --system -r requirements/nightly_torch_test.txt
# Workaround for #17068
# pinned commit for v2.2.4
RUN --mount=type=cache,target=/root/.cache/uv \
uv pip install --system --no-build-isolation "git+https://github.com/state-spaces/mamba@95d8aba8a8c75aedcaa6143713b11e745e7cd0d9#egg=mamba-ssm"
# Logging to confirm the torch versions # Logging to confirm the torch versions
RUN pip freeze | grep -E 'torch|xformers|vllm|flashinfer' RUN pip freeze | grep -E 'torch|xformers|vllm|flashinfer'

View File

@ -1,17 +0,0 @@
import glob
requires_files = glob.glob("requirements/*.txt")
requires_files += ["pyproject.toml"]
for file in requires_files:
print(f">>> cleaning {file}")
with open(file) as f:
lines = f.readlines()
if "torch" in "".join(lines).lower():
print("removed:")
with open(file, "w") as f:
for line in lines:
if "torch" not in line.lower():
f.write(line)
print(f"<<< done cleaning {file}")
print()

.github/labeler.yml
View File

@ -130,6 +130,3 @@
- torch/csrc/inductor/aoti_include/** - torch/csrc/inductor/aoti_include/**
- torchgen/aoti/** - torchgen/aoti/**
- torchgen/gen_aoti_c_shim.py - torchgen/gen_aoti_c_shim.py
"ciflow/vllm":
- .github/ci_commit_pins/vllm.txt

View File

@ -525,21 +525,6 @@
- Lint - Lint
- pull - pull
- name: typechecking
patterns:
- 'pyrefly.toml'
- 'mypy.ini'
- 'mypy-strict.ini'
approved_by:
- lolpack
- maggiemoss
- ndmitchell
- kinto0
mandatory_checks_name:
- EasyCLA
- Lint
- pull
- name: superuser - name: superuser
patterns: patterns:
- '*' - '*'

View File

@ -1,44 +1,41 @@
tracking_issue: 24422 tracking_issue: 24422
ciflow_tracking_issue: 64124 ciflow_tracking_issue: 64124
ciflow_push_tags: ciflow_push_tags:
- ciflow/b200
- ciflow/b200-symm-mem
- ciflow/binaries - ciflow/binaries
- ciflow/binaries_libtorch - ciflow/binaries_libtorch
- ciflow/binaries_wheel - ciflow/binaries_wheel
- ciflow/h100 - ciflow/triton_binaries
- ciflow/h100-cutlass-backend
- ciflow/h100-distributed
- ciflow/h100-symm-mem
- ciflow/inductor - ciflow/inductor
- ciflow/inductor-cu126
- ciflow/inductor-micro-benchmark
- ciflow/inductor-micro-benchmark-cpu-x86
- ciflow/inductor-perf-compare
- ciflow/inductor-perf-test-nightly-rocm
- ciflow/inductor-perf-test-nightly-x86-zen
- ciflow/inductor-periodic - ciflow/inductor-periodic
- ciflow/inductor-rocm - ciflow/inductor-rocm
- ciflow/inductor-perf-test-nightly-rocm
- ciflow/inductor-perf-compare
- ciflow/inductor-micro-benchmark
- ciflow/inductor-micro-benchmark-cpu-x86
- ciflow/inductor-perf-test-nightly-x86-zen
- ciflow/inductor-cu126
- ciflow/linux-aarch64 - ciflow/linux-aarch64
- ciflow/mps - ciflow/mps
- ciflow/nightly - ciflow/nightly
- ciflow/op-benchmark
- ciflow/periodic - ciflow/periodic
- ciflow/periodic-rocm-mi300 - ciflow/periodic-rocm-mi300
- ciflow/pull
- ciflow/quantization-periodic
- ciflow/riscv64
- ciflow/rocm - ciflow/rocm
- ciflow/rocm-mi300 - ciflow/rocm-mi300
- ciflow/s390 - ciflow/s390
- ciflow/riscv64
- ciflow/slow - ciflow/slow
- ciflow/torchbench
- ciflow/triton_binaries
- ciflow/trunk - ciflow/trunk
- ciflow/unstable - ciflow/unstable
- ciflow/vllm
- ciflow/win-arm64
- ciflow/xpu - ciflow/xpu
- ciflow/vllm
- ciflow/torchbench
- ciflow/op-benchmark
- ciflow/pull
- ciflow/h100
- ciflow/h100-distributed
- ciflow/win-arm64
- ciflow/h100-symm-mem
- ciflow/h100-cutlass-backend
retryable_workflows: retryable_workflows:
- pull - pull
- trunk - trunk
@ -47,4 +44,4 @@ retryable_workflows:
- inductor-A100-perf-nightly - inductor-A100-perf-nightly
labeler_config: labeler.yml labeler_config: labeler.yml
label_to_label_config: label_to_label.yml label_to_label_config: label_to_label.yml
mergebot: true mergebot: True

View File

@ -1,5 +1,4 @@
boto3==1.35.42 boto3==1.35.42
build==1.2.2.post1
cmake==3.27.* cmake==3.27.*
expecttest==0.3.0 expecttest==0.3.0
fbscribelogger==0.1.7 fbscribelogger==0.1.7
@ -16,7 +15,7 @@ optree==0.13.0
packaging==23.1 packaging==23.1
parameterized==0.8.1 parameterized==0.8.1
pillow==10.3.0 pillow==10.3.0
protobuf==5.29.5 protobuf==5.29.4
psutil==5.9.8 psutil==5.9.8
pygments==2.15.0 pygments==2.15.0
pytest-cpp==2.3.0 pytest-cpp==2.3.0
@ -27,9 +26,9 @@ pytest-xdist==3.3.1
pytest==7.3.2 pytest==7.3.2
pyyaml==6.0.2 pyyaml==6.0.2
scipy==1.12.0 scipy==1.12.0
setuptools==78.1.1 setuptools==72.1.0
sympy==1.13.3 sympy==1.13.3
tlparse==0.4.0 tlparse==0.3.30
tensorboard==2.13.0 tensorboard==2.13.0
typing-extensions==4.12.2 typing-extensions==4.12.2
unittest-xml-reporting<=3.2.0,>=2.0.0 unittest-xml-reporting<=3.2.0,>=2.0.0

View File

@ -84,7 +84,6 @@ def build_triton(
["git", "checkout", f"release/{ver}.{rev}.x"], cwd=triton_basedir ["git", "checkout", f"release/{ver}.{rev}.x"], cwd=triton_basedir
) )
else: else:
check_call(["git", "fetch", "origin", commit_hash], cwd=triton_basedir)
check_call(["git", "checkout", commit_hash], cwd=triton_basedir) check_call(["git", "checkout", commit_hash], cwd=triton_basedir)
# change built wheel name and version # change built wheel name and version

View File

@ -39,9 +39,7 @@ def main() -> None:
pull_request_label_names = [label.name for label in pull_request_labels] pull_request_label_names = [label.name for label in pull_request_labels]
issue_label_names = [label.name for label in issue_labels] issue_label_names = [label.name for label in issue_labels]
labels_to_add = [ labels_to_add = [
label label for label in issue_label_names if label not in pull_request_label_names
for label in issue_label_names
if label not in pull_request_label_names and label != "actionable"
] ]
if not labels_to_add: if not labels_to_add:
print("The pull request already has the same labels.") print("The pull request already has the same labels.")

View File

@ -16,21 +16,23 @@ from typing import Optional
# NOTE: Please also update the CUDA sources in `PIP_SOURCES` in tools/nightly.py when changing this # NOTE: Please also update the CUDA sources in `PIP_SOURCES` in tools/nightly.py when changing this
CUDA_ARCHES = ["12.6", "12.8", "13.0"] CUDA_ARCHES = ["12.6", "12.8", "12.9", "13.0"]
CUDA_STABLE = "12.8" CUDA_STABLE = "12.8"
CUDA_ARCHES_FULL_VERSION = { CUDA_ARCHES_FULL_VERSION = {
"12.6": "12.6.3", "12.6": "12.6.3",
"12.8": "12.8.1", "12.8": "12.8.1",
"12.9": "12.9.1",
"13.0": "13.0.0", "13.0": "13.0.0",
} }
CUDA_ARCHES_CUDNN_VERSION = { CUDA_ARCHES_CUDNN_VERSION = {
"12.6": "9", "12.6": "9",
"12.8": "9", "12.8": "9",
"12.9": "9",
"13.0": "9", "13.0": "9",
} }
# NOTE: Please also update the ROCm sources in `PIP_SOURCES` in tools/nightly.py when changing this # NOTE: Please also update the ROCm sources in `PIP_SOURCES` in tools/nightly.py when changing this
ROCM_ARCHES = ["6.4", "7.0"] ROCM_ARCHES = ["6.3", "6.4"]
XPU_ARCHES = ["xpu"] XPU_ARCHES = ["xpu"]
@ -38,82 +40,99 @@ CPU_AARCH64_ARCH = ["cpu-aarch64"]
CPU_S390X_ARCH = ["cpu-s390x"] CPU_S390X_ARCH = ["cpu-s390x"]
CUDA_AARCH64_ARCHES = ["12.6-aarch64", "12.8-aarch64", "13.0-aarch64"] CUDA_AARCH64_ARCHES = ["12.9-aarch64"]
PYTORCH_EXTRA_INSTALL_REQUIREMENTS = { PYTORCH_EXTRA_INSTALL_REQUIREMENTS = {
"12.6": ( "12.6": (
"nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | " "nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' | " "nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' | " "nvidia-cuda-cupti-cu12==12.6.80; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | " "nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' | " "nvidia-cublas-cu12==12.6.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' | " "nvidia-cufft-cu12==11.3.0.4; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' | " "nvidia-curand-cu12==10.3.7.77; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' | " "nvidia-cusolver-cu12==11.7.1.2; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' | " "nvidia-cusparse-cu12==12.5.4.2; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | " "nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nccl-cu12==2.28.3; platform_system == 'Linux' | " "nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | " "nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' | " "nvidia-nvtx-cu12==12.6.77; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' | " "nvidia-nvjitlink-cu12==12.6.85; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux'" "nvidia-cufile-cu12==1.11.1.6; platform_system == 'Linux' and platform_machine == 'x86_64'"
), ),
"12.8": ( "12.8": (
"nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' | " "nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' | " "nvidia-cuda-runtime-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' | " "nvidia-cuda-cupti-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' | " "nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' | " "nvidia-cublas-cu12==12.8.4.1; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' | " "nvidia-cufft-cu12==11.3.3.83; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' | " "nvidia-curand-cu12==10.3.9.90; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' | " "nvidia-cusolver-cu12==11.7.3.90; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' | " "nvidia-cusparse-cu12==12.5.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' | " "nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nccl-cu12==2.28.3; platform_system == 'Linux' | " "nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nvshmem-cu12==3.3.24; platform_system == 'Linux' | " "nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' | " "nvidia-nvtx-cu12==12.8.90; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' | " "nvidia-nvjitlink-cu12==12.8.93; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux'" "nvidia-cufile-cu12==1.13.1.3; platform_system == 'Linux' and platform_machine == 'x86_64'"
),
"12.9": (
"nvidia-cuda-nvrtc-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cuda-runtime-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cuda-cupti-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cudnn-cu12==9.10.2.21; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cublas-cu12==12.9.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cufft-cu12==11.4.1.4; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-curand-cu12==10.3.10.19; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cusolver-cu12==11.7.5.82; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cusparse-cu12==12.5.10.65; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cusparselt-cu12==0.7.1; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nccl-cu12==2.27.5; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nvshmem-cu12==3.3.20; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nvtx-cu12==12.9.79; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nvjitlink-cu12==12.9.86; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cufile-cu12==1.14.1.1; platform_system == 'Linux' and platform_machine == 'x86_64'"
), ),
"13.0": ( "13.0": (
"nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' | " "nvidia-cuda-nvrtc==13.0.48; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' | " "nvidia-cuda-runtime==13.0.48; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' | " "nvidia-cuda-cupti==13.0.48; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cudnn-cu13==9.13.0.50; platform_system == 'Linux' | " "nvidia-cudnn-cu13==9.12.0.46; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cublas==13.0.0.19; platform_system == 'Linux' | " "nvidia-cublas==13.0.0.19; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cufft==12.0.0.15; platform_system == 'Linux' | " "nvidia-cufft==12.0.0.15; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-curand==10.4.0.35; platform_system == 'Linux' | " "nvidia-curand==10.4.0.35; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cusolver==12.0.3.29; platform_system == 'Linux' | " "nvidia-cusolver==12.0.3.29; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cusparse==12.6.2.49; platform_system == 'Linux' | " "nvidia-cusparse==12.6.2.49; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' | " "nvidia-cusparselt-cu13==0.8.0; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nccl-cu13==2.28.3; platform_system == 'Linux' | " "nvidia-nccl-cu13==2.27.7; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' | " "nvidia-nvshmem-cu13==3.3.24; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nvtx==13.0.39; platform_system == 'Linux' | " "nvidia-nvtx==13.0.39; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-nvjitlink==13.0.39; platform_system == 'Linux' | " "nvidia-nvjitlink==13.0.39; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"nvidia-cufile==1.15.0.42; platform_system == 'Linux'" "nvidia-cufile==1.15.0.42; platform_system == 'Linux' and platform_machine == 'x86_64'"
), ),
"xpu": ( "xpu": (
"intel-cmplr-lib-rt==2025.2.1 | " "intel-cmplr-lib-rt==2025.1.1 | "
"intel-cmplr-lib-ur==2025.2.1 | " "intel-cmplr-lib-ur==2025.1.1 | "
"intel-cmplr-lic-rt==2025.2.1 | " "intel-cmplr-lic-rt==2025.1.1 | "
"intel-sycl-rt==2025.2.1 | " "intel-sycl-rt==2025.1.1 | "
"oneccl-devel==2021.16.1; platform_system == 'Linux' and platform_machine == 'x86_64' | " "oneccl-devel==2021.15.2; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"oneccl==2021.16.1; platform_system == 'Linux' and platform_machine == 'x86_64' | " "oneccl==2021.15.2; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"impi-rt==2021.16.1; platform_system == 'Linux' and platform_machine == 'x86_64' | " "impi-rt==2021.15.0; platform_system == 'Linux' and platform_machine == 'x86_64' | "
"onemkl-sycl-blas==2025.2.0 | " "onemkl-sycl-blas==2025.1.0 | "
"onemkl-sycl-dft==2025.2.0 | " "onemkl-sycl-dft==2025.1.0 | "
"onemkl-sycl-lapack==2025.2.0 | " "onemkl-sycl-lapack==2025.1.0 | "
"onemkl-sycl-rng==2025.2.0 | " "onemkl-sycl-rng==2025.1.0 | "
"onemkl-sycl-sparse==2025.2.0 | " "onemkl-sycl-sparse==2025.1.0 | "
"dpcpp-cpp-rt==2025.2.1 | " "dpcpp-cpp-rt==2025.1.1 | "
"intel-opencl-rt==2025.2.1 | " "intel-opencl-rt==2025.1.1 | "
"mkl==2025.2.0 | " "mkl==2025.1.0 | "
"intel-openmp==2025.2.1 | " "intel-openmp==2025.1.1 | "
"tbb==2022.2.0 | " "tbb==2022.1.0 | "
"tcmlib==1.4.0 | " "tcmlib==1.3.0 | "
"umf==0.11.0 | " "umf==0.10.0 | "
"intel-pti==0.13.1" "intel-pti==0.12.3"
), ),
} }
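
Each value above is a single string of PEP 508 requirements joined with " | ". Assuming the consumer splits on that delimiter (the split itself happens downstream of this diff), recovering the individual requirements looks like:

# Two requirements from the 12.6 entry, abbreviated for the demo
reqs="nvidia-cuda-nvrtc-cu12==12.6.77; platform_system == 'Linux' | nvidia-cuda-runtime-cu12==12.6.77; platform_system == 'Linux'"
IFS='|' read -ra parts <<< "$reqs"
for r in "${parts[@]}"; do
    # trim the spaces left around each piece by the ' | ' delimiter
    echo "requirement: $(echo "$r" | sed 's/^ *//; s/ *$//')"
done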
@ -221,8 +240,12 @@ def generate_libtorch_matrix(
if os == "linux": if os == "linux":
arches += CUDA_ARCHES arches += CUDA_ARCHES
arches += ROCM_ARCHES arches += ROCM_ARCHES
if "13.0" in arches:
arches.remove("13.0")
elif os == "windows": elif os == "windows":
arches += CUDA_ARCHES arches += CUDA_ARCHES
if "13.0" in arches:
arches.remove("13.0")
if libtorch_variants is None: if libtorch_variants is None:
libtorch_variants = [ libtorch_variants = [
"shared-with-deps", "shared-with-deps",
@ -287,6 +310,8 @@ def generate_wheels_matrix(
arches += CUDA_ARCHES + ROCM_ARCHES + XPU_ARCHES arches += CUDA_ARCHES + ROCM_ARCHES + XPU_ARCHES
elif os == "windows": elif os == "windows":
arches += CUDA_ARCHES + XPU_ARCHES arches += CUDA_ARCHES + XPU_ARCHES
if "13.0" in arches:
arches.remove("13.0")
elif os == "linux-aarch64": elif os == "linux-aarch64":
# Separate new if as the CPU type is different and # Separate new if as the CPU type is different and
# uses different build/test scripts # uses different build/test scripts
@ -309,20 +334,19 @@ def generate_wheels_matrix(
else arch_version else arch_version
) )
# TODO: Enable python 3.13t on cpu-s390x
if gpu_arch_type == "cpu-s390x" and python_version == "3.13t":
continue
# TODO: Enable python 3.14 for rest # TODO: Enable python 3.14 for rest
if os not in [ if os not in ["linux", "linux-aarch64", "macos-arm64", "windows"] and (
"linux", python_version == "3.14" or python_version == "3.14t"
"linux-aarch64", ):
"linux-s390x",
"macos-arm64",
"windows",
] and (python_version == "3.14" or python_version == "3.14t"):
continue continue
# cuda linux wheels require PYTORCH_EXTRA_INSTALL_REQUIREMENTS to install # cuda linux wheels require PYTORCH_EXTRA_INSTALL_REQUIREMENTS to install
if ( if (
arch_version in ["13.0", "12.8", "12.6"] arch_version in ["13.0", "12.9", "12.8", "12.6"]
and os == "linux" and os == "linux"
or arch_version in CUDA_AARCH64_ARCHES or arch_version in CUDA_AARCH64_ARCHES
): ):
@ -386,5 +410,6 @@ def generate_wheels_matrix(
validate_nccl_dep_consistency("13.0") validate_nccl_dep_consistency("13.0")
validate_nccl_dep_consistency("12.9")
validate_nccl_dep_consistency("12.8") validate_nccl_dep_consistency("12.8")
validate_nccl_dep_consistency("12.6") validate_nccl_dep_consistency("12.6")

View File

@ -127,6 +127,53 @@ LINUX_BINARY_BUILD_WORFKLOWS = [
), ),
] ]
ROCM_SMOKE_WORKFLOWS = [
BinaryBuildWorkflow(
os=OperatingSystem.LINUX,
package_type="manywheel",
build_variant="rocm",
build_configs=generate_binary_build_matrix.generate_wheels_matrix(
OperatingSystem.LINUX,
arches=["6.4"],
python_versions=["3.9"],
),
ciflow_config=CIFlowConfig(
labels={
LABEL_CIFLOW_BINARIES,
LABEL_CIFLOW_BINARIES_WHEEL,
LABEL_CIFLOW_ROCM,
},
isolated_workflow=True,
),
branches="main",
),
]
LINUX_BINARY_SMOKE_WORKFLOWS = [
BinaryBuildWorkflow(
os=OperatingSystem.LINUX,
package_type="manywheel",
build_configs=generate_binary_build_matrix.generate_wheels_matrix(
OperatingSystem.LINUX,
arches=["12.8"],
python_versions=["3.12"],
),
branches="main",
),
BinaryBuildWorkflow(
os=OperatingSystem.LINUX,
package_type="libtorch",
build_variant=generate_binary_build_matrix.RELEASE,
build_configs=generate_binary_build_matrix.generate_libtorch_matrix(
OperatingSystem.LINUX,
generate_binary_build_matrix.RELEASE,
arches=["cpu"],
libtorch_variants=["shared-with-deps"],
),
branches="main",
),
]
WINDOWS_BINARY_BUILD_WORKFLOWS = [ WINDOWS_BINARY_BUILD_WORKFLOWS = [
BinaryBuildWorkflow( BinaryBuildWorkflow(
os=OperatingSystem.WINDOWS, os=OperatingSystem.WINDOWS,
@ -212,6 +259,39 @@ WINDOWS_BINARY_BUILD_WORKFLOWS = [
), ),
] ]
WINDOWS_BINARY_SMOKE_WORKFLOWS = [
BinaryBuildWorkflow(
os=OperatingSystem.WINDOWS,
package_type="libtorch",
build_variant=generate_binary_build_matrix.RELEASE,
build_configs=generate_binary_build_matrix.generate_libtorch_matrix(
OperatingSystem.WINDOWS,
generate_binary_build_matrix.RELEASE,
arches=["cpu"],
libtorch_variants=["shared-with-deps"],
),
branches="main",
ciflow_config=CIFlowConfig(
isolated_workflow=True,
),
),
BinaryBuildWorkflow(
os=OperatingSystem.WINDOWS,
package_type="libtorch",
build_variant=generate_binary_build_matrix.DEBUG,
build_configs=generate_binary_build_matrix.generate_libtorch_matrix(
OperatingSystem.WINDOWS,
generate_binary_build_matrix.DEBUG,
arches=["cpu"],
libtorch_variants=["shared-with-deps"],
),
branches="main",
ciflow_config=CIFlowConfig(
isolated_workflow=True,
),
),
]
MACOS_BINARY_BUILD_WORKFLOWS = [ MACOS_BINARY_BUILD_WORKFLOWS = [
BinaryBuildWorkflow( BinaryBuildWorkflow(
os=OperatingSystem.MACOS_ARM64, os=OperatingSystem.MACOS_ARM64,
@ -292,10 +372,23 @@ def main() -> None:
jinja_env.get_template("linux_binary_build_workflow.yml.j2"), jinja_env.get_template("linux_binary_build_workflow.yml.j2"),
S390X_BINARY_BUILD_WORKFLOWS, S390X_BINARY_BUILD_WORKFLOWS,
), ),
(
# Give rocm its own workflow file
jinja_env.get_template("linux_binary_build_workflow.yml.j2"),
ROCM_SMOKE_WORKFLOWS,
),
(
jinja_env.get_template("linux_binary_build_workflow.yml.j2"),
LINUX_BINARY_SMOKE_WORKFLOWS,
),
( (
jinja_env.get_template("windows_binary_build_workflow.yml.j2"), jinja_env.get_template("windows_binary_build_workflow.yml.j2"),
WINDOWS_BINARY_BUILD_WORKFLOWS, WINDOWS_BINARY_BUILD_WORKFLOWS,
), ),
(
jinja_env.get_template("windows_binary_build_workflow.yml.j2"),
WINDOWS_BINARY_SMOKE_WORKFLOWS,
),
( (
jinja_env.get_template("macos_binary_build_workflow.yml.j2"), jinja_env.get_template("macos_binary_build_workflow.yml.j2"),
MACOS_BINARY_BUILD_WORKFLOWS, MACOS_BINARY_BUILD_WORKFLOWS,

Some files were not shown because too many files have changed in this diff.