Compare commits


3 Commits

SHA1        Message                    Date
a0801ef6be  add workflow to dispatch   2025-08-26 18:20:49 -07:00
735b375db4  echo variables             2025-08-26 17:10:56 -07:00
            ghstack-source-id: 3c8f54e83cad9760fb06b39366bea2f31a39342f
            Pull-Request: https://github.com/pytorch/pytorch/pull/161565
011155aea3  echo variables             2025-08-26 17:10:55 -07:00
            ghstack-source-id: bd39100f9f9c99a5c45b85a48020375ac5f95da6
            Pull-Request: https://github.com/pytorch/pytorch/pull/161537
4421 changed files with 100541 additions and 250180 deletions

View File

@ -13,4 +13,3 @@ exclude:
- "**/benchmarks/**" - "**/benchmarks/**"
- "**/test_*.py" - "**/test_*.py"
- "**/*_test.py" - "**/*_test.py"
- "tools/**"

View File

@ -3,22 +3,8 @@ set -eux -o pipefail
GPU_ARCH_VERSION=${GPU_ARCH_VERSION:-} GPU_ARCH_VERSION=${GPU_ARCH_VERSION:-}
# Set CUDA architecture lists to match x86 build_cuda.sh if [[ "$GPU_ARCH_VERSION" == *"12.9"* ]]; then
if [[ "$GPU_ARCH_VERSION" == *"12.6"* ]]; then
export TORCH_CUDA_ARCH_LIST="8.0;9.0"
elif [[ "$GPU_ARCH_VERSION" == *"12.8"* ]]; then
export TORCH_CUDA_ARCH_LIST="8.0;9.0;10.0;12.0" export TORCH_CUDA_ARCH_LIST="8.0;9.0;10.0;12.0"
elif [[ "$GPU_ARCH_VERSION" == *"12.9"* ]]; then
export TORCH_CUDA_ARCH_LIST="8.0;9.0;10.0;12.0"
elif [[ "$GPU_ARCH_VERSION" == *"13.0"* ]]; then
export TORCH_CUDA_ARCH_LIST="8.0;9.0;10.0;11.0;12.0+PTX"
fi
# Compress the fatbin with -compress-mode=size for CUDA 13
if [[ "$DESIRED_CUDA" == *"13"* ]]; then
export TORCH_NVCC_FLAGS="-compress-mode=size"
# Bundle ptxas into the cu13 wheel, see https://github.com/pytorch/pytorch/issues/163801
export BUILD_BUNDLE_PTXAS=1
fi fi
SCRIPTPATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" SCRIPTPATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
@ -32,22 +18,14 @@ cd /
# on the mounted pytorch repo # on the mounted pytorch repo
git config --global --add safe.directory /pytorch git config --global --add safe.directory /pytorch
pip install -r /pytorch/requirements.txt pip install -r /pytorch/requirements.txt
pip install auditwheel==6.2.0 wheel pip install auditwheel==6.2.0
if [ "$DESIRED_CUDA" = "cpu" ]; then if [ "$DESIRED_CUDA" = "cpu" ]; then
echo "BASE_CUDA_VERSION is not set. Building cpu wheel." echo "BASE_CUDA_VERSION is not set. Building cpu wheel."
python /pytorch/.ci/aarch64_linux/aarch64_wheel_ci_build.py --enable-mkldnn #USE_PRIORITIZED_TEXT_FOR_LD for enable linker script optimization https://github.com/pytorch/pytorch/pull/121975/files
USE_PRIORITIZED_TEXT_FOR_LD=1 python /pytorch/.ci/aarch64_linux/aarch64_wheel_ci_build.py --enable-mkldnn
else else
echo "BASE_CUDA_VERSION is set to: $DESIRED_CUDA" echo "BASE_CUDA_VERSION is set to: $DESIRED_CUDA"
export USE_SYSTEM_NCCL=1 export USE_SYSTEM_NCCL=1
#USE_PRIORITIZED_TEXT_FOR_LD for enable linker script optimization https://github.com/pytorch/pytorch/pull/121975/files
# Check if we should use NVIDIA libs from PyPI (similar to x86 build_cuda.sh logic) USE_PRIORITIZED_TEXT_FOR_LD=1 python /pytorch/.ci/aarch64_linux/aarch64_wheel_ci_build.py --enable-mkldnn --enable-cuda
if [[ -z "$PYTORCH_EXTRA_INSTALL_REQUIREMENTS" ]]; then
echo "Bundling CUDA libraries with wheel for aarch64."
else
echo "Using nvidia libs from pypi for aarch64."
echo "Updated PYTORCH_EXTRA_INSTALL_REQUIREMENTS for aarch64: $PYTORCH_EXTRA_INSTALL_REQUIREMENTS"
export USE_NVIDIA_PYPI_LIBS=1
fi
python /pytorch/.ci/aarch64_linux/aarch64_wheel_ci_build.py --enable-mkldnn --enable-cuda
fi fi
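
Note on the TORCH_CUDA_ARCH_LIST values selected above: PyTorch's CMake expands each entry into per-architecture nvcc -gencode flags, and a "+PTX" suffix additionally embeds PTX for forward compatibility. A rough illustrative sketch of that expansion (not the repository's actual CMake logic):

    # Illustrative only: approximate expansion of an arch list such as
    # "8.0;9.0;12.0+PTX" into nvcc -gencode flags.
    arch_list="8.0;9.0;12.0+PTX"
    gencode_flags=""
    IFS=';' read -ra archs <<< "$arch_list"
    for arch in "${archs[@]}"; do
        num="${arch%+PTX}"   # e.g. 8.0
        sm="${num/./}"       # e.g. 80
        gencode_flags+=" -gencode=arch=compute_${sm},code=sm_${sm}"
        if [[ "$arch" == *"+PTX" ]]; then
            # +PTX also embeds PTX so newer GPUs can JIT-compile the kernels.
            gencode_flags+=" -gencode=arch=compute_${sm},code=compute_${sm}"
        fi
    done
    echo "$gencode_flags"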

View File

@ -13,6 +13,49 @@ def list_dir(path: str) -> list[str]:
return check_output(["ls", "-1", path]).decode().split("\n") return check_output(["ls", "-1", path]).decode().split("\n")
def build_ArmComputeLibrary() -> None:
"""
Using ArmComputeLibrary for aarch64 PyTorch
"""
print("Building Arm Compute Library")
acl_build_flags = [
"debug=0",
"neon=1",
"opencl=0",
"os=linux",
"openmp=1",
"cppthreads=0",
"arch=armv8a",
"multi_isa=1",
"fixed_format_kernels=1",
"build=native",
]
acl_install_dir = "/acl"
acl_checkout_dir = os.getenv("ACL_SOURCE_DIR", "ComputeLibrary")
if os.path.isdir(acl_install_dir):
shutil.rmtree(acl_install_dir)
if not os.path.isdir(acl_checkout_dir) or not len(os.listdir(acl_checkout_dir)):
check_call(
[
"git",
"clone",
"https://github.com/ARM-software/ComputeLibrary.git",
"-b",
"v25.02",
"--depth",
"1",
"--shallow-submodules",
]
)
check_call(
["scons", "Werror=1", f"-j{os.cpu_count()}"] + acl_build_flags,
cwd=acl_checkout_dir,
)
for d in ["arm_compute", "include", "utils", "support", "src", "build"]:
shutil.copytree(f"{acl_checkout_dir}/{d}", f"{acl_install_dir}/{d}")
def replace_tag(filename) -> None: def replace_tag(filename) -> None:
with open(filename) as f: with open(filename) as f:
lines = f.readlines() lines = f.readlines()
@ -26,186 +69,62 @@ def replace_tag(filename) -> None:
f.writelines(lines) f.writelines(lines)
def patch_library_rpath(
folder: str,
lib_name: str,
use_nvidia_pypi_libs: bool = False,
desired_cuda: str = "",
) -> None:
"""Apply patchelf to set RPATH for a library in torch/lib"""
lib_path = f"{folder}/tmp/torch/lib/{lib_name}"
if use_nvidia_pypi_libs:
# For PyPI NVIDIA libraries, construct CUDA RPATH
cuda_rpaths = [
"$ORIGIN/../../nvidia/cudnn/lib",
"$ORIGIN/../../nvidia/nvshmem/lib",
"$ORIGIN/../../nvidia/nccl/lib",
"$ORIGIN/../../nvidia/cusparselt/lib",
]
if "130" in desired_cuda:
cuda_rpaths.append("$ORIGIN/../../nvidia/cu13/lib")
else:
cuda_rpaths.extend(
[
"$ORIGIN/../../nvidia/cublas/lib",
"$ORIGIN/../../nvidia/cuda_cupti/lib",
"$ORIGIN/../../nvidia/cuda_nvrtc/lib",
"$ORIGIN/../../nvidia/cuda_runtime/lib",
"$ORIGIN/../../nvidia/cufft/lib",
"$ORIGIN/../../nvidia/curand/lib",
"$ORIGIN/../../nvidia/cusolver/lib",
"$ORIGIN/../../nvidia/cusparse/lib",
"$ORIGIN/../../nvidia/nvtx/lib",
"$ORIGIN/../../nvidia/cufile/lib",
]
)
# Add $ORIGIN for local torch libs
rpath = ":".join(cuda_rpaths) + ":$ORIGIN"
else:
# For bundled libraries, just use $ORIGIN
rpath = "$ORIGIN"
if os.path.exists(lib_path):
os.system(
f"cd {folder}/tmp/torch/lib/; "
f"patchelf --set-rpath '{rpath}' --force-rpath {lib_name}"
)
def copy_and_patch_library(
src_path: str,
folder: str,
use_nvidia_pypi_libs: bool = False,
desired_cuda: str = "",
) -> None:
"""Copy a library to torch/lib and patch its RPATH"""
if os.path.exists(src_path):
lib_name = os.path.basename(src_path)
shutil.copy2(src_path, f"{folder}/tmp/torch/lib/{lib_name}")
patch_library_rpath(folder, lib_name, use_nvidia_pypi_libs, desired_cuda)
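
As a standalone illustration of the RPATH patching performed by patch_library_rpath / copy_and_patch_library above (the library path and RPATH value here are hypothetical):

    # Hypothetical example: point a bundled library at its neighbours and at
    # the nvidia/* wheel directories via $ORIGIN-relative RPATH entries.
    lib=/tmp/wheel/torch/lib/libtorch_cuda.so          # hypothetical path
    rpath='$ORIGIN:$ORIGIN/../../nvidia/cudnn/lib'     # hypothetical value
    patchelf --set-rpath "$rpath" --force-rpath "$lib"
    # Verify: the dynamic section should now list the $ORIGIN entries.
    patchelf --print-rpath "$lib"
    readelf -d "$lib" | grep -E 'RPATH|RUNPATH'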
def package_cuda_wheel(wheel_path, desired_cuda) -> None: def package_cuda_wheel(wheel_path, desired_cuda) -> None:
""" """
Package the cuda wheel libraries Package the cuda wheel libraries
""" """
folder = os.path.dirname(wheel_path) folder = os.path.dirname(wheel_path)
wheelname = os.path.basename(wheel_path)
os.mkdir(f"{folder}/tmp") os.mkdir(f"{folder}/tmp")
os.system(f"unzip {wheel_path} -d {folder}/tmp") os.system(f"unzip {wheel_path} -d {folder}/tmp")
# Delete original wheel since it will be repackaged libs_to_copy = [
os.system(f"rm {wheel_path}") "/usr/local/cuda/extras/CUPTI/lib64/libcupti.so.12",
"/usr/local/cuda/extras/CUPTI/lib64/libnvperf_host.so",
"/usr/local/cuda/lib64/libcudnn.so.9",
"/usr/local/cuda/lib64/libcublas.so.12",
"/usr/local/cuda/lib64/libcublasLt.so.12",
"/usr/local/cuda/lib64/libcudart.so.12",
"/usr/local/cuda/lib64/libcufft.so.11",
"/usr/local/cuda/lib64/libcusparse.so.12",
"/usr/local/cuda/lib64/libcusparseLt.so.0",
"/usr/local/cuda/lib64/libcusolver.so.11",
"/usr/local/cuda/lib64/libcurand.so.10",
"/usr/local/cuda/lib64/libnccl.so.2",
"/usr/local/cuda/lib64/libnvJitLink.so.12",
"/usr/local/cuda/lib64/libnvrtc.so.12",
"/usr/local/cuda/lib64/libnvshmem_host.so.3",
"/usr/local/cuda/lib64/libcudnn_adv.so.9",
"/usr/local/cuda/lib64/libcudnn_cnn.so.9",
"/usr/local/cuda/lib64/libcudnn_graph.so.9",
"/usr/local/cuda/lib64/libcudnn_ops.so.9",
"/usr/local/cuda/lib64/libcudnn_engines_runtime_compiled.so.9",
"/usr/local/cuda/lib64/libcudnn_engines_precompiled.so.9",
"/usr/local/cuda/lib64/libcudnn_heuristic.so.9",
"/lib64/libgomp.so.1",
"/usr/lib64/libgfortran.so.5",
"/acl/build/libarm_compute.so",
"/acl/build/libarm_compute_graph.so",
"/usr/local/lib/libnvpl_lapack_lp64_gomp.so.0",
"/usr/local/lib/libnvpl_blas_lp64_gomp.so.0",
"/usr/local/lib/libnvpl_lapack_core.so.0",
"/usr/local/lib/libnvpl_blas_core.so.0",
]
# Check if we should use PyPI NVIDIA libraries or bundle system libraries if "129" in desired_cuda:
use_nvidia_pypi_libs = os.getenv("USE_NVIDIA_PYPI_LIBS", "0") == "1" libs_to_copy += [
"/usr/local/cuda/lib64/libnvrtc-builtins.so.12.9",
if use_nvidia_pypi_libs:
print("Using nvidia libs from pypi - skipping CUDA library bundling")
# For PyPI approach, we don't bundle CUDA libraries - they come from PyPI packages
# We only need to bundle non-NVIDIA libraries
minimal_libs_to_copy = [
"/lib64/libgomp.so.1",
"/usr/lib64/libgfortran.so.5",
"/acl/build/libarm_compute.so",
"/acl/build/libarm_compute_graph.so",
"/usr/local/lib/libnvpl_lapack_lp64_gomp.so.0",
"/usr/local/lib/libnvpl_blas_lp64_gomp.so.0",
"/usr/local/lib/libnvpl_lapack_core.so.0",
"/usr/local/lib/libnvpl_blas_core.so.0",
]
# Copy minimal libraries to unzipped_folder/torch/lib
for lib_path in minimal_libs_to_copy:
copy_and_patch_library(lib_path, folder, use_nvidia_pypi_libs, desired_cuda)
# Patch torch libraries used for searching libraries
torch_libs_to_patch = [
"libtorch.so",
"libtorch_cpu.so",
"libtorch_cuda.so",
"libtorch_cuda_linalg.so",
"libtorch_global_deps.so",
"libtorch_python.so",
"libtorch_nvshmem.so",
"libc10.so",
"libc10_cuda.so",
"libcaffe2_nvrtc.so",
"libshm.so",
]
for lib_name in torch_libs_to_patch:
patch_library_rpath(folder, lib_name, use_nvidia_pypi_libs, desired_cuda)
else:
print("Bundling CUDA libraries with wheel")
# Original logic for bundling system CUDA libraries
# Common libraries for all CUDA versions
common_libs = [
# Non-NVIDIA system libraries
"/lib64/libgomp.so.1",
"/usr/lib64/libgfortran.so.5",
"/acl/build/libarm_compute.so",
"/acl/build/libarm_compute_graph.so",
# Common CUDA libraries (same for all versions)
"/usr/local/lib/libnvpl_lapack_lp64_gomp.so.0",
"/usr/local/lib/libnvpl_blas_lp64_gomp.so.0",
"/usr/local/lib/libnvpl_lapack_core.so.0",
"/usr/local/lib/libnvpl_blas_core.so.0",
"/usr/local/cuda/extras/CUPTI/lib64/libnvperf_host.so",
"/usr/local/cuda/lib64/libcudnn.so.9",
"/usr/local/cuda/lib64/libcusparseLt.so.0",
"/usr/local/cuda/lib64/libcurand.so.10",
"/usr/local/cuda/lib64/libnccl.so.2",
"/usr/local/cuda/lib64/libnvshmem_host.so.3",
"/usr/local/cuda/lib64/libcudnn_adv.so.9",
"/usr/local/cuda/lib64/libcudnn_cnn.so.9",
"/usr/local/cuda/lib64/libcudnn_graph.so.9",
"/usr/local/cuda/lib64/libcudnn_ops.so.9",
"/usr/local/cuda/lib64/libcudnn_engines_runtime_compiled.so.9",
"/usr/local/cuda/lib64/libcudnn_engines_precompiled.so.9",
"/usr/local/cuda/lib64/libcudnn_heuristic.so.9",
"/usr/local/cuda/lib64/libcufile.so.0", "/usr/local/cuda/lib64/libcufile.so.0",
"/usr/local/cuda/lib64/libcufile_rdma.so.1", "/usr/local/cuda/lib64/libcufile_rdma.so.1",
"/usr/local/cuda/lib64/libcusparse.so.12",
] ]
# CUDA version-specific libraries # Copy libraries to unzipped_folder/a/lib
if "13" in desired_cuda: for lib_path in libs_to_copy:
minor_version = desired_cuda[-1] lib_name = os.path.basename(lib_path)
version_specific_libs = [ shutil.copy2(lib_path, f"{folder}/tmp/torch/lib/{lib_name}")
"/usr/local/cuda/extras/CUPTI/lib64/libcupti.so.13", os.system(
"/usr/local/cuda/lib64/libcublas.so.13", f"cd {folder}/tmp/torch/lib/; "
"/usr/local/cuda/lib64/libcublasLt.so.13", f"patchelf --set-rpath '$ORIGIN' --force-rpath {folder}/tmp/torch/lib/{lib_name}"
"/usr/local/cuda/lib64/libcudart.so.13", )
"/usr/local/cuda/lib64/libcufft.so.12",
"/usr/local/cuda/lib64/libcusolver.so.12",
"/usr/local/cuda/lib64/libnvJitLink.so.13",
"/usr/local/cuda/lib64/libnvrtc.so.13",
f"/usr/local/cuda/lib64/libnvrtc-builtins.so.13.{minor_version}",
]
elif "12" in desired_cuda:
# Get the last character for libnvrtc-builtins version (e.g., "129" -> "9")
minor_version = desired_cuda[-1]
version_specific_libs = [
"/usr/local/cuda/extras/CUPTI/lib64/libcupti.so.12",
"/usr/local/cuda/lib64/libcublas.so.12",
"/usr/local/cuda/lib64/libcublasLt.so.12",
"/usr/local/cuda/lib64/libcudart.so.12",
"/usr/local/cuda/lib64/libcufft.so.11",
"/usr/local/cuda/lib64/libcusolver.so.11",
"/usr/local/cuda/lib64/libnvJitLink.so.12",
"/usr/local/cuda/lib64/libnvrtc.so.12",
f"/usr/local/cuda/lib64/libnvrtc-builtins.so.12.{minor_version}",
]
else:
raise ValueError(f"Unsupported CUDA version: {desired_cuda}.")
# Combine all libraries
libs_to_copy = common_libs + version_specific_libs
# Copy libraries to unzipped_folder/torch/lib
for lib_path in libs_to_copy:
copy_and_patch_library(lib_path, folder, use_nvidia_pypi_libs, desired_cuda)
# Make sure the wheel is tagged with manylinux_2_28 # Make sure the wheel is tagged with manylinux_2_28
for f in os.scandir(f"{folder}/tmp/"): for f in os.scandir(f"{folder}/tmp/"):
@ -213,8 +132,14 @@ def package_cuda_wheel(wheel_path, desired_cuda) -> None:
replace_tag(f"{f.path}/WHEEL") replace_tag(f"{f.path}/WHEEL")
break break
os.system(f"wheel pack {folder}/tmp/ -d {folder}") os.mkdir(f"{folder}/cuda_wheel")
os.system(f"rm -rf {folder}/tmp/") os.system(f"cd {folder}/tmp/; zip -r {folder}/cuda_wheel/{wheelname} *")
shutil.move(
f"{folder}/cuda_wheel/{wheelname}",
f"{folder}/{wheelname}",
copy_function=shutil.copy2,
)
os.system(f"rm -rf {folder}/tmp/ {folder}/cuda_wheel/")
def complete_wheel(folder: str) -> str: def complete_wheel(folder: str) -> str:
@ -237,7 +162,14 @@ def complete_wheel(folder: str) -> str:
f"/{folder}/dist/{repaired_wheel_name}", f"/{folder}/dist/{repaired_wheel_name}",
) )
else: else:
repaired_wheel_name = list_dir(f"/{folder}/dist")[0] repaired_wheel_name = wheel_name.replace(
"linux_aarch64", "manylinux_2_28_aarch64"
)
print(f"Renaming {wheel_name} wheel to {repaired_wheel_name}")
os.rename(
f"/{folder}/dist/{wheel_name}",
f"/{folder}/dist/{repaired_wheel_name}",
)
print(f"Copying {repaired_wheel_name} to artifacts") print(f"Copying {repaired_wheel_name} to artifacts")
shutil.copy2( shutil.copy2(
@ -274,21 +206,11 @@ if __name__ == "__main__":
).decode() ).decode()
print("Building PyTorch wheel") print("Building PyTorch wheel")
build_vars = "" build_vars = "CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000 "
# MAX_JOB=5 is not required for CPU backend (see commit 465d98b) # MAX_JOB=5 is not required for CPU backend (see commit 465d98b)
if enable_cuda: if enable_cuda:
build_vars += "MAX_JOBS=5 " build_vars += "MAX_JOBS=5 "
# Handle PyPI NVIDIA libraries vs bundled libraries
use_nvidia_pypi_libs = os.getenv("USE_NVIDIA_PYPI_LIBS", "0") == "1"
if use_nvidia_pypi_libs:
print("Configuring build for PyPI NVIDIA libraries")
# Configure for dynamic linking (matching x86 logic)
build_vars += "ATEN_STATIC_CUDA=0 USE_CUDA_STATIC_LINK=0 USE_CUPTI_SO=1 "
else:
print("Configuring build for bundled NVIDIA libraries")
# Keep existing static linking approach - already configured above
override_package_version = os.getenv("OVERRIDE_PACKAGE_VERSION") override_package_version = os.getenv("OVERRIDE_PACKAGE_VERSION")
desired_cuda = os.getenv("DESIRED_CUDA") desired_cuda = os.getenv("DESIRED_CUDA")
if override_package_version is not None: if override_package_version is not None:
@ -313,17 +235,23 @@ if __name__ == "__main__":
build_vars += f"BUILD_TEST=0 PYTORCH_BUILD_VERSION={branch[1 : branch.find('-')]} PYTORCH_BUILD_NUMBER=1 " build_vars += f"BUILD_TEST=0 PYTORCH_BUILD_VERSION={branch[1 : branch.find('-')]} PYTORCH_BUILD_NUMBER=1 "
if enable_mkldnn: if enable_mkldnn:
build_ArmComputeLibrary()
print("build pytorch with mkldnn+acl backend") print("build pytorch with mkldnn+acl backend")
build_vars += "USE_MKLDNN=ON USE_MKLDNN_ACL=ON " build_vars += (
build_vars += "ACL_ROOT_DIR=/acl " "USE_MKLDNN=ON USE_MKLDNN_ACL=ON "
"ACL_ROOT_DIR=/acl "
"LD_LIBRARY_PATH=/pytorch/build/lib:/acl/build:$LD_LIBRARY_PATH "
"ACL_INCLUDE_DIR=/acl/build "
"ACL_LIBRARY=/acl/build "
)
if enable_cuda: if enable_cuda:
build_vars += "BLAS=NVPL " build_vars += "BLAS=NVPL "
else: else:
build_vars += "BLAS=OpenBLAS OpenBLAS_HOME=/opt/OpenBLAS " build_vars += "BLAS=OpenBLAS OpenBLAS_HOME=/OpenBLAS "
else: else:
print("build pytorch without mkldnn backend") print("build pytorch without mkldnn backend")
os.system(f"cd /pytorch; {build_vars} python3 -m build --wheel --no-isolation") os.system(f"cd /pytorch; {build_vars} python3 setup.py bdist_wheel")
if enable_cuda: if enable_cuda:
print("Updating Cuda Dependency") print("Updating Cuda Dependency")
filename = os.listdir("/pytorch/dist/") filename = os.listdir("/pytorch/dist/")
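
Both build entry points shown in this file (python3 -m build --wheel --no-isolation and the older setup.py bdist_wheel) are driven entirely by the environment variables accumulated in build_vars; an illustrative invocation with made-up values:

    # Illustrative: the accumulated build_vars string is prefixed onto the
    # build command, so either entry point sees the same environment.
    build_vars="MAX_JOBS=5 USE_MKLDNN=ON USE_MKLDNN_ACL=ON ACL_ROOT_DIR=/acl BLAS=NVPL"
    ( cd /pytorch && env $build_vars python3 -m build --wheel --no-isolation )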

View File

@ -241,7 +241,7 @@ def wait_for_connection(addr, port, timeout=15, attempt_cnt=5):
try: try:
with socket.create_connection((addr, port), timeout=timeout): with socket.create_connection((addr, port), timeout=timeout):
return return
except (ConnectionRefusedError, TimeoutError): # noqa: PERF203 except (ConnectionRefusedError, socket.timeout): # noqa: PERF203
if i == attempt_cnt - 1: if i == attempt_cnt - 1:
raise raise
time.sleep(timeout) time.sleep(timeout)
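
(Context for this hunk: since Python 3.10, socket.timeout is an alias of the built-in TimeoutError, so the two except clauses shown here catch the same exceptions on current interpreters.)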
@ -299,6 +299,40 @@ def install_condaforge_python(host: RemoteHost, python_version="3.8") -> None:
) )
def build_OpenBLAS(host: RemoteHost, git_clone_flags: str = "") -> None:
print("Building OpenBLAS")
host.run_cmd(
f"git clone https://github.com/xianyi/OpenBLAS -b v0.3.28 {git_clone_flags}"
)
make_flags = "NUM_THREADS=64 USE_OPENMP=1 NO_SHARED=1 DYNAMIC_ARCH=1 TARGET=ARMV8"
host.run_cmd(
f"pushd OpenBLAS && make {make_flags} -j8 && sudo make {make_flags} install && popd && rm -rf OpenBLAS"
)
def build_ArmComputeLibrary(host: RemoteHost, git_clone_flags: str = "") -> None:
print("Building Arm Compute Library")
acl_build_flags = " ".join(
[
"debug=0",
"neon=1",
"opencl=0",
"os=linux",
"openmp=1",
"cppthreads=0",
"arch=armv8a",
"multi_isa=1",
"fixed_format_kernels=1",
"build=native",
]
)
host.run_cmd(
f"git clone https://github.com/ARM-software/ComputeLibrary.git -b v25.02 {git_clone_flags}"
)
host.run_cmd(f"cd ComputeLibrary && scons Werror=1 -j8 {acl_build_flags}")
def embed_libgomp(host: RemoteHost, use_conda, wheel_name) -> None: def embed_libgomp(host: RemoteHost, use_conda, wheel_name) -> None:
host.run_cmd("pip3 install auditwheel") host.run_cmd("pip3 install auditwheel")
host.run_cmd( host.run_cmd(
@ -408,7 +442,7 @@ def build_torchvision(
if host.using_docker(): if host.using_docker():
build_vars += " CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000" build_vars += " CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000"
host.run_cmd(f"cd vision && {build_vars} python3 -m build --wheel --no-isolation") host.run_cmd(f"cd vision && {build_vars} python3 setup.py bdist_wheel")
vision_wheel_name = host.list_dir("vision/dist")[0] vision_wheel_name = host.list_dir("vision/dist")[0]
embed_libgomp(host, use_conda, os.path.join("vision", "dist", vision_wheel_name)) embed_libgomp(host, use_conda, os.path.join("vision", "dist", vision_wheel_name))
@ -463,7 +497,7 @@ def build_torchdata(
if host.using_docker(): if host.using_docker():
build_vars += " CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000" build_vars += " CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000"
host.run_cmd(f"cd data && {build_vars} python3 -m build --wheel --no-isolation") host.run_cmd(f"cd data && {build_vars} python3 setup.py bdist_wheel")
wheel_name = host.list_dir("data/dist")[0] wheel_name = host.list_dir("data/dist")[0]
embed_libgomp(host, use_conda, os.path.join("data", "dist", wheel_name)) embed_libgomp(host, use_conda, os.path.join("data", "dist", wheel_name))
@ -519,7 +553,7 @@ def build_torchtext(
if host.using_docker(): if host.using_docker():
build_vars += " CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000" build_vars += " CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000"
host.run_cmd(f"cd text && {build_vars} python3 -m build --wheel --no-isolation") host.run_cmd(f"cd text && {build_vars} python3 setup.py bdist_wheel")
wheel_name = host.list_dir("text/dist")[0] wheel_name = host.list_dir("text/dist")[0]
embed_libgomp(host, use_conda, os.path.join("text", "dist", wheel_name)) embed_libgomp(host, use_conda, os.path.join("text", "dist", wheel_name))
@ -580,7 +614,7 @@ def build_torchaudio(
host.run_cmd( host.run_cmd(
f"cd audio && export FFMPEG_ROOT=$(pwd)/third_party/ffmpeg && export USE_FFMPEG=1 \ f"cd audio && export FFMPEG_ROOT=$(pwd)/third_party/ffmpeg && export USE_FFMPEG=1 \
&& ./packaging/ffmpeg/build.sh \ && ./packaging/ffmpeg/build.sh \
&& {build_vars} python3 -m build --wheel --no-isolation" && {build_vars} python3 setup.py bdist_wheel"
) )
wheel_name = host.list_dir("audio/dist")[0] wheel_name = host.list_dir("audio/dist")[0]
@ -666,6 +700,7 @@ def start_build(
configure_system( configure_system(
host, compiler=compiler, use_conda=use_conda, python_version=python_version host, compiler=compiler, use_conda=use_conda, python_version=python_version
) )
build_OpenBLAS(host, git_clone_flags)
if host.using_docker(): if host.using_docker():
print("Move libgfortant.a into a standard location") print("Move libgfortant.a into a standard location")
@ -688,12 +723,10 @@ def start_build(
f"git clone --recurse-submodules -b {branch} https://github.com/pytorch/pytorch {git_clone_flags}" f"git clone --recurse-submodules -b {branch} https://github.com/pytorch/pytorch {git_clone_flags}"
) )
host.run_cmd("pytorch/.ci/docker/common/install_openblas.sh")
print("Building PyTorch wheel") print("Building PyTorch wheel")
build_opts = "" build_opts = ""
if pytorch_build_number is not None: if pytorch_build_number is not None:
build_opts += f" -C--build-option=--build-number={pytorch_build_number}" build_opts += f" --build-number {pytorch_build_number}"
# Breakpad build fails on aarch64 # Breakpad build fails on aarch64
build_vars = "USE_BREAKPAD=0 " build_vars = "USE_BREAKPAD=0 "
if branch == "nightly": if branch == "nightly":
@ -710,18 +743,15 @@ def start_build(
if host.using_docker(): if host.using_docker():
build_vars += " CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000" build_vars += " CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000"
if enable_mkldnn: if enable_mkldnn:
host.run_cmd("pytorch/.ci/docker/common/install_acl.sh") build_ArmComputeLibrary(host, git_clone_flags)
print("build pytorch with mkldnn+acl backend") print("build pytorch with mkldnn+acl backend")
build_vars += " USE_MKLDNN=ON USE_MKLDNN_ACL=ON" build_vars += " USE_MKLDNN=ON USE_MKLDNN_ACL=ON"
build_vars += " BLAS=OpenBLAS"
build_vars += " OpenBLAS_HOME=/opt/OpenBLAS"
build_vars += " ACL_ROOT_DIR=/acl"
host.run_cmd( host.run_cmd(
f"cd $HOME/pytorch && {build_vars} python3 -m build --wheel --no-isolation{build_opts}" f"cd $HOME/pytorch && export ACL_ROOT_DIR=$HOME/ComputeLibrary && {build_vars} python3 setup.py bdist_wheel{build_opts}"
) )
print("Repair the wheel") print("Repair the wheel")
pytorch_wheel_name = host.list_dir("pytorch/dist")[0] pytorch_wheel_name = host.list_dir("pytorch/dist")[0]
ld_library_path = "/acl/build:$HOME/pytorch/build/lib" ld_library_path = "$HOME/acl/build:$HOME/pytorch/build/lib"
host.run_cmd( host.run_cmd(
f"export LD_LIBRARY_PATH={ld_library_path} && auditwheel repair $HOME/pytorch/dist/{pytorch_wheel_name}" f"export LD_LIBRARY_PATH={ld_library_path} && auditwheel repair $HOME/pytorch/dist/{pytorch_wheel_name}"
) )
@ -733,7 +763,7 @@ def start_build(
else: else:
print("build pytorch without mkldnn backend") print("build pytorch without mkldnn backend")
host.run_cmd( host.run_cmd(
f"cd pytorch && {build_vars} python3 -m build --wheel --no-isolation{build_opts}" f"cd pytorch && {build_vars} python3 setup.py bdist_wheel{build_opts}"
) )
print("Deleting build folder") print("Deleting build folder")
@ -877,7 +907,7 @@ def terminate_instances(instance_type: str) -> None:
def parse_arguments(): def parse_arguments():
from argparse import ArgumentParser from argparse import ArgumentParser
parser = ArgumentParser("Build and test AARCH64 wheels using EC2") parser = ArgumentParser("Builid and test AARCH64 wheels using EC2")
parser.add_argument("--key-name", type=str) parser.add_argument("--key-name", type=str)
parser.add_argument("--debug", action="store_true") parser.add_argument("--debug", action="store_true")
parser.add_argument("--build-only", action="store_true") parser.add_argument("--build-only", action="store_true")
@ -974,7 +1004,7 @@ if __name__ == "__main__":
install_condaforge_python(host, args.python_version) install_condaforge_python(host, args.python_version)
sys.exit(0) sys.exit(0)
python_version = args.python_version if args.python_version is not None else "3.10" python_version = args.python_version if args.python_version is not None else "3.9"
if args.use_torch_from_pypi: if args.use_torch_from_pypi:
configure_system(host, compiler=args.compiler, python_version=python_version) configure_system(host, compiler=args.compiler, python_version=python_version)

View File

@ -7,13 +7,13 @@ ENV LC_ALL en_US.UTF-8
ENV LANG en_US.UTF-8 ENV LANG en_US.UTF-8
ENV LANGUAGE en_US.UTF-8 ENV LANGUAGE en_US.UTF-8
ARG DEVTOOLSET_VERSION=13 ARG DEVTOOLSET_VERSION=11
RUN yum -y update RUN yum -y update
RUN yum -y install epel-release RUN yum -y install epel-release
# install glibc-langpack-en make sure en_US.UTF-8 locale is available # install glibc-langpack-en make sure en_US.UTF-8 locale is available
RUN yum -y install glibc-langpack-en RUN yum -y install glibc-langpack-en
RUN yum install -y sudo wget curl perl util-linux xz bzip2 git patch which perl zlib-devel openssl-devel yum-utils autoconf automake make gcc-toolset-${DEVTOOLSET_VERSION}-gcc gcc-toolset-${DEVTOOLSET_VERSION}-gcc-c++ gcc-toolset-${DEVTOOLSET_VERSION}-gcc-gfortran gcc-toolset-${DEVTOOLSET_VERSION}-gdb RUN yum install -y sudo wget curl perl util-linux xz bzip2 git patch which perl zlib-devel openssl-devel yum-utils autoconf automake make gcc-toolset-${DEVTOOLSET_VERSION}-toolchain
# Just add everything as a safe.directory for git since these will be used in multiple places with git # Just add everything as a safe.directory for git since these will be used in multiple places with git
RUN git config --global --add safe.directory '*' RUN git config --global --add safe.directory '*'
ENV PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH ENV PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH
@ -41,7 +41,6 @@ RUN bash ./install_conda.sh && rm install_conda.sh
# Install CUDA # Install CUDA
FROM base as cuda FROM base as cuda
ARG CUDA_VERSION=12.6 ARG CUDA_VERSION=12.6
ARG DEVTOOLSET_VERSION=13
RUN rm -rf /usr/local/cuda-* RUN rm -rf /usr/local/cuda-*
ADD ./common/install_cuda.sh install_cuda.sh ADD ./common/install_cuda.sh install_cuda.sh
COPY ./common/install_nccl.sh install_nccl.sh COPY ./common/install_nccl.sh install_nccl.sh
@ -51,8 +50,7 @@ ENV CUDA_HOME=/usr/local/cuda-${CUDA_VERSION}
# Preserve CUDA_VERSION for the builds # Preserve CUDA_VERSION for the builds
ENV CUDA_VERSION=${CUDA_VERSION} ENV CUDA_VERSION=${CUDA_VERSION}
# Make things in our path by default # Make things in our path by default
ENV PATH=/usr/local/cuda-${CUDA_VERSION}/bin:/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH ENV PATH=/usr/local/cuda-${CUDA_VERSION}/bin:$PATH
FROM cuda as cuda12.6 FROM cuda as cuda12.6
RUN bash ./install_cuda.sh 12.6 RUN bash ./install_cuda.sh 12.6
@ -70,23 +68,8 @@ FROM cuda as cuda13.0
RUN bash ./install_cuda.sh 13.0 RUN bash ./install_cuda.sh 13.0
ENV DESIRED_CUDA=13.0 ENV DESIRED_CUDA=13.0
FROM ${ROCM_IMAGE} as rocm_base FROM ${ROCM_IMAGE} as rocm
ARG DEVTOOLSET_VERSION=13 ENV PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
ENV LC_ALL en_US.UTF-8
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US.UTF-8
# Install devtoolset on ROCm base image
RUN yum -y update && \
yum -y install epel-release && \
yum -y install glibc-langpack-en && \
yum install -y sudo wget curl perl util-linux xz bzip2 git patch which perl zlib-devel openssl-devel yum-utils autoconf automake make gcc-toolset-${DEVTOOLSET_VERSION}-gcc gcc-toolset-${DEVTOOLSET_VERSION}-gcc-c++ gcc-toolset-${DEVTOOLSET_VERSION}-gcc-gfortran gcc-toolset-${DEVTOOLSET_VERSION}-gdb
RUN git config --global --add safe.directory '*'
ENV PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH
FROM rocm_base as rocm
ARG PYTORCH_ROCM_ARCH
ARG DEVTOOLSET_VERSION=13
ENV PYTORCH_ROCM_ARCH ${PYTORCH_ROCM_ARCH}
ADD ./common/install_mkl.sh install_mkl.sh ADD ./common/install_mkl.sh install_mkl.sh
RUN bash ./install_mkl.sh && rm install_mkl.sh RUN bash ./install_mkl.sh && rm install_mkl.sh
ENV MKLROOT /opt/intel ENV MKLROOT /opt/intel
@ -104,7 +87,6 @@ COPY --from=cuda13.0 /usr/local/cuda-13.0 /usr/local/cuda-13.0
# Final step # Final step
FROM ${BASE_TARGET} as final FROM ${BASE_TARGET} as final
ARG DEVTOOLSET_VERSION=13
COPY --from=openssl /opt/openssl /opt/openssl COPY --from=openssl /opt/openssl /opt/openssl
COPY --from=patchelf /patchelf /usr/local/bin/patchelf COPY --from=patchelf /patchelf /usr/local/bin/patchelf
COPY --from=conda /opt/conda /opt/conda COPY --from=conda /opt/conda /opt/conda

View File

@ -36,8 +36,6 @@ case ${DOCKER_TAG_PREFIX} in
;; ;;
rocm*) rocm*)
BASE_TARGET=rocm BASE_TARGET=rocm
PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201;gfx950;gfx1150;gfx1151"
EXTRA_BUILD_ARGS="${EXTRA_BUILD_ARGS} --build-arg PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH}"
;; ;;
*) *)
echo "ERROR: Unknown docker tag ${DOCKER_TAG_PREFIX}" echo "ERROR: Unknown docker tag ${DOCKER_TAG_PREFIX}"
@ -59,7 +57,7 @@ docker build \
--target final \ --target final \
--progress plain \ --progress plain \
--build-arg "BASE_TARGET=${BASE_TARGET}" \ --build-arg "BASE_TARGET=${BASE_TARGET}" \
--build-arg "DEVTOOLSET_VERSION=13" \ --build-arg "DEVTOOLSET_VERSION=11" \
${EXTRA_BUILD_ARGS} \ ${EXTRA_BUILD_ARGS} \
-t ${tmp_tag} \ -t ${tmp_tag} \
$@ \ $@ \

View File

@ -81,11 +81,11 @@ elif [[ "$image" == *riscv* ]]; then
DOCKERFILE="ubuntu-cross-riscv/Dockerfile" DOCKERFILE="ubuntu-cross-riscv/Dockerfile"
fi fi
_UCX_COMMIT=7836b165abdbe468a2f607e7254011c07d788152 _UCX_COMMIT=7bb2722ff2187a0cad557ae4a6afa090569f83fb
_UCC_COMMIT=430e241bf5d38cbc73fc7a6b89155397232e3f96 _UCC_COMMIT=20eae37090a4ce1b32bcce6144ccad0b49943e0b
if [[ "$image" == *rocm* ]]; then if [[ "$image" == *rocm* ]]; then
_UCX_COMMIT=29831d319e6be55cb8c768ca61de335c934ca39e _UCX_COMMIT=cc312eaa4655c0cc5c2bcd796db938f90563bcf6
_UCC_COMMIT=9f4b242cbbd8b1462cbc732eb29316cdfa124b77 _UCC_COMMIT=0c0fc21559835044ab107199e334f7157d6a0d3d
fi fi
tag=$(echo $image | awk -F':' '{print $2}') tag=$(echo $image | awk -F':' '{print $2}')
@ -113,21 +113,32 @@ case "$tag" in
UCX_COMMIT=${_UCX_COMMIT} UCX_COMMIT=${_UCX_COMMIT}
UCC_COMMIT=${_UCC_COMMIT} UCC_COMMIT=${_UCC_COMMIT}
TRITON=yes TRITON=yes
INSTALL_MINGW=yes
;; ;;
pytorch-linux-jammy-cuda13.0-cudnn9-py3-gcc11) pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc9-inductor-benchmarks)
CUDA_VERSION=13.0.0 CUDA_VERSION=12.8.1
ANACONDA_PYTHON_VERSION=3.10 ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=11 GCC_VERSION=9
VISION=yes VISION=yes
KATEX=yes KATEX=yes
UCX_COMMIT=${_UCX_COMMIT} UCX_COMMIT=${_UCX_COMMIT}
UCC_COMMIT=${_UCC_COMMIT} UCC_COMMIT=${_UCC_COMMIT}
TRITON=yes TRITON=yes
INDUCTOR_BENCHMARKS=yes
;; ;;
pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc9-inductor-benchmarks) pytorch-linux-jammy-cuda12.8-cudnn9-py3.12-gcc9-inductor-benchmarks)
CUDA_VERSION=12.8.1 CUDA_VERSION=12.8.1
ANACONDA_PYTHON_VERSION=3.10 ANACONDA_PYTHON_VERSION=3.12
GCC_VERSION=9
VISION=yes
KATEX=yes
UCX_COMMIT=${_UCX_COMMIT}
UCC_COMMIT=${_UCC_COMMIT}
TRITON=yes
INDUCTOR_BENCHMARKS=yes
;;
pytorch-linux-jammy-cuda12.8-cudnn9-py3.13-gcc9-inductor-benchmarks)
CUDA_VERSION=12.8.1
ANACONDA_PYTHON_VERSION=3.13
GCC_VERSION=9 GCC_VERSION=9
VISION=yes VISION=yes
KATEX=yes KATEX=yes
@ -162,24 +173,12 @@ case "$tag" in
VISION=yes VISION=yes
ONNX=yes ONNX=yes
;; ;;
pytorch-linux-jammy-py3.10-clang12) pytorch-linux-jammy-py3.9-clang12)
ANACONDA_PYTHON_VERSION=3.10 ANACONDA_PYTHON_VERSION=3.9
CLANG_VERSION=12 CLANG_VERSION=12
VISION=yes VISION=yes
TRITON=yes TRITON=yes
;; ;;
pytorch-linux-jammy-py3.11-clang12)
ANACONDA_PYTHON_VERSION=3.11
CLANG_VERSION=12
VISION=no
TRITON=no
;;
pytorch-linux-jammy-py3.12-clang12)
ANACONDA_PYTHON_VERSION=3.12
CLANG_VERSION=12
VISION=no
TRITON=no
;;
pytorch-linux-jammy-rocm-n-py3 | pytorch-linux-jammy-rocm-n-py3-benchmarks | pytorch-linux-noble-rocm-n-py3) pytorch-linux-jammy-rocm-n-py3 | pytorch-linux-jammy-rocm-n-py3-benchmarks | pytorch-linux-noble-rocm-n-py3)
if [[ $tag =~ "jammy" ]]; then if [[ $tag =~ "jammy" ]]; then
ANACONDA_PYTHON_VERSION=3.10 ANACONDA_PYTHON_VERSION=3.10
@ -188,38 +187,46 @@ case "$tag" in
fi fi
GCC_VERSION=11 GCC_VERSION=11
VISION=yes VISION=yes
ROCM_VERSION=6.4
NINJA_VERSION=1.9.0
TRITON=yes
KATEX=yes
UCX_COMMIT=${_UCX_COMMIT}
UCC_COMMIT=${_UCC_COMMIT}
if [[ $tag =~ "benchmarks" ]]; then
INDUCTOR_BENCHMARKS=yes
fi
;;
pytorch-linux-noble-rocm-alpha-py3)
ANACONDA_PYTHON_VERSION=3.12
GCC_VERSION=11
VISION=yes
ROCM_VERSION=7.0 ROCM_VERSION=7.0
NINJA_VERSION=1.9.0 NINJA_VERSION=1.9.0
TRITON=yes TRITON=yes
KATEX=yes KATEX=yes
UCX_COMMIT=${_UCX_COMMIT} UCX_COMMIT=${_UCX_COMMIT}
UCC_COMMIT=${_UCC_COMMIT} UCC_COMMIT=${_UCC_COMMIT}
PYTORCH_ROCM_ARCH="gfx90a;gfx942;gfx950;gfx1100" PYTORCH_ROCM_ARCH="gfx90a;gfx942;gfx950"
if [[ $tag =~ "benchmarks" ]]; then
INDUCTOR_BENCHMARKS=yes
fi
;; ;;
pytorch-linux-jammy-xpu-n-1-py3) pytorch-linux-jammy-xpu-2025.0-py3)
ANACONDA_PYTHON_VERSION=3.10 ANACONDA_PYTHON_VERSION=3.9
GCC_VERSION=11
VISION=yes
XPU_VERSION=2025.0
NINJA_VERSION=1.9.0
TRITON=yes
;;
pytorch-linux-jammy-xpu-2025.1-py3)
ANACONDA_PYTHON_VERSION=3.9
GCC_VERSION=11 GCC_VERSION=11
VISION=yes VISION=yes
XPU_VERSION=2025.1 XPU_VERSION=2025.1
NINJA_VERSION=1.9.0 NINJA_VERSION=1.9.0
TRITON=yes TRITON=yes
;; ;;
pytorch-linux-noble-xpu-n-py3 | pytorch-linux-noble-xpu-n-py3-inductor-benchmarks) pytorch-linux-jammy-py3.9-gcc11-inductor-benchmarks)
ANACONDA_PYTHON_VERSION=3.10 ANACONDA_PYTHON_VERSION=3.9
GCC_VERSION=13
VISION=yes
XPU_VERSION=2025.2
NINJA_VERSION=1.9.0
TRITON=yes
if [[ $tag =~ "benchmarks" ]]; then
INDUCTOR_BENCHMARKS=yes
fi
;;
pytorch-linux-jammy-py3-gcc11-inductor-benchmarks)
ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=11 GCC_VERSION=11
VISION=yes VISION=yes
KATEX=yes KATEX=yes
@ -227,8 +234,8 @@ case "$tag" in
DOCS=yes DOCS=yes
INDUCTOR_BENCHMARKS=yes INDUCTOR_BENCHMARKS=yes
;; ;;
pytorch-linux-jammy-cuda12.8-cudnn9-py3.10-clang12) pytorch-linux-jammy-cuda12.8-cudnn9-py3.9-clang12)
ANACONDA_PYTHON_VERSION=3.10 ANACONDA_PYTHON_VERSION=3.9
CUDA_VERSION=12.8.1 CUDA_VERSION=12.8.1
CLANG_VERSION=12 CLANG_VERSION=12
VISION=yes VISION=yes
@ -239,8 +246,8 @@ case "$tag" in
CLANG_VERSION=18 CLANG_VERSION=18
VISION=yes VISION=yes
;; ;;
pytorch-linux-jammy-py3.10-gcc11) pytorch-linux-jammy-py3.9-gcc11)
ANACONDA_PYTHON_VERSION=3.10 ANACONDA_PYTHON_VERSION=3.9
GCC_VERSION=11 GCC_VERSION=11
VISION=yes VISION=yes
KATEX=yes KATEX=yes
@ -260,12 +267,6 @@ case "$tag" in
HALIDE=yes HALIDE=yes
TRITON=yes TRITON=yes
;; ;;
pytorch-linux-jammy-cuda12.8-py3.12-pallas)
CUDA_VERSION=12.8.1
ANACONDA_PYTHON_VERSION=3.12
GCC_VERSION=11
PALLAS=yes
;;
pytorch-linux-jammy-py3.12-triton-cpu) pytorch-linux-jammy-py3.12-triton-cpu)
CUDA_VERSION=12.6 CUDA_VERSION=12.6
ANACONDA_PYTHON_VERSION=3.12 ANACONDA_PYTHON_VERSION=3.12
@ -273,15 +274,18 @@ case "$tag" in
TRITON_CPU=yes TRITON_CPU=yes
;; ;;
pytorch-linux-jammy-linter) pytorch-linux-jammy-linter)
PYTHON_VERSION=3.10 # TODO: Use 3.9 here because of this issue https://github.com/python/mypy/issues/13627.
# We will need to update mypy version eventually, but that's for another day. The task
# would be to upgrade mypy to 1.0.0 with Python 3.11
PYTHON_VERSION=3.9
;; ;;
pytorch-linux-jammy-cuda12.8-cudnn9-py3.10-linter) pytorch-linux-jammy-cuda12.8-cudnn9-py3.9-linter)
PYTHON_VERSION=3.10 PYTHON_VERSION=3.9
CUDA_VERSION=12.8.1 CUDA_VERSION=12.8.1
;; ;;
pytorch-linux-jammy-aarch64-py3.10-gcc13) pytorch-linux-jammy-aarch64-py3.10-gcc11)
ANACONDA_PYTHON_VERSION=3.10 ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=13 GCC_VERSION=11
ACL=yes ACL=yes
VISION=yes VISION=yes
OPENBLAS=yes OPENBLAS=yes
@ -289,19 +293,9 @@ case "$tag" in
# from pytorch/llvm:9.0.1 is x86 specific # from pytorch/llvm:9.0.1 is x86 specific
SKIP_LLVM_SRC_BUILD_INSTALL=yes SKIP_LLVM_SRC_BUILD_INSTALL=yes
;; ;;
pytorch-linux-jammy-aarch64-py3.10-clang21) pytorch-linux-jammy-aarch64-py3.10-gcc11-inductor-benchmarks)
ANACONDA_PYTHON_VERSION=3.10 ANACONDA_PYTHON_VERSION=3.10
CLANG_VERSION=21 GCC_VERSION=11
ACL=yes
VISION=yes
OPENBLAS=yes
# snadampal: skipping llvm src build install because the current version
# from pytorch/llvm:9.0.1 is x86 specific
SKIP_LLVM_SRC_BUILD_INSTALL=yes
;;
pytorch-linux-jammy-aarch64-py3.10-gcc13-inductor-benchmarks)
ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=13
ACL=yes ACL=yes
VISION=yes VISION=yes
OPENBLAS=yes OPENBLAS=yes
@ -376,7 +370,7 @@ docker build \
--build-arg "NINJA_VERSION=${NINJA_VERSION:-}" \ --build-arg "NINJA_VERSION=${NINJA_VERSION:-}" \
--build-arg "KATEX=${KATEX:-}" \ --build-arg "KATEX=${KATEX:-}" \
--build-arg "ROCM_VERSION=${ROCM_VERSION:-}" \ --build-arg "ROCM_VERSION=${ROCM_VERSION:-}" \
--build-arg "PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH}" \ --build-arg "PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH:-gfx90a;gfx942}" \
--build-arg "IMAGE_NAME=${IMAGE_NAME}" \ --build-arg "IMAGE_NAME=${IMAGE_NAME}" \
--build-arg "UCX_COMMIT=${UCX_COMMIT}" \ --build-arg "UCX_COMMIT=${UCX_COMMIT}" \
--build-arg "UCC_COMMIT=${UCC_COMMIT}" \ --build-arg "UCC_COMMIT=${UCC_COMMIT}" \
@ -387,14 +381,12 @@ docker build \
--build-arg "INDUCTOR_BENCHMARKS=${INDUCTOR_BENCHMARKS}" \ --build-arg "INDUCTOR_BENCHMARKS=${INDUCTOR_BENCHMARKS}" \
--build-arg "EXECUTORCH=${EXECUTORCH}" \ --build-arg "EXECUTORCH=${EXECUTORCH}" \
--build-arg "HALIDE=${HALIDE}" \ --build-arg "HALIDE=${HALIDE}" \
--build-arg "PALLAS=${PALLAS}" \
--build-arg "XPU_VERSION=${XPU_VERSION}" \ --build-arg "XPU_VERSION=${XPU_VERSION}" \
--build-arg "UNINSTALL_DILL=${UNINSTALL_DILL}" \ --build-arg "UNINSTALL_DILL=${UNINSTALL_DILL}" \
--build-arg "ACL=${ACL:-}" \ --build-arg "ACL=${ACL:-}" \
--build-arg "OPENBLAS=${OPENBLAS:-}" \ --build-arg "OPENBLAS=${OPENBLAS:-}" \
--build-arg "SKIP_SCCACHE_INSTALL=${SKIP_SCCACHE_INSTALL:-}" \ --build-arg "SKIP_SCCACHE_INSTALL=${SKIP_SCCACHE_INSTALL:-}" \
--build-arg "SKIP_LLVM_SRC_BUILD_INSTALL=${SKIP_LLVM_SRC_BUILD_INSTALL:-}" \ --build-arg "SKIP_LLVM_SRC_BUILD_INSTALL=${SKIP_LLVM_SRC_BUILD_INSTALL:-}" \
--build-arg "INSTALL_MINGW=${INSTALL_MINGW:-}" \
-f $(dirname ${DOCKERFILE})/Dockerfile \ -f $(dirname ${DOCKERFILE})/Dockerfile \
-t "$tmp_tag" \ -t "$tmp_tag" \
"$@" \ "$@" \
@ -475,3 +467,12 @@ elif [ "$HAS_TRITON" = "yes" ]; then
echo "expecting triton to not be installed, but it is" echo "expecting triton to not be installed, but it is"
exit 1 exit 1
fi fi
# Sanity check cmake version. Executorch reinstalls cmake and I'm not sure if
# they support 4.0.0 yet, so exclude them from this check.
CMAKE_VERSION=$(drun cmake --version)
if [[ "$EXECUTORCH" != *yes* && "$CMAKE_VERSION" != *4.* ]]; then
echo "CMake version is not 4.0.0:"
drun cmake --version
exit 1
fi

View File

@ -56,13 +56,9 @@ ENV INSTALLED_VISION ${VISION}
# Install rocm # Install rocm
ARG ROCM_VERSION ARG ROCM_VERSION
RUN mkdir ci_commit_pins
COPY ./common/common_utils.sh common_utils.sh
COPY ./ci_commit_pins/rocm-composable-kernel.txt ci_commit_pins/rocm-composable-kernel.txt
COPY ./common/install_rocm.sh install_rocm.sh COPY ./common/install_rocm.sh install_rocm.sh
RUN bash ./install_rocm.sh RUN bash ./install_rocm.sh
RUN rm install_rocm.sh common_utils.sh RUN rm install_rocm.sh
RUN rm -r ci_commit_pins
COPY ./common/install_rocm_magma.sh install_rocm_magma.sh COPY ./common/install_rocm_magma.sh install_rocm_magma.sh
RUN bash ./install_rocm_magma.sh ${ROCM_VERSION} RUN bash ./install_rocm_magma.sh ${ROCM_VERSION}
RUN rm install_rocm_magma.sh RUN rm install_rocm_magma.sh

View File

@ -1 +1 @@
deb42f2a8e48f5032b4a98ee781a15fa87a157cf 56392aa978594cc155fa8af48cd949f5b5f1823a

View File

@ -1,2 +1,2 @@
transformers==4.56.0 transformers==4.54.0
soxr==0.5.0 soxr==0.5.0

View File

@ -1 +0,0 @@
0.8.0

View File

@ -1 +1 @@
v2.27.5-1 v2.27.5-1

View File

@ -1 +0,0 @@
7fe50dc3da2069d6645d9deb8c017a876472a977

View File

@ -1 +1 @@
74a23feff57432129df84d8099e622773cf77925 e03a63be43e33596f7f0a43b0f530353785e4a59

View File

@ -1 +1 @@
1b0418a9a454b2b93ab8d71f40e59d2297157fae a6572fb0be5b9b0a19b0641a0ce05810fa04e44c

View File

@ -1 +1 @@
bfeb066872bc1e8b2d2bc0a3b295b99dd77206e7 f7888497a1eb9e98d4c07537f0d0bcfe180d1363

.ci/docker/common/install_acl.sh Executable file → Normal file
View File

@ -1,27 +1,16 @@
#!/bin/bash set -euo pipefail
# Script used only in CD pipeline
set -eux readonly version=v25.02
readonly src_host=https://github.com/ARM-software
ACL_VERSION=${ACL_VERSION:-"v52.6.0"} readonly src_repo=ComputeLibrary
ACL_INSTALL_DIR="/acl"
# Clone ACL # Clone ACL
git clone https://github.com/ARM-software/ComputeLibrary.git -b "${ACL_VERSION}" --depth 1 --shallow-submodules [[ ! -d ${src_repo} ]] && git clone ${src_host}/${src_repo}.git
cd ${src_repo}
git checkout $version
ACL_CHECKOUT_DIR="ComputeLibrary"
# Build with scons # Build with scons
pushd $ACL_CHECKOUT_DIR
scons -j8 Werror=0 debug=0 neon=1 opencl=0 embed_kernels=0 \ scons -j8 Werror=0 debug=0 neon=1 opencl=0 embed_kernels=0 \
os=linux arch=armv8a build=native multi_isa=1 \ os=linux arch=armv8a build=native multi_isa=1 \
fixed_format_kernels=1 openmp=1 cppthreads=0 fixed_format_kernels=1 openmp=1 cppthreads=0
popd
# Install ACL
sudo mkdir -p ${ACL_INSTALL_DIR}
for d in arm_compute include utils support src build
do
sudo cp -r ${ACL_CHECKOUT_DIR}/${d} ${ACL_INSTALL_DIR}/${d}
done
rm -rf $ACL_CHECKOUT_DIR

View File

@ -8,8 +8,8 @@ if [ -n "$CLANG_VERSION" ]; then
# work around ubuntu apt-get conflicts # work around ubuntu apt-get conflicts
sudo apt-get -y -f install sudo apt-get -y -f install
wget --no-check-certificate -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add - wget --no-check-certificate -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
if [[ $CLANG_VERSION -ge 18 ]]; then if [[ $CLANG_VERSION == 18 ]]; then
apt-add-repository "deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-${CLANG_VERSION} main" apt-add-repository "deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-18 main"
fi fi
fi fi

View File

@ -49,20 +49,12 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
export SYSROOT_DEP="sysroot_linux-64=2.17" export SYSROOT_DEP="sysroot_linux-64=2.17"
fi fi
# Install correct Python version
# Also ensure sysroot is using a modern GLIBC to match system compilers
if [ "$ANACONDA_PYTHON_VERSION" = "3.14" ]; then
as_jenkins conda create -n py_$ANACONDA_PYTHON_VERSION -y\
python="3.14.0" \
${SYSROOT_DEP} \
-c conda-forge
else
# Install correct Python version # Install correct Python version
# Also ensure sysroot is using a modern GLIBC to match system compilers # Also ensure sysroot is using a modern GLIBC to match system compilers
as_jenkins conda create -n py_$ANACONDA_PYTHON_VERSION -y\ as_jenkins conda create -n py_$ANACONDA_PYTHON_VERSION -y\
python="$ANACONDA_PYTHON_VERSION" \ python="$ANACONDA_PYTHON_VERSION" \
${SYSROOT_DEP} ${SYSROOT_DEP}
fi
# libstdcxx from conda default channels are too old, we need GLIBCXX_3.4.30 # libstdcxx from conda default channels are too old, we need GLIBCXX_3.4.30
# which is provided in libstdcxx 12 and up. # which is provided in libstdcxx 12 and up.
conda_install libstdcxx-ng=12.3.0 --update-deps -c conda-forge conda_install libstdcxx-ng=12.3.0 --update-deps -c conda-forge

View File

@ -83,6 +83,10 @@ function build_cpython {
py_suffix=${py_ver::-1} py_suffix=${py_ver::-1}
py_folder=$py_suffix py_folder=$py_suffix
fi fi
# Only b3 is available now
if [ "$py_suffix" == "3.14.0" ]; then
py_suffix="3.14.0b3"
fi
wget -q $PYTHON_DOWNLOAD_URL/$py_folder/Python-$py_suffix.tgz -O Python-$py_ver.tgz wget -q $PYTHON_DOWNLOAD_URL/$py_folder/Python-$py_suffix.tgz -O Python-$py_ver.tgz
do_cpython_build $py_ver Python-$py_suffix do_cpython_build $py_ver Python-$py_suffix

View File

@ -10,7 +10,7 @@ else
arch_path='sbsa' arch_path='sbsa'
fi fi
NVSHMEM_VERSION=3.4.5 NVSHMEM_VERSION=3.3.24
function install_cuda { function install_cuda {
version=$1 version=$1
@ -147,10 +147,10 @@ function install_128 {
} }
function install_130 { function install_130 {
CUDNN_VERSION=9.13.0.50 CUDNN_VERSION=9.12.0.46
echo "Installing CUDA 13.0 and cuDNN ${CUDNN_VERSION} and NVSHMEM and NCCL and cuSparseLt-0.7.1" echo "Installing CUDA 13.0 and cuDNN ${CUDNN_VERSION} and NVSHMEM and NCCL and cuSparseLt-0.7.1"
# install CUDA 13.0 in the same container # install CUDA 13.0 in the same container
install_cuda 13.0.2 cuda_13.0.2_580.95.05_linux install_cuda 13.0.0 cuda_13.0.0_580.65.06_linux
# cuDNN license: https://developer.nvidia.com/cudnn/license_agreement # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
install_cudnn 13 $CUDNN_VERSION install_cudnn 13 $CUDNN_VERSION

View File

@ -42,27 +42,22 @@ install_pip_dependencies() {
# A workaround, ExecuTorch has moved to numpy 2.0 which is not compatible with the current # A workaround, ExecuTorch has moved to numpy 2.0 which is not compatible with the current
# numba and scipy version used in PyTorch CI # numba and scipy version used in PyTorch CI
conda_run pip uninstall -y numba scipy conda_run pip uninstall -y numba scipy
# Yaspin is needed for running CI test (get_benchmark_analysis_data.py)
pip_install yaspin==3.1.0
popd popd
} }
setup_executorch() { setup_executorch() {
pushd executorch
export PYTHON_EXECUTABLE=python export PYTHON_EXECUTABLE=python
export CMAKE_ARGS="-DEXECUTORCH_BUILD_PYBIND=ON -DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON -DEXECUTORCH_BUILD_TESTS=ON" export CMAKE_ARGS="-DEXECUTORCH_BUILD_PYBIND=ON -DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON"
as_jenkins .ci/scripts/setup-linux.sh --build-tool cmake || true as_jenkins .ci/scripts/setup-linux.sh --build-tool cmake || true
popd
} }
if [ $# -eq 0 ]; then clone_executorch
clone_executorch install_buck2
install_buck2 install_conda_dependencies
install_conda_dependencies install_pip_dependencies
install_pip_dependencies setup_executorch
pushd executorch
setup_executorch
popd
else
"$@"
fi

View File

@ -7,11 +7,11 @@ if [ -n "$GCC_VERSION" ]; then
# Need the official toolchain repo to get alternate packages # Need the official toolchain repo to get alternate packages
add-apt-repository ppa:ubuntu-toolchain-r/test add-apt-repository ppa:ubuntu-toolchain-r/test
apt-get update apt-get update
apt-get install -y g++-$GCC_VERSION gfortran-$GCC_VERSION apt-get install -y g++-$GCC_VERSION
update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-"$GCC_VERSION" 50 update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-"$GCC_VERSION" 50
update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-"$GCC_VERSION" 50 update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-"$GCC_VERSION" 50
update-alternatives --install /usr/bin/gcov gcov /usr/bin/gcov-"$GCC_VERSION" 50 update-alternatives --install /usr/bin/gcov gcov /usr/bin/gcov-"$GCC_VERSION" 50
update-alternatives --install /usr/bin/gfortran gfortran /usr/bin/gfortran-"$GCC_VERSION" 50
# Cleanup package manager # Cleanup package manager
apt-get autoclean && apt-get clean apt-get autoclean && apt-get clean

View File

@ -1,40 +0,0 @@
#!/bin/bash
set -ex
source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh"
# Get the pinned JAX version (same for all CUDA versions)
JAX_VERSION=$(get_pinned_commit /ci_commit_pins/jax)
function install_jax_12() {
echo "Installing JAX ${JAX_VERSION} with CUDA 12 support"
pip_install "jax[cuda12]==${JAX_VERSION}" -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
# Verify installation
python -c "import jax" # check for errors
echo "JAX ${JAX_VERSION} installation completed successfully for CUDA 12"
}
function install_jax_13() {
echo "Installing JAX ${JAX_VERSION} with CUDA 13 support"
pip_install "jax[cuda13]==${JAX_VERSION}" -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
# Verify installation
python -c "import jax" # check for errors
echo "JAX ${JAX_VERSION} installation completed successfully for CUDA 13"
}
# idiomatic parameter and option handling in sh
while test $# -gt 0
do
case "$1" in
12.4|12.6|12.6.*|12.8|12.8.*|12.9|12.9.*) install_jax_12;
;;
13.0|13.0.*) install_jax_13;
;;
*) echo "bad argument $1"; exit 1
;;
esac
shift
done

View File

@ -1,56 +0,0 @@
#!/bin/bash
# Script used only in CD pipeline
set -ex
# install dependencies
dnf -y install gmp-devel libmpc-devel texinfo flex bison
cd /usr/local/src
# fetch source for gcc 13
git clone --depth 1 --single-branch -b releases/gcc-13.3.0 https://github.com/gcc-mirror/gcc.git gcc-13.3.0
mkdir -p gcc-13.3.0/build-gomp
cd gcc-13.3.0/build-gomp
# configure gcc build
# I got these flags by:
# 1. downloading the source rpm for gcc-11 on AlmaLinux 8 container
# dnf install -y dnf-plugins-core rpmdevtools
# dnf download --source libgomp
# 2. extracting the gcc.spec from the source.
# rpmdev-extract gcc-xx.src.rpm
# 3. extracting optflags and ld_flags from gcc.spec:
# rpm --eval '%{optflags}'
# rpm --eval '%{build_ldflags}'
#
# I had to remove the following flags because they didn't compile for this version of libgomp:
# -Werror=format-security
# -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1
# -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1
#
# I added -march=armv8-a -mtune=generic to make them explicit. I don't think they're strictly needed.
OPT_FLAGS='-O2 -march=armv8-a -mtune=generic'\
' -fexceptions -g -grecord-gcc-switches -pipe -Wall'\
' -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS'\
' -fstack-protector-strong -fasynchronous-unwind-tables'\
' -fstack-clash-protection'
LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now'
CFLAGS="$OPT_FLAGS" \
CXXFLAGS="$OPT_FLAGS" \
LDFLAGS="$LDFLAGS" \
../configure \
--prefix=/usr \
--libdir=/usr/lib64 \
--enable-languages=c,c++ \
--disable-multilib \
--disable-bootstrap \
--enable-libgomp
# only build libgomp
make -j$(nproc) all-target-libgomp
make install-target-libgomp

View File

@ -1,10 +0,0 @@
#!/bin/bash
set -ex
# Install MinGW-w64 for Windows cross-compilation
apt-get update
apt-get install -y g++-mingw-w64-x86-64-posix
echo "MinGW-w64 installed successfully"
x86_64-w64-mingw32-g++ --version

View File

@ -19,8 +19,8 @@ pip_install \
transformers==4.36.2 transformers==4.36.2
pip_install coloredlogs packaging pip_install coloredlogs packaging
pip_install onnxruntime==1.23.1 pip_install onnxruntime==1.22.1
pip_install onnxscript==0.5.4 pip_install onnxscript==0.4.0
# Cache the transformers model to be used later by ONNX tests. We need to run the transformers # Cache the transformers model to be used later by ONNX tests. We need to run the transformers
# package to download the model. By default, the model is cached at ~/.cache/huggingface/hub/ # package to download the model. By default, the model is cached at ~/.cache/huggingface/hub/

.ci/docker/common/install_openblas.sh Executable file → Normal file
View File

@ -3,14 +3,11 @@
set -ex set -ex
OPENBLAS_VERSION=${OPENBLAS_VERSION:-"v0.3.30"} cd /
git clone https://github.com/OpenMathLib/OpenBLAS.git -b "${OPENBLAS_VERSION:-v0.3.30}" --depth 1 --shallow-submodules
# Clone OpenBLAS
git clone https://github.com/OpenMathLib/OpenBLAS.git -b "${OPENBLAS_VERSION}" --depth 1 --shallow-submodules
OPENBLAS_CHECKOUT_DIR="OpenBLAS" OPENBLAS_CHECKOUT_DIR="OpenBLAS"
OPENBLAS_BUILD_FLAGS=" OPENBLAS_BUILD_FLAGS="
CC=gcc
NUM_THREADS=128 NUM_THREADS=128
USE_OPENMP=1 USE_OPENMP=1
NO_SHARED=0 NO_SHARED=0
@ -20,7 +17,5 @@ CFLAGS=-O3
BUILD_BFLOAT16=1 BUILD_BFLOAT16=1
" "
make -j8 ${OPENBLAS_BUILD_FLAGS} -C $OPENBLAS_CHECKOUT_DIR make -j8 ${OPENBLAS_BUILD_FLAGS} -C ${OPENBLAS_CHECKOUT_DIR}
sudo make install -C $OPENBLAS_CHECKOUT_DIR make -j8 ${OPENBLAS_BUILD_FLAGS} install -C ${OPENBLAS_CHECKOUT_DIR}
rm -rf $OPENBLAS_CHECKOUT_DIR

View File

@ -2,11 +2,6 @@
set -ex set -ex
# for pip_install function
source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh"
ROCM_COMPOSABLE_KERNEL_VERSION="$(cat $(dirname $0)/../ci_commit_pins/rocm-composable-kernel.txt)"
ver() { ver() {
printf "%3d%03d%03d%03d" $(echo "$1" | tr '.' ' '); printf "%3d%03d%03d%03d" $(echo "$1" | tr '.' ' ');
} }
@ -40,7 +35,17 @@ EOF
# Default url values # Default url values
rocm_baseurl="http://repo.radeon.com/rocm/apt/${ROCM_VERSION}" rocm_baseurl="http://repo.radeon.com/rocm/apt/${ROCM_VERSION}"
amdgpu_baseurl="https://repo.radeon.com/amdgpu/${ROCM_VERSION}/ubuntu"
# Special case for ROCM_VERSION == 7.0
if [[ $(ver "$ROCM_VERSION") -eq $(ver 7.0) ]]; then
rocm_baseurl="https://repo.radeon.com/rocm/apt/7.0_alpha2"
amdgpu_baseurl="https://repo.radeon.com/amdgpu/30.10_alpha2/ubuntu"
fi
# Add amdgpu repository
UBUNTU_VERSION_NAME=`cat /etc/os-release | grep UBUNTU_CODENAME | awk -F= '{print $2}'` UBUNTU_VERSION_NAME=`cat /etc/os-release | grep UBUNTU_CODENAME | awk -F= '{print $2}'`
echo "deb [arch=amd64] ${amdgpu_baseurl} ${UBUNTU_VERSION_NAME} main" > /etc/apt/sources.list.d/amdgpu.list
# Add rocm repository # Add rocm repository
wget -qO - http://repo.radeon.com/rocm/rocm.gpg.key | apt-key add - wget -qO - http://repo.radeon.com/rocm/rocm.gpg.key | apt-key add -
@ -108,8 +113,6 @@ EOF
rm -rf HIP clr rm -rf HIP clr
fi fi
pip_install "git+https://github.com/rocm/composable_kernel@$ROCM_COMPOSABLE_KERNEL_VERSION"
# Cleanup # Cleanup
apt-get autoclean && apt-get clean apt-get autoclean && apt-get clean
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
@ -173,8 +176,6 @@ install_centos() {
sqlite3 $kdb "PRAGMA journal_mode=off; PRAGMA VACUUM;" sqlite3 $kdb "PRAGMA journal_mode=off; PRAGMA VACUUM;"
done done
pip_install "git+https://github.com/rocm/composable_kernel@$ROCM_COMPOSABLE_KERNEL_VERSION"
# Cleanup # Cleanup
yum clean all yum clean all
rm -rf /var/cache/yum rm -rf /var/cache/yum

View File

@ -12,8 +12,8 @@ function do_install() {
rocm_version_nodot=${rocm_version//./} rocm_version_nodot=${rocm_version//./}
# post merge of https://github.com/icl-utk-edu/magma/pull/65 # Version 2.7.2 + ROCm related updates
MAGMA_VERSION=c0792ae825fb36872784892ea643dd6f3456bc5f MAGMA_VERSION=a1625ff4d9bc362906bd01f805dbbe12612953f6
magma_archive="magma-rocm${rocm_version_nodot}-${MAGMA_VERSION}-1.tar.bz2" magma_archive="magma-rocm${rocm_version_nodot}-${MAGMA_VERSION}-1.tar.bz2"
rocm_dir="/opt/rocm" rocm_dir="/opt/rocm"

View File

@ -57,7 +57,7 @@ if [ ! -f setup.py ]; then
cd python cd python
fi fi
pip_install pybind11==3.0.1 pip_install pybind11==2.13.6
# TODO: remove patch setup.py once we have a proper fix for https://github.com/triton-lang/triton/issues/4527 # TODO: remove patch setup.py once we have a proper fix for https://github.com/triton-lang/triton/issues/4527
as_jenkins sed -i -e 's/https:\/\/tritonlang.blob.core.windows.net\/llvm-builds/https:\/\/oaitriton.blob.core.windows.net\/public\/llvm-builds/g' setup.py as_jenkins sed -i -e 's/https:\/\/tritonlang.blob.core.windows.net\/llvm-builds/https:\/\/oaitriton.blob.core.windows.net\/public\/llvm-builds/g' setup.py
@ -66,15 +66,15 @@ if [ -n "${UBUNTU_VERSION}" ] && [ -n "${GCC_VERSION}" ] && [[ "${GCC_VERSION}"
# Triton needs at least gcc-9 to build # Triton needs at least gcc-9 to build
apt-get install -y g++-9 apt-get install -y g++-9
CXX=g++-9 conda_run python -m build --wheel --no-isolation CXX=g++-9 conda_run python setup.py bdist_wheel
elif [ -n "${UBUNTU_VERSION}" ] && [ -n "${CLANG_VERSION}" ]; then elif [ -n "${UBUNTU_VERSION}" ] && [ -n "${CLANG_VERSION}" ]; then
# Triton needs <filesystem> which surprisingly is not available with clang-9 toolchain # Triton needs <filesystem> which surprisingly is not available with clang-9 toolchain
add-apt-repository -y ppa:ubuntu-toolchain-r/test add-apt-repository -y ppa:ubuntu-toolchain-r/test
apt-get install -y g++-9 apt-get install -y g++-9
CXX=g++-9 conda_run python -m build --wheel --no-isolation CXX=g++-9 conda_run python setup.py bdist_wheel
else else
conda_run python -m build --wheel --no-isolation conda_run python setup.py bdist_wheel
fi fi
# Copy the wheel to /opt for multi stage docker builds # Copy the wheel to /opt for multi stage docker builds

View File

@@ -44,12 +44,8 @@ function install_ucc() {
 ./autogen.sh
-if [[ -n "$CUDA_VERSION" && $CUDA_VERSION == 13* ]]; then
-NVCC_GENCODE="-gencode=arch=compute_86,code=compute_86"
-else
-# We only run distributed tests on Tesla M60 and A10G
-NVCC_GENCODE="-gencode=arch=compute_52,code=sm_52 -gencode=arch=compute_86,code=compute_86"
-fi
+# We only run distributed tests on Tesla M60 and A10G
+NVCC_GENCODE="-gencode=arch=compute_52,code=sm_52 -gencode=arch=compute_86,code=compute_86"
 if [[ -n "$ROCM_VERSION" ]]; then
 if [[ -n "$PYTORCH_ROCM_ARCH" ]]; then


@@ -9,7 +9,7 @@ set -xe
 function install_ubuntu() {
 . /etc/os-release
-if [[ ! " jammy noble " =~ " ${VERSION_CODENAME} " ]]; then
+if [[ ! " jammy " =~ " ${VERSION_CODENAME} " ]]; then
 echo "Ubuntu version ${VERSION_CODENAME} not supported"
 exit
 fi
@@ -35,24 +35,25 @@ function install_ubuntu() {
 # The xpu-smi packages
 apt-get install -y flex bison xpu-smi
-# Compute and Media Runtimes
-if [[ " ${VERSION_CODENAME} " =~ " noble " ]]; then
-apt-get install -y \
-intel-opencl-icd libze-intel-gpu1 libze1 \
-intel-media-va-driver-non-free libmfx-gen1 libvpl2 \
-libegl-mesa0 libegl1-mesa-dev libgbm1 libgl1-mesa-dev libgl1-mesa-dri \
-libglapi-mesa libgles2-mesa-dev libglx-mesa0 libigdgmm12 libxatracker2 mesa-va-drivers \
-mesa-vdpau-drivers mesa-vulkan-drivers va-driver-all vainfo hwinfo clinfo intel-ocloc
-else # jammy
-apt-get install -y \
-intel-opencl-icd libze-intel-gpu1 libze1 \
-intel-media-va-driver-non-free libmfx-gen1 libvpl2 \
-libegl-mesa0 libegl1-mesa libegl1-mesa-dev libgbm1 libgl1-mesa-dev libgl1-mesa-dri \
-libglapi-mesa libglx-mesa0 libigdgmm12 libxatracker2 mesa-va-drivers \
-mesa-vdpau-drivers mesa-vulkan-drivers va-driver-all vainfo hwinfo clinfo intel-ocloc
-fi
-# Development Packages
-apt-get install -y libigc-dev intel-igc-cm libigdfcl-dev libigfxcmrt-dev libze-dev
+if [[ "${XPU_DRIVER_TYPE,,}" == "lts" ]]; then
+# Compute and Media Runtimes
+apt-get install -y \
+intel-opencl-icd intel-level-zero-gpu level-zero \
+intel-media-va-driver-non-free libmfx1 libmfxgen1 libvpl2 \
+libegl-mesa0 libegl1-mesa libegl1-mesa-dev libgbm1 libgl1-mesa-dev libgl1-mesa-dri \
+libglapi-mesa libgles2-mesa-dev libglx-mesa0 libigdgmm12 libxatracker2 mesa-va-drivers \
+mesa-vdpau-drivers mesa-vulkan-drivers va-driver-all vainfo hwinfo clinfo
+# Development Packages
+apt-get install -y libigc-dev intel-igc-cm libigdfcl-dev libigfxcmrt-dev level-zero-dev
+else # rolling driver
+apt-get install -y \
+intel-opencl-icd libze-intel-gpu1 libze1 \
+intel-media-va-driver-non-free libmfx-gen1 libvpl2 \
+libegl-mesa0 libegl1-mesa libegl1-mesa-dev libgbm1 libgl1-mesa-dev libgl1-mesa-dri \
+libglapi-mesa libglx-mesa0 libigdgmm12 libxatracker2 mesa-va-drivers \
+mesa-vdpau-drivers mesa-vulkan-drivers va-driver-all vainfo hwinfo clinfo intel-ocloc
+apt-get install -y libigc-dev intel-igc-cm libigdfcl-dev libigfxcmrt-dev libze-dev
+fi
 # Install Intel Support Packages
 apt-get install -y ${XPU_PACKAGES}
@@ -64,14 +65,10 @@ function install_ubuntu() {
 function install_rhel() {
 . /etc/os-release
-if [[ "${ID}" == "rhel" ]]; then
 if [[ ! " 8.8 8.10 9.0 9.2 9.3 " =~ " ${VERSION_ID} " ]]; then
 echo "RHEL version ${VERSION_ID} not supported"
 exit
-fi
-elif [[ "${ID}" == "almalinux" ]]; then
-# Workaround for almalinux8 which used by quay.io/pypa/manylinux_2_28_x86_64
-VERSION_ID="8.8"
 fi
 dnf install -y 'dnf-command(config-manager)'
@@ -146,14 +143,14 @@ function install_sles() {
 XPU_DRIVER_VERSION=""
 if [[ "${XPU_DRIVER_TYPE,,}" == "lts" ]]; then
 # Use GPU driver LTS releases
-XPU_DRIVER_VERSION="/lts/2523"
+XPU_DRIVER_VERSION="/lts/2350"
 fi
-# Default use Intel® oneAPI Deep Learning Essentials 2025.1
-if [[ "$XPU_VERSION" == "2025.2" ]]; then
-XPU_PACKAGES="intel-deep-learning-essentials-2025.2"
-else
-XPU_PACKAGES="intel-deep-learning-essentials-2025.1"
-fi
+# Default use Intel® oneAPI Deep Learning Essentials 2025.0
+if [[ "$XPU_VERSION" == "2025.1" ]]; then
+XPU_PACKAGES="intel-deep-learning-essentials-2025.1"
+else
+XPU_PACKAGES="intel-deep-learning-essentials-2025.0"
+fi
 # The installation depends on the base OS


@@ -1,9 +0,0 @@
-#!/bin/bash
-set -xe
-# Script used in Linux x86 and aarch64 CD pipeline
-# Workaround for exposing statically linked libstdc++ CXX11 ABI symbols.
-# see: https://github.com/pytorch/pytorch/issues/133437
-LIBNONSHARED=$(gcc -print-file-name=libstdc++_nonshared.a)
-nm -g $LIBNONSHARED | grep " T " | grep recursive_directory_iterator | cut -c 20- > weaken-symbols.txt
-objcopy --weaken-symbols weaken-symbols.txt $LIBNONSHARED $LIBNONSHARED
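The deleted patch_libstdc.sh is a small how-to in itself: it asks gcc for the static libstdc++_nonshared.a, lists its global text (`T`) symbols with nm, keeps the recursive_directory_iterator ones, and weakens them with objcopy so they cannot clash with the same symbols defined elsewhere. A hedged Python sketch of the same symbol selection, parsing `nm -g` output by fields instead of relying on the fixed `cut -c 20-` column offset (assumes gcc and binutils are on PATH):

#!/usr/bin/env python3
"""Sketch: list the symbols patch_libstdc.sh would weaken."""
import subprocess

lib = subprocess.check_output(
    ["gcc", "-print-file-name=libstdc++_nonshared.a"], text=True
).strip()

for line in subprocess.check_output(["nm", "-g", lib], text=True).splitlines():
    parts = line.split()
    # nm rows for defined symbols look like: "<address> T <name>"
    if len(parts) == 3 and parts[1] == "T" and "recursive_directory_iterator" in parts[2]:
        print(parts[2])  # a candidate for objcopy --weaken-symbols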


@@ -74,14 +74,6 @@ RUN bash ./install_cuda.sh 13.0
 RUN bash ./install_magma.sh 13.0
 RUN ln -sf /usr/local/cuda-13.0 /usr/local/cuda
-# Install libibverbs for libtorch and copy to CUDA directory
-RUN apt-get update -y && \
-apt-get install -y libibverbs-dev librdmacm-dev && \
-cp /usr/lib/x86_64-linux-gnu/libmlx5.so* /usr/local/cuda/lib64/ && \
-cp /usr/lib/x86_64-linux-gnu/librdmacm.so* /usr/local/cuda/lib64/ && \
-cp /usr/lib/x86_64-linux-gnu/libibverbs.so* /usr/local/cuda/lib64/ && \
-cp /usr/lib/x86_64-linux-gnu/libnl* /usr/local/cuda/lib64/
 FROM cpu as rocm
 ARG ROCM_VERSION
 ARG PYTORCH_ROCM_ARCH


@@ -39,17 +39,13 @@ case ${DOCKER_TAG_PREFIX} in
 DOCKER_GPU_BUILD_ARG=""
 ;;
 rocm*)
-# we want the patch version of 7.0 instead
-if [[ "$GPU_ARCH_VERSION" == *"7.0"* ]]; then
-GPU_ARCH_VERSION="${GPU_ARCH_VERSION}.2"
-fi
 # we want the patch version of 6.4 instead
-if [[ "$GPU_ARCH_VERSION" == *"6.4"* ]]; then
-GPU_ARCH_VERSION="${GPU_ARCH_VERSION}.4"
+if [[ $(ver $GPU_ARCH_VERSION) -eq $(ver 6.4) ]]; then
+GPU_ARCH_VERSION="${GPU_ARCH_VERSION}.2"
 fi
 BASE_TARGET=rocm
 GPU_IMAGE=rocm/dev-ubuntu-22.04:${GPU_ARCH_VERSION}-complete
-PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201;gfx950;gfx1150;gfx1151"
+PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
 DOCKER_GPU_BUILD_ARG="--build-arg PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH} --build-arg ROCM_VERSION=${GPU_ARCH_VERSION}"
 ;;
 *)
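The two sides of this hunk compare ROCm versions differently: one pipes both values through a `ver` helper and compares numerically, the other uses a plain substring glob. A hedged Python sketch of the difference; the fixed-width packing inside `ver` is an assumption about that helper, which is not shown in this hunk:

def ver(v: str) -> int:
    # "6.4" -> 6004000: pack up to three dotted components, three digits each (assumption)
    parts = (v.split(".") + ["0", "0", "0"])[:3]
    return int("".join(f"{int(p):03d}" for p in parts))

gpu_arch_version = "6.4"
# numeric equality via ver(): matches "6.4" exactly
assert ver(gpu_arch_version) == ver("6.4")
# substring/glob test: also matches already-patched strings such as "6.4.1"
assert "6.4" in gpu_arch_version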


@@ -130,8 +130,7 @@ ENV LD_LIBRARY_PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib64:/op
 RUN for cpython_version in "cp312-cp312" "cp313-cp313" "cp313-cp313t"; do \
 /opt/python/${cpython_version}/bin/python -m pip install setuptools wheel; \
 done;
-ADD ./common/patch_libstdc.sh patch_libstdc.sh
-RUN bash ./patch_libstdc.sh && rm patch_libstdc.sh
 # cmake-3.18.4 from pip; force in case cmake3 already exists
 RUN yum install -y python3-pip && \
@@ -149,7 +148,7 @@ FROM cpu_final as rocm_final
 ARG ROCM_VERSION=6.0
 ARG PYTORCH_ROCM_ARCH
 ENV PYTORCH_ROCM_ARCH ${PYTORCH_ROCM_ARCH}
-ARG DEVTOOLSET_VERSION=13
+ARG DEVTOOLSET_VERSION=11
 ENV LDFLAGS="-Wl,-rpath=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib64 -Wl,-rpath=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib"
 # Somewhere in ROCm stack, we still use non-existing /opt/rocm/hip path,
 # below workaround helps avoid error
@@ -176,6 +175,6 @@ ENV XPU_DRIVER_TYPE ROLLING
 RUN python3 -m pip install --upgrade pip && \
 python3 -mpip install cmake==3.28.4
 ADD ./common/install_xpu.sh install_xpu.sh
-ENV XPU_VERSION 2025.2
+ENV XPU_VERSION 2025.1
 RUN bash ./install_xpu.sh && rm install_xpu.sh
 RUN pushd /opt/_internal && tar -xJf static-libs-for-embedding-only.tar.xz && popd


@@ -50,10 +50,6 @@ RUN rm install_ninja.sh
 ENV PATH=/opt/rh/gcc-toolset-${GCCTOOLSET_VERSION}/root/usr/bin:$PATH
 ENV LD_LIBRARY_PATH=/opt/rh/gcc-toolset-${GCCTOOLSET_VERSION}/root/usr/lib64:/opt/rh/gcc-toolset-${GCCTOOLSET_VERSION}/root/usr/lib:$LD_LIBRARY_PATH
-# Build a newer version of libgomp than that supported in in Almalinux 8.
-COPY ./common/install_libgomp.sh install_libgomp.sh
-RUN bash ./install_libgomp.sh && rm install_libgomp.sh
 # git236+ would refuse to run git commands in repos owned by other users
 # Which causes version check to fail, as pytorch repo is bind-mounted into the image
 # Override this behaviour by treating every folder as safe
@@ -66,13 +62,6 @@ ARG OPENBLAS_VERSION
 ADD ./common/install_openblas.sh install_openblas.sh
 RUN bash ./install_openblas.sh && rm install_openblas.sh
-# Install Arm Compute Library
-FROM base as arm_compute
-# use python3.9 to install scons
-RUN python3.9 -m pip install scons==4.7.0
-RUN ln -sf /opt/python/cp39-cp39/bin/scons /usr/local/bin
-COPY ./common/install_acl.sh install_acl.sh
-RUN bash ./install_acl.sh && rm install_acl.sh
 FROM base as final
 # remove unnecessary python versions
@@ -81,7 +70,4 @@ RUN rm -rf /opt/python/cp26-cp26mu /opt/_internal/cpython-2.6.9-ucs4
 RUN rm -rf /opt/python/cp33-cp33m /opt/_internal/cpython-3.3.6
 RUN rm -rf /opt/python/cp34-cp34m /opt/_internal/cpython-3.4.6
 COPY --from=openblas /opt/OpenBLAS/ /opt/OpenBLAS/
-COPY --from=arm_compute /acl /acl
-ENV LD_LIBRARY_PATH=/opt/OpenBLAS/lib:/acl/build/:$LD_LIBRARY_PATH
-ADD ./common/patch_libstdc.sh patch_libstdc.sh
-RUN bash ./patch_libstdc.sh && rm patch_libstdc.sh
+ENV LD_LIBRARY_PATH=/opt/OpenBLAS/lib:$LD_LIBRARY_PATH


@@ -86,15 +86,6 @@ FROM base as nvpl
 ADD ./common/install_nvpl.sh install_nvpl.sh
 RUN bash ./install_nvpl.sh && rm install_nvpl.sh
-# Install Arm Compute Library
-FROM base as arm_compute
-# use python3.9 to install scons
-RUN python3.9 -m pip install scons==4.7.0
-RUN ln -sf /opt/python/cp39-cp39/bin/scons /usr/local/bin
-COPY ./common/install_acl.sh install_acl.sh
-RUN bash ./install_acl.sh && rm install_acl.sh
-FROM base as final
 FROM final as cuda_final
 ARG BASE_CUDA_VERSION
 RUN rm -rf /usr/local/cuda-${BASE_CUDA_VERSION}
@@ -102,9 +93,5 @@ COPY --from=cuda /usr/local/cuda-${BASE_CUDA_VERSION} /usr/local/cuda-${BASE_CUDA_VERSION}
 COPY --from=magma /usr/local/cuda-${BASE_CUDA_VERSION} /usr/local/cuda-${BASE_CUDA_VERSION}
 COPY --from=nvpl /opt/nvpl/lib/ /usr/local/lib/
 COPY --from=nvpl /opt/nvpl/include/ /usr/local/include/
-COPY --from=arm_compute /acl /acl
 RUN ln -sf /usr/local/cuda-${BASE_CUDA_VERSION} /usr/local/cuda
 ENV PATH=/usr/local/cuda/bin:$PATH
-ENV LD_LIBRARY_PATH=/acl/build/:$LD_LIBRARY_PATH
-ADD ./common/patch_libstdc.sh patch_libstdc.sh
-RUN bash ./patch_libstdc.sh && rm patch_libstdc.sh


@@ -0,0 +1,71 @@
+FROM centos:8 as base
+ENV LC_ALL en_US.UTF-8
+ENV LANG en_US.UTF-8
+ENV LANGUAGE en_US.UTF-8
+ENV PATH /opt/rh/gcc-toolset-11/root/bin/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+# change to a valid repo
+RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-*.repo
+# enable to install ninja-build
+RUN sed -i 's|enabled=0|enabled=1|g' /etc/yum.repos.d/CentOS-Linux-PowerTools.repo
+RUN yum -y update
+RUN yum install -y wget curl perl util-linux xz bzip2 git patch which zlib-devel sudo
+RUN yum install -y autoconf automake make cmake gdb gcc-toolset-11-gcc-c++
+FROM base as openssl
+ADD ./common/install_openssl.sh install_openssl.sh
+RUN bash ./install_openssl.sh && rm install_openssl.sh
+# Install python
+FROM base as python
+RUN yum install -y openssl-devel zlib-devel bzip2-devel ncurses-devel sqlite-devel readline-devel tk-devel gdbm-devel libpcap-devel xz-devel libffi-devel
+ADD common/install_cpython.sh install_cpython.sh
+RUN bash ./install_cpython.sh && rm install_cpython.sh
+FROM base as conda
+ADD ./common/install_conda_docker.sh install_conda.sh
+RUN bash ./install_conda.sh && rm install_conda.sh
+RUN /opt/conda/bin/conda install -y cmake
+FROM base as intel
+# Install MKL
+COPY --from=python /opt/python /opt/python
+COPY --from=python /opt/_internal /opt/_internal
+COPY --from=conda /opt/conda /opt/conda
+ENV PATH=/opt/conda/bin:$PATH
+ADD ./common/install_mkl.sh install_mkl.sh
+RUN bash ./install_mkl.sh && rm install_mkl.sh
+FROM base as patchelf
+ADD ./common/install_patchelf.sh install_patchelf.sh
+RUN bash ./install_patchelf.sh && rm install_patchelf.sh
+RUN cp $(which patchelf) /patchelf
+FROM base as jni
+ADD ./common/install_jni.sh install_jni.sh
+ADD ./java/jni.h jni.h
+RUN bash ./install_jni.sh && rm install_jni.sh
+FROM base as libpng
+ADD ./common/install_libpng.sh install_libpng.sh
+RUN bash ./install_libpng.sh && rm install_libpng.sh
+FROM base as final
+COPY --from=openssl /opt/openssl /opt/openssl
+COPY --from=python /opt/python /opt/python
+COPY --from=python /opt/_internal /opt/_internal
+COPY --from=intel /opt/intel /opt/intel
+COPY --from=conda /opt/conda /opt/conda
+COPY --from=patchelf /usr/local/bin/patchelf /usr/local/bin/patchelf
+COPY --from=jni /usr/local/include/jni.h /usr/local/include/jni.h
+COPY --from=libpng /usr/local/bin/png* /usr/local/bin/
+COPY --from=libpng /usr/local/bin/libpng* /usr/local/bin/
+COPY --from=libpng /usr/local/include/png* /usr/local/include/
+COPY --from=libpng /usr/local/include/libpng* /usr/local/include/
+COPY --from=libpng /usr/local/lib/libpng* /usr/local/lib/
+COPY --from=libpng /usr/local/lib/pkgconfig /usr/local/lib/pkgconfig
+RUN yum install -y ninja-build


@@ -115,9 +115,6 @@ RUN env GRPC_PYTHON_BUILD_SYSTEM_OPENSSL=True pip3 install grpcio
 # cmake-3.28.0 from pip for onnxruntime
 RUN python3 -mpip install cmake==3.28.0
-ADD ./common/patch_libstdc.sh patch_libstdc.sh
-RUN bash ./patch_libstdc.sh && rm patch_libstdc.sh
 # build onnxruntime 1.21.0 from sources.
 # it is not possible to build it from sources using pip,
 # so just build it from upstream repository.


@@ -28,7 +28,6 @@ fi
 MANY_LINUX_VERSION=${MANY_LINUX_VERSION:-}
 DOCKERFILE_SUFFIX=${DOCKERFILE_SUFFIX:-}
 OPENBLAS_VERSION=${OPENBLAS_VERSION:-}
-ACL_VERSION=${ACL_VERSION:-}
 case ${image} in
 manylinux2_28-builder:cpu)
@@ -42,6 +41,13 @@ case ${image} in
 GPU_IMAGE=arm64v8/almalinux:8
 DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=13 --build-arg NINJA_VERSION=1.12.1"
 MANY_LINUX_VERSION="2_28_aarch64"
+OPENBLAS_VERSION="v0.3.30"
+;;
+manylinuxcxx11-abi-builder:cpu-cxx11-abi)
+TARGET=final
+GPU_IMAGE=""
+DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=9"
+MANY_LINUX_VERSION="cxx11-abi"
 ;;
 manylinuxs390x-builder:cpu-s390x)
 TARGET=final
@@ -75,25 +81,21 @@ case ${image} in
 DOCKERFILE_SUFFIX="_cuda_aarch64"
 ;;
 manylinux2_28-builder:rocm*)
-# we want the patch version of 7.0 instead
-if [[ "$GPU_ARCH_VERSION" == *"7.0"* ]]; then
-GPU_ARCH_VERSION="${GPU_ARCH_VERSION}.2"
-fi
 # we want the patch version of 6.4 instead
-if [[ "$GPU_ARCH_VERSION" == *"6.4"* ]]; then
-GPU_ARCH_VERSION="${GPU_ARCH_VERSION}.4"
+if [[ $(ver $GPU_ARCH_VERSION) -eq $(ver 6.4) ]]; then
+GPU_ARCH_VERSION="${GPU_ARCH_VERSION}.2"
 fi
 TARGET=rocm_final
 MANY_LINUX_VERSION="2_28"
 DEVTOOLSET_VERSION="11"
 GPU_IMAGE=rocm/dev-almalinux-8:${GPU_ARCH_VERSION}-complete
-PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201;gfx950;gfx1150;gfx1151"
+PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
 DOCKER_GPU_BUILD_ARG="--build-arg ROCM_VERSION=${GPU_ARCH_VERSION} --build-arg PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH} --build-arg DEVTOOLSET_VERSION=${DEVTOOLSET_VERSION}"
 ;;
 manylinux2_28-builder:xpu)
 TARGET=xpu_final
 GPU_IMAGE=amd64/almalinux:8
-DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=13"
+DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=11"
 MANY_LINUX_VERSION="2_28"
 ;;
 *)
@@ -119,8 +121,7 @@ tmp_tag=$(basename "$(mktemp -u)" | tr '[:upper:]' '[:lower:]')
 DOCKER_BUILDKIT=1 docker build \
 ${DOCKER_GPU_BUILD_ARG} \
 --build-arg "GPU_IMAGE=${GPU_IMAGE}" \
---build-arg "OPENBLAS_VERSION=${OPENBLAS_VERSION:-}" \
---build-arg "ACL_VERSION=${ACL_VERSION:-}" \
+--build-arg "OPENBLAS_VERSION=${OPENBLAS_VERSION}" \
 --target "${TARGET}" \
 -t "${tmp_tag}" \
 $@ \


@@ -10,6 +10,11 @@ BAD_SSL = "https://self-signed.badssl.com"
 print("Testing SSL certificate checking for Python:", sys.version)
+if sys.version_info[:2] < (2, 7) or sys.version_info[:2] < (3, 4):
+    print("This version never checks SSL certs; skipping tests")
+    sys.exit(0)
 EXC = OSError
 print(f"Connecting to {GOOD_SSL} should work")


@@ -10,11 +10,6 @@ boto3==1.35.42
 #Pinned versions: 1.19.12, 1.16.34
 #test that import:
-build==1.3.0
-#Description: A simple, correct Python build frontend.
-#Pinned versions: 1.3.0
-#test that import:
 click
 #Description: Command Line Interface Creation Kit
 #Pinned versions:
@@ -52,10 +47,10 @@ flatbuffers==24.12.23
 #Pinned versions: 24.12.23
 #test that import:
-hypothesis==6.56.4
+hypothesis==5.35.1
 # Pin hypothesis to avoid flakiness: https://github.com/pytorch/pytorch/issues/31136
 #Description: advanced library for generating parametrized tests
-#Pinned versions: 6.56.4
+#Pinned versions: 5.35.1
 #test that import: test_xnnpack_integration.py, test_pruning_op.py, test_nn.py
 junitparser==2.1.1
@@ -98,9 +93,8 @@ librosa==0.10.2 ; python_version == "3.12" and platform_machine != "s390x"
 #Pinned versions:
 #test that import:
-mypy==1.16.0 ; platform_system == "Linux"
+mypy==1.16.0
 # Pin MyPy version because new errors are likely to appear with each release
-# Skip on Windows as lots of type annotations are POSIX specific
 #Description: linter
 #Pinned versions: 1.16.0
 #test that import: test_typing.py, test_type_hints.py
@@ -111,17 +105,20 @@ networkx==2.8.8
 #Pinned versions: 2.8.8
 #test that import: functorch
-ninja==1.11.1.4
+ninja==1.11.1.3
 #Description: build system. Used in some tests. Used in build to generate build
 #time tracing information
-#Pinned versions: 1.11.1.4
+#Pinned versions: 1.11.1.3
 #test that import: run_test.py, test_cpp_extensions_aot.py,test_determination.py
+numba==0.49.0 ; python_version < "3.9" and platform_machine != "s390x"
+numba==0.55.2 ; python_version == "3.9" and platform_machine != "s390x"
 numba==0.55.2 ; python_version == "3.10" and platform_machine != "s390x"
 numba==0.60.0 ; python_version == "3.12" and platform_machine != "s390x"
 #Description: Just-In-Time Compiler for Numerical Functions
-#Pinned versions: 0.55.2, 0.60.0
+#Pinned versions: 0.54.1, 0.49.0, <=0.49.1
 #test that import: test_numba_integration.py
+#For numba issue see https://github.com/pytorch/pytorch/issues/51511
 #Need release > 0.61.2 for s390x due to https://github.com/numba/numba/pull/10073
 #numpy
@@ -136,14 +133,12 @@ numba==0.60.0 ; python_version == "3.12" and platform_machine != "s390x"
 #test_nn.py, test_namedtensor.py, test_linalg.py, test_jit_cuda_fuser.py,
 #test_jit.py, test_indexing.py, test_datapipe.py, test_dataloader.py,
 #test_binary_ufuncs.py
-numpy==1.22.4; python_version == "3.10"
+numpy==1.22.4; python_version == "3.9" or python_version == "3.10"
 numpy==1.26.2; python_version == "3.11" or python_version == "3.12"
-numpy==2.1.2; python_version >= "3.13" and python_version < "3.14"
-numpy==2.3.4; python_version >= "3.14"
+numpy==2.1.2; python_version >= "3.13"
 pandas==2.0.3; python_version < "3.13"
-pandas==2.2.3; python_version >= "3.13" and python_version < "3.14"
-pandas==2.3.3; python_version >= "3.14"
+pandas==2.2.3; python_version >= "3.13"
 #onnxruntime
 #Description: scoring engine for Open Neural Network Exchange (ONNX) models
@@ -155,8 +150,7 @@ opt-einsum==3.3
 #Pinned versions: 3.3
 #test that import: test_linalg.py
-optree==0.13.0 ; python_version < "3.14"
-optree==0.17.0 ; python_version >= "3.14"
+optree==0.13.0
 #Description: A library for tree manipulation
 #Pinned versions: 0.13.0
 #test that import: test_vmap.py, test_aotdispatch.py, test_dynamic_shapes.py,
@@ -171,12 +165,12 @@ optree==0.17.0 ; python_version >= "3.14"
 pillow==11.0.0
 #Description: Python Imaging Library fork
-#Pinned versions: 11.0.0
+#Pinned versions: 10.3.0
 #test that import:
-protobuf==5.29.5
+protobuf==5.29.4
 #Description: Google's data interchange format
-#Pinned versions: 5.29.5
+#Pinned versions: 5.29.4
 #test that import: test_tensorboard.py, test/onnx/*
 psutil
@@ -219,7 +213,7 @@ pytest-subtests==0.13.1
 #Pinned versions:
 #test that import:
-xdoctest==1.3.0
+xdoctest==1.1.0
 #Description: runs doctests in pytest
 #Pinned versions: 1.1.0
 #test that import:
@@ -244,9 +238,10 @@ pygments==2.15.0
 #Pinned versions: 14.1.0
 #test that import:
-scikit-image==0.22.0
+scikit-image==0.19.3 ; python_version < "3.10"
+scikit-image==0.22.0 ; python_version >= "3.10"
 #Description: image processing routines
-#Pinned versions: 0.22.0
+#Pinned versions:
 #test that import: test_nn.py
 #scikit-learn
@@ -255,8 +250,7 @@ scikit-image==0.22.0
 #test that import:
 scipy==1.10.1 ; python_version <= "3.11"
-scipy==1.14.1 ; python_version > "3.11" and python_version < "3.14"
-scipy==1.16.2 ; python_version >= "3.14"
+scipy==1.14.1 ; python_version >= "3.12"
 # Pin SciPy because of failing distribution tests (see #60347)
 #Description: scientific python
 #Pinned versions: 1.10.1
@@ -270,7 +264,7 @@ scipy==1.16.2 ; python_version >= "3.14"
 #test that import:
 # needed by torchgen utils
-typing-extensions==4.12.2
+typing-extensions>=4.10.0
 #Description: type hints for python
 #Pinned versions:
 #test that import:
@@ -328,10 +322,11 @@ pywavelets==1.7.0 ; python_version >= "3.12"
 #Pinned versions: 1.4.1
 #test that import:
-lxml==5.3.0 ; python_version < "3.14"
-lxml==6.0.2 ; python_version >= "3.14"
+lxml==5.3.0
 #Description: This is a requirement of unittest-xml-reporting
+# Python-3.9 binaries
 PyGithub==2.3.0
 sympy==1.13.3
@@ -339,14 +334,12 @@ sympy==1.13.3
 #Pinned versions:
 #test that import:
-onnx==1.19.1 ; python_version < "3.14"
-# Unpin once Python 3.14 is supported. See onnxruntime issue 26309.
-onnx==1.18.0 ; python_version == "3.14"
+onnx==1.18.0
 #Description: Required by onnx tests, and mypy and test_public_bindings.py when checking torch.onnx._internal
 #Pinned versions:
 #test that import:
-onnxscript==0.5.4
+onnxscript==0.4.0
 #Description: Required by mypy and test_public_bindings.py when checking torch.onnx._internal
 #Pinned versions:
 #test that import:
@@ -366,10 +359,9 @@ pwlf==2.2.1
 #test that import: test_sac_estimator.py
 # To build PyTorch itself
-pyyaml==6.0.3
+pyyaml
 pyzstd
-setuptools==78.1.1
-packaging==23.1
+setuptools>=70.1.0
 six
 scons==4.5.2 ; platform_machine == "aarch64"
@@ -384,16 +376,13 @@ dataclasses_json==0.6.7
 #Pinned versions: 0.6.7
 #test that import:
-cmake==3.31.6
+cmake==4.0.0
 #Description: required for building
-tlparse==0.4.0
+tlparse==0.3.30
 #Description: required for log parsing
-filelock==3.18.0
-#Description: required for inductor testing
-cuda-bindings>=12.0,<13.0 ; platform_machine != "s390x" and platform_system != "Darwin"
+cuda-bindings>=12.0,<13.0 ; platform_machine != "s390x"
 #Description: required for testing CUDAGraph::raw_cuda_graph(). See https://nvidia.github.io/cuda-python/cuda-bindings/latest/support.html for how this version was chosen. Note "Any fix in the latest bindings would be backported to the prior major version" means that only the newest version of cuda-bindings will get fixes. Depending on the latest version of 12.x is okay because all 12.y versions will be supported via "CUDA minor version compatibility". Pytorch builds against 13.z versions of cuda toolkit work with 12.x versions of cuda-bindings as well because newer drivers work with old toolkits.
 #test that import: test_cuda.py
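The cuda-bindings and mypy pins above gate installation with PEP 508 environment markers (`platform_machine != "s390x"` and similar), which pip evaluates against the running interpreter. A short sketch using the `packaging` library, which implements the same marker grammar:

from packaging.markers import Marker

# Marker string taken from the requirements hunk above
marker = Marker('platform_machine != "s390x" and platform_system != "Darwin"')
# Evaluate against the current interpreter/platform...
print(marker.evaluate())
# ...or against an explicit environment, e.g. an s390x Linux box:
print(marker.evaluate({"platform_machine": "s390x", "platform_system": "Linux"}))  # False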


@@ -1,10 +1,7 @@
-sphinx==7.2.6
+sphinx==5.3.0
 #Description: This is used to generate PyTorch docs
-#Pinned versions: 7.2.6
+#Pinned versions: 5.3.0
-pytorch_sphinx_theme2==0.2.0
-#Description: This is needed to generate PyTorch docs
-#Pinned versions: 0.2.0
+-e git+https://github.com/pytorch/pytorch_sphinx_theme.git@722b7e6f9ca512fcc526ad07d62b3d28c50bb6cd#egg=pytorch_sphinx_theme2
 # TODO: sphinxcontrib.katex 0.9.0 adds a local KaTeX server to speed up pre-rendering
 # but it doesn't seem to work and hangs around idly. The initial thought that it is probably
@@ -32,17 +29,17 @@ tensorboard==2.18.0 ; python_version >= "3.13"
 #Description: This is used to generate PyTorch docs
 #Pinned versions: 2.13.0
-breathe==4.36.0
+breathe==4.34.0
 #Description: This is used to generate PyTorch C++ docs
-#Pinned versions: 4.36.0
+#Pinned versions: 4.34.0
-exhale==0.3.7
+exhale==0.2.3
 #Description: This is used to generate PyTorch C++ docs
-#Pinned versions: 0.3.7
+#Pinned versions: 0.2.3
-docutils==0.20
+docutils==0.16
 #Description: This is used to generate PyTorch C++ docs
-#Pinned versions: 0.20
+#Pinned versions: 0.16
 bs4==0.0.1
 #Description: This is used to generate PyTorch C++ docs
@@ -52,13 +49,13 @@ IPython==8.12.0
 #Description: This is used to generate PyTorch functorch docs
 #Pinned versions: 8.12.0
-myst-nb==1.3.0
+myst-nb==0.17.2
 #Description: This is used to generate PyTorch functorch and torch.compile docs.
-#Pinned versions: 1.3.0
+#Pinned versions: 0.17.2
 # The following are required to build torch.distributed.elastic.rendezvous.etcd* docs
 python-etcd==0.4.5
 sphinx-copybutton==0.5.0
-sphinx-design==0.6.1
+sphinx-design==0.4.0
 sphinxcontrib-mermaid==1.0.0
-myst-parser==4.0.1
+myst-parser==0.18.1


@@ -1 +1 @@
-3.5.1
+3.4.0


@@ -1 +1 @@
-3.5.0
+3.4.0


@@ -52,13 +52,9 @@ ENV INSTALLED_VISION ${VISION}
 # Install rocm
 ARG ROCM_VERSION
-RUN mkdir ci_commit_pins
-COPY ./common/common_utils.sh common_utils.sh
-COPY ./ci_commit_pins/rocm-composable-kernel.txt ci_commit_pins/rocm-composable-kernel.txt
 COPY ./common/install_rocm.sh install_rocm.sh
 RUN bash ./install_rocm.sh
-RUN rm install_rocm.sh common_utils.sh
-RUN rm -r ci_commit_pins
+RUN rm install_rocm.sh
 COPY ./common/install_rocm_magma.sh install_rocm_magma.sh
 RUN bash ./install_rocm_magma.sh ${ROCM_VERSION}
 RUN rm install_rocm_magma.sh


@@ -54,15 +54,12 @@ ENV OPENSSL_DIR /opt/openssl
 RUN rm install_openssl.sh
 ARG INDUCTOR_BENCHMARKS
-ARG ANACONDA_PYTHON_VERSION
-ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION
 COPY ./common/install_inductor_benchmark_deps.sh install_inductor_benchmark_deps.sh
 COPY ./common/common_utils.sh common_utils.sh
 COPY ci_commit_pins/huggingface-requirements.txt huggingface-requirements.txt
 COPY ci_commit_pins/timm.txt timm.txt
-COPY ci_commit_pins/torchbench.txt torchbench.txt
 RUN if [ -n "${INDUCTOR_BENCHMARKS}" ]; then bash ./install_inductor_benchmark_deps.sh; fi
-RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface-requirements.txt torchbench.txt
+RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface-requirements.txt
 # Install XPU Dependencies
 ARG XPU_VERSION


@@ -66,7 +66,6 @@ ENV NCCL_LIB_DIR="/usr/local/cuda/lib64/"
 # (optional) Install UCC
 ARG UCX_COMMIT
 ARG UCC_COMMIT
-ARG CUDA_VERSION
 ENV UCX_COMMIT $UCX_COMMIT
 ENV UCC_COMMIT $UCC_COMMIT
 ENV UCX_HOME /usr
@@ -100,16 +99,9 @@ COPY ./common/common_utils.sh common_utils.sh
 COPY ci_commit_pins/huggingface-requirements.txt huggingface-requirements.txt
 COPY ci_commit_pins/timm.txt timm.txt
 COPY ci_commit_pins/torchbench.txt torchbench.txt
-# Only build aoti cpp tests when INDUCTOR_BENCHMARKS is set to True
-ENV BUILD_AOT_INDUCTOR_TEST ${INDUCTOR_BENCHMARKS}
 RUN if [ -n "${INDUCTOR_BENCHMARKS}" ]; then bash ./install_inductor_benchmark_deps.sh; fi
 RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface-requirements.txt torchbench.txt
-ARG INSTALL_MINGW
-COPY ./common/install_mingw.sh install_mingw.sh
-RUN if [ -n "${INSTALL_MINGW}" ]; then bash ./install_mingw.sh; fi
-RUN rm install_mingw.sh
 ARG TRITON
 ARG TRITON_CPU
@@ -143,15 +135,6 @@ COPY ci_commit_pins/halide.txt halide.txt
 RUN if [ -n "${HALIDE}" ]; then bash ./install_halide.sh; fi
 RUN rm install_halide.sh common_utils.sh halide.txt
-ARG PALLAS
-ARG CUDA_VERSION
-# Install JAX with CUDA support (for Pallas)
-COPY ./common/install_jax.sh install_jax.sh
-COPY ./common/common_utils.sh common_utils.sh
-COPY ./ci_commit_pins/jax.txt /ci_commit_pins/jax.txt
-RUN if [ -n "${PALLAS}" ]; then bash ./install_jax.sh ${CUDA_VERSION}; fi
-RUN rm -f install_jax.sh common_utils.sh /ci_commit_pins/jax.txt
 ARG ONNX
 # Install ONNX dependencies
 COPY ./common/install_onnx.sh ./common/common_utils.sh ./


@@ -7,4 +7,4 @@ set -ex
 SCRIPTPATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-USE_NVSHMEM=0 USE_CUSPARSELT=0 BUILD_PYTHONLESS=1 DESIRED_PYTHON="3.10" ${SCRIPTPATH}/../manywheel/build.sh
+USE_NVSHMEM=0 USE_CUSPARSELT=0 BUILD_PYTHONLESS=1 DESIRED_PYTHON="3.9" ${SCRIPTPATH}/../manywheel/build.sh


@@ -8,11 +8,9 @@ from abc import ABC, abstractmethod
 try:
-    from collections.abc import Callable  # Python 3.11+
-    from typing import Any, Required, TypedDict
+    from typing import Any, Callable, Required, TypedDict  # Python 3.11+
 except ImportError:
-    from collections.abc import Callable
-    from typing import Any, TypedDict
+    from typing import Any, Callable, TypedDict
     from typing_extensions import Required  # Fallback for Python <3.11
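Context for the import shuffle above: `Required` only landed in `typing` in Python 3.11, so older interpreters must fall back to `typing_extensions`. A hedged sketch of what `Required` buys you; the `Step` TypedDict is illustrative, not from the repo:

try:
    from typing import Required, TypedDict  # Python 3.11+
except ImportError:
    from typing import TypedDict
    from typing_extensions import Required

class Step(TypedDict, total=False):
    name: Required[str]   # must always be present, even though total=False
    timeout_minutes: int  # optional key

step: Step = {"name": "build"}  # OK: timeout_minutes may be omitted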


@@ -1,56 +1,14 @@
 from __future__ import annotations
-import logging
 import os
-import textwrap
 from pathlib import Path
-from typing import TYPE_CHECKING
-from cli.lib.common.utils import get_wheels
-from jinja2 import Template
-if TYPE_CHECKING:
-    from collections.abc import Iterable, Mapping
+from typing import Iterable, Mapping, Optional
+import logging
+import xml.etree.ElementTree as ET
+from pathlib import Path
+from typing import Iterable, Tuple
 logger = logging.getLogger(__name__)
-_TPL_CONTENT = Template(
-    textwrap.dedent("""\
-        ## {{ title }}
-        ```{{ lang }}
-        {{ content }}
-        ```
-    """)
-)
-_TPL_LIST_ITEMS = Template(
-    textwrap.dedent("""\
-        ## {{ title }}
-        {% for it in items %}
-        - {{ it.pkg }}: {{ it.relpath }}
-        {% else %}
-        _(no item found)_
-        {% endfor %}
-    """)
-)
-_TPL_TABLE = Template(
-    textwrap.dedent("""\
-        {%- if rows %}
-        | {{ cols | join(' | ') }} |
-        |{%- for _ in cols %} --- |{%- endfor %}
-        {%- for r in rows %}
-        | {%- for c in cols %} {{ r.get(c, "") }} |{%- endfor %}
-        {%- endfor %}
-        {%- else %}
-        _(no data)_
-        {%- endif %}
-    """)
-)
 def gh_summary_path() -> Path | None:
     """Return the Path to the GitHub step summary file, or None if not set."""
@@ -69,14 +27,13 @@ def write_gh_step_summary(md: str, *, append_content: bool = True) -> bool:
     """
     sp = gh_summary_path()
     if not sp:
-        # When running locally, just log to console instead of failing.
         logger.info("[gh-summary] GITHUB_STEP_SUMMARY not set, skipping write.")
         return False
-    sp.parent.mkdir(parents=True, exist_ok=True)
-    md_clean = textwrap.dedent(md).strip() + "\n"
     mode = "a" if append_content else "w"
     with sp.open(mode, encoding="utf-8") as f:
-        f.write(md_clean)
+        f.write(md.rstrip() + "\n")
     return True
@@ -85,59 +42,182 @@ def md_heading(text: str, level: int = 2) -> str:
     return f"{'#' * max(1, min(level, 6))} {text}\n"
+def md_kv_table(rows: Iterable[Mapping[str, str | int | float]]) -> str:
+    """
+    Render a list of dictionaries as a Markdown table.
+    The first row (header) is derived from the union of all keys.
+
+    # Suppose you want to summarize benchmark results
+    rows = [
+        {"name": "transformer-small", "p50": 12.3, "p90(ms)": 18.4},
+        {"name": "transformer-large", "p50": 45.1, "p90(ms)": 60.7},
+    ]
+    content = []
+    content.append(md_heading("Benchmark Results", level=2))
+    content.append(md_kv_table(rows))
+    content.append(md_details("Raw logs", "```\n[INFO] benchmark log ...\n```"))
+    # Join the pieces into one Markdown block
+    markdown = '\n'.join(content)
+    # Write to GitHub Actions summary (or log locally if not in CI)
+    write_gh_step_summary(markdown, append=True)
+    """
+    rows = list(rows)
+    if not rows:
+        return "_(no data)_\n"
+    # Collect all columns across all rows
+    cols = list({k for r in rows for k in r.keys()})
+    header = "| " + " | ".join(cols) + " |\n"
+    sep = "|" + "|".join([" --- " for _ in cols]) + "|\n"
+    lines = []
+    for r in rows:
+        line = "| " + " | ".join(str(r.get(c, "")) for c in cols) + " |\n"
+        lines.append(line)
+    return header + sep + "".join(lines) + "\n"
 def md_details(summary: str, content: str) -> str:
     """Generate a collapsible <details> block with a summary and inner content."""
     return f"<details>\n<summary>{summary}</summary>\n\n{content}\n\n</details>\n"
+# ---- helper test to generate a summary for list of pytest failures ------#
+def summarize_failures_by_test_command(
+    xml_and_labels: Iterable[Tuple[str | Path, str]],
+    *,
+    title: str = "Pytest Failures by Test Command",
+    dedupe_within_command: bool = True,
+):
+    """
+    Args:
+        xml_and_labels: list of (xml_path, label) pairs.
+            Each XML corresponds to one pytest subprocess (one test command).
+    Behavior:
+        - Writes a section per test command if it has failures.
+        - Each failed test is listed as 'path/to/test.py:test_name'.
+    Example:
+        xml = [
+            ("reports/junit_cmd0.xml", "pytest -v -s tests/unit"),
+            ("reports/junit_cmd1.xml", "pytest -v -s tests/integration"),
+            ("reports/junit_cmd2.xml", "pytest -v -s tests/entrypoints"),
+        ]
+        summarize_failures_by_test_command(
+            xmls,
+            title="Consolidated Pytest Failures",
+        )
+    """
+    write_gh_step_summary(md_heading(title, level=2))
+    for xml_path, label in xml_and_labels:
+        xmlp = Path(xml_path)
+        failed = _parse_failed_simple(xmlp)
+        if dedupe_within_command:
+            failed = sorted(set(failed))
+        if not failed:
+            continue  # skip commands with no failures
+        write_gh_step_summary(md_heading(f"Test Command: {label}", level=3))
+        lines = "\n".join(f"- {item}" for item in failed)
+        write_gh_step_summary(lines + "\n")
+def _to_simple_name_from_testcase(tc: ET.Element) -> str:
+    """
+    Convert a <testcase> into 'path/to/test.py:test_name' format.
+    Prefer the 'file' attribute if available, else fall back to classname.
+    """
+    name = tc.attrib.get("name", "")
+    file_attr = tc.attrib.get("file")
+    if file_attr:
+        return f"{file_attr}:{name}"
+    classname = tc.attrib.get("classname", "")
+    parts = classname.split(".") if classname else []
+    if len(parts) >= 1:
+        # drop last part if it's a class, treat rest as module path
+        mod_parts = parts[:-1] if len(parts) >= 2 else parts
+        mod_path = "/".join(mod_parts) + ".py" if mod_parts else "unknown.py"
+        return f"{mod_path}:{name}"
+    return f"unknown.py:{name or 'unknown_test'}"
+def _parse_failed_simple(xml_path: Path) -> list[str]:
+    """
+    Parse one XML, return failures as ['tests/a_test.py:test_x', ...].
+    Only include <failure> and <error>.
+    """
+    if not xml_path.exists():
+        return []
+    tree = ET.parse(xml_path)
+    root = tree.getroot()
+    failed = []
+    for tc in root.iter("testcase"):
+        if any(x.tag in {"failure", "error"} for x in tc):
+            failed.append(_to_simple_name_from_testcase(tc))
+    return failed
 def summarize_content_from_file(
     output_dir: Path,
     freeze_file: str,
-    title: str = "Content from file",
+    title: str = "Wheels (pip freeze)",
     code_lang: str = "",  # e.g. "text" or "ini"
 ) -> bool:
+    """
+    Read a text file from output_dir/freeze_file and append it to
+    the GitHub Step Summary as a Markdown code block.
+    Returns True if something was written, False otherwise.
+    """
     f = Path(output_dir) / freeze_file
     if not f.exists():
         return False
     content = f.read_text(encoding="utf-8").strip()
-    md = render_content(content, title=title, lang=code_lang)
-    return write_gh_step_summary(md)
-def summarize_wheels(path: Path, title: str = "Wheels", max_depth: int = 3):
-    items = get_wheels(path, max_depth=max_depth)
-    if not items:
-        return False
-    md = render_list(items, title=title)
-    return write_gh_step_summary(md)
-def md_kv_table(rows: Iterable[Mapping[str, str | int | float]]) -> str:
-    """
-    Render a list of dicts as a Markdown table using Jinja template.
-    """
-    rows = list(rows)
-    cols = list({k for r in rows for k in r.keys()})
-    md = _TPL_TABLE.render(cols=cols, rows=rows).strip() + "\n"
-    return md
-def render_list(
-    items: Iterable[str],
-    *,
-    title: str = "List",
-) -> str:
-    tpl = _TPL_LIST_ITEMS
-    md = tpl.render(title=title, items=items)
-    return md
-def render_content(
-    content: str,
-    *,
-    title: str = "Content",
-    lang: str = "text",
-) -> str:
-    tpl = _TPL_CONTENT
-    md = tpl.render(title=title, content=content, lang=lang)
-    return md
+    if not content:
+        return False
+    md = []
+    md.append(md_heading(title, 2))
+    md.append(f"```{code_lang}".rstrip())
+    md.append(content)
+    md.append("```")
+    return write_gh_step_summary("\n".join(md) + "\n")
+def summarize_wheels(
+    output_dir: Path,
+    title: str = "Wheels",
+    max_depth: Optional[int] = None,  # None = unlimited
+):
+    """
+    Walk output_dir up to max_depth and list all *.whl files.
+    Grouped as 'package: filename.whl'.
+    Args:
+        output_dir: base directory to search
+        title: section title in GH summary
+        max_depth: maximum folder depth relative to output_dir (0 = only top-level)
+    """
+    if not output_dir.exists():
+        return False
+    root = Path(output_dir)
+    lines = [md_heading(title, 2)]
+    for dirpath, _, filenames in os.walk(root):
+        depth = Path(dirpath).relative_to(root).parts
+        if max_depth is not None and len(depth) > max_depth:
+            # skip going deeper
+            continue
+        for fname in sorted(filenames):
+            if not fname.endswith(".whl"):
+                continue
+            pkg = fname.split("-")[0]
+            relpath = str(Path(dirpath) / fname).replace(str(root) + os.sep, "")
+            lines.append(f"- {pkg}: {relpath}")
+    if len(lines) > 1:
+        write_gh_step_summary("\n".join(lines) + "\n")
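For orientation, a minimal sketch of how the added helpers in this file compose into one step summary. It assumes it runs inside the same module (so the functions above are in scope), that the runner sets `GITHUB_STEP_SUMMARY`, and the row data and wheel directory are made up:

from pathlib import Path

rows = [
    {"package": "torch", "status": "built"},
    {"package": "vllm", "status": "built"},
]
write_gh_step_summary(md_heading("Build Report", 2))
write_gh_step_summary(md_kv_table(rows))           # renders the Markdown table
summarize_wheels(Path("out/wheels"), title="Wheels", max_depth=1)  # lists *.whl files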


@@ -57,8 +57,8 @@ def clone_external_repo(target: str, repo: str, dst: str = "", update_submodules
         logger.info("Successfully cloned %s", target)
         return r, commit
-    except GitCommandError:
-        logger.exception("Git operation failed")
+    except GitCommandError as e:
+        logger.error("Git operation failed: %s", e)
         raise
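The hunk above trades `logger.exception` for `logger.error` with an explicit message. The practical difference: `logger.exception` logs at ERROR level and appends the current traceback automatically, while `logger.error("...: %s", e)` records only the message text. A minimal demonstration:

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("demo")

try:
    raise ValueError("boom")
except ValueError:
    # Prints "operation failed" plus the full traceback of the active exception.
    # logger.error("operation failed: %s", e) would print only the message.
    logger.exception("operation failed")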


@@ -4,7 +4,7 @@ import shlex
 import shutil
 import sys
 from collections.abc import Iterable
-from importlib.metadata import PackageNotFoundError, version  # noqa: UP035
+from importlib.metadata import PackageNotFoundError, version
 from typing import Optional, Union
 from cli.lib.common.utils import run_command


@@ -8,7 +8,6 @@ import shlex
 import subprocess
 import sys
 from contextlib import contextmanager
-from pathlib import Path
 from typing import Optional
@@ -116,24 +115,3 @@ def working_directory(path: str):
         yield
     finally:
         os.chdir(prev_cwd)
-def get_wheels(
-    output_dir: Path,
-    max_depth: Optional[int] = None,
-) -> list[str]:
-    """Return a list of wheels found in the given output directory."""
-    root = Path(output_dir)
-    if not root.exists():
-        return []
-    items = []
-    for dirpath, _, filenames in os.walk(root):
-        depth = Path(dirpath).relative_to(root).parts
-        if max_depth is not None and len(depth) > max_depth:
-            continue
-        for fname in sorted(filenames):
-            if fname.endswith(".whl"):
-                pkg = fname.split("-")[0]
-                relpath = str((Path(dirpath) / fname).relative_to(root))
-                items.append({"pkg": pkg, "relpath": relpath})
-    return items
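The removed `get_wheels` uses a common depth-limiting idiom: the number of components of `dirpath` relative to the walk root is the current depth. Note that its `continue` only skips listing and still traverses deeper directories; clearing `dirnames` in place actually prunes the walk. A hedged sketch of the pruning variant:

import os
from pathlib import Path

def walk_limited(root: Path, max_depth: int):
    for dirpath, dirnames, filenames in os.walk(root):
        depth = len(Path(dirpath).relative_to(root).parts)
        if depth >= max_depth:
            dirnames.clear()  # prune: os.walk will not descend further here
        yield Path(dirpath), filenames

for d, files in walk_limited(Path("."), max_depth=2):
    print(d, len(files))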


@@ -1,27 +1,15 @@
 import logging
-import os
-import textwrap
+from pathlib import Path
+import re
 from typing import Any
-from cli.lib.common.gh_summary import write_gh_step_summary
 from cli.lib.common.git_helper import clone_external_repo
 from cli.lib.common.pip_helper import pip_install_packages
 from cli.lib.common.utils import run_command, temp_environ, working_directory
-from jinja2 import Template
+from cli.lib.common.gh_summary import md_heading, write_gh_step_summary
 logger = logging.getLogger(__name__)
-_TPL_VLLM_INFO = Template(
-    textwrap.dedent("""\
-        ## Vllm against Pytorch CI Test Summary
-        **Vllm Commit**: [{{ vllm_commit }}](https://github.com/vllm-project/vllm/commit/{{ vllm_commit }})
-        {%- if torch_sha %}
-        **Pytorch Commit**: [{{ torch_sha }}](https://github.com/pytorch/pytorch/commit/{{ torch_sha }})
-        {%- endif %}
-    """)
-)
 def sample_vllm_test_library():
     """
@@ -41,6 +29,7 @@ def sample_vllm_test_library():
             "pytest -v -s basic_correctness/test_cumem.py",
             "pytest -v -s basic_correctness/test_basic_correctness.py",
             "pytest -v -s basic_correctness/test_cpu_offload.py",
+            "VLLM_TEST_ENABLE_ARTIFICIAL_PREEMPT=1 pytest -v -s basic_correctness/test_preemption.py",
         ],
     },
     "vllm_basic_models_test": {
@@ -67,12 +56,16 @@ def sample_vllm_test_library():
                 "-v",
                 "-s",
                 "entrypoints/llm",
+                "--ignore=entrypoints/llm/test_lazy_outlines.py",
                 "--ignore=entrypoints/llm/test_generate.py",
+                "--ignore=entrypoints/llm/test_generate_multiple_loras.py",
                 "--ignore=entrypoints/llm/test_collective_rpc.py",
             ]
         ),
-        "pytest -v -s entrypoints/llm/test_generate.py",
-        "pytest -v -s entrypoints/offline_mode",
+        "pytest -v -s entrypoints/llm/test_lazy_outlines.py",
+        "pytest -v -s entrypoints/llm/test_generate.py ",
+        "pytest -v -s entrypoints/llm/test_generate_multiple_loras.py",
+        "VLLM_USE_V1=0 pytest -v -s entrypoints/offline_mode",
     ],
 },
 "vllm_regression_test": {
@@ -92,24 +85,14 @@ def sample_vllm_test_library():
     "num_gpus": 4,
     "steps": [
         "pytest -v -s -x lora/test_chatglm3_tp.py",
-        "echo $VLLM_WORKER_MULTIPROC_METHOD",
         "pytest -v -s -x lora/test_llama_tp.py",
-        "pytest -v -s -x lora/test_llm_with_multi_loras.py",
+        "pytest -v -s -x lora/test_multi_loras_with_tp.py",
     ],
 },
-"vllm_distributed_test_28_failure_test": {
-    "title": "Distributed Tests (2 GPUs) pytorch 2.8 release failure",
-    "id": "vllm_distributed_test_28_failure_test",
-    "env_vars": {
-        "VLLM_WORKER_MULTIPROC_METHOD": "spawn",
-    },
-    "num_gpus": 4,
-    "steps": [
-        "pytest -v -s distributed/test_sequence_parallel.py",
-    ],
-},
-"vllm_lora_28_failure_test": {
-    "title": "LoRA pytorch 2.8 failure test",
-    "id": "vllm_lora_28_failure_test",
+"vllm_lora_280_failure_test": {
+    "title": "LoRA 280 failure test",
+    "id": "vllm_lora_280_failure_test",
     "steps": ["pytest -v lora/test_quant_model.py"],
 },
 "vllm_multi_model_processor_test": {
@@ -120,15 +103,6 @@ def sample_vllm_test_library():
         "pytest -v -s models/multimodal/processing --ignore models/multimodal/processing/test_tensor_schema.py",
     ],
 },
-"vllm_multi_model_test_28_failure_test": {
-    "title": "Multi-Model Test (Failed 2.8 release)",
-    "id": "vllm_multi_model_test_28_failure_test",
-    "package_install": ["git+https://github.com/TIGER-AI-Lab/Mantis.git"],
-    "steps": [
-        "pytest -v -s models/multimodal/generation/test_voxtral.py",
-        "pytest -v -s models/multimodal/pooling",
-    ],
-},
 "vllm_pytorch_compilation_unit_tests": {
     "title": "PyTorch Compilation Unit Tests",
     "id": "vllm_pytorch_compilation_unit_tests",
@@ -143,28 +117,6 @@ def sample_vllm_test_library():
         "pytest -v -s compile/test_decorator.py",
     ],
 },
-"vllm_language_model_test_extended_generation_28_failure_test": {
-    "title": "Language Models Test (Extended Generation) 2.8 release failure",
-    "id": "vllm_languagde_model_test_extended_generation_28_failure_test",
-    "package_install": [
-        "--no-build-isolation",
-        "git+https://github.com/Dao-AILab/causal-conv1d@v1.5.0.post8",
-    ],
-    "steps": [
-        "pytest -v -s models/language/generation/test_mistral.py",
-    ],
-},
-"vllm_distributed_test_2_gpu_28_failure_test": {
-    "title": "Distributed Tests (2 GPUs) pytorch 2.8 release failure",
-    "id": "vllm_distributed_test_2_gpu_28_failure_test",
-    "env_vars": {
-        "VLLM_WORKER_MULTIPROC_METHOD": "spawn",
-    },
-    "num_gpus": 4,
-    "steps": [
-        "pytest -v -s distributed/test_sequence_parallel.py",
-    ],
-},
 # TODO(elainewy):need to add g6 with 4 gpus to run this test
 "vllm_lora_test": {
     "title": "LoRA Test %N",
@@ -281,12 +233,3 @@ def replace_buildkite_placeholders(step: str, shard_id: int, num_shards: int) ->
     for k in sorted(mapping, key=len, reverse=True):
         step = step.replace(k, mapping[k])
     return step
-def summarize_build_info(vllm_commit: str) -> bool:
-    torch_sha = os.getenv("GITHUB_SHA")
-    md = (
-        _TPL_VLLM_INFO.render(vllm_commit=vllm_commit, torch_sha=torch_sha).strip()
-        + "\n"
-    )
-    return write_gh_step_summary(md)
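The context lines above (`for k in sorted(mapping, key=len, reverse=True)`) encode a subtle rule: when one placeholder is a prefix of another, the longer key must be replaced first. A sketch with illustrative Buildkite-style values:

mapping = {
    "$BUILDKITE_PARALLEL_JOB": "0",
    "$BUILDKITE_PARALLEL_JOB_COUNT": "4",
}
step = "pytest --shard-id=$BUILDKITE_PARALLEL_JOB --num-shards=$BUILDKITE_PARALLEL_JOB_COUNT"

out = step
for k in sorted(mapping, key=len, reverse=True):  # longest key first
    out = out.replace(k, mapping[k])
print(out)  # pytest --shard-id=0 --num-shards=4

# Naive insertion order would first rewrite "$BUILDKITE_PARALLEL_JOB_COUNT"
# into "0_COUNT", because the shorter key is a prefix of the longer one.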


@ -4,7 +4,6 @@ import textwrap
from dataclasses import dataclass from dataclasses import dataclass
from pathlib import Path from pathlib import Path
from typing import Optional from typing import Optional
from cli.lib.common.cli_helper import BaseRunner from cli.lib.common.cli_helper import BaseRunner
from cli.lib.common.docker_helper import local_image_exists from cli.lib.common.docker_helper import local_image_exists
from cli.lib.common.envs_helper import ( from cli.lib.common.envs_helper import (
@ -13,11 +12,6 @@ from cli.lib.common.envs_helper import (
env_str_field, env_str_field,
with_params_help, with_params_help,
) )
from cli.lib.common.gh_summary import (
gh_summary_path,
summarize_content_from_file,
summarize_wheels,
)
from cli.lib.common.path_helper import ( from cli.lib.common.path_helper import (
copy, copy,
ensure_dir_exists, ensure_dir_exists,
@ -26,7 +20,14 @@ from cli.lib.common.path_helper import (
is_path_exist, is_path_exist,
) )
from cli.lib.common.utils import run_command from cli.lib.common.utils import run_command
from cli.lib.core.vllm.lib import clone_vllm, summarize_build_info from cli.lib.core.vllm.lib import clone_vllm, write_gh_step_summary
from cli.lib.common.gh_summary import (
summarize_content_from_file,
summarize_wheels,
gh_summary_path,
)
import torch
from torch import torch_version
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -63,12 +64,7 @@ class VllmBuildParameters:
# DOCKERFILE_PATH: path to Dockerfile used when use_local_dockerfile is True" # DOCKERFILE_PATH: path to Dockerfile used when use_local_dockerfile is True"
use_local_dockerfile: bool = env_bool_field("USE_LOCAL_DOCKERFILE", True) use_local_dockerfile: bool = env_bool_field("USE_LOCAL_DOCKERFILE", True)
dockerfile_path: Path = env_path_field( dockerfile_path: Path = env_path_field(
"DOCKERFILE_PATH", ".github/ci_configs/vllm/Dockerfile" "DOCKERFILE_PATH", ".github/ci_configs/vllm/Dockerfile.tmp_vllm"
)
# the cleaning script to remove torch dependencies from pip
cleaning_script: Path = env_path_field(
"cleaning_script", ".github/ci_configs/vllm/use_existing_torch.py"
) )
# OUTPUT_DIR: where docker buildx (local exporter) will write artifacts # OUTPUT_DIR: where docker buildx (local exporter) will write artifacts
@ -165,8 +161,17 @@ class VllmBuildRunner(BaseRunner):
logger.info("Running vllm build with inputs: %s", inputs) logger.info("Running vllm build with inputs: %s", inputs)
vllm_commit = clone_vllm() vllm_commit = clone_vllm()
self.cp_torch_cleaning_script(inputs) vllm_sha_url = f"[{vllm_commit}](https://github.com/vllm-project/vllm/commit/{vllm_commit})"
write_gh_step_summary(
f"""
## Commit Info
- **Vllm Commit**: `{vllm_sha_url}`
- **Torch Version**: `{torch_version}`
"""
)
self.cp_dockerfile_if_exist(inputs) self.cp_dockerfile_if_exist(inputs)
# cp torch wheels from root direct to vllm workspace if exist # cp torch wheels from root direct to vllm workspace if exist
self.cp_torch_whls_if_exist(inputs) self.cp_torch_whls_if_exist(inputs)
@ -187,19 +192,26 @@ class VllmBuildRunner(BaseRunner):
if not gh_summary_path(): if not gh_summary_path():
return logger.info("Skipping, not detect GH Summary env var....") return logger.info("Skipping, not detect GH Summary env var....")
logger.info("Generate GH Summary ...") logger.info("Generate GH Summary ...")
# summarize vllm build info vllm_sha_url = f"[{vllm_commit}](https://github.com/vllm-project/vllm/commit/{vllm_commit})"
summarize_build_info(vllm_commit) write_gh_step_summary(
f"""
# summarize vllm build artifacts ## Build vllm against Pytorch CI
**Vllm Commit**: `{vllm_sha_url}`
"""
)
torch_sha = os.getenv("GITHUB_SHA")
if torch_sha: # only can grab this in github action
torch_sha_url = (
f"[{torch_sha}](https://github.com/pytorch/pytorch/commit/{torch_sha})]"
)
write_gh_step_summary(
f"""
**Pytorch Commit**: `{torch_sha_url}`
"""
)
vllm_artifact_dir = inputs.output_dir / "wheels" vllm_artifact_dir = inputs.output_dir / "wheels"
summarize_content_from_file( summarize_content_from_file(vllm_artifact_dir, "build_summary.txt", title="Vllm build package summary")
vllm_artifact_dir, summarize_wheels(inputs.torch_whls_path, max_depth=3, title="Torch Wheels Artifacts")
"build_summary.txt",
title="Vllm build env pip package summary",
)
summarize_wheels(
inputs.torch_whls_path, max_depth=3, title="Torch Wheels Artifacts"
)
summarize_wheels(vllm_artifact_dir, max_depth=3, title="Vllm Wheels Artifacts") summarize_wheels(vllm_artifact_dir, max_depth=3, title="Vllm Wheels Artifacts")
def cp_torch_whls_if_exist(self, inputs: VllmBuildParameters) -> str: def cp_torch_whls_if_exist(self, inputs: VllmBuildParameters) -> str:
@ -211,11 +223,6 @@ class VllmBuildRunner(BaseRunner):
copy(inputs.torch_whls_path, tmp_dir) copy(inputs.torch_whls_path, tmp_dir)
return tmp_dir return tmp_dir
def cp_torch_cleaning_script(self, inputs: VllmBuildParameters):
script = get_path(inputs.cleaning_script, resolve=True)
vllm_script = Path(f"./{self.work_directory}/use_existing_torch.py")
copy(script, vllm_script)
def cp_dockerfile_if_exist(self, inputs: VllmBuildParameters): def cp_dockerfile_if_exist(self, inputs: VllmBuildParameters):
if not inputs.use_local_dockerfile: if not inputs.use_local_dockerfile:
logger.info("using vllm default dockerfile.torch_nightly for build") logger.info("using vllm default dockerfile.torch_nightly for build")

View File

@ -11,7 +11,7 @@ from typing import Any
from cli.lib.common.cli_helper import BaseRunner from cli.lib.common.cli_helper import BaseRunner
from cli.lib.common.envs_helper import env_path_field, env_str_field, get_env from cli.lib.common.envs_helper import env_path_field, env_str_field, get_env
from cli.lib.common.path_helper import copy, get_path, remove_dir from cli.lib.common.path_helper import copy, remove_dir
from cli.lib.common.pip_helper import ( from cli.lib.common.pip_helper import (
pip_install_first_match, pip_install_first_match,
pip_install_packages, pip_install_packages,
@ -43,10 +43,6 @@ class VllmTestParameters:
torch_cuda_arch_list: str = env_str_field("TORCH_CUDA_ARCH_LIST", "8.9") torch_cuda_arch_list: str = env_str_field("TORCH_CUDA_ARCH_LIST", "8.9")
cleaning_script: Path = env_path_field(
"cleaning_script", ".github/ci_configs/vllm/use_existing_torch.py"
)
def __post_init__(self): def __post_init__(self):
if not self.torch_whls_path.exists(): if not self.torch_whls_path.exists():
raise ValueError("missing torch_whls_path") raise ValueError("missing torch_whls_path")
@ -96,13 +92,11 @@ class VllmTestRunner(BaseRunner):
self._set_envs(params) self._set_envs(params)
clone_vllm(dst=self.work_directory) clone_vllm(dst=self.work_directory)
self.cp_torch_cleaning_script(params)
with working_directory(self.work_directory): with working_directory(self.work_directory):
remove_dir(Path("vllm")) remove_dir(Path("vllm"))
self._install_wheels(params) self._install_wheels(params)
self._install_dependencies() self._install_dependencies()
# verify the torches are not overridden by test dependencies # verify the torches are not overridden by test dependencies
check_versions() check_versions()
def run(self): def run(self):
@ -110,31 +104,20 @@ class VllmTestRunner(BaseRunner):
main function to run vllm test main function to run vllm test
""" """
self.prepare() self.prepare()
try: with working_directory(self.work_directory):
with working_directory(self.work_directory): if self.test_type == TestInpuType.TEST_PLAN:
if self.test_type == TestInpuType.TEST_PLAN: if self.num_shards > 1:
if self.num_shards > 1: run_test_plan(
run_test_plan( self.test_plan,
self.test_plan, "vllm",
"vllm", sample_vllm_test_library(),
sample_vllm_test_library(), self.shard_id,
self.shard_id, self.num_shards,
self.num_shards, )
)
else:
run_test_plan(
self.test_plan, "vllm", sample_vllm_test_library()
)
else: else:
raise ValueError(f"Unknown test type {self.test_type}") run_test_plan(self.test_plan, "vllm", sample_vllm_test_library())
finally: else:
# double check the torches are not overridden by other packages raise ValueError(f"Unknown test type {self.test_type}")
check_versions()
def cp_torch_cleaning_script(self, params: VllmTestParameters):
script = get_path(params.cleaning_script, resolve=True)
vllm_script = Path(f"./{self.work_directory}/use_existing_torch.py")
copy(script, vllm_script)
def _install_wheels(self, params: VllmTestParameters): def _install_wheels(self, params: VllmTestParameters):
logger.info("Running vllm test with inputs: %s", params) logger.info("Running vllm test with inputs: %s", params)
@ -232,6 +215,7 @@ def preprocess_test_in(
"torchaudio", "torchaudio",
"xformers", "xformers",
"mamba_ssm", "mamba_ssm",
"pybind11",
] + additional_package_to_move ] + additional_package_to_move
# Read current requirements # Read current requirements
target_path = Path(target_file) target_path = Path(target_file)
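
The package list above feeds a requirements rewrite; as a rough, hedged sketch (the function name and the deliberately loose prefix match are assumptions, not the actual preprocess_test_in logic), commenting out those pins so the CI-built wheels are used instead could look like:

from pathlib import Path

# Packages whose pins must not shadow the wheels built in CI (list from above).
PACKAGES_TO_MOVE = ["torch", "torchvision", "torchaudio", "xformers", "mamba_ssm", "pybind11"]

def comment_out_pins(target_file: str) -> None:
    path = Path(target_file)
    rewritten = [
        f"# {line}" if any(line.strip().startswith(pkg) for pkg in PACKAGES_TO_MOVE) else line
        for line in path.read_text().splitlines()
    ]
    path.write_text("\n".join(rewritten) + "\n")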

View File

@ -6,7 +6,7 @@ dependencies = [
"GitPython==3.1.45", "GitPython==3.1.45",
"docker==7.1.0", "docker==7.1.0",
"pytest==7.3.2", "pytest==7.3.2",
"uv==0.9.6" "uv==0.8.6"
] ]
[tool.setuptools] [tool.setuptools]

View File

@ -1,11 +1,11 @@
SHELL=/usr/bin/env bash SHELL=/usr/bin/env bash
DOCKER_CMD ?= docker DOCKER_CMD ?= docker
DESIRED_ROCM ?= 7.1 DESIRED_ROCM ?= 6.4
DESIRED_ROCM_SHORT = $(subst .,,$(DESIRED_ROCM)) DESIRED_ROCM_SHORT = $(subst .,,$(DESIRED_ROCM))
PACKAGE_NAME = magma-rocm PACKAGE_NAME = magma-rocm
# inherit this from underlying docker image, do not pass this env var to docker # inherit this from underlying docker image, do not pass this env var to docker
#PYTORCH_ROCM_ARCH ?= gfx900;gfx906;gfx908;gfx90a;gfx942;gfx950;gfx1030;gfx1100;gfx1101;gfx1102;gfx1150;gfx1151;gfx1200;gfx1201 #PYTORCH_ROCM_ARCH ?= gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201
DOCKER_RUN = set -eou pipefail; ${DOCKER_CMD} run --rm -i \ DOCKER_RUN = set -eou pipefail; ${DOCKER_CMD} run --rm -i \
-v $(shell git rev-parse --show-toplevel)/.ci:/builder \ -v $(shell git rev-parse --show-toplevel)/.ci:/builder \
@ -16,26 +16,20 @@ DOCKER_RUN = set -eou pipefail; ${DOCKER_CMD} run --rm -i \
magma-rocm/build_magma.sh magma-rocm/build_magma.sh
.PHONY: all .PHONY: all
all: magma-rocm71
all: magma-rocm70
all: magma-rocm64 all: magma-rocm64
all: magma-rocm63
.PHONY: .PHONY:
clean: clean:
$(RM) -r magma-* $(RM) -r magma-*
$(RM) -r output $(RM) -r output
.PHONY: magma-rocm71
magma-rocm71: DESIRED_ROCM := 7.1
magma-rocm71:
$(DOCKER_RUN)
.PHONY: magma-rocm70
magma-rocm70: DESIRED_ROCM := 7.0
magma-rocm70:
$(DOCKER_RUN)
.PHONY: magma-rocm64 .PHONY: magma-rocm64
magma-rocm64: DESIRED_ROCM := 6.4 magma-rocm64: DESIRED_ROCM := 6.4
magma-rocm64: magma-rocm64:
$(DOCKER_RUN) $(DOCKER_RUN)
.PHONY: magma-rocm63
magma-rocm63: DESIRED_ROCM := 6.3
magma-rocm63:
$(DOCKER_RUN)

View File

@ -30,6 +30,7 @@ into a tarball, with the following structure:
More specifically, `build_magma.sh` copies over the relevant files from the `package_files` directory depending on the ROCm version. More specifically, `build_magma.sh` copies over the relevant files from the `package_files` directory depending on the ROCm version.
Outputted binaries should be in the `output` folder. Outputted binaries should be in the `output` folder.
## Pushing ## Pushing
Packages can be uploaded to an S3 bucket using: Packages can be uploaded to an S3 bucket using:

View File

@ -6,8 +6,8 @@ set -eou pipefail
# The script expects DESIRED_CUDA and PACKAGE_NAME to be set # The script expects DESIRED_CUDA and PACKAGE_NAME to be set
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
# https://github.com/icl-utk-edu/magma/pull/65 # Version 2.7.2 + ROCm related updates
MAGMA_VERSION=d6e4117bc88e73f06d26c6c2e14f064e8fc3d1ec MAGMA_VERSION=a1625ff4d9bc362906bd01f805dbbe12612953f6
# Folders for the build # Folders for the build
PACKAGE_FILES=${ROOT_DIR}/magma-rocm/package_files # metadata PACKAGE_FILES=${ROOT_DIR}/magma-rocm/package_files # metadata
@ -20,7 +20,7 @@ mkdir -p ${PACKAGE_DIR} ${PACKAGE_OUTPUT}/linux-64 ${PACKAGE_BUILD} ${PACKAGE_RE
# Fetch magma sources and verify checksum # Fetch magma sources and verify checksum
pushd ${PACKAGE_DIR} pushd ${PACKAGE_DIR}
git clone https://github.com/jeffdaily/magma git clone https://bitbucket.org/icl/magma.git
pushd magma pushd magma
git checkout ${MAGMA_VERSION} git checkout ${MAGMA_VERSION}
popd popd

View File

@ -142,7 +142,7 @@ time CMAKE_ARGS=${CMAKE_ARGS[@]} \
EXTRA_CAFFE2_CMAKE_FLAGS=${EXTRA_CAFFE2_CMAKE_FLAGS[@]} \ EXTRA_CAFFE2_CMAKE_FLAGS=${EXTRA_CAFFE2_CMAKE_FLAGS[@]} \
BUILD_LIBTORCH_CPU_WITH_DEBUG=$BUILD_DEBUG_INFO \ BUILD_LIBTORCH_CPU_WITH_DEBUG=$BUILD_DEBUG_INFO \
USE_NCCL=${USE_NCCL} USE_RCCL=${USE_RCCL} USE_KINETO=${USE_KINETO} \ USE_NCCL=${USE_NCCL} USE_RCCL=${USE_RCCL} USE_KINETO=${USE_KINETO} \
python -m build --wheel --no-isolation --outdir /tmp/$WHEELHOUSE_DIR python setup.py bdist_wheel -d /tmp/$WHEELHOUSE_DIR
echo "Finished setup.py bdist at $(date)" echo "Finished setup.py bdist at $(date)"
# Build libtorch packages # Build libtorch packages

View File

@ -124,7 +124,6 @@ if [[ $CUDA_VERSION == 12* || $CUDA_VERSION == 13* ]]; then
fi fi
if [[ -z "$PYTORCH_EXTRA_INSTALL_REQUIREMENTS" ]]; then if [[ -z "$PYTORCH_EXTRA_INSTALL_REQUIREMENTS" ]]; then
echo "Bundling with cudnn and cublas." echo "Bundling with cudnn and cublas."
DEPS_LIST+=( DEPS_LIST+=(
"/usr/local/cuda/lib64/libcudnn_adv.so.9" "/usr/local/cuda/lib64/libcudnn_adv.so.9"
"/usr/local/cuda/lib64/libcudnn_cnn.so.9" "/usr/local/cuda/lib64/libcudnn_cnn.so.9"
@ -134,11 +133,16 @@ if [[ $CUDA_VERSION == 12* || $CUDA_VERSION == 13* ]]; then
"/usr/local/cuda/lib64/libcudnn_engines_precompiled.so.9" "/usr/local/cuda/lib64/libcudnn_engines_precompiled.so.9"
"/usr/local/cuda/lib64/libcudnn_heuristic.so.9" "/usr/local/cuda/lib64/libcudnn_heuristic.so.9"
"/usr/local/cuda/lib64/libcudnn.so.9" "/usr/local/cuda/lib64/libcudnn.so.9"
"/usr/local/cuda/lib64/libcublas.so.12"
"/usr/local/cuda/lib64/libcublasLt.so.12"
"/usr/local/cuda/lib64/libcusparseLt.so.0" "/usr/local/cuda/lib64/libcusparseLt.so.0"
"/usr/local/cuda/lib64/libcudart.so.12"
"/usr/local/cuda/lib64/libnvrtc.so.12"
"/usr/local/cuda/lib64/libnvrtc-builtins.so" "/usr/local/cuda/lib64/libnvrtc-builtins.so"
"/usr/local/cuda/lib64/libcufile.so.0" "/usr/local/cuda/lib64/libcufile.so.0"
"/usr/local/cuda/lib64/libcufile_rdma.so.1" "/usr/local/cuda/lib64/libcufile_rdma.so.1"
"/usr/local/cuda/lib64/libnvshmem_host.so.3" "/usr/local/cuda/lib64/libnvshmem_host.so.3"
"/usr/local/cuda/extras/CUPTI/lib64/libcupti.so.12"
"/usr/local/cuda/extras/CUPTI/lib64/libnvperf_host.so" "/usr/local/cuda/extras/CUPTI/lib64/libnvperf_host.so"
) )
DEPS_SONAME+=( DEPS_SONAME+=(
@ -150,59 +154,22 @@ if [[ $CUDA_VERSION == 12* || $CUDA_VERSION == 13* ]]; then
"libcudnn_engines_precompiled.so.9" "libcudnn_engines_precompiled.so.9"
"libcudnn_heuristic.so.9" "libcudnn_heuristic.so.9"
"libcudnn.so.9" "libcudnn.so.9"
"libcublas.so.12"
"libcublasLt.so.12"
"libcusparseLt.so.0" "libcusparseLt.so.0"
"libcudart.so.12"
"libnvrtc.so.12"
"libnvrtc-builtins.so" "libnvrtc-builtins.so"
"libnvshmem_host.so.3" "libnvshmem_host.so.3"
"libcufile.so.0" "libcufile.so.0"
"libcufile_rdma.so.1" "libcufile_rdma.so.1"
"libcupti.so.12"
"libnvperf_host.so" "libnvperf_host.so"
) )
# Add libnvToolsExt only if CUDA version is not 12.9 # Add libnvToolsExt only if CUDA version is not 12.9
if [[ $CUDA_VERSION == 13* ]]; then if [[ $CUDA_VERSION != 12.9* ]]; then
DEPS_LIST+=( DEPS_LIST+=("/usr/local/cuda/lib64/libnvToolsExt.so.1")
"/usr/local/cuda/lib64/libcublas.so.13" DEPS_SONAME+=("libnvToolsExt.so.1")
"/usr/local/cuda/lib64/libcublasLt.so.13"
"/usr/local/cuda/lib64/libcudart.so.13"
"/usr/local/cuda/lib64/libnvrtc.so.13"
"/usr/local/cuda/extras/CUPTI/lib64/libcupti.so.13"
"/usr/local/cuda/lib64/libibverbs.so.1"
"/usr/local/cuda/lib64/librdmacm.so.1"
"/usr/local/cuda/lib64/libmlx5.so.1"
"/usr/local/cuda/lib64/libnl-3.so.200"
"/usr/local/cuda/lib64/libnl-route-3.so.200")
DEPS_SONAME+=(
"libcublas.so.13"
"libcublasLt.so.13"
"libcudart.so.13"
"libnvrtc.so.13"
"libcupti.so.13"
"libibverbs.so.1"
"librdmacm.so.1"
"libmlx5.so.1"
"libnl-3.so.200"
"libnl-route-3.so.200")
export USE_CUPTI_SO=1
export ATEN_STATIC_CUDA=0
export USE_CUDA_STATIC_LINK=0
export USE_CUFILE=0
else
DEPS_LIST+=(
"/usr/local/cuda/lib64/libcublas.so.12"
"/usr/local/cuda/lib64/libcublasLt.so.12"
"/usr/local/cuda/lib64/libcudart.so.12"
"/usr/local/cuda/lib64/libnvrtc.so.12"
"/usr/local/cuda/extras/CUPTI/lib64/libcupti.so.12")
DEPS_SONAME+=(
"libcublas.so.12"
"libcublasLt.so.12"
"libcudart.so.12"
"libnvrtc.so.12"
"libcupti.so.12")
if [[ $CUDA_VERSION != 12.9* ]]; then
DEPS_LIST+=("/usr/local/cuda/lib64/libnvToolsExt.so.1")
DEPS_SONAME+=("libnvToolsExt.so.1")
fi
fi fi
else else
echo "Using nvidia libs from pypi." echo "Using nvidia libs from pypi."

View File

@ -104,7 +104,7 @@ if [[ "$DESIRED_CUDA" == *"rocm"* ]]; then
export ROCclr_DIR=/opt/rocm/rocclr/lib/cmake/rocclr export ROCclr_DIR=/opt/rocm/rocclr/lib/cmake/rocclr
fi fi
echo "Calling -m pip install . -v --no-build-isolation at $(date)" echo "Calling 'python -m pip install .' at $(date)"
if [[ $LIBTORCH_VARIANT = *"static"* ]]; then if [[ $LIBTORCH_VARIANT = *"static"* ]]; then
STATIC_CMAKE_FLAG="-DTORCH_STATIC=1" STATIC_CMAKE_FLAG="-DTORCH_STATIC=1"

View File

@ -107,10 +107,6 @@ if [[ $ROCM_INT -ge 60200 ]]; then
ROCM_SO_FILES+=("librocm-core.so") ROCM_SO_FILES+=("librocm-core.so")
fi fi
if [[ $ROCM_INT -ge 70000 ]]; then
ROCM_SO_FILES+=("librocroller.so")
fi
OS_NAME=`awk -F= '/^NAME/{print $2}' /etc/os-release` OS_NAME=`awk -F= '/^NAME/{print $2}' /etc/os-release`
if [[ "$OS_NAME" == *"CentOS Linux"* || "$OS_NAME" == *"AlmaLinux"* ]]; then if [[ "$OS_NAME" == *"CentOS Linux"* || "$OS_NAME" == *"AlmaLinux"* ]]; then
LIBGOMP_PATH="/usr/lib64/libgomp.so.1" LIBGOMP_PATH="/usr/lib64/libgomp.so.1"

View File

@ -89,7 +89,7 @@ fi
if [[ "$BUILD_ENVIRONMENT" == *aarch64* ]]; then if [[ "$BUILD_ENVIRONMENT" == *aarch64* ]]; then
export USE_MKLDNN=1 export USE_MKLDNN=1
export USE_MKLDNN_ACL=1 export USE_MKLDNN_ACL=1
export ACL_ROOT_DIR=/acl export ACL_ROOT_DIR=/ComputeLibrary
fi fi
if [[ "$BUILD_ENVIRONMENT" == *riscv64* ]]; then if [[ "$BUILD_ENVIRONMENT" == *riscv64* ]]; then
@ -168,16 +168,14 @@ if [[ "$BUILD_ENVIRONMENT" == *xpu* ]]; then
# shellcheck disable=SC1091 # shellcheck disable=SC1091
source /opt/intel/oneapi/compiler/latest/env/vars.sh source /opt/intel/oneapi/compiler/latest/env/vars.sh
# shellcheck disable=SC1091 # shellcheck disable=SC1091
source /opt/intel/oneapi/umf/latest/env/vars.sh
# shellcheck disable=SC1091
source /opt/intel/oneapi/ccl/latest/env/vars.sh source /opt/intel/oneapi/ccl/latest/env/vars.sh
# shellcheck disable=SC1091 # shellcheck disable=SC1091
source /opt/intel/oneapi/mpi/latest/env/vars.sh source /opt/intel/oneapi/mpi/latest/env/vars.sh
# shellcheck disable=SC1091
source /opt/intel/oneapi/pti/latest/env/vars.sh
# Enable XCCL build # Enable XCCL build
export USE_XCCL=1 export USE_XCCL=1
export USE_MPI=0 export USE_MPI=0
# XPU kineto feature dependencies are not fully ready, disable kineto build as temp WA
export USE_KINETO=0
export TORCH_XPU_ARCH_LIST=pvc export TORCH_XPU_ARCH_LIST=pvc
fi fi
@ -235,9 +233,7 @@ if [[ "${BUILD_ENVIRONMENT}" != *cuda* ]]; then
export BUILD_STATIC_RUNTIME_BENCHMARK=ON export BUILD_STATIC_RUNTIME_BENCHMARK=ON
fi fi
if [[ "$BUILD_ENVIRONMENT" == *-full-debug* ]]; then if [[ "$BUILD_ENVIRONMENT" == *-debug* ]]; then
export CMAKE_BUILD_TYPE=Debug
elif [[ "$BUILD_ENVIRONMENT" == *-debug* ]]; then
export CMAKE_BUILD_TYPE=RelWithAssert export CMAKE_BUILD_TYPE=RelWithAssert
fi fi
@ -294,20 +290,15 @@ else
WERROR=1 python setup.py clean WERROR=1 python setup.py clean
WERROR=1 python -m build --wheel --no-isolation WERROR=1 python setup.py bdist_wheel
else else
python setup.py clean python setup.py clean
if [[ "$BUILD_ENVIRONMENT" == *xla* ]]; then if [[ "$BUILD_ENVIRONMENT" == *xla* ]]; then
source .ci/pytorch/install_cache_xla.sh source .ci/pytorch/install_cache_xla.sh
fi fi
python -m build --wheel --no-isolation python setup.py bdist_wheel
fi fi
pip_install_whl "$(echo dist/*.whl)" pip_install_whl "$(echo dist/*.whl)"
if [[ "$BUILD_ENVIRONMENT" == *full-debug* ]]; then
# Regression test for https://github.com/pytorch/pytorch/issues/164297
# Torch should be importable and that's about it
pushd /; python -c "import torch;print(torch.__config__.show(), torch.randn(5) + 1.7)"; popd
fi
if [[ "${BUILD_ADDITIONAL_PACKAGES:-}" == *vision* ]]; then if [[ "${BUILD_ADDITIONAL_PACKAGES:-}" == *vision* ]]; then
install_torchvision install_torchvision
@ -428,7 +419,7 @@ fi
if [[ "$BUILD_ENVIRONMENT" != *libtorch* && "$BUILD_ENVIRONMENT" != *bazel* ]]; then if [[ "$BUILD_ENVIRONMENT" != *libtorch* && "$BUILD_ENVIRONMENT" != *bazel* ]]; then
# export test times so that potential sharded tests that'll branch off this build will use consistent data # export test times so that potential sharded tests that'll branch off this build will use consistent data
# don't do this for libtorch as libtorch is C++ only and thus won't have python tests run on its build # don't do this for libtorch as libtorch is C++ only and thus won't have python tests run on its build
PYTHONPATH=. python tools/stats/export_test_times.py python tools/stats/export_test_times.py
fi fi
# don't do this for bazel or s390x or riscv64 as they don't use sccache # don't do this for bazel or s390x or riscv64 as they don't use sccache
if [[ "$BUILD_ENVIRONMENT" != *s390x* && "$BUILD_ENVIRONMENT" != *riscv64* && "$BUILD_ENVIRONMENT" != *-bazel-* ]]; then if [[ "$BUILD_ENVIRONMENT" != *s390x* && "$BUILD_ENVIRONMENT" != *riscv64* && "$BUILD_ENVIRONMENT" != *-bazel-* ]]; then

View File

@ -300,3 +300,24 @@ except RuntimeError as e:
exit 1 exit 1
fi fi
fi fi
###############################################################################
# Check for C++ ABI compatibility with GCC-11 through GCC-13
###############################################################################
if [[ "$(uname)" == 'Linux' && "$PACKAGE_TYPE" == 'manywheel' ]]; then
pushd /tmp
# Per https://gcc.gnu.org/onlinedocs/gcc/C_002b_002b-Dialect-Options.html
# gcc-11 is ABI16, gcc-13 is ABI18, gcc-14 is ABI19
# gcc 11 - CUDA 11.8, xpu, rocm
# gcc 13 - CUDA 12.6, 12.8 and cpu
# Please see issue for reference: https://github.com/pytorch/pytorch/issues/152426
if [[ "$(uname -m)" == "s390x" ]]; then
cxx_abi="19"
elif [[ "$DESIRED_CUDA" != 'xpu' && "$DESIRED_CUDA" != 'rocm'* ]]; then
cxx_abi="18"
else
cxx_abi="16"
fi
python -c "import torch; exit(0 if torch._C._PYBIND11_BUILD_ABI == '_cxxabi10${cxx_abi}' else 1)"
popd
fi
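
The same check can be run by hand; a minimal sketch, assuming a Linux manywheel build where torch._C._PYBIND11_BUILD_ABI is populated as the comments above describe:

import torch

# The compiled-in pybind11 ABI tag, e.g. "_cxxabi1018" for a gcc-13 build.
tag = torch._C._PYBIND11_BUILD_ABI
print(tag)
assert tag.startswith("_cxxabi10"), f"unexpected ABI tag: {tag}"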

View File

@ -96,6 +96,7 @@ function pip_build_and_install() {
python3 -m pip wheel \ python3 -m pip wheel \
--no-build-isolation \ --no-build-isolation \
--no-deps \ --no-deps \
--no-use-pep517 \
-w "${wheel_dir}" \ -w "${wheel_dir}" \
"${build_target}" "${build_target}"
fi fi
@ -257,19 +258,11 @@ function install_torchrec_and_fbgemm() {
git clone --recursive https://github.com/pytorch/fbgemm git clone --recursive https://github.com/pytorch/fbgemm
pushd fbgemm/fbgemm_gpu pushd fbgemm/fbgemm_gpu
git checkout "${fbgemm_commit}" --recurse-submodules git checkout "${fbgemm_commit}" --recurse-submodules
# until the fbgemm_commit includes the tbb patch python setup.py bdist_wheel \
patch <<'EOF' --build-variant=rocm \
--- a/FbgemmGpu.cmake -DHIP_ROOT_DIR="${ROCM_PATH}" \
+++ b/FbgemmGpu.cmake -DCMAKE_C_FLAGS="-DTORCH_USE_HIP_DSA" \
@@ -184,5 +184,6 @@ gpu_cpp_library( -DCMAKE_CXX_FLAGS="-DTORCH_USE_HIP_DSA"
fbgemm_gpu_tbe_cache
fbgemm_gpu_tbe_optimizers
fbgemm_gpu_tbe_utils
+ tbb
DESTINATION
fbgemm_gpu)
EOF
python setup.py bdist_wheel --build-variant=rocm
popd popd
# Save the wheel before cleaning up # Save the wheel before cleaning up

View File

@ -58,7 +58,7 @@ time python tools/setup_helpers/generate_code.py \
# Build the docs # Build the docs
pushd docs/cpp pushd docs/cpp
time make VERBOSE=1 html time make VERBOSE=1 html -j
popd popd
popd popd

View File

@ -0,0 +1,40 @@
#!/bin/bash
# This is where the local pytorch install in the docker image is located
pt_checkout="/var/lib/jenkins/workspace"
source "$pt_checkout/.ci/pytorch/common_utils.sh"
echo "functorch_doc_push_script.sh: Invoked with $*"
set -ex -o pipefail
version=${DOCS_VERSION:-nightly}
echo "version: $version"
# Build functorch docs
pushd $pt_checkout/functorch/docs
make html
popd
git clone https://github.com/pytorch/functorch -b gh-pages --depth 1 functorch_ghpages
pushd functorch_ghpages
if [ "$version" == "main" ]; then
version=nightly
fi
git rm -rf "$version" || true
mv "$pt_checkout/functorch/docs/build/html" "$version"
git add "$version" || true
git status
git config user.email "soumith+bot@pytorch.org"
git config user.name "pytorchbot"
# If there aren't changes, don't make a commit; push is no-op
git commit -m "Generate Python docs from pytorch/pytorch@${GITHUB_SHA}" || true
git status
if [[ "${WITH_PUSH:-}" == true ]]; then
git push -u origin gh-pages
fi
popd

View File

@ -36,11 +36,11 @@ fi
print_cmake_info print_cmake_info
if [[ ${BUILD_ENVIRONMENT} == *"distributed"* ]]; then if [[ ${BUILD_ENVIRONMENT} == *"distributed"* ]]; then
# Needed for inductor benchmarks, as lots of HF networks make `torch.distributed` calls # Needed for inductor benchmarks, as lots of HF networks make `torch.distributed` calls
USE_DISTRIBUTED=1 USE_OPENMP=1 WERROR=1 python -m build --wheel --no-isolation USE_DISTRIBUTED=1 USE_OPENMP=1 WERROR=1 python setup.py bdist_wheel
else else
# Explicitly set USE_DISTRIBUTED=0 to align with the default build config on mac. This also serves as the sole CI config that tests # Explicitly set USE_DISTRIBUTED=0 to align with the default build config on mac. This also serves as the sole CI config that tests
# that building with USE_DISTRIBUTED=0 works at all. See https://github.com/pytorch/pytorch/issues/86448 # that building with USE_DISTRIBUTED=0 works at all. See https://github.com/pytorch/pytorch/issues/86448
USE_DISTRIBUTED=0 USE_OPENMP=1 MACOSX_DEPLOYMENT_TARGET=11.0 WERROR=1 BUILD_TEST=OFF USE_PYTORCH_METAL=1 python -m build --wheel --no-isolation -C--build-option=--plat-name=macosx_11_0_arm64 USE_DISTRIBUTED=0 USE_OPENMP=1 MACOSX_DEPLOYMENT_TARGET=11.0 WERROR=1 BUILD_TEST=OFF USE_PYTORCH_METAL=1 python setup.py bdist_wheel --plat-name macosx_11_0_arm64
fi fi
if which sccache > /dev/null; then if which sccache > /dev/null; then
print_sccache_stats print_sccache_stats

View File

@ -55,7 +55,7 @@ test_python_shard() {
setup_test_python setup_test_python
time python test/run_test.py --verbose --exclude-jit-executor --exclude-distributed-tests --exclude-quantization-tests --shard "$1" "$NUM_TEST_SHARDS" time python test/run_test.py --verbose --exclude-jit-executor --exclude-distributed-tests --shard "$1" "$NUM_TEST_SHARDS"
assert_git_not_dirty assert_git_not_dirty
} }
@ -195,7 +195,7 @@ torchbench_setup_macos() {
git checkout "$(cat ../.github/ci_commit_pins/vision.txt)" git checkout "$(cat ../.github/ci_commit_pins/vision.txt)"
git submodule update --init --recursive git submodule update --init --recursive
python setup.py clean python setup.py clean
python -m pip install -e . -v --no-build-isolation python setup.py develop
popd popd
pushd torchaudio pushd torchaudio
@ -204,7 +204,7 @@ torchbench_setup_macos() {
git submodule update --init --recursive git submodule update --init --recursive
python setup.py clean python setup.py clean
#TODO: Remove me, when figure out how to make TorchAudio find brew installed openmp #TODO: Remove me, when figure out how to make TorchAudio find brew installed openmp
USE_OPENMP=0 python -m pip install -e . -v --no-build-isolation USE_OPENMP=0 python setup.py develop
popd popd
checkout_install_torchbench checkout_install_torchbench
@ -256,7 +256,7 @@ test_torchbench_smoketest() {
local device=mps local device=mps
local dtypes=(undefined float16 bfloat16 notset) local dtypes=(undefined float16 bfloat16 notset)
local dtype=${dtypes[$1]} local dtype=${dtypes[$1]}
local models=(llama BERT_pytorch dcgan yolov3 resnet152 sam sam_fast pytorch_unet stable_diffusion_text_encoder speech_transformer Super_SloMo doctr_det_predictor doctr_reco_predictor vgg16) local models=(hf_T5 llama BERT_pytorch dcgan hf_GPT2 yolov3 resnet152 sam sam_fast pytorch_unet stable_diffusion_text_encoder speech_transformer Super_SloMo doctr_det_predictor doctr_reco_predictor timm_resnet timm_vovnet vgg16)
for backend in eager inductor; do for backend in eager inductor; do
@ -302,47 +302,6 @@ test_torchbench_smoketest() {
fi fi
done done
echo "Pytorch benchmark on mps device completed"
}
test_aoti_torchbench_smoketest() {
print_cmake_info
echo "Launching AOTInductor torchbench setup"
pip_benchmark_deps
# shellcheck disable=SC2119,SC2120
torchbench_setup_macos
TEST_REPORTS_DIR=$(pwd)/test/test-reports
mkdir -p "$TEST_REPORTS_DIR"
local device=mps
local dtypes=(undefined float16 bfloat16 notset)
local dtype=${dtypes[$1]}
local models=(llama BERT_pytorch dcgan yolov3 resnet152 sam sam_fast pytorch_unet stable_diffusion_text_encoder speech_transformer Super_SloMo doctr_det_predictor doctr_reco_predictor vgg16)
echo "Launching torchbench inference performance run for AOT Inductor and dtype ${dtype}"
local dtype_arg="--${dtype}"
if [ "$dtype" == notset ]; then
dtype_arg="--float32"
fi
touch "$TEST_REPORTS_DIR/aot_inductor_torchbench_${dtype}_inference_${device}_performance.csv"
for model in "${models[@]}"; do
PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/torchbench.py \
--performance --only "$model" --export-aot-inductor --inference --devices "$device" "$dtype_arg" \
--output "$TEST_REPORTS_DIR/aot_inductor_torchbench_${dtype}_inference_${device}_performance.csv" || true
PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/torchbench.py \
--accuracy --only "$model" --export-aot-inductor --inference --devices "$device" "$dtype_arg" \
--output "$TEST_REPORTS_DIR/aot_inductor_torchbench_${dtype}_inference_${device}_accuracy.csv" || true
done
echo "Launching HuggingFace inference performance run for AOT Inductor and dtype ${dtype}"
PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/huggingface.py \
--performance --export-aot-inductor --inference --devices "$device" "$dtype_arg" \
--output "$TEST_REPORTS_DIR/aot_inductor_huggingface_${dtype}_inference_${device}_performance.csv" || true
PYTHONPATH="$(pwd)"/torchbench python benchmarks/dynamo/huggingface.py \
--accuracy --export-aot-inductor --inference --devices "$device" "$dtype_arg" \
--output "$TEST_REPORTS_DIR/aot_inductor_huggingface_${dtype}_inference_${device}_accuracy.csv" || true
echo "Pytorch benchmark on mps device completed" echo "Pytorch benchmark on mps device completed"
} }
@ -391,8 +350,6 @@ elif [[ $TEST_CONFIG == *"perf_timm"* ]]; then
test_timm_perf test_timm_perf
elif [[ $TEST_CONFIG == *"perf_smoketest"* ]]; then elif [[ $TEST_CONFIG == *"perf_smoketest"* ]]; then
test_torchbench_smoketest "${SHARD_NUMBER}" test_torchbench_smoketest "${SHARD_NUMBER}"
elif [[ $TEST_CONFIG == *"aot_inductor_perf_smoketest"* ]]; then
test_aoti_torchbench_smoketest "${SHARD_NUMBER}"
elif [[ $TEST_CONFIG == *"mps"* ]]; then elif [[ $TEST_CONFIG == *"mps"* ]]; then
test_python_mps test_python_mps
elif [[ $NUM_TEST_SHARDS -gt 1 ]]; then elif [[ $NUM_TEST_SHARDS -gt 1 ]]; then

View File

@ -26,7 +26,6 @@ if [[ "${SHARD_NUMBER:-2}" == "2" ]]; then
time python test/run_test.py --verbose -i distributed/test_c10d_spawn_gloo time python test/run_test.py --verbose -i distributed/test_c10d_spawn_gloo
time python test/run_test.py --verbose -i distributed/test_c10d_spawn_nccl time python test/run_test.py --verbose -i distributed/test_c10d_spawn_nccl
time python test/run_test.py --verbose -i distributed/test_compute_comm_reordering time python test/run_test.py --verbose -i distributed/test_compute_comm_reordering
time python test/run_test.py --verbose -i distributed/test_aten_comm_compute_reordering
time python test/run_test.py --verbose -i distributed/test_store time python test/run_test.py --verbose -i distributed/test_store
time python test/run_test.py --verbose -i distributed/test_symmetric_memory time python test/run_test.py --verbose -i distributed/test_symmetric_memory
time python test/run_test.py --verbose -i distributed/test_pg_wrapper time python test/run_test.py --verbose -i distributed/test_pg_wrapper

View File

@ -1,25 +0,0 @@
From 6e08c9d08e9de59c7af28b720289debbbd384764 Mon Sep 17 00:00:00 2001
From: Michael Wang <13521008+isVoid@users.noreply.github.com>
Date: Tue, 1 Apr 2025 17:28:05 -0700
Subject: [PATCH] Avoid bumping certain driver API to avoid future breakage
(#185)
Co-authored-by: isVoid <isVoid@users.noreply.github.com>
---
numba_cuda/numba/cuda/cudadrv/driver.py | 3 +++
1 file changed, 3 insertions(+)
diff --git a/numba_cuda/numba/cuda/cudadrv/driver.py b/numba_cuda/numba/cuda/cudadrv/driver.py
index 1641bf77..233e9ed7 100644
--- a/numba_cuda/numba/cuda/cudadrv/driver.py
+++ b/numba_cuda/numba/cuda/cudadrv/driver.py
@@ -365,6 +365,9 @@ def _find_api(self, fname):
else:
variants = ('_v2', '')
+ if fname in ("cuCtxGetDevice", "cuCtxSynchronize"):
+ return getattr(self.lib, fname)
+
for variant in variants:
try:
return getattr(self.lib, f'{fname}{variant}')

View File

@ -89,41 +89,23 @@ if [ "$is_main_doc" = true ]; then
make coverage make coverage
# Now that we have the coverage report, we need to make sure it is empty. # Now that we have the coverage report, we need to make sure it is empty.
# Sphinx 7.2.6+ format: python.txt contains a statistics table with a TOTAL row # Count the number of lines in the file and turn that number into a variable
# showing the undocumented count in the third column. # $lines. The `cut -f1 ...` is to only parse the number, not the filename
# Example: | TOTAL | 99.83% | 2 | # Skip the report header by subtracting 2: the header will be output even if
# there are no undocumented items.
# #
# Also: see docs/source/conf.py for "coverage_ignore*" items, which should # Also: see docs/source/conf.py for "coverage_ignore*" items, which should
# be documented then removed from there. # be documented then removed from there.
lines=$(wc -l build/coverage/python.txt 2>/dev/null |cut -f1 -d' ')
# Extract undocumented count from TOTAL row in Sphinx 7.2.6 statistics table undocumented=$((lines - 2))
# The table format is: | Module | Coverage | Undocumented | if [ $undocumented -lt 0 ]; then
# Extract the third column (undocumented count) from the TOTAL row
undocumented=$(grep "| TOTAL" build/coverage/python.txt | awk -F'|' '{print $4}' | tr -d ' ')
if [ -z "$undocumented" ] || ! [[ "$undocumented" =~ ^[0-9]+$ ]]; then
echo coverage output not found echo coverage output not found
exit 1 exit 1
elif [ "$undocumented" -gt 0 ]; then elif [ $undocumented -gt 0 ]; then
set +x # Disable command echoing for cleaner output echo undocumented objects found:
echo "" cat build/coverage/python.txt
echo "====================="
echo "UNDOCUMENTED OBJECTS:"
echo "====================="
echo ""
# Find the line number of the TOTAL row and print only what comes after it
total_line=$(grep -n "| TOTAL" build/coverage/python.txt | cut -d: -f1)
if [ -n "$total_line" ]; then
# Print only the detailed list (skip the statistics table)
tail -n +$((total_line + 2)) build/coverage/python.txt
else
# Fallback to showing entire file if TOTAL line not found
cat build/coverage/python.txt
fi
echo ""
echo "Make sure you've updated relevant .rsts in docs/source!" echo "Make sure you've updated relevant .rsts in docs/source!"
echo "You can reproduce locally by running 'cd docs && make coverage && tail -n +\$((grep -n \"| TOTAL\" build/coverage/python.txt | cut -d: -f1) + 2)) build/coverage/python.txt'" echo "You can reproduce locally by running 'cd docs && make coverage && cat build/coverage/python.txt'"
set -x # Re-enable command echoing
exit 1 exit 1
fi fi
else else

View File

@ -32,9 +32,6 @@ LIBTORCH_NAMESPACE_LIST = (
"torch::", "torch::",
) )
# Patterns for detecting statically linked libstdc++ symbols
STATICALLY_LINKED_CXX11_ABI = [re.compile(r".*recursive_directory_iterator.*")]
def _apply_libtorch_symbols(symbols): def _apply_libtorch_symbols(symbols):
return [ return [
@ -56,17 +53,12 @@ def get_symbols(lib: str) -> list[tuple[str, str, str]]:
return [x.split(" ", 2) for x in lines.decode("latin1").split("\n")[:-1]] return [x.split(" ", 2) for x in lines.decode("latin1").split("\n")[:-1]]
def grep_symbols( def grep_symbols(lib: str, patterns: list[Any]) -> list[str]:
lib: str, patterns: list[Any], symbol_type: str | None = None
) -> list[str]:
def _grep_symbols( def _grep_symbols(
symbols: list[tuple[str, str, str]], patterns: list[Any] symbols: list[tuple[str, str, str]], patterns: list[Any]
) -> list[str]: ) -> list[str]:
rc = [] rc = []
for _s_addr, _s_type, s_name in symbols: for _s_addr, _s_type, s_name in symbols:
# Filter by symbol type if specified
if symbol_type and _s_type != symbol_type:
continue
for pattern in patterns: for pattern in patterns:
if pattern.match(s_name): if pattern.match(s_name):
rc.append(s_name) rc.append(s_name)
@ -88,18 +80,6 @@ def grep_symbols(
return functools.reduce(list.__add__, (x.result() for x in tasks), []) return functools.reduce(list.__add__, (x.result() for x in tasks), [])
def check_lib_statically_linked_libstdc_cxx_abi_symbols(lib: str) -> None:
cxx11_statically_linked_symbols = grep_symbols(
lib, STATICALLY_LINKED_CXX11_ABI, symbol_type="T"
)
num_statically_linked_symbols = len(cxx11_statically_linked_symbols)
print(f"num_statically_linked_symbols (T): {num_statically_linked_symbols}")
if num_statically_linked_symbols > 0:
raise RuntimeError(
f"Found statically linked libstdc++ symbols (recursive_directory_iterator): {cxx11_statically_linked_symbols[:100]}"
)
def check_lib_symbols_for_abi_correctness(lib: str) -> None: def check_lib_symbols_for_abi_correctness(lib: str) -> None:
print(f"lib: {lib}") print(f"lib: {lib}")
cxx11_symbols = grep_symbols(lib, LIBTORCH_CXX11_PATTERNS) cxx11_symbols = grep_symbols(lib, LIBTORCH_CXX11_PATTERNS)
@ -127,7 +107,6 @@ def main() -> None:
libtorch_cpu_path = str(install_root / "lib" / "libtorch_cpu.so") libtorch_cpu_path = str(install_root / "lib" / "libtorch_cpu.so")
check_lib_symbols_for_abi_correctness(libtorch_cpu_path) check_lib_symbols_for_abi_correctness(libtorch_cpu_path)
check_lib_statically_linked_libstdc_cxx_abi_symbols(libtorch_cpu_path)
if __name__ == "__main__": if __name__ == "__main__":
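
The symbol_type="T" filter in the removed helper narrows nm output to defined text symbols before pattern matching. A standalone hedged sketch of the same idea (assumes nm -D is on PATH and the library path is valid):

import re
import subprocess

def defined_text_symbols(lib: str, pattern: str) -> list[str]:
    # nm -D prints "address type name"; keep only defined text ("T") symbols.
    out = subprocess.check_output(["nm", "-D", lib]).decode("latin1")
    rx = re.compile(pattern)
    hits = []
    for line in out.splitlines():
        parts = line.split(" ", 2)
        if len(parts) == 3 and parts[1] == "T" and rx.match(parts[2]):
            hits.append(parts[2])
    return hits

# e.g. defined_text_symbols("libtorch_cpu.so", r".*recursive_directory_iterator.*")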

View File

@ -386,8 +386,8 @@ def smoke_test_compile(device: str = "cpu") -> None:
def smoke_test_nvshmem() -> None: def smoke_test_nvshmem() -> None:
if not torch.cuda.is_available() or target_os == "windows": if not torch.cuda.is_available():
print("Windows platform or CUDA is not available, skipping NVSHMEM test") print("CUDA is not available, skipping NVSHMEM test")
return return
# Check if NVSHMEM is compiled in current build # Check if NVSHMEM is compiled in current build
@ -396,9 +396,7 @@ def smoke_test_nvshmem() -> None:
except ImportError: except ImportError:
# Not built with NVSHMEM support. # Not built with NVSHMEM support.
# torch is not compiled with NVSHMEM prior to 2.9 # torch is not compiled with NVSHMEM prior to 2.9
from torch.torch_version import TorchVersion if torch.__version__ < "2.9":
if TorchVersion(torch.__version__) < (2, 9):
return return
else: else:
# After 2.9: NVSHMEM is expected to be compiled in current build # After 2.9: NVSHMEM is expected to be compiled in current build
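
The removed branch avoids comparing torch.__version__ as a raw string because string comparison is lexicographic; a minimal sketch of the difference, assuming a torch release recent enough to ship TorchVersion:

from torch.torch_version import TorchVersion

# Lexicographic string comparison misorders versions: "2.10" sorts before "2.9".
assert "2.10" < "2.9"
# TorchVersion compares numerically, so 2.10 is correctly newer than 2.9.
assert TorchVersion("2.10.0") > (2, 9)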

View File

@ -32,18 +32,6 @@ if [[ "$BUILD_ENVIRONMENT" != *rocm* && "$BUILD_ENVIRONMENT" != *s390x* && -d /v
git config --global --add safe.directory /var/lib/jenkins/workspace git config --global --add safe.directory /var/lib/jenkins/workspace
fi fi
# Patch numba to avoid CUDA-13 crash, see https://github.com/pytorch/pytorch/issues/162878
if [[ "$BUILD_ENVIRONMENT" == *cuda* ]]; then
NUMBA_CUDA_DIR=$(python -c "import os;import numba.cuda; print(os.path.dirname(numba.cuda.__file__))" 2>/dev/null || true)
if [ -n "$NUMBA_CUDA_DIR" ]; then
NUMBA_PATCH="$(dirname "$(realpath "${BASH_SOURCE[0]}")")/numba-cuda-13.patch"
pushd "$NUMBA_CUDA_DIR"
patch -p4 <"$NUMBA_PATCH"
popd
fi
fi
echo "Environment variables:" echo "Environment variables:"
env env
@ -208,8 +196,6 @@ if [[ "$BUILD_ENVIRONMENT" == *xpu* ]]; then
source /opt/intel/oneapi/ccl/latest/env/vars.sh source /opt/intel/oneapi/ccl/latest/env/vars.sh
# shellcheck disable=SC1091 # shellcheck disable=SC1091
source /opt/intel/oneapi/mpi/latest/env/vars.sh source /opt/intel/oneapi/mpi/latest/env/vars.sh
# shellcheck disable=SC1091
source /opt/intel/oneapi/pti/latest/env/vars.sh
# Check XPU status before testing # Check XPU status before testing
timeout 30 xpu-smi discovery || true timeout 30 xpu-smi discovery || true
fi fi
@ -326,26 +312,20 @@ test_python_shard() {
# modify LD_LIBRARY_PATH to ensure it has the conda env. # modify LD_LIBRARY_PATH to ensure it has the conda env.
# This set of tests has been shown to be buggy without it for the split-build # This set of tests has been shown to be buggy without it for the split-build
time python test/run_test.py --exclude-jit-executor --exclude-distributed-tests --exclude-quantization-tests $INCLUDE_CLAUSE --shard "$1" "$NUM_TEST_SHARDS" --verbose $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running time python test/run_test.py --exclude-jit-executor --exclude-distributed-tests $INCLUDE_CLAUSE --shard "$1" "$NUM_TEST_SHARDS" --verbose $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
assert_git_not_dirty assert_git_not_dirty
} }
test_python() { test_python() {
# shellcheck disable=SC2086 # shellcheck disable=SC2086
time python test/run_test.py --exclude-jit-executor --exclude-distributed-tests --exclude-quantization-tests $INCLUDE_CLAUSE --verbose $PYTHON_TEST_EXTRA_OPTION time python test/run_test.py --exclude-jit-executor --exclude-distributed-tests $INCLUDE_CLAUSE --verbose $PYTHON_TEST_EXTRA_OPTION
assert_git_not_dirty assert_git_not_dirty
} }
test_python_smoke() { test_python_smoke() {
# Smoke tests for H100/B200 # Smoke tests for H100
time python test/run_test.py --include test_matmul_cuda test_scaled_matmul_cuda inductor/test_fp8 inductor/test_max_autotune inductor/test_cutedsl_grouped_mm $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running time python test/run_test.py --include test_matmul_cuda inductor/test_fp8 inductor/test_max_autotune $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
assert_git_not_dirty
}
test_python_smoke_b200() {
# Targeted smoke tests for B200 - staged approach to avoid too many failures
time python test/run_test.py --include test_matmul_cuda test_scaled_matmul_cuda inductor/test_fp8 $PYTHON_TEST_EXTRA_OPTION --upload-artifacts-while-running
assert_git_not_dirty assert_git_not_dirty
} }
@ -394,7 +374,6 @@ test_dynamo_wrapped_shard() {
--exclude-distributed-tests \ --exclude-distributed-tests \
--exclude-torch-export-tests \ --exclude-torch-export-tests \
--exclude-aot-dispatch-tests \ --exclude-aot-dispatch-tests \
--exclude-quantization-tests \
--shard "$1" "$NUM_TEST_SHARDS" \ --shard "$1" "$NUM_TEST_SHARDS" \
--verbose \ --verbose \
--upload-artifacts-while-running --upload-artifacts-while-running
@ -439,7 +418,7 @@ test_inductor_distributed() {
# this runs on both single-gpu and multi-gpu instance. It should be smart about skipping tests that aren't supported # this runs on both single-gpu and multi-gpu instance. It should be smart about skipping tests that aren't supported
# with if required # gpus aren't available # with if required # gpus aren't available
python test/run_test.py --include distributed/test_dynamo_distributed distributed/test_inductor_collectives distributed/test_aten_comm_compute_reordering distributed/test_compute_comm_reordering --verbose python test/run_test.py --include distributed/test_dynamo_distributed distributed/test_inductor_collectives distributed/test_compute_comm_reordering --verbose
assert_git_not_dirty assert_git_not_dirty
} }
@ -462,37 +441,31 @@ test_inductor_shard() {
--verbose --verbose
} }
test_inductor_aoti_cpp() { test_inductor_aoti() {
# docker build uses bdist_wheel which does not work with test_aot_inductor
# TODO: need a faster way to build
if [[ "$BUILD_ENVIRONMENT" == *rocm* ]]; then if [[ "$BUILD_ENVIRONMENT" == *rocm* ]]; then
# We need to hipify before building again # We need to hipify before building again
python3 tools/amd_build/build_amd.py python3 tools/amd_build/build_amd.py
fi fi
if [[ "$BUILD_ENVIRONMENT" == *sm86* ]]; then if [[ "$BUILD_ENVIRONMENT" == *sm86* ]]; then
BUILD_COMMAND=(TORCH_CUDA_ARCH_LIST=8.6 USE_FLASH_ATTENTION=OFF python -m pip install --no-build-isolation -v -e .)
# TODO: Replace me completely, as one should not use conda libstdc++, nor need special path to TORCH_LIB # TODO: Replace me completely, as one should not use conda libstdc++, nor need special path to TORCH_LIB
TEST_ENVS=(CPP_TESTS_DIR="${BUILD_BIN_DIR}" LD_LIBRARY_PATH="/opt/conda/envs/py_3.10/lib:${TORCH_LIB_DIR}:${LD_LIBRARY_PATH}") TEST_ENVS=(CPP_TESTS_DIR="${BUILD_BIN_DIR}" LD_LIBRARY_PATH="/opt/conda/envs/py_3.10/lib:${TORCH_LIB_DIR}:${LD_LIBRARY_PATH}")
else else
BUILD_COMMAND=(python -m pip install --no-build-isolation -v -e .)
TEST_ENVS=(CPP_TESTS_DIR="${BUILD_BIN_DIR}" LD_LIBRARY_PATH="${TORCH_LIB_DIR}") TEST_ENVS=(CPP_TESTS_DIR="${BUILD_BIN_DIR}" LD_LIBRARY_PATH="${TORCH_LIB_DIR}")
fi fi
# aoti cmake custom command requires `torch` to be installed
# initialize the cmake build cache and install torch
/usr/bin/env "${BUILD_COMMAND[@]}"
# rebuild with the build cache with `BUILD_AOT_INDUCTOR_TEST` enabled
/usr/bin/env CMAKE_FRESH=1 BUILD_AOT_INDUCTOR_TEST=1 "${BUILD_COMMAND[@]}"
/usr/bin/env "${TEST_ENVS[@]}" python test/run_test.py --cpp --verbose -i cpp/test_aoti_abi_check cpp/test_aoti_inference cpp/test_vec_half_AVX2 -dist=loadfile /usr/bin/env "${TEST_ENVS[@]}" python test/run_test.py --cpp --verbose -i cpp/test_aoti_abi_check cpp/test_aoti_inference cpp/test_vec_half_AVX2 -dist=loadfile
} }
test_inductor_aoti_cross_compile_for_windows() {
TEST_REPORTS_DIR=$(pwd)/test/test-reports
mkdir -p "$TEST_REPORTS_DIR"
# Set WINDOWS_CUDA_HOME environment variable
WINDOWS_CUDA_HOME="$(pwd)/win-torch-wheel-extracted"
export WINDOWS_CUDA_HOME
echo "WINDOWS_CUDA_HOME is set to: $WINDOWS_CUDA_HOME"
echo "Contents:"
ls -lah "$(pwd)/win-torch-wheel-extracted/lib/x64/" || true
python test/inductor/test_aoti_cross_compile_windows.py -k compile --package-dir "$TEST_REPORTS_DIR" --win-torch-lib-dir "$(pwd)/win-torch-wheel-extracted/torch/lib"
}
test_inductor_cpp_wrapper_shard() { test_inductor_cpp_wrapper_shard() {
if [[ -z "$NUM_TEST_SHARDS" ]]; then if [[ -z "$NUM_TEST_SHARDS" ]]; then
echo "NUM_TEST_SHARDS must be defined to run a Python test shard" echo "NUM_TEST_SHARDS must be defined to run a Python test shard"
@ -523,14 +496,6 @@ test_inductor_cpp_wrapper_shard() {
-k 'take' \ -k 'take' \
--shard "$1" "$NUM_TEST_SHARDS" \ --shard "$1" "$NUM_TEST_SHARDS" \
--verbose --verbose
if [[ "${BUILD_ENVIRONMENT}" == *xpu* ]]; then
python test/run_test.py \
--include inductor/test_mkldnn_pattern_matcher \
-k 'xpu' \
--shard "$1" "$NUM_TEST_SHARDS" \
--verbose
fi
} }
# "Global" flags for inductor benchmarking controlled by TEST_CONFIG # "Global" flags for inductor benchmarking controlled by TEST_CONFIG
@ -574,8 +539,6 @@ fi
if [[ "${TEST_CONFIG}" == *cpu* ]]; then if [[ "${TEST_CONFIG}" == *cpu* ]]; then
DYNAMO_BENCHMARK_FLAGS+=(--device cpu) DYNAMO_BENCHMARK_FLAGS+=(--device cpu)
elif [[ "${TEST_CONFIG}" == *xpu* ]]; then
DYNAMO_BENCHMARK_FLAGS+=(--device xpu)
else else
DYNAMO_BENCHMARK_FLAGS+=(--device cuda) DYNAMO_BENCHMARK_FLAGS+=(--device cuda)
fi fi
@ -669,8 +632,6 @@ test_perf_for_dashboard() {
device=cuda_b200 device=cuda_b200
elif [[ "${TEST_CONFIG}" == *rocm* ]]; then elif [[ "${TEST_CONFIG}" == *rocm* ]]; then
device=rocm device=rocm
elif [[ "${TEST_CONFIG}" == *xpu* ]]; then
device=xpu
fi fi
for mode in "${modes[@]}"; do for mode in "${modes[@]}"; do
@ -826,11 +787,6 @@ test_inductor_halide() {
assert_git_not_dirty assert_git_not_dirty
} }
test_inductor_pallas() {
python test/run_test.py --include inductor/test_pallas.py --verbose
assert_git_not_dirty
}
test_inductor_triton_cpu() { test_inductor_triton_cpu() {
python test/run_test.py --include inductor/test_triton_cpu_backend.py inductor/test_torchinductor_strided_blocks.py --verbose python test/run_test.py --include inductor/test_triton_cpu_backend.py inductor/test_torchinductor_strided_blocks.py --verbose
assert_git_not_dirty assert_git_not_dirty
@ -855,7 +811,7 @@ test_dynamo_benchmark() {
elif [[ "${suite}" == "timm_models" ]]; then elif [[ "${suite}" == "timm_models" ]]; then
export TORCHBENCH_ONLY_MODELS="inception_v3" export TORCHBENCH_ONLY_MODELS="inception_v3"
elif [[ "${suite}" == "torchbench" ]]; then elif [[ "${suite}" == "torchbench" ]]; then
export TORCHBENCH_ONLY_MODELS="BERT_pytorch" export TORCHBENCH_ONLY_MODELS="hf_Bert"
fi fi
fi fi
test_single_dynamo_benchmark "dashboard" "$suite" "$shard_id" "$@" test_single_dynamo_benchmark "dashboard" "$suite" "$shard_id" "$@"
@ -886,13 +842,13 @@ test_inductor_torchbench_smoketest_perf() {
mkdir -p "$TEST_REPORTS_DIR" mkdir -p "$TEST_REPORTS_DIR"
python benchmarks/dynamo/torchbench.py --device cuda --performance --backend inductor --float16 --training \ python benchmarks/dynamo/torchbench.py --device cuda --performance --backend inductor --float16 --training \
--batch-size-file "$(realpath benchmarks/dynamo/torchbench_models_list.txt)" --only BERT_pytorch \ --batch-size-file "$(realpath benchmarks/dynamo/torchbench_models_list.txt)" --only hf_Bert \
--output "$TEST_REPORTS_DIR/inductor_training_smoketest.csv" --output "$TEST_REPORTS_DIR/inductor_training_smoketest.csv"
# The threshold value needs to be actively maintained to make this check useful # The threshold value needs to be actively maintained to make this check useful
python benchmarks/dynamo/check_perf_csv.py -f "$TEST_REPORTS_DIR/inductor_training_smoketest.csv" -t 1.4 python benchmarks/dynamo/check_perf_csv.py -f "$TEST_REPORTS_DIR/inductor_training_smoketest.csv" -t 1.4
# Check memory compression ratio for a few models # Check memory compression ratio for a few models
for test in BERT_pytorch yolov3; do for test in hf_Albert timm_vision_transformer; do
python benchmarks/dynamo/torchbench.py --device cuda --performance --backend inductor --amp --training \ python benchmarks/dynamo/torchbench.py --device cuda --performance --backend inductor --amp --training \
--disable-cudagraphs --batch-size-file "$(realpath benchmarks/dynamo/torchbench_models_list.txt)" \ --disable-cudagraphs --batch-size-file "$(realpath benchmarks/dynamo/torchbench_models_list.txt)" \
--only $test --output "$TEST_REPORTS_DIR/inductor_training_smoketest_$test.csv" --only $test --output "$TEST_REPORTS_DIR/inductor_training_smoketest_$test.csv"
@ -903,7 +859,7 @@ test_inductor_torchbench_smoketest_perf() {
done done
# Perform some "warm-start" runs for a few huggingface models. # Perform some "warm-start" runs for a few huggingface models.
for test in AllenaiLongformerBase DistilBertForMaskedLM DistillGPT2 GoogleFnet YituTechConvBert; do for test in AlbertForQuestionAnswering AllenaiLongformerBase DistilBertForMaskedLM DistillGPT2 GoogleFnet YituTechConvBert; do
python benchmarks/dynamo/huggingface.py --accuracy --training --amp --inductor --device cuda --warm-start-latency \ python benchmarks/dynamo/huggingface.py --accuracy --training --amp --inductor --device cuda --warm-start-latency \
--only $test --output "$TEST_REPORTS_DIR/inductor_warm_start_smoketest_$test.csv" --only $test --output "$TEST_REPORTS_DIR/inductor_warm_start_smoketest_$test.csv"
python benchmarks/dynamo/check_accuracy.py \ python benchmarks/dynamo/check_accuracy.py \
@ -917,7 +873,7 @@ test_inductor_set_cpu_affinity(){
export LD_PRELOAD="$JEMALLOC_LIB":"$LD_PRELOAD" export LD_PRELOAD="$JEMALLOC_LIB":"$LD_PRELOAD"
export MALLOC_CONF="oversize_threshold:1,background_thread:true,metadata_thp:auto,dirty_decay_ms:-1,muzzy_decay_ms:-1" export MALLOC_CONF="oversize_threshold:1,background_thread:true,metadata_thp:auto,dirty_decay_ms:-1,muzzy_decay_ms:-1"
if [[ "$(uname -m)" != "aarch64" ]]; then if [[ "${TEST_CONFIG}" != *aarch64* ]]; then
# Use Intel OpenMP for x86 # Use Intel OpenMP for x86
IOMP_LIB="$(dirname "$(which python)")/../lib/libiomp5.so" IOMP_LIB="$(dirname "$(which python)")/../lib/libiomp5.so"
export LD_PRELOAD="$IOMP_LIB":"$LD_PRELOAD" export LD_PRELOAD="$IOMP_LIB":"$LD_PRELOAD"
@ -931,7 +887,7 @@ test_inductor_set_cpu_affinity(){
cores=$((cpus / thread_per_core)) cores=$((cpus / thread_per_core))
# Set number of cores to 16 on aarch64 for performance runs # Set number of cores to 16 on aarch64 for performance runs
if [[ "$(uname -m)" == "aarch64" && $cores -gt 16 ]]; then if [[ "${TEST_CONFIG}" == *aarch64* && $cores -gt 16 ]]; then
cores=16 cores=16
fi fi
export OMP_NUM_THREADS=$cores export OMP_NUM_THREADS=$cores
@ -1182,12 +1138,6 @@ test_distributed() {
fi fi
} }
test_quantization() {
echo "Testing quantization"
python test/test_quantization.py
}
test_rpc() { test_rpc() {
echo "Testing RPC C++ tests" echo "Testing RPC C++ tests"
# NB: the ending test_rpc must match the current function name for the current # NB: the ending test_rpc must match the current function name for the current
@ -1434,7 +1384,7 @@ EOF
pip3 install -r requirements.txt pip3 install -r requirements.txt
# shellcheck source=./common-build.sh # shellcheck source=./common-build.sh
source "$(dirname "${BASH_SOURCE[0]}")/common-build.sh" source "$(dirname "${BASH_SOURCE[0]}")/common-build.sh"
python -m build --wheel --no-isolation -C--build-option=--bdist-dir="base_bdist_tmp" --outdir "base_dist" python setup.py bdist_wheel --bdist-dir="base_bdist_tmp" --dist-dir="base_dist"
python -mpip install base_dist/*.whl python -mpip install base_dist/*.whl
echo "::endgroup::" echo "::endgroup::"
@ -1582,10 +1532,14 @@ test_executorch() {
install_torchvision install_torchvision
install_torchaudio install_torchaudio
INSTALL_SCRIPT="$(pwd)/.ci/docker/common/install_executorch.sh"
pushd /executorch pushd /executorch
"${INSTALL_SCRIPT}" setup_executorch
export PYTHON_EXECUTABLE=python
export CMAKE_ARGS="-DEXECUTORCH_BUILD_PYBIND=ON -DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON"
# NB: We need to rebuild ExecuTorch runner here because it depends on PyTorch
# from the PR
bash .ci/scripts/setup-linux.sh --build-tool cmake
echo "Run ExecuTorch unit tests" echo "Run ExecuTorch unit tests"
pytest -v -n auto pytest -v -n auto
@ -1599,14 +1553,17 @@ test_executorch() {
popd popd
# Test torchgen generated code for Executorch.
echo "Testing ExecuTorch op registration"
"$BUILD_BIN_DIR"/test_edge_op_registration
assert_git_not_dirty assert_git_not_dirty
} }
test_linux_aarch64() { test_linux_aarch64() {
python test/run_test.py --include test_modules test_mkldnn test_mkldnn_fusion test_openmp test_torch test_dynamic_shapes \ python test/run_test.py --include test_modules test_mkldnn test_mkldnn_fusion test_openmp test_torch test_dynamic_shapes \
test_transformers test_multiprocessing test_numpy_interop test_autograd test_binary_ufuncs test_complex test_spectral_ops \ test_transformers test_multiprocessing test_numpy_interop test_autograd test_binary_ufuncs test_complex test_spectral_ops \
test_foreach test_reductions test_unary_ufuncs test_tensor_creation_ops test_ops profiler/test_memory_profiler \ test_foreach test_reductions test_unary_ufuncs test_tensor_creation_ops test_ops \
distributed/elastic/timer/api_test distributed/elastic/timer/local_timer_example distributed/elastic/timer/local_timer_test \
--shard "$SHARD_NUMBER" "$NUM_TEST_SHARDS" --verbose --shard "$SHARD_NUMBER" "$NUM_TEST_SHARDS" --verbose
# Dynamo tests # Dynamo tests
@ -1632,12 +1589,11 @@ test_operator_benchmark() {
TEST_REPORTS_DIR=$(pwd)/test/test-reports TEST_REPORTS_DIR=$(pwd)/test/test-reports
mkdir -p "$TEST_REPORTS_DIR" mkdir -p "$TEST_REPORTS_DIR"
TEST_DIR=$(pwd) TEST_DIR=$(pwd)
ARCH=$(uname -m)
test_inductor_set_cpu_affinity test_inductor_set_cpu_affinity
cd benchmarks/operator_benchmark/pt_extension cd benchmarks/operator_benchmark/pt_extension
python -m pip install . -v --no-build-isolation python -m pip install .
cd "${TEST_DIR}"/benchmarks/operator_benchmark cd "${TEST_DIR}"/benchmarks/operator_benchmark
$TASKSET python -m benchmark_all_test --device "$1" --tag-filter "$2" \ $TASKSET python -m benchmark_all_test --device "$1" --tag-filter "$2" \
@ -1647,28 +1603,9 @@ test_operator_benchmark() {
pip_install pandas pip_install pandas
python check_perf_csv.py \ python check_perf_csv.py \
--actual "${TEST_REPORTS_DIR}/operator_benchmark_eager_float32_cpu.csv" \ --actual "${TEST_REPORTS_DIR}/operator_benchmark_eager_float32_cpu.csv" \
--expected "${ARCH}_expected_ci_operator_benchmark_eager_float32_cpu.csv" --expected "expected_ci_operator_benchmark_eager_float32_cpu.csv"
} }
test_operator_microbenchmark() {
TEST_REPORTS_DIR=$(pwd)/test/test-reports
mkdir -p "$TEST_REPORTS_DIR"
TEST_DIR=$(pwd)
cd benchmarks/operator_benchmark/pt_extension
python -m pip install .
cd "${TEST_DIR}"/benchmarks/operator_benchmark
for OP_BENCHMARK_TESTS in matmul mm addmm bmm conv; do
$TASKSET python -m pt.${OP_BENCHMARK_TESTS}_test --tag-filter long \
--output-json-for-dashboard "${TEST_REPORTS_DIR}/operator_microbenchmark_${OP_BENCHMARK_TESTS}_compile.json" \
--benchmark-name "PyTorch operator microbenchmark" --use-compile
$TASKSET python -m pt.${OP_BENCHMARK_TESTS}_test --tag-filter long \
--output-json-for-dashboard "${TEST_REPORTS_DIR}/operator_microbenchmark_${OP_BENCHMARK_TESTS}.json" \
--benchmark-name "PyTorch operator microbenchmark"
done
}
if ! [[ "${BUILD_ENVIRONMENT}" == *libtorch* || "${BUILD_ENVIRONMENT}" == *-bazel-* ]]; then if ! [[ "${BUILD_ENVIRONMENT}" == *libtorch* || "${BUILD_ENVIRONMENT}" == *-bazel-* ]]; then
(cd test && python -c "import torch; print(torch.__config__.show())") (cd test && python -c "import torch; print(torch.__config__.show())")
@ -1684,7 +1621,7 @@ if [[ "${TEST_CONFIG}" == *numpy_2* ]]; then
python -m pip install --pre numpy==2.0.2 scipy==1.13.1 numba==0.60.0 python -m pip install --pre numpy==2.0.2 scipy==1.13.1 numba==0.60.0
fi fi
python test/run_test.py --include dynamo/test_functions.py dynamo/test_unspec.py test_binary_ufuncs.py test_fake_tensor.py test_linalg.py test_numpy_interop.py test_tensor_creation_ops.py test_torch.py torch_np/test_basic.py python test/run_test.py --include dynamo/test_functions.py dynamo/test_unspec.py test_binary_ufuncs.py test_fake_tensor.py test_linalg.py test_numpy_interop.py test_tensor_creation_ops.py test_torch.py torch_np/test_basic.py
elif [[ "${BUILD_ENVIRONMENT}" == *aarch64* && "${TEST_CONFIG}" == 'default' ]]; then elif [[ "${BUILD_ENVIRONMENT}" == *aarch64* && "${TEST_CONFIG}" != *perf_cpu_aarch64* ]]; then
test_linux_aarch64 test_linux_aarch64
elif [[ "${TEST_CONFIG}" == *backward* ]]; then elif [[ "${TEST_CONFIG}" == *backward* ]]; then
test_forward_backward_compatibility test_forward_backward_compatibility
@ -1701,8 +1638,6 @@ elif [[ "${TEST_CONFIG}" == *executorch* ]]; then
test_executorch test_executorch
elif [[ "$TEST_CONFIG" == 'jit_legacy' ]]; then elif [[ "$TEST_CONFIG" == 'jit_legacy' ]]; then
test_python_legacy_jit test_python_legacy_jit
elif [[ "$TEST_CONFIG" == 'quantization' ]]; then
test_quantization
elif [[ "${BUILD_ENVIRONMENT}" == *libtorch* ]]; then elif [[ "${BUILD_ENVIRONMENT}" == *libtorch* ]]; then
# TODO: run some C++ tests # TODO: run some C++ tests
echo "no-op at the moment" echo "no-op at the moment"
@ -1725,20 +1660,14 @@ elif [[ "${TEST_CONFIG}" == *operator_benchmark* ]]; then
test_operator_benchmark cpu ${TEST_MODE} test_operator_benchmark cpu ${TEST_MODE}
fi fi
elif [[ "${TEST_CONFIG}" == *operator_microbenchmark* ]]; then
test_operator_microbenchmark
elif [[ "${TEST_CONFIG}" == *inductor_distributed* ]]; then elif [[ "${TEST_CONFIG}" == *inductor_distributed* ]]; then
test_inductor_distributed test_inductor_distributed
elif [[ "${TEST_CONFIG}" == *inductor-halide* ]]; then elif [[ "${TEST_CONFIG}" == *inductor-halide* ]]; then
test_inductor_halide test_inductor_halide
elif [[ "${TEST_CONFIG}" == *inductor-pallas* ]]; then
test_inductor_pallas
elif [[ "${TEST_CONFIG}" == *inductor-triton-cpu* ]]; then elif [[ "${TEST_CONFIG}" == *inductor-triton-cpu* ]]; then
test_inductor_triton_cpu test_inductor_triton_cpu
elif [[ "${TEST_CONFIG}" == *inductor-micro-benchmark* ]]; then elif [[ "${TEST_CONFIG}" == *inductor-micro-benchmark* ]]; then
test_inductor_micro_benchmark test_inductor_micro_benchmark
elif [[ "${TEST_CONFIG}" == *aoti_cross_compile_for_windows* ]]; then
test_inductor_aoti_cross_compile_for_windows
elif [[ "${TEST_CONFIG}" == *huggingface* ]]; then elif [[ "${TEST_CONFIG}" == *huggingface* ]]; then
install_torchvision install_torchvision
id=$((SHARD_NUMBER-1)) id=$((SHARD_NUMBER-1))
@ -1770,7 +1699,7 @@ elif [[ "${TEST_CONFIG}" == *torchbench* ]]; then
else else
# Do this after checkout_install_torchbench to ensure we clobber any # Do this after checkout_install_torchbench to ensure we clobber any
# nightlies that torchbench may pull in # nightlies that torchbench may pull in
if [[ "${TEST_CONFIG}" != *cpu* && "${TEST_CONFIG}" != *xpu* ]]; then if [[ "${TEST_CONFIG}" != *cpu* ]]; then
install_torchrec_and_fbgemm install_torchrec_and_fbgemm
fi fi
PYTHONPATH=/torchbench test_dynamo_benchmark torchbench "$id" PYTHONPATH=/torchbench test_dynamo_benchmark torchbench "$id"
@ -1779,11 +1708,16 @@ elif [[ "${TEST_CONFIG}" == *inductor_cpp_wrapper* ]]; then
install_torchvision install_torchvision
PYTHONPATH=/torchbench test_inductor_cpp_wrapper_shard "$SHARD_NUMBER" PYTHONPATH=/torchbench test_inductor_cpp_wrapper_shard "$SHARD_NUMBER"
if [[ "$SHARD_NUMBER" -eq "1" ]]; then if [[ "$SHARD_NUMBER" -eq "1" ]]; then
test_inductor_aoti_cpp test_inductor_aoti
fi fi
elif [[ "${TEST_CONFIG}" == *inductor* ]]; then elif [[ "${TEST_CONFIG}" == *inductor* ]]; then
install_torchvision install_torchvision
test_inductor_shard "${SHARD_NUMBER}" test_inductor_shard "${SHARD_NUMBER}"
if [[ "${SHARD_NUMBER}" == 1 ]]; then
if [[ "${BUILD_ENVIRONMENT}" != linux-jammy-py3.9-gcc11-build ]]; then
test_inductor_distributed
fi
fi
elif [[ "${TEST_CONFIG}" == *einops* ]]; then elif [[ "${TEST_CONFIG}" == *einops* ]]; then
test_einops test_einops
elif [[ "${TEST_CONFIG}" == *dynamo_wrapped* ]]; then elif [[ "${TEST_CONFIG}" == *dynamo_wrapped* ]]; then
@ -1833,14 +1767,10 @@ elif [[ "${BUILD_ENVIRONMENT}" == *xpu* ]]; then
test_xpu_bin test_xpu_bin
elif [[ "${TEST_CONFIG}" == smoke ]]; then elif [[ "${TEST_CONFIG}" == smoke ]]; then
test_python_smoke test_python_smoke
elif [[ "${TEST_CONFIG}" == smoke_b200 ]]; then
test_python_smoke_b200
elif [[ "${TEST_CONFIG}" == h100_distributed ]]; then elif [[ "${TEST_CONFIG}" == h100_distributed ]]; then
test_h100_distributed test_h100_distributed
elif [[ "${TEST_CONFIG}" == "h100-symm-mem" ]]; then elif [[ "${TEST_CONFIG}" == "h100-symm-mem" ]]; then
test_h100_symm_mem test_h100_symm_mem
elif [[ "${TEST_CONFIG}" == "b200-symm-mem" ]]; then
test_h100_symm_mem
elif [[ "${TEST_CONFIG}" == h100_cutlass_backend ]]; then elif [[ "${TEST_CONFIG}" == h100_cutlass_backend ]]; then
test_h100_cutlass_backend test_h100_cutlass_backend
else else

View File

@@ -1,32 +0,0 @@
#!/bin/bash
set -ex -o pipefail
# Suppress ANSI color escape sequences
export TERM=vt100
# shellcheck source=./common.sh
source "$(dirname "${BASH_SOURCE[0]}")/common.sh"
# shellcheck source=./common-build.sh
source "$(dirname "${BASH_SOURCE[0]}")/common-build.sh"
echo "Environment variables"
env
echo "Testing FA3 stable wheel still works with currently built torch"
echo "Installing ABI Stable FA3 wheel"
# The wheel was built on https://github.com/Dao-AILab/flash-attention/commit/b3846b059bf6b143d1cd56879933be30a9f78c81
# on torch nightly torch==2.9.0.dev20250830+cu129
$MAYBE_SUDO pip -q install https://s3.amazonaws.com/ossci-linux/wheels/flash_attn_3-3.0.0b1-cp39-abi3-linux_x86_64.whl
pushd flash-attention/hopper
export PYTHONPATH=$PWD
pytest -v -s \
"test_flash_attn.py::test_flash_attn_output[1-1-192-False-False-False-0.0-False-False-mha-dtype0]" \
"test_flash_attn.py::test_flash_attn_varlen_output[511-1-64-True-False-False-0.0-False-False-gqa-dtype2]" \
"test_flash_attn.py::test_flash_attn_kvcache[1-128-128-False-False-True-None-0.0-False-False-True-False-True-False-gqa-dtype0]" \
"test_flash_attn.py::test_flash_attn_race_condition[97-97-192-True-dtype0]" \
"test_flash_attn.py::test_flash_attn_combine[2-3-64-dtype1]" \
"test_flash_attn.py::test_flash3_bw_compatibility"
popd
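This deleted smoke test can still be reproduced by hand against a locally built torch; the wheel URL and the test ID below are taken verbatim from the script above (assumes a flash-attention checkout alongside, as in CI):

    pip install https://s3.amazonaws.com/ossci-linux/wheels/flash_attn_3-3.0.0b1-cp39-abi3-linux_x86_64.whl
    cd flash-attention/hopper
    PYTHONPATH=$PWD pytest -v -s "test_flash_attn.py::test_flash3_bw_compatibility"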

View File

@@ -70,7 +70,7 @@ sccache --zero-stats
sccache --show-stats
# Build the wheel
python -m build --wheel --no-isolation python setup.py bdist_wheel
if ($LASTEXITCODE -ne 0) { exit 1 }
# Install the wheel locally
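This hunk, and several below, swap the deprecated `python setup.py bdist_wheel` for the PEP 517 build frontend. A minimal sketch of the new-style invocation (output directory name illustrative):

    python -m pip install build                            # PyPA build frontend
    python -m build --wheel --no-isolation --outdir dist   # --no-isolation reuses the current env's deps
    python -m pip install --no-index --no-deps dist/*.whl  # install the freshly built wheel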

View File

@@ -38,12 +38,10 @@ if errorlevel 1 goto fail
if not errorlevel 0 goto fail
:: Update CMake
:: TODO: Investigate why this helps MKL detection, even when CMake from choco is not used
call choco upgrade -y cmake --no-progress --installargs 'ADD_CMAKE_TO_PATH=System' --apply-install-arguments-to-dependencies --version=3.27.9
if errorlevel 1 goto fail
if not errorlevel 0 goto fail
:: TODO: Move to .ci/docker/requirements-ci.txt
call pip install mkl==2024.2.0 mkl-static==2024.2.0 mkl-include==2024.2.0
if errorlevel 1 goto fail
if not errorlevel 0 goto fail
@@ -132,14 +130,14 @@ if "%USE_CUDA%"=="1" (
:: Print all existing environment variables for debugging
set
python -m build --wheel --no-isolation python setup.py bdist_wheel
if errorlevel 1 goto fail
if not errorlevel 0 goto fail
sccache --show-stats
python -c "import os, glob; os.system('python -mpip install --no-index --no-deps ' + glob.glob('dist/*.whl')[0])"
(
if "%BUILD_ENVIRONMENT%"=="" (
echo NOTE: To run `import torch`, please make sure to activate the conda environment by running `call %CONDA_ROOT_DIR%\Scripts\activate.bat %CONDA_ROOT_DIR%\envs\py_tmp` in Command Prompt before running Git Bash. echo NOTE: To run `import torch`, please make sure to activate the conda environment by running `call %CONDA_PARENT_DIR%\Miniconda3\Scripts\activate.bat %CONDA_PARENT_DIR%\Miniconda3` in Command Prompt before running Git Bash.
) else (
copy /Y "dist\*.whl" "%PYTORCH_FINAL_PACKAGE_DIR%"

View File

@@ -3,12 +3,12 @@ if "%BUILD_ENVIRONMENT%"=="" (
) else (
set CONDA_PARENT_DIR=C:\Jenkins
)
set CONDA_ROOT_DIR=%CONDA_PARENT_DIR%\Miniconda3
:: Be conservative here when rolling out the new AMI with conda. This will try
:: to install conda as before if it couldn't find the conda installation. This
:: can be removed eventually after we gain enough confidence in the AMI
if not exist %CONDA_ROOT_DIR% ( if not exist %CONDA_PARENT_DIR%\Miniconda3 (
set INSTALL_FRESH_CONDA=1
)
@@ -17,14 +17,10 @@ if "%INSTALL_FRESH_CONDA%"=="1" (
if errorlevel 1 exit /b
if not errorlevel 0 exit /b
%TMP_DIR_WIN%\Miniconda3-latest-Windows-x86_64.exe /InstallationType=JustMe /RegisterPython=0 /S /AddToPath=0 /D=%CONDA_ROOT_DIR% %TMP_DIR_WIN%\Miniconda3-latest-Windows-x86_64.exe /InstallationType=JustMe /RegisterPython=0 /S /AddToPath=0 /D=%CONDA_PARENT_DIR%\Miniconda3
if errorlevel 1 exit /b
if not errorlevel 0 exit /b
)
:: Activate conda so that we can use its commands, i.e. conda, python, pip
call %CONDA_ROOT_DIR%\Scripts\activate.bat %CONDA_ROOT_DIR% call %CONDA_PARENT_DIR%\Miniconda3\Scripts\activate.bat %CONDA_PARENT_DIR%\Miniconda3
:: Activate conda so that we can use its commands, i.e. conda, python, pip
call conda activate py_tmp
call pip install -r .ci/docker/requirements-ci.txt

View File

@@ -14,7 +14,7 @@ if not errorlevel 0 exit /b
:: build\torch. Rather than changing all these references, making a copy of torch folder
:: from conda to the current workspace is easier. The workspace will be cleaned up after
:: the job anyway
xcopy /s %CONDA_ROOT_DIR%\envs\py_tmp\Lib\site-packages\torch %TMP_DIR_WIN%\build\torch\ xcopy /s %CONDA_PARENT_DIR%\Miniconda3\Lib\site-packages\torch %TMP_DIR_WIN%\build\torch\
pushd .
if "%VC_VERSION%" == "" (

View File

@@ -15,35 +15,37 @@ if errorlevel 1 exit /b 1
if not errorlevel 0 exit /b 1
cd %TMP_DIR_WIN%\build\torch\test
:: Enable delayed variable expansion to make the list
setlocal enabledelayedexpansion
set EXE_LIST=
for /r "." %%a in (*.exe) do ( for /r "." %%a in (*.exe) do (
if "%%~na" == "c10_intrusive_ptr_benchmark" ( call :libtorch_check "%%~na" "%%~fa"
@REM NB: This is not a gtest executable file, thus couldn't be handled by
@REM pytest-cpp and is excluded from test discovery by run_test
call "%%~fa"
if errorlevel 1 goto fail
if not errorlevel 0 goto fail
) else (
if "%%~na" == "verify_api_visibility" (
@REM Skip verify_api_visibility as it is a compile-level test
) else (
set EXE_LIST=!EXE_LIST! cpp/%%~na
)
)
)
goto :eof
:libtorch_check
cd %CWD%
set CPP_TESTS_DIR=%TMP_DIR_WIN%\build\torch\test
:: Run python test\run_test.py on the list :: Skip verify_api_visibility as it is a compile-level test
set NO_TD=True && python test\run_test.py --cpp --verbose -i !EXE_LIST! if "%~1" == "verify_api_visibility" goto :eof
if errorlevel 1 goto fail
if not errorlevel 0 goto fail
goto :eof echo Running "%~2"
if "%~1" == "c10_intrusive_ptr_benchmark" (
:: NB: This is not a gtest executable file, thus couldn't be handled by pytest-cpp
call "%~2"
goto :eof
)
python test\run_test.py --cpp --verbose -i "cpp/%~1"
if errorlevel 1 (
echo %1 failed with exit code %errorlevel%
goto fail
)
if not errorlevel 0 (
echo %1 failed with exit code %errorlevel%
goto fail
)
:eof
exit /b 0
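The rewritten batch logic above gathers every gtest executable into EXE_LIST via delayed expansion and hands the whole list to run_test.py in a single call, rather than looping a :libtorch_check subroutine per binary. For readers on the POSIX side, a rough bash equivalent of the same collection pattern (a sketch for illustration, not part of the CI scripts):

    EXE_LIST=""
    for exe in ./*.exe; do
        name=$(basename "$exe" .exe)
        case "$name" in
            verify_api_visibility) ;;                # compile-level test, skipped
            c10_intrusive_ptr_benchmark) "$exe" ;;   # not a gtest binary, run directly
            *) EXE_LIST="$EXE_LIST cpp/$name" ;;     # queue for run_test.py
        esac
    done
    NO_TD=True python test/run_test.py --cpp --verbose -i $EXE_LIST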

View File

@@ -25,7 +25,7 @@ echo Copying over test times file
robocopy /E "%PYTORCH_FINAL_PACKAGE_DIR_WIN%\.additional_ci_files" "%PROJECT_DIR_WIN%\.additional_ci_files"
echo Run nn tests
python run_test.py --exclude-jit-executor --exclude-distributed-tests --exclude-quantization-tests --shard "%SHARD_NUMBER%" "%NUM_TEST_SHARDS%" --verbose python run_test.py --exclude-jit-executor --exclude-distributed-tests --shard "%SHARD_NUMBER%" "%NUM_TEST_SHARDS%" --verbose
if ERRORLEVEL 1 goto fail
popd

View File

@@ -37,8 +37,23 @@ if [[ "$BUILD_ENVIRONMENT" == *cuda* ]]; then
export PYTORCH_TESTING_DEVICE_ONLY_FOR="cuda"
fi
# TODO: Move this to .ci/docker/requirements-ci.txt # TODO: Move both of them to Windows AMI
python -m pip install "psutil==5.9.1" nvidia-ml-py "pytest-shard==0.1.2" python -m pip install pytest-rerunfailures==10.3 pytest-cpp==2.3.0 tensorboard==2.13.0 protobuf==5.29.4 pytest-subtests==0.13.1
# Install Z3 optional dependency for Windows builds.
python -m pip install z3-solver==4.15.1.0
# Install tlparse for test\dynamo\test_structured_trace.py UTs.
python -m pip install tlparse==0.3.30
# Install parameterized
python -m pip install parameterized==0.8.1
# Install pulp for testing ilps under torch\distributed\_tools
python -m pip install pulp==2.9.0
# Install expecttest to merge https://github.com/pytorch/pytorch/pull/155308
python -m pip install expecttest==0.3.0
run_tests() {
# Run nvidia-smi if available

View File

@@ -48,7 +48,7 @@ sccache --zero-stats
sccache --show-stats
:: Call PyTorch build script
python -m build --wheel --no-isolation --outdir "%PYTORCH_FINAL_PACKAGE_DIR%" python setup.py bdist_wheel -d "%PYTORCH_FINAL_PACKAGE_DIR%"
:: show sccache stats
sccache --show-stats

View File

@@ -37,10 +37,10 @@ IF "%CUDA_PATH_V128%"=="" (
)
IF "%BUILD_VISION%" == "" (
set TORCH_CUDA_ARCH_LIST=7.0;7.5;8.0;8.6;9.0;10.0;12.0 set TORCH_CUDA_ARCH_LIST=6.1;7.0;7.5;8.0;8.6;9.0;10.0;12.0
set TORCH_NVCC_FLAGS=-Xfatbin -compress-all
) ELSE (
set NVCC_FLAGS=-D__CUDA_NO_HALF_OPERATORS__ --expt-relaxed-constexpr -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=compute_80 -gencode=arch=compute_86,code=compute_86 -gencode=arch=compute_90,code=compute_90 -gencode=arch=compute_100,code=compute_100 -gencode=arch=compute_120,code=compute_120 set NVCC_FLAGS=-D__CUDA_NO_HALF_OPERATORS__ --expt-relaxed-constexpr -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=compute_80 -gencode=arch=compute_86,code=compute_86 -gencode=arch=compute_90,code=compute_90 -gencode=arch=compute_100,code=compute_100 -gencode=arch=compute_120,code=compute_120
)
set "CUDA_PATH=%CUDA_PATH_V128%"

View File

@@ -1,20 +1,12 @@
copy "%CUDA_PATH%\bin\cusparse*64_*.dll*" pytorch\torch\lib
if %CUDA_VERSION% geq 130 ( copy "%CUDA_PATH%\bin\cublas*64_*.dll*" pytorch\torch\lib
set "dll_path=bin\x64" copy "%CUDA_PATH%\bin\cudart*64_*.dll*" pytorch\torch\lib
) else ( copy "%CUDA_PATH%\bin\curand*64_*.dll*" pytorch\torch\lib
set "dll_path=bin" copy "%CUDA_PATH%\bin\cufft*64_*.dll*" pytorch\torch\lib
) copy "%CUDA_PATH%\bin\cusolver*64_*.dll*" pytorch\torch\lib
copy "%CUDA_PATH%\%dll_path%\cusparse*64_*.dll*" pytorch\torch\lib
copy "%CUDA_PATH%\%dll_path%\cublas*64_*.dll*" pytorch\torch\lib
copy "%CUDA_PATH%\%dll_path%\cudart*64_*.dll*" pytorch\torch\lib
copy "%CUDA_PATH%\%dll_path%\curand*64_*.dll*" pytorch\torch\lib
copy "%CUDA_PATH%\%dll_path%\cufft*64_*.dll*" pytorch\torch\lib
copy "%CUDA_PATH%\%dll_path%\cusolver*64_*.dll*" pytorch\torch\lib
copy "%CUDA_PATH%\%dll_path%\nvrtc*64_*.dll*" pytorch\torch\lib
copy "%CUDA_PATH%\%dll_path%\nvJitLink_*.dll*" pytorch\torch\lib
copy "%CUDA_PATH%\bin\cudnn*64_*.dll*" pytorch\torch\lib copy "%CUDA_PATH%\bin\cudnn*64_*.dll*" pytorch\torch\lib
copy "%CUDA_PATH%\bin\nvrtc*64_*.dll*" pytorch\torch\lib
copy "%CUDA_PATH%\extras\CUPTI\lib64\cupti64_*.dll*" pytorch\torch\lib copy "%CUDA_PATH%\extras\CUPTI\lib64\cupti64_*.dll*" pytorch\torch\lib
copy "%CUDA_PATH%\extras\CUPTI\lib64\nvperf_host*.dll*" pytorch\torch\lib copy "%CUDA_PATH%\extras\CUPTI\lib64\nvperf_host*.dll*" pytorch\torch\lib
@ -28,3 +20,8 @@ copy "%libuv_ROOT%\bin\uv.dll" pytorch\torch\lib
if exist "C:\Windows\System32\zlibwapi.dll" ( if exist "C:\Windows\System32\zlibwapi.dll" (
copy "C:\Windows\System32\zlibwapi.dll" pytorch\torch\lib copy "C:\Windows\System32\zlibwapi.dll" pytorch\torch\lib
) )
::copy nvJitLink dll is requires for cuda 12+
if exist "%CUDA_PATH%\bin\nvJitLink_*.dll*" (
copy "%CUDA_PATH%\bin\nvJitLink_*.dll*" pytorch\torch\lib
)

View File

@@ -1,9 +1,9 @@
set WIN_DRIVER_VN=580.88 set WIN_DRIVER_VN=528.89
set "DRIVER_DOWNLOAD_LINK=https://ossci-windows.s3.amazonaws.com/%WIN_DRIVER_VN%-data-center-tesla-desktop-win10-win11-64bit-dch-international.exe" & REM @lint-ignore set "DRIVER_DOWNLOAD_LINK=https://ossci-windows.s3.amazonaws.com/%WIN_DRIVER_VN%-data-center-tesla-desktop-winserver-2016-2019-2022-dch-international.exe" & REM @lint-ignore
curl --retry 3 -kL %DRIVER_DOWNLOAD_LINK% --output %WIN_DRIVER_VN%-data-center-tesla-desktop-win10-win11-64bit-dch-international.exe curl --retry 3 -kL %DRIVER_DOWNLOAD_LINK% --output %WIN_DRIVER_VN%-data-center-tesla-desktop-winserver-2016-2019-2022-dch-international.exe
if errorlevel 1 exit /b 1
start /wait %WIN_DRIVER_VN%-data-center-tesla-desktop-win10-win11-64bit-dch-international.exe -s -noreboot start /wait %WIN_DRIVER_VN%-data-center-tesla-desktop-winserver-2016-2019-2022-dch-international.exe -s -noreboot
if errorlevel 1 exit /b 1
del %WIN_DRIVER_VN%-data-center-tesla-desktop-win10-win11-64bit-dch-international.exe || ver > NUL del %WIN_DRIVER_VN%-data-center-tesla-desktop-winserver-2016-2019-2022-dch-international.exe || ver > NUL

View File

@@ -7,9 +7,12 @@ if "%DESIRED_PYTHON%" == "3.13t" (
set "PYTHON_INSTALLER_URL=https://www.python.org/ftp/python/3.13.0/python-3.13.0-amd64.exe"
set ADDITIONAL_OPTIONS="Include_freethreaded=1"
set PYTHON_EXEC="python3.13t"
) else if "%DESIRED_PYTHON%"=="3.14" (
echo Python version is set to 3.14 or 3.14t
set "PYTHON_INSTALLER_URL=https://www.python.org/ftp/python/3.14.0/python-3.14.0rc1-amd64.exe"
) else if "%DESIRED_PYTHON%"=="3.14t" (
echo Python version is set to 3.14 or 3.14t
set "PYTHON_INSTALLER_URL=https://www.python.org/ftp/python/3.14.0/python-3.14.0-amd64.exe" set "PYTHON_INSTALLER_URL=https://www.python.org/ftp/python/3.14.0/python-3.14.0rc1-amd64.exe"
set ADDITIONAL_OPTIONS="Include_freethreaded=1"
set PYTHON_EXEC="python3.14t"
) else (
@@ -25,5 +28,5 @@ start /wait "" python-amd64.exe /quiet InstallAllUsers=1 PrependPath=0 Include_t
if errorlevel 1 exit /b 1
set "PATH=%CD%\Python\Scripts;%CD%\Python;%PATH%"
%PYTHON_EXEC% -m pip install --upgrade pip setuptools packaging wheel build %PYTHON_EXEC% -m pip install --upgrade pip setuptools packaging wheel
if errorlevel 1 exit /b 1

View File

@@ -86,7 +86,7 @@ copy /Y "%LIBTORCH_PREFIX%-%PYTORCH_BUILD_VERSION%.zip" "%PYTORCH_FINAL_PACKAGE_
goto build_end
:pytorch
%PYTHON_EXEC% -m build --wheel --no-isolation --outdir "%PYTORCH_FINAL_PACKAGE_DIR%" %PYTHON_EXEC% setup.py bdist_wheel -d "%PYTORCH_FINAL_PACKAGE_DIR%"
:build_end
IF ERRORLEVEL 1 exit /b 1

View File

@@ -63,7 +63,7 @@ if errorlevel 1 exit /b 1
call %CONDA_HOME%\condabin\activate.bat testenv
if errorlevel 1 exit /b 1
call conda install -y -q -c conda-forge libuv=1.51 call conda install -y -q -c conda-forge libuv=1.39
call conda install -y -q intel-openmp
echo "install and test libtorch"

View File

@@ -13,9 +13,9 @@ if not exist "%SRC_DIR%\temp_build" mkdir "%SRC_DIR%\temp_build"
:xpu_bundle_install_start
set XPU_BUNDLE_PARENT_DIR=C:\Program Files (x86)\Intel\oneAPI
set XPU_BUNDLE_URL=https://registrationcenter-download.intel.com/akdlm/IRC_NAS/75d4eb97-914a-4a95-852c-7b9733d80f74/intel-deep-learning-essentials-2025.1.3.8_offline.exe set XPU_BUNDLE_URL=https://registrationcenter-download.intel.com/akdlm/IRC_NAS/9d6d6c17-ca2d-4735-9331-99447e4a1280/intel-deep-learning-essentials-2025.0.1.28_offline.exe
set XPU_BUNDLE_PRODUCT_NAME=intel.oneapi.win.deep-learning-essentials.product
set XPU_BUNDLE_VERSION=2025.1.3+5 set XPU_BUNDLE_VERSION=2025.0.1+20
set XPU_BUNDLE_INSTALLED=0
set XPU_BUNDLE_UNINSTALL=0
set XPU_EXTRA_URL=NULL
@@ -24,9 +24,9 @@ set XPU_EXTRA_VERSION=2025.0.1+1226
set XPU_EXTRA_INSTALLED=0
set XPU_EXTRA_UNINSTALL=0
if not [%XPU_VERSION%]==[] if [%XPU_VERSION%]==[2025.2] ( if not [%XPU_VERSION%]==[] if [%XPU_VERSION%]==[2025.1] (
set XPU_BUNDLE_URL=https://registrationcenter-download.intel.com/akdlm/IRC_NAS/24751ead-ddc5-4479-b9e6-f9fe2ff8b9f2/intel-deep-learning-essentials-2025.2.1.25_offline.exe set XPU_BUNDLE_URL=https://registrationcenter-download.intel.com/akdlm/IRC_NAS/75d4eb97-914a-4a95-852c-7b9733d80f74/intel-deep-learning-essentials-2025.1.3.8_offline.exe
set XPU_BUNDLE_VERSION=2025.2.1+20 set XPU_BUNDLE_VERSION=2025.1.3+5
)
:: Check if XPU bundle is target version or already installed
@@ -90,3 +90,14 @@ if errorlevel 1 exit /b 1
del xpu_extra.exe
:xpu_install_end
if not "%XPU_ENABLE_KINETO%"=="1" goto install_end
:: Install Level Zero SDK
set XPU_EXTRA_LZ_URL=https://github.com/oneapi-src/level-zero/releases/download/v1.14.0/level-zero-sdk_1.14.0.zip
curl -k -L %XPU_EXTRA_LZ_URL% --output "%SRC_DIR%\temp_build\level_zero_sdk.zip"
echo "Installing level zero SDK..."
7z x "%SRC_DIR%\temp_build\level_zero_sdk.zip" -o"%SRC_DIR%\temp_build\level_zero"
set "INCLUDE=%SRC_DIR%\temp_build\level_zero\include;%INCLUDE%"
del "%SRC_DIR%\temp_build\level_zero_sdk.zip"
:install_end

View File

@@ -18,7 +18,7 @@ if "%DESIRED_PYTHON%" == "3.9" %PYTHON_EXEC% -m pip install numpy==2.0.2 cmake
%PYTHON_EXEC% -m pip install pyyaml
%PYTHON_EXEC% -m pip install mkl-include mkl-static
%PYTHON_EXEC% -m pip install boto3 requests ninja typing_extensions setuptools==72.1.0 %PYTHON_EXEC% -m pip install boto3 ninja typing_extensions setuptools==72.1.0
where cmake.exe

View File

@@ -85,7 +85,7 @@ mkdir -p "$PYTORCH_FINAL_PACKAGE_DIR" || true
# Create an isolated directory to store this build's pytorch checkout and conda
# installation
if [[ -z "$MAC_PACKAGE_WORK_DIR" ]]; then
MAC_PACKAGE_WORK_DIR="$(pwd)/tmp_wheel_${DESIRED_PYTHON}_$(date +%H%M%S)" MAC_PACKAGE_WORK_DIR="$(pwd)/tmp_wheel_conda_${DESIRED_PYTHON}_$(date +%H%M%S)"
fi
mkdir -p "$MAC_PACKAGE_WORK_DIR" || true
if [[ -n ${GITHUB_ACTIONS} ]]; then
@@ -96,11 +96,11 @@ fi
whl_tmp_dir="${MAC_PACKAGE_WORK_DIR}/dist"
mkdir -p "$whl_tmp_dir"
mac_version='macosx-11_0-arm64' mac_version='macosx_11_0_arm64'
libtorch_arch='arm64'
# Create a consistent wheel package name to rename the wheel to
wheel_filename_new="${TORCH_PACKAGE_NAME}-${build_version}${build_number_prefix}-cp${python_nodot}-none-${mac_version//[-,]/_}.whl" wheel_filename_new="${TORCH_PACKAGE_NAME}-${build_version}${build_number_prefix}-cp${python_nodot}-none-${mac_version}.whl"
###########################################################
@@ -124,58 +124,93 @@ popd
export TH_BINARY_BUILD=1
export INSTALL_TEST=0 # dont install test binaries into site-packages
export MACOSX_DEPLOYMENT_TARGET=11.0 export MACOSX_DEPLOYMENT_TARGET=10.15
export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"}
SETUPTOOLS_PINNED_VERSION="==70.1.0"
PYYAML_PINNED_VERSION="==5.3"
EXTRA_CONDA_INSTALL_FLAGS=""
CONDA_ENV_CREATE_FLAGS=""
RENAME_WHEEL=true
case $desired_python in
3.14t)
echo "Using 3.14t deps"
mac_version='macosx-11.0-arm64' SETUPTOOLS_PINNED_VERSION=">=70.1.0"
PYYAML_PINNED_VERSION=">=6.0.1"
NUMPY_PINNED_VERSION="==2.1.0"
CONDA_ENV_CREATE_FLAGS="python-freethreading"
EXTRA_CONDA_INSTALL_FLAGS="-c conda-forge/label/python_rc -c conda-forge"
desired_python="3.14.0rc1"
RENAME_WHEEL=false
;;
3.14)
echo "Using 3.14 deps"
mac_version='macosx-11.0-arm64' SETUPTOOLS_PINNED_VERSION=">=70.1.0"
PYYAML_PINNED_VERSION=">=6.0.1"
NUMPY_PINNED_VERSION="==2.1.0"
EXTRA_CONDA_INSTALL_FLAGS="-c conda-forge/label/python_rc -c conda-forge"
desired_python="3.14.0rc1"
RENAME_WHEEL=false
;;
3.13t)
echo "Using 3.13t deps" echo "Using 3.13 deps"
mac_version='macosx-11.0-arm64' SETUPTOOLS_PINNED_VERSION=">=70.1.0"
PYYAML_PINNED_VERSION=">=6.0.1"
NUMPY_PINNED_VERSION="==2.1.0"
CONDA_ENV_CREATE_FLAGS="python-freethreading"
EXTRA_CONDA_INSTALL_FLAGS="-c conda-forge"
desired_python="3.13"
RENAME_WHEEL=false
;;
3.13)
echo "Using 3.13 deps"
SETUPTOOLS_PINNED_VERSION=">=70.1.0"
PYYAML_PINNED_VERSION=">=6.0.1"
NUMPY_PINNED_VERSION="==2.1.0"
;;
3.12)
echo "Using 3.12 deps"
SETUPTOOLS_PINNED_VERSION=">=70.1.0"
PYYAML_PINNED_VERSION=">=6.0.1"
NUMPY_PINNED_VERSION="==2.0.2"
;;
3.11)
echo "Using 3.11 deps"
SETUPTOOLS_PINNED_VERSION=">=70.1.0"
PYYAML_PINNED_VERSION=">=5.3"
NUMPY_PINNED_VERSION="==2.0.2"
;;
3.10)
echo "Using 3.10 deps"
SETUPTOOLS_PINNED_VERSION=">=70.1.0"
PYYAML_PINNED_VERSION=">=5.3"
NUMPY_PINNED_VERSION="==2.0.2"
;;
3.9)
echo "Using 3.9 deps"
SETUPTOOLS_PINNED_VERSION=">=70.1.0"
PYYAML_PINNED_VERSION=">=5.3"
NUMPY_PINNED_VERSION="==2.0.2"
;;
*)
echo "Unsupported version $desired_python" echo "Using default deps"
exit 1 NUMPY_PINNED_VERSION="==1.11.3"
;;
esac
# Install into a fresh env
tmp_env_name="wheel_py$python_nodot"
conda create ${EXTRA_CONDA_INSTALL_FLAGS} -yn "$tmp_env_name" python="$desired_python" ${CONDA_ENV_CREATE_FLAGS}
source activate "$tmp_env_name"
PINNED_PACKAGES=(
"setuptools${SETUPTOOLS_PINNED_VERSION}"
"pyyaml${PYYAML_PINNED_VERSION}"
"numpy${NUMPY_PINNED_VERSION}"
)
python -mvenv ~/${desired_python}-build retry pip install "${PINNED_PACKAGES[@]}" -r "${pytorch_rootdir}/requirements-build.txt"
source ~/${desired_python}-build/bin/activate pip install requests ninja typing-extensions
retry pip install "${PINNED_PACKAGES[@]}" -r "${pytorch_rootdir}/requirements.txt" retry pip install -r "${pytorch_rootdir}/requirements.txt" || true
retry brew install libomp
# For USE_DISTRIBUTED=1 on macOS, need libuv, which is built as part of tensorpipe submodule
@@ -186,11 +221,11 @@ export USE_QNNPACK=OFF
export BUILD_TEST=OFF
pushd "$pytorch_rootdir"
echo "Calling -m build --wheel --no-isolation at $(date)" echo "Calling setup.py bdist_wheel at $(date)"
_PYTHON_HOST_PLATFORM=${mac_version} ARCHFLAGS="-arch arm64" python -m build --wheel --no-isolation --outdir "$whl_tmp_dir" -C--plat-name="${mac_version//[-.]/_}" python setup.py bdist_wheel -d "$whl_tmp_dir"
echo "Finished -m build --wheel --no-isolation at $(date)" echo "Finished setup.py bdist_wheel at $(date)"
if [[ $package_type != 'libtorch' ]]; then
echo "delocating wheel dependencies"

View File

@@ -71,7 +71,14 @@ export PYTORCH_BUILD_NUMBER=1
# Set triton version as part of PYTORCH_EXTRA_INSTALL_REQUIREMENTS
TRITON_VERSION=$(cat $PYTORCH_ROOT/.ci/docker/triton_version.txt)
TRITON_CONSTRAINT="platform_system == 'Linux'"
# Here PYTORCH_EXTRA_INSTALL_REQUIREMENTS is already set for all the wheel builds, hence append TRITON_CONSTRAINT
TRITON_CONSTRAINT="platform_system == 'Linux' and platform_machine == 'x86_64'"
# CUDA 12.9 builds have triton for Linux and Linux aarch64 binaries.
if [[ "$DESIRED_CUDA" == "cu129" ]]; then
TRITON_CONSTRAINT="platform_system == 'Linux'"
fi
if [[ "$PACKAGE_TYPE" =~ .*wheel.* && -n "${PYTORCH_EXTRA_INSTALL_REQUIREMENTS:-}" && ! "$PYTORCH_BUILD_VERSION" =~ .*xpu.* ]]; then
TRITON_REQUIREMENT="triton==${TRITON_VERSION}; ${TRITON_CONSTRAINT}"
@@ -163,13 +170,8 @@ if [[ "$(uname)" != Darwin ]]; then
MEMORY_LIMIT_MAX_JOBS=12
NUM_CPUS=$(( $(nproc) - 2 ))
if [[ "$(uname)" == Linux ]]; then # Defaults here for **binary** linux builds so they can be changed in one place
# Defaults here for **binary** linux builds so they can be changed in one place export MAX_JOBS=${MAX_JOBS:-$(( ${NUM_CPUS} > ${MEMORY_LIMIT_MAX_JOBS} ? ${MEMORY_LIMIT_MAX_JOBS} : ${NUM_CPUS} ))}
export MAX_JOBS=${MAX_JOBS:-$(( ${NUM_CPUS} > ${MEMORY_LIMIT_MAX_JOBS} ? ${MEMORY_LIMIT_MAX_JOBS} : ${NUM_CPUS} ))}
else
# For other builds
export MAX_JOBS=${NUM_CPUS}
fi
cat >>"$envfile" <<EOL
export MAX_JOBS="${MAX_JOBS}"
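The ternary above caps build parallelism at min(nproc - 2, 12) unless MAX_JOBS is already set in the environment, since linking can exhaust memory before it exhausts cores. A standalone sketch of the same expression:

    MEMORY_LIMIT_MAX_JOBS=12
    NUM_CPUS=$(( $(nproc) - 2 ))
    # bash ternary as min(): a 32-core machine yields 12 jobs, an 8-core machine 6
    export MAX_JOBS=${MAX_JOBS:-$(( NUM_CPUS > MEMORY_LIMIT_MAX_JOBS ? MEMORY_LIMIT_MAX_JOBS : NUM_CPUS ))}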

View File

@@ -15,7 +15,8 @@ fi
if [[ "$DESIRED_CUDA" == 'xpu' ]]; then
export VC_YEAR=2022
export USE_SCCACHE=0
export XPU_VERSION=2025.2 export XPU_VERSION=2025.1
export XPU_ENABLE_KINETO=1
fi
echo "Free space on filesystem before build:"

Some files were not shown because too many files have changed in this diff.