Compare commits

1 commit

2d757f6517  apply  2025-02-18 11:24:34 -08:00

8942 changed files with 292196 additions and 625954 deletions

View File

@@ -2,7 +2,7 @@ build --cxxopt=--std=c++17
 build --copt=-I.
 # Bazel does not support including its cc_library targets as system
 # headers. We work around this for generated code
-# (e.g. torch/headeronly/macros/cmake_macros.h) by making the generated directory a
+# (e.g. c10/macros/cmake_macros.h) by making the generated directory a
 # system include path.
 build --copt=-isystem --copt bazel-out/k8-fastbuild/bin
 build --copt=-isystem --copt bazel-out/darwin-fastbuild/bin

View File

@@ -1,15 +0,0 @@
-version: 1
-paths:
-  include:
-    - "**/*.py"
-  exclude:
-    - ".*"
-    - ".*/**"
-    - "**/.*/**"
-    - "**/.*"
-    - "**/_*/**"
-    - "**/_*.py"
-    - "**/test/**"
-    - "**/benchmarks/**"
-    - "**/test_*.py"
-    - "**/*_test.py"

View File

@@ -3,20 +3,10 @@ set -eux -o pipefail
 GPU_ARCH_VERSION=${GPU_ARCH_VERSION:-}
 
-# Set CUDA architecture lists to match x86 build_cuda.sh
 if [[ "$GPU_ARCH_VERSION" == *"12.6"* ]]; then
-    export TORCH_CUDA_ARCH_LIST="8.0;9.0"
+    export TORCH_CUDA_ARCH_LIST="9.0"
 elif [[ "$GPU_ARCH_VERSION" == *"12.8"* ]]; then
-    export TORCH_CUDA_ARCH_LIST="8.0;9.0;10.0;12.0"
-elif [[ "$GPU_ARCH_VERSION" == *"13.0"* ]]; then
-    export TORCH_CUDA_ARCH_LIST="8.0;9.0;10.0;11.0;12.0+PTX"
-fi
-
-# Compress the fatbin with -compress-mode=size for CUDA 13
-if [[ "$DESIRED_CUDA" == *"13"* ]]; then
-    export TORCH_NVCC_FLAGS="-compress-mode=size"
-    # Bundle ptxas into the cu13 wheel, see https://github.com/pytorch/pytorch/issues/163801
-    export BUILD_BUNDLE_PTXAS=1
+    export TORCH_CUDA_ARCH_LIST="9.0;10.0;12.0"
 fi
 
 SCRIPTPATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
@@ -30,22 +20,13 @@ cd /
 # on the mounted pytorch repo
 git config --global --add safe.directory /pytorch
 pip install -r /pytorch/requirements.txt
-pip install auditwheel==6.2.0 wheel
+pip install auditwheel
 if [ "$DESIRED_CUDA" = "cpu" ]; then
     echo "BASE_CUDA_VERSION is not set. Building cpu wheel."
-    python /pytorch/.ci/aarch64_linux/aarch64_wheel_ci_build.py --enable-mkldnn
+    #USE_PRIORITIZED_TEXT_FOR_LD for enable linker script optimization https://github.com/pytorch/pytorch/pull/121975/files
+    USE_PRIORITIZED_TEXT_FOR_LD=1 python /pytorch/.ci/aarch64_linux/aarch64_wheel_ci_build.py --enable-mkldnn
 else
     echo "BASE_CUDA_VERSION is set to: $DESIRED_CUDA"
-    export USE_SYSTEM_NCCL=1
-
-    # Check if we should use NVIDIA libs from PyPI (similar to x86 build_cuda.sh logic)
-    if [[ -z "$PYTORCH_EXTRA_INSTALL_REQUIREMENTS" ]]; then
-        echo "Bundling CUDA libraries with wheel for aarch64."
-    else
-        echo "Using nvidia libs from pypi for aarch64."
-        echo "Updated PYTORCH_EXTRA_INSTALL_REQUIREMENTS for aarch64: $PYTORCH_EXTRA_INSTALL_REQUIREMENTS"
-        export USE_NVIDIA_PYPI_LIBS=1
-    fi
-
-    python /pytorch/.ci/aarch64_linux/aarch64_wheel_ci_build.py --enable-mkldnn --enable-cuda
+    #USE_PRIORITIZED_TEXT_FOR_LD for enable linker script optimization https://github.com/pytorch/pytorch/pull/121975/files
+    USE_PRIORITIZED_TEXT_FOR_LD=1 python /pytorch/.ci/aarch64_linux/aarch64_wheel_ci_build.py --enable-mkldnn --enable-cuda
 fi
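Note: `TORCH_CUDA_ARCH_LIST` enumerates the GPU compute capabilities the wheel is compiled for. As a reading aid, here is a minimal Python sketch of the same toolkit-version-to-arch-list mapping that the shell branches above express on the older (`-`) side of the diff; the `arch_list_for` function name is ours, not part of the CI scripts:

```python
# Arch lists copied from the deleted side of the hunk above; illustrative only.
ARCH_LISTS = {
    "12.6": "8.0;9.0",
    "12.8": "8.0;9.0;10.0;12.0",
    "13.0": "8.0;9.0;10.0;11.0;12.0+PTX",
}

def arch_list_for(gpu_arch_version: str) -> str | None:
    """Return the arch list whose toolkit version appears in the string."""
    for version, archs in ARCH_LISTS.items():
        if version in gpu_arch_version:
            return archs
    return None  # CPU build or unrecognized version: leave unset

assert arch_list_for("cuda12.8") == "8.0;9.0;10.0;12.0"
```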

View File

@@ -13,208 +13,117 @@ def list_dir(path: str) -> list[str]:
     return check_output(["ls", "-1", path]).decode().split("\n")
 
 
-def replace_tag(filename) -> None:
-    with open(filename) as f:
-        lines = f.readlines()
-    for i, line in enumerate(lines):
-        if line.startswith("Tag:"):
-            lines[i] = line.replace("-linux_", "-manylinux_2_28_")
-            print(f"Updated tag from {line} to {lines[i]}")
-            break
-    with open(filename, "w") as f:
-        f.writelines(lines)
-
-
-def patch_library_rpath(
-    folder: str,
-    lib_name: str,
-    use_nvidia_pypi_libs: bool = False,
-    desired_cuda: str = "",
-) -> None:
-    """Apply patchelf to set RPATH for a library in torch/lib"""
-    lib_path = f"{folder}/tmp/torch/lib/{lib_name}"
-
-    if use_nvidia_pypi_libs:
-        # For PyPI NVIDIA libraries, construct CUDA RPATH
-        cuda_rpaths = [
-            "$ORIGIN/../../nvidia/cudnn/lib",
-            "$ORIGIN/../../nvidia/nvshmem/lib",
-            "$ORIGIN/../../nvidia/nccl/lib",
-            "$ORIGIN/../../nvidia/cusparselt/lib",
-        ]
-        if "130" in desired_cuda:
-            cuda_rpaths.append("$ORIGIN/../../nvidia/cu13/lib")
-        else:
-            cuda_rpaths.extend(
-                [
-                    "$ORIGIN/../../nvidia/cublas/lib",
-                    "$ORIGIN/../../nvidia/cuda_cupti/lib",
-                    "$ORIGIN/../../nvidia/cuda_nvrtc/lib",
-                    "$ORIGIN/../../nvidia/cuda_runtime/lib",
-                    "$ORIGIN/../../nvidia/cufft/lib",
-                    "$ORIGIN/../../nvidia/curand/lib",
-                    "$ORIGIN/../../nvidia/cusolver/lib",
-                    "$ORIGIN/../../nvidia/cusparse/lib",
-                    "$ORIGIN/../../nvidia/nvtx/lib",
-                    "$ORIGIN/../../nvidia/cufile/lib",
-                ]
-            )
-        # Add $ORIGIN for local torch libs
-        rpath = ":".join(cuda_rpaths) + ":$ORIGIN"
-    else:
-        # For bundled libraries, just use $ORIGIN
-        rpath = "$ORIGIN"
-
-    if os.path.exists(lib_path):
-        os.system(
-            f"cd {folder}/tmp/torch/lib/; "
-            f"patchelf --set-rpath '{rpath}' --force-rpath {lib_name}"
-        )
-
-
-def copy_and_patch_library(
-    src_path: str,
-    folder: str,
-    use_nvidia_pypi_libs: bool = False,
-    desired_cuda: str = "",
-) -> None:
-    """Copy a library to torch/lib and patch its RPATH"""
-    if os.path.exists(src_path):
-        lib_name = os.path.basename(src_path)
-        shutil.copy2(src_path, f"{folder}/tmp/torch/lib/{lib_name}")
-        patch_library_rpath(folder, lib_name, use_nvidia_pypi_libs, desired_cuda)
-
-
-def package_cuda_wheel(wheel_path, desired_cuda) -> None:
+def build_ArmComputeLibrary() -> None:
     """
-    Package the cuda wheel libraries
+    Using ArmComputeLibrary for aarch64 PyTorch
+    """
+    print("Building Arm Compute Library")
+    acl_build_flags = [
+        "debug=0",
+        "neon=1",
+        "opencl=0",
+        "os=linux",
+        "openmp=1",
+        "cppthreads=0",
+        "arch=armv8a",
+        "multi_isa=1",
+        "fixed_format_kernels=1",
+        "build=native",
+    ]
+    acl_install_dir = "/acl"
+    acl_checkout_dir = "ComputeLibrary"
+    os.makedirs(acl_install_dir)
+    check_call(
+        [
+            "git",
+            "clone",
+            "https://github.com/ARM-software/ComputeLibrary.git",
+            "-b",
+            "v24.09",
+            "--depth",
+            "1",
+            "--shallow-submodules",
+        ]
+    )
+    check_call(
+        ["scons", "Werror=1", "-j8", f"build_dir=/{acl_install_dir}/build"]
+        + acl_build_flags,
+        cwd=acl_checkout_dir,
+    )
+    for d in ["arm_compute", "include", "utils", "support", "src"]:
+        shutil.copytree(f"{acl_checkout_dir}/{d}", f"{acl_install_dir}/{d}")
+
+
+def update_wheel(wheel_path, desired_cuda) -> None:
+    """
+    Update the cuda wheel libraries
     """
     folder = os.path.dirname(wheel_path)
+    wheelname = os.path.basename(wheel_path)
     os.mkdir(f"{folder}/tmp")
     os.system(f"unzip {wheel_path} -d {folder}/tmp")
-    # Delete original wheel since it will be repackaged
-    os.system(f"rm {wheel_path}")
-
-    # Check if we should use PyPI NVIDIA libraries or bundle system libraries
-    use_nvidia_pypi_libs = os.getenv("USE_NVIDIA_PYPI_LIBS", "0") == "1"
-
-    if use_nvidia_pypi_libs:
-        print("Using nvidia libs from pypi - skipping CUDA library bundling")
-        # For PyPI approach, we don't bundle CUDA libraries - they come from PyPI packages
-        # We only need to bundle non-NVIDIA libraries
-        minimal_libs_to_copy = [
-            "/lib64/libgomp.so.1",
-            "/usr/lib64/libgfortran.so.5",
-            "/acl/build/libarm_compute.so",
-            "/acl/build/libarm_compute_graph.so",
+    libs_to_copy = [
+        "/usr/local/cuda/extras/CUPTI/lib64/libcupti.so.12",
+        "/usr/local/cuda/lib64/libcudnn.so.9",
+        "/usr/local/cuda/lib64/libcublas.so.12",
+        "/usr/local/cuda/lib64/libcublasLt.so.12",
+        "/usr/local/cuda/lib64/libcudart.so.12",
+        "/usr/local/cuda/lib64/libcufft.so.11",
+        "/usr/local/cuda/lib64/libcusparse.so.12",
+        "/usr/local/cuda/lib64/libcusparseLt.so.0",
+        "/usr/local/cuda/lib64/libcusolver.so.11",
+        "/usr/local/cuda/lib64/libcurand.so.10",
+        "/usr/local/cuda/lib64/libnvToolsExt.so.1",
+        "/usr/local/cuda/lib64/libnvJitLink.so.12",
+        "/usr/local/cuda/lib64/libnvrtc.so.12",
+        "/usr/local/cuda/lib64/libcudnn_adv.so.9",
+        "/usr/local/cuda/lib64/libcudnn_cnn.so.9",
+        "/usr/local/cuda/lib64/libcudnn_graph.so.9",
+        "/usr/local/cuda/lib64/libcudnn_ops.so.9",
+        "/usr/local/cuda/lib64/libcudnn_engines_runtime_compiled.so.9",
+        "/usr/local/cuda/lib64/libcudnn_engines_precompiled.so.9",
+        "/usr/local/cuda/lib64/libcudnn_heuristic.so.9",
+        "/lib64/libgomp.so.1",
+        "/usr/lib64/libgfortran.so.5",
+        "/acl/build/libarm_compute.so",
+        "/acl/build/libarm_compute_graph.so",
+    ]
+    if enable_cuda:
+        libs_to_copy += [
             "/usr/local/lib/libnvpl_lapack_lp64_gomp.so.0",
             "/usr/local/lib/libnvpl_blas_lp64_gomp.so.0",
             "/usr/local/lib/libnvpl_lapack_core.so.0",
             "/usr/local/lib/libnvpl_blas_core.so.0",
         ]
-
-        # Copy minimal libraries to unzipped_folder/torch/lib
-        for lib_path in minimal_libs_to_copy:
-            copy_and_patch_library(lib_path, folder, use_nvidia_pypi_libs, desired_cuda)
-
-        # Patch torch libraries used for searching libraries
-        torch_libs_to_patch = [
-            "libtorch.so",
-            "libtorch_cpu.so",
-            "libtorch_cuda.so",
-            "libtorch_cuda_linalg.so",
-            "libtorch_global_deps.so",
-            "libtorch_python.so",
-            "libtorch_nvshmem.so",
-            "libc10.so",
-            "libc10_cuda.so",
-            "libcaffe2_nvrtc.so",
-            "libshm.so",
-        ]
-        for lib_name in torch_libs_to_patch:
-            patch_library_rpath(folder, lib_name, use_nvidia_pypi_libs, desired_cuda)
+        if "126" in desired_cuda:
+            libs_to_copy += [
+                "/usr/local/cuda/lib64/libnvrtc-builtins.so.12.6",
+            ]
+        elif "128" in desired_cuda:
+            libs_to_copy += [
+                "/usr/local/cuda/lib64/libnvrtc-builtins.so.12.8",
+            ]
     else:
-        print("Bundling CUDA libraries with wheel")
-        # Original logic for bundling system CUDA libraries
-        # Common libraries for all CUDA versions
-        common_libs = [
-            # Non-NVIDIA system libraries
-            "/lib64/libgomp.so.1",
-            "/usr/lib64/libgfortran.so.5",
-            "/acl/build/libarm_compute.so",
-            "/acl/build/libarm_compute_graph.so",
-            # Common CUDA libraries (same for all versions)
-            "/usr/local/lib/libnvpl_lapack_lp64_gomp.so.0",
-            "/usr/local/lib/libnvpl_blas_lp64_gomp.so.0",
-            "/usr/local/lib/libnvpl_lapack_core.so.0",
-            "/usr/local/lib/libnvpl_blas_core.so.0",
-            "/usr/local/cuda/extras/CUPTI/lib64/libnvperf_host.so",
-            "/usr/local/cuda/lib64/libcudnn.so.9",
-            "/usr/local/cuda/lib64/libcusparseLt.so.0",
-            "/usr/local/cuda/lib64/libcurand.so.10",
-            "/usr/local/cuda/lib64/libnccl.so.2",
-            "/usr/local/cuda/lib64/libnvshmem_host.so.3",
-            "/usr/local/cuda/lib64/libcudnn_adv.so.9",
-            "/usr/local/cuda/lib64/libcudnn_cnn.so.9",
-            "/usr/local/cuda/lib64/libcudnn_graph.so.9",
-            "/usr/local/cuda/lib64/libcudnn_ops.so.9",
-            "/usr/local/cuda/lib64/libcudnn_engines_runtime_compiled.so.9",
-            "/usr/local/cuda/lib64/libcudnn_engines_precompiled.so.9",
-            "/usr/local/cuda/lib64/libcudnn_heuristic.so.9",
-            "/usr/local/cuda/lib64/libcufile.so.0",
-            "/usr/local/cuda/lib64/libcufile_rdma.so.1",
-            "/usr/local/cuda/lib64/libcusparse.so.12",
+        libs_to_copy += [
+            "/opt/OpenBLAS/lib/libopenblas.so.0",
         ]
-
-        # CUDA version-specific libraries
-        if "13" in desired_cuda:
-            minor_version = desired_cuda[-1]
-            version_specific_libs = [
-                "/usr/local/cuda/extras/CUPTI/lib64/libcupti.so.13",
-                "/usr/local/cuda/lib64/libcublas.so.13",
-                "/usr/local/cuda/lib64/libcublasLt.so.13",
-                "/usr/local/cuda/lib64/libcudart.so.13",
-                "/usr/local/cuda/lib64/libcufft.so.12",
-                "/usr/local/cuda/lib64/libcusolver.so.12",
-                "/usr/local/cuda/lib64/libnvJitLink.so.13",
-                "/usr/local/cuda/lib64/libnvrtc.so.13",
-                f"/usr/local/cuda/lib64/libnvrtc-builtins.so.13.{minor_version}",
-            ]
-        elif "12" in desired_cuda:
-            # Get the last character for libnvrtc-builtins version (e.g., "129" -> "9")
-            minor_version = desired_cuda[-1]
-            version_specific_libs = [
-                "/usr/local/cuda/extras/CUPTI/lib64/libcupti.so.12",
-                "/usr/local/cuda/lib64/libcublas.so.12",
-                "/usr/local/cuda/lib64/libcublasLt.so.12",
-                "/usr/local/cuda/lib64/libcudart.so.12",
-                "/usr/local/cuda/lib64/libcufft.so.11",
-                "/usr/local/cuda/lib64/libcusolver.so.11",
-                "/usr/local/cuda/lib64/libnvJitLink.so.12",
-                "/usr/local/cuda/lib64/libnvrtc.so.12",
-                f"/usr/local/cuda/lib64/libnvrtc-builtins.so.12.{minor_version}",
-            ]
-        else:
-            raise ValueError(f"Unsupported CUDA version: {desired_cuda}.")
-
-        # Combine all libraries
-        libs_to_copy = common_libs + version_specific_libs
-
-        # Copy libraries to unzipped_folder/torch/lib
-        for lib_path in libs_to_copy:
-            copy_and_patch_library(lib_path, folder, use_nvidia_pypi_libs, desired_cuda)
-
-    # Make sure the wheel is tagged with manylinux_2_28
-    for f in os.scandir(f"{folder}/tmp/"):
-        if f.is_dir() and f.name.endswith(".dist-info"):
-            replace_tag(f"{f.path}/WHEEL")
-            break
-
-    os.system(f"wheel pack {folder}/tmp/ -d {folder}")
-    os.system(f"rm -rf {folder}/tmp/")
+
+    # Copy libraries to unzipped_folder/a/lib
+    for lib_path in libs_to_copy:
+        lib_name = os.path.basename(lib_path)
+        shutil.copy2(lib_path, f"{folder}/tmp/torch/lib/{lib_name}")
+        os.system(
+            f"cd {folder}/tmp/torch/lib/; "
+            f"patchelf --set-rpath '$ORIGIN' --force-rpath {folder}/tmp/torch/lib/{lib_name}"
+        )
+    os.mkdir(f"{folder}/cuda_wheel")
+    os.system(f"cd {folder}/tmp/; zip -r {folder}/cuda_wheel/{wheelname} *")
+    shutil.move(
+        f"{folder}/cuda_wheel/{wheelname}",
+        f"{folder}/{wheelname}",
+        copy_function=shutil.copy2,
+    )
+    os.system(f"rm -rf {folder}/tmp/ {folder}/cuda_wheel/")
 
 
 def complete_wheel(folder: str) -> str:
@@ -223,9 +132,6 @@ def complete_wheel(folder: str) -> str:
     """
     wheel_name = list_dir(f"/{folder}/dist")[0]
 
-    # Please note for cuda we don't run auditwheel since we use custom script to package
-    # the cuda dependencies to the wheel file using update_wheel() method.
-    # However we need to make sure filename reflects the correct Manylinux platform.
     if "pytorch" in folder and not enable_cuda:
         print("Repairing Wheel with AuditWheel")
         check_call(["auditwheel", "repair", f"dist/{wheel_name}"], cwd=folder)
@@ -237,7 +143,7 @@ def complete_wheel(folder: str) -> str:
             f"/{folder}/dist/{repaired_wheel_name}",
         )
     else:
-        repaired_wheel_name = list_dir(f"/{folder}/dist")[0]
+        repaired_wheel_name = wheel_name
 
     print(f"Copying {repaired_wheel_name} to artifacts")
     shutil.copy2(
@@ -274,20 +180,8 @@ if __name__ == "__main__":
     ).decode()
 
     print("Building PyTorch wheel")
-    build_vars = ""
-    # MAX_JOB=5 is not required for CPU backend (see commit 465d98b)
-    if enable_cuda:
-        build_vars += "MAX_JOBS=5 "
-
-        # Handle PyPI NVIDIA libraries vs bundled libraries
-        use_nvidia_pypi_libs = os.getenv("USE_NVIDIA_PYPI_LIBS", "0") == "1"
-        if use_nvidia_pypi_libs:
-            print("Configuring build for PyPI NVIDIA libraries")
-            # Configure for dynamic linking (matching x86 logic)
-            build_vars += "ATEN_STATIC_CUDA=0 USE_CUDA_STATIC_LINK=0 USE_CUPTI_SO=1 "
-        else:
-            print("Configuring build for bundled NVIDIA libraries")
-            # Keep existing static linking approach - already configured above
+    build_vars = "MAX_JOBS=5 CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000 "
+    os.system("cd /pytorch; python setup.py clean")
 
     override_package_version = os.getenv("OVERRIDE_PACKAGE_VERSION")
     desired_cuda = os.getenv("DESIRED_CUDA")
@@ -310,24 +204,30 @@ if __name__ == "__main__":
         else:
             build_vars += f"BUILD_TEST=0 PYTORCH_BUILD_VERSION={version}.dev{build_date} PYTORCH_BUILD_NUMBER=1 "
     elif branch.startswith(("v1.", "v2.")):
-        build_vars += f"BUILD_TEST=0 PYTORCH_BUILD_VERSION={branch[1 : branch.find('-')]} PYTORCH_BUILD_NUMBER=1 "
+        build_vars += f"BUILD_TEST=0 PYTORCH_BUILD_VERSION={branch[1:branch.find('-')]} PYTORCH_BUILD_NUMBER=1 "
 
     if enable_mkldnn:
+        build_ArmComputeLibrary()
         print("build pytorch with mkldnn+acl backend")
-        build_vars += "USE_MKLDNN=ON USE_MKLDNN_ACL=ON "
-        build_vars += "ACL_ROOT_DIR=/acl "
+        build_vars += (
+            "USE_MKLDNN=ON USE_MKLDNN_ACL=ON "
+            "ACL_ROOT_DIR=/acl "
+            "LD_LIBRARY_PATH=/pytorch/build/lib:/acl/build:$LD_LIBRARY_PATH "
+            "ACL_INCLUDE_DIR=/acl/build "
+            "ACL_LIBRARY=/acl/build "
+        )
        if enable_cuda:
            build_vars += "BLAS=NVPL "
        else:
-            build_vars += "BLAS=OpenBLAS OpenBLAS_HOME=/opt/OpenBLAS "
+            build_vars += "BLAS=OpenBLAS OpenBLAS_HOME=/OpenBLAS "
    else:
        print("build pytorch without mkldnn backend")
 
-    os.system(f"cd /pytorch; {build_vars} python3 -m build --wheel --no-isolation")
+    os.system(f"cd /pytorch; {build_vars} python3 setup.py bdist_wheel")
    if enable_cuda:
        print("Updating Cuda Dependency")
        filename = os.listdir("/pytorch/dist/")
        wheel_path = f"/pytorch/dist/{filename[0]}"
-        package_cuda_wheel(wheel_path, desired_cuda)
+        update_wheel(wheel_path, desired_cuda)
    pytorch_wheel_name = complete_wheel("/pytorch/")
    print(f"Build Complete. Created {pytorch_wheel_name}..")

View File

@@ -19,11 +19,13 @@ import boto3
 # AMI images for us-east-1, change the following based on your ~/.aws/config
 os_amis = {
+    "ubuntu18_04": "ami-078eece1d8119409f",  # login_name: ubuntu
     "ubuntu20_04": "ami-052eac90edaa9d08f",  # login_name: ubuntu
     "ubuntu22_04": "ami-0c6c29c5125214c77",  # login_name: ubuntu
     "redhat8": "ami-0698b90665a2ddcf1",  # login_name: ec2-user
 }
+
+ubuntu18_04_ami = os_amis["ubuntu18_04"]
 ubuntu20_04_ami = os_amis["ubuntu20_04"]
@@ -241,7 +243,7 @@ def wait_for_connection(addr, port, timeout=15, attempt_cnt=5):
         try:
             with socket.create_connection((addr, port), timeout=timeout):
                 return
-        except (ConnectionRefusedError, TimeoutError):  # noqa: PERF203
+        except (ConnectionRefusedError, socket.timeout):  # noqa: PERF203
             if i == attempt_cnt - 1:
                 raise
             time.sleep(timeout)
@@ -299,6 +301,40 @@ def install_condaforge_python(host: RemoteHost, python_version="3.8") -> None:
     )
 
 
+def build_OpenBLAS(host: RemoteHost, git_clone_flags: str = "") -> None:
+    print("Building OpenBLAS")
+    host.run_cmd(
+        f"git clone https://github.com/xianyi/OpenBLAS -b v0.3.28 {git_clone_flags}"
+    )
+    make_flags = "NUM_THREADS=64 USE_OPENMP=1 NO_SHARED=1 DYNAMIC_ARCH=1 TARGET=ARMV8"
+    host.run_cmd(
+        f"pushd OpenBLAS && make {make_flags} -j8 && sudo make {make_flags} install && popd && rm -rf OpenBLAS"
+    )
+
+
+def build_ArmComputeLibrary(host: RemoteHost, git_clone_flags: str = "") -> None:
+    print("Building Arm Compute Library")
+    acl_build_flags = " ".join(
+        [
+            "debug=0",
+            "neon=1",
+            "opencl=0",
+            "os=linux",
+            "openmp=1",
+            "cppthreads=0",
+            "arch=armv8a",
+            "multi_isa=1",
+            "fixed_format_kernels=1",
+            "build=native",
+        ]
+    )
+    host.run_cmd(
+        f"git clone https://github.com/ARM-software/ComputeLibrary.git -b v24.09 {git_clone_flags}"
+    )
+    host.run_cmd(f"cd ComputeLibrary && scons Werror=1 -j8 {acl_build_flags}")
+
+
 def embed_libgomp(host: RemoteHost, use_conda, wheel_name) -> None:
     host.run_cmd("pip3 install auditwheel")
     host.run_cmd(
@@ -404,11 +440,13 @@ def build_torchvision(
         )
         build_vars += f"BUILD_VERSION={version}.dev{build_date}"
     elif build_version is not None:
-        build_vars += f"BUILD_VERSION={build_version} PYTORCH_VERSION={branch[1:].split('-', maxsplit=1)[0]}"
+        build_vars += (
+            f"BUILD_VERSION={build_version} PYTORCH_VERSION={branch[1:].split('-')[0]}"
+        )
     if host.using_docker():
         build_vars += " CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000"
 
-    host.run_cmd(f"cd vision && {build_vars} python3 -m build --wheel --no-isolation")
+    host.run_cmd(f"cd vision && {build_vars} python3 setup.py bdist_wheel")
     vision_wheel_name = host.list_dir("vision/dist")[0]
     embed_libgomp(host, use_conda, os.path.join("vision", "dist", vision_wheel_name))
@@ -459,11 +497,13 @@ def build_torchdata(
         )
         build_vars += f"BUILD_VERSION={version}.dev{build_date}"
     elif build_version is not None:
-        build_vars += f"BUILD_VERSION={build_version} PYTORCH_VERSION={branch[1:].split('-', maxsplit=1)[0]}"
+        build_vars += (
+            f"BUILD_VERSION={build_version} PYTORCH_VERSION={branch[1:].split('-')[0]}"
+        )
     if host.using_docker():
         build_vars += " CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000"
 
-    host.run_cmd(f"cd data && {build_vars} python3 -m build --wheel --no-isolation")
+    host.run_cmd(f"cd data && {build_vars} python3 setup.py bdist_wheel")
     wheel_name = host.list_dir("data/dist")[0]
     embed_libgomp(host, use_conda, os.path.join("data", "dist", wheel_name))
@@ -515,11 +555,13 @@ def build_torchtext(
         )
         build_vars += f"BUILD_VERSION={version}.dev{build_date}"
     elif build_version is not None:
-        build_vars += f"BUILD_VERSION={build_version} PYTORCH_VERSION={branch[1:].split('-', maxsplit=1)[0]}"
+        build_vars += (
+            f"BUILD_VERSION={build_version} PYTORCH_VERSION={branch[1:].split('-')[0]}"
+        )
     if host.using_docker():
         build_vars += " CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000"
 
-    host.run_cmd(f"cd text && {build_vars} python3 -m build --wheel --no-isolation")
+    host.run_cmd(f"cd text && {build_vars} python3 setup.py bdist_wheel")
     wheel_name = host.list_dir("text/dist")[0]
     embed_libgomp(host, use_conda, os.path.join("text", "dist", wheel_name))
@@ -573,14 +615,16 @@ def build_torchaudio(
         )
         build_vars += f"BUILD_VERSION={version}.dev{build_date}"
     elif build_version is not None:
-        build_vars += f"BUILD_VERSION={build_version} PYTORCH_VERSION={branch[1:].split('-', maxsplit=1)[0]}"
+        build_vars += (
+            f"BUILD_VERSION={build_version} PYTORCH_VERSION={branch[1:].split('-')[0]}"
+        )
     if host.using_docker():
         build_vars += " CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000"
 
     host.run_cmd(
         f"cd audio && export FFMPEG_ROOT=$(pwd)/third_party/ffmpeg && export USE_FFMPEG=1 \
         && ./packaging/ffmpeg/build.sh \
-        && {build_vars} python3 -m build --wheel --no-isolation"
+        && {build_vars} python3 setup.py bdist_wheel"
     )
 
     wheel_name = host.list_dir("audio/dist")[0]
@@ -615,6 +659,18 @@ def configure_system(
             "sudo apt-get install -y python3-dev python3-yaml python3-setuptools python3-wheel python3-pip"
         )
     host.run_cmd("pip3 install dataclasses typing-extensions")
+    # Install and switch to gcc-8 on Ubuntu-18.04
+    if not host.using_docker() and host.ami == ubuntu18_04_ami and compiler == "gcc-8":
+        host.run_cmd("sudo apt-get install -y g++-8 gfortran-8")
+        host.run_cmd(
+            "sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-8 100"
+        )
+        host.run_cmd(
+            "sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-8 100"
+        )
+        host.run_cmd(
+            "sudo update-alternatives --install /usr/bin/gfortran gfortran /usr/bin/gfortran-8 100"
+        )
     if not use_conda:
         print("Installing Cython + numpy from PyPy")
         host.run_cmd("sudo pip3 install Cython")
@@ -666,6 +722,7 @@ def start_build(
     configure_system(
         host, compiler=compiler, use_conda=use_conda, python_version=python_version
     )
+    build_OpenBLAS(host, git_clone_flags)
 
     if host.using_docker():
         print("Move libgfortant.a into a standard location")
@@ -688,12 +745,10 @@ def start_build(
         f"git clone --recurse-submodules -b {branch} https://github.com/pytorch/pytorch {git_clone_flags}"
     )
 
-    host.run_cmd("pytorch/.ci/docker/common/install_openblas.sh")
-
     print("Building PyTorch wheel")
     build_opts = ""
     if pytorch_build_number is not None:
-        build_opts += f" -C--build-option=--build-number={pytorch_build_number}"
+        build_opts += f" --build-number {pytorch_build_number}"
     # Breakpad build fails on aarch64
     build_vars = "USE_BREAKPAD=0 "
     if branch == "nightly":
@@ -706,22 +761,19 @@ def start_build(
         version = host.check_output("cat pytorch/version.txt").strip()[:-2]
         build_vars += f"BUILD_TEST=0 PYTORCH_BUILD_VERSION={version}.dev{build_date} PYTORCH_BUILD_NUMBER=1"
     if branch.startswith(("v1.", "v2.")):
-        build_vars += f"BUILD_TEST=0 PYTORCH_BUILD_VERSION={branch[1 : branch.find('-')]} PYTORCH_BUILD_NUMBER=1"
+        build_vars += f"BUILD_TEST=0 PYTORCH_BUILD_VERSION={branch[1:branch.find('-')]} PYTORCH_BUILD_NUMBER=1"
     if host.using_docker():
         build_vars += " CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000"
     if enable_mkldnn:
-        host.run_cmd("pytorch/.ci/docker/common/install_acl.sh")
+        build_ArmComputeLibrary(host, git_clone_flags)
         print("build pytorch with mkldnn+acl backend")
         build_vars += " USE_MKLDNN=ON USE_MKLDNN_ACL=ON"
-        build_vars += " BLAS=OpenBLAS"
-        build_vars += " OpenBLAS_HOME=/opt/OpenBLAS"
-        build_vars += " ACL_ROOT_DIR=/acl"
        host.run_cmd(
-            f"cd $HOME/pytorch && {build_vars} python3 -m build --wheel --no-isolation{build_opts}"
+            f"cd $HOME/pytorch && export ACL_ROOT_DIR=$HOME/ComputeLibrary && {build_vars} python3 setup.py bdist_wheel{build_opts}"
        )
        print("Repair the wheel")
        pytorch_wheel_name = host.list_dir("pytorch/dist")[0]
-        ld_library_path = "/acl/build:$HOME/pytorch/build/lib"
+        ld_library_path = "$HOME/acl/build:$HOME/pytorch/build/lib"
        host.run_cmd(
            f"export LD_LIBRARY_PATH={ld_library_path} && auditwheel repair $HOME/pytorch/dist/{pytorch_wheel_name}"
        )
@@ -733,7 +785,7 @@ def start_build(
     else:
         print("build pytorch without mkldnn backend")
         host.run_cmd(
-            f"cd pytorch && {build_vars} python3 -m build --wheel --no-isolation{build_opts}"
+            f"cd pytorch && {build_vars} python3 setup.py bdist_wheel{build_opts}"
         )
 
     print("Deleting build folder")
@@ -877,7 +929,7 @@ def terminate_instances(instance_type: str) -> None:
 def parse_arguments():
     from argparse import ArgumentParser
 
-    parser = ArgumentParser("Build and test AARCH64 wheels using EC2")
+    parser = ArgumentParser("Builid and test AARCH64 wheels using EC2")
     parser.add_argument("--key-name", type=str)
     parser.add_argument("--debug", action="store_true")
     parser.add_argument("--build-only", action="store_true")
@@ -974,7 +1026,7 @@ if __name__ == "__main__":
         install_condaforge_python(host, args.python_version)
         sys.exit(0)
 
-    python_version = args.python_version if args.python_version is not None else "3.10"
+    python_version = args.python_version if args.python_version is not None else "3.8"
 
     if args.use_torch_from_pypi:
         configure_system(host, compiler=args.compiler, python_version=python_version)
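Two small details in this file are easy to misread. First, `except (ConnectionRefusedError, socket.timeout)` and `except (ConnectionRefusedError, TimeoutError)` are equivalent on Python 3.10+, where `socket.timeout` became an alias of `TimeoutError`. Second, the two version-string slices in the `BUILD_VERSION` lines agree on release branches but not on bare tags; a quick check (the branch names are illustrative):

```python
branch = "v2.1.0-rc1"

# Both spellings strip the leading "v" and the "-rc1" suffix.
assert branch[1:].split("-")[0] == "2.1.0"
assert branch[1:branch.find("-")] == "2.1.0"

# They differ only when there is no "-": find() returns -1 and the slice
# then drops the last character, so the split() form is the safer one.
branch = "v2.1.0"
assert branch[1:].split("-")[0] == "2.1.0"
assert branch[1:branch.find("-")] == "2.1."
```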

View File

@@ -10,3 +10,5 @@ example: `py2-cuda9.0-cudnn7-ubuntu16.04`. The Docker images that are
 built on Jenkins and are used in triggered builds already have this
 environment variable set in their manifest. Also see
 `./docker/jenkins/*/Dockerfile` and search for `BUILD_ENVIRONMENT`.
+
+Our Jenkins installation is located at https://ci.pytorch.org/jenkins/.

View File

@@ -5,7 +5,7 @@ source "$(dirname "${BASH_SOURCE[0]}")/common.sh"
 if [[ ${BUILD_ENVIRONMENT} == *onnx* ]]; then
   pip install click mock tabulate networkx==2.0
-  pip -q install "file:///var/lib/jenkins/workspace/third_party/onnx#egg=onnx"
+  pip -q install --user "file:///var/lib/jenkins/workspace/third_party/onnx#egg=onnx"
 fi
 
 # Skip tests in environments where they are not built/applicable
@@ -13,6 +13,10 @@ if [[ "${BUILD_ENVIRONMENT}" == *-android* ]]; then
   echo 'Skipping tests'
   exit 0
 fi
+if [[ "${BUILD_ENVIRONMENT}" == *-rocm* ]]; then
+  # temporary to locate some kernel issues on the CI nodes
+  export HSAKMT_DEBUG_LEVEL=4
+fi
 # These additional packages are needed for circleci ROCm builds.
 if [[ $BUILD_ENVIRONMENT == *rocm* ]]; then
   # Need networkx 2.0 because bellmand_ford was moved in 2.1 . Scikit-image by
@@ -147,8 +151,8 @@ export DNNL_MAX_CPU_ISA=AVX2
 if [[ "${SHARD_NUMBER:-1}" == "1" ]]; then
   # TODO(sdym@meta.com) remove this when the linked issue resolved.
   # py is temporary until https://github.com/Teemu/pytest-sugar/issues/241 is fixed
-  pip install py==1.11.0
-  pip install pytest-sugar
+  pip install --user py==1.11.0
+  pip install --user pytest-sugar
   # NB: Warnings are disabled because they make it harder to see what
   # the actual erroring test is
   "$PYTHON" \

View File

@@ -34,106 +34,5 @@ See `build.sh` for valid build environments (it's the giant switch).
 ./build.sh pytorch-linux-bionic-py3.8-gcc9 -t myimage:latest
 
 # Set flags (see build.sh) and build image
-sudo bash -c 'TRITON=1 ./build.sh pytorch-linux-bionic-py3.8-gcc9 -t myimage:latest
+sudo bash -c 'PROTOBUF=1 ./build.sh pytorch-linux-bionic-py3.8-gcc9 -t myimage:latest
 ```
-
-## [Guidance] Adding a New Base Docker Image
-
-### Background
-
-The base Docker images in directory `.ci/docker/` are built by the `docker-builds.yml` workflow. Those images are used throughout the PyTorch CI/CD pipeline. You should only create or modify a base Docker image if you need specific environment changes or dependencies before building PyTorch on CI.
-
-1. **Automatic Rebuilding**:
-   - The Docker image building process is triggered automatically when changes are made to files in the `.ci/docker/*` directory
-   - This ensures all images stay up-to-date with the latest dependencies and configurations
-
-2. **Image Reuse in PyTorch Build Workflows** (example: linux-build):
-   - The images generated by `docker-builds.yml` are reused in `_linux-build.yml` through the `calculate-docker-image` step
-   - The `_linux-build.yml` workflow:
-     - Pulls the Docker image determined by the `calculate-docker-image` step
-     - Runs a Docker container with that image
-     - Executes `.ci/pytorch/build.sh` inside the container to build PyTorch
-
-3. **Usage in Test Workflows** (example: linux-test):
-   - The same Docker images are also used in `_linux-test.yml` for running tests
-   - The `_linux-test.yml` workflow follows a similar pattern:
-     - It uses the `calculate-docker-image` step to determine which Docker image to use
-     - It pulls the Docker image and runs a container with that image
-     - It installs the wheels from the artifacts generated by PyTorch build jobs
-     - It executes test scripts (like `.ci/pytorch/test.sh` or `.ci/pytorch/multigpu-test.sh`) inside the container
-
-### Understanding File Purposes
-
-#### `.ci/docker/build.sh` vs `.ci/pytorch/build.sh`
-
-- **`.ci/docker/build.sh`**:
-  - Used for building base Docker images
-  - Executed by the `docker-builds.yml` workflow to pre-build Docker images for CI
-  - Contains configurations for different Docker build environments
-
-- **`.ci/pytorch/build.sh`**:
-  - Used for building PyTorch inside a Docker container
-  - Called by workflows like `_linux-build.yml` after the Docker container is started
-  - Builds PyTorch wheels and other artifacts
-
-#### `.ci/docker/ci_commit_pins/` vs `.github/ci_commit_pins`
-
-- **`.ci/docker/ci_commit_pins/`**:
-  - Used for pinning dependency versions during base Docker image building
-  - Ensures consistent environments for building PyTorch
-  - Changes here trigger base Docker image rebuilds
-
-- **`.github/ci_commit_pins`**:
-  - Used for pinning dependency versions during PyTorch building and tests
-  - Ensures consistent dependencies for PyTorch across different builds
-  - Used by build scripts running inside Docker containers
-
-### Step-by-Step Guide for Adding a New Base Docker Image
-
-#### 1. Add Pinned Commits (If Applicable)
-
-We use pinned commits for build stability. The `nightly.yml` workflow checks and updates pinned commits for certain repository dependencies daily.
-
-If your new Docker image needs a library installed from a specific pinned commit or built from source:
-
-1. Add the repository you want to track in `nightly.yml` and `merge-rules.yml`
-2. Add the initial pinned commit in `.ci/docker/ci_commit_pins/`. The text filename should match the one defined in step 1
-
-#### 2. Configure the Base Docker Image
-
-1. **Add new Base Docker image configuration** (if applicable):
-
-   Add the configuration in `.ci/docker/build.sh`. For example:
-   ```bash
-   pytorch-linux-jammy-cuda12.8-cudnn9-py3.12-gcc11-new1)
-     CUDA_VERSION=12.8.1
-     ANACONDA_PYTHON_VERSION=3.12
-     GCC_VERSION=11
-     VISION=yes
-     KATEX=yes
-     UCX_COMMIT=${_UCX_COMMIT}
-     UCC_COMMIT=${_UCC_COMMIT}
-     TRITON=yes
-     NEW_ARG_1=yes
-     ;;
-   ```
-
-2. **Add build arguments to Docker build command**:
-
-   If you're introducing a new argument to the Docker build, make sure to add it in the Docker build step in `.ci/docker/build.sh`:
-   ```bash
-   docker build \
-     ....
-     --build-arg "NEW_ARG_1=${NEW_ARG_1}"
-   ```
-
-3. **Update Dockerfile logic**:
-
-   Update the Dockerfile to use the new argument. For example, in `ubuntu/Dockerfile`:
-   ```dockerfile
-   ARG NEW_ARG_1
-   # Set up environment for NEW_ARG_1
-   RUN if [ -n "${NEW_ARG_1}" ]; then bash ./do_something.sh; fi
-   ```
-
-4. **Add the Docker configuration** in `.github/workflows/docker-builds.yml`:
-
-   The `docker-builds.yml` workflow pre-builds the Docker images whenever changes occur in the `.ci/docker/` directory. This includes the
-   pinned commit updates.
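For context on the pinned-commit mechanism the deleted guidance describes: each pin is just a one-line text file holding a commit SHA. A hedged sketch of how a build script might consume one (the `triton` dependency name is an example; check `.ci/docker/ci_commit_pins/` for the real filenames):

```python
from pathlib import Path

def read_pin(repo_root: str, dep: str) -> str:
    """Return the pinned commit SHA for a dependency, e.g. 'triton'."""
    pin_file = Path(repo_root) / ".ci" / "docker" / "ci_commit_pins" / f"{dep}.txt"
    # Pin files hold a single commit hash (or tag) on one line.
    return pin_file.read_text().strip()

# Typical use: git checkout <read_pin(".", "triton")> inside the dependency clone.
```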

View File

@@ -1,7 +1,6 @@
-ARG CUDA_VERSION=12.6
+ARG CUDA_VERSION=12.4
 ARG BASE_TARGET=cuda${CUDA_VERSION}
-ARG ROCM_IMAGE=rocm/dev-almalinux-8:6.3-complete
-FROM amd64/almalinux:8.10-20250519 as base
+FROM amd64/almalinux:8 as base
 
 ENV LC_ALL en_US.UTF-8
 ENV LANG en_US.UTF-8
@@ -9,10 +8,12 @@ ENV LANGUAGE en_US.UTF-8
 ARG DEVTOOLSET_VERSION=11
 
+ENV LC_ALL en_US.UTF-8
+ENV LANG en_US.UTF-8
+ENV LANGUAGE en_US.UTF-8
+
 RUN yum -y update
 RUN yum -y install epel-release
-# install glibc-langpack-en make sure en_US.UTF-8 locale is available
-RUN yum -y install glibc-langpack-en
 RUN yum install -y sudo wget curl perl util-linux xz bzip2 git patch which perl zlib-devel openssl-devel yum-utils autoconf automake make gcc-toolset-${DEVTOOLSET_VERSION}-toolchain
 # Just add everything as a safe.directory for git since these will be used in multiple places with git
 RUN git config --global --add safe.directory '*'
@@ -40,51 +41,41 @@ RUN bash ./install_conda.sh && rm install_conda.sh
 # Install CUDA
 FROM base as cuda
-ARG CUDA_VERSION=12.6
+ARG CUDA_VERSION=12.4
 RUN rm -rf /usr/local/cuda-*
 ADD ./common/install_cuda.sh install_cuda.sh
-COPY ./common/install_nccl.sh install_nccl.sh
-COPY ./ci_commit_pins/nccl-cu* /ci_commit_pins/
-COPY ./common/install_cusparselt.sh install_cusparselt.sh
 ENV CUDA_HOME=/usr/local/cuda-${CUDA_VERSION}
 # Preserve CUDA_VERSION for the builds
 ENV CUDA_VERSION=${CUDA_VERSION}
 # Make things in our path by default
 ENV PATH=/usr/local/cuda-${CUDA_VERSION}/bin:$PATH
 
+FROM cuda as cuda11.8
+RUN bash ./install_cuda.sh 11.8
+ENV DESIRED_CUDA=11.8
+
+FROM cuda as cuda12.1
+RUN bash ./install_cuda.sh 12.1
+ENV DESIRED_CUDA=12.1
+
+FROM cuda as cuda12.4
+RUN bash ./install_cuda.sh 12.4
+ENV DESIRED_CUDA=12.4
+
 FROM cuda as cuda12.6
 RUN bash ./install_cuda.sh 12.6
 ENV DESIRED_CUDA=12.6
 
-FROM cuda as cuda12.8
-RUN bash ./install_cuda.sh 12.8
-ENV DESIRED_CUDA=12.8
-
-FROM cuda as cuda12.9
-RUN bash ./install_cuda.sh 12.9
-ENV DESIRED_CUDA=12.9
-
-FROM cuda as cuda13.0
-RUN bash ./install_cuda.sh 13.0
-ENV DESIRED_CUDA=13.0
-
-FROM ${ROCM_IMAGE} as rocm
-ARG PYTORCH_ROCM_ARCH
-ENV PYTORCH_ROCM_ARCH ${PYTORCH_ROCM_ARCH}
-ADD ./common/install_mkl.sh install_mkl.sh
-RUN bash ./install_mkl.sh && rm install_mkl.sh
-ENV MKLROOT /opt/intel
-
 # Install MNIST test data
 FROM base as mnist
 ADD ./common/install_mnist.sh install_mnist.sh
 RUN bash ./install_mnist.sh
 
 FROM base as all_cuda
+COPY --from=cuda11.8 /usr/local/cuda-11.8 /usr/local/cuda-11.8
+COPY --from=cuda12.1 /usr/local/cuda-12.1 /usr/local/cuda-12.1
+COPY --from=cuda12.4 /usr/local/cuda-12.4 /usr/local/cuda-12.4
 COPY --from=cuda12.6 /usr/local/cuda-12.6 /usr/local/cuda-12.6
-COPY --from=cuda12.8 /usr/local/cuda-12.8 /usr/local/cuda-12.8
-COPY --from=cuda12.9 /usr/local/cuda-12.9 /usr/local/cuda-12.9
-COPY --from=cuda13.0 /usr/local/cuda-13.0 /usr/local/cuda-13.0
 
 # Final step
 FROM ${BASE_TARGET} as final

View File

@@ -1,76 +1,82 @@
 #!/usr/bin/env bash
 # Script used only in CD pipeline
 
-set -exou pipefail
+set -eou pipefail
 
 image="$1"
 shift
 
 if [ -z "${image}" ]; then
-  echo "Usage: $0 IMAGENAME:ARCHTAG"
+  echo "Usage: $0 IMAGE"
   exit 1
 fi
 
-# Go from imagename:tag to tag
-DOCKER_TAG_PREFIX=$(echo "${image}" | awk -F':' '{print $2}')
-
-CUDA_VERSION=""
-ROCM_VERSION=""
-EXTRA_BUILD_ARGS=""
-if [[ "${DOCKER_TAG_PREFIX}" == cuda* ]]; then
-  # extract cuda version from image name and tag. e.g. manylinux2_28-builder:cuda12.8 returns 12.8
-  CUDA_VERSION=$(echo "${DOCKER_TAG_PREFIX}" | awk -F'cuda' '{print $2}')
-  EXTRA_BUILD_ARGS="--build-arg CUDA_VERSION=${CUDA_VERSION}"
-elif [[ "${DOCKER_TAG_PREFIX}" == rocm* ]]; then
-  # extract rocm version from image name and tag. e.g. manylinux2_28-builder:rocm6.2.4 returns 6.2.4
-  ROCM_VERSION=$(echo "${DOCKER_TAG_PREFIX}" | awk -F'rocm' '{print $2}')
-  EXTRA_BUILD_ARGS="--build-arg ROCM_IMAGE=rocm/dev-almalinux-8:${ROCM_VERSION}-complete"
-fi
-
-case ${DOCKER_TAG_PREFIX} in
-  cpu)
-    BASE_TARGET=base
-    ;;
-  cuda*)
-    BASE_TARGET=cuda${CUDA_VERSION}
-    ;;
-  rocm*)
-    BASE_TARGET=rocm
-    PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
-    # add gfx950, gfx115x conditionally starting in ROCm 7.0
-    if [[ "$ROCM_VERSION" == *"7.0"* ]]; then
-      PYTORCH_ROCM_ARCH="${PYTORCH_ROCM_ARCH};gfx950;gfx1150;gfx1151"
-    fi
-    EXTRA_BUILD_ARGS="${EXTRA_BUILD_ARGS} --build-arg PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH}"
-    ;;
-  *)
-    echo "ERROR: Unknown docker tag ${DOCKER_TAG_PREFIX}"
-    exit 1
-    ;;
-esac
-
-# TODO: Remove LimitNOFILE=1048576 patch once https://github.com/pytorch/test-infra/issues/5712
-# is resolved. This patch is required in order to fix timing out of Docker build on Amazon Linux 2023.
-sudo sed -i s/LimitNOFILE=infinity/LimitNOFILE=1048576/ /usr/lib/systemd/system/docker.service
-sudo systemctl daemon-reload
-sudo systemctl restart docker
+DOCKER_IMAGE_NAME="pytorch/${image}"
 
 export DOCKER_BUILDKIT=1
 TOPDIR=$(git rev-parse --show-toplevel)
-tmp_tag=$(basename "$(mktemp -u)" | tr '[:upper:]' '[:lower:]')
 
-docker build \
-  --target final \
-  --progress plain \
-  --build-arg "BASE_TARGET=${BASE_TARGET}" \
-  --build-arg "DEVTOOLSET_VERSION=11" \
-  ${EXTRA_BUILD_ARGS} \
-  -t ${tmp_tag} \
-  $@ \
-  -f "${TOPDIR}/.ci/docker/almalinux/Dockerfile" \
-  ${TOPDIR}/.ci/docker/
+CUDA_VERSION=${CUDA_VERSION:-12.1}
 
-if [ -n "${CUDA_VERSION}" ]; then
+case ${CUDA_VERSION} in
+  cpu)
+    BASE_TARGET=base
+    DOCKER_TAG=cpu
+    ;;
+  all)
+    BASE_TARGET=all_cuda
+    DOCKER_TAG=latest
+    ;;
+  *)
+    BASE_TARGET=cuda${CUDA_VERSION}
+    DOCKER_TAG=cuda${CUDA_VERSION}
+    ;;
+esac
+
+(
+  set -x
+  # TODO: Remove LimitNOFILE=1048576 patch once https://github.com/pytorch/test-infra/issues/5712
+  # is resolved. This patch is required in order to fix timing out of Docker build on Amazon Linux 2023.
+  sudo sed -i s/LimitNOFILE=infinity/LimitNOFILE=1048576/ /usr/lib/systemd/system/docker.service
+  sudo systemctl daemon-reload
+  sudo systemctl restart docker
+
+  docker build \
+    --target final \
+    --progress plain \
+    --build-arg "BASE_TARGET=${BASE_TARGET}" \
+    --build-arg "CUDA_VERSION=${CUDA_VERSION}" \
+    --build-arg "DEVTOOLSET_VERSION=11" \
+    -t ${DOCKER_IMAGE_NAME} \
+    $@ \
+    -f "${TOPDIR}/.ci/docker/almalinux/Dockerfile" \
+    ${TOPDIR}/.ci/docker/
+)
+
+if [[ "${DOCKER_TAG}" =~ ^cuda* ]]; then
   # Test that we're using the right CUDA compiler
-  docker run --rm "${tmp_tag}" nvcc --version | grep "cuda_${CUDA_VERSION}"
+  (
+    set -x
+    docker run --rm "${DOCKER_IMAGE_NAME}" nvcc --version | grep "cuda_${CUDA_VERSION}"
+  )
+fi
+
+GITHUB_REF=${GITHUB_REF:-$(git symbolic-ref -q HEAD || git describe --tags --exact-match)}
+GIT_BRANCH_NAME=${GITHUB_REF##*/}
+GIT_COMMIT_SHA=${GITHUB_SHA:-$(git rev-parse HEAD)}
+DOCKER_IMAGE_BRANCH_TAG=${DOCKER_IMAGE_NAME}-${GIT_BRANCH_NAME}
+DOCKER_IMAGE_SHA_TAG=${DOCKER_IMAGE_NAME}-${GIT_COMMIT_SHA}
+
+if [[ "${WITH_PUSH:-}" == true ]]; then
+  (
+    set -x
+    docker push "${DOCKER_IMAGE_NAME}"
+    if [[ -n ${GITHUB_REF} ]]; then
+      docker tag ${DOCKER_IMAGE_NAME} ${DOCKER_IMAGE_BRANCH_TAG}
+      docker tag ${DOCKER_IMAGE_NAME} ${DOCKER_IMAGE_SHA_TAG}
+      docker push "${DOCKER_IMAGE_BRANCH_TAG}"
+      docker push "${DOCKER_IMAGE_SHA_TAG}"
+    fi
+  )
 fi
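The deleted (newer) side of this script derives everything from an `IMAGENAME:ARCHTAG` argument, while the added (older) side takes a bare `CUDA_VERSION`. A minimal Python sketch of the tag parsing the newer side does with `awk` (the `parse_arch_tag` name is ours):

```python
def parse_arch_tag(image: str) -> tuple[str, str, str]:
    """Split e.g. 'manylinux2_28-builder:cuda12.8' into tag and versions."""
    tag_prefix = image.split(":")[1]             # awk -F':' '{print $2}'
    cuda_version = rocm_version = ""
    if tag_prefix.startswith("cuda"):
        cuda_version = tag_prefix[len("cuda"):]  # awk -F'cuda' '{print $2}'
    elif tag_prefix.startswith("rocm"):
        rocm_version = tag_prefix[len("rocm"):]
    return tag_prefix, cuda_version, rocm_version

assert parse_arch_tag("manylinux2_28-builder:cuda12.8")[1] == "12.8"
```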

View File

@ -1,8 +1,4 @@
#!/bin/bash #!/bin/bash
# The purpose of this script is to:
# 1. Extract the set of parameters to be used for a docker build based on the provided image name.
# 2. Run docker build with the parameters found in step 1.
# 3. Run the built image and print out the expected and actual versions of packages installed.
set -ex set -ex
@ -50,23 +46,30 @@ if [[ "$image" == *xla* ]]; then
exit 0 exit 0
fi fi
if [[ "$image" == *-jammy* ]]; then if [[ "$image" == *-focal* ]]; then
UBUNTU_VERSION=20.04
elif [[ "$image" == *-jammy* ]]; then
UBUNTU_VERSION=22.04 UBUNTU_VERSION=22.04
elif [[ "$image" == *-noble* ]]; then
UBUNTU_VERSION=24.04
elif [[ "$image" == *ubuntu* ]]; then elif [[ "$image" == *ubuntu* ]]; then
extract_version_from_image_name ubuntu UBUNTU_VERSION extract_version_from_image_name ubuntu UBUNTU_VERSION
elif [[ "$image" == *centos* ]]; then
extract_version_from_image_name centos CENTOS_VERSION
fi fi
if [ -n "${UBUNTU_VERSION}" ]; then if [ -n "${UBUNTU_VERSION}" ]; then
OS="ubuntu" OS="ubuntu"
elif [ -n "${CENTOS_VERSION}" ]; then
OS="centos"
else else
echo "Unable to derive operating system base..." echo "Unable to derive operating system base..."
exit 1 exit 1
fi fi
DOCKERFILE="${OS}/Dockerfile" DOCKERFILE="${OS}/Dockerfile"
if [[ "$image" == *rocm* ]]; then # When using ubuntu - 22.04, start from Ubuntu docker image, instead of nvidia/cuda docker image.
if [[ "$image" == *cuda* && "$UBUNTU_VERSION" != "22.04" ]]; then
DOCKERFILE="${OS}-cuda/Dockerfile"
elif [[ "$image" == *rocm* ]]; then
DOCKERFILE="${OS}-rocm/Dockerfile" DOCKERFILE="${OS}-rocm/Dockerfile"
elif [[ "$image" == *xpu* ]]; then elif [[ "$image" == *xpu* ]]; then
DOCKERFILE="${OS}-xpu/Dockerfile" DOCKERFILE="${OS}-xpu/Dockerfile"
@ -76,158 +79,238 @@ elif [[ "$image" == *cuda*linter* ]]; then
elif [[ "$image" == *linter* ]]; then elif [[ "$image" == *linter* ]]; then
# Use a separate Dockerfile for linter to keep a small image size # Use a separate Dockerfile for linter to keep a small image size
DOCKERFILE="linter/Dockerfile" DOCKERFILE="linter/Dockerfile"
elif [[ "$image" == *riscv* ]]; then
# Use RISC-V specific Dockerfile
DOCKERFILE="ubuntu-cross-riscv/Dockerfile"
fi fi
_UCX_COMMIT=7836b165abdbe468a2f607e7254011c07d788152 # CMake 3.18 is needed to support CUDA17 language variant
_UCC_COMMIT=430e241bf5d38cbc73fc7a6b89155397232e3f96 CMAKE_VERSION=3.18.5
_UCX_COMMIT=7bb2722ff2187a0cad557ae4a6afa090569f83fb
_UCC_COMMIT=20eae37090a4ce1b32bcce6144ccad0b49943e0b
if [[ "$image" == *rocm* ]]; then if [[ "$image" == *rocm* ]]; then
_UCX_COMMIT=29831d319e6be55cb8c768ca61de335c934ca39e _UCX_COMMIT=cc312eaa4655c0cc5c2bcd796db938f90563bcf6
_UCC_COMMIT=9f4b242cbbd8b1462cbc732eb29316cdfa124b77 _UCC_COMMIT=0c0fc21559835044ab107199e334f7157d6a0d3d
fi fi
tag=$(echo $image | awk -F':' '{print $2}')
# It's annoying to rename jobs every time you want to rewrite a # It's annoying to rename jobs every time you want to rewrite a
# configuration, so we hardcode everything here rather than do it # configuration, so we hardcode everything here rather than do it
# from scratch # from scratch
case "$tag" in case "$image" in
pytorch-linux-jammy-cuda12.4-cudnn9-py3-gcc11) pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9)
CUDA_VERSION=12.4 CUDA_VERSION=12.4.1
ANACONDA_PYTHON_VERSION=3.10 CUDNN_VERSION=9
GCC_VERSION=11
VISION=yes
KATEX=yes
UCX_COMMIT=${_UCX_COMMIT}
UCC_COMMIT=${_UCC_COMMIT}
TRITON=yes
;;
pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc11)
CUDA_VERSION=12.8.1
ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=11
VISION=yes
KATEX=yes
UCX_COMMIT=${_UCX_COMMIT}
UCC_COMMIT=${_UCC_COMMIT}
TRITON=yes
;;
pytorch-linux-jammy-cuda13.0-cudnn9-py3-gcc11)
CUDA_VERSION=13.0.0
ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=11
VISION=yes
KATEX=yes
UCX_COMMIT=${_UCX_COMMIT}
UCC_COMMIT=${_UCC_COMMIT}
TRITON=yes
;;
pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc9-inductor-benchmarks)
CUDA_VERSION=12.8.1
ANACONDA_PYTHON_VERSION=3.10 ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=9 GCC_VERSION=9
PROTOBUF=yes
DB=yes
VISION=yes VISION=yes
KATEX=yes KATEX=yes
UCX_COMMIT=${_UCX_COMMIT} UCX_COMMIT=${_UCX_COMMIT}
UCC_COMMIT=${_UCC_COMMIT} UCC_COMMIT=${_UCC_COMMIT}
CONDA_CMAKE=yes
TRITON=yes
;;
pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9-inductor-benchmarks)
CUDA_VERSION=12.4.1
CUDNN_VERSION=9
ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=9
PROTOBUF=yes
DB=yes
VISION=yes
KATEX=yes
UCX_COMMIT=${_UCX_COMMIT}
UCC_COMMIT=${_UCC_COMMIT}
CONDA_CMAKE=yes
TRITON=yes TRITON=yes
INDUCTOR_BENCHMARKS=yes INDUCTOR_BENCHMARKS=yes
;; ;;
pytorch-linux-jammy-cuda12.8-cudnn9-py3.12-gcc11-vllm) pytorch-linux-focal-cuda12.4-cudnn9-py3.12-gcc9-inductor-benchmarks)
CUDA_VERSION=12.8.1 CUDA_VERSION=12.4.1
CUDNN_VERSION=9
ANACONDA_PYTHON_VERSION=3.12 ANACONDA_PYTHON_VERSION=3.12
GCC_VERSION=11 GCC_VERSION=9
PROTOBUF=yes
DB=yes
VISION=yes VISION=yes
KATEX=yes KATEX=yes
UCX_COMMIT=${_UCX_COMMIT} UCX_COMMIT=${_UCX_COMMIT}
UCC_COMMIT=${_UCC_COMMIT} UCC_COMMIT=${_UCC_COMMIT}
CONDA_CMAKE=yes
TRITON=yes TRITON=yes
INDUCTOR_BENCHMARKS=yes
;; ;;
pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc9) pytorch-linux-focal-cuda12.4-cudnn9-py3.13-gcc9-inductor-benchmarks)
CUDA_VERSION=12.8.1 CUDA_VERSION=12.4.1
CUDNN_VERSION=9
ANACONDA_PYTHON_VERSION=3.13
GCC_VERSION=9
PROTOBUF=yes
DB=yes
VISION=yes
KATEX=yes
UCX_COMMIT=${_UCX_COMMIT}
UCC_COMMIT=${_UCC_COMMIT}
CONDA_CMAKE=yes
TRITON=yes
INDUCTOR_BENCHMARKS=yes
;;
pytorch-linux-focal-cuda11.8-cudnn9-py3-gcc9)
CUDA_VERSION=11.8.0
CUDNN_VERSION=9
ANACONDA_PYTHON_VERSION=3.10 ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=9 GCC_VERSION=9
PROTOBUF=yes
DB=yes
VISION=yes VISION=yes
KATEX=yes KATEX=yes
UCX_COMMIT=${_UCX_COMMIT} UCX_COMMIT=${_UCX_COMMIT}
UCC_COMMIT=${_UCC_COMMIT} UCC_COMMIT=${_UCC_COMMIT}
CONDA_CMAKE=yes
TRITON=yes TRITON=yes
;; ;;
pytorch-linux-jammy-py3-clang12-onnx) pytorch-linux-focal-py3-clang10-onnx)
ANACONDA_PYTHON_VERSION=3.10 ANACONDA_PYTHON_VERSION=3.9
CLANG_VERSION=12 CLANG_VERSION=10
PROTOBUF=yes
DB=yes
VISION=yes VISION=yes
CONDA_CMAKE=yes
ONNX=yes ONNX=yes
;; ;;
pytorch-linux-jammy-py3.10-clang12) pytorch-linux-focal-py3.9-clang10)
ANACONDA_PYTHON_VERSION=3.10 ANACONDA_PYTHON_VERSION=3.9
CLANG_VERSION=12 CLANG_VERSION=10
PROTOBUF=yes
DB=yes
VISION=yes VISION=yes
VULKAN_SDK_VERSION=1.2.162.1
SWIFTSHADER=yes
CONDA_CMAKE=yes
TRITON=yes TRITON=yes
;; ;;
pytorch-linux-jammy-rocm-n-py3 | pytorch-linux-jammy-rocm-n-py3-benchmarks | pytorch-linux-noble-rocm-n-py3) pytorch-linux-focal-py3.11-clang10)
if [[ $tag =~ "jammy" ]]; then ANACONDA_PYTHON_VERSION=3.11
ANACONDA_PYTHON_VERSION=3.10 CLANG_VERSION=10
else PROTOBUF=yes
ANACONDA_PYTHON_VERSION=3.12 DB=yes
fi
GCC_VERSION=11
VISION=yes VISION=yes
ROCM_VERSION=7.0 VULKAN_SDK_VERSION=1.2.162.1
SWIFTSHADER=yes
CONDA_CMAKE=yes
TRITON=yes
;;
pytorch-linux-focal-py3.9-gcc9)
ANACONDA_PYTHON_VERSION=3.9
GCC_VERSION=9
PROTOBUF=yes
DB=yes
VISION=yes
CONDA_CMAKE=yes
TRITON=yes
;;
pytorch-linux-focal-rocm-n-1-py3)
ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=11
PROTOBUF=yes
DB=yes
VISION=yes
ROCM_VERSION=6.2.4
NINJA_VERSION=1.9.0 NINJA_VERSION=1.9.0
CONDA_CMAKE=yes
TRITON=yes TRITON=yes
KATEX=yes KATEX=yes
UCX_COMMIT=${_UCX_COMMIT} UCX_COMMIT=${_UCX_COMMIT}
UCC_COMMIT=${_UCC_COMMIT} UCC_COMMIT=${_UCC_COMMIT}
PYTORCH_ROCM_ARCH="gfx90a;gfx942;gfx950" INDUCTOR_BENCHMARKS=yes
if [[ $tag =~ "benchmarks" ]]; then
INDUCTOR_BENCHMARKS=yes
fi
;; ;;
pytorch-linux-jammy-xpu-n-1-py3) pytorch-linux-focal-rocm-n-py3)
ANACONDA_PYTHON_VERSION=3.10 ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=11 GCC_VERSION=11
PROTOBUF=yes
DB=yes
VISION=yes VISION=yes
XPU_VERSION=2025.1 ROCM_VERSION=6.3
NINJA_VERSION=1.9.0 NINJA_VERSION=1.9.0
CONDA_CMAKE=yes
-    TRITON=yes
-    KATEX=yes
-    UCX_COMMIT=${_UCX_COMMIT}
-    UCC_COMMIT=${_UCC_COMMIT}
-    INDUCTOR_BENCHMARKS=yes
-    ;;
+  pytorch-linux-jammy-xpu-2024.0-py3)
+    ANACONDA_PYTHON_VERSION=3.9
+    GCC_VERSION=11
+    PROTOBUF=yes
+    DB=yes
+    VISION=yes
+    XPU_VERSION=0.5
+    NINJA_VERSION=1.9.0
+    CONDA_CMAKE=yes
     TRITON=yes
     ;;
-  pytorch-linux-jammy-xpu-n-py3)
-    ANACONDA_PYTHON_VERSION=3.10
+  pytorch-linux-jammy-xpu-2025.0-py3)
+    ANACONDA_PYTHON_VERSION=3.9
     GCC_VERSION=11
+    PROTOBUF=yes
+    DB=yes
     VISION=yes
-    XPU_VERSION=2025.2
+    XPU_VERSION=2025.0
     NINJA_VERSION=1.9.0
+    CONDA_CMAKE=yes
     TRITON=yes
     ;;
-  pytorch-linux-jammy-py3-gcc11-inductor-benchmarks)
-    ANACONDA_PYTHON_VERSION=3.10
+  pytorch-linux-jammy-py3.9-gcc11-inductor-benchmarks)
+    ANACONDA_PYTHON_VERSION=3.9
     GCC_VERSION=11
+    PROTOBUF=yes
+    DB=yes
     VISION=yes
     KATEX=yes
+    CONDA_CMAKE=yes
     TRITON=yes
     DOCS=yes
     INDUCTOR_BENCHMARKS=yes
     ;;
-  pytorch-linux-jammy-cuda12.8-cudnn9-py3.10-clang12)
-    ANACONDA_PYTHON_VERSION=3.10
-    CUDA_VERSION=12.8.1
+  pytorch-linux-jammy-cuda11.8-cudnn9-py3.9-clang12)
+    ANACONDA_PYTHON_VERSION=3.9
+    CUDA_VERSION=11.8
+    CUDNN_VERSION=9
     CLANG_VERSION=12
+    PROTOBUF=yes
+    DB=yes
     VISION=yes
     TRITON=yes
     ;;
+  pytorch-linux-jammy-py3-clang12-asan)
+    ANACONDA_PYTHON_VERSION=3.9
+    CLANG_VERSION=12
+    PROTOBUF=yes
+    DB=yes
+    VISION=yes
+    CONDA_CMAKE=yes
+    TRITON=yes
+    ;;
+  pytorch-linux-jammy-py3-clang15-asan)
+    ANACONDA_PYTHON_VERSION=3.10
+    CLANG_VERSION=15
+    CONDA_CMAKE=yes
+    VISION=yes
+    ;;
   pytorch-linux-jammy-py3-clang18-asan)
     ANACONDA_PYTHON_VERSION=3.10
     CLANG_VERSION=18
+    CONDA_CMAKE=yes
     VISION=yes
     ;;
-  pytorch-linux-jammy-py3.10-gcc11)
-    ANACONDA_PYTHON_VERSION=3.10
+  pytorch-linux-jammy-py3.9-gcc11)
+    ANACONDA_PYTHON_VERSION=3.9
     GCC_VERSION=11
+    PROTOBUF=yes
+    DB=yes
     VISION=yes
     KATEX=yes
+    CONDA_CMAKE=yes
     TRITON=yes
     DOCS=yes
     UNINSTALL_DILL=yes
@@ -235,34 +318,44 @@ case "$tag" in
   pytorch-linux-jammy-py3-clang12-executorch)
     ANACONDA_PYTHON_VERSION=3.10
     CLANG_VERSION=12
+    CONDA_CMAKE=yes
     EXECUTORCH=yes
     ;;
   pytorch-linux-jammy-py3.12-halide)
-    CUDA_VERSION=12.6
+    CUDA_VERSION=12.4
     ANACONDA_PYTHON_VERSION=3.12
     GCC_VERSION=11
+    CONDA_CMAKE=yes
     HALIDE=yes
     TRITON=yes
     ;;
   pytorch-linux-jammy-py3.12-triton-cpu)
-    CUDA_VERSION=12.6
+    CUDA_VERSION=12.4
     ANACONDA_PYTHON_VERSION=3.12
     GCC_VERSION=11
+    CONDA_CMAKE=yes
     TRITON_CPU=yes
     ;;
-  pytorch-linux-jammy-linter)
-    PYTHON_VERSION=3.10
+  pytorch-linux-focal-linter)
+    # TODO: Use 3.9 here because of this issue https://github.com/python/mypy/issues/13627.
+    # We will need to update mypy version eventually, but that's for another day. The task
+    # would be to upgrade mypy to 1.0.0 with Python 3.11
+    ANACONDA_PYTHON_VERSION=3.9
+    CONDA_CMAKE=yes
     ;;
-  pytorch-linux-jammy-cuda12.8-cudnn9-py3.10-linter)
-    PYTHON_VERSION=3.10
-    CUDA_VERSION=12.8.1
+  pytorch-linux-jammy-cuda11.8-cudnn9-py3.9-linter)
+    ANACONDA_PYTHON_VERSION=3.9
+    CUDA_VERSION=11.8
+    CONDA_CMAKE=yes
     ;;
   pytorch-linux-jammy-aarch64-py3.10-gcc11)
     ANACONDA_PYTHON_VERSION=3.10
    GCC_VERSION=11
     ACL=yes
+    PROTOBUF=yes
+    DB=yes
     VISION=yes
-    OPENBLAS=yes
+    CONDA_CMAKE=yes
     # snadampal: skipping llvm src build install because the current version
     # from pytorch/llvm:9.0.1 is x86 specific
     SKIP_LLVM_SRC_BUILD_INSTALL=yes
@@ -271,18 +364,19 @@ case "$tag" in
     ANACONDA_PYTHON_VERSION=3.10
     GCC_VERSION=11
     ACL=yes
+    PROTOBUF=yes
+    DB=yes
     VISION=yes
-    OPENBLAS=yes
+    CONDA_CMAKE=yes
     # snadampal: skipping llvm src build install because the current version
     # from pytorch/llvm:9.0.1 is x86 specific
     SKIP_LLVM_SRC_BUILD_INSTALL=yes
     INDUCTOR_BENCHMARKS=yes
     ;;
-  pytorch-linux-noble-riscv64-py3.12-gcc14)
-    GCC_VERSION=14
-    ;;
   *)
     # Catch-all for builds that are not hardcoded.
+    PROTOBUF=yes
+    DB=yes
     VISION=yes
     echo "image '$image' did not match an existing build configuration"
     if [[ "$image" == *py* ]]; then
@@ -290,6 +384,7 @@ case "$tag" in
     fi
     if [[ "$image" == *cuda* ]]; then
       extract_version_from_image_name cuda CUDA_VERSION
+      extract_version_from_image_name cudnn CUDNN_VERSION
     fi
     if [[ "$image" == *rocm* ]]; then
       extract_version_from_image_name rocm ROCM_VERSION
@@ -297,7 +392,8 @@ case "$tag" in
       TRITON=yes
       # To ensure that any ROCm config will build using conda cmake
       # and thus have LAPACK/MKL enabled
-    fi
+      CONDA_CMAKE=yes
+    fi
     if [[ "$image" == *centos7* ]]; then
       NINJA_VERSION=1.10.2
     fi
@@ -313,34 +409,45 @@ case "$tag" in
     if [[ "$image" == *glibc* ]]; then
       extract_version_from_image_name glibc GLIBC_VERSION
     fi
+    if [[ "$image" == *cmake* ]]; then
+      extract_version_from_image_name cmake CMAKE_VERSION
+    fi
     ;;
 esac
 tmp_tag=$(basename "$(mktemp -u)" | tr '[:upper:]' '[:lower:]')

-no_cache_flag=""
-progress_flag=""
-# Do not use cache and progress=plain when in CI
-if [[ -n "${CI:-}" ]]; then
-  no_cache_flag="--no-cache"
-  progress_flag="--progress=plain"
+#when using cudnn version 8 install it separately from cuda
+if [[ "$image" == *cuda* && ${OS} == "ubuntu" ]]; then
+  IMAGE_NAME="nvidia/cuda:${CUDA_VERSION}-cudnn${CUDNN_VERSION}-devel-ubuntu${UBUNTU_VERSION}"
+  if [[ ${CUDNN_VERSION} == 9 ]]; then
+    IMAGE_NAME="nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}"
+  fi
 fi

 # Build image
 docker build \
-  ${no_cache_flag} \
-  ${progress_flag} \
+  --no-cache \
+  --progress=plain \
   --build-arg "BUILD_ENVIRONMENT=${image}" \
+  --build-arg "PROTOBUF=${PROTOBUF:-}" \
   --build-arg "LLVMDEV=${LLVMDEV:-}" \
+  --build-arg "DB=${DB:-}" \
   --build-arg "VISION=${VISION:-}" \
   --build-arg "UBUNTU_VERSION=${UBUNTU_VERSION}" \
+  --build-arg "CENTOS_VERSION=${CENTOS_VERSION}" \
   --build-arg "DEVTOOLSET_VERSION=${DEVTOOLSET_VERSION}" \
   --build-arg "GLIBC_VERSION=${GLIBC_VERSION}" \
   --build-arg "CLANG_VERSION=${CLANG_VERSION}" \
   --build-arg "ANACONDA_PYTHON_VERSION=${ANACONDA_PYTHON_VERSION}" \
-  --build-arg "PYTHON_VERSION=${PYTHON_VERSION}" \
   --build-arg "GCC_VERSION=${GCC_VERSION}" \
   --build-arg "CUDA_VERSION=${CUDA_VERSION}" \
+  --build-arg "CUDNN_VERSION=${CUDNN_VERSION}" \
+  --build-arg "TENSORRT_VERSION=${TENSORRT_VERSION}" \
+  --build-arg "GRADLE_VERSION=${GRADLE_VERSION}" \
+  --build-arg "VULKAN_SDK_VERSION=${VULKAN_SDK_VERSION}" \
+  --build-arg "SWIFTSHADER=${SWIFTSHADER}" \
+  --build-arg "CMAKE_VERSION=${CMAKE_VERSION:-}" \
   --build-arg "NINJA_VERSION=${NINJA_VERSION:-}" \
   --build-arg "KATEX=${KATEX:-}" \
   --build-arg "ROCM_VERSION=${ROCM_VERSION:-}" \
@@ -348,6 +455,7 @@ docker build \
   --build-arg "IMAGE_NAME=${IMAGE_NAME}" \
   --build-arg "UCX_COMMIT=${UCX_COMMIT}" \
   --build-arg "UCC_COMMIT=${UCC_COMMIT}" \
+  --build-arg "CONDA_CMAKE=${CONDA_CMAKE}" \
   --build-arg "TRITON=${TRITON}" \
   --build-arg "TRITON_CPU=${TRITON_CPU}" \
   --build-arg "ONNX=${ONNX}" \
@@ -356,9 +464,7 @@ docker build \
   --build-arg "EXECUTORCH=${EXECUTORCH}" \
   --build-arg "HALIDE=${HALIDE}" \
   --build-arg "XPU_VERSION=${XPU_VERSION}" \
-  --build-arg "UNINSTALL_DILL=${UNINSTALL_DILL}" \
   --build-arg "ACL=${ACL:-}" \
-  --build-arg "OPENBLAS=${OPENBLAS:-}" \
   --build-arg "SKIP_SCCACHE_INSTALL=${SKIP_SCCACHE_INSTALL:-}" \
   --build-arg "SKIP_LLVM_SRC_BUILD_INSTALL=${SKIP_LLVM_SRC_BUILD_INSTALL:-}" \
   -f $(dirname ${DOCKERFILE})/Dockerfile \
@@ -375,7 +481,7 @@ docker build \
 UBUNTU_VERSION=$(echo ${UBUNTU_VERSION} | sed 's/-rc$//')

 function drun() {
-  docker run --rm "$tmp_tag" "$@"
+  docker run --rm "$tmp_tag" $*
 }
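
The drun change above swaps a quoted "$@" for an unquoted $*: "$@" forwards each argument to docker run exactly as the caller typed it, while $* re-splits arguments on whitespace. A standalone sketch of the difference (the demo function names are made up, not from this diff):

    #!/bin/bash
    show() { printf '<%s> ' "$@"; echo; }
    with_at()   { show "$@"; }  # "$@" keeps "a b" as one word
    with_star() { show $*; }    # unquoted $* re-splits it into two words
    with_at "a b" c             # prints: <a b> <c>
    with_star "a b" c           # prints: <a> <b> <c>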
if [[ "$OS" == "ubuntu" ]]; then if [[ "$OS" == "ubuntu" ]]; then
@ -401,14 +507,7 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
fi fi
if [ -n "$GCC_VERSION" ]; then if [ -n "$GCC_VERSION" ]; then
if [[ "$image" == *riscv* ]]; then if !(drun gcc --version 2>&1 | grep -q " $GCC_VERSION\\W"); then
# Check RISC-V cross-compilation toolchain version
if !(drun riscv64-linux-gnu-gcc-${GCC_VERSION} --version 2>&1 | grep -q " $GCC_VERSION\\W"); then
echo "RISC-V GCC_VERSION=$GCC_VERSION, but:"
drun riscv64-linux-gnu-gcc-${GCC_VERSION} --version
exit 1
fi
elif !(drun gcc --version 2>&1 | grep -q " $GCC_VERSION\\W"); then
echo "GCC_VERSION=$GCC_VERSION, but:" echo "GCC_VERSION=$GCC_VERSION, but:"
drun gcc --version drun gcc --version
exit 1 exit 1
@ -430,14 +529,3 @@ if [ -n "$KATEX" ]; then
exit 1 exit 1
fi fi
fi fi
HAS_TRITON=$(drun python -c "import triton" > /dev/null 2>&1 && echo "yes" || echo "no")
if [[ -n "$TRITON" || -n "$TRITON_CPU" ]]; then
if [ "$HAS_TRITON" = "no" ]; then
echo "expecting triton to be installed, but it is not"
exit 1
fi
elif [ "$HAS_TRITON" = "yes" ]; then
echo "expecting triton to not be installed, but it is"
exit 1
fi
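
The removed HAS_TRITON block captures a command's exit status as a "yes"/"no" string so it can drive two branches without re-running the container. The idiom in isolation (numpy stands in here for any importable module):

    HAS_NUMPY=$(python -c "import numpy" > /dev/null 2>&1 && echo "yes" || echo "no")
    if [ "$HAS_NUMPY" = "no" ]; then
        echo "expecting numpy to be installed, but it is not"
    fi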


@@ -17,8 +17,9 @@ RUN bash ./install_base.sh && rm install_base.sh
 # Update CentOS git version
 RUN yum -y remove git
 RUN yum -y remove git-*
-RUN yum -y install https://packages.endpointdev.com/rhel/7/os/x86_64/endpoint-repo-1.9-1.x86_64.rpm && \
-    sed -i 's/packages.endpoint/packages.endpointdev/' /etc/yum.repos.d/endpoint.repo
+RUN yum -y install https://packages.endpoint.com/rhel/7/os/x86_64/endpoint-repo-1.9-1.x86_64.rpm || \
+    (yum -y install https://packages.endpointdev.com/rhel/7/os/x86_64/endpoint-repo-1.9-1.x86_64.rpm && \
+    sed -i "s/packages.endpoint/packages.endpointdev/" /etc/yum.repos.d/endpoint.repo)
 RUN yum install -y git
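
The rewritten RUN line is a try-then-fallback: a || (b && c) runs the parenthesized group, in a subshell, only when the first install fails, and the trailing sed repoints the generated repo file at the mirror that actually worked. The same shape with placeholder URLs (the example.com hosts are hypothetical):

    yum -y install https://primary.example.com/endpoint-repo.rpm || \
        (yum -y install https://mirror.example.com/endpoint-repo.rpm && \
         sed -i 's/primary.example/mirror.example/' /etc/yum.repos.d/endpoint.repo)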
 # Install devtoolset
@@ -39,7 +40,7 @@ RUN bash ./install_user.sh && rm install_user.sh
 # Install conda and other packages (e.g., numpy, pytest)
 ARG ANACONDA_PYTHON_VERSION
-ARG BUILD_ENVIRONMENT
+ARG CONDA_CMAKE
 ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION
 ENV PATH /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/bin:/opt/conda/bin:$PATH
 COPY requirements-ci.txt /opt/conda/requirements-ci.txt
@@ -47,6 +48,20 @@ COPY ./common/install_conda.sh install_conda.sh
 COPY ./common/common_utils.sh common_utils.sh
 RUN bash ./install_conda.sh && rm install_conda.sh common_utils.sh /opt/conda/requirements-ci.txt

+# (optional) Install protobuf for ONNX
+ARG PROTOBUF
+COPY ./common/install_protobuf.sh install_protobuf.sh
+RUN if [ -n "${PROTOBUF}" ]; then bash ./install_protobuf.sh; fi
+RUN rm install_protobuf.sh
+ENV INSTALLED_PROTOBUF ${PROTOBUF}
+
+# (optional) Install database packages like LMDB and LevelDB
+ARG DB
+COPY ./common/install_db.sh install_db.sh
+RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi
+RUN rm install_db.sh
+ENV INSTALLED_DB ${DB}
+
 # (optional) Install vision packages like OpenCV
 ARG VISION
 COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./
@@ -56,15 +71,11 @@ ENV INSTALLED_VISION ${VISION}
 # Install rocm
 ARG ROCM_VERSION
-RUN mkdir ci_commit_pins
-COPY ./common/common_utils.sh common_utils.sh
-COPY ./ci_commit_pins/rocm-composable-kernel.txt ci_commit_pins/rocm-composable-kernel.txt
 COPY ./common/install_rocm.sh install_rocm.sh
 RUN bash ./install_rocm.sh
-RUN rm install_rocm.sh common_utils.sh
-RUN rm -r ci_commit_pins
+RUN rm install_rocm.sh
 COPY ./common/install_rocm_magma.sh install_rocm_magma.sh
-RUN bash ./install_rocm_magma.sh ${ROCM_VERSION}
+RUN bash ./install_rocm_magma.sh
 RUN rm install_rocm_magma.sh
 COPY ./common/install_amdsmi.sh install_amdsmi.sh
 RUN bash ./install_amdsmi.sh
@@ -78,6 +89,12 @@ ENV MAGMA_HOME /opt/rocm/magma
 ENV LANG en_US.utf8
 ENV LC_ALL en_US.utf8

+# (optional) Install non-default CMake version
+ARG CMAKE_VERSION
+COPY ./common/install_cmake.sh install_cmake.sh
+RUN if [ -n "${CMAKE_VERSION}" ]; then bash ./install_cmake.sh; fi
+RUN rm install_cmake.sh
+
 # (optional) Install non-default Ninja version
 ARG NINJA_VERSION
 COPY ./common/install_ninja.sh install_ninja.sh


@ -1 +1 @@
e0dda9059d082537cee36be6c5e4fe3b18c880c0 5e4d6b6380d575e48e37e9d987fded4ec588e7bc


@@ -1,2 +0,0 @@
-transformers==4.56.0
-soxr==0.5.0


@@ -0,0 +1 @@
+243e186efbf7fb93328dd6b34927a4e8c8f24395


@@ -1 +1 @@
-v2.27.5-1
+v2.25.1-1


@@ -1 +0,0 @@
-v2.27.7-1


@@ -1 +0,0 @@
-7fe50dc3da2069d6645d9deb8c017a876472a977


@@ -1 +0,0 @@
-74a23feff57432129df84d8099e622773cf77925


@@ -1 +1 @@
-1b0418a9a454b2b93ab8d71f40e59d2297157fae
+e98b6fcb8df5b44eb0d0addb6767c573d37ba024


@@ -1 +1 @@
-27664085f804afc83df26f740bb46c365854f2c4
+4b3bb1f8da0ded6ccd572dd1358ef45af5a1befe


@@ -23,10 +23,6 @@ conda_install() {
   as_jenkins conda install -q -n py_$ANACONDA_PYTHON_VERSION -y python="$ANACONDA_PYTHON_VERSION" $*
 }

-conda_install_through_forge() {
-  as_jenkins conda install -c conda-forge -q -n py_$ANACONDA_PYTHON_VERSION -y python="$ANACONDA_PYTHON_VERSION" $*
-}
-
 conda_run() {
   as_jenkins conda run -n py_$ANACONDA_PYTHON_VERSION --no-capture-output $*
 }

.ci/docker/common/install_acl.sh Executable file → Normal file

@@ -1,27 +1,16 @@
-#!/bin/bash
-# Script used only in CD pipeline
-set -eux
+set -euo pipefail

-ACL_VERSION=${ACL_VERSION:-"v25.02"}
-ACL_INSTALL_DIR="/acl"
+readonly version=v24.04
+readonly src_host=https://github.com/ARM-software
+readonly src_repo=ComputeLibrary

 # Clone ACL
-git clone https://github.com/ARM-software/ComputeLibrary.git -b "${ACL_VERSION}" --depth 1 --shallow-submodules
+[[ ! -d ${src_repo} ]] && git clone ${src_host}/${src_repo}.git
+cd ${src_repo}
+git checkout $version

-ACL_CHECKOUT_DIR="ComputeLibrary"
 # Build with scons
-pushd $ACL_CHECKOUT_DIR
 scons -j8 Werror=0 debug=0 neon=1 opencl=0 embed_kernels=0 \
   os=linux arch=armv8a build=native multi_isa=1 \
   fixed_format_kernels=1 openmp=1 cppthreads=0
-popd
-
-# Install ACL
-sudo mkdir -p ${ACL_INSTALL_DIR}
-for d in arm_compute include utils support src build
-do
-  sudo cp -r ${ACL_CHECKOUT_DIR}/${d} ${ACL_INSTALL_DIR}/${d}
-done
-rm -rf $ACL_CHECKOUT_DIR


@@ -15,9 +15,6 @@ install_ubuntu() {
   elif [[ "$UBUNTU_VERSION" == "22.04"* ]]; then
     cmake3="cmake=3.22*"
     maybe_libiomp_dev=""
-  elif [[ "$UBUNTU_VERSION" == "24.04"* ]]; then
-    cmake3="cmake=3.28*"
-    maybe_libiomp_dev=""
   else
     cmake3="cmake=3.5*"
     maybe_libiomp_dev="libiomp-dev"
@@ -33,6 +30,18 @@ install_ubuntu() {
     maybe_libomp_dev=""
   fi

+  # HACK: UCC testing relies on libnccl library from NVIDIA repo, and version 2.16 crashes
+  # See https://github.com/pytorch/pytorch/pull/105260#issuecomment-1673399729
+  # TODO: Eliminate this hack, we should not relay on apt-get installation
+  # See https://github.com/pytorch/pytorch/issues/144768
+  if [[ "$UBUNTU_VERSION" == "20.04"* && "$CUDA_VERSION" == "11.8"* ]]; then
+    maybe_libnccl_dev="libnccl2=2.15.5-1+cuda11.8 libnccl-dev=2.15.5-1+cuda11.8 --allow-downgrades --allow-change-held-packages"
+  elif [[ "$UBUNTU_VERSION" == "20.04"* && "$CUDA_VERSION" == "12.4"* ]]; then
+    maybe_libnccl_dev="libnccl2=2.25.1-1+cuda12.4 libnccl-dev=2.25.1-1+cuda12.4 --allow-downgrades --allow-change-held-packages"
+  else
+    maybe_libnccl_dev=""
+  fi
+
   # Install common dependencies
   apt-get update
   # TODO: Some of these may not be necessary
@@ -61,6 +70,7 @@ install_ubuntu() {
     libasound2-dev \
     libsndfile-dev \
     ${maybe_libomp_dev} \
+    ${maybe_libnccl_dev} \
     software-properties-common \
     wget \
     sudo \
@@ -89,6 +99,9 @@ install_centos() {
   ccache_deps="asciidoc docbook-dtds docbook-style-xsl libxslt"
   numpy_deps="gcc-gfortran"

+  # Note: protobuf-c-{compiler,devel} on CentOS are too old to be used
+  # for Caffe2. That said, we still install them to make sure the build
+  # system opts to build/use protoc and libprotobuf from third-party.
   yum install -y \
     $ccache_deps \
     $numpy_deps \


@@ -9,7 +9,7 @@ install_ubuntu() {
   # Instead use lib and headers from OpenSSL1.1 installed in `install_openssl.sh``
   apt-get install -y cargo
   echo "Checking out sccache repo"
-  git clone https://github.com/mozilla/sccache -b v0.10.0
+  git clone https://github.com/mozilla/sccache -b v0.9.1
   cd sccache
   echo "Building sccache"
   cargo build --release


@@ -4,10 +4,16 @@ set -ex

 if [ -n "$CLANG_VERSION" ]; then

-  if [[ $UBUNTU_VERSION == 22.04 ]]; then
+  if [[ $CLANG_VERSION == 9 && $UBUNTU_VERSION == 18.04 ]]; then
+    sudo apt-get update
+    # gpg-agent is not available by default on 18.04
+    sudo apt-get install -y --no-install-recommends gpg-agent
+    wget --no-check-certificate -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
+    apt-add-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-${CLANG_VERSION} main"
+  elif [[ $UBUNTU_VERSION == 22.04 ]]; then
     # work around ubuntu apt-get conflicts
     sudo apt-get -y -f install
     wget --no-check-certificate -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
     if [[ $CLANG_VERSION == 18 ]]; then
       apt-add-repository "deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-18 main"
     fi
@@ -35,7 +41,7 @@ if [ -n "$CLANG_VERSION" ]; then
   # clang's packaging is a little messed up (the runtime libs aren't
   # added into the linker path), so give it a little help
   clang_lib=("/usr/lib/llvm-$CLANG_VERSION/lib/clang/"*"/lib/linux")
-  echo "$clang_lib" >/etc/ld.so.conf.d/clang.conf
+  echo "$clang_lib" > /etc/ld.so.conf.d/clang.conf
   ldconfig

   # Cleanup package manager


@@ -0,0 +1,31 @@
#!/bin/bash
set -ex
[ -n "$CMAKE_VERSION" ]
# Remove system cmake install so it won't get used instead
ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"')
case "$ID" in
ubuntu)
apt-get remove cmake -y
;;
centos)
yum remove cmake -y
;;
*)
echo "Unable to determine OS..."
exit 1
;;
esac
# Turn 3.6.3 into v3.6
path=$(echo "${CMAKE_VERSION}" | sed -e 's/\([0-9].[0-9]\+\).*/v\1/')
file="cmake-${CMAKE_VERSION}-Linux-x86_64.tar.gz"
# Download and install specific CMake version in /usr/local
pushd /tmp
curl -Os --retry 3 "https://cmake.org/files/${path}/${file}"
tar -C /usr/local --strip-components 1 --no-same-owner -zxf cmake-*.tar.gz
rm -f cmake-*.tar.gz
popd
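
The sed expression in the new install_cmake.sh maps a full CMake version onto the directory name cmake.org uses for a release series, keeping only the leading major.minor. Checked in isolation:

    echo "3.18.5" | sed -e 's/\([0-9].[0-9]\+\).*/v\1/'   # prints v3.18
    echo "3.6.3"  | sed -e 's/\([0-9].[0-9]\+\).*/v\1/'   # prints v3.6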


@@ -4,8 +4,12 @@ set -ex
 # Optionally install conda
 if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
-  BASE_URL="https://github.com/conda-forge/miniforge/releases/latest/download"  # @lint-ignore
-  CONDA_FILE="Miniforge3-Linux-$(uname -m).sh"
+  BASE_URL="https://repo.anaconda.com/miniconda"
+  CONDA_FILE="Miniconda3-latest-Linux-x86_64.sh"
+  if [[ $(uname -m) == "aarch64" ]] || [[ "$BUILD_ENVIRONMENT" == *xpu* ]]; then
+    BASE_URL="https://github.com/conda-forge/miniforge/releases/latest/download"
+    CONDA_FILE="Miniforge3-Linux-$(uname -m).sh"
+  fi

   MAJOR_PYTHON_VERSION=$(echo "$ANACONDA_PYTHON_VERSION" | cut -d . -f 1)
   MINOR_PYTHON_VERSION=$(echo "$ANACONDA_PYTHON_VERSION" | cut -d . -f 2)
@@ -17,6 +21,7 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
       exit 1
       ;;
   esac

   mkdir -p /opt/conda
   chown jenkins:jenkins /opt/conda
@@ -57,33 +62,32 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
   # libstdcxx from conda default channels are too old, we need GLIBCXX_3.4.30
   # which is provided in libstdcxx 12 and up.
-  conda_install libstdcxx-ng=12.3.0 --update-deps -c conda-forge
-
-  # Miniforge installer doesn't install sqlite by default
-  if [[ "$BUILD_ENVIRONMENT" == *rocm* ]]; then
-    conda_install sqlite
-  fi
+  conda_install libstdcxx-ng=12.3.0 -c conda-forge

   # Install PyTorch conda deps, as per https://github.com/pytorch/pytorch README
-  if [[ $(uname -m) != "aarch64" ]]; then
-    pip_install mkl==2024.2.0
-    pip_install mkl-static==2024.2.0
-    pip_install mkl-include==2024.2.0
+  if [[ $(uname -m) == "aarch64" ]]; then
+    conda_install "openblas==0.3.28=*openmp*"
+  else
+    conda_install "mkl=2021.4.0 mkl-include=2021.4.0"
   fi

   # Install llvm-8 as it is required to compile llvmlite-0.30.0 from source
   # and libpython-static for torch deploy
   conda_install llvmdev=8.0.0 "libpython-static=${ANACONDA_PYTHON_VERSION}"

+  # Use conda cmake in some cases. Conda cmake will be newer than our supported
+  # min version (3.5 for xenial and 3.10 for bionic), so we only do it in those
+  # following builds that we know should use conda. Specifically, Ubuntu bionic
+  # and focal cannot find conda mkl with stock cmake, so we need a cmake from conda
+  if [ -n "${CONDA_CMAKE}" ]; then
+    conda_install cmake
+  fi
+
   # Magma package names are concatenation of CUDA major and minor ignoring revision
   # I.e. magma-cuda102 package corresponds to CUDA_VERSION=10.2 and CUDA_VERSION=10.2.89
   # Magma is installed from a tarball in the ossci-linux bucket into the conda env
   if [ -n "$CUDA_VERSION" ]; then
-    conda_run ${SCRIPT_FOLDER}/install_magma_conda.sh $(cut -f1-2 -d'.' <<< ${CUDA_VERSION})
+    ${SCRIPT_FOLDER}/install_magma_conda.sh $(cut -f1-2 -d'.' <<< ${CUDA_VERSION}) ${ANACONDA_PYTHON_VERSION}
   fi
-
-  if [[ "$UBUNTU_VERSION" == "24.04"* ]] ; then
-    conda_install_through_forge libstdcxx-ng=14
-  fi

   # Install some other packages, including those needed for Python test reporting


@@ -3,10 +3,11 @@
 set -uex -o pipefail

 PYTHON_DOWNLOAD_URL=https://www.python.org/ftp/python
+PYTHON_DOWNLOAD_GITHUB_BRANCH=https://github.com/python/cpython/archive/refs/heads
 GET_PIP_URL=https://bootstrap.pypa.io/get-pip.py

 # Python versions to be installed in /opt/$VERSION_NO
-CPYTHON_VERSIONS=${CPYTHON_VERSIONS:-"3.9.0 3.10.1 3.11.0 3.12.0 3.13.0 3.13.0t 3.14.0 3.14.0t"}
+CPYTHON_VERSIONS=${CPYTHON_VERSIONS:-"3.8.1 3.9.0 3.10.1 3.11.0 3.12.0 3.13.0 3.13.0t"}

 function check_var {
   if [ -z "$1" ]; then
@@ -23,8 +24,9 @@ function do_cpython_build {
   tar -xzf Python-$py_ver.tgz

   local additional_flags=""
-  if [[ "$py_ver" == *"t" ]]; then
+  if [ "$py_ver" == "3.13.0t" ]; then
     additional_flags=" --disable-gil"
+    mv cpython-3.13/ cpython-3.13t/
   fi

   pushd $py_folder
@@ -66,29 +68,32 @@ function do_cpython_build {
     ln -s pip3 ${prefix}/bin/pip
   fi
   # install setuptools since python 3.12 is required to use distutils
-  # packaging is needed to create symlink since wheel no longer provides needed information
-  ${prefix}/bin/pip install packaging==25.0 wheel==0.45.1 setuptools==80.9.0
-  local abi_tag=$(${prefix}/bin/python -c "from packaging.tags import interpreter_name, interpreter_version; import sysconfig ; from sysconfig import get_config_var; print('{0}{1}-{0}{1}{2}'.format(interpreter_name(), interpreter_version(), 't' if sysconfig.get_config_var('Py_GIL_DISABLED') else ''))")
+  ${prefix}/bin/pip install wheel==0.34.2 setuptools==68.2.2
+  local abi_tag=$(${prefix}/bin/python -c "from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag; print('{0}{1}-{2}'.format(get_abbr_impl(), get_impl_ver(), get_abi_tag()))")
   ln -sf ${prefix} /opt/python/${abi_tag}
 }
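
Both sides of this hunk compute the wheel ABI tag by asking the freshly built interpreter: the base version via packaging.tags, the applied version via the long-removed wheel.pep425tags module. The packaging-based probe can be run against whichever python is on PATH, assuming packaging is installed:

    python -c "from packaging.tags import interpreter_name, interpreter_version; import sysconfig; print('{0}{1}-{0}{1}{2}'.format(interpreter_name(), interpreter_version(), 't' if sysconfig.get_config_var('Py_GIL_DISABLED') else ''))"
    # prints e.g. cp312-cp312, or cp313t-cp313t on a free-threaded build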
 function build_cpython {
   local py_ver=$1
   check_var $py_ver
-  local py_suffix=$py_ver
-  local py_folder=$py_ver
-  # Special handling for nogil
-  if [[ "${py_ver}" == *"t" ]]; then
-    py_suffix=${py_ver::-1}
-    py_folder=$py_suffix
+  check_var $PYTHON_DOWNLOAD_URL
+  local py_ver_folder=$py_ver
+  if [ "$py_ver" = "3.13.0t" ]; then
+    PY_VER_SHORT="3.13"
+    PYT_VER_SHORT="3.13t"
+    check_var $PYTHON_DOWNLOAD_GITHUB_BRANCH
+    wget $PYTHON_DOWNLOAD_GITHUB_BRANCH/$PY_VER_SHORT.tar.gz -O Python-$py_ver.tgz
+    do_cpython_build $py_ver cpython-$PYT_VER_SHORT
+  elif [ "$py_ver" = "3.13.0" ]; then
+    PY_VER_SHORT="3.13"
+    check_var $PYTHON_DOWNLOAD_GITHUB_BRANCH
+    wget $PYTHON_DOWNLOAD_GITHUB_BRANCH/$PY_VER_SHORT.tar.gz -O Python-$py_ver.tgz
+    do_cpython_build $py_ver cpython-$PY_VER_SHORT
+  else
+    wget -q $PYTHON_DOWNLOAD_URL/$py_ver_folder/Python-$py_ver.tgz
+    do_cpython_build $py_ver Python-$py_ver
   fi
-  # Update to rc2 due to https://github.com/python/cpython/commit/c72699086fe4
-  if [ "$py_suffix" == "3.14.0" ]; then
-    py_suffix="3.14.0rc2"
-  fi
-  wget -q $PYTHON_DOWNLOAD_URL/$py_folder/Python-$py_suffix.tgz -O Python-$py_ver.tgz
-  do_cpython_build $py_ver Python-$py_suffix

   rm -f Python-$py_ver.tgz
 }


@@ -2,164 +2,273 @@
 set -ex

-arch_path=''
-targetarch=${TARGETARCH:-$(uname -m)}
-if [ ${targetarch} = 'amd64' ] || [ "${targetarch}" = 'x86_64' ]; then
-  arch_path='x86_64'
-else
-  arch_path='sbsa'
-fi
-
-NVSHMEM_VERSION=3.3.24
-
-function install_cuda {
-  version=$1
-  runfile=$2
-  major_minor=${version%.*}
-  rm -rf /usr/local/cuda-${major_minor} /usr/local/cuda
-  if [[ ${arch_path} == 'sbsa' ]]; then
-    runfile="${runfile}_sbsa"
-  fi
-  runfile="${runfile}.run"
-  wget -q https://developer.download.nvidia.com/compute/cuda/${version}/local_installers/${runfile} -O ${runfile}
-  chmod +x ${runfile}
-  ./${runfile} --toolkit --silent
-  rm -f ${runfile}
-  rm -f /usr/local/cuda && ln -s /usr/local/cuda-${major_minor} /usr/local/cuda
-}
+NCCL_VERSION=v2.25.1-1
+CUDNN_VERSION=9.5.1.17
+
+function install_cusparselt_040 {
+  # cuSparseLt license: https://docs.nvidia.com/cuda/cusparselt/license.html
+  mkdir tmp_cusparselt && pushd tmp_cusparselt
+  wget -q https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-x86_64/libcusparse_lt-linux-x86_64-0.4.0.7-archive.tar.xz
+  tar xf libcusparse_lt-linux-x86_64-0.4.0.7-archive.tar.xz
+  cp -a libcusparse_lt-linux-x86_64-0.4.0.7-archive/include/* /usr/local/cuda/include/
+  cp -a libcusparse_lt-linux-x86_64-0.4.0.7-archive/lib/* /usr/local/cuda/lib64/
+  popd
+  rm -rf tmp_cusparselt
+}
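
The removed install_cuda helper centralizes what the older per-version functions below spell out by hand: it derives the /usr/local/cuda-X.Y prefix from the full version and appends the _sbsa suffix NVIDIA uses for aarch64 runfiles. How one call expands, step by step (values taken from the 12.6 call later in this file):

    # install_cuda 12.6.3 cuda_12.6.3_560.35.05_linux, on an aarch64 host:
    #   major_minor=12.6
    #   runfile=cuda_12.6.3_560.35.05_linux_sbsa.run
    #   wget https://developer.download.nvidia.com/compute/cuda/12.6.3/local_installers/cuda_12.6.3_560.35.05_linux_sbsa.run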
-function install_cudnn {
-  cuda_major_version=$1
-  cudnn_version=$2
-  mkdir tmp_cudnn && cd tmp_cudnn
-  # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
-  filepath="cudnn-linux-${arch_path}-${cudnn_version}_cuda${cuda_major_version}-archive"
-  wget -q https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-${arch_path}/${filepath}.tar.xz
-  tar xf ${filepath}.tar.xz
-  cp -a ${filepath}/include/* /usr/local/cuda/include/
-  cp -a ${filepath}/lib/* /usr/local/cuda/lib64/
-  cd ..
-  rm -rf tmp_cudnn
-}
-
-function install_nvshmem {
-  cuda_major_version=$1   # e.g. "12"
-  nvshmem_version=$2      # e.g. "3.3.9"
-
-  case "${arch_path}" in
-    sbsa)
-      dl_arch="aarch64"
-      ;;
-    x86_64)
-      dl_arch="x64"
-      ;;
-    *)
-      dl_arch="${arch}"
-      ;;
-  esac
-
-  tmpdir="tmp_nvshmem"
-  mkdir -p "${tmpdir}" && cd "${tmpdir}"
-
-  # nvSHMEM license: https://docs.nvidia.com/nvshmem/api/sla.html
-  # This pattern is a lie as it is not consistent across versions, for 3.3.9 it was cuda_ver-arch-nvshhem-ver
-  filename="libnvshmem-linux-${arch_path}-${nvshmem_version}_cuda${cuda_major_version}-archive"
-  suffix=".tar.xz"
-  url="https://developer.download.nvidia.com/compute/nvshmem/redist/libnvshmem/linux-${arch_path}/${filename}${suffix}"
-
-  # download, unpack, install
-  wget -q "${url}"
-  tar xf "${filename}${suffix}"
-  cp -a "${filename}/include/"* /usr/local/cuda/include/
-  cp -a "${filename}/lib/"* /usr/local/cuda/lib64/
-
-  # cleanup
-  cd ..
-  rm -rf "${tmpdir}"
-
-  echo "nvSHMEM ${nvshmem_version} for CUDA ${cuda_major_version} (${arch_path}) installed."
-}
+function install_cusparselt_062 {
+  # cuSparseLt license: https://docs.nvidia.com/cuda/cusparselt/license.html
+  mkdir tmp_cusparselt && pushd tmp_cusparselt
+  wget -q https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-x86_64/libcusparse_lt-linux-x86_64-0.6.2.3-archive.tar.xz
+  tar xf libcusparse_lt-linux-x86_64-0.6.2.3-archive.tar.xz
+  cp -a libcusparse_lt-linux-x86_64-0.6.2.3-archive/include/* /usr/local/cuda/include/
+  cp -a libcusparse_lt-linux-x86_64-0.6.2.3-archive/lib/* /usr/local/cuda/lib64/
+  popd
+  rm -rf tmp_cusparselt
+}
+
+function install_cusparselt_063 {
+  # cuSparseLt license: https://docs.nvidia.com/cuda/cusparselt/license.html
+  mkdir tmp_cusparselt && pushd tmp_cusparselt
+  wget -q https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-x86_64/libcusparse_lt-linux-x86_64-0.6.3.2-archive.tar.xz
+  tar xf libcusparse_lt-linux-x86_64-0.6.3.2-archive.tar.xz
+  cp -a libcusparse_lt-linux-x86_64-0.6.3.2-archive/include/* /usr/local/cuda/include/
+  cp -a libcusparse_lt-linux-x86_64-0.6.3.2-archive/lib/* /usr/local/cuda/lib64/
+  popd
+  rm -rf tmp_cusparselt
+}
+
+function install_118 {
+  CUDNN_VERSION=9.1.0.70
+  NCCL_VERSION=v2.21.5-1
+  echo "Installing CUDA 11.8 and cuDNN ${CUDNN_VERSION} and NCCL ${NCCL_VERSION} and cuSparseLt-0.4.0"
+  rm -rf /usr/local/cuda-11.8 /usr/local/cuda
+  # install CUDA 11.8.0 in the same container
+  wget -q https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run
+  chmod +x cuda_11.8.0_520.61.05_linux.run
+  ./cuda_11.8.0_520.61.05_linux.run --toolkit --silent
+  rm -f cuda_11.8.0_520.61.05_linux.run
+  rm -f /usr/local/cuda && ln -s /usr/local/cuda-11.8 /usr/local/cuda
+
+  # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
+  mkdir tmp_cudnn && cd tmp_cudnn
+  wget -q https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-${CUDNN_VERSION}_cuda11-archive.tar.xz -O cudnn-linux-x86_64-${CUDNN_VERSION}_cuda11-archive.tar.xz
+  tar xf cudnn-linux-x86_64-${CUDNN_VERSION}_cuda11-archive.tar.xz
+  cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda11-archive/include/* /usr/local/cuda/include/
+  cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda11-archive/lib/* /usr/local/cuda/lib64/
+  cd ..
+  rm -rf tmp_cudnn
+
+  # NCCL license: https://docs.nvidia.com/deeplearning/nccl/#licenses
+  # Follow build: https://github.com/NVIDIA/nccl/tree/master?tab=readme-ov-file#build
+  git clone -b $NCCL_VERSION --depth 1 https://github.com/NVIDIA/nccl.git
+  cd nccl && make -j src.build
+  cp -a build/include/* /usr/local/cuda/include/
+  cp -a build/lib/* /usr/local/cuda/lib64/
+  cd ..
+  rm -rf nccl
+
+  install_cusparselt_040
+
+  ldconfig
+}
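
The removed install_nvshmem builds the archive name from arch, nvSHMEM version, and CUDA major version; as its own comment warns, the naming pattern is not stable across releases. What the variables expand to for one concrete call (12 / 3.3.24 on x86_64):

    # filename=libnvshmem-linux-x86_64-3.3.24_cuda12-archive
    # url=https://developer.download.nvidia.com/compute/nvshmem/redist/libnvshmem/linux-x86_64/libnvshmem-linux-x86_64-3.3.24_cuda12-archive.tar.xz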
 function install_124 {
   CUDNN_VERSION=9.1.0.70
-  echo "Installing CUDA 12.4.1 and cuDNN ${CUDNN_VERSION} and NCCL and cuSparseLt-0.6.2"
-  install_cuda 12.4.1 cuda_12.4.1_550.54.15_linux
+  echo "Installing CUDA 12.4.1 and cuDNN ${CUDNN_VERSION} and NCCL ${NCCL_VERSION} and cuSparseLt-0.6.2"
+  rm -rf /usr/local/cuda-12.4 /usr/local/cuda
+  # install CUDA 12.4.1 in the same container
+  wget -q https://developer.download.nvidia.com/compute/cuda/12.4.1/local_installers/cuda_12.4.1_550.54.15_linux.run
+  chmod +x cuda_12.4.1_550.54.15_linux.run
+  ./cuda_12.4.1_550.54.15_linux.run --toolkit --silent
+  rm -f cuda_12.4.1_550.54.15_linux.run
+  rm -f /usr/local/cuda && ln -s /usr/local/cuda-12.4 /usr/local/cuda

-  install_cudnn 12 $CUDNN_VERSION
+  # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
+  mkdir tmp_cudnn && cd tmp_cudnn
+  wget -q https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz -O cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz
+  tar xf cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz
+  cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive/include/* /usr/local/cuda/include/
+  cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive/lib/* /usr/local/cuda/lib64/
+  cd ..
+  rm -rf tmp_cudnn

-  CUDA_VERSION=12.4 bash install_nccl.sh
+  # NCCL license: https://docs.nvidia.com/deeplearning/nccl/#licenses
+  # Follow build: https://github.com/NVIDIA/nccl/tree/master?tab=readme-ov-file#build
+  git clone -b $NCCL_VERSION --depth 1 https://github.com/NVIDIA/nccl.git
+  cd nccl && make -j src.build
+  cp -a build/include/* /usr/local/cuda/include/
+  cp -a build/lib/* /usr/local/cuda/lib64/
+  cd ..
+  rm -rf nccl

-  CUDA_VERSION=12.4 bash install_cusparselt.sh
+  install_cusparselt_062

   ldconfig
 }

 function install_126 {
-  CUDNN_VERSION=9.10.2.21
-  echo "Installing CUDA 12.6.3 and cuDNN ${CUDNN_VERSION} and NVSHMEM and NCCL and cuSparseLt-0.7.1"
-  install_cuda 12.6.3 cuda_12.6.3_560.35.05_linux
+  echo "Installing CUDA 12.6.3 and cuDNN ${CUDNN_VERSION} and NCCL ${NCCL_VERSION} and cuSparseLt-0.6.3"
+  rm -rf /usr/local/cuda-12.6 /usr/local/cuda
+  # install CUDA 12.6.3 in the same container
+  wget -q https://developer.download.nvidia.com/compute/cuda/12.6.3/local_installers/cuda_12.6.3_560.35.05_linux.run
+  chmod +x cuda_12.6.3_560.35.05_linux.run
+  ./cuda_12.6.3_560.35.05_linux.run --toolkit --silent
+  rm -f cuda_12.6.3_560.35.05_linux.run
+  rm -f /usr/local/cuda && ln -s /usr/local/cuda-12.6 /usr/local/cuda

-  install_cudnn 12 $CUDNN_VERSION
+  # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
+  mkdir tmp_cudnn && cd tmp_cudnn
+  wget -q https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz -O cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz
+  tar xf cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz
+  cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive/include/* /usr/local/cuda/include/
+  cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive/lib/* /usr/local/cuda/lib64/
+  cd ..
+  rm -rf tmp_cudnn

-  install_nvshmem 12 $NVSHMEM_VERSION
-
-  CUDA_VERSION=12.6 bash install_nccl.sh
-
-  CUDA_VERSION=12.6 bash install_cusparselt.sh
+  # NCCL license: https://docs.nvidia.com/deeplearning/nccl/#licenses
+  # Follow build: https://github.com/NVIDIA/nccl/tree/master?tab=readme-ov-file#build
+  git clone -b $NCCL_VERSION --depth 1 https://github.com/NVIDIA/nccl.git
+  cd nccl && make -j src.build
+  cp -a build/include/* /usr/local/cuda/include/
+  cp -a build/lib/* /usr/local/cuda/lib64/
+  cd ..
+  rm -rf nccl
+
+  install_cusparselt_063

   ldconfig
 }
-function install_129 {
-  CUDNN_VERSION=9.10.2.21
-  echo "Installing CUDA 12.9.1 and cuDNN ${CUDNN_VERSION} and NVSHMEM and NCCL and cuSparseLt-0.7.1"
-  # install CUDA 12.9.1 in the same container
-  install_cuda 12.9.1 cuda_12.9.1_575.57.08_linux
-
-  # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
-  install_cudnn 12 $CUDNN_VERSION
-
-  install_nvshmem 12 $NVSHMEM_VERSION
-
-  CUDA_VERSION=12.9 bash install_nccl.sh
-
-  CUDA_VERSION=12.9 bash install_cusparselt.sh
-
-  ldconfig
-}
+function prune_118 {
+  echo "Pruning CUDA 11.8 and cuDNN"
+  #####################################################################################
+  # CUDA 11.8 prune static libs
+  #####################################################################################
+  export NVPRUNE="/usr/local/cuda-11.8/bin/nvprune"
+  export CUDA_LIB_DIR="/usr/local/cuda-11.8/lib64"
+
+  export GENCODE="-gencode arch=compute_35,code=sm_35 -gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90"
+  export GENCODE_CUDNN="-gencode arch=compute_35,code=sm_35 -gencode arch=compute_37,code=sm_37 -gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90"
+
+  if [[ -n "$OVERRIDE_GENCODE" ]]; then
+    export GENCODE=$OVERRIDE_GENCODE
+  fi
+
+  # all CUDA libs except CuDNN and CuBLAS (cudnn and cublas need arch 3.7 included)
+  ls $CUDA_LIB_DIR/ | grep "\.a" | grep -v "culibos" | grep -v "cudart" | grep -v "cudnn" | grep -v "cublas" | grep -v "metis" \
+      | xargs -I {} bash -c \
+                "echo {} && $NVPRUNE $GENCODE $CUDA_LIB_DIR/{} -o $CUDA_LIB_DIR/{}"
+
+  # prune CuDNN and CuBLAS
+  $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublas_static.a -o $CUDA_LIB_DIR/libcublas_static.a
+  $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublasLt_static.a -o $CUDA_LIB_DIR/libcublasLt_static.a
+
+  #####################################################################################
+  # CUDA 11.8 prune visual tools
+  #####################################################################################
+  export CUDA_BASE="/usr/local/cuda-11.8/"
+  rm -rf $CUDA_BASE/libnvvp $CUDA_BASE/nsightee_plugins $CUDA_BASE/nsight-compute-2022.3.0 $CUDA_BASE/nsight-systems-2022.4.2/
+}
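
The prune_* functions rewrite each static CUDA library in place so it carries only the listed SM architectures, which is what shrinks these images. A one-off nvprune invocation outside the loop (the library chosen here is arbitrary; nvprune ships with the toolkit):

    /usr/local/cuda-11.8/bin/nvprune \
        -gencode arch=compute_80,code=sm_80 \
        -gencode arch=compute_90,code=sm_90 \
        /usr/local/cuda-11.8/lib64/libcufft_static.a \
        -o /tmp/libcufft_static.pruned.a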
+
+function prune_124 {
+  echo "Pruning CUDA 12.4"
+  #####################################################################################
+  # CUDA 12.4 prune static libs
+  #####################################################################################
+  export NVPRUNE="/usr/local/cuda-12.4/bin/nvprune"
+  export CUDA_LIB_DIR="/usr/local/cuda-12.4/lib64"
+
+  export GENCODE="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90"
+  export GENCODE_CUDNN="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90"
+
+  if [[ -n "$OVERRIDE_GENCODE" ]]; then
+    export GENCODE=$OVERRIDE_GENCODE
+  fi
+  if [[ -n "$OVERRIDE_GENCODE_CUDNN" ]]; then
+    export GENCODE_CUDNN=$OVERRIDE_GENCODE_CUDNN
+  fi
+
+  # all CUDA libs except CuDNN and CuBLAS
+  ls $CUDA_LIB_DIR/ | grep "\.a" | grep -v "culibos" | grep -v "cudart" | grep -v "cudnn" | grep -v "cublas" | grep -v "metis" \
+      | xargs -I {} bash -c \
+                "echo {} && $NVPRUNE $GENCODE $CUDA_LIB_DIR/{} -o $CUDA_LIB_DIR/{}"
+
+  # prune CuDNN and CuBLAS
+  $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublas_static.a -o $CUDA_LIB_DIR/libcublas_static.a
+  $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublasLt_static.a -o $CUDA_LIB_DIR/libcublasLt_static.a
+
+  #####################################################################################
+  # CUDA 12.4 prune visual tools
+  #####################################################################################
+  export CUDA_BASE="/usr/local/cuda-12.4/"
+  rm -rf $CUDA_BASE/libnvvp $CUDA_BASE/nsightee_plugins $CUDA_BASE/nsight-compute-2024.1.0 $CUDA_BASE/nsight-systems-2023.4.4/
+}
+
+function prune_126 {
+  echo "Pruning CUDA 12.6"
+  #####################################################################################
+  # CUDA 12.6 prune static libs
+  #####################################################################################
+  export NVPRUNE="/usr/local/cuda-12.6/bin/nvprune"
+  export CUDA_LIB_DIR="/usr/local/cuda-12.6/lib64"
+
+  export GENCODE="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90"
+  export GENCODE_CUDNN="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90"
+
+  if [[ -n "$OVERRIDE_GENCODE" ]]; then
+    export GENCODE=$OVERRIDE_GENCODE
+  fi
+  if [[ -n "$OVERRIDE_GENCODE_CUDNN" ]]; then
+    export GENCODE_CUDNN=$OVERRIDE_GENCODE_CUDNN
+  fi
+
+  # all CUDA libs except CuDNN and CuBLAS
+  ls $CUDA_LIB_DIR/ | grep "\.a" | grep -v "culibos" | grep -v "cudart" | grep -v "cudnn" | grep -v "cublas" | grep -v "metis" \
+      | xargs -I {} bash -c \
+                "echo {} && $NVPRUNE $GENCODE $CUDA_LIB_DIR/{} -o $CUDA_LIB_DIR/{}"
+
+  # prune CuDNN and CuBLAS
+  $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublas_static.a -o $CUDA_LIB_DIR/libcublas_static.a
+  $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublasLt_static.a -o $CUDA_LIB_DIR/libcublasLt_static.a
+
+  #####################################################################################
+  # CUDA 12.6 prune visual tools
+  #####################################################################################
+  export CUDA_BASE="/usr/local/cuda-12.6/"
+  rm -rf $CUDA_BASE/libnvvp $CUDA_BASE/nsightee_plugins $CUDA_BASE/nsight-compute-2024.3.2 $CUDA_BASE/nsight-systems-2024.5.1/
+}
 function install_128 {
-  CUDNN_VERSION=9.8.0.87
-  echo "Installing CUDA 12.8.1 and cuDNN ${CUDNN_VERSION} and NVSHMEM and NCCL and cuSparseLt-0.7.1"
-  # install CUDA 12.8.1 in the same container
-  install_cuda 12.8.1 cuda_12.8.1_570.124.06_linux
+  CUDNN_VERSION=9.7.1.26
+  echo "Installing CUDA 12.8.0 and cuDNN ${CUDNN_VERSION} and NCCL ${NCCL_VERSION} and cuSparseLt-0.6.3"
+  rm -rf /usr/local/cuda-12.8 /usr/local/cuda
+  # install CUDA 12.8.0 in the same container
+  wget -q https://developer.download.nvidia.com/compute/cuda/12.8.0/local_installers/cuda_12.8.0_570.86.10_linux.run
+  chmod +x cuda_12.8.0_570.86.10_linux.run
+  ./cuda_12.8.0_570.86.10_linux.run --toolkit --silent
+  rm -f cuda_12.8.0_570.86.10_linux.run
+  rm -f /usr/local/cuda && ln -s /usr/local/cuda-12.8 /usr/local/cuda

   # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
-  install_cudnn 12 $CUDNN_VERSION
-
-  install_nvshmem 12 $NVSHMEM_VERSION
-
-  CUDA_VERSION=12.8 bash install_nccl.sh
-
-  CUDA_VERSION=12.8 bash install_cusparselt.sh
-
-  ldconfig
-}
-
-function install_130 {
-  CUDNN_VERSION=9.13.0.50
-  echo "Installing CUDA 13.0 and cuDNN ${CUDNN_VERSION} and NVSHMEM and NCCL and cuSparseLt-0.7.1"
-  # install CUDA 13.0 in the same container
-  install_cuda 13.0.0 cuda_13.0.0_580.65.06_linux
-
-  # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
-  install_cudnn 13 $CUDNN_VERSION
-
-  install_nvshmem 13 $NVSHMEM_VERSION
-
-  CUDA_VERSION=13.0 bash install_nccl.sh
-
-  CUDA_VERSION=13.0 bash install_cusparselt.sh
+  mkdir tmp_cudnn && cd tmp_cudnn
+  wget -q https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz -O cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz
+  tar xf cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz
+  cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive/include/* /usr/local/cuda/include/
+  cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive/lib/* /usr/local/cuda/lib64/
+  cd ..
+  rm -rf tmp_cudnn
+
+  # NCCL license: https://docs.nvidia.com/deeplearning/nccl/#licenses
+  # Follow build: https://github.com/NVIDIA/nccl/tree/master?tab=readme-ov-file#build
+  git clone -b $NCCL_VERSION --depth 1 https://github.com/NVIDIA/nccl.git
+  cd nccl && make -j src.build
+  cp -a build/include/* /usr/local/cuda/include/
+  cp -a build/lib/* /usr/local/cuda/lib64/
+  cd ..
+  rm -rf nccl
+
+  install_cusparselt_063

   ldconfig
 }
@@ -168,15 +277,13 @@ function install_130 {
 while test $# -gt 0
 do
   case "$1" in
-    12.4) install_124;
+    11.8) install_118; prune_118
       ;;
-    12.6|12.6.*) install_126;
+    12.4) install_124; prune_124
       ;;
-    12.8|12.8.*) install_128;
+    12.6) install_126; prune_126
       ;;
-    12.9|12.9.*) install_129;
-      ;;
-    13.0|13.0.*) install_130;
+    12.8) install_128;
       ;;
     *) echo "bad argument $1"; exit 1
       ;;
   esac
   shift
 done


@@ -0,0 +1,211 @@
#!/bin/bash
# Script used only in CD pipeline
set -ex
NCCL_VERSION=v2.21.5-1
CUDNN_VERSION=9.5.1.17
function install_cusparselt_062 {
# cuSparseLt license: https://docs.nvidia.com/cuda/cusparselt/license.html
mkdir tmp_cusparselt && pushd tmp_cusparselt
wget -q https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-sbsa/libcusparse_lt-linux-sbsa-0.6.2.3-archive.tar.xz
tar xf libcusparse_lt-linux-sbsa-0.6.2.3-archive.tar.xz
cp -a libcusparse_lt-linux-sbsa-0.6.2.3-archive/include/* /usr/local/cuda/include/
cp -a libcusparse_lt-linux-sbsa-0.6.2.3-archive/lib/* /usr/local/cuda/lib64/
popd
rm -rf tmp_cusparselt
}
function install_cusparselt_063 {
# cuSparseLt license: https://docs.nvidia.com/cuda/cusparselt/license.html
mkdir tmp_cusparselt && pushd tmp_cusparselt
wget -q https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-sbsa/libcusparse_lt-linux-sbsa-0.6.3.2-archive.tar.xz
tar xf libcusparse_lt-linux-sbsa-0.6.3.2-archive.tar.xz
cp -a libcusparse_lt-linux-sbsa-0.6.3.2-archive/include/* /usr/local/cuda/include/
cp -a libcusparse_lt-linux-sbsa-0.6.3.2-archive/lib/* /usr/local/cuda/lib64/
popd
rm -rf tmp_cusparselt
}
function install_124 {
CUDNN_VERSION=9.1.0.70
echo "Installing CUDA 12.4.1 and cuDNN ${CUDNN_VERSION} and NCCL ${NCCL_VERSION} and cuSparseLt-0.6.2"
rm -rf /usr/local/cuda-12.4 /usr/local/cuda
# install CUDA 12.4.1 in the same container
wget -q https://developer.download.nvidia.com/compute/cuda/12.4.1/local_installers/cuda_12.4.1_550.54.15_linux_sbsa.run
chmod +x cuda_12.4.1_550.54.15_linux_sbsa.run
./cuda_12.4.1_550.54.15_linux_sbsa.run --toolkit --silent
rm -f cuda_12.4.1_550.54.15_linux_sbsa.run
rm -f /usr/local/cuda && ln -s /usr/local/cuda-12.4 /usr/local/cuda
# cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
mkdir tmp_cudnn && cd tmp_cudnn
wget -q https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-sbsa/cudnn-linux-sbsa-${CUDNN_VERSION}_cuda12-archive.tar.xz -O cudnn-linux-sbsa-${CUDNN_VERSION}_cuda12-archive.tar.xz
tar xf cudnn-linux-sbsa-${CUDNN_VERSION}_cuda12-archive.tar.xz
cp -a cudnn-linux-sbsa-${CUDNN_VERSION}_cuda12-archive/include/* /usr/local/cuda/include/
cp -a cudnn-linux-sbsa-${CUDNN_VERSION}_cuda12-archive/lib/* /usr/local/cuda/lib64/
cd ..
rm -rf tmp_cudnn
# NCCL license: https://docs.nvidia.com/deeplearning/nccl/#licenses
# Follow build: https://github.com/NVIDIA/nccl/tree/master?tab=readme-ov-file#build
git clone -b ${NCCL_VERSION} --depth 1 https://github.com/NVIDIA/nccl.git
cd nccl && make -j src.build
cp -a build/include/* /usr/local/cuda/include/
cp -a build/lib/* /usr/local/cuda/lib64/
cd ..
rm -rf nccl
install_cusparselt_063
ldconfig
}
function prune_124 {
echo "Pruning CUDA 12.4"
#####################################################################################
# CUDA 12.4 prune static libs
#####################################################################################
export NVPRUNE="/usr/local/cuda-12.4/bin/nvprune"
export CUDA_LIB_DIR="/usr/local/cuda-12.4/lib64"
export GENCODE="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90"
export GENCODE_CUDNN="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90"
if [[ -n "$OVERRIDE_GENCODE" ]]; then
export GENCODE=$OVERRIDE_GENCODE
fi
# all CUDA libs except CuDNN and CuBLAS
ls $CUDA_LIB_DIR/ | grep "\.a" | grep -v "culibos" | grep -v "cudart" | grep -v "cudnn" | grep -v "cublas" | grep -v "metis" \
| xargs -I {} bash -c \
"echo {} && $NVPRUNE $GENCODE $CUDA_LIB_DIR/{} -o $CUDA_LIB_DIR/{}"
# prune CuDNN and CuBLAS
$NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublas_static.a -o $CUDA_LIB_DIR/libcublas_static.a
$NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublasLt_static.a -o $CUDA_LIB_DIR/libcublasLt_static.a
#####################################################################################
# CUDA 12.4 prune visual tools
#####################################################################################
export CUDA_BASE="/usr/local/cuda-12.4/"
rm -rf $CUDA_BASE/libnvvp $CUDA_BASE/nsightee_plugins $CUDA_BASE/nsight-compute-2024.1.0 $CUDA_BASE/nsight-systems-2023.4.4/
}
function install_126 {
echo "Installing CUDA 12.6.3 and cuDNN ${CUDNN_VERSION} and NCCL ${NCCL_VERSION} and cuSparseLt-0.6.3"
rm -rf /usr/local/cuda-12.6 /usr/local/cuda
# install CUDA 12.6.3 in the same container
wget -q https://developer.download.nvidia.com/compute/cuda/12.6.3/local_installers/cuda_12.6.3_560.35.05_linux_sbsa.run
chmod +x cuda_12.6.3_560.35.05_linux_sbsa.run
./cuda_12.6.3_560.35.05_linux_sbsa.run --toolkit --silent
rm -f cuda_12.6.3_560.35.05_linux_sbsa.run
rm -f /usr/local/cuda && ln -s /usr/local/cuda-12.6 /usr/local/cuda
# cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
mkdir tmp_cudnn && cd tmp_cudnn
wget -q https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-sbsa/cudnn-linux-sbsa-${CUDNN_VERSION}_cuda12-archive.tar.xz -O cudnn-linux-sbsa-${CUDNN_VERSION}_cuda12-archive.tar.xz
tar xf cudnn-linux-sbsa-${CUDNN_VERSION}_cuda12-archive.tar.xz
cp -a cudnn-linux-sbsa-${CUDNN_VERSION}_cuda12-archive/include/* /usr/local/cuda/include/
cp -a cudnn-linux-sbsa-${CUDNN_VERSION}_cuda12-archive/lib/* /usr/local/cuda/lib64/
cd ..
rm -rf tmp_cudnn
# NCCL license: https://docs.nvidia.com/deeplearning/nccl/#licenses
# Follow build: https://github.com/NVIDIA/nccl/tree/master?tab=readme-ov-file#build
git clone -b ${NCCL_VERSION} --depth 1 https://github.com/NVIDIA/nccl.git
cd nccl && make -j src.build
cp -a build/include/* /usr/local/cuda/include/
cp -a build/lib/* /usr/local/cuda/lib64/
cd ..
rm -rf nccl
install_cusparselt_063
ldconfig
}
function prune_126 {
echo "Pruning CUDA 12.6"
#####################################################################################
# CUDA 12.6 prune static libs
#####################################################################################
export NVPRUNE="/usr/local/cuda-12.6/bin/nvprune"
export CUDA_LIB_DIR="/usr/local/cuda-12.6/lib64"
export GENCODE="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90"
export GENCODE_CUDNN="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90"
if [[ -n "$OVERRIDE_GENCODE" ]]; then
export GENCODE=$OVERRIDE_GENCODE
fi
if [[ -n "$OVERRIDE_GENCODE_CUDNN" ]]; then
export GENCODE_CUDNN=$OVERRIDE_GENCODE_CUDNN
fi
# all CUDA libs except CuDNN and CuBLAS
ls $CUDA_LIB_DIR/ | grep "\.a" | grep -v "culibos" | grep -v "cudart" | grep -v "cudnn" | grep -v "cublas" | grep -v "metis" \
| xargs -I {} bash -c \
"echo {} && $NVPRUNE $GENCODE $CUDA_LIB_DIR/{} -o $CUDA_LIB_DIR/{}"
# prune CuDNN and CuBLAS
$NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublas_static.a -o $CUDA_LIB_DIR/libcublas_static.a
$NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublasLt_static.a -o $CUDA_LIB_DIR/libcublasLt_static.a
#####################################################################################
# CUDA 12.6 prune visual tools
#####################################################################################
export CUDA_BASE="/usr/local/cuda-12.6/"
rm -rf $CUDA_BASE/libnvvp $CUDA_BASE/nsightee_plugins $CUDA_BASE/nsight-compute-2024.3.2 $CUDA_BASE/nsight-systems-2024.5.1/
}
function install_128 {
CUDNN_VERSION=9.7.1.26
echo "Installing CUDA 12.8.0 and cuDNN ${CUDNN_VERSION} and NCCL ${NCCL_VERSION} and cuSparseLt-0.6.3"
rm -rf /usr/local/cuda-12.8 /usr/local/cuda
# install CUDA 12.8.0 in the same container
wget -q https://developer.download.nvidia.com/compute/cuda/12.8.0/local_installers/cuda_12.8.0_570.86.10_linux_sbsa.run
chmod +x cuda_12.8.0_570.86.10_linux_sbsa.run
./cuda_12.8.0_570.86.10_linux_sbsa.run --toolkit --silent
rm -f cuda_12.8.0_570.86.10_linux_sbsa.run
rm -f /usr/local/cuda && ln -s /usr/local/cuda-12.8 /usr/local/cuda
# cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
mkdir tmp_cudnn && cd tmp_cudnn
wget -q https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-sbsa/cudnn-linux-sbsa-${CUDNN_VERSION}_cuda12-archive.tar.xz -O cudnn-linux-sbsa-${CUDNN_VERSION}_cuda12-archive.tar.xz
tar xf cudnn-linux-sbsa-${CUDNN_VERSION}_cuda12-archive.tar.xz
cp -a cudnn-linux-sbsa-${CUDNN_VERSION}_cuda12-archive/include/* /usr/local/cuda/include/
cp -a cudnn-linux-sbsa-${CUDNN_VERSION}_cuda12-archive/lib/* /usr/local/cuda/lib64/
cd ..
rm -rf tmp_cudnn
# NCCL license: https://docs.nvidia.com/deeplearning/nccl/#licenses
# Follow build: https://github.com/NVIDIA/nccl/tree/master?tab=readme-ov-file#build
git clone -b ${NCCL_VERSION} --depth 1 https://github.com/NVIDIA/nccl.git
cd nccl && make -j src.build
cp -a build/include/* /usr/local/cuda/include/
cp -a build/lib/* /usr/local/cuda/lib64/
cd ..
rm -rf nccl
install_cusparselt_063
ldconfig
}
# idiomatic parameter and option handling in sh
while test $# -gt 0
do
case "$1" in
12.4) install_124; prune_124
;;
12.6) install_126; prune_126
;;
12.8) install_128;
;;
*) echo "bad argument $1"; exit 1
;;
esac
shift
done
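
This aarch64 installer mirrors the x86_64 script above: one install_* (and, below 12.8, a prune_*) pair per supported toolkit, dispatched by the positional-argument loop, so a single container build can lay down any of the listed CUDA versions. Assuming the file keeps an install_cuda_aarch64.sh name (the scraped diff drops the path), a build step would invoke it as:

    bash install_cuda_aarch64.sh 12.6   # runs install_126, then prune_126
    bash install_cuda_aarch64.sh 12.8   # runs install_128 only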


@@ -0,0 +1,26 @@
#!/bin/bash
if [[ -n "${CUDNN_VERSION}" ]]; then
# cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
mkdir tmp_cudnn
pushd tmp_cudnn
if [[ ${CUDA_VERSION:0:4} == "12.8" ]]; then
CUDNN_NAME="cudnn-linux-x86_64-9.7.1.26_cuda12-archive"
elif [[ ${CUDA_VERSION:0:4} == "12.6" ]]; then
CUDNN_NAME="cudnn-linux-x86_64-9.5.1.17_cuda12-archive"
elif [[ ${CUDA_VERSION:0:2} == "12" ]]; then
CUDNN_NAME="cudnn-linux-x86_64-9.1.0.70_cuda12-archive"
elif [[ ${CUDA_VERSION:0:2} == "11" ]]; then
CUDNN_NAME="cudnn-linux-x86_64-9.1.0.70_cuda11-archive"
else
echo "Unsupported CUDA version ${CUDA_VERSION}"
exit 1
fi
curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/${CUDNN_NAME}.tar.xz
tar xf ${CUDNN_NAME}.tar.xz
cp -a ${CUDNN_NAME}/include/* /usr/local/cuda/include/
cp -a ${CUDNN_NAME}/lib/* /usr/local/cuda/lib64/
popd
rm -rf tmp_cudnn
ldconfig
fi


@ -5,21 +5,13 @@ set -ex
 # cuSPARSELt license: https://docs.nvidia.com/cuda/cusparselt/license.html
 mkdir tmp_cusparselt && cd tmp_cusparselt
-if [[ ${CUDA_VERSION:0:4} =~ "13" ]]; then
+if [[ ${CUDA_VERSION:0:4} =~ ^12\.[5-8]$ ]]; then
     arch_path='sbsa'
     export TARGETARCH=${TARGETARCH:-$(uname -m)}
     if [ ${TARGETARCH} = 'amd64' ] || [ "${TARGETARCH}" = 'x86_64' ]; then
         arch_path='x86_64'
     fi
-    CUSPARSELT_NAME="libcusparse_lt-linux-${arch_path}-0.8.0.4_cuda13-archive"
-    curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-${arch_path}/${CUSPARSELT_NAME}.tar.xz
-elif [[ ${CUDA_VERSION:0:4} =~ ^12\.[5-9]$ ]]; then
-    arch_path='sbsa'
-    export TARGETARCH=${TARGETARCH:-$(uname -m)}
-    if [ ${TARGETARCH} = 'amd64' ] || [ "${TARGETARCH}" = 'x86_64' ]; then
-        arch_path='x86_64'
-    fi
-    CUSPARSELT_NAME="libcusparse_lt-linux-${arch_path}-0.7.1.0-archive"
+    CUSPARSELT_NAME="libcusparse_lt-linux-${arch_path}-0.6.3.2-archive"
     curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-${arch_path}/${CUSPARSELT_NAME}.tar.xz
 elif [[ ${CUDA_VERSION:0:4} == "12.4" ]]; then
     arch_path='sbsa'
@ -29,6 +21,9 @@ elif [[ ${CUDA_VERSION:0:4} == "12.4" ]]; then
     fi
     CUSPARSELT_NAME="libcusparse_lt-linux-${arch_path}-0.6.2.3-archive"
     curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-${arch_path}/${CUSPARSELT_NAME}.tar.xz
+elif [[ ${CUDA_VERSION:0:4} == "11.8" ]]; then
+    CUSPARSELT_NAME="libcusparse_lt-linux-x86_64-0.4.0.7-archive"
+    curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-x86_64/${CUSPARSELT_NAME}.tar.xz
 else
     echo "Not sure which libcusparselt version to install for this ${CUDA_VERSION}"
 fi
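The branch selection above leans on bash substring expansion; a standalone sketch of the idiom:
# Sketch: ${VAR:offset:length} slices a string, which is how "12.6.3" reaches the 12.6 branch
CUDA_VERSION="12.6.3"
echo "${CUDA_VERSION:0:4}"   # prints 12.6
echo "${CUDA_VERSION:0:2}"   # prints 12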

.ci/docker/common/install_db.sh Executable file
View File

@ -0,0 +1,38 @@
#!/bin/bash
set -ex
install_ubuntu() {
apt-get update
# Cleanup
apt-get autoclean && apt-get clean
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
}
install_centos() {
# Need EPEL for many packages we depend on.
# See http://fedoraproject.org/wiki/EPEL
yum --enablerepo=extras install -y epel-release
# Cleanup
yum clean all
rm -rf /var/cache/yum
rm -rf /var/lib/yum/yumdb
rm -rf /var/lib/yum/history
}
# Install base packages depending on the base OS
ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"')
case "$ID" in
ubuntu)
install_ubuntu
;;
centos)
install_centos
;;
*)
echo "Unable to determine OS..."
exit 1
;;
esac
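The ID probe above uses a PCRE look-behind; since /etc/os-release is itself valid shell, sourcing it in a subshell is an equivalent sketch of the same check (illustrative only):
# Sketch: portable variant of the distro probe
ID=$(. /etc/os-release && echo "$ID")
echo "Detected base OS: ${ID}"   # e.g. ubuntu or centos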

View File

@ -13,7 +13,7 @@ clone_executorch() {
   # and fetch the target commit
   pushd executorch
   git checkout "${EXECUTORCH_PINNED_COMMIT}"
-  git submodule update --init --recursive
+  git submodule update --init
   popd
   chown -R jenkins executorch
@ -42,27 +42,23 @@ install_pip_dependencies() {
   # A workaround, ExecuTorch has moved to numpy 2.0 which is not compatible with the current
   # numba and scipy version used in PyTorch CI
   conda_run pip uninstall -y numba scipy
-  # Yaspin is needed for running CI test (get_benchmark_analysis_data.py)
-  pip_install yaspin==3.1.0
   popd
 }
 setup_executorch() {
-  export PYTHON_EXECUTABLE=python
-  export CMAKE_ARGS="-DEXECUTORCH_BUILD_PYBIND=ON -DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON -DEXECUTORCH_BUILD_TESTS=ON"
-  as_jenkins .ci/scripts/setup-linux.sh --build-tool cmake || true
+  pushd executorch
+  export PYTHON_EXECUTABLE=python
+  export EXECUTORCH_BUILD_PYBIND=ON
+  export CMAKE_ARGS="-DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON"
+  as_jenkins .ci/scripts/setup-linux.sh cmake || true
+  popd
 }
-if [ $# -eq 0 ]; then
-  clone_executorch
-  install_buck2
-  install_conda_dependencies
-  install_pip_dependencies
-  pushd executorch
-  setup_executorch
-  popd
-else
-  "$@"
-fi
+clone_executorch
+install_buck2
+install_conda_dependencies
+install_pip_dependencies
+setup_executorch

View File

@ -17,7 +17,7 @@ if [ -n "${UBUNTU_VERSION}" ];then
     libopenblas-dev libeigen3-dev libatlas-base-dev libzstd-dev
 fi
-pip_install numpy scipy imageio cmake ninja
+conda_install numpy scipy imageio cmake ninja
 git clone --depth 1 --branch release/16.x --recursive https://github.com/llvm/llvm-project.git
 cmake -DCMAKE_BUILD_TYPE=Release \
@ -35,9 +35,7 @@ git clone https://github.com/halide/Halide.git
 pushd Halide
 git checkout ${COMMIT} && git submodule update --init --recursive
 pip_install -r requirements.txt
-# NOTE: pybind has a requirement for cmake > 3.5 so set the minimum cmake version here with a flag
-# Context: https://github.com/pytorch/pytorch/issues/150420
-cmake -G Ninja -DCMAKE_POLICY_VERSION_MINIMUM=3.5 -DCMAKE_BUILD_TYPE=Release -S . -B build
+cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -S . -B build
 cmake --build build
 test -e ${CONDA_PREFIX}/lib/python3 || ln -s python${ANACONDA_PYTHON_VERSION} ${CONDA_PREFIX}/lib/python3
 cmake --install build --prefix ${CONDA_PREFIX}

View File

@ -5,42 +5,28 @@ set -ex
 source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh"
 function install_huggingface() {
-  pip_install -r huggingface-requirements.txt
+  local version
+  commit=$(get_pinned_commit huggingface)
+  pip_install "git+https://github.com/huggingface/transformers@${commit}"
 }
 function install_timm() {
   local commit
   commit=$(get_pinned_commit timm)
+  # TODO (huydhn): There is no torchvision release on 3.13 when I write this, so
+  # I'm using nightly here instead. We just need to package to be able to install
+  # TIMM. Removing this once vision has a release on 3.13
+  if [[ "${ANACONDA_PYTHON_VERSION}" == "3.13" ]]; then
+    pip_install --pre torch torchvision --index-url https://download.pytorch.org/whl/nightly/cu124
+  fi
   pip_install "git+https://github.com/huggingface/pytorch-image-models@${commit}"
-}
-
-function install_torchbench() {
-  local commit
-  commit=$(get_pinned_commit torchbench)
-  git clone https://github.com/pytorch/benchmark torchbench
-  pushd torchbench
-  git checkout "$commit"
-  python install.py --continue_on_fail
-  echo "Print all dependencies after TorchBench is installed"
-  python -mpip freeze
-  popd
-  chown -R jenkins torchbench
-  chown -R jenkins /opt/conda
+  # Clean up
+  conda_run pip uninstall -y cmake torch torchvision triton
 }
 # Pango is needed for weasyprint which is needed for doctr
 conda_install pango
-# Stable packages are ok here, just to satisfy TorchBench check
-pip_install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu128
-install_torchbench
 install_huggingface
 install_timm
-# Clean up
-conda_run pip uninstall -y torch torchvision torchaudio triton torchao

View File

@ -2,6 +2,8 @@
 set -ex
+source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh"
 if [ -n "${UBUNTU_VERSION}" ]; then
   apt update
   apt-get install -y clang doxygen git graphviz nodejs npm libtinfo5
@ -13,8 +15,8 @@ chown -R jenkins pytorch
 pushd pytorch
 # Install all linter dependencies
-pip install -r requirements.txt
-lintrunner init
+pip_install -r requirements.txt
+conda_run lintrunner init
 # Cache .lintbin directory as part of the Docker image
 cp -r .lintbin /tmp

View File

@ -1,23 +1,26 @@
 #!/usr/bin/env bash
-# Script that installs magma from tarball inside conda environment.
-# It replaces anaconda magma-cuda package which is no longer published.
-# Execute it inside active conda environment.
-# See issue: https://github.com/pytorch/pytorch/issues/138506
+# Script that replaces the magma install from a conda package
 set -eou pipefail
-cuda_version_nodot=${1/./}
-anaconda_dir=${CONDA_PREFIX:-"$(dirname $(which conda))/../"}
-MAGMA_VERSION="2.6.1"
-magma_archive="magma-cuda${cuda_version_nodot}-${MAGMA_VERSION}-1.tar.bz2"
-(
-    set -x
-    tmp_dir=$(mktemp -d)
-    pushd ${tmp_dir}
-    curl -OLs https://ossci-linux.s3.us-east-1.amazonaws.com/${magma_archive}
-    tar -xvf "${magma_archive}"
-    mv include/* "${anaconda_dir}/include/"
-    mv lib/* "${anaconda_dir}/lib"
-    popd
-)
+function do_install() {
+    cuda_version_nodot=${1/./}
+    anaconda_python_version=$2
+    MAGMA_VERSION="2.6.1"
+    magma_archive="magma-cuda${cuda_version_nodot}-${MAGMA_VERSION}-1.tar.bz2"
+    anaconda_dir="/opt/conda/envs/py_${anaconda_python_version}"
+    (
+        set -x
+        tmp_dir=$(mktemp -d)
+        pushd ${tmp_dir}
+        curl -OLs https://ossci-linux.s3.us-east-1.amazonaws.com/${magma_archive}
+        tar -xvf "${magma_archive}"
+        mv include/* "${anaconda_dir}/include/"
+        mv lib/* "${anaconda_dir}/lib"
+        popd
+    )
+}
+do_install $1 $2
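A sketch of how the two positional arguments flow through do_install (the script filename is an assumption):
# Hypothetical invocation: ${1/./} strips the dot, so 12.6 selects the magma-cuda126 archive
bash install_magma_conda.sh 12.6 3.10   # copies headers and libs into /opt/conda/envs/py_3.10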

View File

@ -1,28 +0,0 @@
#!/bin/bash
set -ex
NCCL_VERSION=""
if [[ ${CUDA_VERSION:0:2} == "11" ]]; then
NCCL_VERSION=$(cat ci_commit_pins/nccl-cu11.txt)
elif [[ ${CUDA_VERSION:0:2} == "12" ]]; then
NCCL_VERSION=$(cat ci_commit_pins/nccl-cu12.txt)
elif [[ ${CUDA_VERSION:0:2} == "13" ]]; then
NCCL_VERSION=$(cat ci_commit_pins/nccl-cu13.txt)
else
echo "Unexpected CUDA_VERSION ${CUDA_VERSION}"
exit 1
fi
if [[ -n "${NCCL_VERSION}" ]]; then
# NCCL license: https://docs.nvidia.com/deeplearning/nccl/#licenses
# Follow build: https://github.com/NVIDIA/nccl/tree/master?tab=readme-ov-file#build
git clone -b $NCCL_VERSION --depth 1 https://github.com/NVIDIA/nccl.git
pushd nccl
make -j src.build
cp -a build/include/* /usr/local/cuda/include/
cp -a build/lib/* /usr/local/cuda/lib64/
popd
rm -rf nccl
ldconfig
fi
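Each ci_commit_pins/nccl-cu*.txt file read above holds a single NCCL git tag; a sketch of the flow with a hypothetical tag value:
# Sketch: the pin file contains one tag (the value below is illustrative, not the real pin)
echo "v2.21.5-1" > ci_commit_pins/nccl-cu12.txt
NCCL_VERSION=$(cat ci_commit_pins/nccl-cu12.txt)
git clone -b "$NCCL_VERSION" --depth 1 https://github.com/NVIDIA/nccl.git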

View File

@ -4,15 +4,10 @@ set -ex
 [ -n "$NINJA_VERSION" ]
-arch=$(uname -m)
-if [ "$arch" == "aarch64" ]; then
-  url="https://github.com/ninja-build/ninja/releases/download/v${NINJA_VERSION}/ninja-linux-aarch64.zip"
-else
-  url="https://github.com/ninja-build/ninja/releases/download/v${NINJA_VERSION}/ninja-linux.zip"
-fi
+url="https://github.com/ninja-build/ninja/releases/download/v${NINJA_VERSION}/ninja-linux.zip"
 pushd /tmp
 wget --no-verbose --output-document=ninja-linux.zip "$url"
 unzip ninja-linux.zip -d /usr/local/bin
 rm -f ninja-linux.zip
 popd

View File

@ -8,6 +8,16 @@ retry () {
   "$@" || (sleep 10 && "$@") || (sleep 20 && "$@") || (sleep 40 && "$@")
 }
+# A bunch of custom pip dependencies for ONNX
+pip_install \
+  beartype==0.15.0 \
+  filelock==3.9.0 \
+  flatbuffers==2.0 \
+  mock==5.0.1 \
+  ninja==1.10.2 \
+  networkx==2.5 \
+  numpy==1.24.2
 # ONNXRuntime should be installed before installing
 # onnx-weekly. Otherwise, onnx-weekly could be
 # overwritten by onnx.
@ -19,8 +29,12 @@ pip_install \
   transformers==4.36.2
 pip_install coloredlogs packaging
-pip_install onnxruntime==1.23.0
-pip_install onnxscript==0.5.3
+pip_install onnxruntime==1.18.1
+pip_install onnx==1.17.0
+pip_install onnxscript==0.1.0 --no-deps
+# required by onnxscript
+pip_install ml_dtypes
 # Cache the transformers model to be used later by ONNX tests. We need to run the transformers
 # package to download the model. By default, the model is cached at ~/.cache/huggingface/hub/

.ci/docker/common/install_openblas.sh Executable file → Normal file
View File

@ -3,12 +3,10 @@
 set -ex
-OPENBLAS_VERSION=${OPENBLAS_VERSION:-"v0.3.30"}
-
-# Clone OpenBLAS
-git clone https://github.com/OpenMathLib/OpenBLAS.git -b "${OPENBLAS_VERSION}" --depth 1 --shallow-submodules
-OPENBLAS_CHECKOUT_DIR="OpenBLAS"
+cd /
+git clone https://github.com/OpenMathLib/OpenBLAS.git -b v0.3.28 --depth 1 --shallow-submodules
 OPENBLAS_BUILD_FLAGS="
 NUM_THREADS=128
 USE_OPENMP=1
@ -16,10 +14,9 @@ NO_SHARED=0
 DYNAMIC_ARCH=1
 TARGET=ARMV8
 CFLAGS=-O3
-BUILD_BFLOAT16=1
 "
-make -j8 ${OPENBLAS_BUILD_FLAGS} -C $OPENBLAS_CHECKOUT_DIR
-sudo make install -C $OPENBLAS_CHECKOUT_DIR
-rm -rf $OPENBLAS_CHECKOUT_DIR
+OPENBLAS_CHECKOUT_DIR="OpenBLAS"
+make -j8 ${OPENBLAS_BUILD_FLAGS} -C ${OPENBLAS_CHECKOUT_DIR}
+make -j8 ${OPENBLAS_BUILD_FLAGS} install -C ${OPENBLAS_CHECKOUT_DIR}

View File

@ -0,0 +1,19 @@
#!/bin/bash
set -ex
pb_dir="/usr/temp_pb_install_dir"
mkdir -p $pb_dir
# On the nvidia/cuda:9-cudnn7-devel-centos7 image we need this symlink or
# else it will fail with
# g++: error: ./../lib64/crti.o: No such file or directory
ln -s /usr/lib64 "$pb_dir/lib64"
curl -LO "https://github.com/protocolbuffers/protobuf/releases/download/v3.17.3/protobuf-all-3.17.3.tar.gz" --retry 3
tar -xvz --no-same-owner -C "$pb_dir" --strip-components 1 -f protobuf-all-3.17.3.tar.gz
NPROC=$(($(nproc) - 2))
pushd "$pb_dir" && ./configure && make -j${NPROC} && make -j${NPROC} check && sudo make -j${NPROC} install && sudo ldconfig
popd
rm -rf $pb_dir
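An illustrative check that the protobuf toolchain installed cleanly (not part of the script above):
# Sketch: verify the compiler version and location after the install
protoc --version   # expected: libprotoc 3.17.3
which protoc       # typically /usr/local/bin/protoc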

View File

@ -1,15 +0,0 @@
#!/bin/bash
set -ex
apt-get update
# Use deadsnakes in case we need an older python version
sudo add-apt-repository ppa:deadsnakes/ppa
apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python3-pip python${PYTHON_VERSION}-venv
# Use a venv because uv and some other package managers don't support --user install
ln -s /usr/bin/python${PYTHON_VERSION} /usr/bin/python
python -m venv /var/lib/jenkins/ci_env
source /var/lib/jenkins/ci_env/bin/activate
python -mpip install --upgrade pip
python -mpip install -r /opt/requirements-ci.txt
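For reference, a sketch of how a later CI step would reuse this environment (paths taken from the script above):
# Sketch: subsequent steps activate the venv, or call its interpreter directly
source /var/lib/jenkins/ci_env/bin/activate
python -m pip list   # shows the packages installed from /opt/requirements-ci.txt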

View File

@ -2,22 +2,19 @@
 set -ex
-# for pip_install function
-source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh"
-
-ROCM_COMPOSABLE_KERNEL_VERSION="$(cat $(dirname $0)/../ci_commit_pins/rocm-composable-kernel.txt)"
 ver() {
     printf "%3d%03d%03d%03d" $(echo "$1" | tr '.' ' ');
 }
 install_ubuntu() {
     apt-get update
-    # gpg-agent is not available by default
-    apt-get install -y --no-install-recommends gpg-agent
-    if [[ $(ver $UBUNTU_VERSION) -ge $(ver 22.04) ]]; then
-        echo -e 'Package: *\nPin: release o=repo.radeon.com\nPin-Priority: 600' \
-            | sudo tee /etc/apt/preferences.d/rocm-pin-600
+    if [[ $UBUNTU_VERSION == 18.04 ]]; then
+      # gpg-agent is not available by default on 18.04
+      apt-get install -y --no-install-recommends gpg-agent
+    fi
+    if [[ $UBUNTU_VERSION == 20.04 ]]; then
+      # gpg-agent is not available by default on 20.04
+      apt-get install -y --no-install-recommends gpg-agent
     fi
     apt-get install -y kmod
     apt-get install -y wget
@ -26,28 +23,13 @@ install_ubuntu() {
     apt-get install -y libc++1
     apt-get install -y libc++abi1
-    # Make sure rocm packages from repo.radeon.com have highest priority
-    cat << EOF > /etc/apt/preferences.d/rocm-pin-600
-Package: *
-Pin: release o=repo.radeon.com
-Pin-Priority: 600
-EOF
-    # we want the patch version of 6.4 instead
-    if [[ $(ver $ROCM_VERSION) -eq $(ver 6.4) ]]; then
-        ROCM_VERSION="${ROCM_VERSION}.2"
-    fi
-    # Default url values
-    rocm_baseurl="http://repo.radeon.com/rocm/apt/${ROCM_VERSION}"
-    amdgpu_baseurl="https://repo.radeon.com/amdgpu/${ROCM_VERSION}/ubuntu"
     # Add amdgpu repository
     UBUNTU_VERSION_NAME=`cat /etc/os-release | grep UBUNTU_CODENAME | awk -F= '{print $2}'`
-    echo "deb [arch=amd64] ${amdgpu_baseurl} ${UBUNTU_VERSION_NAME} main" > /etc/apt/sources.list.d/amdgpu.list
+    echo "deb [arch=amd64] https://repo.radeon.com/amdgpu/${ROCM_VERSION}/ubuntu ${UBUNTU_VERSION_NAME} main" > /etc/apt/sources.list.d/amdgpu.list
     # Add rocm repository
     wget -qO - http://repo.radeon.com/rocm/rocm.gpg.key | apt-key add -
+    local rocm_baseurl="http://repo.radeon.com/rocm/apt/${ROCM_VERSION}"
     echo "deb [arch=amd64] ${rocm_baseurl} ${UBUNTU_VERSION_NAME} main" > /etc/apt/sources.list.d/rocm.list
     apt-get update --allow-insecure-repositories
@ -81,39 +63,21 @@ EOF
     done
     # ROCm 6.3 had a regression where initializing static code objects had significant overhead
-    # CI no longer builds for ROCm 6.3, but
-    # ROCm 6.4 did not yet fix the regression, also HIP branch names are different
-    if [[ $(ver $ROCM_VERSION) -ge $(ver 6.4) ]] && [[ $(ver $ROCM_VERSION) -lt $(ver 7.0) ]]; then
-        if [[ $(ver $ROCM_VERSION) -eq $(ver 6.4.2) ]]; then
-            HIP_TAG=rocm-6.4.2
-            CLR_HASH=74d78ba3ac4bac235d02bcb48511c30b5cfdd457 # branch release/rocm-rel-6.4.2-statco-hotfix
-        elif [[ $(ver $ROCM_VERSION) -eq $(ver 6.4.1) ]]; then
-            HIP_TAG=rocm-6.4.1
-            CLR_HASH=efe6c35790b9206923bfeed1209902feff37f386 # branch release/rocm-rel-6.4.1-statco-hotfix
-        elif [[ $(ver $ROCM_VERSION) -eq $(ver 6.4) ]]; then
-            HIP_TAG=rocm-6.4.0
-            CLR_HASH=600f5b0d2baed94d5121e2174a9de0851b040b0c # branch release/rocm-rel-6.4-statco-hotfix
-        fi
+    if [[ $(ver $ROCM_VERSION) -eq $(ver 6.3) ]]; then
         # clr build needs CppHeaderParser but can only find it using conda's python
-        python -m pip install CppHeaderParser
-        git clone https://github.com/ROCm/HIP -b $HIP_TAG
+        /opt/conda/bin/python -m pip install CppHeaderParser
+        git clone https://github.com/ROCm/HIP -b rocm-6.3.x
         HIP_COMMON_DIR=$(readlink -f HIP)
-        git clone https://github.com/jeffdaily/clr
-        pushd clr
-        git checkout $CLR_HASH
-        popd
+        git clone https://github.com/jeffdaily/clr -b release/rocm-rel-6.3-statco-hotfix
         mkdir -p clr/build
         pushd clr/build
-        # Need to point CMake to the correct python installation to find CppHeaderParser
-        cmake .. -DPython3_EXECUTABLE=/opt/conda/envs/py_${ANACONDA_PYTHON_VERSION}/bin/python3 -DCLR_BUILD_HIP=ON -DHIP_COMMON_DIR=$HIP_COMMON_DIR
+        cmake .. -DCLR_BUILD_HIP=ON -DHIP_COMMON_DIR=$HIP_COMMON_DIR
         make -j
-        cp hipamd/lib/libamdhip64.so.6.4.* /opt/rocm/lib/libamdhip64.so.6.4.*
+        cp hipamd/lib/libamdhip64.so.6.3.* /opt/rocm/lib/libamdhip64.so.6.3.*
         popd
         rm -rf HIP clr
     fi
-    pip_install "git+https://github.com/rocm/composable_kernel@$ROCM_COMPOSABLE_KERNEL_VERSION"
     # Cleanup
     apt-get autoclean && apt-get clean
     rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
@ -177,8 +141,6 @@ install_centos() {
     sqlite3 $kdb "PRAGMA journal_mode=off; PRAGMA VACUUM;"
   done
-  pip_install "git+https://github.com/rocm/composable_kernel@$ROCM_COMPOSABLE_KERNEL_VERSION"
   # Cleanup
   yum clean all
   rm -rf /var/cache/yum

View File

@ -115,7 +115,7 @@ index a5007ffc..13fa07fc 100644
     if (!fp) {
 -        fprintf(stderr, "%s: %s\n", AMDGPU_ASIC_ID_TABLE,
 -                strerror(errno));
-+        //fprintf(stderr, "amdgpu.ids: No such file or directory\n");
++        fprintf(stderr, "amdgpu.ids: No such file or directory\n");
         return;
     }

View File

@ -1,37 +1,50 @@
-#!/usr/bin/env bash
-# Script used only in CD pipeline
-set -eou pipefail
-function do_install() {
-    rocm_version=$1
-    if [[ ${rocm_version} =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
-        # chop off any patch version
-        rocm_version="${rocm_version%.*}"
-    fi
-    rocm_version_nodot=${rocm_version//./}
-    # https://github.com/icl-utk-edu/magma/pull/65
-    MAGMA_VERSION=d6e4117bc88e73f06d26c6c2e14f064e8fc3d1ec
-    magma_archive="magma-rocm${rocm_version_nodot}-${MAGMA_VERSION}-1.tar.bz2"
-    rocm_dir="/opt/rocm"
-    (
-        set -x
-        tmp_dir=$(mktemp -d)
-        pushd ${tmp_dir}
-        curl -OLs https://ossci-linux.s3.us-east-1.amazonaws.com/${magma_archive}
-        if tar -xvf "${magma_archive}"
-        then
-            mkdir -p "${rocm_dir}/magma"
-            mv include "${rocm_dir}/magma/include"
-            mv lib "${rocm_dir}/magma/lib"
-        else
-            echo "${magma_archive} not found, skipping magma install"
-        fi
-        popd
-    )
-}
-do_install $1
+#!/bin/bash
+# Script used in CI and CD pipeline
+set -ex
+# Magma build scripts need `python`
+ln -sf /usr/bin/python3 /usr/bin/python
+ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"')
+case "$ID" in
+  almalinux)
+    yum install -y gcc-gfortran
+    ;;
+  *)
+    echo "No preinstalls to build magma..."
+    ;;
+esac
+MKLROOT=${MKLROOT:-/opt/conda/envs/py_$ANACONDA_PYTHON_VERSION}
+# "install" hipMAGMA into /opt/rocm/magma by copying after build
+git clone https://bitbucket.org/icl/magma.git
+pushd magma
+# Version 2.7.2 + ROCm related updates
+git checkout a1625ff4d9bc362906bd01f805dbbe12612953f6
+cp make.inc-examples/make.inc.hip-gcc-mkl make.inc
+echo 'LIBDIR += -L$(MKLROOT)/lib' >> make.inc
+if [[ -f "${MKLROOT}/lib/libmkl_core.a" ]]; then
+    echo 'LIB = -Wl,--start-group -lmkl_gf_lp64 -lmkl_gnu_thread -lmkl_core -Wl,--end-group -lpthread -lstdc++ -lm -lgomp -lhipblas -lhipsparse' >> make.inc
+fi
+echo 'LIB += -Wl,--enable-new-dtags -Wl,--rpath,/opt/rocm/lib -Wl,--rpath,$(MKLROOT)/lib -Wl,--rpath,/opt/rocm/magma/lib -ldl' >> make.inc
+echo 'DEVCCFLAGS += --gpu-max-threads-per-block=256' >> make.inc
+export PATH="${PATH}:/opt/rocm/bin"
+if [[ -n "$PYTORCH_ROCM_ARCH" ]]; then
+    amdgpu_targets=`echo $PYTORCH_ROCM_ARCH | sed 's/;/ /g'`
+else
+    amdgpu_targets=`rocm_agent_enumerator | grep -v gfx000 | sort -u | xargs`
+fi
+for arch in $amdgpu_targets; do
+    echo "DEVCCFLAGS += --offload-arch=$arch" >> make.inc
+done
+# hipcc with openmp flag may cause isnan() on __device__ not to be found; depending on context, compiler may attempt to match with host definition
+sed -i 's/^FOPENMP/#FOPENMP/g' make.inc
+make -f make.gen.hipMAGMA -j $(nproc)
+LANG=C.UTF-8 make lib/libmagma.so -j $(nproc) MKLROOT="${MKLROOT}"
+make testing/testing_dgemm -j $(nproc) MKLROOT="${MKLROOT}"
+popd
+mv magma /opt/rocm

View File

@ -0,0 +1,24 @@
#!/bin/bash
set -ex
[ -n "${SWIFTSHADER}" ]
retry () {
$* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*)
}
_https_amazon_aws=https://ossci-android.s3.amazonaws.com
# SwiftShader
_swiftshader_dir=/var/lib/jenkins/swiftshader
_swiftshader_file_targz=swiftshader-abe07b943-prebuilt.tar.gz
mkdir -p $_swiftshader_dir
_tmp_swiftshader_targz="/tmp/${_swiftshader_file_targz}"
curl --silent --show-error --location --fail --retry 3 \
--output "${_tmp_swiftshader_targz}" "$_https_amazon_aws/${_swiftshader_file_targz}"
tar -C "${_swiftshader_dir}" -xzf "${_tmp_swiftshader_targz}"
export VK_ICD_FILENAMES="${_swiftshader_dir}/build/Linux/vk_swiftshader_icd.json"
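The retry helper above is a plain backoff wrapper; a standalone sketch of the same pattern (the URL is illustrative only):
# Sketch: retry with 1/2/4/8-second backoff before giving up
retry () {
  $* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*)
}
retry curl -fsSL -o /tmp/artifact.tar.gz https://example.com/artifact.tar.gz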

View File

@ -2,16 +2,14 @@
 set -ex
-mkdir -p /opt/triton
-if [ -z "${TRITON}" ] && [ -z "${TRITON_CPU}" ]; then
-  echo "TRITON and TRITON_CPU are not set. Exiting..."
-  exit 0
-fi
 source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh"
-get_pip_version() {
-  conda_run pip list | grep -w $* | head -n 1 | awk '{print $2}'
+get_conda_version() {
+  as_jenkins conda list -n py_$ANACONDA_PYTHON_VERSION | grep -w $* | head -n 1 | awk '{print $2}'
+}
+
+conda_reinstall() {
+  as_jenkins conda install -q -n py_$ANACONDA_PYTHON_VERSION -y --force-reinstall $*
 }
 if [ -n "${XPU_VERSION}" ]; then
@ -33,9 +31,11 @@ if [ -n "${UBUNTU_VERSION}" ];then
   apt-get install -y gpg-agent
 fi
-# Keep the current cmake and numpy version here, so we can reinstall them later
-CMAKE_VERSION=$(get_pip_version cmake)
-NUMPY_VERSION=$(get_pip_version numpy)
+if [ -n "${CONDA_CMAKE}" ]; then
+  # Keep the current cmake and numpy version here, so we can reinstall them later
+  CMAKE_VERSION=$(get_conda_version cmake)
+  NUMPY_VERSION=$(get_conda_version numpy)
+fi
 if [ -z "${MAX_JOBS}" ]; then
   export MAX_JOBS=$(nproc)
@ -51,13 +51,7 @@ as_jenkins git clone --recursive ${TRITON_REPO} triton
 cd triton
 as_jenkins git checkout ${TRITON_PINNED_COMMIT}
 as_jenkins git submodule update --init --recursive
-# Old versions of python have setup.py in ./python; newer versions have it in ./
-if [ ! -f setup.py ]; then
-  cd python
-fi
-pip_install pybind11==3.0.1
+cd python
 # TODO: remove patch setup.py once we have a proper fix for https://github.com/triton-lang/triton/issues/4527
 as_jenkins sed -i -e 's/https:\/\/tritonlang.blob.core.windows.net\/llvm-builds/https:\/\/oaitriton.blob.core.windows.net\/public\/llvm-builds/g' setup.py
@ -66,42 +60,28 @@ if [ -n "${UBUNTU_VERSION}" ] && [ -n "${GCC_VERSION}" ] && [[ "${GCC_VERSION}"
   # Triton needs at least gcc-9 to build
   apt-get install -y g++-9
-  CXX=g++-9 conda_run python -m build --wheel --no-isolation
+  CXX=g++-9 pip_install -e .
 elif [ -n "${UBUNTU_VERSION}" ] && [ -n "${CLANG_VERSION}" ]; then
   # Triton needs <filesystem> which surprisingly is not available with clang-9 toolchain
   add-apt-repository -y ppa:ubuntu-toolchain-r/test
   apt-get install -y g++-9
-  CXX=g++-9 conda_run python -m build --wheel --no-isolation
+  CXX=g++-9 pip_install -e .
 else
-  conda_run python -m build --wheel --no-isolation
+  pip_install -e .
 fi
-# Copy the wheel to /opt for multi stage docker builds
-cp dist/*.whl /opt/triton
-# Install the wheel for docker builds that don't use multi stage
-pip_install dist/*.whl
-
-# TODO: This is to make sure that the same cmake and numpy version from install conda
-# script is used. Without this step, the newer cmake version (3.25.2) downloaded by
-# triton build step via pip will fail to detect conda MKL. Once that issue is fixed,
-# this can be removed.
-#
-# The correct numpy version also needs to be set here because conda claims that it
-# causes inconsistent environment. Without this, conda will attempt to install the
-# latest numpy version, which fails ASAN tests with the following import error: Numba
-# needs NumPy 1.20 or less.
-# Note that we install numpy with pip as conda might not have the version we want
-if [ -n "${CMAKE_VERSION}" ]; then
-  pip_install "cmake==${CMAKE_VERSION}"
-fi
-if [ -n "${NUMPY_VERSION}" ]; then
-  pip_install "numpy==${NUMPY_VERSION}"
-fi
-
-# IMPORTANT: helion needs to be installed without dependencies.
-# It depends on torch and triton. We don't want to install
-# triton and torch from production on Docker CI images
-if [[ "$ANACONDA_PYTHON_VERSION" != 3.9* ]]; then
-  pip_install helion --no-deps
-fi
+if [ -n "${CONDA_CMAKE}" ]; then
+  # TODO: This is to make sure that the same cmake and numpy version from install conda
+  # script is used. Without this step, the newer cmake version (3.25.2) downloaded by
+  # triton build step via pip will fail to detect conda MKL. Once that issue is fixed,
+  # this can be removed.
+  #
+  # The correct numpy version also needs to be set here because conda claims that it
+  # causes inconsistent environment. Without this, conda will attempt to install the
+  # latest numpy version, which fails ASAN tests with the following import error: Numba
+  # needs NumPy 1.20 or less.
+  conda_reinstall cmake="${CMAKE_VERSION}"
+  # Note that we install numpy with pip as conda might not have the version we want
+  pip_install --force-reinstall numpy=="${NUMPY_VERSION}"
+fi

View File

@ -44,12 +44,8 @@ function install_ucc() {
   ./autogen.sh
-  if [[ -n "$CUDA_VERSION" && $CUDA_VERSION == 13* ]]; then
-    NVCC_GENCODE="-gencode=arch=compute_86,code=compute_86"
-  else
-    # We only run distributed tests on Tesla M60 and A10G
-    NVCC_GENCODE="-gencode=arch=compute_52,code=sm_52 -gencode=arch=compute_86,code=compute_86"
-  fi
+  # We only run distributed tests on Tesla M60 and A10G
+  NVCC_GENCODE="-gencode=arch=compute_52,code=sm_52 -gencode=arch=compute_86,code=compute_86"
   if [[ -n "$ROCM_VERSION" ]]; then
     if [[ -n "$PYTORCH_ROCM_ARCH" ]]; then

View File

@ -0,0 +1,24 @@
#!/bin/bash
set -ex
[ -n "${VULKAN_SDK_VERSION}" ]
retry () {
$* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*)
}
_vulkansdk_dir=/var/lib/jenkins/vulkansdk
_tmp_vulkansdk_targz=/tmp/vulkansdk.tar.gz
curl \
--silent \
--show-error \
--location \
--fail \
--retry 3 \
--output "${_tmp_vulkansdk_targz}" "https://ossci-android.s3.amazonaws.com/vulkansdk-linux-x86_64-${VULKAN_SDK_VERSION}.tar.gz"
mkdir -p "${_vulkansdk_dir}"
tar -C "${_vulkansdk_dir}" -xzf "${_tmp_vulkansdk_targz}" --strip-components 1
rm -rf "${_tmp_vulkansdk_targz}"

View File

@ -26,7 +26,7 @@ function install_ubuntu() {
     wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB \
         | gpg --dearmor > /usr/share/keyrings/oneapi-archive-keyring.gpg.gpg
     echo "deb [signed-by=/usr/share/keyrings/oneapi-archive-keyring.gpg.gpg] \
-        https://apt.repos.intel.com/oneapi all main" \
+        https://apt.repos.intel.com/${XPU_REPO_NAME} all main" \
         | tee /etc/apt/sources.list.d/oneAPI.list
     # Update the packages list and repository index
@ -34,27 +34,18 @@ function install_ubuntu() {
     # The xpu-smi packages
     apt-get install -y flex bison xpu-smi
-    if [[ "${XPU_DRIVER_TYPE,,}" == "lts" ]]; then
-        # Compute and Media Runtimes
-        apt-get install -y \
-            intel-opencl-icd intel-level-zero-gpu level-zero \
-            intel-media-va-driver-non-free libmfx1 libmfxgen1 libvpl2 \
-            libegl-mesa0 libegl1-mesa libegl1-mesa-dev libgbm1 libgl1-mesa-dev libgl1-mesa-dri \
-            libglapi-mesa libgles2-mesa-dev libglx-mesa0 libigdgmm12 libxatracker2 mesa-va-drivers \
-            mesa-vdpau-drivers mesa-vulkan-drivers va-driver-all vainfo hwinfo clinfo
-        # Development Packages
-        apt-get install -y libigc-dev intel-igc-cm libigdfcl-dev libigfxcmrt-dev level-zero-dev
-    else # rolling driver
-        apt-get install -y \
-            intel-opencl-icd libze-intel-gpu1 libze1 \
-            intel-media-va-driver-non-free libmfx-gen1 libvpl2 \
-            libegl-mesa0 libegl1-mesa libegl1-mesa-dev libgbm1 libgl1-mesa-dev libgl1-mesa-dri \
-            libglapi-mesa libglx-mesa0 libigdgmm12 libxatracker2 mesa-va-drivers \
-            mesa-vdpau-drivers mesa-vulkan-drivers va-driver-all vainfo hwinfo clinfo intel-ocloc
-        apt-get install -y libigc-dev intel-igc-cm libigdfcl-dev libigfxcmrt-dev libze-dev
-    fi
+    # Compute and Media Runtimes
+    apt-get install -y \
+        intel-opencl-icd intel-level-zero-gpu level-zero \
+        intel-media-va-driver-non-free libmfx1 libmfxgen1 libvpl2 \
+        libegl-mesa0 libegl1-mesa libegl1-mesa-dev libgbm1 libgl1-mesa-dev libgl1-mesa-dri \
+        libglapi-mesa libgles2-mesa-dev libglx-mesa0 libigdgmm12 libxatracker2 mesa-va-drivers \
+        mesa-vdpau-drivers mesa-vulkan-drivers va-driver-all vainfo hwinfo clinfo
+    if [[ "${XPU_DRIVER_TYPE,,}" == "rolling" ]]; then
+        apt-get install -y intel-ocloc
+    fi
+    # Development Packages
+    apt-get install -y libigc-dev intel-igc-cm libigdfcl-dev libigfxcmrt-dev level-zero-dev
     # Install Intel Support Packages
     apt-get install -y ${XPU_PACKAGES}
@ -83,7 +74,7 @@ function install_rhel() {
 tee > /etc/yum.repos.d/oneAPI.repo << EOF
 [oneAPI]
 name=Intel for Pytorch GPU dev repository
-baseurl=https://yum.repos.intel.com/oneapi
+baseurl=https://yum.repos.intel.com/${XPU_REPO_NAME}
 enabled=1
 gpgcheck=1
 repo_gpgcheck=1
@ -127,7 +118,7 @@ function install_sles() {
         https://repositories.intel.com/gpu/sles/${VERSION_SP}${XPU_DRIVER_VERSION}/unified/intel-gpu-${VERSION_SP}.repo
     rpm --import https://repositories.intel.com/gpu/intel-graphics.key
     # To add the online network package repository for the Intel Support Packages
-    zypper addrepo https://yum.repos.intel.com/oneapi oneAPI
+    zypper addrepo https://yum.repos.intel.com/${XPU_REPO_NAME} oneAPI
     rpm --import https://yum.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB
     # The xpu-smi packages
@ -143,18 +134,18 @@ function install_sles() {
 }
-# Default use GPU driver rolling releases
-XPU_DRIVER_VERSION=""
-if [[ "${XPU_DRIVER_TYPE,,}" == "lts" ]]; then
-    # Use GPU driver LTS releases
-    XPU_DRIVER_VERSION="/lts/2350"
-fi
+# Default use GPU driver LTS releases
+XPU_DRIVER_VERSION="/lts/2350"
+if [[ "${XPU_DRIVER_TYPE,,}" == "rolling" ]]; then
+    # Use GPU driver rolling releases
+    XPU_DRIVER_VERSION=""
+fi
-# Default use Intel® oneAPI Deep Learning Essentials 2025.1
-if [[ "$XPU_VERSION" == "2025.2" ]]; then
-    XPU_PACKAGES="intel-deep-learning-essentials-2025.2"
-else
-    XPU_PACKAGES="intel-deep-learning-essentials-2025.1"
-fi
+XPU_REPO_NAME="intel-for-pytorch-gpu-dev"
+XPU_PACKAGES="intel-for-pytorch-gpu-dev-0.5 intel-pti-dev-0.9"
+if [[ "$XPU_VERSION" == "2025.0" ]]; then
+    XPU_REPO_NAME="oneapi"
+    XPU_PACKAGES="intel-deep-learning-essentials-2025.0"
+fi
 # The installation depends on the base OS

View File

@ -1,9 +0,0 @@
#!/bin/bash
set -xe
# Script used in Linux x86 and aarch64 CD pipeline
# Workaround for exposing statically linked libstdc++ CXX11 ABI symbols.
# see: https://github.com/pytorch/pytorch/issues/133437
LIBNONSHARED=$(gcc -print-file-name=libstdc++_nonshared.a)
nm -g $LIBNONSHARED | grep " T " | grep recursive_directory_iterator | cut -c 20- > weaken-symbols.txt
objcopy --weaken-symbols weaken-symbols.txt $LIBNONSHARED $LIBNONSHARED
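A sketch of verifying that the weakening took effect (illustrative; same nm flags as the script above):
# Sketch: after objcopy, the targeted symbols should show type 'W' (weak) rather than 'T'
LIBNONSHARED=$(gcc -print-file-name=libstdc++_nonshared.a)
nm -g "$LIBNONSHARED" | grep recursive_directory_iterator | head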

View File

@ -49,11 +49,18 @@ RUN bash ./install_mkl.sh && rm install_mkl.sh
 FROM cpu as cuda
 ADD ./common/install_cuda.sh install_cuda.sh
 ADD ./common/install_magma.sh install_magma.sh
-COPY ./common/install_nccl.sh install_nccl.sh
-COPY ./ci_commit_pins/nccl-cu* /ci_commit_pins/
-COPY ./common/install_cusparselt.sh install_cusparselt.sh
 ENV CUDA_HOME /usr/local/cuda
+FROM cuda as cuda11.8
+RUN bash ./install_cuda.sh 11.8
+RUN bash ./install_magma.sh 11.8
+RUN ln -sf /usr/local/cuda-11.8 /usr/local/cuda
+
+FROM cuda as cuda12.4
+RUN bash ./install_cuda.sh 12.4
+RUN bash ./install_magma.sh 12.4
+RUN ln -sf /usr/local/cuda-12.4 /usr/local/cuda
 FROM cuda as cuda12.6
 RUN bash ./install_cuda.sh 12.6
 RUN bash ./install_magma.sh 12.6
@ -64,26 +71,7 @@ RUN bash ./install_cuda.sh 12.8
 RUN bash ./install_magma.sh 12.8
 RUN ln -sf /usr/local/cuda-12.8 /usr/local/cuda
-FROM cuda as cuda12.9
-RUN bash ./install_cuda.sh 12.9
-RUN bash ./install_magma.sh 12.9
-RUN ln -sf /usr/local/cuda-12.9 /usr/local/cuda
-
-FROM cuda as cuda13.0
-RUN bash ./install_cuda.sh 13.0
-RUN bash ./install_magma.sh 13.0
-RUN ln -sf /usr/local/cuda-13.0 /usr/local/cuda
-
-# Install libibverbs for libtorch and copy to CUDA directory
-RUN apt-get update -y && \
-    apt-get install -y libibverbs-dev librdmacm-dev && \
-    cp /usr/lib/x86_64-linux-gnu/libmlx5.so* /usr/local/cuda/lib64/ && \
-    cp /usr/lib/x86_64-linux-gnu/librdmacm.so* /usr/local/cuda/lib64/ && \
-    cp /usr/lib/x86_64-linux-gnu/libibverbs.so* /usr/local/cuda/lib64/ && \
-    cp /usr/lib/x86_64-linux-gnu/libnl* /usr/local/cuda/lib64/
-
 FROM cpu as rocm
-ARG ROCM_VERSION
 ARG PYTORCH_ROCM_ARCH
 ENV PYTORCH_ROCM_ARCH ${PYTORCH_ROCM_ARCH}
 ENV MKLROOT /opt/intel
@ -98,11 +86,11 @@ ADD ./common/install_rocm_magma.sh install_rocm_magma.sh
 # gfortran and python needed for building magma from source for ROCm
 RUN apt-get update -y && \
     apt-get install gfortran -y && \
-    apt-get install python3 python-is-python3 -y && \
+    apt-get install python -y && \
     apt-get clean
 RUN bash ./install_rocm_drm.sh && rm install_rocm_drm.sh
-RUN bash ./install_rocm_magma.sh ${ROCM_VERSION} && rm install_rocm_magma.sh
+RUN bash ./install_rocm_magma.sh && rm install_rocm_magma.sh
 FROM ${BASE_TARGET} as final
 COPY --from=openssl /opt/openssl /opt/openssl

View File

@ -1,71 +1,83 @@
 #!/usr/bin/env bash
 # Script used only in CD pipeline
-set -eoux pipefail
+set -eou pipefail
 image="$1"
 shift
 if [ -z "${image}" ]; then
-  echo "Usage: $0 IMAGENAME:ARCHTAG"
+  echo "Usage: $0 IMAGE"
   exit 1
 fi
+DOCKER_IMAGE="pytorch/${image}"
 TOPDIR=$(git rev-parse --show-toplevel)
+GPU_ARCH_TYPE=${GPU_ARCH_TYPE:-cpu}
+GPU_ARCH_VERSION=${GPU_ARCH_VERSION:-}
+WITH_PUSH=${WITH_PUSH:-}
 DOCKER=${DOCKER:-docker}
-# Go from imagename:tag to tag
-DOCKER_TAG_PREFIX=$(echo "${image}" | awk -F':' '{print $2}')
-GPU_ARCH_VERSION=""
-if [[ "${DOCKER_TAG_PREFIX}" == cuda* ]]; then
-    # extract cuda version from image name. e.g. manylinux2_28-builder:cuda12.8 returns 12.8
-    GPU_ARCH_VERSION=$(echo "${DOCKER_TAG_PREFIX}" | awk -F'cuda' '{print $2}')
-elif [[ "${DOCKER_TAG_PREFIX}" == rocm* ]]; then
-    # extract rocm version from image name. e.g. manylinux2_28-builder:rocm6.2.4 returns 6.2.4
-    GPU_ARCH_VERSION=$(echo "${DOCKER_TAG_PREFIX}" | awk -F'rocm' '{print $2}')
-fi
-case ${DOCKER_TAG_PREFIX} in
+case ${GPU_ARCH_TYPE} in
     cpu)
         BASE_TARGET=cpu
+        DOCKER_TAG=cpu
         GPU_IMAGE=ubuntu:20.04
         DOCKER_GPU_BUILD_ARG=""
         ;;
-    cuda*)
+    cuda)
        BASE_TARGET=cuda${GPU_ARCH_VERSION}
+        DOCKER_TAG=cuda${GPU_ARCH_VERSION}
         GPU_IMAGE=ubuntu:20.04
         DOCKER_GPU_BUILD_ARG=""
         ;;
-    rocm*)
-        # we want the patch version of 6.4 instead
-        if [[ "$GPU_ARCH_VERSION" == *"6.4"* ]]; then
-            GPU_ARCH_VERSION="${GPU_ARCH_VERSION}.2"
-        fi
+    rocm)
         BASE_TARGET=rocm
-        GPU_IMAGE=rocm/dev-ubuntu-22.04:${GPU_ARCH_VERSION}-complete
-        PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
-        # add gfx950 conditionally starting in ROCm 7.0
-        if [[ "$GPU_ARCH_VERSION" == *"7.0"* ]]; then
-            PYTORCH_ROCM_ARCH="${PYTORCH_ROCM_ARCH};gfx950"
-        fi
-        DOCKER_GPU_BUILD_ARG="--build-arg PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH} --build-arg ROCM_VERSION=${GPU_ARCH_VERSION}"
+        DOCKER_TAG=rocm${GPU_ARCH_VERSION}
+        GPU_IMAGE=rocm/dev-ubuntu-20.04:${GPU_ARCH_VERSION}-complete
+        PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx1030;gfx1100;gfx1101;gfx942"
+        DOCKER_GPU_BUILD_ARG="--build-arg PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH}"
         ;;
     *)
-        echo "ERROR: Unrecognized DOCKER_TAG_PREFIX: ${DOCKER_TAG_PREFIX}"
+        echo "ERROR: Unrecognized GPU_ARCH_TYPE: ${GPU_ARCH_TYPE}"
         exit 1
         ;;
 esac
-tmp_tag=$(basename "$(mktemp -u)" | tr '[:upper:]' '[:lower:]')
-DOCKER_BUILDKIT=1 ${DOCKER} build \
-    --target final \
-    ${DOCKER_GPU_BUILD_ARG} \
-    --build-arg "GPU_IMAGE=${GPU_IMAGE}" \
-    --build-arg "BASE_TARGET=${BASE_TARGET}" \
-    -t "${tmp_tag}" \
-    $@ \
-    -f "${TOPDIR}/.ci/docker/libtorch/Dockerfile" \
-    "${TOPDIR}/.ci/docker/"
+(
+    set -x
+    DOCKER_BUILDKIT=1 ${DOCKER} build \
+        --target final \
+        ${DOCKER_GPU_BUILD_ARG} \
+        --build-arg "GPU_IMAGE=${GPU_IMAGE}" \
+        --build-arg "BASE_TARGET=${BASE_TARGET}" \
+        -t "${DOCKER_IMAGE}" \
+        $@ \
+        -f "${TOPDIR}/.ci/docker/libtorch/Dockerfile" \
+        "${TOPDIR}/.ci/docker/"
+)
+GITHUB_REF=${GITHUB_REF:-$(git symbolic-ref -q HEAD || git describe --tags --exact-match)}
+GIT_BRANCH_NAME=${GITHUB_REF##*/}
+GIT_COMMIT_SHA=${GITHUB_SHA:-$(git rev-parse HEAD)}
+DOCKER_IMAGE_BRANCH_TAG=${DOCKER_IMAGE}-${GIT_BRANCH_NAME}
+DOCKER_IMAGE_SHA_TAG=${DOCKER_IMAGE}-${GIT_COMMIT_SHA}
+if [[ "${WITH_PUSH}" == true ]]; then
+    (
+        set -x
+        ${DOCKER} push "${DOCKER_IMAGE}"
+        if [[ -n ${GITHUB_REF} ]]; then
+            ${DOCKER} tag ${DOCKER_IMAGE} ${DOCKER_IMAGE_BRANCH_TAG}
+            ${DOCKER} tag ${DOCKER_IMAGE} ${DOCKER_IMAGE_SHA_TAG}
+            ${DOCKER} push "${DOCKER_IMAGE_BRANCH_TAG}"
+            ${DOCKER} push "${DOCKER_IMAGE_SHA_TAG}"
+        fi
+    )
+fi
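A hedged sketch of driving this script locally (the image name and env values are illustrative; leaving WITH_PUSH unset skips the push block):
# Sketch: build a CUDA libtorch image without pushing
GPU_ARCH_TYPE=cuda GPU_ARCH_VERSION=12.8 bash build.sh libtorch-builder:cuda12.8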

View File

@ -18,31 +18,28 @@ COPY ./common/install_user.sh install_user.sh
 RUN bash ./install_user.sh && rm install_user.sh
 # Install conda and other packages (e.g., numpy, pytest)
-ARG PYTHON_VERSION
-ARG PIP_CMAKE
-# Put venv into the env vars so users don't need to activate it
-ENV PATH /var/lib/jenkins/ci_env/bin:$PATH
-ENV VIRTUAL_ENV /var/lib/jenkins/ci_env
-COPY requirements-ci.txt /opt/requirements-ci.txt
-COPY ./common/install_python.sh install_python.sh
-RUN bash ./install_python.sh && rm install_python.sh /opt/requirements-ci.txt
+ARG ANACONDA_PYTHON_VERSION
+ARG CONDA_CMAKE
+ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION
+ENV PATH /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/bin:/opt/conda/bin:$PATH
+COPY requirements-ci.txt /opt/conda/requirements-ci.txt
+COPY ./common/install_conda.sh install_conda.sh
+COPY ./common/common_utils.sh common_utils.sh
+COPY ./common/install_magma_conda.sh install_magma_conda.sh
+RUN bash ./install_conda.sh && rm install_conda.sh install_magma_conda.sh common_utils.sh /opt/conda/requirements-ci.txt
 # Install cuda and cudnn
 ARG CUDA_VERSION
 COPY ./common/install_cuda.sh install_cuda.sh
-COPY ./common/install_nccl.sh install_nccl.sh
-COPY ./ci_commit_pins/nccl-cu* /ci_commit_pins/
-COPY ./common/install_cusparselt.sh install_cusparselt.sh
-RUN bash ./install_cuda.sh ${CUDA_VERSION} && rm install_cuda.sh install_nccl.sh /ci_commit_pins/nccl-cu* install_cusparselt.sh
+RUN bash ./install_cuda.sh ${CUDA_VERSION} && rm install_cuda.sh
 ENV DESIRED_CUDA ${CUDA_VERSION}
 ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:$PATH
 # Note that Docker build forbids copying file outside the build context
 COPY ./common/install_linter.sh install_linter.sh
+COPY ./common/common_utils.sh common_utils.sh
 RUN bash ./install_linter.sh
-RUN rm install_linter.sh
-RUN chown -R jenkins:jenkins /var/lib/jenkins/ci_env
+RUN rm install_linter.sh common_utils.sh
 USER jenkins
 CMD ["bash"]

View File

@ -15,19 +15,20 @@ COPY ./common/install_user.sh install_user.sh
 RUN bash ./install_user.sh && rm install_user.sh
 # Install conda and other packages (e.g., numpy, pytest)
-ARG PYTHON_VERSION
-ENV PATH /var/lib/jenkins/ci_env/bin:$PATH
-ENV VIRTUAL_ENV /var/lib/jenkins/ci_env
-COPY requirements-ci.txt /opt/requirements-ci.txt
-COPY ./common/install_python.sh install_python.sh
-RUN bash ./install_python.sh && rm install_python.sh /opt/requirements-ci.txt
+ARG ANACONDA_PYTHON_VERSION
+ARG CONDA_CMAKE
+ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION
+ENV PATH /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/bin:/opt/conda/bin:$PATH
+COPY requirements-ci.txt /opt/conda/requirements-ci.txt
+COPY ./common/install_conda.sh install_conda.sh
+COPY ./common/common_utils.sh common_utils.sh
+RUN bash ./install_conda.sh && rm install_conda.sh common_utils.sh /opt/conda/requirements-ci.txt
 # Note that Docker build forbids copying file outside the build context
 COPY ./common/install_linter.sh install_linter.sh
+COPY ./common/common_utils.sh common_utils.sh
 RUN bash ./install_linter.sh
-RUN rm install_linter.sh
-RUN chown -R jenkins:jenkins /var/lib/jenkins/ci_env
+RUN rm install_linter.sh common_utils.sh
 USER jenkins
 CMD ["bash"]

View File

@ -0,0 +1,200 @@
# syntax = docker/dockerfile:experimental
ARG ROCM_VERSION=3.7
ARG BASE_CUDA_VERSION=11.8
ARG GPU_IMAGE=centos:7
FROM centos:7 as base
ENV LC_ALL en_US.UTF-8
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US.UTF-8
ARG DEVTOOLSET_VERSION=9
# Note: This patch is required since CentOS has reached EOL;
# otherwise any yum install step will fail
RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
RUN sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo
RUN yum install -y wget curl perl util-linux xz bzip2 git patch which perl zlib-devel
# Just add everything as a safe.directory for git since these will be used in multiple places with git
RUN git config --global --add safe.directory '*'
RUN yum install -y yum-utils centos-release-scl
RUN yum-config-manager --enable rhel-server-rhscl-7-rpms
# Note: After running yum-config-manager --enable rhel-server-rhscl-7-rpms
# the patch is required once again. Somehow this step re-adds mirror.centos.org
RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
RUN sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo
RUN yum install -y devtoolset-${DEVTOOLSET_VERSION}-gcc devtoolset-${DEVTOOLSET_VERSION}-gcc-c++ devtoolset-${DEVTOOLSET_VERSION}-gcc-gfortran devtoolset-${DEVTOOLSET_VERSION}-binutils
ENV PATH=/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH
ENV LD_LIBRARY_PATH=/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/lib64:/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/lib:$LD_LIBRARY_PATH
RUN yum --enablerepo=extras install -y epel-release
# cmake-3.18.4 from pip
RUN yum install -y python3-pip && \
python3 -mpip install cmake==3.18.4 && \
ln -s /usr/local/bin/cmake /usr/bin/cmake
RUN yum install -y autoconf aclocal automake make sudo
FROM base as openssl
# Install openssl (this must precede `build python` step)
# (In order to have a proper SSL module, Python is compiled
# against a recent openssl [see env vars above], which is linked
# statically. We delete openssl afterwards.)
ADD ./common/install_openssl.sh install_openssl.sh
RUN bash ./install_openssl.sh && rm install_openssl.sh
# EPEL for cmake
FROM base as patchelf
# Install patchelf
ADD ./common/install_patchelf.sh install_patchelf.sh
RUN bash ./install_patchelf.sh && rm install_patchelf.sh
RUN cp $(which patchelf) /patchelf
FROM patchelf as python
# build python
COPY manywheel/build_scripts /build_scripts
ADD ./common/install_cpython.sh /build_scripts/install_cpython.sh
RUN bash build_scripts/build.sh && rm -r build_scripts
FROM base as cuda
ARG BASE_CUDA_VERSION=10.2
# Install CUDA
ADD ./common/install_cuda.sh install_cuda.sh
RUN bash ./install_cuda.sh ${BASE_CUDA_VERSION} && rm install_cuda.sh
FROM base as intel
# MKL
ADD ./common/install_mkl.sh install_mkl.sh
RUN bash ./install_mkl.sh && rm install_mkl.sh
FROM base as magma
ARG BASE_CUDA_VERSION=10.2
# Install magma
ADD ./common/install_magma.sh install_magma.sh
RUN bash ./install_magma.sh ${BASE_CUDA_VERSION} && rm install_magma.sh
FROM base as jni
# Install java jni header
ADD ./common/install_jni.sh install_jni.sh
ADD ./java/jni.h jni.h
RUN bash ./install_jni.sh && rm install_jni.sh
FROM base as libpng
# Install libpng
ADD ./common/install_libpng.sh install_libpng.sh
RUN bash ./install_libpng.sh && rm install_libpng.sh
FROM ${GPU_IMAGE} as common
RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
RUN sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo
ENV LC_ALL en_US.UTF-8
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US.UTF-8
RUN yum install -y \
aclocal \
autoconf \
automake \
bison \
bzip2 \
curl \
diffutils \
file \
git \
make \
patch \
perl \
unzip \
util-linux \
wget \
which \
xz \
yasm
RUN yum install -y \
https://repo.ius.io/ius-release-el7.rpm \
https://ossci-linux.s3.amazonaws.com/epel-release-7-14.noarch.rpm
RUN yum swap -y git git236-core
# git236+ would refuse to run git commands in repos owned by other users,
# which causes the version check to fail, as the pytorch repo is bind-mounted into the image
# Override this behaviour by treating every folder as safe
# For more details see https://github.com/pytorch/pytorch/issues/78659#issuecomment-1144107327
RUN git config --global --add safe.directory "*"
ENV SSL_CERT_FILE=/opt/_internal/certs.pem
# Install LLVM version
COPY --from=openssl /opt/openssl /opt/openssl
COPY --from=python /opt/python /opt/python
COPY --from=python /opt/_internal /opt/_internal
COPY --from=python /opt/python/cp39-cp39/bin/auditwheel /usr/local/bin/auditwheel
COPY --from=intel /opt/intel /opt/intel
COPY --from=patchelf /usr/local/bin/patchelf /usr/local/bin/patchelf
COPY --from=jni /usr/local/include/jni.h /usr/local/include/jni.h
COPY --from=libpng /usr/local/bin/png* /usr/local/bin/
COPY --from=libpng /usr/local/bin/libpng* /usr/local/bin/
COPY --from=libpng /usr/local/include/png* /usr/local/include/
COPY --from=libpng /usr/local/include/libpng* /usr/local/include/
COPY --from=libpng /usr/local/lib/libpng* /usr/local/lib/
COPY --from=libpng /usr/local/lib/pkgconfig /usr/local/lib/pkgconfig
FROM common as cpu_final
ARG BASE_CUDA_VERSION=10.1
ARG DEVTOOLSET_VERSION=9
# Install Anaconda
ADD ./common/install_conda_docker.sh install_conda.sh
RUN bash ./install_conda.sh && rm install_conda.sh
ENV PATH /opt/conda/bin:$PATH
RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
RUN sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo
RUN yum install -y yum-utils centos-release-scl
RUN yum-config-manager --enable rhel-server-rhscl-7-rpms
RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
RUN sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo
RUN yum install -y devtoolset-${DEVTOOLSET_VERSION}-gcc devtoolset-${DEVTOOLSET_VERSION}-gcc-c++ devtoolset-${DEVTOOLSET_VERSION}-gcc-gfortran devtoolset-${DEVTOOLSET_VERSION}-binutils
ENV PATH=/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH
ENV LD_LIBRARY_PATH=/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/lib64:/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/lib:$LD_LIBRARY_PATH
# cmake is already installed inside the rocm base image, so remove if present
RUN rpm -e cmake || true
# cmake-3.18.4 from pip
RUN yum install -y python3-pip && \
python3 -mpip install cmake==3.18.4 && \
ln -s /usr/local/bin/cmake /usr/bin/cmake
# ninja
RUN yum install -y ninja-build
FROM cpu_final as cuda_final
RUN rm -rf /usr/local/cuda-${BASE_CUDA_VERSION}
COPY --from=cuda /usr/local/cuda-${BASE_CUDA_VERSION} /usr/local/cuda-${BASE_CUDA_VERSION}
COPY --from=magma /usr/local/cuda-${BASE_CUDA_VERSION} /usr/local/cuda-${BASE_CUDA_VERSION}
RUN ln -sf /usr/local/cuda-${BASE_CUDA_VERSION} /usr/local/cuda
ENV PATH=/usr/local/cuda/bin:$PATH
FROM cpu_final as rocm_final
ARG ROCM_VERSION=3.7
ARG PYTORCH_ROCM_ARCH
ENV PYTORCH_ROCM_ARCH ${PYTORCH_ROCM_ARCH}
# Add the ROCM_PATH env var so that LoadHip.cmake (even with logic updated for ROCm6.0)
# can still find HIP on ROCm5.7. Not needed for ROCm6.0 and above.
# Remove below when ROCm5.7 is no longer in the support matrix.
ENV ROCM_PATH /opt/rocm
ENV MKLROOT /opt/intel
# No need to install ROCm as base docker image should have full ROCm install
#ADD ./common/install_rocm.sh install_rocm.sh
#RUN ROCM_VERSION=${ROCM_VERSION} bash ./install_rocm.sh && rm install_rocm.sh
ADD ./common/install_rocm_drm.sh install_rocm_drm.sh
RUN bash ./install_rocm_drm.sh && rm install_rocm_drm.sh
# cmake3 is needed for the MIOpen build
RUN ln -sf /usr/local/bin/cmake /usr/bin/cmake3
ADD ./common/install_rocm_magma.sh install_rocm_magma.sh
RUN bash ./install_rocm_magma.sh && rm install_rocm_magma.sh
ADD ./common/install_miopen.sh install_miopen.sh
RUN bash ./install_miopen.sh ${ROCM_VERSION} && rm install_miopen.sh
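For orientation, a sketch of building one stage of this Dockerfile directly (tag and build-arg values are illustrative):
# Sketch: build the cuda_final stage with a chosen CUDA version
DOCKER_BUILDKIT=1 docker build \
    --target cuda_final \
    --build-arg BASE_CUDA_VERSION=11.8 \
    -t manylinux-builder:cuda11.8 .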

View File

@ -0,0 +1,153 @@
# syntax = docker/dockerfile:experimental
ARG ROCM_VERSION=3.7
ARG BASE_CUDA_VERSION=10.2
ARG GPU_IMAGE=nvidia/cuda:${BASE_CUDA_VERSION}-devel-centos7
FROM quay.io/pypa/manylinux2014_x86_64 as base
ENV LC_ALL en_US.UTF-8
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US.UTF-8
RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
RUN sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo
RUN yum install -y wget curl perl util-linux xz bzip2 git patch which perl zlib-devel
RUN yum install -y yum-utils centos-release-scl sudo
RUN yum-config-manager --enable rhel-server-rhscl-7-rpms
RUN yum install -y devtoolset-7-gcc devtoolset-7-gcc-c++ devtoolset-7-gcc-gfortran devtoolset-7-binutils
ENV PATH=/opt/rh/devtoolset-7/root/usr/bin:$PATH
ENV LD_LIBRARY_PATH=/opt/rh/devtoolset-7/root/usr/lib64:/opt/rh/devtoolset-7/root/usr/lib:$LD_LIBRARY_PATH
# cmake
RUN yum install -y cmake3 && \
ln -s /usr/bin/cmake3 /usr/bin/cmake
FROM base as openssl
# Install openssl (this must precede `build python` step)
# (In order to have a proper SSL module, Python is compiled
# against a recent openssl [see env vars above], which is linked
# statically. We delete openssl afterwards.)
ADD ./common/install_openssl.sh install_openssl.sh
RUN bash ./install_openssl.sh && rm install_openssl.sh
# remove unnecessary python versions
RUN rm -rf /opt/python/cp26-cp26m /opt/_internal/cpython-2.6.9-ucs2
RUN rm -rf /opt/python/cp26-cp26mu /opt/_internal/cpython-2.6.9-ucs4
RUN rm -rf /opt/python/cp33-cp33m /opt/_internal/cpython-3.3.6
RUN rm -rf /opt/python/cp34-cp34m /opt/_internal/cpython-3.4.6
FROM base as cuda
ARG BASE_CUDA_VERSION=10.2
# Install CUDA
ADD ./common/install_cuda.sh install_cuda.sh
RUN bash ./install_cuda.sh ${BASE_CUDA_VERSION} && rm install_cuda.sh
FROM base as intel
# MKL
ADD ./common/install_mkl.sh install_mkl.sh
RUN bash ./install_mkl.sh && rm install_mkl.sh
FROM base as magma
ARG BASE_CUDA_VERSION=10.2
# Install magma
ADD ./common/install_magma.sh install_magma.sh
RUN bash ./install_magma.sh ${BASE_CUDA_VERSION} && rm install_magma.sh
FROM base as jni
# Install java jni header
ADD ./common/install_jni.sh install_jni.sh
ADD ./java/jni.h jni.h
RUN bash ./install_jni.sh && rm install_jni.sh
FROM base as libpng
# Install libpng
ADD ./common/install_libpng.sh install_libpng.sh
RUN bash ./install_libpng.sh && rm install_libpng.sh
FROM ${GPU_IMAGE} as common
RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
RUN sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo
ENV LC_ALL en_US.UTF-8
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US.UTF-8
RUN yum install -y \
aclocal \
autoconf \
automake \
bison \
bzip2 \
curl \
diffutils \
file \
git \
make \
patch \
perl \
unzip \
util-linux \
wget \
which \
xz \
yasm
RUN yum install -y \
https://repo.ius.io/ius-release-el7.rpm \
https://ossci-linux.s3.amazonaws.com/epel-release-7-14.noarch.rpm
RUN yum swap -y git git236-core
# git236+ would refuse to run git commands in repos owned by other users,
# which causes the version check to fail, as the pytorch repo is bind-mounted into the image
# Override this behaviour by treating every folder as safe
# For more details see https://github.com/pytorch/pytorch/issues/78659#issuecomment-1144107327
RUN git config --global --add safe.directory "*"
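# A sketch of the failure this works around (hypothetical session inside the
# container; exact wording varies by git version):
#   $ git -C /pytorch rev-parse HEAD
#   fatal: detected dubious ownership in repository at '/pytorch'
# The literal "*" is git's documented wildcard marking every directory as safe.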
ENV SSL_CERT_FILE=/opt/_internal/certs.pem
# Install LLVM version
COPY --from=openssl /opt/openssl /opt/openssl
COPY --from=base /opt/python /opt/python
COPY --from=base /opt/_internal /opt/_internal
COPY --from=base /usr/local/bin/auditwheel /usr/local/bin/auditwheel
COPY --from=intel /opt/intel /opt/intel
COPY --from=base /usr/local/bin/patchelf /usr/local/bin/patchelf
COPY --from=libpng /usr/local/bin/png* /usr/local/bin/
COPY --from=libpng /usr/local/bin/libpng* /usr/local/bin/
COPY --from=libpng /usr/local/include/png* /usr/local/include/
COPY --from=libpng /usr/local/include/libpng* /usr/local/include/
COPY --from=libpng /usr/local/lib/libpng* /usr/local/lib/
COPY --from=libpng /usr/local/lib/pkgconfig /usr/local/lib/pkgconfig
COPY --from=jni /usr/local/include/jni.h /usr/local/include/jni.h
FROM common as cpu_final
ARG BASE_CUDA_VERSION=10.2
RUN yum install -y yum-utils centos-release-scl
RUN yum-config-manager --enable rhel-server-rhscl-7-rpms
RUN yum install -y devtoolset-7-gcc devtoolset-7-gcc-c++ devtoolset-7-gcc-gfortran devtoolset-7-binutils
ENV PATH=/opt/rh/devtoolset-7/root/usr/bin:$PATH
ENV LD_LIBRARY_PATH=/opt/rh/devtoolset-7/root/usr/lib64:/opt/rh/devtoolset-7/root/usr/lib:$LD_LIBRARY_PATH
# cmake
RUN yum install -y cmake3 && \
ln -s /usr/bin/cmake3 /usr/bin/cmake
# ninja
RUN yum install -y http://repo.okay.com.mx/centos/7/x86_64/release/okay-release-1-1.noarch.rpm
RUN yum install -y ninja-build
FROM cpu_final as cuda_final
RUN rm -rf /usr/local/cuda-${BASE_CUDA_VERSION}
COPY --from=cuda /usr/local/cuda-${BASE_CUDA_VERSION} /usr/local/cuda-${BASE_CUDA_VERSION}
COPY --from=magma /usr/local/cuda-${BASE_CUDA_VERSION} /usr/local/cuda-${BASE_CUDA_VERSION}
FROM common as rocm_final
ARG ROCM_VERSION=3.7
# Install ROCm
ADD ./common/install_rocm.sh install_rocm.sh
RUN bash ./install_rocm.sh ${ROCM_VERSION} && rm install_rocm.sh
# cmake is already installed inside the rocm base image, but both 2 and 3 exist
# cmake3 is needed for the later MIOpen custom build, so that step is last.
RUN yum install -y cmake3 && \
rm -f /usr/bin/cmake && \
ln -s /usr/bin/cmake3 /usr/bin/cmake
ADD ./common/install_miopen.sh install_miopen.sh
RUN bash ./install_miopen.sh ${ROCM_VERSION} && rm install_miopen.sh

View File

@ -7,8 +7,8 @@ ENV LC_ALL en_US.UTF-8
ENV LANG en_US.UTF-8 ENV LANG en_US.UTF-8
ENV LANGUAGE en_US.UTF-8 ENV LANGUAGE en_US.UTF-8
ARG DEVTOOLSET_VERSION=13 ARG DEVTOOLSET_VERSION=11
RUN yum install -y sudo wget curl perl util-linux xz bzip2 git patch which perl zlib-devel yum-utils gcc-toolset-${DEVTOOLSET_VERSION}-gcc gcc-toolset-${DEVTOOLSET_VERSION}-gcc-c++ gcc-toolset-${DEVTOOLSET_VERSION}-gcc-gfortran gcc-toolset-${DEVTOOLSET_VERSION}-gdb RUN yum install -y sudo wget curl perl util-linux xz bzip2 git patch which perl zlib-devel yum-utils gcc-toolset-${DEVTOOLSET_VERSION}-toolchain
ENV PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH ENV PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH
ENV LD_LIBRARY_PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib64:/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib:$LD_LIBRARY_PATH ENV LD_LIBRARY_PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib64:/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib:$LD_LIBRARY_PATH
@ -26,20 +26,17 @@ ADD ./common/install_openssl.sh install_openssl.sh
RUN bash ./install_openssl.sh && rm install_openssl.sh RUN bash ./install_openssl.sh && rm install_openssl.sh
# remove unnecessary python versions # remove unncessary python versions
RUN rm -rf /opt/python/cp26-cp26m /opt/_internal/cpython-2.6.9-ucs2 RUN rm -rf /opt/python/cp26-cp26m /opt/_internal/cpython-2.6.9-ucs2
RUN rm -rf /opt/python/cp26-cp26mu /opt/_internal/cpython-2.6.9-ucs4 RUN rm -rf /opt/python/cp26-cp26mu /opt/_internal/cpython-2.6.9-ucs4
RUN rm -rf /opt/python/cp33-cp33m /opt/_internal/cpython-3.3.6 RUN rm -rf /opt/python/cp33-cp33m /opt/_internal/cpython-3.3.6
RUN rm -rf /opt/python/cp34-cp34m /opt/_internal/cpython-3.4.6 RUN rm -rf /opt/python/cp34-cp34m /opt/_internal/cpython-3.4.6
FROM base as cuda FROM base as cuda
ARG BASE_CUDA_VERSION=12.6 ARG BASE_CUDA_VERSION=11.8
# Install CUDA # Install CUDA
ADD ./common/install_cuda.sh install_cuda.sh ADD ./common/install_cuda.sh install_cuda.sh
COPY ./common/install_nccl.sh install_nccl.sh RUN bash ./install_cuda.sh ${BASE_CUDA_VERSION} && rm install_cuda.sh
COPY ./ci_commit_pins/nccl-cu* /ci_commit_pins/
COPY ./common/install_cusparselt.sh install_cusparselt.sh
RUN bash ./install_cuda.sh ${BASE_CUDA_VERSION} && rm install_cuda.sh install_nccl.sh ci_commit_pins/nccl-cu* install_cusparselt.sh
FROM base as intel FROM base as intel
# MKL # MKL
@ -47,7 +44,7 @@ ADD ./common/install_mkl.sh install_mkl.sh
RUN bash ./install_mkl.sh && rm install_mkl.sh RUN bash ./install_mkl.sh && rm install_mkl.sh
FROM base as magma FROM base as magma
ARG BASE_CUDA_VERSION=12.6 ARG BASE_CUDA_VERSION=10.2
# Install magma # Install magma
ADD ./common/install_magma.sh install_magma.sh ADD ./common/install_magma.sh install_magma.sh
RUN bash ./install_magma.sh ${BASE_CUDA_VERSION} && rm install_magma.sh RUN bash ./install_magma.sh ${BASE_CUDA_VERSION} && rm install_magma.sh
@ -64,7 +61,7 @@ ADD ./common/install_libpng.sh install_libpng.sh
RUN bash ./install_libpng.sh && rm install_libpng.sh RUN bash ./install_libpng.sh && rm install_libpng.sh
FROM ${GPU_IMAGE} as common FROM ${GPU_IMAGE} as common
ARG DEVTOOLSET_VERSION=13 ARG DEVTOOLSET_VERSION=11
ENV LC_ALL en_US.UTF-8 ENV LC_ALL en_US.UTF-8
ENV LANG en_US.UTF-8 ENV LANG en_US.UTF-8
ENV LANGUAGE en_US.UTF-8 ENV LANGUAGE en_US.UTF-8
@ -87,12 +84,13 @@ RUN yum install -y \
wget \ wget \
which \ which \
xz \ xz \
glibc-langpack-en \ gcc-toolset-${DEVTOOLSET_VERSION}-toolchain \
gcc-toolset-${DEVTOOLSET_VERSION}-gcc \ glibc-langpack-en
gcc-toolset-${DEVTOOLSET_VERSION}-gcc-c++ \ RUN yum install -y \
gcc-toolset-${DEVTOOLSET_VERSION}-gcc-gfortran \ https://repo.ius.io/ius-release-el7.rpm \
gcc-toolset-${DEVTOOLSET_VERSION}-gdb https://ossci-linux.s3.amazonaws.com/epel-release-7-14.noarch.rpm
RUN yum swap -y git git236-core
# git236+ would refuse to run git commands in repos owned by other users # git236+ would refuse to run git commands in repos owned by other users
# Which causes version check to fail, as pytorch repo is bind-mounted into the image # Which causes version check to fail, as pytorch repo is bind-mounted into the image
# Override this behaviour by treating every folder as safe # Override this behaviour by treating every folder as safe
@ -103,7 +101,6 @@ ENV SSL_CERT_FILE=/opt/_internal/certs.pem
# Install LLVM version # Install LLVM version
COPY --from=openssl /opt/openssl /opt/openssl COPY --from=openssl /opt/openssl /opt/openssl
COPY --from=base /opt/python /opt/python COPY --from=base /opt/python /opt/python
COPY --from=base /usr/local/lib/ /usr/local/lib/
COPY --from=base /opt/_internal /opt/_internal COPY --from=base /opt/_internal /opt/_internal
COPY --from=base /usr/local/bin/auditwheel /usr/local/bin/auditwheel COPY --from=base /usr/local/bin/auditwheel /usr/local/bin/auditwheel
COPY --from=intel /opt/intel /opt/intel COPY --from=intel /opt/intel /opt/intel
@ -117,8 +114,8 @@ COPY --from=libpng /usr/local/lib/pkgconfig /usr/local/
COPY --from=jni /usr/local/include/jni.h /usr/local/include/jni.h COPY --from=jni /usr/local/include/jni.h /usr/local/include/jni.h
FROM common as cpu_final FROM common as cpu_final
ARG BASE_CUDA_VERSION=12.6 ARG BASE_CUDA_VERSION=11.8
ARG DEVTOOLSET_VERSION=13 ARG DEVTOOLSET_VERSION=11
# Install Anaconda # Install Anaconda
ADD ./common/install_conda_docker.sh install_conda.sh ADD ./common/install_conda_docker.sh install_conda.sh
RUN bash ./install_conda.sh && rm install_conda.sh RUN bash ./install_conda.sh && rm install_conda.sh
@ -130,8 +127,7 @@ ENV LD_LIBRARY_PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib64:/op
RUN for cpython_version in "cp312-cp312" "cp313-cp313" "cp313-cp313t"; do \ RUN for cpython_version in "cp312-cp312" "cp313-cp313" "cp313-cp313t"; do \
/opt/python/${cpython_version}/bin/python -m pip install setuptools wheel; \ /opt/python/${cpython_version}/bin/python -m pip install setuptools wheel; \
done; done;
ADD ./common/patch_libstdc.sh patch_libstdc.sh
RUN bash ./patch_libstdc.sh && rm patch_libstdc.sh
# cmake-3.18.4 from pip; force in case cmake3 already exists # cmake-3.18.4 from pip; force in case cmake3 already exists
RUN yum install -y python3-pip && \ RUN yum install -y python3-pip && \
@ -158,14 +154,11 @@ ENV ROCM_PATH /opt/rocm
# and avoid 3.21.0 cmake+ninja issues with ninja inserting "-Wl,--no-as-needed" in LINK_FLAGS for static linker # and avoid 3.21.0 cmake+ninja issues with ninja inserting "-Wl,--no-as-needed" in LINK_FLAGS for static linker
RUN python3 -m pip install --upgrade pip && \ RUN python3 -m pip install --upgrade pip && \
python3 -mpip install cmake==3.28.4 python3 -mpip install cmake==3.28.4
# replace the libdrm in /opt/amdgpu with custom amdgpu.ids lookup path
ADD ./common/install_rocm_drm.sh install_rocm_drm.sh ADD ./common/install_rocm_drm.sh install_rocm_drm.sh
RUN bash ./install_rocm_drm.sh && rm install_rocm_drm.sh RUN bash ./install_rocm_drm.sh && rm install_rocm_drm.sh
# ROCm 6.4 rocm-smi depends on system drm.h header
RUN yum install -y libdrm-devel
ENV MKLROOT /opt/intel ENV MKLROOT /opt/intel
ADD ./common/install_rocm_magma.sh install_rocm_magma.sh ADD ./common/install_rocm_magma.sh install_rocm_magma.sh
RUN bash ./install_rocm_magma.sh ${ROCM_VERSION} && rm install_rocm_magma.sh RUN bash ./install_rocm_magma.sh && rm install_rocm_magma.sh
ADD ./common/install_miopen.sh install_miopen.sh ADD ./common/install_miopen.sh install_miopen.sh
RUN bash ./install_miopen.sh ${ROCM_VERSION} && rm install_miopen.sh RUN bash ./install_miopen.sh ${ROCM_VERSION} && rm install_miopen.sh
@ -176,6 +169,6 @@ ENV XPU_DRIVER_TYPE ROLLING
RUN python3 -m pip install --upgrade pip && \ RUN python3 -m pip install --upgrade pip && \
python3 -mpip install cmake==3.28.4 python3 -mpip install cmake==3.28.4
ADD ./common/install_xpu.sh install_xpu.sh ADD ./common/install_xpu.sh install_xpu.sh
ENV XPU_VERSION 2025.2 ENV XPU_VERSION 2025.0
RUN bash ./install_xpu.sh && rm install_xpu.sh RUN bash ./install_xpu.sh && rm install_xpu.sh
RUN pushd /opt/_internal && tar -xJf static-libs-for-embedding-only.tar.xz && popd RUN pushd /opt/_internal && tar -xJf static-libs-for-embedding-only.tar.xz && popd

View File

@ -1,8 +1,9 @@
FROM quay.io/pypa/manylinux_2_28_aarch64 as base FROM quay.io/pypa/manylinux_2_28_aarch64 as base
ARG GCCTOOLSET_VERSION=13 # Graviton needs GCC 10 or above for the build. GCC12 is the default version in almalinux-8.
ARG GCCTOOLSET_VERSION=11
# Language variables # Language variabes
ENV LC_ALL=en_US.UTF-8 ENV LC_ALL=en_US.UTF-8
ENV LANG=en_US.UTF-8 ENV LANG=en_US.UTF-8
ENV LANGUAGE=en_US.UTF-8 ENV LANGUAGE=en_US.UTF-8
@ -35,16 +36,7 @@ RUN yum install -y \
yasm \ yasm \
zstd \ zstd \
sudo \ sudo \
gcc-toolset-${GCCTOOLSET_VERSION}-gcc \ gcc-toolset-${GCCTOOLSET_VERSION}-toolchain
gcc-toolset-${GCCTOOLSET_VERSION}-gcc-c++ \
gcc-toolset-${GCCTOOLSET_VERSION}-gcc-gfortran \
gcc-toolset-${GCCTOOLSET_VERSION}-gdb
# (optional) Install non-default Ninja version
ARG NINJA_VERSION
COPY ./common/install_ninja.sh install_ninja.sh
RUN if [ -n "${NINJA_VERSION}" ]; then bash ./install_ninja.sh; fi
RUN rm install_ninja.sh
# Ensure the expected devtoolset is used # Ensure the expected devtoolset is used
ENV PATH=/opt/rh/gcc-toolset-${GCCTOOLSET_VERSION}/root/usr/bin:$PATH ENV PATH=/opt/rh/gcc-toolset-${GCCTOOLSET_VERSION}/root/usr/bin:$PATH
@ -58,26 +50,15 @@ RUN git config --global --add safe.directory "*"
FROM base as openblas FROM base as openblas
# Install openblas # Install openblas
ARG OPENBLAS_VERSION
ADD ./common/install_openblas.sh install_openblas.sh ADD ./common/install_openblas.sh install_openblas.sh
RUN bash ./install_openblas.sh && rm install_openblas.sh RUN bash ./install_openblas.sh && rm install_openblas.sh
# Install Arm Compute Library
FROM base as arm_compute
# use python3.9 to install scons
RUN python3.9 -m pip install scons==4.7.0
RUN ln -sf /opt/python/cp39-cp39/bin/scons /usr/local/bin
COPY ./common/install_acl.sh install_acl.sh
RUN bash ./install_acl.sh && rm install_acl.sh
FROM base as final FROM base as final
# remove unnecessary python versions # remove unncessary python versions
RUN rm -rf /opt/python/cp26-cp26m /opt/_internal/cpython-2.6.9-ucs2 RUN rm -rf /opt/python/cp26-cp26m /opt/_internal/cpython-2.6.9-ucs2
RUN rm -rf /opt/python/cp26-cp26mu /opt/_internal/cpython-2.6.9-ucs4 RUN rm -rf /opt/python/cp26-cp26mu /opt/_internal/cpython-2.6.9-ucs4
RUN rm -rf /opt/python/cp33-cp33m /opt/_internal/cpython-3.3.6 RUN rm -rf /opt/python/cp33-cp33m /opt/_internal/cpython-3.3.6
RUN rm -rf /opt/python/cp34-cp34m /opt/_internal/cpython-3.4.6 RUN rm -rf /opt/python/cp34-cp34m /opt/_internal/cpython-3.4.6
COPY --from=openblas /opt/OpenBLAS/ /opt/OpenBLAS/ COPY --from=openblas /opt/OpenBLAS/ /opt/OpenBLAS/
COPY --from=arm_compute /acl /acl ENV LD_LIBRARY_PATH=/opt/OpenBLAS/lib:$LD_LIBRARY_PATH
ENV LD_LIBRARY_PATH=/opt/OpenBLAS/lib:/acl/build/:$LD_LIBRARY_PATH
ADD ./common/patch_libstdc.sh patch_libstdc.sh
RUN bash ./patch_libstdc.sh && rm patch_libstdc.sh

View File

@ -0,0 +1,94 @@
FROM quay.io/pypa/manylinux2014_aarch64 as base
# Graviton needs GCC 10 for the build
ARG DEVTOOLSET_VERSION=10
# Language variables
ENV LC_ALL=en_US.UTF-8
ENV LANG=en_US.UTF-8
ENV LANGUAGE=en_US.UTF-8
# Install needed OS packages. This is to support all
# the binary builds (torch, vision, audio, text, data)
RUN yum -y install epel-release
RUN yum -y update
RUN yum install -y \
autoconf \
automake \
bison \
bzip2 \
curl \
diffutils \
file \
git \
make \
patch \
perl \
unzip \
util-linux \
wget \
which \
xz \
yasm \
less \
zstd \
libgomp \
sudo \
devtoolset-${DEVTOOLSET_VERSION}-gcc \
devtoolset-${DEVTOOLSET_VERSION}-gcc-c++ \
devtoolset-${DEVTOOLSET_VERSION}-gcc-gfortran \
devtoolset-${DEVTOOLSET_VERSION}-binutils
# Ensure the expected devtoolset is used
ENV PATH=/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH
ENV LD_LIBRARY_PATH=/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/lib64:/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/lib:$LD_LIBRARY_PATH
# git236+ would refuse to run git commands in repos owned by other users
# Which causes version check to fail, as pytorch repo is bind-mounted into the image
# Override this behaviour by treating every folder as safe
# For more details see https://github.com/pytorch/pytorch/issues/78659#issuecomment-1144107327
RUN git config --global --add safe.directory "*"
###############################################################################
# libgfortran.a hack
#
# libgfortran.a from quay.io/pypa/manylinux2014_aarch64 is not compiled with -fPIC.
# This causes an __stack_chk_guard@@GLIBC_2.17 relocation error in the pytorch build.
# To solve it, fetch Ubuntu's libgfortran.a, which is compiled with -fPIC.
# NOTE: Need a better way to get this library, as Ubuntu's package can be removed or changed by the vendor
###############################################################################
RUN cd ~/ \
&& curl -L -o ~/libgfortran-10-dev.deb http://ports.ubuntu.com/ubuntu-ports/pool/universe/g/gcc-10/libgfortran-10-dev_10.5.0-4ubuntu2_arm64.deb \
&& ar x ~/libgfortran-10-dev.deb \
&& tar --use-compress-program=unzstd -xvf data.tar.zst -C ~/ \
&& cp -f ~/usr/lib/gcc/aarch64-linux-gnu/10/libgfortran.a /opt/rh/devtoolset-10/root/usr/lib/gcc/aarch64-redhat-linux/10/
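# For reference, the non-PIC archive fails roughly with the standard GNU ld
# diagnostic (sketch; exact relocation type may vary):
#   relocation R_AARCH64_ADR_PREL_PG_HI21 against symbol
#   `__stack_chk_guard@@GLIBC_2.17' can not be used when making a shared
#   object; recompile with -fPIC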
# install cmake
RUN yum install -y cmake3 && \
ln -s /usr/bin/cmake3 /usr/bin/cmake
FROM base as openssl
# Install openssl (this must precede `build python` step)
# (In order to have a proper SSL module, Python is compiled
# against a recent openssl [see env vars above], which is linked
# statically. We delete openssl afterwards.)
ADD ./common/install_openssl.sh install_openssl.sh
RUN bash ./install_openssl.sh && rm install_openssl.sh
ENV SSL_CERT_FILE=/opt/_internal/certs.pem
FROM base as openblas
# Install openblas
ADD ./common/install_openblas.sh install_openblas.sh
RUN bash ./install_openblas.sh && rm install_openblas.sh
FROM openssl as final
# remove unnecessary python versions
RUN rm -rf /opt/python/cp26-cp26m /opt/_internal/cpython-2.6.9-ucs2
RUN rm -rf /opt/python/cp26-cp26mu /opt/_internal/cpython-2.6.9-ucs4
RUN rm -rf /opt/python/cp33-cp33m /opt/_internal/cpython-3.3.6
RUN rm -rf /opt/python/cp34-cp34m /opt/_internal/cpython-3.4.6
COPY --from=openblas /opt/OpenBLAS/ /opt/OpenBLAS/
ENV LD_LIBRARY_PATH=/opt/OpenBLAS/lib:$LD_LIBRARY_PATH

View File

@ -1,7 +1,7 @@
FROM quay.io/pypa/manylinux_2_28_aarch64 as base FROM quay.io/pypa/manylinux_2_28_aarch64 as base
# Cuda ARM build needs gcc 11 # Cuda ARM build needs gcc 11
ARG DEVTOOLSET_VERSION=13 ARG DEVTOOLSET_VERSION=11
# Language variables # Language variables
ENV LC_ALL=en_US.UTF-8 ENV LC_ALL=en_US.UTF-8
@ -34,10 +34,7 @@ RUN yum install -y \
zstd \ zstd \
libgomp \ libgomp \
sudo \ sudo \
gcc-toolset-${DEVTOOLSET_VERSION}-gcc \ gcc-toolset-${DEVTOOLSET_VERSION}-toolchain
gcc-toolset-${DEVTOOLSET_VERSION}-gcc-c++ \
gcc-toolset-${DEVTOOLSET_VERSION}-gcc-gfortran \
gcc-toolset-${DEVTOOLSET_VERSION}-gdb
# Ensure the expected devtoolset is used # Ensure the expected devtoolset is used
ENV PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH ENV PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH
@ -60,7 +57,7 @@ RUN bash ./install_openssl.sh && rm install_openssl.sh
ENV SSL_CERT_FILE=/opt/_internal/certs.pem ENV SSL_CERT_FILE=/opt/_internal/certs.pem
FROM openssl as final FROM openssl as final
# remove unnecessary python versions # remove unncessary python versions
RUN rm -rf /opt/python/cp26-cp26m /opt/_internal/cpython-2.6.9-ucs2 RUN rm -rf /opt/python/cp26-cp26m /opt/_internal/cpython-2.6.9-ucs2
RUN rm -rf /opt/python/cp26-cp26mu /opt/_internal/cpython-2.6.9-ucs4 RUN rm -rf /opt/python/cp26-cp26mu /opt/_internal/cpython-2.6.9-ucs4
RUN rm -rf /opt/python/cp33-cp33m /opt/_internal/cpython-3.3.6 RUN rm -rf /opt/python/cp33-cp33m /opt/_internal/cpython-3.3.6
@ -69,11 +66,8 @@ RUN rm -rf /opt/python/cp34-cp34m /opt/_internal/cpython-3.4.6
FROM base as cuda FROM base as cuda
ARG BASE_CUDA_VERSION ARG BASE_CUDA_VERSION
# Install CUDA # Install CUDA
ADD ./common/install_cuda.sh install_cuda.sh ADD ./common/install_cuda_aarch64.sh install_cuda_aarch64.sh
COPY ./common/install_nccl.sh install_nccl.sh RUN bash ./install_cuda_aarch64.sh ${BASE_CUDA_VERSION} && rm install_cuda_aarch64.sh
COPY ./common/install_cusparselt.sh install_cusparselt.sh
COPY ./ci_commit_pins/nccl-cu* /ci_commit_pins/
RUN bash ./install_cuda.sh ${BASE_CUDA_VERSION} && rm install_cuda.sh install_nccl.sh ci_commit_pins/nccl-cu* install_cusparselt.sh
FROM base as magma FROM base as magma
ARG BASE_CUDA_VERSION ARG BASE_CUDA_VERSION
@ -86,15 +80,6 @@ FROM base as nvpl
ADD ./common/install_nvpl.sh install_nvpl.sh ADD ./common/install_nvpl.sh install_nvpl.sh
RUN bash ./install_nvpl.sh && rm install_nvpl.sh RUN bash ./install_nvpl.sh && rm install_nvpl.sh
# Install Arm Compute Library
FROM base as arm_compute
# use python3.9 to install scons
RUN python3.9 -m pip install scons==4.7.0
RUN ln -sf /opt/python/cp39-cp39/bin/scons /usr/local/bin
COPY ./common/install_acl.sh install_acl.sh
RUN bash ./install_acl.sh && rm install_acl.sh
FROM base as final
FROM final as cuda_final FROM final as cuda_final
ARG BASE_CUDA_VERSION ARG BASE_CUDA_VERSION
RUN rm -rf /usr/local/cuda-${BASE_CUDA_VERSION} RUN rm -rf /usr/local/cuda-${BASE_CUDA_VERSION}
@ -102,9 +87,5 @@ COPY --from=cuda /usr/local/cuda-${BASE_CUDA_VERSION} /usr/local/cuda-${BAS
COPY --from=magma /usr/local/cuda-${BASE_CUDA_VERSION} /usr/local/cuda-${BASE_CUDA_VERSION} COPY --from=magma /usr/local/cuda-${BASE_CUDA_VERSION} /usr/local/cuda-${BASE_CUDA_VERSION}
COPY --from=nvpl /opt/nvpl/lib/ /usr/local/lib/ COPY --from=nvpl /opt/nvpl/lib/ /usr/local/lib/
COPY --from=nvpl /opt/nvpl/include/ /usr/local/include/ COPY --from=nvpl /opt/nvpl/include/ /usr/local/include/
COPY --from=arm_compute /acl /acl
RUN ln -sf /usr/local/cuda-${BASE_CUDA_VERSION} /usr/local/cuda RUN ln -sf /usr/local/cuda-${BASE_CUDA_VERSION} /usr/local/cuda
ENV PATH=/usr/local/cuda/bin:$PATH ENV PATH=/usr/local/cuda/bin:$PATH
ENV LD_LIBRARY_PATH=/acl/build/:$LD_LIBRARY_PATH
ADD ./common/patch_libstdc.sh patch_libstdc.sh
RUN bash ./patch_libstdc.sh && rm patch_libstdc.sh

View File

@ -0,0 +1,71 @@
FROM centos:8 as base
ENV LC_ALL en_US.UTF-8
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US.UTF-8
ENV PATH /opt/rh/gcc-toolset-11/root/bin/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
# change to a valid repo
RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-*.repo
# enable to install ninja-build
RUN sed -i 's|enabled=0|enabled=1|g' /etc/yum.repos.d/CentOS-Linux-PowerTools.repo
RUN yum -y update
RUN yum install -y wget curl perl util-linux xz bzip2 git patch which zlib-devel sudo
RUN yum install -y autoconf automake make cmake gdb gcc-toolset-11-gcc-c++
FROM base as openssl
ADD ./common/install_openssl.sh install_openssl.sh
RUN bash ./install_openssl.sh && rm install_openssl.sh
# Install python
FROM base as python
RUN yum install -y openssl-devel zlib-devel bzip2-devel ncurses-devel sqlite-devel readline-devel tk-devel gdbm-devel libpcap-devel xz-devel libffi-devel
ADD common/install_cpython.sh install_cpython.sh
RUN bash ./install_cpython.sh && rm install_cpython.sh
FROM base as conda
ADD ./common/install_conda_docker.sh install_conda.sh
RUN bash ./install_conda.sh && rm install_conda.sh
RUN /opt/conda/bin/conda install -y cmake
FROM base as intel
# Install MKL
COPY --from=python /opt/python /opt/python
COPY --from=python /opt/_internal /opt/_internal
COPY --from=conda /opt/conda /opt/conda
ENV PATH=/opt/conda/bin:$PATH
ADD ./common/install_mkl.sh install_mkl.sh
RUN bash ./install_mkl.sh && rm install_mkl.sh
FROM base as patchelf
ADD ./common/install_patchelf.sh install_patchelf.sh
RUN bash ./install_patchelf.sh && rm install_patchelf.sh
RUN cp $(which patchelf) /patchelf
FROM base as jni
ADD ./common/install_jni.sh install_jni.sh
ADD ./java/jni.h jni.h
RUN bash ./install_jni.sh && rm install_jni.sh
FROM base as libpng
ADD ./common/install_libpng.sh install_libpng.sh
RUN bash ./install_libpng.sh && rm install_libpng.sh
FROM base as final
COPY --from=openssl /opt/openssl /opt/openssl
COPY --from=python /opt/python /opt/python
COPY --from=python /opt/_internal /opt/_internal
COPY --from=intel /opt/intel /opt/intel
COPY --from=conda /opt/conda /opt/conda
COPY --from=patchelf /usr/local/bin/patchelf /usr/local/bin/patchelf
COPY --from=jni /usr/local/include/jni.h /usr/local/include/jni.h
COPY --from=libpng /usr/local/bin/png* /usr/local/bin/
COPY --from=libpng /usr/local/bin/libpng* /usr/local/bin/
COPY --from=libpng /usr/local/include/png* /usr/local/include/
COPY --from=libpng /usr/local/include/libpng* /usr/local/include/
COPY --from=libpng /usr/local/lib/libpng* /usr/local/lib/
COPY --from=libpng /usr/local/lib/pkgconfig /usr/local/lib/pkgconfig
RUN yum install -y ninja-build

View File

@ -5,9 +5,7 @@ ENV LC_ALL=C.UTF-8
ENV LANG=C.UTF-8 ENV LANG=C.UTF-8
ENV LANGUAGE=C.UTF-8 ENV LANGUAGE=C.UTF-8
# there is a bugfix in gcc >= 14 for precompiled headers and s390x vectorization interaction. ARG DEVTOOLSET_VERSION=13
# with earlier gcc versions test/inductor/test_cpu_cpp_wrapper.py will fail.
ARG DEVTOOLSET_VERSION=14
# Installed needed OS packages. This is to support all # Installed needed OS packages. This is to support all
# the binary builds (torch, vision, audio, text, data) # the binary builds (torch, vision, audio, text, data)
RUN yum -y install epel-release RUN yum -y install epel-release
@ -44,7 +42,6 @@ RUN yum install -y \
llvm-devel \ llvm-devel \
libzstd-devel \ libzstd-devel \
python3.12-devel \ python3.12-devel \
python3.12-test \
python3.12-setuptools \ python3.12-setuptools \
python3.12-pip \ python3.12-pip \
python3-virtualenv \ python3-virtualenv \
@ -60,8 +57,7 @@ RUN yum install -y \
libxslt-devel \ libxslt-devel \
libxml2-devel \ libxml2-devel \
openssl-devel \ openssl-devel \
valgrind \ valgrind
ninja-build
ENV PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH ENV PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH
ENV LD_LIBRARY_PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib64:/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib:$LD_LIBRARY_PATH ENV LD_LIBRARY_PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib64:/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib:$LD_LIBRARY_PATH
@ -105,40 +101,24 @@ CMD ["/bin/bash"]
# install test dependencies: # install test dependencies:
# - grpcio requires system openssl, bundled crypto fails to build # - grpcio requires system openssl, bundled crypto fails to build
# - ml_dtypes 0.4.0 requires some fixes provided in later commits to build
RUN dnf install -y \ RUN dnf install -y \
hdf5-devel \ protobuf-devel \
python3-h5py \ protobuf-c-devel \
git protobuf-lite-devel \
wget \
patch
RUN env GRPC_PYTHON_BUILD_SYSTEM_OPENSSL=True pip3 install grpcio RUN env GRPC_PYTHON_BUILD_SYSTEM_OPENSSL=True pip3 install grpcio==1.65.4
RUN cd ~ && \
# cmake-3.28.0 from pip for onnxruntime git clone https://github.com/jax-ml/ml_dtypes && \
RUN python3 -mpip install cmake==3.28.0 cd ml_dtypes && \
git checkout v0.4.0 && \
ADD ./common/patch_libstdc.sh patch_libstdc.sh
RUN bash ./patch_libstdc.sh && rm patch_libstdc.sh
# build onnxruntime 1.21.0 from source.
# it is not possible to build it from source via pip,
# so build it from the upstream repository instead.
# h5py is a dependency of onnxruntime_training.
# h5py==3.11.0 builds with hdf5-devel 1.10.5 from the repository,
# but doesn't build with numpy >= 2.3.0.
# install the newest flatbuffers version first:
# an old version gets pulled in otherwise, for reasons unknown.
# the packaging package is required for the onnxruntime wheel build.
RUN pip3 install flatbuffers && \
pip3 install cython 'pkgconfig>=1.5.5' 'setuptools>=77' 'numpy<2.3.0' && \
pip3 install --no-build-isolation h5py==3.11.0 && \
pip3 install packaging && \
git clone https://github.com/microsoft/onnxruntime && \
cd onnxruntime && git checkout v1.21.0 && \
git submodule update --init --recursive && \ git submodule update --init --recursive && \
wget https://github.com/microsoft/onnxruntime/commit/f57db79743c4d1a3553aa05cf95bcd10966030e6.patch && \ wget https://github.com/jax-ml/ml_dtypes/commit/b969f76914d6b30676721bc92bf0f6021a0d1321.patch && \
patch -p1 < f57db79743c4d1a3553aa05cf95bcd10966030e6.patch && \ wget https://github.com/jax-ml/ml_dtypes/commit/d4e6d035ecda073eab8bcf60f4eef572ee7087e6.patch && \
./build.sh --config Release --parallel 0 --enable_pybind \ patch -p1 < b969f76914d6b30676721bc92bf0f6021a0d1321.patch && \
--build_wheel --enable_training --enable_training_apis \ patch -p1 < d4e6d035ecda073eab8bcf60f4eef572ee7087e6.patch && \
--enable_training_ops --skip_tests --allow_running_as_root \ python3 setup.py bdist_wheel && \
--compile_no_warning_as_error && \ pip3 install dist/*.whl && \
pip3 install ./build/Linux/Release/dist/onnxruntime_training-*.whl && \ rm -rf ml_dtypes
cd .. && /bin/rm -rf ./onnxruntime
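# Note on --no-build-isolation above: pip normally builds wheels in an isolated
# environment with freshly resolved build dependencies; disabling isolation
# makes the h5py build reuse the cython/pkgconfig/numpy pins installed just
# before it, which is why those installs are ordered first.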

View File

@ -1,7 +1,7 @@
#!/usr/bin/env bash #!/usr/bin/env bash
# Script used only in CD pipeline # Script used only in CD pipeline
set -exou pipefail set -eou pipefail
TOPDIR=$(git rev-parse --show-toplevel) TOPDIR=$(git rev-parse --show-toplevel)
@ -9,120 +9,151 @@ image="$1"
shift shift
if [ -z "${image}" ]; then if [ -z "${image}" ]; then
echo "Usage: $0 IMAGE:ARCHTAG" echo "Usage: $0 IMAGE"
exit 1 exit 1
fi fi
# Go from imagename:tag to tag DOCKER_IMAGE="pytorch/${image}"
DOCKER_TAG_PREFIX=$(echo "${image}" | awk -F':' '{print $2}')
GPU_ARCH_VERSION="" DOCKER_REGISTRY="${DOCKER_REGISTRY:-docker.io}"
if [[ "${DOCKER_TAG_PREFIX}" == cuda* ]]; then
# extract cuda version from image name. e.g. manylinux2_28-builder:cuda12.8 returns 12.8
GPU_ARCH_VERSION=$(echo "${DOCKER_TAG_PREFIX}" | awk -F'cuda' '{print $2}')
elif [[ "${DOCKER_TAG_PREFIX}" == rocm* ]]; then
# extract rocm version from image name. e.g. manylinux2_28-builder:rocm6.2.4 returns 6.2.4
GPU_ARCH_VERSION=$(echo "${DOCKER_TAG_PREFIX}" | awk -F'rocm' '{print $2}')
fi
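# A quick sanity check of the two-stage parse (hypothetical shell session):
#   $ echo "manylinux2_28-builder:cuda12.8" | awk -F':' '{print $2}'   # -> cuda12.8
#   $ echo "cuda12.8" | awk -F'cuda' '{print $2}'                      # -> 12.8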
GPU_ARCH_TYPE=${GPU_ARCH_TYPE:-cpu}
GPU_ARCH_VERSION=${GPU_ARCH_VERSION:-}
MANY_LINUX_VERSION=${MANY_LINUX_VERSION:-} MANY_LINUX_VERSION=${MANY_LINUX_VERSION:-}
DOCKERFILE_SUFFIX=${DOCKERFILE_SUFFIX:-} DOCKERFILE_SUFFIX=${DOCKERFILE_SUFFIX:-}
OPENBLAS_VERSION=${OPENBLAS_VERSION:-} WITH_PUSH=${WITH_PUSH:-}
ACL_VERSION=${ACL_VERSION:-}
case ${image} in case ${GPU_ARCH_TYPE} in
manylinux2_28-builder:cpu) cpu)
TARGET=cpu_final TARGET=cpu_final
DOCKER_TAG=cpu
GPU_IMAGE=centos:7
DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=9"
;;
cpu-manylinux_2_28)
TARGET=cpu_final
DOCKER_TAG=cpu
GPU_IMAGE=amd64/almalinux:8 GPU_IMAGE=amd64/almalinux:8
DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=13" DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=11"
MANY_LINUX_VERSION="2_28" MANY_LINUX_VERSION="2_28"
;; ;;
manylinux2_28_aarch64-builder:cpu-aarch64) cpu-aarch64)
TARGET=final TARGET=final
DOCKER_TAG=cpu-aarch64
GPU_IMAGE=arm64v8/centos:7
DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=10"
MANY_LINUX_VERSION="aarch64"
;;
cpu-aarch64-2_28)
TARGET=final
DOCKER_TAG=cpu-aarch64
GPU_IMAGE=arm64v8/almalinux:8 GPU_IMAGE=arm64v8/almalinux:8
DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=13 --build-arg NINJA_VERSION=1.12.1" DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=11"
MANY_LINUX_VERSION="2_28_aarch64" MANY_LINUX_VERSION="2_28_aarch64"
;; ;;
manylinuxs390x-builder:cpu-s390x) cpu-cxx11-abi)
TARGET=final TARGET=final
DOCKER_TAG=cpu-cxx11-abi
GPU_IMAGE=""
DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=9"
MANY_LINUX_VERSION="cxx11-abi"
;;
cpu-s390x)
TARGET=final
DOCKER_TAG=cpu-s390x
GPU_IMAGE=s390x/almalinux:8 GPU_IMAGE=s390x/almalinux:8
DOCKER_GPU_BUILD_ARG="" DOCKER_GPU_BUILD_ARG=""
MANY_LINUX_VERSION="s390x" MANY_LINUX_VERSION="s390x"
;; ;;
manylinux2_28-builder:cuda11*) cuda)
TARGET=cuda_final TARGET=cuda_final
DOCKER_TAG=cuda${GPU_ARCH_VERSION}
# Keep this up to date with the minimum version of CUDA we currently support
GPU_IMAGE=centos:7
DOCKER_GPU_BUILD_ARG="--build-arg BASE_CUDA_VERSION=${GPU_ARCH_VERSION} --build-arg DEVTOOLSET_VERSION=9"
;;
cuda-manylinux_2_28)
TARGET=cuda_final
DOCKER_TAG=cuda${GPU_ARCH_VERSION}
GPU_IMAGE=amd64/almalinux:8 GPU_IMAGE=amd64/almalinux:8
DOCKER_GPU_BUILD_ARG="--build-arg BASE_CUDA_VERSION=${GPU_ARCH_VERSION} --build-arg DEVTOOLSET_VERSION=11" DOCKER_GPU_BUILD_ARG="--build-arg BASE_CUDA_VERSION=${GPU_ARCH_VERSION} --build-arg DEVTOOLSET_VERSION=11"
MANY_LINUX_VERSION="2_28" MANY_LINUX_VERSION="2_28"
;; ;;
manylinux2_28-builder:cuda12*) cuda-aarch64)
TARGET=cuda_final TARGET=cuda_final
GPU_IMAGE=amd64/almalinux:8 DOCKER_TAG=cuda${GPU_ARCH_VERSION}
DOCKER_GPU_BUILD_ARG="--build-arg BASE_CUDA_VERSION=${GPU_ARCH_VERSION} --build-arg DEVTOOLSET_VERSION=13" GPU_IMAGE=arm64v8/centos:7
MANY_LINUX_VERSION="2_28" DOCKER_GPU_BUILD_ARG="--build-arg BASE_CUDA_VERSION=${GPU_ARCH_VERSION} --build-arg DEVTOOLSET_VERSION=11"
;;
manylinux2_28-builder:cuda13*)
TARGET=cuda_final
GPU_IMAGE=amd64/almalinux:8
DOCKER_GPU_BUILD_ARG="--build-arg BASE_CUDA_VERSION=${GPU_ARCH_VERSION} --build-arg DEVTOOLSET_VERSION=13"
MANY_LINUX_VERSION="2_28"
;;
manylinuxaarch64-builder:cuda*)
TARGET=cuda_final
GPU_IMAGE=amd64/almalinux:8
DOCKER_GPU_BUILD_ARG="--build-arg BASE_CUDA_VERSION=${GPU_ARCH_VERSION} --build-arg DEVTOOLSET_VERSION=13"
MANY_LINUX_VERSION="aarch64" MANY_LINUX_VERSION="aarch64"
DOCKERFILE_SUFFIX="_cuda_aarch64" DOCKERFILE_SUFFIX="_cuda_aarch64"
;; ;;
manylinux2_28-builder:rocm*) rocm|rocm-manylinux_2_28)
# we want the patch version of 6.4 instead
if [[ "$GPU_ARCH_VERSION" == *"6.4"* ]]; then
GPU_ARCH_VERSION="${GPU_ARCH_VERSION}.2"
fi
TARGET=rocm_final TARGET=rocm_final
MANY_LINUX_VERSION="2_28" DOCKER_TAG=rocm${GPU_ARCH_VERSION}
DEVTOOLSET_VERSION="11" GPU_IMAGE=rocm/dev-centos-7:${GPU_ARCH_VERSION}-complete
GPU_IMAGE=rocm/dev-almalinux-8:${GPU_ARCH_VERSION}-complete DEVTOOLSET_VERSION="9"
PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201" if [ ${GPU_ARCH_TYPE} == "rocm-manylinux_2_28" ]; then
# add gfx950 conditionally starting in ROCm 7.0 MANY_LINUX_VERSION="2_28"
if [[ "$GPU_ARCH_VERSION" == *"7.0"* ]]; then DEVTOOLSET_VERSION="11"
PYTORCH_ROCM_ARCH="${PYTORCH_ROCM_ARCH};gfx950" GPU_IMAGE=rocm/dev-almalinux-8:${GPU_ARCH_VERSION}-complete
fi fi
PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101"
DOCKER_GPU_BUILD_ARG="--build-arg ROCM_VERSION=${GPU_ARCH_VERSION} --build-arg PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH} --build-arg DEVTOOLSET_VERSION=${DEVTOOLSET_VERSION}" DOCKER_GPU_BUILD_ARG="--build-arg ROCM_VERSION=${GPU_ARCH_VERSION} --build-arg PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH} --build-arg DEVTOOLSET_VERSION=${DEVTOOLSET_VERSION}"
;; ;;
manylinux2_28-builder:xpu) xpu)
TARGET=xpu_final TARGET=xpu_final
DOCKER_TAG=xpu
GPU_IMAGE=amd64/almalinux:8 GPU_IMAGE=amd64/almalinux:8
DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=11" DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=11"
MANY_LINUX_VERSION="2_28" MANY_LINUX_VERSION="2_28"
;; ;;
*) *)
echo "ERROR: Unrecognized image name: ${image}" echo "ERROR: Unrecognized GPU_ARCH_TYPE: ${GPU_ARCH_TYPE}"
exit 1 exit 1
;; ;;
esac esac
IMAGES=''
if [[ -n ${MANY_LINUX_VERSION} && -z ${DOCKERFILE_SUFFIX} ]]; then if [[ -n ${MANY_LINUX_VERSION} && -z ${DOCKERFILE_SUFFIX} ]]; then
DOCKERFILE_SUFFIX=_${MANY_LINUX_VERSION} DOCKERFILE_SUFFIX=_${MANY_LINUX_VERSION}
fi fi
# Only activate this if in CI (
if [ "$(uname -m)" != "s390x" ] && [ -v CI ]; then set -x
# TODO: Remove LimitNOFILE=1048576 patch once https://github.com/pytorch/test-infra/issues/5712
# is resolved. This patch is required in order to fix timing out of Docker build on Amazon Linux 2023. if [ "$(uname -m)" != "s390x" ]; then
sudo sed -i s/LimitNOFILE=infinity/LimitNOFILE=1048576/ /usr/lib/systemd/system/docker.service # TODO: Remove LimitNOFILE=1048576 patch once https://github.com/pytorch/test-infra/issues/5712
sudo systemctl daemon-reload # is resolved. This patch is required in order to fix timing out of Docker build on Amazon Linux 2023.
sudo systemctl restart docker sudo sed -i s/LimitNOFILE=infinity/LimitNOFILE=1048576/ /usr/lib/systemd/system/docker.service
sudo systemctl daemon-reload
sudo systemctl restart docker
fi
DOCKER_BUILDKIT=1 docker build \
${DOCKER_GPU_BUILD_ARG} \
--build-arg "GPU_IMAGE=${GPU_IMAGE}" \
--target "${TARGET}" \
-t "${DOCKER_IMAGE}" \
$@ \
-f "${TOPDIR}/.ci/docker/manywheel/Dockerfile${DOCKERFILE_SUFFIX}" \
"${TOPDIR}/.ci/docker/"
)
GITHUB_REF=${GITHUB_REF:-$(git symbolic-ref -q HEAD || git describe --tags --exact-match)}
GIT_BRANCH_NAME=${GITHUB_REF##*/}
GIT_COMMIT_SHA=${GITHUB_SHA:-$(git rev-parse HEAD)}
DOCKER_IMAGE_BRANCH_TAG=${DOCKER_IMAGE}-${GIT_BRANCH_NAME}
DOCKER_IMAGE_SHA_TAG=${DOCKER_IMAGE}-${GIT_COMMIT_SHA}
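# ${GITHUB_REF##*/} strips everything up to and including the last "/", so
# (hypothetical refs) refs/heads/main -> main and refs/tags/v2.5.0 -> v2.5.0,
# yielding tags like ${DOCKER_IMAGE}-main and ${DOCKER_IMAGE}-<commit sha>.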
if [[ "${WITH_PUSH}" == true ]]; then
(
set -x
docker push "${DOCKER_IMAGE}"
if [[ -n ${GITHUB_REF} ]]; then
docker tag ${DOCKER_IMAGE} ${DOCKER_IMAGE_BRANCH_TAG}
docker tag ${DOCKER_IMAGE} ${DOCKER_IMAGE_SHA_TAG}
docker push "${DOCKER_IMAGE_BRANCH_TAG}"
docker push "${DOCKER_IMAGE_SHA_TAG}"
fi
)
fi fi
tmp_tag=$(basename "$(mktemp -u)" | tr '[:upper:]' '[:lower:]')
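# mktemp -u only prints a unique name (e.g. /tmp/tmp.q3WZxA9z1B) without
# creating the file; basename plus tr turns it into a disposable lowercase tag
# such as tmp.q3wzxa9z1b, since Docker repository names must be lowercase.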
DOCKER_BUILDKIT=1 docker build \
${DOCKER_GPU_BUILD_ARG} \
--build-arg "GPU_IMAGE=${GPU_IMAGE}" \
--build-arg "OPENBLAS_VERSION=${OPENBLAS_VERSION:-}" \
--build-arg "ACL_VERSION=${ACL_VERSION:-}" \
--target "${TARGET}" \
-t "${tmp_tag}" \
$@ \
-f "${TOPDIR}/.ci/docker/manywheel/Dockerfile${DOCKERFILE_SUFFIX}" \
"${TOPDIR}/.ci/docker/"

View File

@ -97,7 +97,7 @@ find /opt/_internal -type f -print0 \
| xargs -0 -n1 strip --strip-unneeded 2>/dev/null || true | xargs -0 -n1 strip --strip-unneeded 2>/dev/null || true
# We do not need the Python test suites, or indeed the precompiled .pyc and # We do not need the Python test suites, or indeed the precompiled .pyc and
# .pyo files. Partially cribbed from: # .pyo files. Partially cribbed from:
# https://github.com/docker-library/python/blob/master/3.4/slim/Dockerfile # @lint-ignore # https://github.com/docker-library/python/blob/master/3.4/slim/Dockerfile
find /opt/_internal \ find /opt/_internal \
\( -type d -a -name test -o -name tests \) \ \( -type d -a -name test -o -name tests \) \
-o \( -type f -a -name '*.pyc' -o -name '*.pyo' \) \ -o \( -type f -a -name '*.pyc' -o -name '*.pyo' \) \

View File

@ -2,8 +2,8 @@
# Helper utilities for build # Helper utilities for build
# Script used only in CD pipeline # Script used only in CD pipeline
OPENSSL_DOWNLOAD_URL=https://www.openssl.org/source/old/1.1.1/ # @lint-ignore OPENSSL_DOWNLOAD_URL=https://www.openssl.org/source/old/1.1.1/
CURL_DOWNLOAD_URL=https://curl.se/download CURL_DOWNLOAD_URL=https://curl.askapache.com/download
AUTOCONF_DOWNLOAD_URL=https://ftp.gnu.org/gnu/autoconf AUTOCONF_DOWNLOAD_URL=https://ftp.gnu.org/gnu/autoconf

View File

@ -10,18 +10,12 @@ boto3==1.35.42
#Pinned versions: 1.19.12, 1.16.34 #Pinned versions: 1.19.12, 1.16.34
#test that import: #test that import:
build==1.3.0
#Description: A simple, correct Python build frontend.
#Pinned versions: 1.3.0
#test that import:
click click
#Description: Command Line Interface Creation Kit #Description: Command Line Interface Creation Kit
#Pinned versions: #Pinned versions:
#test that import: #test that import:
coremltools==5.0b5 ; python_version < "3.12" coremltools==5.0b5 ; python_version < "3.12"
coremltools==8.3 ; python_version == "3.12"
#Description: Apple framework for ML integration #Description: Apple framework for ML integration
#Pinned versions: 5.0b5 #Pinned versions: 5.0b5
#test that import: #test that import:
@ -47,15 +41,15 @@ fbscribelogger==0.1.7
#Pinned versions: 0.1.6 #Pinned versions: 0.1.6
#test that import: #test that import:
flatbuffers==24.12.23 flatbuffers==2.0
#Description: cross platform serialization library #Description: cross platform serialization library
#Pinned versions: 24.12.23 #Pinned versions: 2.0
#test that import: #test that import:
hypothesis==6.56.4 hypothesis==5.35.1
# Pin hypothesis to avoid flakiness: https://github.com/pytorch/pytorch/issues/31136 # Pin hypothesis to avoid flakiness: https://github.com/pytorch/pytorch/issues/31136
#Description: advanced library for generating parametrized tests #Description: advanced library for generating parametrized tests
#Pinned versions: 6.56.4 #Pinned versions: 3.44.6, 4.53.2
#test that import: test_xnnpack_integration.py, test_pruning_op.py, test_nn.py #test that import: test_xnnpack_integration.py, test_pruning_op.py, test_nn.py
junitparser==2.1.1 junitparser==2.1.1
@ -68,12 +62,10 @@ lark==0.12.0
#Pinned versions: 0.12.0 #Pinned versions: 0.12.0
#test that import: #test that import:
librosa>=0.6.2 ; python_version < "3.11" and platform_machine != "s390x" librosa>=0.6.2 ; python_version < "3.11"
librosa==0.10.2 ; python_version == "3.12" and platform_machine != "s390x"
#Description: A python package for music and audio analysis #Description: A python package for music and audio analysis
#Pinned versions: >=0.6.2 #Pinned versions: >=0.6.2
#test that import: test_spectral_ops.py #test that import: test_spectral_ops.py
#librosa depends on numba; disable it for s390x while numba is disabled too
#mkl #this breaks linux-bionic-rocm4.5-py3.7 #mkl #this breaks linux-bionic-rocm4.5-py3.7
#Description: Intel oneAPI Math Kernel Library #Description: Intel oneAPI Math Kernel Library
@ -98,11 +90,10 @@ librosa==0.10.2 ; python_version == "3.12" and platform_machine != "s390x"
#Pinned versions: #Pinned versions:
#test that import: #test that import:
mypy==1.16.0 ; platform_system == "Linux" mypy==1.13.0
# Pin MyPy version because new errors are likely to appear with each release # Pin MyPy version because new errors are likely to appear with each release
# Skip on Windows as lots of type annotations are POSIX specific
#Description: linter #Description: linter
#Pinned versions: 1.16.0 #Pinned versions: 1.10.0
#test that import: test_typing.py, test_type_hints.py #test that import: test_typing.py, test_type_hints.py
networkx==2.8.8 networkx==2.8.8
@ -111,18 +102,19 @@ networkx==2.8.8
#Pinned versions: 2.8.8 #Pinned versions: 2.8.8
#test that import: functorch #test that import: functorch
ninja==1.11.1.4 #ninja
#Description: build system. Used in some tests. Used in build to generate build #Description: build system. Note that it install from
#time tracing information #here breaks things so it is commented out
#Pinned versions: 1.11.1.4 #Pinned versions: 1.10.0.post1
#test that import: run_test.py, test_cpp_extensions_aot.py,test_determination.py #test that import: run_test.py, test_cpp_extensions_aot.py,test_determination.py
numba==0.55.2 ; python_version == "3.10" and platform_machine != "s390x" numba==0.49.0 ; python_version < "3.9"
numba==0.60.0 ; python_version == "3.12" and platform_machine != "s390x" numba==0.55.2 ; python_version == "3.9"
numba==0.55.2 ; python_version == "3.10"
#Description: Just-In-Time Compiler for Numerical Functions #Description: Just-In-Time Compiler for Numerical Functions
#Pinned versions: 0.55.2, 0.60.0 #Pinned versions: 0.54.1, 0.49.0, <=0.49.1
#test that import: test_numba_integration.py #test that import: test_numba_integration.py
#Need release > 0.61.2 for s390x due to https://github.com/numba/numba/pull/10073 #For numba issue see https://github.com/pytorch/pytorch/issues/51511
#numpy #numpy
#Description: Provides N-dimensional arrays and linear algebra #Description: Provides N-dimensional arrays and linear algebra
@ -136,7 +128,7 @@ numba==0.60.0 ; python_version == "3.12" and platform_machine != "s390x"
#test_nn.py, test_namedtensor.py, test_linalg.py, test_jit_cuda_fuser.py, #test_nn.py, test_namedtensor.py, test_linalg.py, test_jit_cuda_fuser.py,
#test_jit.py, test_indexing.py, test_datapipe.py, test_dataloader.py, #test_jit.py, test_indexing.py, test_datapipe.py, test_dataloader.py,
#test_binary_ufuncs.py #test_binary_ufuncs.py
numpy==1.22.4; python_version == "3.10" numpy==1.22.4; python_version == "3.9" or python_version == "3.10"
numpy==1.26.2; python_version == "3.11" or python_version == "3.12" numpy==1.26.2; python_version == "3.11" or python_version == "3.12"
numpy==2.1.2; python_version >= "3.13" numpy==2.1.2; python_version >= "3.13"
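#The conditional pins above use PEP 508 environment markers: pip evaluates the
#expression after ";" against the running interpreter and skips the requirement
#when it is false, e.g. numpy==1.22.4 is only selected on Python 3.10.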
@ -168,13 +160,13 @@ optree==0.13.0
pillow==11.0.0 pillow==11.0.0
#Description: Python Imaging Library fork #Description: Python Imaging Library fork
#Pinned versions: 11.0.0 #Pinned versions: 10.3.0
#test that import: #test that import:
protobuf==5.29.5 protobuf==3.20.2
#Description: Google's data interchange format #Description: Googles data interchange format
#Pinned versions: 5.29.5 #Pinned versions: 3.20.1
#test that import: test_tensorboard.py, test/onnx/* #test that import: test_tensorboard.py
psutil psutil
#Description: information on running processes and system utilization #Description: information on running processes and system utilization
@ -216,7 +208,7 @@ pytest-subtests==0.13.1
#Pinned versions: #Pinned versions:
#test that import: #test that import:
xdoctest==1.3.0 xdoctest==1.1.0
#Description: runs doctests in pytest #Description: runs doctests in pytest
#Pinned versions: 1.1.0 #Pinned versions: 1.1.0
#test that import: #test that import:
@ -226,9 +218,9 @@ pygments==2.15.0
#Pinned versions: 2.12.0 #Pinned versions: 2.12.0
#test that import: the doctests #test that import: the doctests
#pyyaml #PyYAML
#Description: data serialization format #Description: data serialization format
#Pinned versions: 6.0.2 #Pinned versions:
#test that import: #test that import:
#requests #requests
@ -238,12 +230,13 @@ pygments==2.15.0
#rich #rich
#Description: rich text and beautiful formatting in the terminal #Description: rich text and beautiful formatting in the terminal
#Pinned versions: 14.1.0 #Pinned versions: 10.9.0
#test that import: #test that import:
scikit-image==0.22.0 scikit-image==0.19.3 ; python_version < "3.10"
scikit-image==0.22.0 ; python_version >= "3.10"
#Description: image processing routines #Description: image processing routines
#Pinned versions: 0.22.0 #Pinned versions:
#test that import: test_nn.py #test that import: test_nn.py
#scikit-learn #scikit-learn
@ -265,8 +258,13 @@ scipy==1.14.1 ; python_version >= "3.12"
#Pinned versions: #Pinned versions:
#test that import: #test that import:
tb-nightly==2.13.0a20230426
#Description: TensorBoard
#Pinned versions:
#test that import:
# needed by torchgen utils # needed by torchgen utils
typing-extensions==4.12.2 typing-extensions>=4.10.0
#Description: type hints for python #Description: type hints for python
#Pinned versions: #Pinned versions:
#test that import: #test that import:
@ -296,7 +294,7 @@ ghstack==0.8.0
#Pinned versions: 0.8.0 #Pinned versions: 0.8.0
#test that import: #test that import:
jinja2==3.1.6 jinja2==3.1.5
#Description: jinja2 template engine #Description: jinja2 template engine
#Pinned versions: 3.1.4 #Pinned versions: 3.1.4
#test that import: #test that import:
@ -306,7 +304,7 @@ pytest-cpp==2.3.0
#Pinned versions: 2.3.0 #Pinned versions: 2.3.0
#test that import: #test that import:
z3-solver==4.15.1.0 ; platform_machine != "s390x" z3-solver==4.12.6.0
#Description: The Z3 Theorem Prover Project #Description: The Z3 Theorem Prover Project
#Pinned versions: #Pinned versions:
#test that import: #test that import:
@ -327,6 +325,8 @@ pywavelets==1.7.0 ; python_version >= "3.12"
lxml==5.3.0 lxml==5.3.0
#Description: This is a requirement of unittest-xml-reporting #Description: This is a requirement of unittest-xml-reporting
# Python-3.9 binaries
PyGithub==2.3.0 PyGithub==2.3.0
sympy==1.13.3 sympy==1.13.3
@ -334,12 +334,12 @@ sympy==1.13.3
#Pinned versions: #Pinned versions:
#test that import: #test that import:
onnx==1.18.0 onnx==1.17.0
#Description: Required by onnx tests, and mypy and test_public_bindings.py when checking torch.onnx._internal #Description: Required by mypy and test_public_bindings.py when checking torch.onnx._internal
#Pinned versions: #Pinned versions:
#test that import: #test that import:
onnxscript==0.5.3 onnxscript==0.1.0
#Description: Required by mypy and test_public_bindings.py when checking torch.onnx._internal #Description: Required by mypy and test_public_bindings.py when checking torch.onnx._internal
#Pinned versions: #Pinned versions:
#test that import: #test that import:
@ -353,21 +353,22 @@ parameterized==0.8.1
#Pinned versions: 1.24.0 #Pinned versions: 1.24.0
#test that import: test_sac_estimator.py #test that import: test_sac_estimator.py
pwlf==2.2.1 pwlf==2.2.1 ; python_version >= "3.8"
#Description: required for testing torch/distributed/_tools/sac_estimator.py #Description: required for testing torch/distributed/_tools/sac_estimator.py
#Pinned versions: 2.2.1 #Pinned versions: 2.2.1
#test that import: test_sac_estimator.py #test that import: test_sac_estimator.py
# To build PyTorch itself
pyyaml==6.0.2
pyzstd
setuptools==78.1.1
packaging==23.1
six
# To build PyTorch itself
astunparse
PyYAML
pyzstd
setuptools
ninja==1.11.1 ; platform_machine == "aarch64"
scons==4.5.2 ; platform_machine == "aarch64" scons==4.5.2 ; platform_machine == "aarch64"
pulp==2.9.0 pulp==2.9.0 ; python_version >= "3.8"
#Description: required for testing ilp formulation under torch/distributed/_tools #Description: required for testing ilp formulation under torch/distributed/_tools
#Pinned versions: 2.9.0 #Pinned versions: 2.9.0
#test that import: test_sac_ilp.py #test that import: test_sac_ilp.py
@ -376,22 +377,3 @@ dataclasses_json==0.6.7
#Description: required for data pipeline and scripts under tools/stats #Description: required for data pipeline and scripts under tools/stats
#Pinned versions: 0.6.7 #Pinned versions: 0.6.7
#test that import: #test that import:
cmake==3.31.6
#Description: required for building
tlparse==0.4.0
#Description: required for log parsing
filelock==3.18.0
#Description: required for inductor testing
cuda-bindings>=12.0,<13.0 ; platform_machine != "s390x" and platform_system != "Darwin"
#Description: required for testing CUDAGraph::raw_cuda_graph(). See
#https://nvidia.github.io/cuda-python/cuda-bindings/latest/support.html for how this
#version was chosen. Note that "Any fix in the latest bindings would be backported to
#the prior major version" means only the newest version of cuda-bindings gets fixes.
#Depending on the latest 12.x version is okay because all 12.y versions will be
#supported via "CUDA minor version compatibility". PyTorch builds against 13.z
#versions of the CUDA toolkit also work with 12.x versions of cuda-bindings, because
#newer drivers work with old toolkits.
#test that import: test_cuda.py
setuptools-git-versioning==2.1.0
scikit-build==0.18.1
pyre-extensions==0.0.32
tabulate==0.9.0
#Description: These package are needed to build FBGEMM and torchrec on PyTorch CI

View File

@ -1,35 +1,18 @@
sphinx==5.3.0 sphinx==5.3.0
#Description: This is used to generate PyTorch docs #Description: This is used to generate PyTorch docs
#Pinned versions: 5.3.0 #Pinned versions: 5.3.0
-e git+https://github.com/pytorch/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme
standard-imghdr==3.13.0; python_version >= "3.13"
#Description: This is needed by Sphinx, so it needs to be added here.
# The reasons are as follows:
# 1) This module has been removed from the Python standard library since Python 3.13(https://peps.python.org/pep-0594/#imghdr);
# 2) The current version of Sphinx (5.3.0) is not compatible with Python 3.13.
# Once Sphinx is upgraded to a version compatible with Python 3.13 or later, we can remove this dependency.
-e git+https://github.com/pytorch/pytorch_sphinx_theme.git@71e55749be14ceb56e7f8211a9fb649866b87ad4#egg=pytorch_sphinx_theme2
# TODO: sphinxcontrib.katex 0.9.0 adds a local KaTeX server to speed up pre-rendering # TODO: sphinxcontrib.katex 0.9.0 adds a local KaTeX server to speed up pre-rendering
# but it doesn't seem to work and hangs around idly. The initial thought is that it is probably # but it doesn't seem to work and hangs around idly. The initial thought is probably
# something related to Docker setup. We can investigate this later. # something related to Docker setup. We can investigate this later
sphinxcontrib.katex==0.8.6 sphinxcontrib.katex==0.8.6
#Description: This is used to generate PyTorch docs #Description: This is used to generate PyTorch docs
#Pinned versions: 0.8.6 #Pinned versions: 0.8.6
sphinxext-opengraph==0.9.1 matplotlib==3.5.3
#Description: This is used to generate PyTorch docs #Description: This is used to generate PyTorch docs
#Pinned versions: 0.9.1 #Pinned versions: 3.5.3
sphinx_sitemap==2.6.0
#Description: This is used to generate sitemap for PyTorch docs
#Pinned versions: 2.6.0
matplotlib==3.5.3 ; python_version < "3.13"
matplotlib==3.6.3 ; python_version >= "3.13"
#Description: This is used to generate PyTorch docs
#Pinned versions: 3.6.3 if python > 3.12. Otherwise 3.5.3.
tensorboard==2.13.0 ; python_version < "3.13" tensorboard==2.13.0 ; python_version < "3.13"
tensorboard==2.18.0 ; python_version >= "3.13" tensorboard==2.18.0 ; python_version >= "3.13"
@ -57,12 +40,11 @@ IPython==8.12.0
#Pinned versions: 8.12.0 #Pinned versions: 8.12.0
myst-nb==0.17.2 myst-nb==0.17.2
#Description: This is used to generate PyTorch functorch and torch.compile docs. #Description: This is used to generate PyTorch functorch docs
#Pinned versions: 0.17.2 #Pinned versions: 0.13.2
# The following are required to build torch.distributed.elastic.rendezvous.etcd* docs # The following are required to build torch.distributed.elastic.rendezvous.etcd* docs
python-etcd==0.4.5 python-etcd==0.4.5
sphinx-copybutton==0.5.0 sphinx-copybutton==0.5.0
sphinx-design==0.4.0 sphinx-panels==0.4.1
sphinxcontrib-mermaid==1.0.0
myst-parser==0.18.1 myst-parser==0.18.1

View File

@ -1 +1 @@
3.5.0 3.2.0

View File

@ -1 +0,0 @@
3.5.0

View File

@ -1,155 +0,0 @@
# Cross-compilation Docker container for RISC-V architecture
ARG UBUNTU_VERSION
FROM --platform=linux/amd64 ubuntu:${UBUNTU_VERSION} as base
ARG UBUNTU_VERSION
ENV GCC_VERSION=14
ENV PYTHON_VERSION=3.12.3
ENV DEBIAN_FRONTEND=noninteractive
ENV CC=riscv64-linux-gnu-gcc-${GCC_VERSION}
ENV CXX=riscv64-linux-gnu-g++-${GCC_VERSION}
ENV QEMU_LD_PREFIX=/usr/riscv64-linux-gnu/
ENV SYSROOT=/opt/sysroot
# Install basic dependencies
RUN apt-get update && apt-get install -y \
ninja-build \
autoconf \
automake \
libtool \
patchelf \
ccache \
git \
wget \
python3-pip \
python3-venv \
python-is-python3 \
cmake \
sudo \
lsb-release \
gcc-${GCC_VERSION}-riscv64-linux-gnu \
g++-${GCC_VERSION}-riscv64-linux-gnu \
pkg-config \
&& rm -rf /var/lib/apt/lists/*
# Install user
COPY ./common/install_user.sh install_user.sh
RUN bash ./install_user.sh && rm install_user.sh
FROM base as python
ARG ZLIB_VERSION=1.3.1
ARG FFI_VERSION=3.4.6
ARG BZ2_VERSION=1.0.8
ARG XZ_VERSION=5.4.6
ARG OPENSSL_VERSION=3.2.1
# Set up sysroot directory for dependencies
ENV PKG_CONFIG_PATH=${SYSROOT}/lib/pkgconfig
ENV PKG_CONFIG_SYSROOT_DIR=${SYSROOT}
WORKDIR /opt
# Build zlib (for compression)
RUN echo "--- Building zlib ---" \
&& wget -c https://www.zlib.net/zlib-${ZLIB_VERSION}.tar.gz \
&& tar -xf zlib-${ZLIB_VERSION}.tar.gz --no-same-permissions --no-same-owner \
&& cd zlib-${ZLIB_VERSION}/ \
&& mkdir build && cd build \
&& ../configure --prefix=${SYSROOT} \
&& make -j$(nproc) && make install \
&& cd ../..
# Build libffi (for ctypes module)
RUN echo "--- Building libffi ---" \
&& wget -c https://github.com/libffi/libffi/releases/download/v${FFI_VERSION}/libffi-${FFI_VERSION}.tar.gz \
&& tar -xf libffi-${FFI_VERSION}.tar.gz --no-same-permissions --no-same-owner \
&& cd libffi-${FFI_VERSION}/ \
&& mkdir build && cd build \
&& ../configure --prefix=${SYSROOT} --host=riscv64-linux-gnu --build=x86_64-linux-gnu \
&& make -j$(nproc) && make install \
&& cd ../..
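# The --host/--build pair is what switches autotools into cross mode: --build
# is the machine doing the compiling (x86_64-linux-gnu) and --host is the
# machine the binaries will run on (riscv64-linux-gnu); configure then honors
# CC/CXX from the environment above instead of probing for a native compiler.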
# Build bzip2 (for bz2 module)
RUN echo "--- Building bzip2 ---" \
&& wget -c https://sourceware.org/pub/bzip2/bzip2-${BZ2_VERSION}.tar.gz \
&& tar -xf bzip2-${BZ2_VERSION}.tar.gz --no-same-permissions --no-same-owner \
&& cd bzip2-${BZ2_VERSION}/ \
&& make CC=riscv64-linux-gnu-gcc-${GCC_VERSION} bzip2 bzip2recover libbz2.a \
&& make CC=riscv64-linux-gnu-gcc-${GCC_VERSION} -f Makefile-libbz2_so \
&& make install PREFIX=${SYSROOT} \
&& cp libbz2.so.${BZ2_VERSION} ${SYSROOT}/lib/ \
&& cd ${SYSROOT}/lib/ \
&& ln -sf libbz2.so.${BZ2_VERSION} libbz2.so.1.0 \
&& ln -sf libbz2.so.1.0 libbz2.so \
&& cd /opt/
# Build xz (for lzma module)
RUN echo "--- Building xz ---" \
&& wget -c https://github.com/tukaani-project/xz/releases/download/v${XZ_VERSION}/xz-${XZ_VERSION}.tar.gz \
&& tar -xf xz-${XZ_VERSION}.tar.gz --no-same-permissions --no-same-owner \
&& cd xz-${XZ_VERSION} \
&& mkdir build && cd build \
&& ../configure --prefix=${SYSROOT} --host=riscv64-linux-gnu --build=x86_64-linux-gnu \
&& make -j$(nproc) && make install \
&& cd ../..
# Build OpenSSL (for ssl module)
RUN echo "--- Building OpenSSL ---" \
&& wget -c https://www.openssl.org/source/openssl-${OPENSSL_VERSION}.tar.gz \
&& tar -xf openssl-${OPENSSL_VERSION}.tar.gz --no-same-permissions --no-same-owner \
&& cd openssl-${OPENSSL_VERSION}/ \
&& mkdir build && cd build \
&& ../Configure linux64-riscv64 --prefix=${SYSROOT} \
&& make -j$(nproc) && make install_sw \
&& cd ../..
# Build SQLite3 (for sqlite3 module)
RUN echo "--- Building SQLite3 ---" \
&& wget -c https://www.sqlite.org/2024/sqlite-autoconf-3450200.tar.gz \
&& tar -xf sqlite-autoconf-3450200.tar.gz --no-same-permissions --no-same-owner \
&& cd sqlite-autoconf-3450200 \
&& mkdir build && cd build \
&& ../configure --prefix=${SYSROOT} --host=riscv64-linux-gnu --build=x86_64-linux-gnu \
&& make -j$(nproc) && make install \
&& cd ../..
# Build and install RISC-V Python with all modules
RUN wget -c https://www.python.org/ftp/python/${PYTHON_VERSION}/Python-${PYTHON_VERSION}.tgz \
&& tar -xf Python-${PYTHON_VERSION}.tgz --no-same-permissions --no-same-owner \
&& cd Python-${PYTHON_VERSION} \
&& mkdir build && cd build \
&& ../configure \
--host=riscv64-linux-gnu \
--build=x86_64-linux-gnu \
--prefix=${SYSROOT} \
--enable-shared \
--disable-ipv6 \
--with-build-python=/usr/bin/python3 \
--with-ensurepip=no \
ac_cv_file__dev_ptmx=yes \
ac_cv_file__dev_ptc=no \
&& make -j$(nproc) \
&& make install
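# Cross builds cannot execute target test programs, so configure checks that
# would normally run code are pre-answered via autoconf cache variables:
# ac_cv_file__dev_ptmx=yes / ac_cv_file__dev_ptc=no assert what the target's
# /dev provides. --with-build-python supplies a native interpreter for build
# steps that must execute during compilation (e.g. freezing modules).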
FROM base as final
COPY --from=python /opt/sysroot /opt/sysroot
# Install crossenv and cmake
RUN pip install crossenv cmake==4.0.0 --break-system-packages \
&& /usr/bin/python3 -m crossenv ${SYSROOT}/bin/python3 /opt/riscv-cross-env
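# crossenv overlays the target interpreter from ${SYSROOT} onto the native
# /usr/bin/python3 so that pip inside the activated environment cross-compiles
# wheels for riscv64 rather than x86_64. Hypothetical usage:
#   source /opt/riscv-cross-env/bin/activate
#   pip install numpy   # built with the riscv64 cross toolchain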
# Add pip-installed cmake binaries to PATH
ENV PATH="/usr/local/bin:${PATH}"
# Set up cross Python environment
SHELL ["/bin/bash", "-c"]
RUN source /opt/riscv-cross-env/bin/activate \
&& pip install setuptools pyyaml typing_extensions wheel
# Set default environment variables for PyTorch build
ENV Python_ROOT_DIR=${SYSROOT}
ENV OPENSSL_ROOT_DIR=${SYSROOT}
USER jenkins
CMD ["bash"]

View File

@ -0,0 +1,175 @@
ARG UBUNTU_VERSION
ARG CUDA_VERSION
ARG IMAGE_NAME
FROM ${IMAGE_NAME}
ARG UBUNTU_VERSION
ARG CUDA_VERSION
ENV DEBIAN_FRONTEND noninteractive
# Install common dependencies (so that this step can be cached separately)
COPY ./common/install_base.sh install_base.sh
RUN bash ./install_base.sh && rm install_base.sh
# Install user
COPY ./common/install_user.sh install_user.sh
RUN bash ./install_user.sh && rm install_user.sh
# Install katex
ARG KATEX
COPY ./common/install_docs_reqs.sh install_docs_reqs.sh
RUN bash ./install_docs_reqs.sh && rm install_docs_reqs.sh
# Install conda and other packages (e.g., numpy, pytest)
ARG ANACONDA_PYTHON_VERSION
ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION
ENV PATH /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/bin:/opt/conda/bin:$PATH
ARG CONDA_CMAKE
COPY requirements-ci.txt /opt/conda/requirements-ci.txt
COPY ./common/install_conda.sh install_conda.sh
COPY ./common/common_utils.sh common_utils.sh
COPY ./common/install_magma_conda.sh install_magma_conda.sh
RUN bash ./install_conda.sh && rm install_conda.sh install_magma_conda.sh common_utils.sh /opt/conda/requirements-ci.txt
# Install gcc
ARG GCC_VERSION
COPY ./common/install_gcc.sh install_gcc.sh
RUN bash ./install_gcc.sh && rm install_gcc.sh
# Install clang
ARG CLANG_VERSION
COPY ./common/install_clang.sh install_clang.sh
RUN bash ./install_clang.sh && rm install_clang.sh
# (optional) Install protobuf for ONNX
ARG PROTOBUF
COPY ./common/install_protobuf.sh install_protobuf.sh
RUN if [ -n "${PROTOBUF}" ]; then bash ./install_protobuf.sh; fi
RUN rm install_protobuf.sh
ENV INSTALLED_PROTOBUF ${PROTOBUF}
# (optional) Install database packages like LMDB and LevelDB
ARG DB
COPY ./common/install_db.sh install_db.sh
RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi
RUN rm install_db.sh
ENV INSTALLED_DB ${DB}
# (optional) Install vision packages like OpenCV
ARG VISION
COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./
RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi
RUN rm install_vision.sh cache_vision_models.sh common_utils.sh
ENV INSTALLED_VISION ${VISION}
# (optional) Install UCC
ARG UCX_COMMIT
ARG UCC_COMMIT
ENV UCX_COMMIT $UCX_COMMIT
ENV UCC_COMMIT $UCC_COMMIT
ENV UCX_HOME /usr
ENV UCC_HOME /usr
ADD ./common/install_ucc.sh install_ucc.sh
RUN if [ -n "${UCX_COMMIT}" ] && [ -n "${UCC_COMMIT}" ]; then bash ./install_ucc.sh; fi
RUN rm install_ucc.sh
COPY ./common/install_openssl.sh install_openssl.sh
ENV OPENSSL_ROOT_DIR /opt/openssl
RUN bash ./install_openssl.sh
ENV OPENSSL_DIR /opt/openssl
ARG INDUCTOR_BENCHMARKS
ARG ANACONDA_PYTHON_VERSION
ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION
COPY ./common/install_inductor_benchmark_deps.sh install_inductor_benchmark_deps.sh
COPY ./common/common_utils.sh common_utils.sh
COPY ci_commit_pins/huggingface.txt huggingface.txt
COPY ci_commit_pins/timm.txt timm.txt
RUN if [ -n "${INDUCTOR_BENCHMARKS}" ]; then bash ./install_inductor_benchmark_deps.sh; fi
RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface.txt
# (optional) Install non-default CMake version
ARG CMAKE_VERSION
COPY ./common/install_cmake.sh install_cmake.sh
RUN if [ -n "${CMAKE_VERSION}" ]; then bash ./install_cmake.sh; fi
RUN rm install_cmake.sh
ARG TRITON
# Install triton; this needs to be done before sccache because the latter will
# try to reach out to S3, which docker build runners don't have access to
COPY ./common/install_triton.sh install_triton.sh
COPY ./common/common_utils.sh common_utils.sh
COPY ci_commit_pins/triton.txt triton.txt
COPY triton_version.txt triton_version.txt
RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi
RUN rm install_triton.sh common_utils.sh triton.txt triton_version.txt
ARG HALIDE
# Build and install halide
COPY ./common/install_halide.sh install_halide.sh
COPY ./common/common_utils.sh common_utils.sh
COPY ci_commit_pins/halide.txt halide.txt
RUN if [ -n "${HALIDE}" ]; then bash ./install_halide.sh; fi
RUN rm install_halide.sh common_utils.sh halide.txt
# Install ccache/sccache (do this last, so we get priority in PATH)
COPY ./common/install_cache.sh install_cache.sh
ENV PATH /opt/cache/bin:$PATH
# See https://github.com/pytorch/pytorch/issues/82174
# TODO(sdym@fb.com):
# check if this is still needed after the full migration off Xenial
ENV CARGO_NET_GIT_FETCH_WITH_CLI true
RUN bash ./install_cache.sh && rm install_cache.sh
ENV CMAKE_CUDA_COMPILER_LAUNCHER=/opt/cache/bin/sccache
# Add jni.h for java host build
COPY ./common/install_jni.sh install_jni.sh
COPY ./java/jni.h jni.h
RUN bash ./install_jni.sh && rm install_jni.sh
# Install Open MPI for CUDA
COPY ./common/install_openmpi.sh install_openmpi.sh
RUN if [ -n "${CUDA_VERSION}" ]; then bash install_openmpi.sh; fi
RUN rm install_openmpi.sh
# Include BUILD_ENVIRONMENT environment variable in image
ARG BUILD_ENVIRONMENT
ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT}
# AWS specific CUDA build guidance
ENV TORCH_CUDA_ARCH_LIST Maxwell
ENV TORCH_NVCC_FLAGS "-Xfatbin -compress-all"
ENV CUDA_PATH /usr/local/cuda
# Install LLVM dev version (Defined in the pytorch/builder github repository)
COPY --from=pytorch/llvm:9.0.1 /opt/llvm /opt/llvm
# Install CUDNN
ARG CUDNN_VERSION
ARG CUDA_VERSION
COPY ./common/install_cudnn.sh install_cudnn.sh
RUN if [ -n "${CUDNN_VERSION}" ]; then bash install_cudnn.sh; fi
RUN rm install_cudnn.sh
# Install CUSPARSELT
ARG CUDA_VERSION
COPY ./common/install_cusparselt.sh install_cusparselt.sh
RUN bash install_cusparselt.sh
RUN rm install_cusparselt.sh
# Install CUDSS
ARG CUDA_VERSION
COPY ./common/install_cudss.sh install_cudss.sh
RUN bash install_cudss.sh
RUN rm install_cudss.sh
# Delete /usr/local/cuda-11.X/cuda-11.X symlinks
RUN if [ -h /usr/local/cuda-11.6/cuda-11.6 ]; then rm /usr/local/cuda-11.6/cuda-11.6; fi
RUN if [ -h /usr/local/cuda-11.7/cuda-11.7 ]; then rm /usr/local/cuda-11.7/cuda-11.7; fi
RUN if [ -h /usr/local/cuda-12.1/cuda-12.1 ]; then rm /usr/local/cuda-12.1/cuda-12.1; fi
RUN if [ -h /usr/local/cuda-12.4/cuda-12.4 ]; then rm /usr/local/cuda-12.4/cuda-12.4; fi
USER jenkins
CMD ["bash"]

View File

@ -25,9 +25,9 @@ RUN bash ./install_docs_reqs.sh && rm install_docs_reqs.sh
# Install conda and other packages (e.g., numpy, pytest) # Install conda and other packages (e.g., numpy, pytest)
ARG ANACONDA_PYTHON_VERSION ARG ANACONDA_PYTHON_VERSION
ARG BUILD_ENVIRONMENT
ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION
ENV PATH /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/bin:/opt/conda/bin:$PATH ENV PATH /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/bin:/opt/conda/bin:$PATH
ARG CONDA_CMAKE
COPY requirements-ci.txt /opt/conda/requirements-ci.txt COPY requirements-ci.txt /opt/conda/requirements-ci.txt
COPY ./common/install_conda.sh install_conda.sh COPY ./common/install_conda.sh install_conda.sh
COPY ./common/common_utils.sh common_utils.sh COPY ./common/common_utils.sh common_utils.sh
@ -43,6 +43,20 @@ ARG CLANG_VERSION
COPY ./common/install_clang.sh install_clang.sh COPY ./common/install_clang.sh install_clang.sh
RUN bash ./install_clang.sh && rm install_clang.sh RUN bash ./install_clang.sh && rm install_clang.sh
# (optional) Install protobuf for ONNX
ARG PROTOBUF
COPY ./common/install_protobuf.sh install_protobuf.sh
RUN if [ -n "${PROTOBUF}" ]; then bash ./install_protobuf.sh; fi
RUN rm install_protobuf.sh
ENV INSTALLED_PROTOBUF ${PROTOBUF}
# (optional) Install database packages like LMDB and LevelDB
ARG DB
COPY ./common/install_db.sh install_db.sh
RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi
RUN rm install_db.sh
ENV INSTALLED_DB ${DB}
# (optional) Install vision packages like OpenCV # (optional) Install vision packages like OpenCV
ARG VISION ARG VISION
COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./ COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./
@ -52,15 +66,11 @@ ENV INSTALLED_VISION ${VISION}
# Install rocm # Install rocm
ARG ROCM_VERSION ARG ROCM_VERSION
RUN mkdir ci_commit_pins
COPY ./common/common_utils.sh common_utils.sh
COPY ./ci_commit_pins/rocm-composable-kernel.txt ci_commit_pins/rocm-composable-kernel.txt
COPY ./common/install_rocm.sh install_rocm.sh COPY ./common/install_rocm.sh install_rocm.sh
RUN bash ./install_rocm.sh RUN bash ./install_rocm.sh
RUN rm install_rocm.sh common_utils.sh RUN rm install_rocm.sh
RUN rm -r ci_commit_pins
COPY ./common/install_rocm_magma.sh install_rocm_magma.sh COPY ./common/install_rocm_magma.sh install_rocm_magma.sh
RUN bash ./install_rocm_magma.sh ${ROCM_VERSION} RUN bash ./install_rocm_magma.sh
RUN rm install_rocm_magma.sh RUN rm install_rocm_magma.sh
ADD ./common/install_miopen.sh install_miopen.sh ADD ./common/install_miopen.sh install_miopen.sh
RUN bash ./install_miopen.sh ${ROCM_VERSION} && rm install_miopen.sh RUN bash ./install_miopen.sh ${ROCM_VERSION} && rm install_miopen.sh
@ -100,11 +110,16 @@ ARG ANACONDA_PYTHON_VERSION
ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION
COPY ./common/install_inductor_benchmark_deps.sh install_inductor_benchmark_deps.sh COPY ./common/install_inductor_benchmark_deps.sh install_inductor_benchmark_deps.sh
COPY ./common/common_utils.sh common_utils.sh COPY ./common/common_utils.sh common_utils.sh
COPY ci_commit_pins/huggingface-requirements.txt huggingface-requirements.txt COPY ci_commit_pins/huggingface.txt huggingface.txt
COPY ci_commit_pins/timm.txt timm.txt COPY ci_commit_pins/timm.txt timm.txt
COPY ci_commit_pins/torchbench.txt torchbench.txt
RUN if [ -n "${INDUCTOR_BENCHMARKS}" ]; then bash ./install_inductor_benchmark_deps.sh; fi RUN if [ -n "${INDUCTOR_BENCHMARKS}" ]; then bash ./install_inductor_benchmark_deps.sh; fi
RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface-requirements.txt torchbench.txt RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface.txt
# (optional) Install non-default CMake version
ARG CMAKE_VERSION
COPY ./common/install_cmake.sh install_cmake.sh
RUN if [ -n "${CMAKE_VERSION}" ]; then bash ./install_cmake.sh; fi
RUN rm install_cmake.sh
# (optional) Install non-default Ninja version # (optional) Install non-default Ninja version
ARG NINJA_VERSION ARG NINJA_VERSION

View File

@ -28,6 +28,7 @@ RUN bash ./install_docs_reqs.sh && rm install_docs_reqs.sh
# Install conda and other packages (e.g., numpy, pytest) # Install conda and other packages (e.g., numpy, pytest)
ARG ANACONDA_PYTHON_VERSION ARG ANACONDA_PYTHON_VERSION
ARG CONDA_CMAKE
ARG DOCS ARG DOCS
ARG BUILD_ENVIRONMENT ARG BUILD_ENVIRONMENT
ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION
@ -56,10 +57,10 @@ RUN rm install_openssl.sh
ARG INDUCTOR_BENCHMARKS ARG INDUCTOR_BENCHMARKS
COPY ./common/install_inductor_benchmark_deps.sh install_inductor_benchmark_deps.sh COPY ./common/install_inductor_benchmark_deps.sh install_inductor_benchmark_deps.sh
COPY ./common/common_utils.sh common_utils.sh COPY ./common/common_utils.sh common_utils.sh
COPY ci_commit_pins/huggingface-requirements.txt huggingface-requirements.txt COPY ci_commit_pins/huggingface.txt huggingface.txt
COPY ci_commit_pins/timm.txt timm.txt COPY ci_commit_pins/timm.txt timm.txt
RUN if [ -n "${INDUCTOR_BENCHMARKS}" ]; then bash ./install_inductor_benchmark_deps.sh; fi RUN if [ -n "${INDUCTOR_BENCHMARKS}" ]; then bash ./install_inductor_benchmark_deps.sh; fi
RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface-requirements.txt RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface.txt
# Install XPU Dependencies # Install XPU Dependencies
ARG XPU_VERSION ARG XPU_VERSION
@ -72,10 +73,17 @@ ARG TRITON
COPY ./common/install_triton.sh install_triton.sh COPY ./common/install_triton.sh install_triton.sh
COPY ./common/common_utils.sh common_utils.sh COPY ./common/common_utils.sh common_utils.sh
COPY ci_commit_pins/triton-xpu.txt triton-xpu.txt COPY ci_commit_pins/triton-xpu.txt triton-xpu.txt
COPY triton_xpu_version.txt triton_version.txt COPY triton_version.txt triton_version.txt
RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi
RUN rm install_triton.sh common_utils.sh triton-xpu.txt triton_version.txt RUN rm install_triton.sh common_utils.sh triton-xpu.txt triton_version.txt
# (optional) Install database packages like LMDB and LevelDB
ARG DB
COPY ./common/install_db.sh install_db.sh
RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi
RUN rm install_db.sh
ENV INSTALLED_DB ${DB}
# (optional) Install vision packages like OpenCV # (optional) Install vision packages like OpenCV
ARG VISION ARG VISION
COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./ COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./
@ -83,6 +91,12 @@ RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi
RUN rm install_vision.sh cache_vision_models.sh common_utils.sh RUN rm install_vision.sh cache_vision_models.sh common_utils.sh
ENV INSTALLED_VISION ${VISION} ENV INSTALLED_VISION ${VISION}
# (optional) Install non-default CMake version
ARG CMAKE_VERSION
COPY ./common/install_cmake.sh install_cmake.sh
RUN if [ -n "${CMAKE_VERSION}" ]; then bash ./install_cmake.sh; fi
RUN rm install_cmake.sh
# (optional) Install non-default Ninja version # (optional) Install non-default Ninja version
ARG NINJA_VERSION ARG NINJA_VERSION
COPY ./common/install_ninja.sh install_ninja.sh COPY ./common/install_ninja.sh install_ninja.sh

View File

@ -1,6 +1,6 @@
ARG UBUNTU_VERSION ARG UBUNTU_VERSION
FROM ubuntu:${UBUNTU_VERSION} as base FROM ubuntu:${UBUNTU_VERSION}
ARG UBUNTU_VERSION ARG UBUNTU_VERSION
@ -28,6 +28,7 @@ RUN bash ./install_docs_reqs.sh && rm install_docs_reqs.sh
# Install conda and other packages (e.g., numpy, pytest) # Install conda and other packages (e.g., numpy, pytest)
ARG ANACONDA_PYTHON_VERSION ARG ANACONDA_PYTHON_VERSION
ARG CONDA_CMAKE
ARG DOCS ARG DOCS
ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION
ENV PATH /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/bin:/opt/conda/bin:$PATH ENV PATH /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/bin:/opt/conda/bin:$PATH
@ -51,22 +52,13 @@ RUN bash ./install_lcov.sh && rm install_lcov.sh
# Install cuda and cudnn # Install cuda and cudnn
ARG CUDA_VERSION ARG CUDA_VERSION
COPY ./common/install_cuda.sh install_cuda.sh COPY ./common/install_cuda.sh install_cuda.sh
COPY ./common/install_nccl.sh install_nccl.sh RUN bash ./install_cuda.sh ${CUDA_VERSION} && rm install_cuda.sh
COPY ./ci_commit_pins/nccl-cu* /ci_commit_pins/
COPY ./common/install_cusparselt.sh install_cusparselt.sh
RUN bash ./install_cuda.sh ${CUDA_VERSION} && rm install_cuda.sh install_nccl.sh /ci_commit_pins/nccl-cu* install_cusparselt.sh
ENV DESIRED_CUDA ${CUDA_VERSION} ENV DESIRED_CUDA ${CUDA_VERSION}
ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:$PATH ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:$PATH
# No effect if cuda not installed
ENV USE_SYSTEM_NCCL=1
ENV NCCL_INCLUDE_DIR="/usr/local/cuda/include/"
ENV NCCL_LIB_DIR="/usr/local/cuda/lib64/"
# (optional) Install UCC # (optional) Install UCC
ARG UCX_COMMIT ARG UCX_COMMIT
ARG UCC_COMMIT ARG UCC_COMMIT
ARG CUDA_VERSION
ENV UCX_COMMIT $UCX_COMMIT ENV UCX_COMMIT $UCX_COMMIT
ENV UCC_COMMIT $UCC_COMMIT ENV UCC_COMMIT $UCC_COMMIT
ENV UCX_HOME /usr ENV UCX_HOME /usr
@ -75,6 +67,20 @@ ADD ./common/install_ucc.sh install_ucc.sh
RUN if [ -n "${UCX_COMMIT}" ] && [ -n "${UCC_COMMIT}" ]; then bash ./install_ucc.sh; fi RUN if [ -n "${UCX_COMMIT}" ] && [ -n "${UCC_COMMIT}" ]; then bash ./install_ucc.sh; fi
RUN rm install_ucc.sh RUN rm install_ucc.sh
# (optional) Install protobuf for ONNX
ARG PROTOBUF
COPY ./common/install_protobuf.sh install_protobuf.sh
RUN if [ -n "${PROTOBUF}" ]; then bash ./install_protobuf.sh; fi
RUN rm install_protobuf.sh
ENV INSTALLED_PROTOBUF ${PROTOBUF}
# (optional) Install database packages like LMDB and LevelDB
ARG DB
COPY ./common/install_db.sh install_db.sh
RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi
RUN rm install_db.sh
ENV INSTALLED_DB ${DB}
# (optional) Install vision packages like OpenCV # (optional) Install vision packages like OpenCV
ARG VISION ARG VISION
COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./ COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./
@ -82,6 +88,24 @@ RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi
RUN rm install_vision.sh cache_vision_models.sh common_utils.sh RUN rm install_vision.sh cache_vision_models.sh common_utils.sh
ENV INSTALLED_VISION ${VISION} ENV INSTALLED_VISION ${VISION}
# (optional) Install Vulkan SDK
ARG VULKAN_SDK_VERSION
COPY ./common/install_vulkan_sdk.sh install_vulkan_sdk.sh
RUN if [ -n "${VULKAN_SDK_VERSION}" ]; then bash ./install_vulkan_sdk.sh; fi
RUN rm install_vulkan_sdk.sh
# (optional) Install swiftshader
ARG SWIFTSHADER
COPY ./common/install_swiftshader.sh install_swiftshader.sh
RUN if [ -n "${SWIFTSHADER}" ]; then bash ./install_swiftshader.sh; fi
RUN rm install_swiftshader.sh
# (optional) Install non-default CMake version
ARG CMAKE_VERSION
COPY ./common/install_cmake.sh install_cmake.sh
RUN if [ -n "${CMAKE_VERSION}" ]; then bash ./install_cmake.sh; fi
RUN rm install_cmake.sh
# (optional) Install non-default Ninja version # (optional) Install non-default Ninja version
ARG NINJA_VERSION ARG NINJA_VERSION
COPY ./common/install_ninja.sh install_ninja.sh COPY ./common/install_ninja.sh install_ninja.sh
@ -97,28 +121,26 @@ RUN rm install_openssl.sh
ARG INDUCTOR_BENCHMARKS ARG INDUCTOR_BENCHMARKS
COPY ./common/install_inductor_benchmark_deps.sh install_inductor_benchmark_deps.sh COPY ./common/install_inductor_benchmark_deps.sh install_inductor_benchmark_deps.sh
COPY ./common/common_utils.sh common_utils.sh COPY ./common/common_utils.sh common_utils.sh
COPY ci_commit_pins/huggingface-requirements.txt huggingface-requirements.txt COPY ci_commit_pins/huggingface.txt huggingface.txt
COPY ci_commit_pins/timm.txt timm.txt COPY ci_commit_pins/timm.txt timm.txt
COPY ci_commit_pins/torchbench.txt torchbench.txt
RUN if [ -n "${INDUCTOR_BENCHMARKS}" ]; then bash ./install_inductor_benchmark_deps.sh; fi RUN if [ -n "${INDUCTOR_BENCHMARKS}" ]; then bash ./install_inductor_benchmark_deps.sh; fi
RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface-requirements.txt torchbench.txt RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface.txt
ARG TRITON ARG TRITON
ARG TRITON_CPU # Install triton, this needs to be done before sccache because the latter will
# try to reach out to S3, which docker build runners don't have access
# Create a separate stage for building Triton and Triton-CPU. install_triton
# will check for the presence of env vars
FROM base as triton-builder
COPY ./common/install_triton.sh install_triton.sh COPY ./common/install_triton.sh install_triton.sh
COPY ./common/common_utils.sh common_utils.sh COPY ./common/common_utils.sh common_utils.sh
COPY ci_commit_pins/triton.txt triton.txt COPY ci_commit_pins/triton.txt triton.txt
COPY ci_commit_pins/triton-cpu.txt triton-cpu.txt RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi
RUN bash ./install_triton.sh RUN rm install_triton.sh common_utils.sh triton.txt
FROM base as final ARG TRITON_CPU
COPY --from=triton-builder /opt/triton /opt/triton COPY ./common/install_triton.sh install_triton.sh
RUN if [ -n "${TRITON}" ] || [ -n "${TRITON_CPU}" ]; then pip install /opt/triton/*.whl; chown -R jenkins:jenkins /opt/conda; fi COPY ./common/common_utils.sh common_utils.sh
RUN rm -rf /opt/triton COPY ci_commit_pins/triton-cpu.txt triton-cpu.txt
RUN if [ -n "${TRITON_CPU}" ]; then bash ./install_triton.sh; fi
RUN rm install_triton.sh common_utils.sh triton-cpu.txt
ARG EXECUTORCH ARG EXECUTORCH
# Build and install executorch # Build and install executorch
@ -149,12 +171,6 @@ RUN if [ -n "${ACL}" ]; then bash ./install_acl.sh; fi
RUN rm install_acl.sh RUN rm install_acl.sh
ENV INSTALLED_ACL ${ACL} ENV INSTALLED_ACL ${ACL}
ARG OPENBLAS
COPY ./common/install_openblas.sh install_openblas.sh
RUN if [ -n "${OPENBLAS}" ]; then bash ./install_openblas.sh; fi
RUN rm install_openblas.sh
ENV INSTALLED_OPENBLAS ${OPENBLAS}
# Install ccache/sccache (do this last, so we get priority in PATH) # Install ccache/sccache (do this last, so we get priority in PATH)
ARG SKIP_SCCACHE_INSTALL ARG SKIP_SCCACHE_INSTALL
COPY ./common/install_cache.sh install_cache.sh COPY ./common/install_cache.sh install_cache.sh
@ -182,6 +198,7 @@ COPY --from=pytorch/llvm:9.0.1 /opt/llvm /opt/llvm
RUN if [ -n "${SKIP_LLVM_SRC_BUILD_INSTALL}" ]; then set -eu; rm -rf /opt/llvm; fi RUN if [ -n "${SKIP_LLVM_SRC_BUILD_INSTALL}" ]; then set -eu; rm -rf /opt/llvm; fi
# AWS specific CUDA build guidance # AWS specific CUDA build guidance
ENV TORCH_CUDA_ARCH_LIST Maxwell
ENV TORCH_NVCC_FLAGS "-Xfatbin -compress-all" ENV TORCH_NVCC_FLAGS "-Xfatbin -compress-all"
ENV CUDA_PATH /usr/local/cuda ENV CUDA_PATH /usr/local/cuda

View File

@ -7,4 +7,4 @@ set -ex
SCRIPTPATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" SCRIPTPATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
USE_NVSHMEM=0 USE_CUSPARSELT=0 BUILD_PYTHONLESS=1 DESIRED_PYTHON="3.10" ${SCRIPTPATH}/../manywheel/build.sh USE_CUSPARSELT=0 BUILD_PYTHONLESS=1 DESIRED_PYTHON="3.9" ${SCRIPTPATH}/../manywheel/build.sh

View File

@ -1,31 +0,0 @@
# 🔧 Lumen_cli
A Python CLI tool for building and testing PyTorch-based components, using a YAML configuration file for structured, repeatable workflows.
## Features
- **Build**
- external projects (e.g. vLLM)
## 📦 Installation
At the root of the pytorch repo, run:
```bash
pip install -e .ci/lumen_cli
```
## Run the cli tool
The CLI tool must be run from the root of the pytorch repo. For example, to build external vLLM:
```bash
python -m cli.run build external vllm
```
This runs the build steps with the default behaviour for the vLLM project.
To see the help messages, run
```bash
python3 -m cli.run --help
```
## Add customized external build logic
To add a new external build target:
1. Create the build function in the cli/lib folder.
2. Register your target and its main build function at EXTERNAL_BUILD_TARGET_DISPATCH in `cli/build_cli/register_build.py` (a minimal sketch is shown below).
3. [optional] Create your CI config file in .github/ci_configs/${EXTERNAL_PACKAGE_NAME}.yaml.
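A minimal sketch of step 2, assuming the dispatch table is the `_TARGETS` mapping in `cli/build_cli/register_build.py` and using a hypothetical `MyProjBuildRunner`:
```python
from cli.lib.common.cli_helper import BaseRunner


class MyProjBuildRunner(BaseRunner):
    def run(self) -> None:
        print("building myproj with", self.args)  # your build steps go here


# Register the new target; it becomes reachable via:
#   python -m cli.run build external myproj
_TARGETS["myproj"] = {
    "runner": MyProjBuildRunner,
    "help": "Build myproj using docker buildx.",
}
```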

View File

@ -1,37 +0,0 @@
import argparse
import logging
from cli.lib.common.cli_helper import register_targets, RichHelp, TargetSpec
from cli.lib.core.vllm.vllm_build import VllmBuildRunner
logger = logging.getLogger(__name__)
# Maps targets to their argparse configuration and runner
# Each entry adds a new target reachable via `python -m cli.run build external {target}`, dispatched to its build runner
_TARGETS: dict[str, TargetSpec] = {
"vllm": {
"runner": VllmBuildRunner,
"help": "Build vLLM using docker buildx.",
}
# add yours ...
}
def register_build_commands(subparsers: argparse._SubParsersAction) -> None:
build_parser = subparsers.add_parser(
"build",
help="Build related commands",
formatter_class=RichHelp,
)
build_subparsers = build_parser.add_subparsers(dest="build_command", required=True)
overview = "\n".join(
f" {name:12} {spec.get('help', '')}" for name, spec in _TARGETS.items()
)
external_parser = build_subparsers.add_parser(
"external",
help="Build external targets",
description="Build third-party targets.\n\nAvailable targets:\n" + overview,
formatter_class=RichHelp,
)
register_targets(external_parser, _TARGETS)

View File

@ -1,71 +0,0 @@
"""
Cli Argparser Utility helpers for CLI tasks.
"""
import argparse
from abc import ABC, abstractmethod
try:
from typing import Any, Callable, Required, TypedDict # Python 3.11+
except ImportError:
from typing import Any, Callable, TypedDict
from typing_extensions import Required # Fallback for Python <3.11
class BaseRunner(ABC):
def __init__(self, args: Any) -> None:
self.args = args
@abstractmethod
def run(self) -> None:
"""runs main logics, required"""
# Pretty help: keep newlines + show defaults
class RichHelp(
argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter
):
pass
class TargetSpec(TypedDict, total=False):
"""CLI subcommand specification with bA."""
runner: Required[type[BaseRunner]]
help: str
description: str
add_arguments: Callable[[argparse.ArgumentParser], None]
def register_targets(
parser: argparse.ArgumentParser,
target_specs: dict[str, TargetSpec],
common_args: Callable[[argparse.ArgumentParser], None] = lambda _: None,
) -> None:
"""Register target subcommands."""
targets = parser.add_subparsers(
dest="target",
required=True,
metavar="{" + ",".join(target_specs.keys()) + "}",
)
for name, spec in target_specs.items():
desc = spec.get("description") or spec["runner"].__doc__ or ""
p = targets.add_parser(
name,
help=spec.get("help", ""),
description=desc.strip(),
formatter_class=RichHelp,
)
p.set_defaults(
func=lambda args, cls=spec["runner"]: cls(args).run(),
_runner_class=spec["runner"],
)
if "add_arguments" in spec and callable(spec["add_arguments"]):
spec["add_arguments"](p)
if common_args:
common_args(p)
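# Usage sketch for register_targets, assuming a hypothetical DemoRunner:
#
# class DemoRunner(BaseRunner):
#     def run(self) -> None:
#         print(self.args)
#
# parser = argparse.ArgumentParser(formatter_class=RichHelp)
# register_targets(parser, {"demo": {"runner": DemoRunner, "help": "Run the demo."}})
# args = parser.parse_args(["demo"])
# args.func(args)  # instantiates DemoRunner(args) and calls run()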

View File

@ -1,42 +0,0 @@
"""
Docker Utility helpers for CLI tasks.
"""
import logging
from typing import Optional
import docker
from docker.errors import APIError, NotFound
logger = logging.getLogger(__name__)
# lazy singleton so we don't reconnect every call
_docker_client: Optional[docker.DockerClient] = None
def _get_client() -> docker.DockerClient:
global _docker_client
if _docker_client is None:
_docker_client = docker.from_env()
return _docker_client
def local_image_exists(
image_name: str, client: Optional[docker.DockerClient] = None
) -> bool:
"""Return True if a local Docker image exists."""
if not image_name:
return False
client = client or _get_client()
try:
client.images.get(image_name)
return True
except (NotFound, APIError) as e:
logger.error(
"Error when checking Docker image '%s': %s",
image_name,
e.explanation if hasattr(e, "explanation") else str(e),
)
return False
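# Usage sketch (image name is illustrative):
#
# if not local_image_exists("pytorch/pytorch:latest"):
#     logger.info("Base image missing locally; docker will pull it from the registry")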

View File

@ -1,110 +0,0 @@
"""
Environment Variables and Dataclasses Utility helpers for CLI tasks.
"""
import os
from dataclasses import field, fields, is_dataclass, MISSING
from pathlib import Path
from textwrap import indent
from typing import Optional, Union
from cli.lib.common.utils import str2bool
def get_env(name: str, default: str = "") -> str:
"""Get environment variable with default fallback."""
return os.environ.get(name) or default
def env_path_optional(
name: str,
default: Optional[Union[str, Path]] = None,
resolve: bool = True,
) -> Optional[Path]:
"""Get environment variable as optional Path."""
val = get_env(name) or default
if not val:
return None
path = Path(val)
return path.resolve() if resolve else path
def env_path(
name: str,
default: Optional[Union[str, Path]] = None,
resolve: bool = True,
) -> Path:
"""Get environment variable as Path, raise if missing."""
path = env_path_optional(name, default, resolve)
if not path:
raise ValueError(f"Missing path value for {name}")
return path
def env_bool(
name: str,
default: bool = False,
) -> bool:
val = get_env(name)
if not val:
return default
return str2bool(val)
def env_bool_field(
name: str,
default: bool = False,
):
return field(default_factory=lambda: env_bool(name, default))
def env_path_field(
name: str,
default: Union[str, Path] = "",
*,
resolve: bool = True,
) -> Path:
return field(default_factory=lambda: env_path(name, default, resolve=resolve))
def env_str_field(
name: str,
default: str = "",
) -> str:
return field(default_factory=lambda: get_env(name, default))
def generate_dataclass_help(cls) -> str:
"""Auto-generate help text for dataclass fields."""
if not is_dataclass(cls):
raise TypeError(f"{cls} is not a dataclass")
def get_value(f):
if f.default is not MISSING:
return f.default
if f.default_factory is not MISSING:
try:
return f.default_factory()
except Exception as e:
return f"<error: {e}>"
return "<required>"
lines = [f"{f.name:<22} = {repr(get_value(f))}" for f in fields(cls)]
return indent("\n".join(lines), " ")
def with_params_help(params_cls: type, title: str = "Parameter defaults"):
"""
Class decorator that appends a help table generated from another dataclass
(e.g., VllmParameters) to the decorated class's docstring.
"""
if not is_dataclass(params_cls):
raise TypeError(f"{params_cls} must be a dataclass")
def _decorator(cls: type) -> type:
block = generate_dataclass_help(params_cls)
cls.__doc__ = (cls.__doc__ or "") + f"\n\n{title}:\n{block}"
return cls
return _decorator
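# Usage sketch, assuming hypothetical DEMO_OUT / DEMO_VERBOSE environment variables:
#
# from dataclasses import dataclass
#
# @dataclass
# class DemoParams:
#     output_dir: Path = env_path_field("DEMO_OUT", "./out")
#     verbose: bool = env_bool_field("DEMO_VERBOSE", False)
#
# @with_params_help(DemoParams)
# class DemoRunner:
#     """Runs the demo."""
#
# print(DemoRunner.__doc__)  # docstring now ends with a "Parameter defaults" table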

View File

@ -1,143 +0,0 @@
from __future__ import annotations
import logging
import os
import textwrap
from pathlib import Path
from typing import TYPE_CHECKING
from cli.lib.common.utils import get_wheels
from jinja2 import Template
if TYPE_CHECKING:
from collections.abc import Iterable, Mapping
logger = logging.getLogger(__name__)
_TPL_CONTENT = Template(
textwrap.dedent("""\
## {{ title }}
```{{ lang }}
{{ content }}
```
""")
)
_TPL_LIST_ITEMS = Template(
textwrap.dedent("""\
## {{ title }}
{% for it in items %}
- {{ it.pkg }}: {{ it.relpath }}
{% else %}
_(no items found)_
{% endfor %}
""")
)
_TPL_TABLE = Template(
textwrap.dedent("""\
{%- if rows %}
| {{ cols | join(' | ') }} |
|{%- for _ in cols %} --- |{%- endfor %}
{%- for r in rows %}
| {%- for c in cols %} {{ r.get(c, "") }} |{%- endfor %}
{%- endfor %}
{%- else %}
_(no data)_
{%- endif %}
""")
)
def gh_summary_path() -> Path | None:
"""Return the Path to the GitHub step summary file, or None if not set."""
p = os.environ.get("GITHUB_STEP_SUMMARY")
return Path(p) if p else None
def write_gh_step_summary(md: str, *, append_content: bool = True) -> bool:
"""
Write Markdown content to the GitHub Step Summary file if GITHUB_STEP_SUMMARY is set.
append_content: defaults to True; if True, append to the end of the file, otherwise overwrite it
Returns:
True if written successfully (in GitHub Actions environment),
False if skipped (e.g., running locally where the variable is not set).
"""
sp = gh_summary_path()
if not sp:
logger.info("[gh-summary] GITHUB_STEP_SUMMARY not set, skipping write.")
return False
md_clean = textwrap.dedent(md).strip() + "\n"
mode = "a" if append_content else "w"
with sp.open(mode, encoding="utf-8") as f:
f.write(md_clean)
return True
def md_heading(text: str, level: int = 2) -> str:
"""Generate a Markdown heading string with the given level (1-6)."""
return f"{'#' * max(1, min(level, 6))} {text}\n"
def md_details(summary: str, content: str) -> str:
"""Generate a collapsible <details> block with a summary and inner content."""
return f"<details>\n<summary>{summary}</summary>\n\n{content}\n\n</details>\n"
def summarize_content_from_file(
output_dir: Path,
freeze_file: str,
title: str = "Content from file",
code_lang: str = "", # e.g. "text" or "ini"
) -> bool:
f = Path(output_dir) / freeze_file
if not f.exists():
return False
content = f.read_text(encoding="utf-8").strip()
md = render_content(content, title=title, lang=code_lang)
return write_gh_step_summary(md)
def summarize_wheels(path: Path, title: str = "Wheels", max_depth: int = 3):
items = get_wheels(path, max_depth=max_depth)
if not items:
return False
md = render_list(items, title=title)
return write_gh_step_summary(md)
def md_kv_table(rows: Iterable[Mapping[str, str | int | float]]) -> str:
"""
Render a list of dicts as a Markdown table using Jinja template.
"""
rows = list(rows)
# Collect columns across rows, preserving first-seen order
cols = list(dict.fromkeys(k for r in rows for k in r))
md = _TPL_TABLE.render(cols=cols, rows=rows).strip() + "\n"
return md
def render_list(
items: Iterable[str],
*,
title: str = "List",
) -> str:
tpl = _TPL_LIST_ITEMS
md = tpl.render(title=title, items=items)
return md
def render_content(
content: str,
*,
title: str = "Content",
lang: str = "text",
) -> str:
tpl = _TPL_CONTENT
md = tpl.render(title=title, content=content, lang=lang)
return md
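# Usage sketch: write a heading and a small table to the GitHub step summary
# (keys and values are illustrative; this is a no-op when GITHUB_STEP_SUMMARY is unset):
#
# write_gh_step_summary(md_heading("Build results"))
# write_gh_step_summary(md_kv_table([
#     {"pkg": "torch", "status": "ok"},
#     {"pkg": "vllm", "status": "ok"},
# ]))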

View File

@ -1,69 +0,0 @@
"""
Git Utility helpers for CLI tasks.
"""
import logging
from pathlib import Path
from cli.lib.common.path_helper import remove_dir
from git import GitCommandError, RemoteProgress, Repo
logger = logging.getLogger(__name__)
class PrintProgress(RemoteProgress):
"""Simple progress logger for git operations."""
def __init__(self, interval: int = 5):
super().__init__()
self._last_percent = -1
self._interval = interval
def update(self, op_code, cur, max=None, message=""):
msg = self._cur_line or message
if max and cur:
percent = int(cur / max * 100)
if percent != self._last_percent and percent % self._interval == 0:
self._last_percent = percent
logger.info("Progress: %d%% - %s", percent, msg)
elif msg:
logger.info(msg)
def clone_external_repo(target: str, repo: str, dst: str = "", update_submodules=False):
"""Clone repository with pinned commit and optional submodules."""
dst = dst or target
try:
logger.info("Cloning %s to %s", target, dst)
# Clone and fetch
remove_dir(dst)
r = Repo.clone_from(repo, dst, progress=PrintProgress())
r.git.fetch("--all", "--tags")
# Checkout pinned commit
commit = get_post_build_pinned_commit(target)
logger.info("Checking out pinned %s commit %s", target, commit)
r.git.checkout(commit)
# Update submodules if requested
if update_submodules and r.submodules:
logger.info("Updating %d submodule(s)", len(r.submodules))
for sm in r.submodules:
sm.update(init=True, recursive=True, progress=PrintProgress())
logger.info("Successfully cloned %s", target)
return r, commit
except GitCommandError as e:
logger.error("Git operation failed: %s", e)
raise
def get_post_build_pinned_commit(name: str, prefix=".github/ci_commit_pins") -> str:
path = Path(prefix) / f"{name}.txt"
if not path.exists():
raise FileNotFoundError(f"Pin file not found: {path}")
return path.read_text(encoding="utf-8").strip()
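# Usage sketch: clone vLLM at the commit pinned in .github/ci_commit_pins/vllm.txt:
#
# repo, commit = clone_external_repo(
#     target="vllm",
#     repo="https://github.com/vllm-project/vllm.git",
#     update_submodules=True,
# )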

View File

@ -1,14 +0,0 @@
"""
Logger Utility helpers for CLI tasks.
"""
import logging
import sys
def setup_logging(level: int = logging.INFO):
logging.basicConfig(
level=level,
format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
stream=sys.stdout,
)

View File

@ -1,62 +0,0 @@
"""Path utility helpers for CLI tasks."""
import logging
import shutil
from pathlib import Path
from typing import Union
logger = logging.getLogger(__name__)
def get_path(path: Union[str, Path], resolve: bool = False) -> Path:
"""Convert to Path object, optionally resolving to absolute path."""
if not path:
raise ValueError("Path cannot be None or empty")
result = Path(path)
return result.resolve() if resolve else result
def ensure_dir_exists(path: Union[str, Path]) -> Path:
"""Create directory if it doesn't exist."""
path_obj = get_path(path)
path_obj.mkdir(parents=True, exist_ok=True)
return path_obj
def remove_dir(path: Union[str, Path, None]) -> None:
"""Remove directory if it exists."""
if not path:
return
path_obj = get_path(path)
if path_obj.exists():
shutil.rmtree(path_obj)
def force_create_dir(path: Union[str, Path]) -> Path:
"""Remove directory if exists, then create fresh empty directory."""
remove_dir(path)
return ensure_dir_exists(path)
def copy(src: Union[str, Path], dst: Union[str, Path]) -> None:
"""Copy file or directory from src to dst."""
src_path = get_path(src, resolve=True)
dst_path = get_path(dst, resolve=True)
if not src_path.exists():
raise FileNotFoundError(f"Source does not exist: {src_path}")
dst_path.parent.mkdir(parents=True, exist_ok=True)
if src_path.is_file():
shutil.copy2(src_path, dst_path)
elif src_path.is_dir():
shutil.copytree(src_path, dst_path, dirs_exist_ok=True)
else:
raise ValueError(f"Unsupported path type: {src_path}")
def is_path_exist(path: Union[str, Path, None]) -> bool:
"""Check if path exists."""
return bool(path and get_path(path).exists())

View File

@ -1,71 +0,0 @@
import glob
import logging
import shlex
import shutil
import sys
from collections.abc import Iterable
from importlib.metadata import PackageNotFoundError, version # noqa: UP035
from typing import Optional, Union
from cli.lib.common.utils import run_command
logger = logging.getLogger(__name__)
def pip_install_packages(
packages: Iterable[str] = (),
env=None,
*,
requirements: Optional[str] = None,
constraints: Optional[str] = None,
prefer_uv: bool = False,
) -> None:
use_uv = prefer_uv and shutil.which("uv") is not None
base = (
[sys.executable, "-m", "uv", "pip", "install"]
if use_uv
else [sys.executable, "-m", "pip", "install"]
)
cmd = base[:]
if requirements:
cmd += ["-r", requirements]
if constraints:
cmd += ["-c", constraints]
cmd += list(packages)
logger.info("pip installing packages: %s", " ".join(map(shlex.quote, cmd)))
run_command(" ".join(map(shlex.quote, cmd)), env=env)
def pip_install_first_match(pattern: str, extras: Optional[str] = None, pref_uv=False):
wheel = first_matching_pkg(pattern)
target = f"{wheel}[{extras}]" if extras else wheel
logger.info("Installing %s...", target)
pip_install_packages([target], prefer_uv=pref_uv)
def run_python(args: Union[str, list[str]], env=None):
"""
Run Python from the current environment.
"""
if isinstance(args, str):
args = shlex.split(args)
cmd = [sys.executable] + args
run_command(" ".join(map(shlex.quote, cmd)), env=env)
def pkg_exists(name: str) -> bool:
try:
pkg_version = version(name)
logger.info("%s already exist with version: %s", name, pkg_version)
return True
except PackageNotFoundError:
logger.info("%s is not installed", name)
return False
def first_matching_pkg(pattern: str) -> str:
matches = sorted(glob.glob(pattern))
if not matches:
raise FileNotFoundError(f"No wheel matching: {pattern}")
return matches[0]
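# Usage sketch (paths and extras are illustrative):
#
# pip_install_packages(requirements="requirements-ci.txt", prefer_uv=True)
# pip_install_first_match("dist/torch-*.whl", extras="opt-einsum")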

View File

@ -1,139 +0,0 @@
"""
General Utility helpers for CLI tasks.
"""
import logging
import os
import shlex
import subprocess
import sys
from contextlib import contextmanager
from pathlib import Path
from typing import Optional
logger = logging.getLogger(__name__)
def run_command(
cmd: str,
use_shell: bool = False,
log_cmd: bool = True,
cwd: Optional[str] = None,
env: Optional[dict] = None,
check: bool = True,
) -> int:
"""Run a command with optional shell execution."""
if use_shell:
args = cmd
log_prefix = "[shell]"
executable = "/bin/bash"
else:
args = shlex.split(cmd)
log_prefix = "[cmd]"
executable = None
if log_cmd:
display_cmd = cmd if use_shell else " ".join(args)
logger.info("%s %s", log_prefix, display_cmd)
run_env = {**os.environ, **(env or {})}
proc = subprocess.run(
args,
shell=use_shell,
executable=executable,
stdout=sys.stdout,
stderr=sys.stderr,
cwd=cwd,
env=run_env,
check=False,
)
if check and proc.returncode != 0:
logger.error(
"%s Command failed (exit %s): %s", log_prefix, proc.returncode, cmd
)
raise subprocess.CalledProcessError(
proc.returncode, args if not use_shell else cmd
)
return proc.returncode
def str2bool(value: Optional[str]) -> bool:
"""Convert environment variables to boolean values."""
if not value:
return False
if not isinstance(value, str):
raise ValueError(
f"Expected a string value for boolean conversion, got {type(value)}"
)
value = value.strip().lower()
true_value_set = {"1", "true", "t", "yes", "y", "on", "enable", "enabled", "found"}
false_value_set = {"0", "false", "f", "no", "n", "off", "disable"}
if value in true_value_set:
return True
if value in false_value_set:
return False
raise ValueError(f"Invalid string value for boolean conversion: {value}")
@contextmanager
def temp_environ(updates: dict[str, str]):
"""
Temporarily set environment variables and restore them after the block.
Args:
updates: Dict of environment variables to set.
"""
missing = object()
old: dict[str, str | object] = {k: os.environ.get(k, missing) for k in updates}
try:
os.environ.update(updates)
yield
finally:
for k, v in old.items():
if v is missing:
os.environ.pop(k, None)
else:
os.environ[k] = v # type: ignore[arg-type]
@contextmanager
def working_directory(path: str):
"""
Temporarily change the working directory inside a context.
"""
if not path:
# No-op context
yield
return
prev_cwd = os.getcwd()
try:
os.chdir(path)
yield
finally:
os.chdir(prev_cwd)
def get_wheels(
output_dir: Path,
max_depth: Optional[int] = None,
) -> list[dict[str, str]]:
"""Return a list of wheels found in the given output directory."""
root = Path(output_dir)
if not root.exists():
return []
items = []
for dirpath, _, filenames in os.walk(root):
depth = Path(dirpath).relative_to(root).parts
if max_depth is not None and len(depth) > max_depth:
continue
for fname in sorted(filenames):
if fname.endswith(".whl"):
pkg = fname.split("-")[0]
relpath = str((Path(dirpath) / fname).relative_to(root))
items.append({"pkg": pkg, "relpath": relpath})
return items
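# Usage sketch: run a step in another directory with a temporary env var
# (paths and values are illustrative):
#
# with working_directory("vllm"), temp_environ({"MAX_JOBS": "8"}):
#     run_command("python use_existing_torch.py")
#
# for whl in get_wheels(Path("./dist"), max_depth=2):
#     print(whl["pkg"], whl["relpath"])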

View File

@ -1,292 +0,0 @@
import logging
import os
import textwrap
from typing import Any
from cli.lib.common.gh_summary import write_gh_step_summary
from cli.lib.common.git_helper import clone_external_repo
from cli.lib.common.pip_helper import pip_install_packages
from cli.lib.common.utils import run_command, temp_environ, working_directory
from jinja2 import Template
logger = logging.getLogger(__name__)
_TPL_VLLM_INFO = Template(
textwrap.dedent("""\
## Vllm against Pytorch CI Test Summary
**Vllm Commit**: [{{ vllm_commit }}](https://github.com/vllm-project/vllm/commit/{{ vllm_commit }})
{%- if torch_sha %}
**Pytorch Commit**: [{{ torch_sha }}](https://github.com/pytorch/pytorch/commit/{{ torch_sha }})
{%- endif %}
""")
)
def sample_vllm_test_library():
"""
Simple sample to unblock vLLM CI development; it mimics
https://github.com/vllm-project/vllm/blob/main/.buildkite/test-pipeline.yaml
See run_test_plan for more details.
"""
# TODO(elainewy): Read from yaml file to handle the env and tests for vllm
return {
"vllm_basic_correctness_test": {
"title": "Basic Correctness Test",
"id": "vllm_basic_correctness_test",
"env_vars": {
"VLLM_WORKER_MULTIPROC_METHOD": "spawn",
},
"steps": [
"pytest -v -s basic_correctness/test_cumem.py",
"pytest -v -s basic_correctness/test_basic_correctness.py",
"pytest -v -s basic_correctness/test_cpu_offload.py",
],
},
"vllm_basic_models_test": {
"title": "Basic models test",
"id": "vllm_basic_models_test",
"steps": [
"pytest -v -s models/test_transformers.py",
"pytest -v -s models/test_registry.py",
"pytest -v -s models/test_utils.py",
"pytest -v -s models/test_vision.py",
"pytest -v -s models/test_initialization.py",
],
},
"vllm_entrypoints_test": {
"title": "Entrypoints Test ",
"id": "vllm_entrypoints_test",
"env_vars": {
"VLLM_WORKER_MULTIPROC_METHOD": "spawn",
},
"steps": [
" ".join(
[
"pytest",
"-v",
"-s",
"entrypoints/llm",
"--ignore=entrypoints/llm/test_generate.py",
"--ignore=entrypoints/llm/test_collective_rpc.py",
]
),
"pytest -v -s entrypoints/llm/test_generate.py",
"pytest -v -s entrypoints/offline_mode",
],
},
"vllm_regression_test": {
"title": "Regression Test",
"id": "vllm_regression_test",
"package_install": ["modelscope"],
"steps": [
"pytest -v -s test_regression.py",
],
},
"vllm_lora_tp_test_distributed": {
"title": "LoRA TP Test (Distributed)",
"id": "vllm_lora_tp_test_distributed",
"env_vars": {
"VLLM_WORKER_MULTIPROC_METHOD": "spawn",
},
"num_gpus": 4,
"steps": [
"pytest -v -s -x lora/test_chatglm3_tp.py",
"pytest -v -s -x lora/test_llama_tp.py",
"pytest -v -s -x lora/test_llm_with_multi_loras.py",
],
},
"vllm_distributed_test_28_failure_test": {
"title": "Distributed Tests (2 GPUs) pytorch 2.8 release failure",
"id": "vllm_distributed_test_28_failure_test",
"env_vars": {
"VLLM_WORKER_MULTIPROC_METHOD": "spawn",
},
"num_gpus": 4,
"steps": [
"pytest -v -s distributed/test_sequence_parallel.py",
],
},
"vllm_lora_28_failure_test": {
"title": "LoRA pytorch 2.8 failure test",
"id": "vllm_lora_28_failure_test",
"steps": ["pytest -v lora/test_quant_model.py"],
},
"vllm_multi_model_processor_test": {
"title": "Multi-Modal Processor Test",
"id": "vllm_multi_model_processor_test",
"package_install": ["git+https://github.com/TIGER-AI-Lab/Mantis.git"],
"steps": [
"pytest -v -s models/multimodal/processing --ignore models/multimodal/processing/test_tensor_schema.py",
],
},
"vllm_multi_model_test_28_failure_test": {
"title": "Multi-Model Test (Failed 2.8 release)",
"id": "vllm_multi_model_test_28_failure_test",
"package_install": ["git+https://github.com/TIGER-AI-Lab/Mantis.git"],
"steps": [
"pytest -v -s models/multimodal/generation/test_voxtral.py",
"pytest -v -s models/multimodal/pooling",
],
},
"vllm_pytorch_compilation_unit_tests": {
"title": "PyTorch Compilation Unit Tests",
"id": "vllm_pytorch_compilation_unit_tests",
"steps": [
"pytest -v -s compile/test_pass_manager.py",
"pytest -v -s compile/test_fusion.py",
"pytest -v -s compile/test_fusion_attn.py",
"pytest -v -s compile/test_silu_mul_quant_fusion.py",
"pytest -v -s compile/test_sequence_parallelism.py",
"pytest -v -s compile/test_async_tp.py",
"pytest -v -s compile/test_fusion_all_reduce.py",
"pytest -v -s compile/test_decorator.py",
],
},
"vllm_languagde_model_test_extended_generation_28_failure_test": {
"title": "Language Models Test (Extended Generation) 2.8 release failure",
"id": "vllm_languagde_model_test_extended_generation_28_failure_test",
"package_install": [
"--no-build-isolation",
"git+https://github.com/Dao-AILab/causal-conv1d@v1.5.0.post8",
],
"steps": [
"pytest -v -s models/language/generation/test_mistral.py",
],
},
"vllm_distributed_test_2_gpu_28_failure_test": {
"title": "Distributed Tests (2 GPUs) pytorch 2.8 release failure",
"id": "vllm_distributed_test_2_gpu_28_failure_test",
"env_vars": {
"VLLM_WORKER_MULTIPROC_METHOD": "spawn",
},
"num_gpus": 4,
"steps": [
"pytest -v -s distributed/test_sequence_parallel.py",
],
},
# TODO(elainewy):need to add g6 with 4 gpus to run this test
"vllm_lora_test": {
"title": "LoRA Test %N",
"id": "lora_test",
"parallelism": 4,
"steps": [
"echo '[checking] list sharded lora tests:'",
" ".join(
[
"pytest -q --collect-only lora",
"--shard-id=$$BUILDKITE_PARALLEL_JOB",
"--num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT",
"--ignore=lora/test_chatglm3_tp.py --ignore=lora/test_llama_tp.py",
]
),
"echo '[checking] Done. list lora tests'",
" ".join(
[
"pytest -v -s lora --shard-id=$$BUILDKITE_PARALLEL_JOB",
"--num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT",
"--ignore=lora/test_chatglm3_tp.py --ignore=lora/test_llama_tp.py",
]
),
],
},
}
def check_parallelism(tests: Any, title: str, shard_id: int = 0, num_shards: int = 0):
"""
Check whether the test plan requests shard parallelism.
"""
parallelism = int(tests.get("parallelism", "0"))
is_parallel = parallelism and parallelism > 1
if not is_parallel:
return False
if shard_id > num_shards:
raise RuntimeError(
f"Test {title} expects {num_shards} shards, but invalid {shard_id} is provided"
)
if num_shards != parallelism:
raise RuntimeError(
f"Test {title} expects {parallelism} shards, but invalid {num_shards} is provided"
)
return True
def run_test_plan(
test_plan: str,
test_target: str,
tests_map: dict[str, Any],
shard_id: int = 0,
num_shards: int = 0,
):
"""
Run the list of tests specified by the test plan.
"""
logger.info("run %s tests.....", test_target)
if test_plan not in tests_map:
raise RuntimeError(
f"test {test_plan} not found, please add it to test plan pool"
)
tests = tests_map[test_plan]
pkgs = tests.get("package_install", [])
title = tests.get("title", "unknown test")
is_parallel = check_parallelism(tests, title, shard_id, num_shards)
if is_parallel:
title = title.replace("%N", f"{shard_id}/{num_shards}")
logger.info("Running tests: %s", title)
if pkgs:
logger.info("Installing packages: %s", pkgs)
pip_install_packages(packages=pkgs, prefer_uv=True)
with (
working_directory(tests.get("working_directory", "tests")),
temp_environ(tests.get("env_vars", {})),
):
failures = []
for step in tests["steps"]:
logger.info("Running step: %s", step)
if is_parallel:
step = replace_buildkite_placeholders(step, shard_id, num_shards)
logger.info("Running parallel step: %s", step)
code = run_command(cmd=step, check=False, use_shell=True)
if code != 0:
failures.append(step)
logger.info("Finish running step: %s", step)
if failures:
logger.error("Failed tests: %s", failures)
raise RuntimeError(f"{len(failures)} pytest runs failed: {failures}")
logger.info("Done. All tests passed")
def clone_vllm(dst: str = "vllm"):
_, commit = clone_external_repo(
target="vllm",
repo="https://github.com/vllm-project/vllm.git",
dst=dst,
update_submodules=True,
)
return commit
def replace_buildkite_placeholders(step: str, shard_id: int, num_shards: int) -> str:
mapping = {
"$$BUILDKITE_PARALLEL_JOB_COUNT": str(num_shards),
"$$BUILDKITE_PARALLEL_JOB": str(shard_id),
}
for k in sorted(mapping, key=len, reverse=True):
step = step.replace(k, mapping[k])
return step
def summarize_build_info(vllm_commit: str) -> bool:
torch_sha = os.getenv("GITHUB_SHA")
md = (
_TPL_VLLM_INFO.render(vllm_commit=vllm_commit, torch_sha=torch_sha).strip()
+ "\n"
)
return write_gh_step_summary(md)
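# Usage sketch: clone vLLM and run one suite from the sample library
# (shard arguments only matter for plans that set "parallelism"):
#
# commit = clone_vllm()
# run_test_plan("vllm_basic_models_test", "vllm", sample_vllm_test_library())
# summarize_build_info(commit)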

View File

@ -1,296 +0,0 @@
import logging
import os
import textwrap
from dataclasses import dataclass
from pathlib import Path
from typing import Optional
from cli.lib.common.cli_helper import BaseRunner
from cli.lib.common.docker_helper import local_image_exists
from cli.lib.common.envs_helper import (
env_bool_field,
env_path_field,
env_str_field,
with_params_help,
)
from cli.lib.common.gh_summary import (
gh_summary_path,
summarize_content_from_file,
summarize_wheels,
)
from cli.lib.common.path_helper import (
copy,
ensure_dir_exists,
force_create_dir,
get_path,
is_path_exist,
)
from cli.lib.common.utils import run_command
from cli.lib.core.vllm.lib import clone_vllm, summarize_build_info
logger = logging.getLogger(__name__)
# Default path for docker build artifacts
_DEFAULT_RESULT_PATH = "./shared"
# Temp folder inside the vllm work directory used to stage torch wheels for the docker build
_VLLM_TEMP_FOLDER = "tmp"
@dataclass
class VllmBuildParameters:
"""
Parameters defining the vllm external input configurations.
Combined with VllmDockerBuildArgs, these define the vllm build environment.
"""
# USE_TORCH_WHEEL: when true, use local Torch wheels; requires TORCH_WHEELS_PATH.
# Otherwise the docker build pulls torch nightly during the build
# TORCH_WHEELS_PATH: directory containing local torch wheels when use_torch_whl is True
use_torch_whl: bool = env_bool_field("USE_TORCH_WHEEL", True)
torch_whls_path: Path = env_path_field("TORCH_WHEELS_PATH", "./dist")
# USE_LOCAL_BASE_IMAGE: when true, use an existing local Docker base image; requires BASE_IMAGE
# Otherwise, pull the dockerfile's default image remotely
# BASE_IMAGE: name:tag (only needed when use_local_base_image is True)
use_local_base_image: bool = env_bool_field("USE_LOCAL_BASE_IMAGE", True)
base_image: str = env_str_field("BASE_IMAGE")
# USE_LOCAL_DOCKERFILE: when true ("1"), use a local Dockerfile; requires DOCKERFILE_PATH.
# Otherwise, use vllm's default dockerfile.torch_nightly for the build
# DOCKERFILE_PATH: path to the Dockerfile used when use_local_dockerfile is True
use_local_dockerfile: bool = env_bool_field("USE_LOCAL_DOCKERFILE", True)
dockerfile_path: Path = env_path_field(
"DOCKERFILE_PATH", ".github/ci_configs/vllm/Dockerfile.tmp_vllm"
)
# the cleaning script to remove torch dependencies from pip
cleaning_script: Path = env_path_field(
"cleaning_script", ".github/ci_configs/vllm/use_existing_torch.py"
)
# OUTPUT_DIR: where docker buildx (local exporter) will write artifacts
output_dir: Path = env_path_field("OUTPUT_DIR", "external/vllm")
# --- Build args ----------------------------------------------------------
target_stage: str = env_str_field("TARGET_STAGE", "export-wheels")
tag_name: str = env_str_field("TAG", "vllm-wheels")
cuda_version: str = env_str_field("CUDA_VERSION", "12.8.1")
python_version: str = env_str_field("PYTHON_VERSION", "3.12")
max_jobs: str = env_str_field("MAX_JOBS", "64")
sccache_bucket: str = env_str_field("SCCACHE_BUCKET")
sccache_region: str = env_str_field("SCCACHE_REGION")
torch_cuda_arch_list: str = env_str_field("TORCH_CUDA_ARCH_LIST", "8.9")
def __post_init__(self):
checks = [
(
self.use_torch_whl, # flag
True, # trigger_value
"torch_whls_path", # resource
is_path_exist, # check_func
"TORCH_WHEELS_PATH is not provided, but USE_TORCH_WHEEL is set to 1",
),
(
self.use_local_base_image,
True,
"base_image",
local_image_exists,
f"BASE_IMAGE {self.base_image} does not found, but USE_LOCAL_BASE_IMAGE is set to 1",
),
(
self.use_local_dockerfile,
True,
"dockerfile_path",
is_path_exist,
" DOCKERFILE_PATH path does not found, but USE_LOCAL_DOCKERFILE is set to 1",
),
]
for flag, trigger_value, attr_name, check_func, error_msg in checks:
value = getattr(self, attr_name)
if flag == trigger_value:
if not value or not check_func(value):
raise ValueError(error_msg)
else:
logger.info("flag %s is not set", flag)
if not self.output_dir:
raise ValueError("missing required output_dir")
@with_params_help(VllmBuildParameters)
class VllmBuildRunner(BaseRunner):
"""
Build vLLM using docker buildx.
Environment variable options:
"USE_TORCH_WHEEL": "1: use local wheels; 0: pull nightly from pypi",
"TORCH_WHEELS_PATH": "Path to local wheels (when USE_TORCH_WHEEL=1)",
"USE_LOCAL_BASE_IMAGE": "1: use local base image; 0: default image",
"BASE_IMAGE": "name:tag to indicate base image the dockerfile depends on (when USE_LOCAL_BASE_IMAGE=1)",
"USE_LOCAL_DOCKERFILE": "1: use local Dockerfile; 0: vllm repo default dockerfile.torch_nightly",
"DOCKERFILE_PATH": "Path to Dockerfile (when USE_LOCAL_DOCKERFILE=1)",
"OUTPUT_DIR": "e.g. './shared'",
"TORCH_CUDA_ARCH_LIST": "e.g. '8.0' or '8.0;9.0'",
"CUDA_VERSION": "e.g. '12.8.1'",
"PYTHON_VERSION": "e.g. '3.12'",
"MAX_JOBS": "e.g. '64'",
"SCCACHE_BUCKET": "e.g. 'my-bucket'",
"SCCACHE_REGION": "e.g. 'us-west-2'",
"""
def __init__(self, args=None):
self.work_directory = "vllm"
def run(self):
"""
Main function to run the vllm build:
1. prepare vllm build environment
2. prepare the docker build command args
3. run docker build
"""
inputs = VllmBuildParameters()
logger.info("Running vllm build with inputs: %s", inputs)
vllm_commit = clone_vllm()
self.cp_torch_cleaning_script(inputs)
self.cp_dockerfile_if_exist(inputs)
# Copy torch wheels from the root directory into the vllm workspace, if present
self.cp_torch_whls_if_exist(inputs)
# Make sure the output dir that stores the build artifacts exists
ensure_dir_exists(Path(inputs.output_dir))
cmd = self._generate_docker_build_cmd(inputs)
logger.info("Running docker build: \n %s", cmd)
try:
run_command(cmd, cwd="vllm", env=os.environ.copy())
finally:
self.generate_vllm_build_summary(vllm_commit, inputs)
def generate_vllm_build_summary(
self, vllm_commit: str, inputs: VllmBuildParameters
):
if not gh_summary_path():
return logger.info("Skipping, not detect GH Summary env var....")
logger.info("Generate GH Summary ...")
# summarize vllm build info
summarize_build_info(vllm_commit)
# summarize vllm build artifacts
vllm_artifact_dir = inputs.output_dir / "wheels"
summarize_content_from_file(
vllm_artifact_dir,
"build_summary.txt",
title="Vllm build env pip package summary",
)
summarize_wheels(
inputs.torch_whls_path, max_depth=3, title="Torch Wheels Artifacts"
)
summarize_wheels(vllm_artifact_dir, max_depth=3, title="Vllm Wheels Artifacts")
def cp_torch_whls_if_exist(self, inputs: VllmBuildParameters) -> str:
if not inputs.use_torch_whl:
return ""
tmp_dir = f"./{self.work_directory}/{_VLLM_TEMP_FOLDER}"
tmp_path = Path(tmp_dir)
force_create_dir(tmp_path)
copy(inputs.torch_whls_path, tmp_dir)
return tmp_dir
def cp_torch_cleaning_script(self, inputs: VllmBuildParameters):
script = get_path(inputs.cleaning_script, resolve=True)
vllm_script = Path(f"./{self.work_directory}/use_existing_torch.py")
copy(script, vllm_script)
def cp_dockerfile_if_exist(self, inputs: VllmBuildParameters):
if not inputs.use_local_dockerfile:
logger.info("using vllm default dockerfile.torch_nightly for build")
return
dockerfile_path = get_path(inputs.dockerfile_path, resolve=True)
vllm_torch_dockerfile = Path(
f"./{self.work_directory}/docker/Dockerfile.nightly_torch"
)
copy(dockerfile_path, vllm_torch_dockerfile)
def get_result_path(self, path):
"""
Return the absolute path of the result directory.
"""
if not path:
path = _DEFAULT_RESULT_PATH
abs_path = get_path(path, resolve=True)
return abs_path
def _get_torch_wheel_path_arg(self, torch_whl_dir: Optional[Path]) -> str:
if not torch_whl_dir:
return ""
return f"--build-arg TORCH_WHEELS_PATH={_VLLM_TEMP_FOLDER}"
def _get_base_image_args(self, inputs: VllmBuildParameters) -> tuple[str, str, str]:
"""
Returns:
- base_image_arg: docker buildx arg string for the base image
- final_base_image_arg: docker buildx arg string for the vllm-base stage
- pull_flag: "--pull=false" if the base image exists locally; empty string otherwise
"""
if not inputs.use_local_base_image:
return "", "", ""
base_image = inputs.base_image
# set both base image and final base image to the same local image
base_image_arg = f"--build-arg BUILD_BASE_IMAGE={base_image}"
final_base_image_arg = f"--build-arg FINAL_BASE_IMAGE={base_image}"
if local_image_exists(base_image):
pull_flag = "--pull=false"
return base_image_arg, final_base_image_arg, pull_flag
logger.info(
"Local image not found: %s; will try to pull from remote", base_image
)
return base_image_arg, final_base_image_arg, ""
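# For illustration (hypothetical image name): with USE_LOCAL_BASE_IMAGE=1 and
# BASE_IMAGE="mybase:latest" present locally, this returns:
#   ("--build-arg BUILD_BASE_IMAGE=mybase:latest",
#    "--build-arg FINAL_BASE_IMAGE=mybase:latest",
#    "--pull=false")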
def _generate_docker_build_cmd(
self,
inputs: VllmBuildParameters,
) -> str:
base_image_arg, final_base_image_arg, pull_flag = self._get_base_image_args(
inputs
)
torch_arg = self._get_torch_wheel_path_arg(inputs.torch_whls_path)
return textwrap.dedent(
f"""
docker buildx build \
--output type=local,dest={inputs.output_dir} \
-f docker/Dockerfile.nightly_torch \
{pull_flag} \
{torch_arg} \
{base_image_arg} \
{final_base_image_arg} \
--build-arg max_jobs={inputs.max_jobs} \
--build-arg CUDA_VERSION={inputs.cuda_version} \
--build-arg PYTHON_VERSION={inputs.python_version} \
--build-arg USE_SCCACHE={int(bool(inputs.sccache_bucket and inputs.sccache_region))} \
--build-arg SCCACHE_BUCKET_NAME={inputs.sccache_bucket} \
--build-arg SCCACHE_REGION_NAME={inputs.sccache_region} \
--build-arg torch_cuda_arch_list='{inputs.torch_cuda_arch_list}' \
--target {inputs.target_stage} \
-t {inputs.tag_name} \
--progress=plain .
"""
).strip()
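# For illustration, with hypothetical inputs (values echo the env-var docstring
# above; target and tag are placeholders), the generated command looks like:
#   docker buildx build \
#       --output type=local,dest=./shared \
#       -f docker/Dockerfile.nightly_torch \
#       --pull=false \
#       --build-arg TORCH_WHEELS_PATH=<_VLLM_TEMP_FOLDER> \
#       --build-arg BUILD_BASE_IMAGE=mybase:latest \
#       --build-arg FINAL_BASE_IMAGE=mybase:latest \
#       --build-arg max_jobs=64 \
#       --build-arg CUDA_VERSION=12.8.1 \
#       --build-arg PYTHON_VERSION=3.12 \
#       --build-arg USE_SCCACHE=1 \
#       --build-arg SCCACHE_BUCKET_NAME=my-bucket \
#       --build-arg SCCACHE_REGION_NAME=us-west-2 \
#       --build-arg torch_cuda_arch_list='8.0;9.0' \
#       --target <target_stage> -t <tag_name> --progress=plain .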

View File

@ -1,280 +0,0 @@
import logging
import os
import re
import subprocess
import sys
from collections.abc import Iterable
from dataclasses import dataclass
from enum import Enum
from pathlib import Path
from typing import Any
from cli.lib.common.cli_helper import BaseRunner
from cli.lib.common.envs_helper import env_path_field, env_str_field, get_env
from cli.lib.common.path_helper import copy, get_path, remove_dir
from cli.lib.common.pip_helper import (
pip_install_first_match,
pip_install_packages,
pkg_exists,
run_python,
)
from cli.lib.common.utils import run_command, working_directory
from cli.lib.core.vllm.lib import clone_vllm, run_test_plan, sample_vllm_test_library
logger = logging.getLogger(__name__)
@dataclass
class VllmTestParameters:
"""
Parameters defining the vllm external test input.
!!!DO NOT ADD SECRETS IN THIS CLASS!!!
You may put an environment variable name in VllmTestParameters if it differs from the secret's name;
fetch secrets directly from env variables at runtime.
"""
torch_whls_path: Path = env_path_field("WHEELS_PATH", "./dist")
vllm_whls_path: Path = env_path_field(
"VLLM_WHEELS_PATH", "./dist/external/vllm/wheels"
)
torch_cuda_arch_list: str = env_str_field("TORCH_CUDA_ARCH_LIST", "8.9")
cleaning_script: Path = env_path_field(
"cleaning_script", ".github/ci_configs/vllm/use_existing_torch.py"
)
def __post_init__(self):
if not self.torch_whls_path.exists():
raise ValueError("missing torch_whls_path")
if not self.vllm_whls_path.exists():
raise ValueError("missing vllm_whls_path")
class TestInputType(Enum):
TEST_PLAN = "test_plan"
UNKNOWN = "unknown"
class VllmTestRunner(BaseRunner):
def __init__(self, args: Any):
self.work_directory = "vllm"
self.test_plan = ""
self.test_type = TestInputType.UNKNOWN
self.shard_id = args.shard_id
self.num_shards = args.num_shards
if args.test_plan:
self.test_plan = args.test_plan
self.test_type = TestInputType.TEST_PLAN
# Matches the structure of the artifacts.zip from the torch build
self.TORCH_WHL_PATH_REGEX = "torch*.whl"
self.TORCH_WHL_EXTRA = "opt-einsum"
self.TORCH_ADDITIONAL_WHLS_REGEX = [
"vision/torchvision*.whl",
"audio/torchaudio*.whl",
]
# Match the structure of the artifacts.zip from vllm external build
self.VLLM_TEST_WHLS_REGEX = [
"xformers/*.whl",
"vllm/vllm*.whl",
"flashinfer-python/flashinfer*.whl",
]
def prepare(self):
"""
prepare the test environment for vllm: clone the vllm repo, install all wheels and test dependencies, and set env vars
"""
params = VllmTestParameters()
logger.info("Display VllmTestParameters %s", params)
self._set_envs(params)
clone_vllm(dst=self.work_directory)
self.cp_torch_cleaning_script(params)
with working_directory(self.work_directory):
remove_dir(Path("vllm"))
self._install_wheels(params)
self._install_dependencies()
# verify the torch packages are not overridden by test dependencies
check_versions()
def run(self):
"""
main function to run vllm test
"""
self.prepare()
try:
with working_directory(self.work_directory):
if self.test_type == TestInputType.TEST_PLAN:
if self.num_shards > 1:
run_test_plan(
self.test_plan,
"vllm",
sample_vllm_test_library(),
self.shard_id,
self.num_shards,
)
else:
run_test_plan(
self.test_plan, "vllm", sample_vllm_test_library()
)
else:
raise ValueError(f"Unknown test type {self.test_type}")
finally:
# double check the torch packages are not overridden by other packages
check_versions()
def cp_torch_cleaning_script(self, params: VllmTestParameters):
script = get_path(params.cleaning_script, resolve=True)
vllm_script = Path(f"./{self.work_directory}/use_existing_torch.py")
copy(script, vllm_script)
def _install_wheels(self, params: VllmTestParameters):
logger.info("Running vllm test with inputs: %s", params)
if not pkg_exists("torch"):
# install torch from local whls if it's not installed yet.
torch_p = f"{str(params.torch_whls_path)}/{self.TORCH_WHL_PATH_REGEX}"
pip_install_first_match(torch_p, self.TORCH_WHL_EXTRA)
torch_whls_path = [
f"{str(params.torch_whls_path)}/{whl_path}"
for whl_path in self.TORCH_ADDITIONAL_WHLS_REGEX
]
for torch_whl in torch_whls_path:
pip_install_first_match(torch_whl)
logger.info("Done. Installed torch and other torch-related wheels ")
logger.info("Installing vllm wheels")
vllm_whls_path = [
f"{str(params.vllm_whls_path)}/{whl_path}"
for whl_path in self.VLLM_TEST_WHLS_REGEX
]
for vllm_whl in vllm_whls_path:
pip_install_first_match(vllm_whl)
logger.info("Done. Installed vllm wheels")
def _install_test_dependencies(self):
"""
This method replaces torch dependencies in vllm's requirements/test.in
with local torch wheel info, then generates test.txt at runtime
"""
logger.info("generate test.txt from requirements/test.in with local torch whls")
preprocess_test_in()
copy("requirements/test.txt", "snapshot_constraint.txt")
run_command(
f"{sys.executable} -m uv pip compile requirements/test.in "
"-o test.txt "
"--index-strategy unsafe-best-match "
"--constraint snapshot_constraint.txt "
"--torch-backend cu128"
)
pip_install_packages(requirements="test.txt", prefer_uv=True)
logger.info("Done. installed requirements for test dependencies")
def _install_dependencies(self):
pip_install_packages(packages=["-e", "tests/vllm_test_utils"], prefer_uv=True)
pip_install_packages(packages=["hf_transfer"], prefer_uv=True)
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
# using script from vllm repo to remove all torch packages from requirements txt
run_python("use_existing_torch.py")
# install common packages
for requirements in ["requirements/common.txt", "requirements/build.txt"]:
pip_install_packages(
requirements=requirements,
prefer_uv=True,
)
# install test packages
self._install_test_dependencies()
def _set_envs(self, inputs: VllmTestParameters):
os.environ["TORCH_CUDA_ARCH_LIST"] = inputs.torch_cuda_arch_list
if not validate_cuda(get_env("TORCH_CUDA_ARCH_LIST")):
logger.warning(
"Missing supported TORCH_CUDA_ARCH_LIST. "
"Currently support TORCH_CUDA_ARCH_LIST env var "
"with supported arch [8.0, 8.9, 9.0]"
)
os.environ["HF_TOKEN"] = os.getenv("VLLM_TEST_HUGGING_FACE_TOKEN", "")
if not get_env("HF_TOKEN"):
raise ValueError(
"missing required HF_TOKEN, please set VLLM_TEST_HUGGING_FACE_TOKEN env var"
)
if not get_env("TORCH_CUDA_ARCH_LIST"):
raise ValueError(
"missing required TORCH_CUDA_ARCH_LIST, please set TORCH_CUDA_ARCH_LIST env var"
)
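# Minimal env sketch before invoking the runner (placeholder token value):
#   export TORCH_CUDA_ARCH_LIST="8.9"
#   export VLLM_TEST_HUGGING_FACE_TOKEN="hf_..."  # copied into HF_TOKEN by _set_envs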
def preprocess_test_in(
target_file: str = "requirements/test.in", additional_packages: Iterable[str] = ()
):
"""
Modifies target_file in place in the vllm work directory.
It removes torch and other unwanted packages from target_file and replaces them
with local torch wheel entries of the form "$WHEEL_PACKAGE_NAME @ file://<LOCAL_PATH>"
"""
additional_pkgs_to_remove = list(additional_packages or ())
pkgs_to_remove = [
"torch",
"torchvision",
"torchaudio",
"xformers",
"mamba_ssm",
] + additional_pkgs_to_remove
# Read current requirements
target_path = Path(target_file)
lines = target_path.read_text().splitlines()
pkgs_to_add = []
# Remove lines starting with the package names (==, @, >=) — case-insensitive
pattern = re.compile(rf"^({'|'.join(pkgs_to_remove)})\s*(==|@|>=)", re.IGNORECASE)
kept_lines = [line for line in lines if not pattern.match(line)]
# Get local installed torch/vision/audio from pip freeze
# This is hacky, but it works
pip_freeze = subprocess.check_output(["pip", "freeze"], text=True)
header_lines = [
line
for line in pip_freeze.splitlines()
if re.match(
r"^(torch|torchvision|torchaudio)\s*@\s*file://", line, re.IGNORECASE
)
]
# Write back: header_lines + blank + kept_lines
out_lines = header_lines + [""] + kept_lines
if pkgs_to_add:
out_lines += [""] + pkgs_to_add
out = "\n".join(out_lines) + "\n"
target_path.write_text(out)
logger.info("[INFO] Updated %s", target_file)
def validate_cuda(value: str) -> bool:
VALID_VALUES = {"8.0", "8.9", "9.0"}
return all(v in VALID_VALUES for v in value.split())
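# For example: validate_cuda("8.0 9.0") -> True, validate_cuda("7.5") -> False.
# Note the value is whitespace-separated, and all() over an empty string's split
# is vacuously True, so the caller also checks the env var is non-empty.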
def check_versions():
"""
check installed packages version
"""
logger.info("Double check installed packages")
patterns = ["torch", "xformers", "torchvision", "torchaudio", "vllm"]
for pkg in patterns:
pkg_exists(pkg)
logger.info("Done. checked installed packages")

View File

@ -1,40 +0,0 @@
# main.py
import argparse
import logging
from cli.build_cli.register_build import register_build_commands
from cli.lib.common.logger import setup_logging
from cli.test_cli.register_test import register_test_commands
logger = logging.getLogger(__name__)
def main():
# Define top-level parser
parser = argparse.ArgumentParser(description="Lumos CLI")
subparsers = parser.add_subparsers(dest="command", required=True)
parser.add_argument(
"--log-level", default="INFO", help="Log level (DEBUG, INFO, WARNING, ERROR)"
)
# registers second-level subcommands
register_build_commands(subparsers)
register_test_commands(subparsers)
# parse args after all options are registered
args = parser.parse_args()
# setup global logging
setup_logging(getattr(logging, args.log_level.upper(), logging.INFO))
logger.debug("Parsed args: %s", args)
if hasattr(args, "func"):
args.func(args)
else:
parser.print_help()
if __name__ == "__main__":
main()
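# Example invocation (assuming the build/test subcommands registered above):
#   python -m cli.run --log-level DEBUG build external vllm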

View File

@ -1,62 +0,0 @@
import argparse
import logging
from cli.lib.common.cli_helper import register_targets, RichHelp, TargetSpec
from cli.lib.core.vllm.vllm_test import VllmTestRunner
logger = logging.getLogger(__name__)
# Maps targets to their argparse configuration and runner
# it adds a new target to the path: python -m cli.run test external {target} with VllmTestRunner
_TARGETS: dict[str, TargetSpec] = {
"vllm": {
"runner": VllmTestRunner,
"help": "test vLLM with pytorch main",
}
# add yours ...
}
def common_args(parser: argparse.ArgumentParser) -> None:
"""
Add common CLI arguments to the given parser.
"""
parser.add_argument(
"--shard-id",
type=int,
default=1,
help="a shard id to run, e.g. '0,1,2,3'",
)
parser.add_argument(
"--num-shards",
type=int,
default=1,
help="a number of shards to run, e.g. '4'",
)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
"-tp",
"--test-plan",
type=str,
help="a pre-defined test plan to run, e.g. 'basic_correctness_test'",
)
def register_test_commands(subparsers: argparse._SubParsersAction) -> None:
test_parser = subparsers.add_parser(
"test",
help="test related commands",
formatter_class=RichHelp,
)
test_subparsers = test_parser.add_subparsers(dest="test_command", required=True)
overview = "\n".join(
f" {name:12} {spec.get('help', '')}" for name, spec in _TARGETS.items()
)
external_parser = test_subparsers.add_parser(
"external",
help="Test external targets",
description="Test third-party targets.\n\nAvailable targets:\n" + overview,
formatter_class=RichHelp,
)
register_targets(external_parser, _TARGETS, common_args=common_args)
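# Example invocation (assuming this CLI layout):
#   python -m cli.run test external vllm --test-plan basic_correctness_test \
#       --shard-id 0 --num-shards 4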

View File

@ -1,23 +0,0 @@
[project]
name = "lumen-ci"
version = "0.1.0"
dependencies = [
"pyyaml==6.0.2",
"GitPython==3.1.45",
"docker==7.1.0",
"pytest==7.3.2",
"uv==0.8.6"
]
[tool.setuptools]
packages = ["cli"]
[tool.setuptools.package-dir]
cli = "cli"
[tool.ruff.lint]
# Enable preview mode for linting
preview = true
# Now you can select your preview rules, like RUF048
extend-select = ["RUF048"]

View File

@ -1,47 +0,0 @@
# tests/test_cli.py
import io
import sys
import unittest
from contextlib import redirect_stderr, redirect_stdout
from unittest.mock import patch
from cli.run import main
class TestArgparseCLI(unittest.TestCase):
@patch("cli.build_cli.register_build.VllmBuildRunner.run", return_value=None)
@patch("cli.build_cli.register_build.VllmBuildRunner.__init__", return_value=None)
def test_cli_run_build_external(self, mock_init, mock_run):
from cli.run import main # import after patches if needed
test_args = ["cli.run", "build", "external", "vllm"]
with patch.object(sys, "argv", test_args):
# argparse may call sys.exit on error; capture to avoid test aborts
try:
main()
except SystemExit:
pass
mock_init.assert_called_once() # got constructed
mock_run.assert_called_once_with() # run() called
def test_build_help(self):
test_args = ["cli.run", "build", "--help"]
with patch.object(sys, "argv", test_args):
stdout = io.StringIO()
stderr = io.StringIO()
# --help always raises SystemExit(0)
with self.assertRaises(SystemExit) as cm:
with redirect_stdout(stdout), redirect_stderr(stderr):
main()
self.assertEqual(cm.exception.code, 0)
output = stdout.getvalue()
self.assertIn("usage", output)
self.assertIn("external", output)
if __name__ == "__main__":
unittest.main()

View File

@ -1,115 +0,0 @@
import argparse
import io
import unittest
from contextlib import redirect_stderr
from unittest.mock import patch
from cli.lib.common.cli_helper import BaseRunner, register_targets, RichHelp, TargetSpec
# ---- Dummy runners for unit tests ----
class FooRunner(BaseRunner):
"""Foo description from docstring."""
def run(self) -> None: # replaced by mock
pass
class BarRunner(BaseRunner):
def run(self) -> None: # replaced by mock
pass
def add_foo_args(p: argparse.ArgumentParser) -> None:
p.add_argument("--x", type=int, required=True, help="x value")
def common_args(p: argparse.ArgumentParser) -> None:
p.add_argument("--verbose", action="store_true", help="verbose flag")
def build_parser(specs: dict[str, TargetSpec]) -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(prog="app", formatter_class=RichHelp)
register_targets(
parser=parser,
target_specs=specs,
common_args=common_args,
)
return parser
def get_subparser(
parser: argparse.ArgumentParser, name: str
) -> argparse.ArgumentParser:
subparsers_action = next(
a
for a in parser._subparsers._group_actions # type: ignore[attr-defined]
if isinstance(a, argparse._SubParsersAction)
)
return subparsers_action.choices[name]
class TestRegisterTargets(unittest.TestCase):
def test_metavar_lists_targets(self):
specs: dict[str, TargetSpec] = {
"foo": {"runner": FooRunner, "add_arguments": add_foo_args},
"bar": {"runner": BarRunner},
}
parser = build_parser(specs)
subparsers_action = next(
a
for a in parser._subparsers._group_actions # type: ignore[attr-defined]
if isinstance(a, argparse._SubParsersAction)
)
self.assertEqual(subparsers_action.metavar, "{foo,bar}")
def test_add_arguments_and_common_args_present(self):
specs: dict[str, TargetSpec] = {
"foo": {"runner": FooRunner, "add_arguments": add_foo_args},
}
parser = build_parser(specs)
foo = get_subparser(parser, "foo")
help_text = foo.format_help()
self.assertIn("--x", help_text)
self.assertIn("--verbose", help_text)
def test_runner_constructed_with_ns_and_run_called(self):
specs: dict[str, TargetSpec] = {
"foo": {"runner": FooRunner, "add_arguments": add_foo_args},
}
parser = build_parser(specs)
with (
patch.object(FooRunner, "__init__", return_value=None) as mock_init,
patch.object(FooRunner, "run", return_value=None) as mock_run,
):
ns = parser.parse_args(["foo", "--x", "3", "--verbose"])
ns.func(ns) # set by register_targets
# __init__ received the Namespace
self.assertEqual(mock_init.call_count, 1)
(called_ns,), _ = mock_init.call_args
self.assertIsInstance(called_ns, argparse.Namespace)
# run() called with no args
mock_run.assert_called_once_with()
def test_runner_docstring_used_as_description_when_missing(self):
specs: dict[str, TargetSpec] = {
"foo": {"runner": FooRunner, "add_arguments": add_foo_args},
}
parser = build_parser(specs)
foo = get_subparser(parser, "foo")
help_text = foo.format_help()
self.assertIn("Foo description from docstring.", help_text)
def test_missing_target_raises_systemexit_with_usage(self):
specs: dict[str, TargetSpec] = {"foo": {"runner": FooRunner}}
parser = build_parser(specs)
buf = io.StringIO()
with self.assertRaises(SystemExit), redirect_stderr(buf):
parser.parse_args([])
err = buf.getvalue()
self.assertIn("usage:", err)
if __name__ == "__main__":
unittest.main()

View File

@ -1,75 +0,0 @@
import unittest
from unittest import mock
from unittest.mock import MagicMock
import docker.errors as derr
from cli.lib.common.docker_helper import _get_client, local_image_exists
class TestDockerImageHelpers(unittest.TestCase):
def setUp(self):
# Reset the singleton in the target module
patcher = mock.patch("cli.lib.common.docker_helper._docker_client", None)
self.addCleanup(patcher.stop)
patcher.start()
def test_local_image_exists_true(self):
# Mock a docker client whose images.get returns an object (no exception)
mock_client = MagicMock()
mock_client.images.get.return_value = object()
ok = local_image_exists("repo:tag", client=mock_client)
self.assertTrue(ok)
def test_local_image_exists_not_found_false(self):
mock_client = MagicMock()
# Raise docker.errors.NotFound
mock_client.images.get.side_effect = derr.NotFound("nope")
ok = local_image_exists("missing:latest", client=mock_client)
self.assertFalse(ok)
def test_local_image_exists_api_error_false(self):
mock_client = MagicMock()
mock_client.images.get.side_effect = derr.APIError("boom", None)
ok = local_image_exists("broken:tag", client=mock_client)
self.assertFalse(ok)
def test_local_image_exists_uses_lazy_singleton(self):
# Patch docker.from_env used by _get_client()
with mock.patch(
"cli.lib.common.docker_helper.docker.from_env"
) as mock_from_env:
mock_docker_client = MagicMock()
mock_from_env.return_value = mock_docker_client
# First call should create and cache the client
c1 = _get_client()
self.assertIs(c1, mock_docker_client)
mock_from_env.assert_called_once()
# Second call should reuse cached client (no extra from_env calls)
c2 = _get_client()
self.assertIs(c2, mock_docker_client)
mock_from_env.assert_called_once() # still once
def test_local_image_exists_without_client_param_calls_get_client_once(self):
# Ensure _get_client is called and cached; local_image_exists should reuse it
with mock.patch("cli.lib.common.docker_helper._get_client") as mock_get_client:
mock_client = MagicMock()
mock_get_client.return_value = mock_client
# 1st call
local_image_exists("repo:tag")
# 2nd call
local_image_exists("repo:tag2")
# local_image_exists should call _get_client each time,
# but your _get_client itself caches docker.from_env.
self.assertEqual(mock_get_client.call_count, 2)
self.assertEqual(mock_client.images.get.call_count, 2)
mock_client.images.get.assert_any_call("repo:tag")
mock_client.images.get.assert_any_call("repo:tag2")
if __name__ == "__main__":
unittest.main()

View File

@ -1,149 +0,0 @@
import os
import unittest
from dataclasses import dataclass
from pathlib import Path
from unittest.mock import patch
import cli.lib.common.envs_helper as m
class TestEnvHelpers(unittest.TestCase):
def setUp(self):
# Keep a copy of the original environment to restore later
self._env_backup = dict(os.environ)
def tearDown(self):
# Restore environment to original state
os.environ.clear()
os.environ.update(self._env_backup)
# -------- get_env --------
def test_get_env_unset_returns_default(self):
with patch.dict(os.environ, {}, clear=True):
self.assertEqual(m.get_env("FOO", "default"), "default")
def test_get_env_empty_returns_default(self):
with patch.dict(os.environ, {"FOO": ""}, clear=True):
self.assertEqual(m.get_env("FOO", "default"), "default")
def test_get_env_set_returns_value(self):
with patch.dict(os.environ, {"FOO": "bar"}, clear=True):
self.assertEqual(m.get_env("FOO", "default"), "bar")
def test_get_env_not_exist_returns_default(self):
with patch.dict(os.environ, {"FOO": "bar"}, clear=True):
self.assertEqual(m.get_env("TEST_NOT_EXIST", "default"), "default")
def test_get_env_not_exist_without_default(self):
with patch.dict(os.environ, {"FOO": "bar"}, clear=True):
self.assertEqual(m.get_env("TEST_NOT_EXIST"), "")
# -------- env_bool --------
def test_env_bool_uses_default_when_unset(self):
with patch.dict(os.environ, {}, clear=True):
self.assertTrue(m.env_bool("FLAG", default=True))
self.assertFalse(m.env_bool("FLAG", default=False))
def test_env_bool_uses_str2bool_when_set(self):
# Patch str2bool used by env_bool so we don't depend on its exact behavior
def fake_str2bool(s: str) -> bool:
return s.lower() in {"1", "true", "yes", "on", "y"}
with (
patch.dict(os.environ, {"FLAG": "yEs"}, clear=True),
patch.object(m, "str2bool", fake_str2bool),
):
self.assertTrue(m.env_bool("FLAG", default=False))
# -------- env_path_optional / env_path --------
def test_env_path_optional_unset_returns_none_by_default(self):
with patch.dict(os.environ, {}, clear=True):
self.assertIsNone(m.env_path_optional("P"))
def test_env_path_optional_unset_returns_none_when_env_var_is_empty(self):
with patch.dict(os.environ, {"P": ""}, clear=True):
self.assertIsNone(m.env_path_optional("P"))
def test_env_path_optional_unset_returns_default_str(self):
# default as string; resolve=True by default -> absolute path
default_str = "x/y"
with patch.dict(os.environ, {}, clear=True):
p = m.env_path_optional("P", default=default_str)
self.assertIsInstance(p, Path)
self.assertIsNotNone(p)
if p:
self.assertTrue(p.is_absolute())
self.assertEqual(p.parts[-2:], ("x", "y"))
def test_env_path_optional_unset_returns_default_path_no_resolve(self):
d = Path("z")
with patch.dict(os.environ, {}, clear=True):
p = m.env_path_optional("P", default=d, resolve=False)
self.assertEqual(p, d)
def test_env_path_optional_respects_resolve_true(self):
with patch.dict(os.environ, {"P": "a/b"}, clear=True):
p = m.env_path_optional("P", resolve=True)
self.assertIsInstance(p, Path)
if p:
self.assertTrue(p.is_absolute())
def test_env_path_optional_respects_resolve_false(self):
with patch.dict(os.environ, {"P": "rel/dir"}, clear=True):
p = m.env_path_optional("P", resolve=False)
self.assertEqual(p, Path("rel/dir"))
if p:
self.assertFalse(p.is_absolute())
def test_env_path_raises_when_missing_and_default_none(self):
with patch.dict(os.environ, {}, clear=True):
with self.assertRaises(ValueError):
m.env_path("P", None, resolve=True)
def test_env_path_returns_path_when_present(self):
tmp = Path("./b").resolve()
with patch.dict(os.environ, {"P": str(tmp)}, clear=True):
p = m.env_path("P", None, resolve=True)
self.assertEqual(p, tmp)
# -------- dataclass field helpers --------
def test_dataclass_fields_read_env_at_instantiation(self):
@dataclass
class Cfg:
flag: bool = m.env_bool_field("FLAG", default=False)
out: Path = m.env_path_field("OUT", default="ab", resolve=True)
name: str = m.env_str_field("NAME", default="anon")
# First instantiation
with patch.dict(
os.environ, {"FLAG": "true", "OUT": "outdir", "NAME": "alice"}, clear=True
):
cfg1 = Cfg()
self.assertTrue(cfg1.flag)
self.assertIsInstance(cfg1.out, Path)
self.assertTrue(cfg1.out.is_absolute())
self.assertEqual(cfg1.name, "alice")
cfg1.name = "bob" # change instance value
self.assertEqual(cfg1.name, "bob") # change is reflected
# Change env; new instance should reflect new values
with patch.dict(os.environ, {"FLAG": "false", "NAME": ""}, clear=True):
cfg2 = Cfg()
self.assertFalse(cfg2.flag) # str2bool("false") -> False
self.assertTrue("ab" in str(cfg2.out))
self.assertIsInstance(cfg2.out, Path)
self.assertTrue(cfg2.out.is_absolute())
self.assertEqual(cfg2.name, "anon") # empty -> fallback to default
def test_dataclass_path_field_with_default_value(self):
@dataclass
class C2:
out: Path = m.env_path_field("OUT", default="some/dir", resolve=False)
with patch.dict(os.environ, {}, clear=True):
c = C2()
self.assertEqual(c.out, Path("some/dir"))
if __name__ == "__main__":
unittest.main()

Some files were not shown because too many files have changed in this diff.