Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-31 20:34:54 +08:00)

Compare commits: validation ... release/2. (72 commits)
| SHA1 |
|---|
| 63d5e9221b |
| 91bdec37c8 |
| d44533f9d0 |
| bd1040c3b0 |
| 81b88543f0 |
| e63004b649 |
| 00804a79e4 |
| cd033a128c |
| 19058a60b0 |
| 30650e0add |
| 661c3de2a7 |
| 71dd2de836 |
| 6cd59f1f07 |
| ee68b41571 |
| 0365423035 |
| 03baf94aae |
| 7782f2866c |
| be9a4076f0 |
| d114e0488c |
| 194698a4ac |
| 1d6a938090 |
| 4f0b3ad855 |
| bf1b3a056a |
| c365674171 |
| 768e4b9420 |
| e25474c05d |
| d8b35dac22 |
| bbb838654c |
| d983cb78e2 |
| 75e01e7df0 |
| a696b3b7f6 |
| 2e165ec9c2 |
| 1199df476e |
| 97ff6cfd9c |
| fb38ab7881 |
| 23961cef85 |
| 634cf5069a |
| 12d0e693d0 |
| 38acd812ab |
| b197f540bc |
| dc81d19aac |
| 108305e47b |
| a8b009185d |
| b67b277268 |
| a8f93a5c71 |
| fa07dc5132 |
| 2a82d31f78 |
| 4bb5cb51e6 |
| ef38d0572e |
| 5a53185e65 |
| bc9e23abb5 |
| 8194fae625 |
| 12acd4c9b3 |
| 857797d148 |
| 233dfe4d6a |
| e22b534b10 |
| 8602990e3f |
| 685cc955df |
| b1c2430fbd |
| 3002eb2556 |
| e1a846d6b8 |
| 4a9a8c606d |
| d3201f48b1 |
| 74832f12fa |
| 02cdb400d7 |
| 37257774c6 |
| c4e5434423 |
| b4f90aae1b |
| 94d6463255 |
| 6a89a753b1 |
| d69c421912 |
| 6725db07ae |
@@ -1,4 +1,3 @@
 # We do not use this library in our Bazel build. It contains an
 # infinitely recursing symlink that makes Bazel very unhappy.
 third_party/ittapi/
-third_party/opentelemetry-cpp

@@ -1 +1 @@
-6.5.0
+6.1.1
							
								
								
									
.buckconfig.oss (new file)

@@ -0,0 +1,26 @@
+[pt]
+  is_oss=1
+
+[buildfile]
+  name = BUCK.oss
+  includes = //tools/build_defs/select.bzl
+
+[repositories]
+  bazel_skylib = third_party/bazel-skylib/
+  ovr_config = .
+
+[download]
+  in_build = true
+
+[cxx]
+  cxxflags = -std=c++17
+  ldflags = -Wl,--no-undefined
+  should_remap_host_platform = true
+  cpp = /usr/bin/clang
+  cc = /usr/bin/clang
+  cxx = /usr/bin/clang++
+  cxxpp = /usr/bin/clang++
+  ld = /usr/bin/clang++
+
+[project]
+  default_flavors_mode=all
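As a minimal sketch of how this OSS config might be exercised locally: Buck reads `.buckconfig` by default, and the `[buildfile] name = BUCK.oss` setting points it at build files named `BUCK.oss`. The copy-into-place step and the target name below are assumptions for illustration, not something this diff specifies.

```bash
# Sketch only, not a verified workflow.
cp .buckconfig.oss .buckconfig   # assumption: activate the OSS config in place
buck build //c10:c10             # assumption: an example target defined in a BUCK.oss file
```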
@@ -1,19 +0,0 @@
-# Aarch64 (ARM/Graviton) Support Scripts
-Scripts for building aarch64 PyTorch PIP Wheels. These scripts build the following wheels:
-* torch
-* torchvision
-* torchaudio
-* torchtext
-* torchdata
-## Aarch64_ci_build.sh
-This script is designed to support CD operations within the PyPI manylinux aarch64 container, and is executed in the container. It prepares the container and then executes __aarch64_wheel_ci_build.py__ to build the wheels. The script assumes the PyTorch repo is located at ```/pytorch``` and will put the wheels into ```/artifacts```.
-### Usage
-```DESIRED_PYTHON=<PythonVersion> aarch64_ci_build.sh```
-
-__NOTE:__ The CI build is currently __EXPERIMENTAL__.
-
-## Build_aarch64_wheel.py
-This app allows a person to build using AWS EC2 resources and requires the AWS CLI and Boto3, with AWS credentials that can launch EC2 instances for the wheel builds. It can be used in a CodeBuild CD pipeline or from a local system.
-
-### Usage
-```build_aarch64_wheel.py --key-name <YourPemKey> --use-docker --python 3.8 --branch <RCtag>```
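For readers following the removed README, a hedged sketch of what the container-side invocation could look like. The image name is a placeholder, and the script path assumes `aarch64_ci_build.sh` lives alongside `aarch64_wheel_ci_build.py` under `.ci/aarch64_linux/`; the README itself only states that the repo is expected at `/pytorch` and wheels land in `/artifacts`.

```bash
# Sketch: run the aarch64 CI wheel build inside a manylinux aarch64 container.
docker run --rm \
  -v "$(pwd)/pytorch:/pytorch" \
  -v "$(pwd)/artifacts:/artifacts" \
  -e DESIRED_PYTHON=3.11 \
  -e DESIRED_CUDA=cpu \
  some-manylinux-aarch64-image:latest \
  bash /pytorch/.ci/aarch64_linux/aarch64_ci_build.sh
```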
@@ -1,39 +0,0 @@
-#!/bin/bash
-set -eux -o pipefail
-
-GPU_ARCH_VERSION=${GPU_ARCH_VERSION:-}
-
-SCRIPTPATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
-source $SCRIPTPATH/aarch64_ci_setup.sh
-
-tagged_version() {
-  GIT_DESCRIBE="git --git-dir /pytorch/.git describe --tags --match v[0-9]*.[0-9]*.[0-9]*"
-  if ${GIT_DESCRIBE} --exact >/dev/null; then
-    ${GIT_DESCRIBE}
-  else
-    return 1
-  fi
-}
-
-if tagged_version >/dev/null; then
-  export OVERRIDE_PACKAGE_VERSION="$(tagged_version | sed -e 's/^v//' -e 's/-.*$//')"
-fi
-
-###############################################################################
-# Run aarch64 builder python
-###############################################################################
-cd /
-# adding safe directory for git as the permissions will be
-# on the mounted pytorch repo
-git config --global --add safe.directory /pytorch
-pip install -r /pytorch/requirements.txt
-pip install auditwheel
-if [ "$DESIRED_CUDA" = "cpu" ]; then
-    echo "BASE_CUDA_VERSION is not set. Building cpu wheel."
-    #USE_PRIORITIZED_TEXT_FOR_LD for enable linker script optimization https://github.com/pytorch/pytorch/pull/121975/files
-    USE_PRIORITIZED_TEXT_FOR_LD=1 python /pytorch/.ci/aarch64_linux/aarch64_wheel_ci_build.py --enable-mkldnn
-else
-    echo "BASE_CUDA_VERSION is set to: $DESIRED_CUDA"
-    #USE_PRIORITIZED_TEXT_FOR_LD for enable linker script optimization https://github.com/pytorch/pytorch/pull/121975/files
-    USE_PRIORITIZED_TEXT_FOR_LD=1 python /pytorch/.ci/aarch64_linux/aarch64_wheel_ci_build.py --enable-mkldnn --enable-cuda
-fi
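To make the version handling above concrete, this is how the `tagged_version | sed ...` pipeline maps a release tag to `OVERRIDE_PACKAGE_VERSION`. The tag value below is made up for illustration.

```bash
# Illustration of the two sed expressions in the removed script:
#   -e 's/^v//'    strips the leading "v"
#   -e 's/-.*$//'  drops any "-<suffix>" such as an rc qualifier
echo "v2.4.0-rc1" | sed -e 's/^v//' -e 's/-.*$//'
# prints: 2.4.0   (exported as OVERRIDE_PACKAGE_VERSION)
```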
@@ -1,23 +0,0 @@
-#!/bin/bash
-set -eux -o pipefail
-
-# This script is used to prepare the Docker container for aarch64_ci_wheel_build.py python script
-# By creating symlinks from desired /opt/python to /usr/local/bin/
-
-NUMPY_VERSION=2.0.2
-PYGIT2_VERSION=1.15.1
-if [[ "$DESIRED_PYTHON"  == "3.13" ]]; then
-    NUMPY_VERSION=2.1.2
-    PYGIT2_VERSION=1.16.0
-fi
-
-SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )"
-source $SCRIPTPATH/../manywheel/set_desired_python.sh
-
-pip install -q numpy==${NUMPY_VERSION} pyyaml==6.0.2 scons==4.7.0 ninja==1.11.1 patchelf==0.17.2 pygit2==${PYGIT2_VERSION}
-
-for tool in python python3 pip pip3 ninja scons patchelf; do
-    ln -sf ${DESIRED_PYTHON_BIN_DIR}/${tool} /usr/local/bin;
-done
-
-python --version
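A small sanity check one could run after this setup script; it is only a sketch and relies solely on the symlinks and pins created above.

```bash
# Confirm the symlinked toolchain is the one the setup script selected.
readlink -f /usr/local/bin/python        # should resolve under ${DESIRED_PYTHON_BIN_DIR}
python --version
python -c "import numpy, pygit2; print(numpy.__version__, pygit2.__version__)"
ninja --version && scons --version && patchelf --version
```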
| @ -1,230 +0,0 @@ | |||||||
| #!/usr/bin/env python3 |  | ||||||
| # encoding: UTF-8 |  | ||||||
|  |  | ||||||
| import os |  | ||||||
| import shutil |  | ||||||
| from subprocess import check_call, check_output |  | ||||||
| from typing import List |  | ||||||
|  |  | ||||||
| from pygit2 import Repository |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def list_dir(path: str) -> List[str]: |  | ||||||
|     """' |  | ||||||
|     Helper for getting paths for Python |  | ||||||
|     """ |  | ||||||
|     return check_output(["ls", "-1", path]).decode().split("\n") |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def build_ArmComputeLibrary() -> None: |  | ||||||
|     """ |  | ||||||
|     Using ArmComputeLibrary for aarch64 PyTorch |  | ||||||
|     """ |  | ||||||
|     print("Building Arm Compute Library") |  | ||||||
|     acl_build_flags = [ |  | ||||||
|         "debug=0", |  | ||||||
|         "neon=1", |  | ||||||
|         "opencl=0", |  | ||||||
|         "os=linux", |  | ||||||
|         "openmp=1", |  | ||||||
|         "cppthreads=0", |  | ||||||
|         "arch=armv8a", |  | ||||||
|         "multi_isa=1", |  | ||||||
|         "fixed_format_kernels=1", |  | ||||||
|         "build=native", |  | ||||||
|     ] |  | ||||||
|     acl_install_dir = "/acl" |  | ||||||
|     acl_checkout_dir = "ComputeLibrary" |  | ||||||
|     os.makedirs(acl_install_dir) |  | ||||||
|     check_call( |  | ||||||
|         [ |  | ||||||
|             "git", |  | ||||||
|             "clone", |  | ||||||
|             "https://github.com/ARM-software/ComputeLibrary.git", |  | ||||||
|             "-b", |  | ||||||
|             "v24.09", |  | ||||||
|             "--depth", |  | ||||||
|             "1", |  | ||||||
|             "--shallow-submodules", |  | ||||||
|         ] |  | ||||||
|     ) |  | ||||||
|  |  | ||||||
|     check_call( |  | ||||||
|         ["scons", "Werror=1", "-j8", f"build_dir=/{acl_install_dir}/build"] |  | ||||||
|         + acl_build_flags, |  | ||||||
|         cwd=acl_checkout_dir, |  | ||||||
|     ) |  | ||||||
|     for d in ["arm_compute", "include", "utils", "support", "src"]: |  | ||||||
|         shutil.copytree(f"{acl_checkout_dir}/{d}", f"{acl_install_dir}/{d}") |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def update_wheel(wheel_path) -> None: |  | ||||||
|     """ |  | ||||||
|     Update the cuda wheel libraries |  | ||||||
|     """ |  | ||||||
|     folder = os.path.dirname(wheel_path) |  | ||||||
|     wheelname = os.path.basename(wheel_path) |  | ||||||
|     os.mkdir(f"{folder}/tmp") |  | ||||||
|     os.system(f"unzip {wheel_path} -d {folder}/tmp") |  | ||||||
|     libs_to_copy = [ |  | ||||||
|         "/usr/local/cuda/extras/CUPTI/lib64/libcupti.so.12", |  | ||||||
|         "/usr/local/cuda/lib64/libcudnn.so.9", |  | ||||||
|         "/usr/local/cuda/lib64/libcublas.so.12", |  | ||||||
|         "/usr/local/cuda/lib64/libcublasLt.so.12", |  | ||||||
|         "/usr/local/cuda/lib64/libcudart.so.12", |  | ||||||
|         "/usr/local/cuda/lib64/libcufft.so.11", |  | ||||||
|         "/usr/local/cuda/lib64/libcusparse.so.12", |  | ||||||
|         "/usr/local/cuda/lib64/libcusparseLt.so.0", |  | ||||||
|         "/usr/local/cuda/lib64/libcusolver.so.11", |  | ||||||
|         "/usr/local/cuda/lib64/libcurand.so.10", |  | ||||||
|         "/usr/local/cuda/lib64/libnvToolsExt.so.1", |  | ||||||
|         "/usr/local/cuda/lib64/libnvJitLink.so.12", |  | ||||||
|         "/usr/local/cuda/lib64/libnvrtc.so.12", |  | ||||||
|         "/usr/local/cuda/lib64/libnvrtc-builtins.so.12.6", |  | ||||||
|         "/usr/local/cuda/lib64/libcudnn_adv.so.9", |  | ||||||
|         "/usr/local/cuda/lib64/libcudnn_cnn.so.9", |  | ||||||
|         "/usr/local/cuda/lib64/libcudnn_graph.so.9", |  | ||||||
|         "/usr/local/cuda/lib64/libcudnn_ops.so.9", |  | ||||||
|         "/usr/local/cuda/lib64/libcudnn_engines_runtime_compiled.so.9", |  | ||||||
|         "/usr/local/cuda/lib64/libcudnn_engines_precompiled.so.9", |  | ||||||
|         "/usr/local/cuda/lib64/libcudnn_heuristic.so.9", |  | ||||||
|         "/lib64/libgomp.so.1", |  | ||||||
|         "/usr/lib64/libgfortran.so.5", |  | ||||||
|         "/acl/build/libarm_compute.so", |  | ||||||
|         "/acl/build/libarm_compute_graph.so", |  | ||||||
|     ] |  | ||||||
|     if enable_cuda: |  | ||||||
|         libs_to_copy += [ |  | ||||||
|             "/usr/local/lib/libnvpl_lapack_lp64_gomp.so.0", |  | ||||||
|             "/usr/local/lib/libnvpl_blas_lp64_gomp.so.0", |  | ||||||
|             "/usr/local/lib/libnvpl_lapack_core.so.0", |  | ||||||
|             "/usr/local/lib/libnvpl_blas_core.so.0", |  | ||||||
|         ] |  | ||||||
|     else: |  | ||||||
|         libs_to_copy += [ |  | ||||||
|             "/opt/OpenBLAS/lib/libopenblas.so.0", |  | ||||||
|         ] |  | ||||||
|     # Copy libraries to unzipped_folder/a/lib |  | ||||||
|     for lib_path in libs_to_copy: |  | ||||||
|         lib_name = os.path.basename(lib_path) |  | ||||||
|         shutil.copy2(lib_path, f"{folder}/tmp/torch/lib/{lib_name}") |  | ||||||
|         os.system( |  | ||||||
|             f"cd {folder}/tmp/torch/lib/; " |  | ||||||
|             f"patchelf --set-rpath '$ORIGIN' --force-rpath {folder}/tmp/torch/lib/{lib_name}" |  | ||||||
|         ) |  | ||||||
|     os.mkdir(f"{folder}/cuda_wheel") |  | ||||||
|     os.system(f"cd {folder}/tmp/; zip -r {folder}/cuda_wheel/{wheelname} *") |  | ||||||
|     shutil.move( |  | ||||||
|         f"{folder}/cuda_wheel/{wheelname}", |  | ||||||
|         f"{folder}/{wheelname}", |  | ||||||
|         copy_function=shutil.copy2, |  | ||||||
|     ) |  | ||||||
|     os.system(f"rm -rf {folder}/tmp/ {folder}/cuda_wheel/") |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def complete_wheel(folder: str) -> str: |  | ||||||
|     """ |  | ||||||
|     Complete wheel build and put in artifact location |  | ||||||
|     """ |  | ||||||
|     wheel_name = list_dir(f"/{folder}/dist")[0] |  | ||||||
|  |  | ||||||
|     if "pytorch" in folder and not enable_cuda: |  | ||||||
|         print("Repairing Wheel with AuditWheel") |  | ||||||
|         check_call(["auditwheel", "repair", f"dist/{wheel_name}"], cwd=folder) |  | ||||||
|         repaired_wheel_name = list_dir(f"/{folder}/wheelhouse")[0] |  | ||||||
|  |  | ||||||
|         print(f"Moving {repaired_wheel_name} wheel to /{folder}/dist") |  | ||||||
|         os.rename( |  | ||||||
|             f"/{folder}/wheelhouse/{repaired_wheel_name}", |  | ||||||
|             f"/{folder}/dist/{repaired_wheel_name}", |  | ||||||
|         ) |  | ||||||
|     else: |  | ||||||
|         repaired_wheel_name = wheel_name |  | ||||||
|  |  | ||||||
|     print(f"Copying {repaired_wheel_name} to artifacts") |  | ||||||
|     shutil.copy2( |  | ||||||
|         f"/{folder}/dist/{repaired_wheel_name}", f"/artifacts/{repaired_wheel_name}" |  | ||||||
|     ) |  | ||||||
|  |  | ||||||
|     return repaired_wheel_name |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def parse_arguments(): |  | ||||||
|     """ |  | ||||||
|     Parse inline arguments |  | ||||||
|     """ |  | ||||||
|     from argparse import ArgumentParser |  | ||||||
|  |  | ||||||
|     parser = ArgumentParser("AARCH64 wheels python CD") |  | ||||||
|     parser.add_argument("--debug", action="store_true") |  | ||||||
|     parser.add_argument("--build-only", action="store_true") |  | ||||||
|     parser.add_argument("--test-only", type=str) |  | ||||||
|     parser.add_argument("--enable-mkldnn", action="store_true") |  | ||||||
|     parser.add_argument("--enable-cuda", action="store_true") |  | ||||||
|     return parser.parse_args() |  | ||||||
|  |  | ||||||
|  |  | ||||||
| if __name__ == "__main__": |  | ||||||
|     """ |  | ||||||
|     Entry Point |  | ||||||
|     """ |  | ||||||
|     args = parse_arguments() |  | ||||||
|     enable_mkldnn = args.enable_mkldnn |  | ||||||
|     enable_cuda = args.enable_cuda |  | ||||||
|     repo = Repository("/pytorch") |  | ||||||
|     branch = repo.head.name |  | ||||||
|     if branch == "HEAD": |  | ||||||
|         branch = "master" |  | ||||||
|  |  | ||||||
|     print("Building PyTorch wheel") |  | ||||||
|     build_vars = "MAX_JOBS=5 CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000 " |  | ||||||
|     os.system("cd /pytorch; python setup.py clean") |  | ||||||
|  |  | ||||||
|     override_package_version = os.getenv("OVERRIDE_PACKAGE_VERSION") |  | ||||||
|     if override_package_version is not None: |  | ||||||
|         version = override_package_version |  | ||||||
|         build_vars += ( |  | ||||||
|             f"BUILD_TEST=0 PYTORCH_BUILD_VERSION={version} PYTORCH_BUILD_NUMBER=1 " |  | ||||||
|         ) |  | ||||||
|     elif branch in ["nightly", "master"]: |  | ||||||
|         build_date = ( |  | ||||||
|             check_output(["git", "log", "--pretty=format:%cs", "-1"], cwd="/pytorch") |  | ||||||
|             .decode() |  | ||||||
|             .replace("-", "") |  | ||||||
|         ) |  | ||||||
|         version = ( |  | ||||||
|             check_output(["cat", "version.txt"], cwd="/pytorch").decode().strip()[:-2] |  | ||||||
|         ) |  | ||||||
|         if enable_cuda: |  | ||||||
|             desired_cuda = os.getenv("DESIRED_CUDA") |  | ||||||
|             build_vars += f"BUILD_TEST=0 PYTORCH_BUILD_VERSION={version}.dev{build_date}+{desired_cuda} PYTORCH_BUILD_NUMBER=1 " |  | ||||||
|         else: |  | ||||||
|             build_vars += f"BUILD_TEST=0 PYTORCH_BUILD_VERSION={version}.dev{build_date} PYTORCH_BUILD_NUMBER=1 " |  | ||||||
|     elif branch.startswith(("v1.", "v2.")): |  | ||||||
|         build_vars += f"BUILD_TEST=0 PYTORCH_BUILD_VERSION={branch[1:branch.find('-')]} PYTORCH_BUILD_NUMBER=1 " |  | ||||||
|  |  | ||||||
|     if enable_mkldnn: |  | ||||||
|         build_ArmComputeLibrary() |  | ||||||
|         print("build pytorch with mkldnn+acl backend") |  | ||||||
|         build_vars += ( |  | ||||||
|             "USE_MKLDNN=ON USE_MKLDNN_ACL=ON " |  | ||||||
|             "ACL_ROOT_DIR=/acl " |  | ||||||
|             "LD_LIBRARY_PATH=/pytorch/build/lib:/acl/build:$LD_LIBRARY_PATH " |  | ||||||
|             "ACL_INCLUDE_DIR=/acl/build " |  | ||||||
|             "ACL_LIBRARY=/acl/build " |  | ||||||
|         ) |  | ||||||
|         if enable_cuda: |  | ||||||
|             build_vars += "BLAS=NVPL " |  | ||||||
|         else: |  | ||||||
|             build_vars += "BLAS=OpenBLAS OpenBLAS_HOME=/OpenBLAS " |  | ||||||
|     else: |  | ||||||
|         print("build pytorch without mkldnn backend") |  | ||||||
|  |  | ||||||
|     os.system(f"cd /pytorch; {build_vars} python3 setup.py bdist_wheel") |  | ||||||
|     if enable_cuda: |  | ||||||
|         print("Updating Cuda Dependency") |  | ||||||
|         filename = os.listdir("/pytorch/dist/") |  | ||||||
|         wheel_path = f"/pytorch/dist/{filename[0]}" |  | ||||||
|         update_wheel(wheel_path) |  | ||||||
|     pytorch_wheel_name = complete_wheel("/pytorch/") |  | ||||||
|     print(f"Build Complete. Created {pytorch_wheel_name}..") |  | ||||||
										
											
[File diff suppressed because it is too large]
											
										
									
								
							| @ -1,87 +0,0 @@ | |||||||
| #!/usr/bin/env python3 |  | ||||||
|  |  | ||||||
| import os |  | ||||||
| import shutil |  | ||||||
| import sys |  | ||||||
| from subprocess import check_call |  | ||||||
| from tempfile import TemporaryDirectory |  | ||||||
|  |  | ||||||
| from auditwheel.elfutils import elf_file_filter |  | ||||||
| from auditwheel.lddtree import lddtree |  | ||||||
| from auditwheel.patcher import Patchelf |  | ||||||
| from auditwheel.repair import copylib |  | ||||||
| from auditwheel.wheeltools import InWheelCtx |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def replace_tag(filename): |  | ||||||
|     with open(filename) as f: |  | ||||||
|         lines = f.read().split("\\n") |  | ||||||
|     for i, line in enumerate(lines): |  | ||||||
|         if not line.startswith("Tag: "): |  | ||||||
|             continue |  | ||||||
|         lines[i] = line.replace("-linux_", "-manylinux2014_") |  | ||||||
|         print(f"Updated tag from {line} to {lines[i]}") |  | ||||||
|  |  | ||||||
|     with open(filename, "w") as f: |  | ||||||
|         f.write("\\n".join(lines)) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class AlignedPatchelf(Patchelf): |  | ||||||
|     def set_soname(self, file_name: str, new_soname: str) -> None: |  | ||||||
|         check_call( |  | ||||||
|             ["patchelf", "--page-size", "65536", "--set-soname", new_soname, file_name] |  | ||||||
|         ) |  | ||||||
|  |  | ||||||
|     def replace_needed(self, file_name: str, soname: str, new_soname: str) -> None: |  | ||||||
|         check_call( |  | ||||||
|             [ |  | ||||||
|                 "patchelf", |  | ||||||
|                 "--page-size", |  | ||||||
|                 "65536", |  | ||||||
|                 "--replace-needed", |  | ||||||
|                 soname, |  | ||||||
|                 new_soname, |  | ||||||
|                 file_name, |  | ||||||
|             ] |  | ||||||
|         ) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def embed_library(whl_path, lib_soname, update_tag=False): |  | ||||||
|     patcher = AlignedPatchelf() |  | ||||||
|     out_dir = TemporaryDirectory() |  | ||||||
|     whl_name = os.path.basename(whl_path) |  | ||||||
|     tmp_whl_name = os.path.join(out_dir.name, whl_name) |  | ||||||
|     with InWheelCtx(whl_path) as ctx: |  | ||||||
|         torchlib_path = os.path.join(ctx._tmpdir.name, "torch", "lib") |  | ||||||
|         ctx.out_wheel = tmp_whl_name |  | ||||||
|         new_lib_path, new_lib_soname = None, None |  | ||||||
|         for filename, _ in elf_file_filter(ctx.iter_files()): |  | ||||||
|             if not filename.startswith("torch/lib"): |  | ||||||
|                 continue |  | ||||||
|             libtree = lddtree(filename) |  | ||||||
|             if lib_soname not in libtree["needed"]: |  | ||||||
|                 continue |  | ||||||
|             lib_path = libtree["libs"][lib_soname]["path"] |  | ||||||
|             if lib_path is None: |  | ||||||
|                 print(f"Can't embed {lib_soname} as it could not be found") |  | ||||||
|                 break |  | ||||||
|             if lib_path.startswith(torchlib_path): |  | ||||||
|                 continue |  | ||||||
|  |  | ||||||
|             if new_lib_path is None: |  | ||||||
|                 new_lib_soname, new_lib_path = copylib(lib_path, torchlib_path, patcher) |  | ||||||
|             patcher.replace_needed(filename, lib_soname, new_lib_soname) |  | ||||||
|             print(f"Replacing {lib_soname} with {new_lib_soname} for {filename}") |  | ||||||
|         if update_tag: |  | ||||||
|             # Add manylinux2014 tag |  | ||||||
|             for filename in ctx.iter_files(): |  | ||||||
|                 if os.path.basename(filename) != "WHEEL": |  | ||||||
|                     continue |  | ||||||
|                 replace_tag(filename) |  | ||||||
|     shutil.move(tmp_whl_name, whl_path) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| if __name__ == "__main__": |  | ||||||
|     embed_library( |  | ||||||
|         sys.argv[1], "libgomp.so.1", len(sys.argv) > 2 and sys.argv[2] == "--update-tag" |  | ||||||
|     ) |  | ||||||
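The removed helper is driven entirely by its two positional arguments, so usage reduces to the following; the wheel filename is a placeholder and `--update-tag` is optional.

```bash
# Embed libgomp.so.1 into a built wheel (in place) and, with --update-tag,
# rewrite the WHEEL platform tag from linux_* to manylinux2014_*.
python embed_library.py torch-2.4.0-cp311-cp311-linux_aarch64.whl --update-tag
```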
@@ -1,4 +1,4 @@
-# Docker images for GitHub CI and CD
+# Docker images for GitHub CI
 
 This directory contains everything needed to build the Docker images
 that are used in our CI.
@@ -12,7 +12,7 @@ each image as the `BUILD_ENVIRONMENT` environment variable.
 
 See `build.sh` for valid build environments (it's the giant switch).
 
-## Docker CI builds
+## Contents
 
 * `build.sh` -- dispatch script to launch all builds
 * `common` -- scripts used to execute individual Docker build stages
@@ -21,12 +21,6 @@ See `build.sh` for valid build environments (it's the giant switch).
 * `ubuntu-rocm` -- Dockerfile for Ubuntu image with ROCm support
 * `ubuntu-xpu` -- Dockerfile for Ubuntu image with XPU support
 
-### Docker CD builds
-
-* `conda` - Dockerfile and build.sh to build Docker images used in nightly conda builds
-* `manywheel` - Dockerfile and build.sh to build Docker images used in nightly manywheel builds
-* `libtorch` - Dockerfile and build.sh to build Docker images used in nightly libtorch builds
-
 ## Usage
 
 ```bash
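As a sketch of the workflow this README describes: the image name below is one of the entries from the `build.sh` case statement shown later in this compare, but the exact invocation and working directory are assumptions rather than something stated here.

```bash
# Build one CI image locally via the dispatch script; image names are the
# BUILD_ENVIRONMENT-style entries from the big case statement in build.sh.
cd .ci/docker
./build.sh pytorch-linux-focal-py3.8-gcc9
```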
| @ -1,98 +0,0 @@ | |||||||
| ARG CUDA_VERSION=12.4 |  | ||||||
| ARG BASE_TARGET=cuda${CUDA_VERSION} |  | ||||||
| FROM amd64/almalinux:8 as base |  | ||||||
|  |  | ||||||
| ENV LC_ALL en_US.UTF-8 |  | ||||||
| ENV LANG en_US.UTF-8 |  | ||||||
| ENV LANGUAGE en_US.UTF-8 |  | ||||||
|  |  | ||||||
| ARG DEVTOOLSET_VERSION=11 |  | ||||||
|  |  | ||||||
| ENV LC_ALL en_US.UTF-8 |  | ||||||
| ENV LANG en_US.UTF-8 |  | ||||||
| ENV LANGUAGE en_US.UTF-8 |  | ||||||
|  |  | ||||||
| RUN yum -y update |  | ||||||
| RUN yum -y install epel-release |  | ||||||
| RUN yum install -y sudo wget curl perl util-linux xz bzip2 git patch which perl zlib-devel openssl-devel yum-utils autoconf automake make gcc-toolset-${DEVTOOLSET_VERSION}-toolchain |  | ||||||
| # Just add everything as a safe.directory for git since these will be used in multiple places with git |  | ||||||
| RUN git config --global --add safe.directory '*' |  | ||||||
| ENV PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH |  | ||||||
|  |  | ||||||
| # cmake-3.18.4 from pip |  | ||||||
| RUN yum install -y python3-pip && \ |  | ||||||
|     python3 -mpip install cmake==3.18.4 && \ |  | ||||||
|     ln -s /usr/local/bin/cmake /usr/bin/cmake3 |  | ||||||
| RUN rm -rf /usr/local/cuda-* |  | ||||||
|  |  | ||||||
| FROM base as openssl |  | ||||||
| ADD ./common/install_openssl.sh install_openssl.sh |  | ||||||
| RUN bash ./install_openssl.sh && rm install_openssl.sh |  | ||||||
|  |  | ||||||
| FROM base as patchelf |  | ||||||
| # Install patchelf |  | ||||||
| ADD ./common/install_patchelf.sh install_patchelf.sh |  | ||||||
| RUN bash ./install_patchelf.sh && rm install_patchelf.sh && cp $(which patchelf) /patchelf |  | ||||||
|  |  | ||||||
| FROM base as conda |  | ||||||
| # Install Anaconda |  | ||||||
| ADD ./common/install_conda_docker.sh install_conda.sh |  | ||||||
| RUN bash ./install_conda.sh && rm install_conda.sh |  | ||||||
|  |  | ||||||
| # Install CUDA |  | ||||||
| FROM base as cuda |  | ||||||
| ARG CUDA_VERSION=12.4 |  | ||||||
| RUN rm -rf /usr/local/cuda-* |  | ||||||
| ADD ./common/install_cuda.sh install_cuda.sh |  | ||||||
| ENV CUDA_HOME=/usr/local/cuda-${CUDA_VERSION} |  | ||||||
| # Preserve CUDA_VERSION for the builds |  | ||||||
| ENV CUDA_VERSION=${CUDA_VERSION} |  | ||||||
| # Make things in our path by default |  | ||||||
| ENV PATH=/usr/local/cuda-${CUDA_VERSION}/bin:$PATH |  | ||||||
|  |  | ||||||
| FROM cuda as cuda11.8 |  | ||||||
| RUN bash ./install_cuda.sh 11.8 |  | ||||||
| ENV DESIRED_CUDA=11.8 |  | ||||||
|  |  | ||||||
| FROM cuda as cuda12.1 |  | ||||||
| RUN bash ./install_cuda.sh 12.1 |  | ||||||
| ENV DESIRED_CUDA=12.1 |  | ||||||
|  |  | ||||||
| FROM cuda as cuda12.4 |  | ||||||
| RUN bash ./install_cuda.sh 12.4 |  | ||||||
| ENV DESIRED_CUDA=12.4 |  | ||||||
|  |  | ||||||
| FROM cuda as cuda12.6 |  | ||||||
| RUN bash ./install_cuda.sh 12.6 |  | ||||||
| ENV DESIRED_CUDA=12.6 |  | ||||||
|  |  | ||||||
| # Install MNIST test data |  | ||||||
| FROM base as mnist |  | ||||||
| ADD ./common/install_mnist.sh install_mnist.sh |  | ||||||
| RUN bash ./install_mnist.sh |  | ||||||
|  |  | ||||||
| FROM base as all_cuda |  | ||||||
| COPY --from=cuda11.8  /usr/local/cuda-11.8 /usr/local/cuda-11.8 |  | ||||||
| COPY --from=cuda12.1  /usr/local/cuda-12.1 /usr/local/cuda-12.1 |  | ||||||
| COPY --from=cuda12.4  /usr/local/cuda-12.4 /usr/local/cuda-12.4 |  | ||||||
| COPY --from=cuda12.6  /usr/local/cuda-12.6 /usr/local/cuda-12.6 |  | ||||||
|  |  | ||||||
| # Final step |  | ||||||
| FROM ${BASE_TARGET} as final |  | ||||||
| COPY --from=openssl            /opt/openssl           /opt/openssl |  | ||||||
| COPY --from=patchelf           /patchelf              /usr/local/bin/patchelf |  | ||||||
| COPY --from=conda              /opt/conda             /opt/conda |  | ||||||
|  |  | ||||||
| # Add jni.h for java host build. |  | ||||||
| COPY ./common/install_jni.sh install_jni.sh |  | ||||||
| COPY ./java/jni.h jni.h |  | ||||||
| RUN bash ./install_jni.sh && rm install_jni.sh |  | ||||||
|  |  | ||||||
| ENV PATH /opt/conda/bin:$PATH |  | ||||||
| ENV LD_LIBRARY_PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib64:/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib:$LD_LIBRARY_PATH |  | ||||||
| COPY --from=mnist  /usr/local/mnist /usr/local/mnist |  | ||||||
| RUN rm -rf /usr/local/cuda |  | ||||||
| RUN chmod o+rw /usr/local |  | ||||||
| RUN touch /.condarc && \ |  | ||||||
|     chmod o+rw /.condarc && \ |  | ||||||
|     chmod -R o+rw /opt/conda |  | ||||||
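A minimal sketch of building this image directly with `docker build`; the companion `build.sh` below does essentially this with extra tagging and host setup. The tag name is a placeholder, and the command assumes it is run from the repo root.

```bash
# Build the "final" stage of the removed almalinux Dockerfile for CUDA 12.4.
docker build \
  --target final \
  --build-arg BASE_TARGET=cuda12.4 \
  --build-arg CUDA_VERSION=12.4 \
  --build-arg DEVTOOLSET_VERSION=11 \
  -f .ci/docker/almalinux/Dockerfile \
  -t pytorch/almalinux-builder:cuda12.4 \
  .ci/docker/
```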
| @ -1,82 +0,0 @@ | |||||||
| #!/usr/bin/env bash |  | ||||||
| # Script used only in CD pipeline |  | ||||||
|  |  | ||||||
| set -eou pipefail |  | ||||||
|  |  | ||||||
| image="$1" |  | ||||||
| shift |  | ||||||
|  |  | ||||||
| if [ -z "${image}" ]; then |  | ||||||
|   echo "Usage: $0 IMAGE" |  | ||||||
|   exit 1 |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| DOCKER_IMAGE_NAME="pytorch/${image}" |  | ||||||
|  |  | ||||||
|  |  | ||||||
| export DOCKER_BUILDKIT=1 |  | ||||||
| TOPDIR=$(git rev-parse --show-toplevel) |  | ||||||
|  |  | ||||||
| CUDA_VERSION=${CUDA_VERSION:-12.1} |  | ||||||
|  |  | ||||||
| case ${CUDA_VERSION} in |  | ||||||
|   cpu) |  | ||||||
|     BASE_TARGET=base |  | ||||||
|     DOCKER_TAG=cpu |  | ||||||
|     ;; |  | ||||||
|   all) |  | ||||||
|     BASE_TARGET=all_cuda |  | ||||||
|     DOCKER_TAG=latest |  | ||||||
|     ;; |  | ||||||
|   *) |  | ||||||
|     BASE_TARGET=cuda${CUDA_VERSION} |  | ||||||
|     DOCKER_TAG=cuda${CUDA_VERSION} |  | ||||||
|     ;; |  | ||||||
| esac |  | ||||||
|  |  | ||||||
|  |  | ||||||
| ( |  | ||||||
|   set -x |  | ||||||
|   # TODO: Remove LimitNOFILE=1048576 patch once https://github.com/pytorch/test-infra/issues/5712 |  | ||||||
|   # is resolved. This patch is required in order to fix timing out of Docker build on Amazon Linux 2023. |  | ||||||
|   sudo sed -i s/LimitNOFILE=infinity/LimitNOFILE=1048576/ /usr/lib/systemd/system/docker.service |  | ||||||
|   sudo systemctl daemon-reload |  | ||||||
|   sudo systemctl restart docker |  | ||||||
|  |  | ||||||
|   docker build \ |  | ||||||
|     --target final \ |  | ||||||
|     --progress plain \ |  | ||||||
|     --build-arg "BASE_TARGET=${BASE_TARGET}" \ |  | ||||||
|     --build-arg "CUDA_VERSION=${CUDA_VERSION}" \ |  | ||||||
|     --build-arg "DEVTOOLSET_VERSION=11" \ |  | ||||||
|     -t ${DOCKER_IMAGE_NAME} \ |  | ||||||
|     $@ \ |  | ||||||
|     -f "${TOPDIR}/.ci/docker/almalinux/Dockerfile" \ |  | ||||||
|     ${TOPDIR}/.ci/docker/ |  | ||||||
| ) |  | ||||||
|  |  | ||||||
| if [[ "${DOCKER_TAG}" =~ ^cuda* ]]; then |  | ||||||
|   # Test that we're using the right CUDA compiler |  | ||||||
|   ( |  | ||||||
|     set -x |  | ||||||
|     docker run --rm "${DOCKER_IMAGE_NAME}" nvcc --version | grep "cuda_${CUDA_VERSION}" |  | ||||||
|   ) |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| GITHUB_REF=${GITHUB_REF:-$(git symbolic-ref -q HEAD || git describe --tags --exact-match)} |  | ||||||
| GIT_BRANCH_NAME=${GITHUB_REF##*/} |  | ||||||
| GIT_COMMIT_SHA=${GITHUB_SHA:-$(git rev-parse HEAD)} |  | ||||||
| DOCKER_IMAGE_BRANCH_TAG=${DOCKER_IMAGE_NAME}-${GIT_BRANCH_NAME} |  | ||||||
| DOCKER_IMAGE_SHA_TAG=${DOCKER_IMAGE_NAME}-${GIT_COMMIT_SHA} |  | ||||||
| if [[ "${WITH_PUSH:-}" == true ]]; then |  | ||||||
|   ( |  | ||||||
|     set -x |  | ||||||
|     docker push "${DOCKER_IMAGE_NAME}" |  | ||||||
|     if [[ -n ${GITHUB_REF} ]]; then |  | ||||||
|         docker tag ${DOCKER_IMAGE_NAME} ${DOCKER_IMAGE_BRANCH_TAG} |  | ||||||
|         docker tag ${DOCKER_IMAGE_NAME} ${DOCKER_IMAGE_SHA_TAG} |  | ||||||
|         docker push "${DOCKER_IMAGE_BRANCH_TAG}" |  | ||||||
|         docker push "${DOCKER_IMAGE_SHA_TAG}" |  | ||||||
|     fi |  | ||||||
|   ) |  | ||||||
| fi |  | ||||||
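For completeness, a hedged example of driving the removed wrapper script: the image argument is whatever name you want under `pytorch/` (the one below is a placeholder), and pushing only happens with `WITH_PUSH=true`. Note that the script also patches and restarts the host Docker service, so it assumes a CI-like host.

```bash
# Build the CUDA 12.4 variant; the resulting local tag is pytorch/<IMAGE>.
CUDA_VERSION=12.4 ./build.sh almalinux-builder

# Build the CPU-only variant and push branch/SHA tags (CI-style).
CUDA_VERSION=cpu WITH_PUSH=true ./build.sh almalinux-builder
```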
							
								
								
									
.ci/docker/android/AndroidManifest.xml (new file)

@@ -0,0 +1 @@
+<manifest package="org.pytorch.deps" />
							
								
								
									
.ci/docker/android/build.gradle (new file)

@@ -0,0 +1,66 @@
+buildscript {
+    ext {
+        minSdkVersion = 21
+        targetSdkVersion = 28
+        compileSdkVersion = 28
+        buildToolsVersion = '28.0.3'
+
+        coreVersion = "1.2.0"
+        extJUnitVersion = "1.1.1"
+        runnerVersion = "1.2.0"
+        rulesVersion = "1.2.0"
+        junitVersion = "4.12"
+    }
+
+    repositories {
+        google()
+        mavenLocal()
+        mavenCentral()
+        jcenter()
+    }
+
+    dependencies {
+        classpath 'com.android.tools.build:gradle:4.1.2'
+        classpath 'com.vanniktech:gradle-maven-publish-plugin:0.14.2'
+    }
+}
+
+repositories {
+    google()
+    jcenter()
+}
+
+apply plugin: 'com.android.library'
+
+android {
+    compileSdkVersion rootProject.compileSdkVersion
+    buildToolsVersion rootProject.buildToolsVersion
+
+    defaultConfig {
+        minSdkVersion minSdkVersion
+        targetSdkVersion targetSdkVersion
+    }
+
+    sourceSets {
+        main {
+            manifest.srcFile 'AndroidManifest.xml'
+        }
+    }
+}
+
+dependencies {
+    implementation 'com.android.support:appcompat-v7:28.0.0'
+    implementation 'androidx.appcompat:appcompat:1.0.0'
+    implementation 'com.facebook.fbjni:fbjni-java-only:0.2.2'
+    implementation 'com.google.code.findbugs:jsr305:3.0.1'
+    implementation 'com.facebook.soloader:nativeloader:0.10.5'
+
+    implementation 'junit:junit:' + rootProject.junitVersion
+    implementation 'androidx.test:core:' + rootProject.coreVersion
+
+    implementation 'junit:junit:' + rootProject.junitVersion
+    implementation 'androidx.test:core:' + rootProject.coreVersion
+    implementation 'androidx.test.ext:junit:' + rootProject.extJUnitVersion
+    implementation 'androidx.test:rules:' + rootProject.rulesVersion
+    implementation 'androidx.test:runner:' + rootProject.runnerVersion
+}
@@ -1,5 +0,0 @@
-0.8b
-manylinux_2_28
-rocm6.2
-6f8cbcac8a92775291bb1ba8f514d4beb350baf4
-e938def5d32869fe2e00aec0300f354c9f157867bebdf2e104d732b94cb238d8
| @ -84,30 +84,16 @@ fi | |||||||
| # CMake 3.18 is needed to support CUDA17 language variant | # CMake 3.18 is needed to support CUDA17 language variant | ||||||
| CMAKE_VERSION=3.18.5 | CMAKE_VERSION=3.18.5 | ||||||
|  |  | ||||||
| _UCX_COMMIT=7bb2722ff2187a0cad557ae4a6afa090569f83fb | _UCX_COMMIT=00bcc6bb18fc282eb160623b4c0d300147f579af | ||||||
| _UCC_COMMIT=20eae37090a4ce1b32bcce6144ccad0b49943e0b | _UCC_COMMIT=7cb07a76ccedad7e56ceb136b865eb9319c258ea | ||||||
|  |  | ||||||
| # It's annoying to rename jobs every time you want to rewrite a | # It's annoying to rename jobs every time you want to rewrite a | ||||||
| # configuration, so we hardcode everything here rather than do it | # configuration, so we hardcode everything here rather than do it | ||||||
| # from scratch | # from scratch | ||||||
| case "$image" in | case "$image" in | ||||||
|   pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9) |   pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9) | ||||||
|     CUDA_VERSION=12.4.1 |  | ||||||
|     CUDNN_VERSION=9 |  | ||||||
|     ANACONDA_PYTHON_VERSION=3.10 |  | ||||||
|     GCC_VERSION=9 |  | ||||||
|     PROTOBUF=yes |  | ||||||
|     DB=yes |  | ||||||
|     VISION=yes |  | ||||||
|     KATEX=yes |  | ||||||
|     UCX_COMMIT=${_UCX_COMMIT} |  | ||||||
|     UCC_COMMIT=${_UCC_COMMIT} |  | ||||||
|     CONDA_CMAKE=yes |  | ||||||
|     TRITON=yes |  | ||||||
|     ;; |  | ||||||
|   pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9) |  | ||||||
|     CUDA_VERSION=12.1.1 |     CUDA_VERSION=12.1.1 | ||||||
|     CUDNN_VERSION=9 |     CUDNN_VERSION=8 | ||||||
|     ANACONDA_PYTHON_VERSION=3.10 |     ANACONDA_PYTHON_VERSION=3.10 | ||||||
|     GCC_VERSION=9 |     GCC_VERSION=9 | ||||||
|     PROTOBUF=yes |     PROTOBUF=yes | ||||||
| @ -119,24 +105,9 @@ case "$image" in | |||||||
|     CONDA_CMAKE=yes |     CONDA_CMAKE=yes | ||||||
|     TRITON=yes |     TRITON=yes | ||||||
|     ;; |     ;; | ||||||
|   pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9-inductor-benchmarks) |   pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks) | ||||||
|     CUDA_VERSION=12.4.1 |  | ||||||
|     CUDNN_VERSION=9 |  | ||||||
|     ANACONDA_PYTHON_VERSION=3.10 |  | ||||||
|     GCC_VERSION=9 |  | ||||||
|     PROTOBUF=yes |  | ||||||
|     DB=yes |  | ||||||
|     VISION=yes |  | ||||||
|     KATEX=yes |  | ||||||
|     UCX_COMMIT=${_UCX_COMMIT} |  | ||||||
|     UCC_COMMIT=${_UCC_COMMIT} |  | ||||||
|     CONDA_CMAKE=yes |  | ||||||
|     TRITON=yes |  | ||||||
|     INDUCTOR_BENCHMARKS=yes |  | ||||||
|     ;; |  | ||||||
|   pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9-inductor-benchmarks) |  | ||||||
|     CUDA_VERSION=12.1.1 |     CUDA_VERSION=12.1.1 | ||||||
|     CUDNN_VERSION=9 |     CUDNN_VERSION=8 | ||||||
|     ANACONDA_PYTHON_VERSION=3.10 |     ANACONDA_PYTHON_VERSION=3.10 | ||||||
|     GCC_VERSION=9 |     GCC_VERSION=9 | ||||||
|     PROTOBUF=yes |     PROTOBUF=yes | ||||||
| @ -149,54 +120,9 @@ case "$image" in | |||||||
|     TRITON=yes |     TRITON=yes | ||||||
|     INDUCTOR_BENCHMARKS=yes |     INDUCTOR_BENCHMARKS=yes | ||||||
|     ;; |     ;; | ||||||
|   pytorch-linux-focal-cuda12.1-cudnn9-py3.12-gcc9-inductor-benchmarks) |   pytorch-linux-focal-cuda11.8-cudnn8-py3-gcc9) | ||||||
|     CUDA_VERSION=12.1.1 |  | ||||||
|     CUDNN_VERSION=9 |  | ||||||
|     ANACONDA_PYTHON_VERSION=3.12 |  | ||||||
|     GCC_VERSION=9 |  | ||||||
|     PROTOBUF=yes |  | ||||||
|     DB=yes |  | ||||||
|     VISION=yes |  | ||||||
|     KATEX=yes |  | ||||||
|     UCX_COMMIT=${_UCX_COMMIT} |  | ||||||
|     UCC_COMMIT=${_UCC_COMMIT} |  | ||||||
|     CONDA_CMAKE=yes |  | ||||||
|     TRITON=yes |  | ||||||
|     INDUCTOR_BENCHMARKS=yes |  | ||||||
|     ;; |  | ||||||
|   pytorch-linux-focal-cuda12.4-cudnn9-py3.12-gcc9-inductor-benchmarks) |  | ||||||
|     CUDA_VERSION=12.4.1 |  | ||||||
|     CUDNN_VERSION=9 |  | ||||||
|     ANACONDA_PYTHON_VERSION=3.12 |  | ||||||
|     GCC_VERSION=9 |  | ||||||
|     PROTOBUF=yes |  | ||||||
|     DB=yes |  | ||||||
|     VISION=yes |  | ||||||
|     KATEX=yes |  | ||||||
|     UCX_COMMIT=${_UCX_COMMIT} |  | ||||||
|     UCC_COMMIT=${_UCC_COMMIT} |  | ||||||
|     CONDA_CMAKE=yes |  | ||||||
|     TRITON=yes |  | ||||||
|     INDUCTOR_BENCHMARKS=yes |  | ||||||
|     ;; |  | ||||||
|   pytorch-linux-focal-cuda12.4-cudnn9-py3.13-gcc9-inductor-benchmarks) |  | ||||||
|     CUDA_VERSION=12.4.1 |  | ||||||
|     CUDNN_VERSION=9 |  | ||||||
|     ANACONDA_PYTHON_VERSION=3.13 |  | ||||||
|     GCC_VERSION=9 |  | ||||||
|     PROTOBUF=yes |  | ||||||
|     DB=yes |  | ||||||
|     VISION=yes |  | ||||||
|     KATEX=yes |  | ||||||
|     UCX_COMMIT=${_UCX_COMMIT} |  | ||||||
|     UCC_COMMIT=${_UCC_COMMIT} |  | ||||||
|     CONDA_CMAKE=yes |  | ||||||
|     TRITON=yes |  | ||||||
|     INDUCTOR_BENCHMARKS=yes |  | ||||||
|     ;; |  | ||||||
|   pytorch-linux-focal-cuda11.8-cudnn9-py3-gcc9) |  | ||||||
|     CUDA_VERSION=11.8.0 |     CUDA_VERSION=11.8.0 | ||||||
|     CUDNN_VERSION=9 |     CUDNN_VERSION=8 | ||||||
|     ANACONDA_PYTHON_VERSION=3.10 |     ANACONDA_PYTHON_VERSION=3.10 | ||||||
|     GCC_VERSION=9 |     GCC_VERSION=9 | ||||||
|     PROTOBUF=yes |     PROTOBUF=yes | ||||||
| @ -208,37 +134,9 @@ case "$image" in | |||||||
|     CONDA_CMAKE=yes |     CONDA_CMAKE=yes | ||||||
|     TRITON=yes |     TRITON=yes | ||||||
|     ;; |     ;; | ||||||
|   pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9) |   pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9) | ||||||
|     CUDA_VERSION=12.4.1 |  | ||||||
|     CUDNN_VERSION=9 |  | ||||||
|     ANACONDA_PYTHON_VERSION=3.10 |  | ||||||
|     GCC_VERSION=9 |  | ||||||
|     PROTOBUF=yes |  | ||||||
|     DB=yes |  | ||||||
|     VISION=yes |  | ||||||
|     KATEX=yes |  | ||||||
|     UCX_COMMIT=${_UCX_COMMIT} |  | ||||||
|     UCC_COMMIT=${_UCC_COMMIT} |  | ||||||
|     CONDA_CMAKE=yes |  | ||||||
|     TRITON=yes |  | ||||||
|     ;; |  | ||||||
|   pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9) |  | ||||||
|     CUDA_VERSION=12.1.1 |     CUDA_VERSION=12.1.1 | ||||||
|     CUDNN_VERSION=9 |     CUDNN_VERSION=8 | ||||||
|     ANACONDA_PYTHON_VERSION=3.10 |  | ||||||
|     GCC_VERSION=9 |  | ||||||
|     PROTOBUF=yes |  | ||||||
|     DB=yes |  | ||||||
|     VISION=yes |  | ||||||
|     KATEX=yes |  | ||||||
|     UCX_COMMIT=${_UCX_COMMIT} |  | ||||||
|     UCC_COMMIT=${_UCC_COMMIT} |  | ||||||
|     CONDA_CMAKE=yes |  | ||||||
|     TRITON=yes |  | ||||||
|     ;; |  | ||||||
|   pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9) |  | ||||||
|     CUDA_VERSION=12.4.1 |  | ||||||
|     CUDNN_VERSION=9 |  | ||||||
|     ANACONDA_PYTHON_VERSION=3.10 |     ANACONDA_PYTHON_VERSION=3.10 | ||||||
|     GCC_VERSION=9 |     GCC_VERSION=9 | ||||||
|     PROTOBUF=yes |     PROTOBUF=yes | ||||||
| @ -251,7 +149,7 @@ case "$image" in | |||||||
|     TRITON=yes |     TRITON=yes | ||||||
|     ;; |     ;; | ||||||
|   pytorch-linux-focal-py3-clang10-onnx) |   pytorch-linux-focal-py3-clang10-onnx) | ||||||
|     ANACONDA_PYTHON_VERSION=3.9 |     ANACONDA_PYTHON_VERSION=3.8 | ||||||
|     CLANG_VERSION=10 |     CLANG_VERSION=10 | ||||||
|     PROTOBUF=yes |     PROTOBUF=yes | ||||||
|     DB=yes |     DB=yes | ||||||
| @ -259,8 +157,18 @@ case "$image" in | |||||||
|     CONDA_CMAKE=yes |     CONDA_CMAKE=yes | ||||||
|     ONNX=yes |     ONNX=yes | ||||||
|     ;; |     ;; | ||||||
|   pytorch-linux-focal-py3.9-clang10) |   pytorch-linux-focal-py3-clang9-android-ndk-r21e) | ||||||
|     ANACONDA_PYTHON_VERSION=3.9 |     ANACONDA_PYTHON_VERSION=3.8 | ||||||
|  |     CLANG_VERSION=9 | ||||||
|  |     LLVMDEV=yes | ||||||
|  |     PROTOBUF=yes | ||||||
|  |     ANDROID=yes | ||||||
|  |     ANDROID_NDK_VERSION=r21e | ||||||
|  |     GRADLE_VERSION=6.8.3 | ||||||
|  |     NINJA_VERSION=1.9.0 | ||||||
|  |     ;; | ||||||
|  |   pytorch-linux-focal-py3.8-clang10) | ||||||
|  |     ANACONDA_PYTHON_VERSION=3.8 | ||||||
|     CLANG_VERSION=10 |     CLANG_VERSION=10 | ||||||
|     PROTOBUF=yes |     PROTOBUF=yes | ||||||
|     DB=yes |     DB=yes | ||||||
| @ -281,8 +189,8 @@ case "$image" in | |||||||
|     CONDA_CMAKE=yes |     CONDA_CMAKE=yes | ||||||
|     TRITON=yes |     TRITON=yes | ||||||
|     ;; |     ;; | ||||||
|   pytorch-linux-focal-py3.9-gcc9) |   pytorch-linux-focal-py3.8-gcc9) | ||||||
|     ANACONDA_PYTHON_VERSION=3.9 |     ANACONDA_PYTHON_VERSION=3.8 | ||||||
|     GCC_VERSION=9 |     GCC_VERSION=9 | ||||||
|     PROTOBUF=yes |     PROTOBUF=yes | ||||||
|     DB=yes |     DB=yes | ||||||
| @ -291,51 +199,39 @@ case "$image" in | |||||||
|     TRITON=yes |     TRITON=yes | ||||||
|     ;; |     ;; | ||||||
|   pytorch-linux-focal-rocm-n-1-py3) |   pytorch-linux-focal-rocm-n-1-py3) | ||||||
|     ANACONDA_PYTHON_VERSION=3.10 |     ANACONDA_PYTHON_VERSION=3.8 | ||||||
|     GCC_VERSION=9 |     GCC_VERSION=9 | ||||||
|     PROTOBUF=yes |     PROTOBUF=yes | ||||||
|     DB=yes |     DB=yes | ||||||
|     VISION=yes |     VISION=yes | ||||||
|     ROCM_VERSION=6.1 |     ROCM_VERSION=5.7 | ||||||
|     NINJA_VERSION=1.9.0 |     NINJA_VERSION=1.9.0 | ||||||
|     CONDA_CMAKE=yes |     CONDA_CMAKE=yes | ||||||
|     TRITON=yes |     TRITON=yes | ||||||
|     ;; |     ;; | ||||||
|   pytorch-linux-focal-rocm-n-py3) |   pytorch-linux-focal-rocm-n-py3) | ||||||
|     ANACONDA_PYTHON_VERSION=3.10 |     ANACONDA_PYTHON_VERSION=3.8 | ||||||
|     GCC_VERSION=9 |     GCC_VERSION=9 | ||||||
|     PROTOBUF=yes |     PROTOBUF=yes | ||||||
|     DB=yes |     DB=yes | ||||||
|     VISION=yes |     VISION=yes | ||||||
|     ROCM_VERSION=6.2.4 |     ROCM_VERSION=6.0 | ||||||
|     NINJA_VERSION=1.9.0 |     NINJA_VERSION=1.9.0 | ||||||
|     CONDA_CMAKE=yes |     CONDA_CMAKE=yes | ||||||
|     TRITON=yes |     TRITON=yes | ||||||
|     ;; |     ;; | ||||||
|   pytorch-linux-jammy-xpu-2024.0-py3) |   pytorch-linux-jammy-xpu-2024.0-py3) | ||||||
|     ANACONDA_PYTHON_VERSION=3.9 |     ANACONDA_PYTHON_VERSION=3.8 | ||||||
|     GCC_VERSION=11 |     GCC_VERSION=11 | ||||||
|     PROTOBUF=yes |     PROTOBUF=yes | ||||||
|     DB=yes |     DB=yes | ||||||
|     VISION=yes |     VISION=yes | ||||||
|     XPU_VERSION=0.5 |     BASEKIT_VERSION=2024.0.0-49522 | ||||||
|     NINJA_VERSION=1.9.0 |     NINJA_VERSION=1.9.0 | ||||||
|     CONDA_CMAKE=yes |     CONDA_CMAKE=yes | ||||||
|     TRITON=yes |  | ||||||
|     ;; |     ;; | ||||||
|   pytorch-linux-jammy-xpu-2025.0-py3) |     pytorch-linux-jammy-py3.8-gcc11-inductor-benchmarks) | ||||||
|     ANACONDA_PYTHON_VERSION=3.9 |     ANACONDA_PYTHON_VERSION=3.8 | ||||||
|     GCC_VERSION=11 |  | ||||||
|     PROTOBUF=yes |  | ||||||
|     DB=yes |  | ||||||
|     VISION=yes |  | ||||||
|     XPU_VERSION=2025.0 |  | ||||||
|     NINJA_VERSION=1.9.0 |  | ||||||
|     CONDA_CMAKE=yes |  | ||||||
|     TRITON=yes |  | ||||||
|     ;; |  | ||||||
|     pytorch-linux-jammy-py3.9-gcc11-inductor-benchmarks) |  | ||||||
|     ANACONDA_PYTHON_VERSION=3.9 |  | ||||||
|     GCC_VERSION=11 |     GCC_VERSION=11 | ||||||
|     PROTOBUF=yes |     PROTOBUF=yes | ||||||
|     DB=yes |     DB=yes | ||||||
| @ -346,10 +242,10 @@ case "$image" in | |||||||
|     DOCS=yes |     DOCS=yes | ||||||
|     INDUCTOR_BENCHMARKS=yes |     INDUCTOR_BENCHMARKS=yes | ||||||
|     ;; |     ;; | ||||||
|   pytorch-linux-jammy-cuda11.8-cudnn9-py3.9-clang12) |   pytorch-linux-jammy-cuda11.8-cudnn8-py3.8-clang12) | ||||||
|     ANACONDA_PYTHON_VERSION=3.9 |     ANACONDA_PYTHON_VERSION=3.8 | ||||||
|     CUDA_VERSION=11.8 |     CUDA_VERSION=11.8 | ||||||
|     CUDNN_VERSION=9 |     CUDNN_VERSION=8 | ||||||
|     CLANG_VERSION=12 |     CLANG_VERSION=12 | ||||||
|     PROTOBUF=yes |     PROTOBUF=yes | ||||||
|     DB=yes |     DB=yes | ||||||
| @ -371,14 +267,8 @@ case "$image" in | |||||||
|     CONDA_CMAKE=yes |     CONDA_CMAKE=yes | ||||||
|     VISION=yes |     VISION=yes | ||||||
|     ;; |     ;; | ||||||
|   pytorch-linux-jammy-py3-clang18-asan) |   pytorch-linux-jammy-py3.8-gcc11) | ||||||
|     ANACONDA_PYTHON_VERSION=3.10 |     ANACONDA_PYTHON_VERSION=3.8 | ||||||
|     CLANG_VERSION=18 |  | ||||||
|     CONDA_CMAKE=yes |  | ||||||
|     VISION=yes |  | ||||||
|     ;; |  | ||||||
|   pytorch-linux-jammy-py3.9-gcc11) |  | ||||||
|     ANACONDA_PYTHON_VERSION=3.9 |  | ||||||
|     GCC_VERSION=11 |     GCC_VERSION=11 | ||||||
|     PROTOBUF=yes |     PROTOBUF=yes | ||||||
|     DB=yes |     DB=yes | ||||||
| @ -395,21 +285,6 @@ case "$image" in | |||||||
|     CONDA_CMAKE=yes |     CONDA_CMAKE=yes | ||||||
|     EXECUTORCH=yes |     EXECUTORCH=yes | ||||||
|     ;; |     ;; | ||||||
|   pytorch-linux-jammy-py3.12-halide) |  | ||||||
|     CUDA_VERSION=12.4 |  | ||||||
|     ANACONDA_PYTHON_VERSION=3.12 |  | ||||||
|     GCC_VERSION=11 |  | ||||||
|     CONDA_CMAKE=yes |  | ||||||
|     HALIDE=yes |  | ||||||
|     TRITON=yes |  | ||||||
|     ;; |  | ||||||
|   pytorch-linux-jammy-py3.12-triton-cpu) |  | ||||||
|     CUDA_VERSION=12.4 |  | ||||||
|     ANACONDA_PYTHON_VERSION=3.12 |  | ||||||
|     GCC_VERSION=11 |  | ||||||
|     CONDA_CMAKE=yes |  | ||||||
|     TRITON_CPU=yes |  | ||||||
|     ;; |  | ||||||
|   pytorch-linux-focal-linter) |   pytorch-linux-focal-linter) | ||||||
|     # TODO: Use 3.9 here because of this issue https://github.com/python/mypy/issues/13627. |     # TODO: Use 3.9 here because of this issue https://github.com/python/mypy/issues/13627. | ||||||
|     # We will need to update mypy version eventually, but that's for another day. The task |     # We will need to update mypy version eventually, but that's for another day. The task | ||||||
| @ -417,7 +292,7 @@ case "$image" in | |||||||
|     ANACONDA_PYTHON_VERSION=3.9 |     ANACONDA_PYTHON_VERSION=3.9 | ||||||
|     CONDA_CMAKE=yes |     CONDA_CMAKE=yes | ||||||
|     ;; |     ;; | ||||||
|   pytorch-linux-jammy-cuda11.8-cudnn9-py3.9-linter) |   pytorch-linux-jammy-cuda11.8-cudnn8-py3.9-linter) | ||||||
|     ANACONDA_PYTHON_VERSION=3.9 |     ANACONDA_PYTHON_VERSION=3.9 | ||||||
|     CUDA_VERSION=11.8 |     CUDA_VERSION=11.8 | ||||||
|     CONDA_CMAKE=yes |     CONDA_CMAKE=yes | ||||||
| @ -430,22 +305,6 @@ case "$image" in | |||||||
|     DB=yes |     DB=yes | ||||||
|     VISION=yes |     VISION=yes | ||||||
|     CONDA_CMAKE=yes |     CONDA_CMAKE=yes | ||||||
|     # snadampal: skipping llvm src build install because the current version |  | ||||||
|     # from pytorch/llvm:9.0.1 is x86 specific |  | ||||||
|     SKIP_LLVM_SRC_BUILD_INSTALL=yes |  | ||||||
|     ;; |  | ||||||
|   pytorch-linux-jammy-aarch64-py3.10-gcc11-inductor-benchmarks) |  | ||||||
|     ANACONDA_PYTHON_VERSION=3.10 |  | ||||||
|     GCC_VERSION=11 |  | ||||||
|     ACL=yes |  | ||||||
|     PROTOBUF=yes |  | ||||||
|     DB=yes |  | ||||||
|     VISION=yes |  | ||||||
|     CONDA_CMAKE=yes |  | ||||||
|     # snadampal: skipping llvm src build install because the current version |  | ||||||
|     # from pytorch/llvm:9.0.1 is x86 specific |  | ||||||
|     SKIP_LLVM_SRC_BUILD_INSTALL=yes |  | ||||||
|     INDUCTOR_BENCHMARKS=yes |  | ||||||
|     ;; |     ;; | ||||||
|   *) |   *) | ||||||
|     # Catch-all for builds that are not hardcoded. |     # Catch-all for builds that are not hardcoded. | ||||||
| @ -494,13 +353,13 @@ tmp_tag=$(basename "$(mktemp -u)" | tr '[:upper:]' '[:lower:]') | |||||||
| #when using cudnn version 8 install it separately from cuda | #when using cudnn version 8 install it separately from cuda | ||||||
| if [[ "$image" == *cuda*  && ${OS} == "ubuntu" ]]; then | if [[ "$image" == *cuda*  && ${OS} == "ubuntu" ]]; then | ||||||
|   IMAGE_NAME="nvidia/cuda:${CUDA_VERSION}-cudnn${CUDNN_VERSION}-devel-ubuntu${UBUNTU_VERSION}" |   IMAGE_NAME="nvidia/cuda:${CUDA_VERSION}-cudnn${CUDNN_VERSION}-devel-ubuntu${UBUNTU_VERSION}" | ||||||
|   if [[ ${CUDNN_VERSION} == 9 ]]; then |   if [[ ${CUDNN_VERSION} == 8 ]]; then | ||||||
|     IMAGE_NAME="nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}" |     IMAGE_NAME="nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}" | ||||||
|   fi |   fi | ||||||
| fi | fi | ||||||
|  |  | ||||||
| # Build image | # Build image | ||||||
| docker build \ | DOCKER_BUILDKIT=1 docker build \ | ||||||
|        --no-cache \ |        --no-cache \ | ||||||
|        --progress=plain \ |        --progress=plain \ | ||||||
|        --build-arg "BUILD_ENVIRONMENT=${image}" \ |        --build-arg "BUILD_ENVIRONMENT=${image}" \ | ||||||
| @ -518,6 +377,8 @@ docker build \ | |||||||
|        --build-arg "CUDA_VERSION=${CUDA_VERSION}" \ |        --build-arg "CUDA_VERSION=${CUDA_VERSION}" \ | ||||||
|        --build-arg "CUDNN_VERSION=${CUDNN_VERSION}" \ |        --build-arg "CUDNN_VERSION=${CUDNN_VERSION}" \ | ||||||
|        --build-arg "TENSORRT_VERSION=${TENSORRT_VERSION}" \ |        --build-arg "TENSORRT_VERSION=${TENSORRT_VERSION}" \ | ||||||
|  |        --build-arg "ANDROID=${ANDROID}" \ | ||||||
|  |        --build-arg "ANDROID_NDK=${ANDROID_NDK_VERSION}" \ | ||||||
|        --build-arg "GRADLE_VERSION=${GRADLE_VERSION}" \ |        --build-arg "GRADLE_VERSION=${GRADLE_VERSION}" \ | ||||||
|        --build-arg "VULKAN_SDK_VERSION=${VULKAN_SDK_VERSION}" \ |        --build-arg "VULKAN_SDK_VERSION=${VULKAN_SDK_VERSION}" \ | ||||||
|        --build-arg "SWIFTSHADER=${SWIFTSHADER}" \ |        --build-arg "SWIFTSHADER=${SWIFTSHADER}" \ | ||||||
| @ -525,28 +386,24 @@ docker build \ | |||||||
|        --build-arg "NINJA_VERSION=${NINJA_VERSION:-}" \ |        --build-arg "NINJA_VERSION=${NINJA_VERSION:-}" \ | ||||||
|        --build-arg "KATEX=${KATEX:-}" \ |        --build-arg "KATEX=${KATEX:-}" \ | ||||||
|        --build-arg "ROCM_VERSION=${ROCM_VERSION:-}" \ |        --build-arg "ROCM_VERSION=${ROCM_VERSION:-}" \ | ||||||
|        --build-arg "PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH:-gfx90a}" \ |        --build-arg "PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH:-gfx906;gfx90a}" \ | ||||||
|        --build-arg "IMAGE_NAME=${IMAGE_NAME}" \ |        --build-arg "IMAGE_NAME=${IMAGE_NAME}" \ | ||||||
|        --build-arg "UCX_COMMIT=${UCX_COMMIT}" \ |        --build-arg "UCX_COMMIT=${UCX_COMMIT}" \ | ||||||
|        --build-arg "UCC_COMMIT=${UCC_COMMIT}" \ |        --build-arg "UCC_COMMIT=${UCC_COMMIT}" \ | ||||||
|        --build-arg "CONDA_CMAKE=${CONDA_CMAKE}" \ |        --build-arg "CONDA_CMAKE=${CONDA_CMAKE}" \ | ||||||
|        --build-arg "TRITON=${TRITON}" \ |        --build-arg "TRITON=${TRITON}" \ | ||||||
|        --build-arg "TRITON_CPU=${TRITON_CPU}" \ |  | ||||||
|        --build-arg "ONNX=${ONNX}" \ |        --build-arg "ONNX=${ONNX}" \ | ||||||
|        --build-arg "DOCS=${DOCS}" \ |        --build-arg "DOCS=${DOCS}" \ | ||||||
|        --build-arg "INDUCTOR_BENCHMARKS=${INDUCTOR_BENCHMARKS}" \ |        --build-arg "INDUCTOR_BENCHMARKS=${INDUCTOR_BENCHMARKS}" \ | ||||||
|        --build-arg "EXECUTORCH=${EXECUTORCH}" \ |        --build-arg "EXECUTORCH=${EXECUTORCH}" \ | ||||||
|        --build-arg "HALIDE=${HALIDE}" \ |        --build-arg "BASEKIT_VERSION=${BASEKIT_VERSION}" \ | ||||||
|        --build-arg "XPU_VERSION=${XPU_VERSION}" \ |  | ||||||
|        --build-arg "ACL=${ACL:-}" \ |        --build-arg "ACL=${ACL:-}" \ | ||||||
|        --build-arg "SKIP_SCCACHE_INSTALL=${SKIP_SCCACHE_INSTALL:-}" \ |  | ||||||
|        --build-arg "SKIP_LLVM_SRC_BUILD_INSTALL=${SKIP_LLVM_SRC_BUILD_INSTALL:-}" \ |  | ||||||
|        -f $(dirname ${DOCKERFILE})/Dockerfile \ |        -f $(dirname ${DOCKERFILE})/Dockerfile \ | ||||||
|        -t "$tmp_tag" \ |        -t "$tmp_tag" \ | ||||||
|        "$@" \ |        "$@" \ | ||||||
|        . |        . | ||||||
|  |  | ||||||
| # NVIDIA dockers for RC releases use tag names like `11.0-cudnn9-devel-ubuntu18.04-rc`, | # NVIDIA dockers for RC releases use tag names like `11.0-cudnn8-devel-ubuntu18.04-rc`, | ||||||
| # for this case we will set UBUNTU_VERSION to `18.04-rc` so that the Dockerfile could | # for this case we will set UBUNTU_VERSION to `18.04-rc` so that the Dockerfile could | ||||||
| # find the correct image. As a result, here we have to replace the | # find the correct image. As a result, here we have to replace the | ||||||
| #   "$UBUNTU_VERSION" == "18.04-rc" | #   "$UBUNTU_VERSION" == "18.04-rc" | ||||||
|  | |||||||
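A minimal sketch of the "-rc" handling the comment above refers to (illustrative only, not part of the diff; the UBUNTU_VERSION value and the OS_VERSION variable are hypothetical):

    # Illustrative: a value derived from a tag like 11.0-cudnn9-devel-ubuntu18.04-rc
    UBUNTU_VERSION="18.04-rc"
    if [[ "$UBUNTU_VERSION" == "18.04"* ]]; then   # prefix glob instead of an exact compare
      OS_VERSION="${UBUNTU_VERSION%-rc}"           # hypothetical: strip the suffix to recover plain 18.04
    fi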
| @ -62,7 +62,7 @@ RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi | |||||||
| RUN rm install_db.sh | RUN rm install_db.sh | ||||||
| ENV INSTALLED_DB ${DB} | ENV INSTALLED_DB ${DB} | ||||||
|  |  | ||||||
| # (optional) Install vision packages like OpenCV | # (optional) Install vision packages like OpenCV and ffmpeg | ||||||
| ARG VISION | ARG VISION | ||||||
| COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./ | COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./ | ||||||
| RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi | RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi | ||||||
| @ -77,9 +77,6 @@ RUN rm install_rocm.sh | |||||||
| COPY ./common/install_rocm_magma.sh install_rocm_magma.sh | COPY ./common/install_rocm_magma.sh install_rocm_magma.sh | ||||||
| RUN bash ./install_rocm_magma.sh | RUN bash ./install_rocm_magma.sh | ||||||
| RUN rm install_rocm_magma.sh | RUN rm install_rocm_magma.sh | ||||||
| COPY ./common/install_amdsmi.sh install_amdsmi.sh |  | ||||||
| RUN bash ./install_amdsmi.sh |  | ||||||
| RUN rm install_amdsmi.sh |  | ||||||
| ENV PATH /opt/rocm/bin:$PATH | ENV PATH /opt/rocm/bin:$PATH | ||||||
| ENV PATH /opt/rocm/hcc/bin:$PATH | ENV PATH /opt/rocm/hcc/bin:$PATH | ||||||
| ENV PATH /opt/rocm/hip/bin:$PATH | ENV PATH /opt/rocm/hip/bin:$PATH | ||||||
| @ -108,17 +105,10 @@ ENV CMAKE_C_COMPILER cc | |||||||
| ENV CMAKE_CXX_COMPILER c++ | ENV CMAKE_CXX_COMPILER c++ | ||||||
| COPY ./common/install_triton.sh install_triton.sh | COPY ./common/install_triton.sh install_triton.sh | ||||||
| COPY ./common/common_utils.sh common_utils.sh | COPY ./common/common_utils.sh common_utils.sh | ||||||
| COPY ci_commit_pins/triton.txt triton.txt | COPY ci_commit_pins/triton-rocm.txt triton-rocm.txt | ||||||
| COPY triton_version.txt triton_version.txt | COPY triton_version.txt triton_version.txt | ||||||
| RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi | RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi | ||||||
| RUN rm install_triton.sh common_utils.sh triton.txt triton_version.txt | RUN rm install_triton.sh common_utils.sh triton-rocm.txt triton_version.txt | ||||||
|  |  | ||||||
| # Install AOTriton (Early fail) |  | ||||||
| COPY ./aotriton_version.txt aotriton_version.txt |  | ||||||
| COPY ./common/common_utils.sh common_utils.sh |  | ||||||
| COPY ./common/install_aotriton.sh install_aotriton.sh |  | ||||||
| RUN ["/bin/bash", "-c", "./install_aotriton.sh /opt/rocm && rm -rf install_aotriton.sh aotriton_version.txt common_utils.sh"] |  | ||||||
| ENV AOTRITON_INSTALLED_PREFIX /opt/rocm/aotriton |  | ||||||
|  |  | ||||||
| # Install ccache/sccache (do this last, so we get priority in PATH) | # Install ccache/sccache (do this last, so we get priority in PATH) | ||||||
| COPY ./common/install_cache.sh install_cache.sh | COPY ./common/install_cache.sh install_cache.sh | ||||||
|  | |||||||
| @ -1 +1 @@ | |||||||
| 6f638937d64e3396793956d75ee3e14802022745 | e2a8f9548aecb62a68e264607174a7d207ed2929 | ||||||
|  | |||||||
| @ -1 +0,0 @@ | |||||||
| 461c12871f336fe6f57b55d6a297f13ef209161b |  | ||||||
| @ -1 +1 @@ | |||||||
| ac3470188b914c5d7a5058a7e28b9eb685a62427 | 730b907b4d45a4713cbc425cbf224c46089fd514 | ||||||
|  | |||||||
| @ -1 +0,0 @@ | |||||||
| c7711371cace304afe265c1ffa906415ab82fc66 |  | ||||||
.ci/docker/ci_commit_pins/triton-rocm.txt (Normal file, 1 line)
							| @ -0,0 +1 @@ | |||||||
|  | c8ad905211f45e162102823149f0d7f2cfaa4418 | ||||||
| @ -1 +0,0 @@ | |||||||
| e98b6fcb8df5b44eb0d0addb6767c573d37ba024 |  | ||||||
| @ -1 +1 @@ | |||||||
| 0d4682f073ded4d1a8260dd4208a43d735ae3a2b | 958fccea74da58e7e0595ab88ae6cd3f6795a173 | ||||||
|  | |||||||
| @ -1,6 +1,6 @@ | |||||||
| set -euo pipefail | set -euo pipefail | ||||||
|  |  | ||||||
| readonly version=v24.04 | readonly version=v23.08 | ||||||
| readonly src_host=https://review.mlplatform.org/ml | readonly src_host=https://review.mlplatform.org/ml | ||||||
| readonly src_repo=ComputeLibrary | readonly src_repo=ComputeLibrary | ||||||
|  |  | ||||||
|  | |||||||
| @ -1,5 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| cd /opt/rocm/share/amd_smi && pip install . |  | ||||||
.ci/docker/common/install_android.sh (Executable file, 112 lines)
							| @ -0,0 +1,112 @@ | |||||||
|  | #!/bin/bash | ||||||
|  |  | ||||||
|  | set -ex | ||||||
|  |  | ||||||
|  | [ -n "${ANDROID_NDK}" ] | ||||||
|  |  | ||||||
|  | _https_amazon_aws=https://ossci-android.s3.amazonaws.com | ||||||
|  |  | ||||||
|  | apt-get update | ||||||
|  | apt-get install -y --no-install-recommends autotools-dev autoconf unzip | ||||||
|  | apt-get autoclean && apt-get clean | ||||||
|  | rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* | ||||||
|  |  | ||||||
|  | pushd /tmp | ||||||
|  | curl -Os --retry 3 $_https_amazon_aws/android-ndk-${ANDROID_NDK}-linux-x86_64.zip | ||||||
|  | popd | ||||||
|  | _ndk_dir=/opt/ndk | ||||||
|  | mkdir -p "$_ndk_dir" | ||||||
|  | unzip -qo /tmp/android*.zip -d "$_ndk_dir" | ||||||
|  | _versioned_dir=$(find "$_ndk_dir/" -mindepth 1 -maxdepth 1 -type d) | ||||||
|  | mv "$_versioned_dir"/* "$_ndk_dir"/ | ||||||
|  | rmdir "$_versioned_dir" | ||||||
|  | rm -rf /tmp/* | ||||||
|  |  | ||||||
|  | # Install OpenJDK | ||||||
|  | # https://hub.docker.com/r/picoded/ubuntu-openjdk-8-jdk/dockerfile/ | ||||||
|  |  | ||||||
|  | sudo apt-get update && \ | ||||||
|  |     apt-get install -y openjdk-8-jdk && \ | ||||||
|  |     apt-get install -y ant && \ | ||||||
|  |     apt-get clean && \ | ||||||
|  |     rm -rf /var/lib/apt/lists/* && \ | ||||||
|  |     rm -rf /var/cache/oracle-jdk8-installer; | ||||||
|  |  | ||||||
|  | # Fix certificate issues, found as of | ||||||
|  | # https://bugs.launchpad.net/ubuntu/+source/ca-certificates-java/+bug/983302 | ||||||
|  |  | ||||||
|  | sudo apt-get update && \ | ||||||
|  |     apt-get install -y ca-certificates-java && \ | ||||||
|  |     apt-get clean && \ | ||||||
|  |     update-ca-certificates -f && \ | ||||||
|  |     rm -rf /var/lib/apt/lists/* && \ | ||||||
|  |     rm -rf /var/cache/oracle-jdk8-installer; | ||||||
|  |  | ||||||
|  | export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/ | ||||||
|  |  | ||||||
|  | # Installing android sdk | ||||||
|  | # https://github.com/circleci/circleci-images/blob/staging/android/Dockerfile.m4 | ||||||
|  |  | ||||||
|  | _tmp_sdk_zip=/tmp/android-sdk-linux.zip | ||||||
|  | _android_home=/opt/android/sdk | ||||||
|  |  | ||||||
|  | rm -rf $_android_home | ||||||
|  | sudo mkdir -p $_android_home | ||||||
|  | curl --silent --show-error --location --fail --retry 3 --output /tmp/android-sdk-linux.zip $_https_amazon_aws/android-sdk-linux-tools3859397-build-tools2803-2902-platforms28-29.zip | ||||||
|  | sudo unzip -q $_tmp_sdk_zip -d $_android_home | ||||||
|  | rm $_tmp_sdk_zip | ||||||
|  |  | ||||||
|  | sudo chmod -R 777 $_android_home | ||||||
|  |  | ||||||
|  | export ANDROID_HOME=$_android_home | ||||||
|  | export ADB_INSTALL_TIMEOUT=120 | ||||||
|  |  | ||||||
|  | export PATH="${ANDROID_HOME}/tools:${ANDROID_HOME}/tools/bin:${ANDROID_HOME}/platform-tools:${PATH}" | ||||||
|  | echo "PATH:${PATH}" | ||||||
|  |  | ||||||
|  | # Installing Gradle | ||||||
|  | echo "GRADLE_VERSION:${GRADLE_VERSION}" | ||||||
|  | _gradle_home=/opt/gradle | ||||||
|  | sudo rm -rf $_gradle_home | ||||||
|  | sudo mkdir -p $_gradle_home | ||||||
|  |  | ||||||
|  | curl --silent --output /tmp/gradle.zip --retry 3 $_https_amazon_aws/gradle-${GRADLE_VERSION}-bin.zip | ||||||
|  |  | ||||||
|  | sudo unzip -q /tmp/gradle.zip -d $_gradle_home | ||||||
|  | rm /tmp/gradle.zip | ||||||
|  |  | ||||||
|  | sudo chmod -R 777 $_gradle_home | ||||||
|  |  | ||||||
|  | export GRADLE_HOME=$_gradle_home/gradle-$GRADLE_VERSION | ||||||
|  | alias gradle="${GRADLE_HOME}/bin/gradle" | ||||||
|  |  | ||||||
|  | export PATH="${GRADLE_HOME}/bin/:${PATH}" | ||||||
|  | echo "PATH:${PATH}" | ||||||
|  |  | ||||||
|  | gradle --version | ||||||
|  |  | ||||||
|  | mkdir /var/lib/jenkins/gradledeps | ||||||
|  | cp build.gradle /var/lib/jenkins/gradledeps | ||||||
|  | cp AndroidManifest.xml /var/lib/jenkins/gradledeps | ||||||
|  |  | ||||||
|  | pushd /var/lib/jenkins | ||||||
|  |  | ||||||
|  | export GRADLE_LOCAL_PROPERTIES=gradledeps/local.properties | ||||||
|  | rm -f $GRADLE_LOCAL_PROPERTIES | ||||||
|  | echo "sdk.dir=/opt/android/sdk" >> $GRADLE_LOCAL_PROPERTIES | ||||||
|  | echo "ndk.dir=/opt/ndk" >> $GRADLE_LOCAL_PROPERTIES | ||||||
|  |  | ||||||
|  | chown -R jenkins /var/lib/jenkins/gradledeps | ||||||
|  | chgrp -R jenkins /var/lib/jenkins/gradledeps | ||||||
|  |  | ||||||
|  | sudo -H -u jenkins $GRADLE_HOME/bin/gradle -Pandroid.useAndroidX=true -p /var/lib/jenkins/gradledeps -g /var/lib/jenkins/.gradle --refresh-dependencies --debug --stacktrace assemble | ||||||
|  |  | ||||||
|  | chown -R jenkins /var/lib/jenkins/.gradle | ||||||
|  | chgrp -R jenkins /var/lib/jenkins/.gradle | ||||||
|  |  | ||||||
|  | popd | ||||||
|  |  | ||||||
|  | rm -rf /var/lib/jenkins/.gradle/daemon | ||||||
|  |  | ||||||
|  | # Cache vision models used by the test | ||||||
|  | source "$(dirname "${BASH_SOURCE[0]}")/cache_vision_models.sh" | ||||||
| @ -1,23 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh" |  | ||||||
|  |  | ||||||
| TARBALL='aotriton.tar.gz' |  | ||||||
| # This read command always returns with exit code 1 |  | ||||||
| read -d "\n" VER MANYLINUX ROCMBASE PINNED_COMMIT SHA256 < aotriton_version.txt || true |  | ||||||
| ARCH=$(uname -m) |  | ||||||
| AOTRITON_INSTALL_PREFIX="$1" |  | ||||||
| AOTRITON_URL="https://github.com/ROCm/aotriton/releases/download/${VER}/aotriton-${VER}-${MANYLINUX}_${ARCH}-${ROCMBASE}-shared.tar.gz" |  | ||||||
|  |  | ||||||
| cd "${AOTRITON_INSTALL_PREFIX}" |  | ||||||
| # Must use -L to follow redirects |  | ||||||
| curl -L --retry 3 -o "${TARBALL}" "${AOTRITON_URL}" |  | ||||||
| ACTUAL_SHA256=$(sha256sum "${TARBALL}" | cut -d " " -f 1) |  | ||||||
| if [ "${SHA256}" != "${ACTUAL_SHA256}" ]; then |  | ||||||
|   echo -n "Error: The SHA256 of downloaded tarball is ${ACTUAL_SHA256}," |  | ||||||
|   echo " which does not match the expected value ${SHA256}." |  | ||||||
|   exit 1 |  | ||||||
| fi |  | ||||||
| tar xf "${TARBALL}" && rm -rf "${TARBALL}" |  | ||||||
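As a sketch of the idiom in the removed script above: the read line splits five whitespace-separated tokens from aotriton_version.txt into the named variables, then reaches end of file and returns non-zero (which the "|| true" absorbs, as the original comment notes). The placeholder values below are hypothetical; the real pinned values are not reproduced in this diff, and writing the file here is only to make the sketch self-contained:

    # Sketch only; <placeholders> stand in for the pinned values
    printf '%s\n' "<ver>" "<manylinux>" "<rocmbase>" "<pinned-commit>" "<sha256>" > aotriton_version.txt
    read -d "\n" VER MANYLINUX ROCMBASE PINNED_COMMIT SHA256 < aotriton_version.txt || true
    # tarball name the script would then download
    echo "aotriton-${VER}-${MANYLINUX}_$(uname -m)-${ROCMBASE}-shared.tar.gz"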
| @ -3,7 +3,7 @@ | |||||||
| set -ex | set -ex | ||||||
|  |  | ||||||
| install_ubuntu() { | install_ubuntu() { | ||||||
|   # NVIDIA dockers for RC releases use tag names like `11.0-cudnn9-devel-ubuntu18.04-rc`, |   # NVIDIA dockers for RC releases use tag names like `11.0-cudnn8-devel-ubuntu18.04-rc`, | ||||||
|   # for this case we will set UBUNTU_VERSION to `18.04-rc` so that the Dockerfile could |   # for this case we will set UBUNTU_VERSION to `18.04-rc` so that the Dockerfile could | ||||||
|   # find the correct image. As a result, here we have to check for |   # find the correct image. As a result, here we have to check for | ||||||
|   #   "$UBUNTU_VERSION" == "18.04"* |   #   "$UBUNTU_VERSION" == "18.04"* | ||||||
| @ -76,8 +76,7 @@ install_ubuntu() { | |||||||
|     vim \ |     vim \ | ||||||
|     unzip \ |     unzip \ | ||||||
|     gpg-agent \ |     gpg-agent \ | ||||||
|     gdb \ |     gdb | ||||||
|     bc |  | ||||||
|  |  | ||||||
|   # Should resolve issues related to various apt package repository cert issues |   # Should resolve issues related to various apt package repository cert issues | ||||||
|   # see: https://github.com/pytorch/pytorch/issues/65931 |   # see: https://github.com/pytorch/pytorch/issues/65931 | ||||||
| @ -114,6 +113,7 @@ install_centos() { | |||||||
|     glibc-devel \ |     glibc-devel \ | ||||||
|     glibc-headers \ |     glibc-headers \ | ||||||
|     glog-devel \ |     glog-devel \ | ||||||
|  |     hiredis-devel \ | ||||||
|     libstdc++-devel \ |     libstdc++-devel \ | ||||||
|     libsndfile-devel \ |     libsndfile-devel \ | ||||||
|     make \ |     make \ | ||||||
|  | |||||||
| @ -9,7 +9,7 @@ install_ubuntu() { | |||||||
|   # Instead use lib and headers from OpenSSL 1.1 installed in `install_openssl.sh` |   # Instead use lib and headers from OpenSSL 1.1 installed in `install_openssl.sh` | ||||||
|   apt-get install -y cargo |   apt-get install -y cargo | ||||||
|   echo "Checking out sccache repo" |   echo "Checking out sccache repo" | ||||||
|   git clone https://github.com/mozilla/sccache -b v0.9.0 |   git clone https://github.com/pytorch/sccache | ||||||
|   cd sccache |   cd sccache | ||||||
|   echo "Building sccache" |   echo "Building sccache" | ||||||
|   cargo build --release |   cargo build --release | ||||||
| @ -19,10 +19,6 @@ install_ubuntu() { | |||||||
|   rm -rf sccache |   rm -rf sccache | ||||||
|   apt-get remove -y cargo rustc |   apt-get remove -y cargo rustc | ||||||
|   apt-get autoclean && apt-get clean |   apt-get autoclean && apt-get clean | ||||||
|  |  | ||||||
|   echo "Downloading old sccache binary from S3 repo for PCH builds" |  | ||||||
|   curl --retry 3 https://s3.amazonaws.com/ossci-linux/sccache -o /opt/cache/bin/sccache-0.2.14a |  | ||||||
|   chmod 755 /opt/cache/bin/sccache-0.2.14a |  | ||||||
| } | } | ||||||
|  |  | ||||||
| install_binary() { | install_binary() { | ||||||
| @ -39,43 +35,19 @@ export PATH="/opt/cache/bin:$PATH" | |||||||
| if [ -n "$ROCM_VERSION" ]; then | if [ -n "$ROCM_VERSION" ]; then | ||||||
|   curl --retry 3 http://repo.radeon.com/misc/.sccache_amd/sccache -o /opt/cache/bin/sccache |   curl --retry 3 http://repo.radeon.com/misc/.sccache_amd/sccache -o /opt/cache/bin/sccache | ||||||
| else | else | ||||||
|   install_ubuntu |   ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"') | ||||||
|  |   # TODO: Install the pre-built binary from S3 as building from source | ||||||
|  |   # https://github.com/pytorch/sccache has started failing mysteriously | ||||||
|  |   # in which sccache server couldn't start with the following error: | ||||||
|  |   #   sccache: error: Invalid argument (os error 22) | ||||||
|  |   install_binary | ||||||
| fi | fi | ||||||
| chmod a+x /opt/cache/bin/sccache | chmod a+x /opt/cache/bin/sccache | ||||||
|  |  | ||||||
| function write_sccache_stub() { | function write_sccache_stub() { | ||||||
|   # Unset LD_PRELOAD for ps because of asan + ps issues |   # Unset LD_PRELOAD for ps because of asan + ps issues | ||||||
|   # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90589 |   # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90589 | ||||||
|   if [ $1 == "gcc" ]; then |   printf "#!/bin/sh\nif [ \$(env -u LD_PRELOAD ps -p \$PPID -o comm=) != sccache ]; then\n  exec sccache $(which $1) \"\$@\"\nelse\n  exec $(which $1) \"\$@\"\nfi" > "/opt/cache/bin/$1" | ||||||
|     # Do not call sccache recursively when dumping preprocessor argument |  | ||||||
|     # For some reason it's very important for the first cached nvcc invocation |  | ||||||
|     cat >"/opt/cache/bin/$1" <<EOF |  | ||||||
| #!/bin/sh |  | ||||||
|  |  | ||||||
| # sccache does not support -E flag, so we need to call the original compiler directly in order to avoid calling this wrapper recursively |  | ||||||
| for arg in "\$@"; do |  | ||||||
|   if [ "\$arg" = "-E" ]; then |  | ||||||
|     exec $(which $1) "\$@" |  | ||||||
|   fi |  | ||||||
| done |  | ||||||
|  |  | ||||||
| if [ \$(env -u LD_PRELOAD ps -p \$PPID -o comm=) != sccache ]; then |  | ||||||
|   exec sccache $(which $1) "\$@" |  | ||||||
| else |  | ||||||
|   exec $(which $1) "\$@" |  | ||||||
| fi |  | ||||||
| EOF |  | ||||||
|   else |  | ||||||
|     cat >"/opt/cache/bin/$1" <<EOF |  | ||||||
| #!/bin/sh |  | ||||||
|  |  | ||||||
| if [ \$(env -u LD_PRELOAD ps -p \$PPID -o comm=) != sccache ]; then |  | ||||||
|   exec sccache $(which $1) "\$@" |  | ||||||
| else |  | ||||||
|   exec $(which $1) "\$@" |  | ||||||
| fi |  | ||||||
| EOF |  | ||||||
|   fi |  | ||||||
|   chmod a+x "/opt/cache/bin/$1" |   chmod a+x "/opt/cache/bin/$1" | ||||||
| } | } | ||||||
|  |  | ||||||
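For readability, the single-line printf in the right-hand column writes a stub equivalent to the following (shown for cc, assuming "which cc" resolves to /usr/bin/cc; the path is illustrative). The PPID check keeps the stub from re-invoking sccache when sccache itself launches the compiler:

    #!/bin/sh
    # generated wrapper installed as /opt/cache/bin/cc (illustrative expansion of the printf above)
    if [ $(env -u LD_PRELOAD ps -p $PPID -o comm=) != sccache ]; then
      exec sccache /usr/bin/cc "$@"
    else
      exec /usr/bin/cc "$@"
    fi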
| @ -116,7 +88,7 @@ if [ -n "$ROCM_VERSION" ]; then | |||||||
|     TOPDIR=$(dirname $OLDCOMP) |     TOPDIR=$(dirname $OLDCOMP) | ||||||
|     WRAPPED="$TOPDIR/original/$COMPNAME" |     WRAPPED="$TOPDIR/original/$COMPNAME" | ||||||
|     mv "$OLDCOMP" "$WRAPPED" |     mv "$OLDCOMP" "$WRAPPED" | ||||||
|     printf "#!/bin/sh\nexec sccache $WRAPPED \"\$@\"" >"$OLDCOMP" |     printf "#!/bin/sh\nexec sccache $WRAPPED \"\$@\"" > "$OLDCOMP" | ||||||
|     chmod a+x "$OLDCOMP" |     chmod a+x "$OLDCOMP" | ||||||
|   } |   } | ||||||
|  |  | ||||||
|  | |||||||
| @ -13,18 +13,11 @@ if [ -n "$CLANG_VERSION" ]; then | |||||||
|   elif [[ $UBUNTU_VERSION == 22.04 ]]; then |   elif [[ $UBUNTU_VERSION == 22.04 ]]; then | ||||||
|     # work around ubuntu apt-get conflicts |     # work around ubuntu apt-get conflicts | ||||||
|     sudo apt-get -y -f install |     sudo apt-get -y -f install | ||||||
|     wget --no-check-certificate -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add  - |  | ||||||
|     if [[ $CLANG_VERSION == 18 ]]; then |  | ||||||
|       apt-add-repository "deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-18 main" |  | ||||||
|     fi |  | ||||||
|   fi |   fi | ||||||
|  |  | ||||||
|   sudo apt-get update |   sudo apt-get update | ||||||
|   if [[ $CLANG_VERSION -ge 18 ]]; then |   apt-get install -y --no-install-recommends clang-"$CLANG_VERSION" | ||||||
|     apt-get install -y libomp-${CLANG_VERSION}-dev libclang-rt-${CLANG_VERSION}-dev clang-"$CLANG_VERSION" llvm-"$CLANG_VERSION" |   apt-get install -y --no-install-recommends llvm-"$CLANG_VERSION" | ||||||
|   else |  | ||||||
|     apt-get install -y --no-install-recommends clang-"$CLANG_VERSION" llvm-"$CLANG_VERSION" |  | ||||||
|   fi |  | ||||||
|  |  | ||||||
|   # Install dev version of LLVM. |   # Install dev version of LLVM. | ||||||
|   if [ -n "$LLVMDEV" ]; then |   if [ -n "$LLVMDEV" ]; then | ||||||
|  | |||||||
| @ -5,28 +5,37 @@ set -ex | |||||||
| # Optionally install conda | # Optionally install conda | ||||||
| if [ -n "$ANACONDA_PYTHON_VERSION" ]; then | if [ -n "$ANACONDA_PYTHON_VERSION" ]; then | ||||||
|   BASE_URL="https://repo.anaconda.com/miniconda" |   BASE_URL="https://repo.anaconda.com/miniconda" | ||||||
|   CONDA_FILE="Miniconda3-latest-Linux-x86_64.sh" |  | ||||||
|   if [[ $(uname -m) == "aarch64" ]] || [[ "$BUILD_ENVIRONMENT" == *xpu* ]]; then |  | ||||||
|     BASE_URL="https://github.com/conda-forge/miniforge/releases/latest/download" |  | ||||||
|     CONDA_FILE="Miniforge3-Linux-$(uname -m).sh" |  | ||||||
|   fi |  | ||||||
|  |  | ||||||
|   MAJOR_PYTHON_VERSION=$(echo "$ANACONDA_PYTHON_VERSION" | cut -d . -f 1) |   MAJOR_PYTHON_VERSION=$(echo "$ANACONDA_PYTHON_VERSION" | cut -d . -f 1) | ||||||
|   MINOR_PYTHON_VERSION=$(echo "$ANACONDA_PYTHON_VERSION" | cut -d . -f 2) |   MINOR_PYTHON_VERSION=$(echo "$ANACONDA_PYTHON_VERSION" | cut -d . -f 2) | ||||||
|  |  | ||||||
|  | if [[ $(uname -m) == "aarch64" ]]; then | ||||||
|  |   BASE_URL="https://github.com/conda-forge/miniforge/releases/latest/download" | ||||||
|   case "$MAJOR_PYTHON_VERSION" in |   case "$MAJOR_PYTHON_VERSION" in | ||||||
|     3);; |     3) | ||||||
|  |       CONDA_FILE="Miniforge3-Linux-aarch64.sh" | ||||||
|  |     ;; | ||||||
|     *) |     *) | ||||||
|       echo "Unsupported ANACONDA_PYTHON_VERSION: $ANACONDA_PYTHON_VERSION" |       echo "Unsupported ANACONDA_PYTHON_VERSION: $ANACONDA_PYTHON_VERSION" | ||||||
|       exit 1 |       exit 1 | ||||||
|       ;; |       ;; | ||||||
|   esac |   esac | ||||||
|  | else | ||||||
|  |   case "$MAJOR_PYTHON_VERSION" in | ||||||
|  |     3) | ||||||
|  |       CONDA_FILE="Miniconda3-latest-Linux-x86_64.sh" | ||||||
|  |     ;; | ||||||
|  |     *) | ||||||
|  |       echo "Unsupported ANACONDA_PYTHON_VERSION: $ANACONDA_PYTHON_VERSION" | ||||||
|  |       exit 1 | ||||||
|  |       ;; | ||||||
|  |   esac | ||||||
|  | fi | ||||||
|  |  | ||||||
|   mkdir -p /opt/conda |   mkdir -p /opt/conda | ||||||
|   chown jenkins:jenkins /opt/conda |   chown jenkins:jenkins /opt/conda | ||||||
|  |  | ||||||
|   SCRIPT_FOLDER="$( cd "$(dirname "$0")" ; pwd -P )" |   source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh" | ||||||
|   source "${SCRIPT_FOLDER}/common_utils.sh" |  | ||||||
|  |  | ||||||
|   pushd /tmp |   pushd /tmp | ||||||
|   wget -q "${BASE_URL}/${CONDA_FILE}" |   wget -q "${BASE_URL}/${CONDA_FILE}" | ||||||
| @ -66,9 +75,21 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then | |||||||
|  |  | ||||||
|   # Install PyTorch conda deps, as per https://github.com/pytorch/pytorch README |   # Install PyTorch conda deps, as per https://github.com/pytorch/pytorch README | ||||||
|   if [[ $(uname -m) == "aarch64" ]]; then |   if [[ $(uname -m) == "aarch64" ]]; then | ||||||
|     conda_install "openblas==0.3.28=*openmp*" |     CONDA_COMMON_DEPS="astunparse pyyaml setuptools openblas==0.3.25=*openmp* ninja==1.11.1 scons==4.5.2" | ||||||
|  |  | ||||||
|  |     if [ "$ANACONDA_PYTHON_VERSION" = "3.8" ]; then | ||||||
|  |       conda_install numpy=1.24.4 ${CONDA_COMMON_DEPS} | ||||||
|  |     else | ||||||
|  |       conda_install numpy=1.26.2 ${CONDA_COMMON_DEPS} | ||||||
|  |     fi | ||||||
|   else |   else | ||||||
|     conda_install "mkl=2021.4.0 mkl-include=2021.4.0" |     CONDA_COMMON_DEPS="astunparse pyyaml mkl=2021.4.0 mkl-include=2021.4.0 setuptools" | ||||||
|  |  | ||||||
|  |     if [ "$ANACONDA_PYTHON_VERSION" = "3.11" ] || [ "$ANACONDA_PYTHON_VERSION" = "3.12" ]; then | ||||||
|  |       conda_install numpy=1.26.0 ${CONDA_COMMON_DEPS} | ||||||
|  |     else | ||||||
|  |       conda_install numpy=1.21.2 ${CONDA_COMMON_DEPS} | ||||||
|  |     fi | ||||||
|   fi |   fi | ||||||
|  |  | ||||||
|   # Install llvm-8 as it is required to compile llvmlite-0.30.0 from source |   # Install llvm-8 as it is required to compile llvmlite-0.30.0 from source | ||||||
| @ -85,14 +106,15 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then | |||||||
|  |  | ||||||
|   # Magma package names are a concatenation of CUDA major and minor, ignoring revision |   # Magma package names are a concatenation of CUDA major and minor, ignoring revision | ||||||
|   # I.e. the magma-cuda102 package corresponds to CUDA_VERSION=10.2 and CUDA_VERSION=10.2.89 |   # I.e. the magma-cuda102 package corresponds to CUDA_VERSION=10.2 and CUDA_VERSION=10.2.89 | ||||||
|   # Magma is installed from a tarball in the ossci-linux bucket into the conda env |  | ||||||
|   if [ -n "$CUDA_VERSION" ]; then |   if [ -n "$CUDA_VERSION" ]; then | ||||||
|     ${SCRIPT_FOLDER}/install_magma_conda.sh $(cut -f1-2 -d'.' <<< ${CUDA_VERSION}) ${ANACONDA_PYTHON_VERSION} |     conda_install magma-cuda$(TMP=${CUDA_VERSION/./};echo ${TMP%.*[0-9]}) -c pytorch | ||||||
|   fi |   fi | ||||||
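The two columns derive the magma identifier from CUDA_VERSION differently; a worked example (12.1.1 is only an illustrative value):

    CUDA_VERSION=12.1.1                                       # illustrative value
    TMP=${CUDA_VERSION/./}; echo "magma-cuda${TMP%.*[0-9]}"   # right column: conda package name -> magma-cuda121
    cut -f1-2 -d'.' <<< ${CUDA_VERSION}                       # left column: major.minor passed to install_magma_conda.sh -> 12.1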
|  |  | ||||||
|   # Install some other packages, including those needed for Python test reporting |   # Install some other packages, including those needed for Python test reporting | ||||||
|   pip_install -r /opt/conda/requirements-ci.txt |   pip_install -r /opt/conda/requirements-ci.txt | ||||||
|  |  | ||||||
|  |   pip_install -U scikit-learn | ||||||
|  |  | ||||||
|   if [ -n "$DOCS" ]; then |   if [ -n "$DOCS" ]; then | ||||||
|     apt-get update |     apt-get update | ||||||
|     apt-get -y install expect-dev |     apt-get -y install expect-dev | ||||||
|  | |||||||
| @ -1,20 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
| # Script used only in CD pipeline |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| # Anaconda |  | ||||||
| # Latest anaconda is using openssl-3 which is incompatible with all currently published versions of git |  | ||||||
| # Which are using openssl-1.1.1, see https://anaconda.org/anaconda/git/files?version=2.40.1 for example |  | ||||||
| MINICONDA_URL=https://repo.anaconda.com/miniconda/Miniconda3-py311_23.5.2-0-Linux-x86_64.sh |  | ||||||
| wget -q $MINICONDA_URL |  | ||||||
| # NB: Manually invoke bash per https://github.com/conda/conda/issues/10431 |  | ||||||
| bash $(basename "$MINICONDA_URL") -b -p /opt/conda |  | ||||||
| rm $(basename "$MINICONDA_URL") |  | ||||||
| export PATH=/opt/conda/bin:$PATH |  | ||||||
| # See https://github.com/pytorch/builder/issues/1473 |  | ||||||
| # Pin conda to 23.5.2 as it's the last one compatible with openssl-1.1.1 |  | ||||||
| conda install -y conda=23.5.2 conda-build anaconda-client git ninja |  | ||||||
| # The cmake version here needs to match with the minimum version of cmake |  | ||||||
| # supported by PyTorch (3.18). There is only 3.18.2 on anaconda |  | ||||||
| /opt/conda/bin/pip3 install cmake==3.18.2 |  | ||||||
| conda remove -y --force patchelf |  | ||||||
| @ -1,112 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
| # Script used only in CD pipeline |  | ||||||
| set -uex -o pipefail |  | ||||||
|  |  | ||||||
| PYTHON_DOWNLOAD_URL=https://www.python.org/ftp/python |  | ||||||
| PYTHON_DOWNLOAD_GITHUB_BRANCH=https://github.com/python/cpython/archive/refs/heads |  | ||||||
| GET_PIP_URL=https://bootstrap.pypa.io/get-pip.py |  | ||||||
|  |  | ||||||
| # Python versions to be installed in /opt/$VERSION_NO |  | ||||||
| CPYTHON_VERSIONS=${CPYTHON_VERSIONS:-"3.8.1 3.9.0 3.10.1 3.11.0 3.12.0 3.13.0 3.13.0t"} |  | ||||||
|  |  | ||||||
| function check_var { |  | ||||||
|     if [ -z "$1" ]; then |  | ||||||
|         echo "required variable not defined" |  | ||||||
|         exit 1 |  | ||||||
|     fi |  | ||||||
| } |  | ||||||
|  |  | ||||||
| function do_cpython_build { |  | ||||||
|     local py_ver=$1 |  | ||||||
|     local py_folder=$2 |  | ||||||
|     check_var $py_ver |  | ||||||
|     check_var $py_folder |  | ||||||
|     tar -xzf Python-$py_ver.tgz |  | ||||||
|  |  | ||||||
|     local additional_flags="" |  | ||||||
|     if [ "$py_ver" == "3.13.0t" ]; then |  | ||||||
|         additional_flags=" --disable-gil" |  | ||||||
|         mv cpython-3.13/ cpython-3.13t/ |  | ||||||
|     fi |  | ||||||
|  |  | ||||||
|     pushd $py_folder |  | ||||||
|  |  | ||||||
|     local prefix="/opt/_internal/cpython-${py_ver}" |  | ||||||
|     mkdir -p ${prefix}/lib |  | ||||||
|     if [[ -n $(which patchelf) ]]; then |  | ||||||
|         local shared_flags="--enable-shared" |  | ||||||
|     else |  | ||||||
|         local shared_flags="--disable-shared" |  | ||||||
|     fi |  | ||||||
|     if [[ -z  "${WITH_OPENSSL+x}" ]]; then |  | ||||||
|         local openssl_flags="" |  | ||||||
|     else |  | ||||||
|         local openssl_flags="--with-openssl=${WITH_OPENSSL} --with-openssl-rpath=auto" |  | ||||||
|     fi |  | ||||||
|  |  | ||||||
|  |  | ||||||
|  |  | ||||||
|     # -Wformat added for https://bugs.python.org/issue17547 on Python 2.6 |  | ||||||
|     CFLAGS="-Wformat" ./configure --prefix=${prefix} ${openssl_flags} ${shared_flags} ${additional_flags} > /dev/null |  | ||||||
|  |  | ||||||
|     make -j40 > /dev/null |  | ||||||
|     make install > /dev/null |  | ||||||
|  |  | ||||||
|     if [[ "${shared_flags}" == "--enable-shared" ]]; then |  | ||||||
|         patchelf --set-rpath '$ORIGIN/../lib' ${prefix}/bin/python3 |  | ||||||
|     fi |  | ||||||
|  |  | ||||||
|     popd |  | ||||||
|     rm -rf $py_folder |  | ||||||
|     # Some Pythons install as bin/python3. Make them available as |  | ||||||
|     # bin/python. |  | ||||||
|     if [ -e ${prefix}/bin/python3 ]; then |  | ||||||
|         ln -s python3 ${prefix}/bin/python |  | ||||||
|     fi |  | ||||||
|     ${prefix}/bin/python get-pip.py |  | ||||||
|     if [ -e ${prefix}/bin/pip3 ] && [ ! -e ${prefix}/bin/pip ]; then |  | ||||||
|         ln -s pip3 ${prefix}/bin/pip |  | ||||||
|     fi |  | ||||||
|     # install setuptools since Python 3.12 needs it in order to use distutils |  | ||||||
|     ${prefix}/bin/pip install wheel==0.34.2 setuptools==68.2.2 |  | ||||||
|     local abi_tag=$(${prefix}/bin/python -c "from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag; print('{0}{1}-{2}'.format(get_abbr_impl(), get_impl_ver(), get_abi_tag()))") |  | ||||||
|     ln -sf ${prefix} /opt/python/${abi_tag} |  | ||||||
| } |  | ||||||
|  |  | ||||||
| function build_cpython { |  | ||||||
|     local py_ver=$1 |  | ||||||
|     check_var $py_ver |  | ||||||
|     check_var $PYTHON_DOWNLOAD_URL |  | ||||||
|     local py_ver_folder=$py_ver |  | ||||||
|  |  | ||||||
|     if [ "$py_ver" = "3.13.0t" ]; then |  | ||||||
|         PY_VER_SHORT="3.13" |  | ||||||
|         PYT_VER_SHORT="3.13t" |  | ||||||
|         check_var $PYTHON_DOWNLOAD_GITHUB_BRANCH |  | ||||||
|         wget $PYTHON_DOWNLOAD_GITHUB_BRANCH/$PY_VER_SHORT.tar.gz -O Python-$py_ver.tgz |  | ||||||
|         do_cpython_build $py_ver cpython-$PYT_VER_SHORT |  | ||||||
|     elif [ "$py_ver" = "3.13.0" ]; then |  | ||||||
|         PY_VER_SHORT="3.13" |  | ||||||
|         check_var $PYTHON_DOWNLOAD_GITHUB_BRANCH |  | ||||||
|         wget $PYTHON_DOWNLOAD_GITHUB_BRANCH/$PY_VER_SHORT.tar.gz -O Python-$py_ver.tgz |  | ||||||
|         do_cpython_build $py_ver cpython-$PY_VER_SHORT |  | ||||||
|     else |  | ||||||
|         wget -q $PYTHON_DOWNLOAD_URL/$py_ver_folder/Python-$py_ver.tgz |  | ||||||
|         do_cpython_build $py_ver Python-$py_ver |  | ||||||
|     fi |  | ||||||
|  |  | ||||||
|     rm -f Python-$py_ver.tgz |  | ||||||
| } |  | ||||||
|  |  | ||||||
| function build_cpythons { |  | ||||||
|     check_var $GET_PIP_URL |  | ||||||
|     curl -sLO $GET_PIP_URL |  | ||||||
|     for py_ver in $@; do |  | ||||||
|         build_cpython $py_ver |  | ||||||
|     done |  | ||||||
|     rm -f get-pip.py |  | ||||||
| } |  | ||||||
|  |  | ||||||
| mkdir -p /opt/python |  | ||||||
| mkdir -p /opt/_internal |  | ||||||
| build_cpythons $CPYTHON_VERSIONS |  | ||||||
| @ -1,332 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| NCCL_VERSION=v2.21.5-1 |  | ||||||
| CUDNN_VERSION=9.5.1.17 |  | ||||||
|  |  | ||||||
| function install_cusparselt_040 { |  | ||||||
|     # cuSparseLt license: https://docs.nvidia.com/cuda/cusparselt/license.html |  | ||||||
|     mkdir tmp_cusparselt && pushd tmp_cusparselt |  | ||||||
|     wget -q https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-x86_64/libcusparse_lt-linux-x86_64-0.4.0.7-archive.tar.xz |  | ||||||
|     tar xf libcusparse_lt-linux-x86_64-0.4.0.7-archive.tar.xz |  | ||||||
|     cp -a libcusparse_lt-linux-x86_64-0.4.0.7-archive/include/* /usr/local/cuda/include/ |  | ||||||
|     cp -a libcusparse_lt-linux-x86_64-0.4.0.7-archive/lib/* /usr/local/cuda/lib64/ |  | ||||||
|     popd |  | ||||||
|     rm -rf tmp_cusparselt |  | ||||||
| } |  | ||||||
|  |  | ||||||
| function install_cusparselt_052 { |  | ||||||
|     # cuSparseLt license: https://docs.nvidia.com/cuda/cusparselt/license.html |  | ||||||
|     mkdir tmp_cusparselt && pushd tmp_cusparselt |  | ||||||
|     wget -q https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-x86_64/libcusparse_lt-linux-x86_64-0.5.2.1-archive.tar.xz |  | ||||||
|     tar xf libcusparse_lt-linux-x86_64-0.5.2.1-archive.tar.xz |  | ||||||
|     cp -a libcusparse_lt-linux-x86_64-0.5.2.1-archive/include/* /usr/local/cuda/include/ |  | ||||||
|     cp -a libcusparse_lt-linux-x86_64-0.5.2.1-archive/lib/* /usr/local/cuda/lib64/ |  | ||||||
|     popd |  | ||||||
|     rm -rf tmp_cusparselt |  | ||||||
| } |  | ||||||
|  |  | ||||||
| function install_cusparselt_062 { |  | ||||||
|     # cuSparseLt license: https://docs.nvidia.com/cuda/cusparselt/license.html |  | ||||||
|     mkdir tmp_cusparselt && pushd tmp_cusparselt |  | ||||||
|     wget -q https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-x86_64/libcusparse_lt-linux-x86_64-0.6.2.3-archive.tar.xz |  | ||||||
|     tar xf libcusparse_lt-linux-x86_64-0.6.2.3-archive.tar.xz |  | ||||||
|     cp -a libcusparse_lt-linux-x86_64-0.6.2.3-archive/include/* /usr/local/cuda/include/ |  | ||||||
|     cp -a libcusparse_lt-linux-x86_64-0.6.2.3-archive/lib/* /usr/local/cuda/lib64/ |  | ||||||
|     popd |  | ||||||
|     rm -rf tmp_cusparselt |  | ||||||
| } |  | ||||||
|  |  | ||||||
| function install_cusparselt_063 { |  | ||||||
|     # cuSparseLt license: https://docs.nvidia.com/cuda/cusparselt/license.html |  | ||||||
|     mkdir tmp_cusparselt && pushd tmp_cusparselt |  | ||||||
|     wget -q https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-x86_64/libcusparse_lt-linux-x86_64-0.6.3.2-archive.tar.xz |  | ||||||
|     tar xf libcusparse_lt-linux-x86_64-0.6.3.2-archive.tar.xz |  | ||||||
|     cp -a libcusparse_lt-linux-x86_64-0.6.3.2-archive/include/* /usr/local/cuda/include/ |  | ||||||
|     cp -a libcusparse_lt-linux-x86_64-0.6.3.2-archive/lib/* /usr/local/cuda/lib64/ |  | ||||||
|     popd |  | ||||||
|     rm -rf tmp_cusparselt |  | ||||||
| } |  | ||||||
|  |  | ||||||
| function install_118 { |  | ||||||
|     CUDNN_VERSION=9.1.0.70 |  | ||||||
|     echo "Installing CUDA 11.8 and cuDNN ${CUDNN_VERSION} and NCCL ${NCCL_VERSION} and cuSparseLt-0.4.0" |  | ||||||
|     rm -rf /usr/local/cuda-11.8 /usr/local/cuda |  | ||||||
|     # install CUDA 11.8.0 in the same container |  | ||||||
|     wget -q https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run |  | ||||||
|     chmod +x cuda_11.8.0_520.61.05_linux.run |  | ||||||
|     ./cuda_11.8.0_520.61.05_linux.run --toolkit --silent |  | ||||||
|     rm -f cuda_11.8.0_520.61.05_linux.run |  | ||||||
|     rm -f /usr/local/cuda && ln -s /usr/local/cuda-11.8 /usr/local/cuda |  | ||||||
|  |  | ||||||
|     # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement |  | ||||||
|     mkdir tmp_cudnn && cd tmp_cudnn |  | ||||||
|     wget -q https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-${CUDNN_VERSION}_cuda11-archive.tar.xz -O cudnn-linux-x86_64-${CUDNN_VERSION}_cuda11-archive.tar.xz |  | ||||||
|     tar xf cudnn-linux-x86_64-${CUDNN_VERSION}_cuda11-archive.tar.xz |  | ||||||
|     cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda11-archive/include/* /usr/local/cuda/include/ |  | ||||||
|     cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda11-archive/lib/* /usr/local/cuda/lib64/ |  | ||||||
|     cd .. |  | ||||||
|     rm -rf tmp_cudnn |  | ||||||
|  |  | ||||||
|     # NCCL license: https://docs.nvidia.com/deeplearning/nccl/#licenses |  | ||||||
|     # Follow build: https://github.com/NVIDIA/nccl/tree/master?tab=readme-ov-file#build |  | ||||||
|     git clone -b $NCCL_VERSION --depth 1 https://github.com/NVIDIA/nccl.git |  | ||||||
|     cd nccl && make -j src.build |  | ||||||
|     cp -a build/include/* /usr/local/cuda/include/ |  | ||||||
|     cp -a build/lib/* /usr/local/cuda/lib64/ |  | ||||||
|     cd .. |  | ||||||
|     rm -rf nccl |  | ||||||
|  |  | ||||||
|     install_cusparselt_040 |  | ||||||
|  |  | ||||||
|     ldconfig |  | ||||||
| } |  | ||||||
|  |  | ||||||
| function install_121 { |  | ||||||
|     echo "Installing CUDA 12.1 and cuDNN ${CUDNN_VERSION} and NCCL ${NCCL_VERSION} and cuSparseLt-0.5.2" |  | ||||||
|     rm -rf /usr/local/cuda-12.1 /usr/local/cuda |  | ||||||
|     # install CUDA 12.1.0 in the same container |  | ||||||
|     wget -q https://developer.download.nvidia.com/compute/cuda/12.1.1/local_installers/cuda_12.1.1_530.30.02_linux.run |  | ||||||
|     chmod +x cuda_12.1.1_530.30.02_linux.run |  | ||||||
|     ./cuda_12.1.1_530.30.02_linux.run --toolkit --silent |  | ||||||
|     rm -f cuda_12.1.1_530.30.02_linux.run |  | ||||||
|     rm -f /usr/local/cuda && ln -s /usr/local/cuda-12.1 /usr/local/cuda |  | ||||||
|  |  | ||||||
|     # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement |  | ||||||
|     mkdir tmp_cudnn && cd tmp_cudnn |  | ||||||
|     wget -q https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz -O cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz |  | ||||||
|     tar xf cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz |  | ||||||
|     cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive/include/* /usr/local/cuda/include/ |  | ||||||
|     cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive/lib/* /usr/local/cuda/lib64/ |  | ||||||
|     cd .. |  | ||||||
|     rm -rf tmp_cudnn |  | ||||||
|  |  | ||||||
|     # NCCL license: https://docs.nvidia.com/deeplearning/nccl/#licenses |  | ||||||
|     # Follow build: https://github.com/NVIDIA/nccl/tree/master?tab=readme-ov-file#build |  | ||||||
|     git clone -b $NCCL_VERSION --depth 1 https://github.com/NVIDIA/nccl.git |  | ||||||
|     cd nccl && make -j src.build |  | ||||||
|     cp -a build/include/* /usr/local/cuda/include/ |  | ||||||
|     cp -a build/lib/* /usr/local/cuda/lib64/ |  | ||||||
|     cd .. |  | ||||||
|     rm -rf nccl |  | ||||||
|  |  | ||||||
|     install_cusparselt_052 |  | ||||||
|  |  | ||||||
|     ldconfig |  | ||||||
| } |  | ||||||
|  |  | ||||||
| function install_124 { |  | ||||||
|   CUDNN_VERSION=9.1.0.70 |  | ||||||
|   echo "Installing CUDA 12.4.1 and cuDNN ${CUDNN_VERSION} and NCCL ${NCCL_VERSION} and cuSparseLt-0.6.2" |  | ||||||
|   rm -rf /usr/local/cuda-12.4 /usr/local/cuda |  | ||||||
|   # install CUDA 12.4.1 in the same container |  | ||||||
|   wget -q https://developer.download.nvidia.com/compute/cuda/12.4.1/local_installers/cuda_12.4.1_550.54.15_linux.run |  | ||||||
|   chmod +x cuda_12.4.1_550.54.15_linux.run |  | ||||||
|   ./cuda_12.4.1_550.54.15_linux.run --toolkit --silent |  | ||||||
|   rm -f cuda_12.4.1_550.54.15_linux.run |  | ||||||
|   rm -f /usr/local/cuda && ln -s /usr/local/cuda-12.4 /usr/local/cuda |  | ||||||
|  |  | ||||||
|   # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement |  | ||||||
|   mkdir tmp_cudnn && cd tmp_cudnn |  | ||||||
|   wget -q https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz -O cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz |  | ||||||
|   tar xf cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz |  | ||||||
|   cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive/include/* /usr/local/cuda/include/ |  | ||||||
|   cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive/lib/* /usr/local/cuda/lib64/ |  | ||||||
|   cd .. |  | ||||||
|   rm -rf tmp_cudnn |  | ||||||
|  |  | ||||||
|   # NCCL license: https://docs.nvidia.com/deeplearning/nccl/#licenses |  | ||||||
|   # Follow build: https://github.com/NVIDIA/nccl/tree/master?tab=readme-ov-file#build |  | ||||||
|   git clone -b $NCCL_VERSION --depth 1 https://github.com/NVIDIA/nccl.git |  | ||||||
|   cd nccl && make -j src.build |  | ||||||
|   cp -a build/include/* /usr/local/cuda/include/ |  | ||||||
|   cp -a build/lib/* /usr/local/cuda/lib64/ |  | ||||||
|   cd .. |  | ||||||
|   rm -rf nccl |  | ||||||
|  |  | ||||||
|   install_cusparselt_062 |  | ||||||
|  |  | ||||||
|   ldconfig |  | ||||||
| } |  | ||||||
|  |  | ||||||
| function install_126 { |  | ||||||
|   echo "Installing CUDA 12.6.3 and cuDNN ${CUDNN_VERSION} and NCCL ${NCCL_VERSION} and cuSparseLt-0.6.3" |  | ||||||
|   rm -rf /usr/local/cuda-12.6 /usr/local/cuda |  | ||||||
|   # install CUDA 12.6.3 in the same container |  | ||||||
|   wget -q https://developer.download.nvidia.com/compute/cuda/12.6.3/local_installers/cuda_12.6.3_560.35.05_linux.run |  | ||||||
|   chmod +x cuda_12.6.3_560.35.05_linux.run |  | ||||||
|   ./cuda_12.6.3_560.35.05_linux.run --toolkit --silent |  | ||||||
|   rm -f cuda_12.6.3_560.35.05_linux.run |  | ||||||
|   rm -f /usr/local/cuda && ln -s /usr/local/cuda-12.6 /usr/local/cuda |  | ||||||
|  |  | ||||||
|   # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement |  | ||||||
|   mkdir tmp_cudnn && cd tmp_cudnn |  | ||||||
|   wget -q https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz -O cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz |  | ||||||
|   tar xf cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz |  | ||||||
|   cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive/include/* /usr/local/cuda/include/ |  | ||||||
|   cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive/lib/* /usr/local/cuda/lib64/ |  | ||||||
|   cd .. |  | ||||||
|   rm -rf tmp_cudnn |  | ||||||
|  |  | ||||||
|   # NCCL license: https://docs.nvidia.com/deeplearning/nccl/#licenses |  | ||||||
|   # Follow build: https://github.com/NVIDIA/nccl/tree/master?tab=readme-ov-file#build |  | ||||||
|   git clone -b $NCCL_VERSION --depth 1 https://github.com/NVIDIA/nccl.git |  | ||||||
|   cd nccl && make -j src.build |  | ||||||
|   cp -a build/include/* /usr/local/cuda/include/ |  | ||||||
|   cp -a build/lib/* /usr/local/cuda/lib64/ |  | ||||||
|   cd .. |  | ||||||
|   rm -rf nccl |  | ||||||
|  |  | ||||||
|   install_cusparselt_063 |  | ||||||
|  |  | ||||||
|   ldconfig |  | ||||||
| } |  | ||||||
|  |  | ||||||
| function prune_118 { |  | ||||||
|     echo "Pruning CUDA 11.8 and cuDNN" |  | ||||||
|     ##################################################################################### |  | ||||||
|     # CUDA 11.8 prune static libs |  | ||||||
|     ##################################################################################### |  | ||||||
|     export NVPRUNE="/usr/local/cuda-11.8/bin/nvprune" |  | ||||||
|     export CUDA_LIB_DIR="/usr/local/cuda-11.8/lib64" |  | ||||||
|  |  | ||||||
|     export GENCODE="-gencode arch=compute_35,code=sm_35 -gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90" |  | ||||||
|     export GENCODE_CUDNN="-gencode arch=compute_35,code=sm_35 -gencode arch=compute_37,code=sm_37 -gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90" |  | ||||||
|  |  | ||||||
|     if [[ -n "$OVERRIDE_GENCODE" ]]; then |  | ||||||
|         export GENCODE=$OVERRIDE_GENCODE |  | ||||||
|     fi |  | ||||||
|  |  | ||||||
|     # all CUDA libs except CuDNN and CuBLAS (cudnn and cublas need arch 3.7 included) |  | ||||||
|     ls $CUDA_LIB_DIR/ | grep "\.a" | grep -v "culibos" | grep -v "cudart" | grep -v "cudnn" | grep -v "cublas" | grep -v "metis"  \ |  | ||||||
|       | xargs -I {} bash -c \ |  | ||||||
|                 "echo {} && $NVPRUNE $GENCODE $CUDA_LIB_DIR/{} -o $CUDA_LIB_DIR/{}" |  | ||||||
|  |  | ||||||
|     # prune CuDNN and CuBLAS |  | ||||||
|     $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublas_static.a -o $CUDA_LIB_DIR/libcublas_static.a |  | ||||||
|     $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublasLt_static.a -o $CUDA_LIB_DIR/libcublasLt_static.a |  | ||||||
|  |  | ||||||
|     ##################################################################################### |  | ||||||
|     # CUDA 11.8 prune visual tools |  | ||||||
|     ##################################################################################### |  | ||||||
|     export CUDA_BASE="/usr/local/cuda-11.8/" |  | ||||||
|     rm -rf $CUDA_BASE/libnvvp $CUDA_BASE/nsightee_plugins $CUDA_BASE/nsight-compute-2022.3.0 $CUDA_BASE/nsight-systems-2022.4.2/ |  | ||||||
| } |  | ||||||
|  |  | ||||||
| function prune_121 { |  | ||||||
|   echo "Pruning CUDA 12.1" |  | ||||||
|   ##################################################################################### |  | ||||||
|   # CUDA 12.1 prune static libs |  | ||||||
|   ##################################################################################### |  | ||||||
|     export NVPRUNE="/usr/local/cuda-12.1/bin/nvprune" |  | ||||||
|     export CUDA_LIB_DIR="/usr/local/cuda-12.1/lib64" |  | ||||||
|  |  | ||||||
|     export GENCODE="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90" |  | ||||||
|     export GENCODE_CUDNN="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90" |  | ||||||
|  |  | ||||||
|     if [[ -n "$OVERRIDE_GENCODE" ]]; then |  | ||||||
|         export GENCODE=$OVERRIDE_GENCODE |  | ||||||
|     fi |  | ||||||
|  |  | ||||||
|     # all CUDA libs except CuDNN and CuBLAS |  | ||||||
|     ls $CUDA_LIB_DIR/ | grep "\.a" | grep -v "culibos" | grep -v "cudart" | grep -v "cudnn" | grep -v "cublas" | grep -v "metis"  \ |  | ||||||
|       | xargs -I {} bash -c \ |  | ||||||
|                 "echo {} && $NVPRUNE $GENCODE $CUDA_LIB_DIR/{} -o $CUDA_LIB_DIR/{}" |  | ||||||
|  |  | ||||||
|     # prune CuDNN and CuBLAS |  | ||||||
|     $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublas_static.a -o $CUDA_LIB_DIR/libcublas_static.a |  | ||||||
|     $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublasLt_static.a -o $CUDA_LIB_DIR/libcublasLt_static.a |  | ||||||
|  |  | ||||||
|     ##################################################################################### |  | ||||||
|     # CUDA 12.1 prune visual tools |  | ||||||
|     ##################################################################################### |  | ||||||
|     export CUDA_BASE="/usr/local/cuda-12.1/" |  | ||||||
|     rm -rf $CUDA_BASE/libnvvp $CUDA_BASE/nsightee_plugins $CUDA_BASE/nsight-compute-2023.1.0 $CUDA_BASE/nsight-systems-2023.1.2/ |  | ||||||
| } |  | ||||||
|  |  | ||||||
| function prune_124 { |  | ||||||
|   echo "Pruning CUDA 12.4" |  | ||||||
|   ##################################################################################### |  | ||||||
|   # CUDA 12.4 prune static libs |  | ||||||
|   ##################################################################################### |  | ||||||
|   export NVPRUNE="/usr/local/cuda-12.4/bin/nvprune" |  | ||||||
|   export CUDA_LIB_DIR="/usr/local/cuda-12.4/lib64" |  | ||||||
|  |  | ||||||
|   export GENCODE="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90" |  | ||||||
|   export GENCODE_CUDNN="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90" |  | ||||||
|  |  | ||||||
|   if [[ -n "$OVERRIDE_GENCODE" ]]; then |  | ||||||
|       export GENCODE=$OVERRIDE_GENCODE |  | ||||||
|   fi |  | ||||||
|   if [[ -n "$OVERRIDE_GENCODE_CUDNN" ]]; then |  | ||||||
|       export GENCODE_CUDNN=$OVERRIDE_GENCODE_CUDNN |  | ||||||
|   fi |  | ||||||
|  |  | ||||||
|   # all CUDA libs except CuDNN and CuBLAS |  | ||||||
|   ls $CUDA_LIB_DIR/ | grep "\.a" | grep -v "culibos" | grep -v "cudart" | grep -v "cudnn" | grep -v "cublas" | grep -v "metis"  \ |  | ||||||
|       | xargs -I {} bash -c \ |  | ||||||
|                 "echo {} && $NVPRUNE $GENCODE $CUDA_LIB_DIR/{} -o $CUDA_LIB_DIR/{}" |  | ||||||
|  |  | ||||||
|   # prune CuDNN and CuBLAS |  | ||||||
|   $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublas_static.a -o $CUDA_LIB_DIR/libcublas_static.a |  | ||||||
|   $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublasLt_static.a -o $CUDA_LIB_DIR/libcublasLt_static.a |  | ||||||
|  |  | ||||||
|   ##################################################################################### |  | ||||||
|   # CUDA 12.4 prune visual tools |  | ||||||
|   ##################################################################################### |  | ||||||
|   export CUDA_BASE="/usr/local/cuda-12.4/" |  | ||||||
|   rm -rf $CUDA_BASE/libnvvp $CUDA_BASE/nsightee_plugins $CUDA_BASE/nsight-compute-2024.1.0 $CUDA_BASE/nsight-systems-2023.4.4/ |  | ||||||
| } |  | ||||||
|  |  | ||||||
| function prune_126 { |  | ||||||
|   echo "Pruning CUDA 12.6" |  | ||||||
|   ##################################################################################### |  | ||||||
|   # CUDA 12.6 prune static libs |  | ||||||
|   ##################################################################################### |  | ||||||
|   export NVPRUNE="/usr/local/cuda-12.6/bin/nvprune" |  | ||||||
|   export CUDA_LIB_DIR="/usr/local/cuda-12.6/lib64" |  | ||||||
|  |  | ||||||
|   export GENCODE="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90" |  | ||||||
|   export GENCODE_CUDNN="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90" |  | ||||||
|  |  | ||||||
|   if [[ -n "$OVERRIDE_GENCODE" ]]; then |  | ||||||
|       export GENCODE=$OVERRIDE_GENCODE |  | ||||||
|   fi |  | ||||||
|   if [[ -n "$OVERRIDE_GENCODE_CUDNN" ]]; then |  | ||||||
|       export GENCODE_CUDNN=$OVERRIDE_GENCODE_CUDNN |  | ||||||
|   fi |  | ||||||
|  |  | ||||||
|   # all CUDA libs except CuDNN and CuBLAS |  | ||||||
|   ls $CUDA_LIB_DIR/ | grep "\.a" | grep -v "culibos" | grep -v "cudart" | grep -v "cudnn" | grep -v "cublas" | grep -v "metis"  \ |  | ||||||
|       | xargs -I {} bash -c \ |  | ||||||
|                 "echo {} && $NVPRUNE $GENCODE $CUDA_LIB_DIR/{} -o $CUDA_LIB_DIR/{}" |  | ||||||
|  |  | ||||||
|   # prune CuDNN and CuBLAS |  | ||||||
|   $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublas_static.a -o $CUDA_LIB_DIR/libcublas_static.a |  | ||||||
|   $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublasLt_static.a -o $CUDA_LIB_DIR/libcublasLt_static.a |  | ||||||
|  |  | ||||||
|   ##################################################################################### |  | ||||||
|   # CUDA 12.6 prune visual tools |  | ||||||
|   ##################################################################################### |  | ||||||
|   export CUDA_BASE="/usr/local/cuda-12.6/" |  | ||||||
|   rm -rf $CUDA_BASE/libnvvp $CUDA_BASE/nsightee_plugins $CUDA_BASE/nsight-compute-2024.3.2 $CUDA_BASE/nsight-systems-2024.5.1/ |  | ||||||
| } |  | ||||||
|  |  | ||||||
| # idiomatic parameter and option handling in sh |  | ||||||
| while test $# -gt 0 |  | ||||||
| do |  | ||||||
|     case "$1" in |  | ||||||
|     11.8) install_118; prune_118 |  | ||||||
|         ;; |  | ||||||
|     12.1) install_121; prune_121 |  | ||||||
|         ;; |  | ||||||
|     12.4) install_124; prune_124 |  | ||||||
|         ;; |  | ||||||
|     12.6) install_126; prune_126 |  | ||||||
|         ;; |  | ||||||
|     *) echo "bad argument $1"; exit 1 |  | ||||||
|         ;; |  | ||||||
|     esac |  | ||||||
|     shift |  | ||||||
| done |  | ||||||
| @ -1,175 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
| # Script used only in CD pipeline |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| NCCL_VERSION=v2.21.5-1 |  | ||||||
| CUDNN_VERSION=9.5.1.17 |  | ||||||
|  |  | ||||||
| function install_cusparselt_062 { |  | ||||||
|     # cuSparseLt license: https://docs.nvidia.com/cuda/cusparselt/license.html |  | ||||||
|     mkdir tmp_cusparselt && pushd tmp_cusparselt |  | ||||||
|     wget -q https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-sbsa/libcusparse_lt-linux-sbsa-0.6.2.3-archive.tar.xz |  | ||||||
|     tar xf libcusparse_lt-linux-sbsa-0.6.2.3-archive.tar.xz |  | ||||||
|     cp -a libcusparse_lt-linux-sbsa-0.6.2.3-archive/include/* /usr/local/cuda/include/ |  | ||||||
|     cp -a libcusparse_lt-linux-sbsa-0.6.2.3-archive/lib/* /usr/local/cuda/lib64/ |  | ||||||
|     popd |  | ||||||
|     rm -rf tmp_cusparselt |  | ||||||
| } |  | ||||||
|  |  | ||||||
| function install_cusparselt_063 { |  | ||||||
|     # cuSparseLt license: https://docs.nvidia.com/cuda/cusparselt/license.html |  | ||||||
|     mkdir tmp_cusparselt && pushd tmp_cusparselt |  | ||||||
|     wget -q https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-sbsa/libcusparse_lt-linux-sbsa-0.6.3.2-archive.tar.xz |  | ||||||
|     tar xf libcusparse_lt-linux-sbsa-0.6.3.2-archive.tar.xz |  | ||||||
|     cp -a libcusparse_lt-linux-sbsa-0.6.3.2-archive/include/* /usr/local/cuda/include/ |  | ||||||
|     cp -a libcusparse_lt-linux-sbsa-0.6.3.2-archive/lib/* /usr/local/cuda/lib64/ |  | ||||||
|     popd |  | ||||||
|     rm -rf tmp_cusparselt |  | ||||||
| } |  | ||||||
|  |  | ||||||
| function install_124 { |  | ||||||
|   CUDNN_VERSION=9.1.0.70 |  | ||||||
|   echo "Installing CUDA 12.4.1 and cuDNN ${CUDNN_VERSION} and NCCL ${NCCL_VERSION} and cuSparseLt-0.6.2" |  | ||||||
|   rm -rf /usr/local/cuda-12.4 /usr/local/cuda |  | ||||||
|   # install CUDA 12.4.1 in the same container |  | ||||||
|   wget -q https://developer.download.nvidia.com/compute/cuda/12.4.1/local_installers/cuda_12.4.1_550.54.15_linux_sbsa.run |  | ||||||
|   chmod +x cuda_12.4.1_550.54.15_linux_sbsa.run |  | ||||||
|   ./cuda_12.4.1_550.54.15_linux_sbsa.run --toolkit --silent |  | ||||||
|   rm -f cuda_12.4.1_550.54.15_linux_sbsa.run |  | ||||||
|   rm -f /usr/local/cuda && ln -s /usr/local/cuda-12.4 /usr/local/cuda |  | ||||||
|  |  | ||||||
|   # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement |  | ||||||
|   mkdir tmp_cudnn && cd tmp_cudnn |  | ||||||
|   wget -q https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-sbsa/cudnn-linux-sbsa-${CUDNN_VERSION}_cuda12-archive.tar.xz -O cudnn-linux-sbsa-${CUDNN_VERSION}_cuda12-archive.tar.xz |  | ||||||
|   tar xf cudnn-linux-sbsa-${CUDNN_VERSION}_cuda12-archive.tar.xz |  | ||||||
|   cp -a cudnn-linux-sbsa-${CUDNN_VERSION}_cuda12-archive/include/* /usr/local/cuda/include/ |  | ||||||
|   cp -a cudnn-linux-sbsa-${CUDNN_VERSION}_cuda12-archive/lib/* /usr/local/cuda/lib64/ |  | ||||||
|   cd .. |  | ||||||
|   rm -rf tmp_cudnn |  | ||||||
|  |  | ||||||
|   # NCCL license: https://docs.nvidia.com/deeplearning/nccl/#licenses |  | ||||||
|   # Follow build: https://github.com/NVIDIA/nccl/tree/master?tab=readme-ov-file#build |  | ||||||
|   git clone -b ${NCCL_VERSION} --depth 1 https://github.com/NVIDIA/nccl.git |  | ||||||
|   cd nccl && make -j src.build |  | ||||||
|   cp -a build/include/* /usr/local/cuda/include/ |  | ||||||
|   cp -a build/lib/* /usr/local/cuda/lib64/ |  | ||||||
|   cd .. |  | ||||||
|   rm -rf nccl |  | ||||||
|  |  | ||||||
|   install_cusparselt_062 |  | ||||||
|  |  | ||||||
|   ldconfig |  | ||||||
| } |  | ||||||
|  |  | ||||||
| function prune_124 { |  | ||||||
|   echo "Pruning CUDA 12.4" |  | ||||||
|   ##################################################################################### |  | ||||||
|   # CUDA 12.4 prune static libs |  | ||||||
|   ##################################################################################### |  | ||||||
|   export NVPRUNE="/usr/local/cuda-12.4/bin/nvprune" |  | ||||||
|   export CUDA_LIB_DIR="/usr/local/cuda-12.4/lib64" |  | ||||||
|  |  | ||||||
|   export GENCODE="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90" |  | ||||||
|   export GENCODE_CUDNN="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90" |  | ||||||
|  |  | ||||||
|   if [[ -n "$OVERRIDE_GENCODE" ]]; then |  | ||||||
|       export GENCODE=$OVERRIDE_GENCODE |  | ||||||
|   fi |  | ||||||
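|   # Example (illustrative): exporting OVERRIDE_GENCODE="-gencode arch=compute_90,code=sm_90" |  | 
|   # before running would prune the static libraries below down to sm_90 code only. |  | 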
|  |  | ||||||
|   # all CUDA libs except CuDNN and CuBLAS |  | ||||||
|   ls $CUDA_LIB_DIR/ | grep "\.a" | grep -v "culibos" | grep -v "cudart" | grep -v "cudnn" | grep -v "cublas" | grep -v "metis"  \ |  | ||||||
|       | xargs -I {} bash -c \ |  | ||||||
|                 "echo {} && $NVPRUNE $GENCODE $CUDA_LIB_DIR/{} -o $CUDA_LIB_DIR/{}" |  | ||||||
|  |  | ||||||
|   # prune CuDNN and CuBLAS |  | ||||||
|   $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublas_static.a -o $CUDA_LIB_DIR/libcublas_static.a |  | ||||||
|   $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublasLt_static.a -o $CUDA_LIB_DIR/libcublasLt_static.a |  | ||||||
|  |  | ||||||
|   ##################################################################################### |  | ||||||
|   # CUDA 12.4 prune visual tools |  | ||||||
|   ##################################################################################### |  | ||||||
|   export CUDA_BASE="/usr/local/cuda-12.4/" |  | ||||||
|   rm -rf $CUDA_BASE/libnvvp $CUDA_BASE/nsightee_plugins $CUDA_BASE/nsight-compute-2024.1.0 $CUDA_BASE/nsight-systems-2023.4.4/ |  | ||||||
| } |  | ||||||
|  |  | ||||||
| function install_126 { |  | ||||||
|   echo "Installing CUDA 12.6.3 and cuDNN ${CUDNN_VERSION} and NCCL ${NCCL_VERSION} and cuSparseLt-0.6.3" |  | ||||||
|   rm -rf /usr/local/cuda-12.6 /usr/local/cuda |  | ||||||
|   # install CUDA 12.6.3 in the same container |  | ||||||
|   wget -q https://developer.download.nvidia.com/compute/cuda/12.6.3/local_installers/cuda_12.6.3_560.35.05_linux_sbsa.run |  | ||||||
|   chmod +x cuda_12.6.3_560.35.05_linux_sbsa.run |  | ||||||
|   ./cuda_12.6.3_560.35.05_linux_sbsa.run --toolkit --silent |  | ||||||
|   rm -f cuda_12.6.3_560.35.05_linux_sbsa.run |  | ||||||
|   rm -f /usr/local/cuda && ln -s /usr/local/cuda-12.6 /usr/local/cuda |  | ||||||
|  |  | ||||||
|   # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement |  | ||||||
|   mkdir tmp_cudnn && cd tmp_cudnn |  | ||||||
|   wget -q https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-sbsa/cudnn-linux-sbsa-${CUDNN_VERSION}_cuda12-archive.tar.xz -O cudnn-linux-sbsa-${CUDNN_VERSION}_cuda12-archive.tar.xz |  | ||||||
|   tar xf cudnn-linux-sbsa-${CUDNN_VERSION}_cuda12-archive.tar.xz |  | ||||||
|   cp -a cudnn-linux-sbsa-${CUDNN_VERSION}_cuda12-archive/include/* /usr/local/cuda/include/ |  | ||||||
|   cp -a cudnn-linux-sbsa-${CUDNN_VERSION}_cuda12-archive/lib/* /usr/local/cuda/lib64/ |  | ||||||
|   cd .. |  | ||||||
|   rm -rf tmp_cudnn |  | ||||||
|  |  | ||||||
|   # NCCL license: https://docs.nvidia.com/deeplearning/nccl/#licenses |  | ||||||
|   # Follow build: https://github.com/NVIDIA/nccl/tree/master?tab=readme-ov-file#build |  | ||||||
|   git clone -b ${NCCL_VERSION} --depth 1 https://github.com/NVIDIA/nccl.git |  | ||||||
|   cd nccl && make -j src.build |  | ||||||
|   cp -a build/include/* /usr/local/cuda/include/ |  | ||||||
|   cp -a build/lib/* /usr/local/cuda/lib64/ |  | ||||||
|   cd .. |  | ||||||
|   rm -rf nccl |  | ||||||
|  |  | ||||||
|   install_cusparselt_063 |  | ||||||
|  |  | ||||||
|   ldconfig |  | ||||||
| } |  | ||||||
|  |  | ||||||
| function prune_126 { |  | ||||||
|   echo "Pruning CUDA 12.6" |  | ||||||
|   ##################################################################################### |  | ||||||
|   # CUDA 12.6 prune static libs |  | ||||||
|   ##################################################################################### |  | ||||||
|   export NVPRUNE="/usr/local/cuda-12.6/bin/nvprune" |  | ||||||
|   export CUDA_LIB_DIR="/usr/local/cuda-12.6/lib64" |  | ||||||
|  |  | ||||||
|   export GENCODE="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90" |  | ||||||
|   export GENCODE_CUDNN="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90" |  | ||||||
|  |  | ||||||
|   if [[ -n "$OVERRIDE_GENCODE" ]]; then |  | ||||||
|       export GENCODE=$OVERRIDE_GENCODE |  | ||||||
|   fi |  | ||||||
|   if [[ -n "$OVERRIDE_GENCODE_CUDNN" ]]; then |  | ||||||
|       export GENCODE_CUDNN=$OVERRIDE_GENCODE_CUDNN |  | ||||||
|   fi |  | ||||||
|  |  | ||||||
|   # all CUDA libs except CuDNN and CuBLAS |  | ||||||
|   ls $CUDA_LIB_DIR/ | grep "\.a" | grep -v "culibos" | grep -v "cudart" | grep -v "cudnn" | grep -v "cublas" | grep -v "metis"  \ |  | ||||||
|       | xargs -I {} bash -c \ |  | ||||||
|                 "echo {} && $NVPRUNE $GENCODE $CUDA_LIB_DIR/{} -o $CUDA_LIB_DIR/{}" |  | ||||||
|  |  | ||||||
|   # prune CuDNN and CuBLAS |  | ||||||
|   $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublas_static.a -o $CUDA_LIB_DIR/libcublas_static.a |  | ||||||
|   $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublasLt_static.a -o $CUDA_LIB_DIR/libcublasLt_static.a |  | ||||||
|  |  | ||||||
|   ##################################################################################### |  | ||||||
|   # CUDA 12.6 prune visual tools |  | ||||||
|   ##################################################################################### |  | ||||||
|   export CUDA_BASE="/usr/local/cuda-12.6/" |  | ||||||
|   rm -rf $CUDA_BASE/libnvvp $CUDA_BASE/nsightee_plugins $CUDA_BASE/nsight-compute-2024.3.2 $CUDA_BASE/nsight-systems-2024.5.1/ |  | ||||||
| } |  | ||||||
|  |  | ||||||
| # idiomatic parameter and option handling in sh |  | ||||||
| while test $# -gt 0 |  | ||||||
| do |  | ||||||
|     case "$1" in |  | ||||||
|     12.4) install_124; prune_124 |  | ||||||
|         ;; |  | ||||||
|     12.6) install_126; prune_126 |  | ||||||
|         ;; |  | ||||||
|     *) echo "bad argument $1"; exit 1 |  | ||||||
|         ;; |  | ||||||
|     esac |  | ||||||
|     shift |  | ||||||
| done |  | ||||||
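|  |  | 
| # Usage sketch (the script name here is assumed for illustration): each positional |  | 
| # argument picks one toolkit to install and prune, e.g. |  | 
| #   bash install_cuda_aarch64.sh 12.4 12.6 |  | 
| # runs install_124/prune_124 and then install_126/prune_126. |  | 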
| @ -1,20 +1,20 @@ | |||||||
| #!/bin/bash | #!/bin/bash | ||||||
|  |  | ||||||
| if [[ -n "${CUDNN_VERSION}" ]]; then | if [[ ${CUDNN_VERSION} == 8 ]]; then | ||||||
|     # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement |     # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement | ||||||
|     mkdir tmp_cudnn |     mkdir tmp_cudnn | ||||||
|     pushd tmp_cudnn |     pushd tmp_cudnn | ||||||
|     if [[ ${CUDA_VERSION:0:4} == "12.6" ]]; then |     if [[ ${CUDA_VERSION:0:4} == "12.1" ]]; then | ||||||
|         CUDNN_NAME="cudnn-linux-x86_64-9.5.1.17_cuda12-archive" |         CUDNN_NAME="cudnn-linux-x86_64-8.9.2.26_cuda12-archive" | ||||||
|     elif [[ ${CUDA_VERSION:0:2} == "12" ]]; then |         curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/${CUDNN_NAME}.tar.xz | ||||||
|         CUDNN_NAME="cudnn-linux-x86_64-9.1.0.70_cuda12-archive" |     elif [[ ${CUDA_VERSION:0:4} == "11.8" ]]; then | ||||||
|     elif [[ ${CUDA_VERSION:0:2} == "11" ]]; then |         CUDNN_NAME="cudnn-linux-x86_64-8.7.0.84_cuda11-archive" | ||||||
|         CUDNN_NAME="cudnn-linux-x86_64-9.1.0.70_cuda11-archive" |         curl --retry 3 -OLs https://developer.download.nvidia.com/compute/redist/cudnn/v8.7.0/local_installers/11.8/${CUDNN_NAME}.tar.xz | ||||||
|     else |     else | ||||||
|         echo "Unsupported CUDA version ${CUDA_VERSION}" |         echo "Unsupported CUDA version ${CUDA_VERSION}" | 
|         exit 1 |         exit 1 | ||||||
|     fi |     fi | ||||||
|     curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/${CUDNN_NAME}.tar.xz |  | ||||||
|     tar xf ${CUDNN_NAME}.tar.xz |     tar xf ${CUDNN_NAME}.tar.xz | ||||||
|     cp -a ${CUDNN_NAME}/include/* /usr/local/cuda/include/ |     cp -a ${CUDNN_NAME}/include/* /usr/local/cuda/include/ | ||||||
|     cp -a ${CUDNN_NAME}/lib/* /usr/local/cuda/lib64/ |     cp -a ${CUDNN_NAME}/lib/* /usr/local/cuda/lib64/ | ||||||
|  | |||||||
| @ -1,25 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| # cudss license: https://docs.nvidia.com/cuda/cudss/license.html |  | ||||||
| mkdir tmp_cudss && cd tmp_cudss |  | ||||||
|  |  | ||||||
| if [[ ${CUDA_VERSION:0:4} =~ ^12\.[1-4]$ ]]; then |  | ||||||
|     arch_path='sbsa' |  | ||||||
|     export TARGETARCH=${TARGETARCH:-$(uname -m)} |  | ||||||
|     if [ ${TARGETARCH} = 'amd64' ] || [ "${TARGETARCH}" = 'x86_64' ]; then |  | ||||||
|         arch_path='x86_64' |  | ||||||
|     fi |  | ||||||
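|     # Illustration: with TARGETARCH unset on an x86_64 host, uname -m yields x86_64 and |  | 
|     # arch_path becomes x86_64; any other value (e.g. arm64) keeps the default sbsa path. |  | 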
|     CUDSS_NAME="libcudss-linux-${arch_path}-0.3.0.9_cuda12-archive" |  | ||||||
|     curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cudss/redist/libcudss/linux-${arch_path}/${CUDSS_NAME}.tar.xz |  | ||||||
|  |  | ||||||
|     # only for cuda 12 |  | ||||||
|     tar xf ${CUDSS_NAME}.tar.xz |  | ||||||
|     cp -a ${CUDSS_NAME}/include/* /usr/local/cuda/include/ |  | ||||||
|     cp -a ${CUDSS_NAME}/lib/* /usr/local/cuda/lib64/ |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| cd .. |  | ||||||
| rm -rf tmp_cudss |  | ||||||
| ldconfig |  | ||||||
| @ -5,22 +5,9 @@ set -ex | |||||||
| # cuSPARSELt license: https://docs.nvidia.com/cuda/cusparselt/license.html | # cuSPARSELt license: https://docs.nvidia.com/cuda/cusparselt/license.html | ||||||
| mkdir tmp_cusparselt && cd tmp_cusparselt | mkdir tmp_cusparselt && cd tmp_cusparselt | ||||||
|  |  | ||||||
| if [[ ${CUDA_VERSION:0:4} =~ ^12\.[2-6]$ ]]; then | if [[ ${CUDA_VERSION:0:4} == "12.1" ]]; then | ||||||
|     arch_path='sbsa' |     CUSPARSELT_NAME="libcusparse_lt-linux-x86_64-0.5.2.1-archive" | ||||||
|     export TARGETARCH=${TARGETARCH:-$(uname -m)} |     curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-x86_64/${CUSPARSELT_NAME}.tar.xz | ||||||
|     if [ ${TARGETARCH} = 'amd64' ] || [ "${TARGETARCH}" = 'x86_64' ]; then |  | ||||||
|         arch_path='x86_64' |  | ||||||
|     fi |  | ||||||
|     CUSPARSELT_NAME="libcusparse_lt-linux-${arch_path}-0.6.2.3-archive" |  | ||||||
|     curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-${arch_path}/${CUSPARSELT_NAME}.tar.xz |  | ||||||
| elif [[ ${CUDA_VERSION:0:4} == "12.1" ]]; then |  | ||||||
|     arch_path='sbsa' |  | ||||||
|     export TARGETARCH=${TARGETARCH:-$(uname -m)} |  | ||||||
|     if [ ${TARGETARCH} = 'amd64' ] || [ "${TARGETARCH}" = 'x86_64' ]; then |  | ||||||
|         arch_path='x86_64' |  | ||||||
|     fi |  | ||||||
|     CUSPARSELT_NAME="libcusparse_lt-linux-${arch_path}-0.5.2.1-archive" |  | ||||||
|     curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-${arch_path}/${CUSPARSELT_NAME}.tar.xz |  | ||||||
| elif [[ ${CUDA_VERSION:0:4} == "11.8" ]]; then | elif [[ ${CUDA_VERSION:0:4} == "11.8" ]]; then | ||||||
|     CUSPARSELT_NAME="libcusparse_lt-linux-x86_64-0.4.0.7-archive" |     CUSPARSELT_NAME="libcusparse_lt-linux-x86_64-0.4.0.7-archive" | ||||||
|     curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-x86_64/${CUSPARSELT_NAME}.tar.xz |     curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-x86_64/${CUSPARSELT_NAME}.tar.xz | ||||||
|  | |||||||
| @ -4,6 +4,11 @@ set -ex | |||||||
|  |  | ||||||
| install_ubuntu() { | install_ubuntu() { | ||||||
|   apt-get update |   apt-get update | ||||||
|  |   apt-get install -y --no-install-recommends \ | ||||||
|  |           libhiredis-dev \ | ||||||
|  |           libleveldb-dev \ | ||||||
|  |           liblmdb-dev \ | ||||||
|  |           libsnappy-dev | ||||||
|  |  | ||||||
|   # Cleanup |   # Cleanup | ||||||
|   apt-get autoclean && apt-get clean |   apt-get autoclean && apt-get clean | ||||||
| @ -15,6 +20,12 @@ install_centos() { | |||||||
|   # See http://fedoraproject.org/wiki/EPEL |   # See http://fedoraproject.org/wiki/EPEL | ||||||
|   yum --enablerepo=extras install -y epel-release |   yum --enablerepo=extras install -y epel-release | ||||||
|  |  | ||||||
|  |   yum install -y \ | ||||||
|  |       hiredis-devel \ | ||||||
|  |       leveldb-devel \ | ||||||
|  |       lmdb-devel \ | ||||||
|  |       snappy-devel | ||||||
|  |  | ||||||
|   # Cleanup |   # Cleanup | ||||||
|   yum clean all |   yum clean all | ||||||
|   rm -rf /var/cache/yum |   rm -rf /var/cache/yum | ||||||
|  | |||||||
| @ -36,19 +36,21 @@ install_conda_dependencies() { | |||||||
| } | } | ||||||
|  |  | ||||||
| install_pip_dependencies() { | install_pip_dependencies() { | ||||||
|   pushd executorch |   pushd executorch/.ci/docker | ||||||
|   as_jenkins bash install_requirements.sh --pybind xnnpack |   # Install all Python dependencies | ||||||
|  |   pip_install -r requirements-ci.txt | ||||||
|   popd |   popd | ||||||
| } | } | ||||||
|  |  | ||||||
| setup_executorch() { | setup_executorch() { | ||||||
|   pushd executorch |   pushd executorch | ||||||
|  |   source .ci/scripts/utils.sh | ||||||
|  |  | ||||||
|   export PYTHON_EXECUTABLE=python |   install_flatc_from_source | ||||||
|   export EXECUTORCH_BUILD_PYBIND=ON |   pip_install . | ||||||
|   export CMAKE_ARGS="-DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON" |  | ||||||
|  |  | ||||||
|   as_jenkins .ci/scripts/setup-linux.sh cmake || true |   # Make sure that all the newly generate files are owned by Jenkins | ||||||
|  |   chown -R jenkins . | ||||||
|   popd |   popd | ||||||
| } | } | ||||||
|  |  | ||||||
|  | |||||||
| @ -1,46 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh" |  | ||||||
|  |  | ||||||
| COMMIT=$(get_pinned_commit halide) |  | ||||||
| test -n "$COMMIT" |  | ||||||
|  |  | ||||||
| # activate conda to populate CONDA_PREFIX |  | ||||||
| test -n "$ANACONDA_PYTHON_VERSION" |  | ||||||
| eval "$(conda shell.bash hook)" |  | ||||||
| conda activate py_$ANACONDA_PYTHON_VERSION |  | ||||||
|  |  | ||||||
| if [ -n "${UBUNTU_VERSION}" ];then |  | ||||||
|     apt update |  | ||||||
|     apt-get install -y lld liblld-15-dev libpng-dev libjpeg-dev libgl-dev \ |  | ||||||
|                   libopenblas-dev libeigen3-dev libatlas-base-dev libzstd-dev |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| conda_install numpy scipy imageio cmake ninja |  | ||||||
|  |  | ||||||
| git clone --depth 1 --branch release/16.x --recursive https://github.com/llvm/llvm-project.git |  | ||||||
| cmake -DCMAKE_BUILD_TYPE=Release \ |  | ||||||
|         -DLLVM_ENABLE_PROJECTS="clang" \ |  | ||||||
|         -DLLVM_TARGETS_TO_BUILD="X86;NVPTX" \ |  | ||||||
|         -DLLVM_ENABLE_TERMINFO=OFF -DLLVM_ENABLE_ASSERTIONS=ON \ |  | ||||||
|         -DLLVM_ENABLE_EH=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_BUILD_32_BITS=OFF \ |  | ||||||
|         -S llvm-project/llvm -B llvm-build -G Ninja |  | ||||||
| cmake --build llvm-build |  | ||||||
| cmake --install llvm-build --prefix llvm-install |  | ||||||
| export LLVM_ROOT=`pwd`/llvm-install |  | ||||||
| export LLVM_CONFIG=$LLVM_ROOT/bin/llvm-config |  | ||||||
|  |  | ||||||
| git clone https://github.com/halide/Halide.git |  | ||||||
| pushd Halide |  | ||||||
| git checkout ${COMMIT} && git submodule update --init --recursive |  | ||||||
| pip_install -r requirements.txt |  | ||||||
| cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -S . -B build |  | ||||||
| cmake --build build |  | ||||||
| test -e ${CONDA_PREFIX}/lib/python3 || ln -s python${ANACONDA_PYTHON_VERSION} ${CONDA_PREFIX}/lib/python3 |  | ||||||
| cmake --install build --prefix ${CONDA_PREFIX} |  | ||||||
| chown -R jenkins ${CONDA_PREFIX} |  | ||||||
| popd |  | ||||||
| rm -rf Halide llvm-build llvm-project llvm-install |  | ||||||
|  |  | ||||||
| python -c "import halide"  # check for errors |  | ||||||
| @ -7,20 +7,14 @@ source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh" | |||||||
| function install_huggingface() { | function install_huggingface() { | ||||||
|   local version |   local version | ||||||
|   commit=$(get_pinned_commit huggingface) |   commit=$(get_pinned_commit huggingface) | ||||||
|  |   pip_install pandas==2.0.3 | ||||||
|   pip_install "git+https://github.com/huggingface/transformers@${commit}" |   pip_install "git+https://github.com/huggingface/transformers@${commit}" | ||||||
| } | } | ||||||
|  |  | ||||||
| function install_timm() { | function install_timm() { | ||||||
|   local commit |   local commit | ||||||
|   commit=$(get_pinned_commit timm) |   commit=$(get_pinned_commit timm) | ||||||
|  |   pip_install pandas==2.0.3 | ||||||
|   # TODO (huydhn): There is no torchvision release for Python 3.13 at the time of writing, so |  | 
|   # I'm using the nightly build here instead. We just need the package to be able to install |  | 
|   # TIMM. Remove this once torchvision has a release for 3.13 |  | 
|   if [[ "${ANACONDA_PYTHON_VERSION}" == "3.13" ]]; then |  | ||||||
|     pip_install --pre torch torchvision --index-url https://download.pytorch.org/whl/nightly/cu124 |  | ||||||
|   fi |  | ||||||
|  |  | ||||||
|   pip_install "git+https://github.com/huggingface/pytorch-image-models@${commit}" |   pip_install "git+https://github.com/huggingface/pytorch-image-models@${commit}" | ||||||
|   # Clean up |   # Clean up | ||||||
|   conda_run pip uninstall -y cmake torch torchvision triton |   conda_run pip uninstall -y cmake torch torchvision triton | ||||||
|  | |||||||
| @ -1,23 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
| # Script used only in CD pipeline |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| LIBPNG_VERSION=1.6.37 |  | ||||||
|  |  | ||||||
| mkdir -p libpng |  | ||||||
| pushd libpng |  | ||||||
|  |  | ||||||
| wget http://download.sourceforge.net/libpng/libpng-$LIBPNG_VERSION.tar.gz |  | ||||||
| tar -xvzf libpng-$LIBPNG_VERSION.tar.gz |  | ||||||
|  |  | ||||||
| pushd libpng-$LIBPNG_VERSION |  | ||||||
|  |  | ||||||
| ./configure |  | ||||||
| make |  | ||||||
| make install |  | ||||||
|  |  | ||||||
| popd |  | ||||||
|  |  | ||||||
| popd |  | ||||||
| rm -rf libpng |  | ||||||
| @ -1,27 +0,0 @@ | |||||||
| #!/usr/bin/env bash |  | ||||||
| # Script used only in CD pipeline |  | ||||||
|  |  | ||||||
| set -eou pipefail |  | ||||||
|  |  | ||||||
| function do_install() { |  | ||||||
|     cuda_version=$1 |  | ||||||
|     cuda_version_nodot=${1/./} |  | ||||||
|  |  | ||||||
|     MAGMA_VERSION="2.6.1" |  | ||||||
|     magma_archive="magma-cuda${cuda_version_nodot}-${MAGMA_VERSION}-1.tar.bz2" |  | ||||||
|  |  | ||||||
|     cuda_dir="/usr/local/cuda-${cuda_version}" |  | ||||||
|     ( |  | ||||||
|         set -x |  | ||||||
|         tmp_dir=$(mktemp -d) |  | ||||||
|         pushd ${tmp_dir} |  | ||||||
|         curl -OLs https://ossci-linux.s3.us-east-1.amazonaws.com/${magma_archive} |  | ||||||
|         tar -xvf "${magma_archive}" |  | ||||||
|         mkdir -p "${cuda_dir}/magma" |  | ||||||
|         mv include "${cuda_dir}/magma/include" |  | ||||||
|         mv lib "${cuda_dir}/magma/lib" |  | ||||||
|         popd |  | ||||||
|     ) |  | ||||||
| } |  | ||||||
|  |  | ||||||
| do_install $1 |  | ||||||
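|  |  | 
| # Worked example (illustrative): `bash <this script> 12.4` sets cuda_version_nodot=124, |  | 
| # fetches magma-cuda124-2.6.1-1.tar.bz2 from the ossci-linux bucket and unpacks it |  | 
| # into /usr/local/cuda-12.4/magma/{include,lib}. |  | 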
| @ -1,26 +0,0 @@ | |||||||
| #!/usr/bin/env bash |  | ||||||
| # Script that replaces the magma install from a conda package |  | ||||||
|  |  | ||||||
| set -eou pipefail |  | ||||||
|  |  | ||||||
| function do_install() { |  | ||||||
|     cuda_version_nodot=${1/./} |  | ||||||
|     anaconda_python_version=$2 |  | ||||||
|  |  | ||||||
|     MAGMA_VERSION="2.6.1" |  | ||||||
|     magma_archive="magma-cuda${cuda_version_nodot}-${MAGMA_VERSION}-1.tar.bz2" |  | ||||||
|  |  | ||||||
|     anaconda_dir="/opt/conda/envs/py_${anaconda_python_version}" |  | ||||||
|     ( |  | ||||||
|         set -x |  | ||||||
|         tmp_dir=$(mktemp -d) |  | ||||||
|         pushd ${tmp_dir} |  | ||||||
|         curl -OLs https://ossci-linux.s3.us-east-1.amazonaws.com/${magma_archive} |  | ||||||
|         tar -xvf "${magma_archive}" |  | ||||||
|         mv include/* "${anaconda_dir}/include/" |  | ||||||
|         mv lib/* "${anaconda_dir}/lib" |  | ||||||
|         popd |  | ||||||
|     ) |  | ||||||
| } |  | ||||||
|  |  | ||||||
| do_install $1 $2 |  | ||||||
| @ -1,129 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
| # Script used only in CD pipeline |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| ROCM_VERSION=$1 |  | ||||||
|  |  | ||||||
| if [[ -z $ROCM_VERSION ]]; then |  | ||||||
|     echo "missing ROCM_VERSION" |  | ||||||
|     exit 1; |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| IS_UBUNTU=0 |  | ||||||
| ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"') |  | ||||||
| case "$ID" in |  | ||||||
|   ubuntu) |  | ||||||
|     IS_UBUNTU=1 |  | ||||||
|     ;; |  | ||||||
|   centos|almalinux) |  | ||||||
|     IS_UBUNTU=0 |  | ||||||
|     ;; |  | ||||||
|   *) |  | ||||||
|     echo "Unable to determine OS..." |  | ||||||
|     exit 1 |  | ||||||
|     ;; |  | ||||||
| esac |  | ||||||
|  |  | ||||||
| # To make version comparison easier, create an integer representation. |  | ||||||
| save_IFS="$IFS" |  | ||||||
| IFS=. ROCM_VERSION_ARRAY=(${ROCM_VERSION}) |  | ||||||
| IFS="$save_IFS" |  | ||||||
| if [[ ${#ROCM_VERSION_ARRAY[@]} == 2 ]]; then |  | ||||||
|     ROCM_VERSION_MAJOR=${ROCM_VERSION_ARRAY[0]} |  | ||||||
|     ROCM_VERSION_MINOR=${ROCM_VERSION_ARRAY[1]} |  | ||||||
|     ROCM_VERSION_PATCH=0 |  | ||||||
| elif [[ ${#ROCM_VERSION_ARRAY[@]} == 3 ]]; then |  | ||||||
|     ROCM_VERSION_MAJOR=${ROCM_VERSION_ARRAY[0]} |  | ||||||
|     ROCM_VERSION_MINOR=${ROCM_VERSION_ARRAY[1]} |  | ||||||
|     ROCM_VERSION_PATCH=${ROCM_VERSION_ARRAY[2]} |  | ||||||
| else |  | ||||||
|     echo "Unhandled ROCM_VERSION ${ROCM_VERSION}" |  | ||||||
|     exit 1 |  | ||||||
| fi |  | ||||||
| ROCM_INT=$(($ROCM_VERSION_MAJOR * 10000 + $ROCM_VERSION_MINOR * 100 + $ROCM_VERSION_PATCH)) |  | ||||||
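| # Worked example: ROCM_VERSION=6.2 gives ROCM_INT=60200 and ROCM_VERSION=6.2.1 gives 60201, |  | 
| # so the range checks below (60200 <= ROCM_INT < 60204) are plain integer comparisons. |  | 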
|  |  | ||||||
| # Function to retry functions that sometimes timeout or have flaky failures |  | ||||||
| retry () { |  | ||||||
|     $*  || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*) |  | ||||||
| } |  | ||||||
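| # e.g. `retry apt-get update` would make up to five attempts with 1/2/4/8-second back-off (illustrative) |  | 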
|  |  | ||||||
| # Build custom MIOpen to use comgr for offline compilation. |  | ||||||
|  |  | ||||||
| ## Need a sanitized ROCM_VERSION without the patch level; a ".0" patch level must be appended to install paths when it is missing. |  | 
| ROCM_DOTS=$(echo ${ROCM_VERSION} | tr -d -c '.' | wc -c) |  | ||||||
| if [[ ${ROCM_DOTS} == 1 ]]; then |  | ||||||
|     ROCM_VERSION_NOPATCH="${ROCM_VERSION}" |  | ||||||
|     ROCM_INSTALL_PATH="/opt/rocm-${ROCM_VERSION}.0" |  | ||||||
| else |  | ||||||
|     ROCM_VERSION_NOPATCH="${ROCM_VERSION%.*}" |  | ||||||
|     ROCM_INSTALL_PATH="/opt/rocm-${ROCM_VERSION}" |  | ||||||
| fi |  | ||||||
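| # Illustration: ROCM_VERSION=6.2 gives ROCM_VERSION_NOPATCH=6.2 and ROCM_INSTALL_PATH=/opt/rocm-6.2.0, |  | 
| # while ROCM_VERSION=6.2.1 gives ROCM_VERSION_NOPATCH=6.2 and ROCM_INSTALL_PATH=/opt/rocm-6.2.1. |  | 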
|  |  | ||||||
| MIOPEN_CMAKE_COMMON_FLAGS=" |  | ||||||
| -DMIOPEN_USE_COMGR=ON |  | ||||||
| -DMIOPEN_BUILD_DRIVER=OFF |  | ||||||
| " |  | ||||||
| if [[ $ROCM_INT -ge 60200 ]] && [[ $ROCM_INT -lt 60204 ]]; then |  | ||||||
|     MIOPEN_BRANCH="release/rocm-rel-6.2-staging" |  | ||||||
| else |  | ||||||
|     echo "ROCm ${ROCM_VERSION} does not need any patches, do not build from source" |  | ||||||
|     exit 0 |  | ||||||
| fi |  | ||||||
|  |  | ||||||
|  |  | ||||||
| if [[ ${IS_UBUNTU} == 1 ]]; then |  | ||||||
|   apt-get remove -y miopen-hip |  | ||||||
| else |  | ||||||
|   # Workaround since almalinux manylinux image already has this and cget doesn't like that |  | ||||||
|   rm -rf /usr/local/lib/pkgconfig/sqlite3.pc |  | ||||||
|  |  | ||||||
|   # Versioned package name needs regex match |  | ||||||
|   # Use --noautoremove to prevent other rocm packages from being uninstalled |  | ||||||
|   yum remove -y miopen-hip* --noautoremove |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| git clone https://github.com/ROCm/MIOpen -b ${MIOPEN_BRANCH} |  | ||||||
| pushd MIOpen |  | ||||||
| # remove .git to save disk space since CI runner was running out |  | ||||||
| rm -rf .git |  | ||||||
| # Don't build CK to save docker build time |  | ||||||
| sed -i '/composable_kernel/d' requirements.txt |  | ||||||
| ## MIOpen minimum requirements |  | ||||||
| cmake -P install_deps.cmake --minimum |  | ||||||
|  |  | ||||||
| # clean up since CI runner was running out of disk space |  | ||||||
| rm -rf /tmp/* |  | ||||||
| if [[ ${IS_UBUNTU} == 1 ]]; then |  | ||||||
|   apt-get autoclean && apt-get clean |  | ||||||
|   rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* |  | ||||||
| else |  | ||||||
|   yum clean all |  | ||||||
|   rm -rf /var/cache/yum |  | ||||||
|   rm -rf /var/lib/yum/yumdb |  | ||||||
|   rm -rf /var/lib/yum/history |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| ## Build MIOpen |  | ||||||
| mkdir -p build |  | ||||||
| cd build |  | ||||||
| PKG_CONFIG_PATH=/usr/local/lib/pkgconfig CXX=${ROCM_INSTALL_PATH}/llvm/bin/clang++ cmake .. \ |  | ||||||
|     ${MIOPEN_CMAKE_COMMON_FLAGS} \ |  | ||||||
|     ${MIOPEN_CMAKE_DB_FLAGS} \ |  | ||||||
|     -DCMAKE_PREFIX_PATH="${ROCM_INSTALL_PATH}" |  | ||||||
| make MIOpen -j $(nproc) |  | ||||||
|  |  | ||||||
| # Build MIOpen package |  | ||||||
| make -j $(nproc) package |  | ||||||
|  |  | ||||||
| # clean up since CI runner was running out of disk space |  | ||||||
| rm -rf /usr/local/cget |  | ||||||
|  |  | ||||||
| if [[ ${IS_UBUNTU} == 1 ]]; then |  | ||||||
|   sudo dpkg -i miopen-hip*.deb |  | ||||||
| else |  | ||||||
|   yum install -y miopen-*.rpm |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| popd |  | ||||||
| rm -rf MIOpen |  | ||||||
| @ -1,16 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| # MKL |  | ||||||
| MKL_VERSION=2024.2.0 |  | ||||||
|  |  | ||||||
| MKLROOT=/opt/intel |  | ||||||
| mkdir -p ${MKLROOT} |  | ||||||
| pushd /tmp |  | ||||||
|  |  | ||||||
| python3 -mpip install wheel |  | ||||||
| python3 -mpip download -d . mkl-static==${MKL_VERSION} |  | ||||||
| python3 -m wheel unpack mkl_static-${MKL_VERSION}-py2.py3-none-manylinux1_x86_64.whl |  | ||||||
| python3 -m wheel unpack mkl_include-${MKL_VERSION}-py2.py3-none-manylinux1_x86_64.whl |  | ||||||
| mv mkl_static-${MKL_VERSION}/mkl_static-${MKL_VERSION}.data/data/lib ${MKLROOT} |  | ||||||
| mv mkl_include-${MKL_VERSION}/mkl_include-${MKL_VERSION}.data/data/include ${MKLROOT} |  | ||||||
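|  |  | 
| # Intended layout after the moves above (sketch): /opt/intel/lib and /opt/intel/include |  | 
| # hold the static MKL libraries and headers, so builds can point MKLROOT=/opt/intel. |  | 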
| @ -1,13 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
| # Script used only in CD pipeline |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| mkdir -p /usr/local/mnist/ |  | ||||||
|  |  | ||||||
| cd /usr/local/mnist |  | ||||||
|  |  | ||||||
| for img in train-images-idx3-ubyte.gz train-labels-idx1-ubyte.gz t10k-images-idx3-ubyte.gz t10k-labels-idx1-ubyte.gz; do |  | ||||||
|   wget -q https://ossci-datasets.s3.amazonaws.com/mnist/$img |  | ||||||
|   gzip -d $img |  | ||||||
| done |  | ||||||
| @ -1,20 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| function install_nvpl { |  | ||||||
|  |  | ||||||
|     mkdir -p /opt/nvpl/lib /opt/nvpl/include |  | ||||||
|  |  | ||||||
|     wget https://developer.download.nvidia.com/compute/nvpl/redist/nvpl_blas/linux-sbsa/nvpl_blas-linux-sbsa-0.3.0-archive.tar.xz |  | ||||||
|     tar xf nvpl_blas-linux-sbsa-0.3.0-archive.tar.xz |  | ||||||
|     cp -r nvpl_blas-linux-sbsa-0.3.0-archive/lib/* /opt/nvpl/lib/ |  | ||||||
|     cp -r nvpl_blas-linux-sbsa-0.3.0-archive/include/* /opt/nvpl/include/ |  | ||||||
|  |  | ||||||
|     wget https://developer.download.nvidia.com/compute/nvpl/redist/nvpl_lapack/linux-sbsa/nvpl_lapack-linux-sbsa-0.2.3.1-archive.tar.xz |  | ||||||
|     tar xf nvpl_lapack-linux-sbsa-0.2.3.1-archive.tar.xz |  | ||||||
|     cp -r nvpl_lapack-linux-sbsa-0.2.3.1-archive/lib/* /opt/nvpl/lib/ |  | ||||||
|     cp -r nvpl_lapack-linux-sbsa-0.2.3.1-archive/include/* /opt/nvpl/include/ |  | ||||||
| } |  | ||||||
|  |  | ||||||
| install_nvpl |  | ||||||
| @ -15,7 +15,7 @@ pip_install \ | |||||||
|   flatbuffers==2.0 \ |   flatbuffers==2.0 \ | ||||||
|   mock==5.0.1 \ |   mock==5.0.1 \ | ||||||
|   ninja==1.10.2 \ |   ninja==1.10.2 \ | ||||||
|   networkx==2.5 \ |   networkx==2.0 \ | ||||||
|   numpy==1.24.2 |   numpy==1.24.2 | ||||||
|  |  | ||||||
| # ONNXRuntime should be installed before installing | # ONNXRuntime should be installed before installing | ||||||
| @ -30,16 +30,15 @@ pip_install \ | |||||||
|  |  | ||||||
| pip_install coloredlogs packaging | pip_install coloredlogs packaging | ||||||
|  |  | ||||||
| pip_install onnxruntime==1.18.1 | pip_install onnxruntime==1.17.0 | ||||||
| pip_install onnx==1.16.2 | pip_install onnx==1.15.0 | ||||||
| pip_install onnxscript==0.1.0.dev20241124 --no-deps | # pip_install "onnxscript@git+https://github.com/microsoft/onnxscript@3e869ef8ccf19b5ebd21c10d3e9c267c9a9fa729" --no-deps | ||||||
| # required by onnxscript | pip_install onnxscript==0.1.0.dev20240301 --no-deps | ||||||
| pip_install ml_dtypes |  | ||||||
|  |  | ||||||
| # Cache the transformers model to be used later by ONNX tests. We need to run the transformers | # Cache the transformers model to be used later by ONNX tests. We need to run the transformers | ||||||
| # package to download the model. By default, the model is cached at ~/.cache/huggingface/hub/ | # package to download the model. By default, the model is cached at ~/.cache/huggingface/hub/ | ||||||
| IMPORT_SCRIPT_FILENAME="/tmp/onnx_import_script.py" | IMPORT_SCRIPT_FILENAME="/tmp/onnx_import_script.py" | ||||||
| as_jenkins echo 'import transformers; transformers.AutoModel.from_pretrained("sshleifer/tiny-gpt2"); transformers.AutoTokenizer.from_pretrained("sshleifer/tiny-gpt2"); transformers.AutoModelForSpeechSeq2Seq.from_pretrained("openai/whisper-large-v3");' > "${IMPORT_SCRIPT_FILENAME}" | as_jenkins echo 'import transformers; transformers.AutoModel.from_pretrained("sshleifer/tiny-gpt2"); transformers.AutoTokenizer.from_pretrained("sshleifer/tiny-gpt2");' > "${IMPORT_SCRIPT_FILENAME}" | ||||||
|  |  | ||||||
| # Need a PyTorch version for transformers to work | # Need a PyTorch version for transformers to work | ||||||
| pip_install --pre torch --index-url https://download.pytorch.org/whl/nightly/cpu | pip_install --pre torch --index-url https://download.pytorch.org/whl/nightly/cpu | ||||||
|  | |||||||
| @ -1,22 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
| # Script used only in CD pipeline |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| cd / |  | ||||||
| git clone https://github.com/OpenMathLib/OpenBLAS.git -b v0.3.28 --depth 1 --shallow-submodules |  | ||||||
|  |  | ||||||
|  |  | ||||||
| OPENBLAS_BUILD_FLAGS=" |  | ||||||
| NUM_THREADS=128 |  | ||||||
| USE_OPENMP=1 |  | ||||||
| NO_SHARED=0 |  | ||||||
| DYNAMIC_ARCH=1 |  | ||||||
| TARGET=ARMV8 |  | ||||||
| CFLAGS=-O3 |  | ||||||
| " |  | ||||||
|  |  | ||||||
| OPENBLAS_CHECKOUT_DIR="OpenBLAS" |  | ||||||
|  |  | ||||||
| make -j8 ${OPENBLAS_BUILD_FLAGS} -C ${OPENBLAS_CHECKOUT_DIR} |  | ||||||
| make -j8 ${OPENBLAS_BUILD_FLAGS} install -C ${OPENBLAS_CHECKOUT_DIR} |  | ||||||
| @ -1,16 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
| # Script used only in CD pipeline |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| # Pin the version to the latest release, 0.17.2; building a newer commit starts |  | 
| # to fail on the current image |  | 
| git clone -b 0.17.2 --single-branch https://github.com/NixOS/patchelf |  | ||||||
| cd patchelf |  | ||||||
| sed -i 's/serial/parallel/g' configure.ac |  | ||||||
| ./bootstrap.sh |  | ||||||
| ./configure |  | ||||||
| make |  | ||||||
| make install |  | ||||||
| cd .. |  | ||||||
| rm -rf patchelf |  | ||||||
| @ -11,8 +11,7 @@ mkdir -p $pb_dir | |||||||
| ln -s /usr/lib64 "$pb_dir/lib64" | ln -s /usr/lib64 "$pb_dir/lib64" | ||||||
|  |  | ||||||
| curl -LO "https://github.com/protocolbuffers/protobuf/releases/download/v3.17.3/protobuf-all-3.17.3.tar.gz" --retry 3 | curl -LO "https://github.com/protocolbuffers/protobuf/releases/download/v3.17.3/protobuf-all-3.17.3.tar.gz" --retry 3 | ||||||
|  | tar -xvz -C "$pb_dir" --strip-components 1 -f protobuf-all-3.17.3.tar.gz | ||||||
| tar -xvz --no-same-owner -C "$pb_dir" --strip-components 1 -f protobuf-all-3.17.3.tar.gz |  | ||||||
| NPROC=$[$(nproc) - 2] | NPROC=$[$(nproc) - 2] | ||||||
| pushd "$pb_dir" && ./configure && make -j${NPROC} && make -j${NPROC} check && sudo make -j${NRPOC} install && sudo ldconfig | pushd "$pb_dir" && ./configure && make -j${NPROC} && make -j${NPROC} check && sudo make -j${NRPOC} install && sudo ldconfig | ||||||
| popd | popd | ||||||
|  | |||||||
| @ -6,6 +6,9 @@ ver() { | |||||||
|     printf "%3d%03d%03d%03d" $(echo "$1" | tr '.' ' '); |     printf "%3d%03d%03d%03d" $(echo "$1" | tr '.' ' '); | ||||||
| } | } | ||||||
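| # e.g. ver 5.3 prints "  5003000000" and ver 4.5 prints "  4005000000" (illustrative), so dotted versions compare as plain integers | # e.g. ver 5.3 prints "  5003000000" and ver 4.5 prints "  4005000000" (illustrative), so dotted versions compare as plain integers | 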
|  |  | ||||||
|  | # Map ROCm version to AMDGPU version | ||||||
|  | declare -A AMDGPU_VERSIONS=( ["5.0"]="21.50" ["5.1.1"]="22.10.1" ["5.2"]="22.20" ) | ||||||
|  |  | ||||||
| install_ubuntu() { | install_ubuntu() { | ||||||
|     apt-get update |     apt-get update | ||||||
|     if [[ $UBUNTU_VERSION == 18.04 ]]; then |     if [[ $UBUNTU_VERSION == 18.04 ]]; then | ||||||
| @ -23,14 +26,31 @@ install_ubuntu() { | |||||||
|     apt-get install -y libc++1 |     apt-get install -y libc++1 | ||||||
|     apt-get install -y libc++abi1 |     apt-get install -y libc++abi1 | ||||||
|  |  | ||||||
|     # Add amdgpu repository |     if [[ $(ver $ROCM_VERSION) -ge $(ver 4.5) ]]; then | ||||||
|     UBUNTU_VERSION_NAME=`cat /etc/os-release | grep UBUNTU_CODENAME | awk -F= '{print $2}'` |         # Add amdgpu repository | ||||||
|     echo "deb [arch=amd64] https://repo.radeon.com/amdgpu/${ROCM_VERSION}/ubuntu ${UBUNTU_VERSION_NAME} main" > /etc/apt/sources.list.d/amdgpu.list |         UBUNTU_VERSION_NAME=`cat /etc/os-release | grep UBUNTU_CODENAME | awk -F= '{print $2}'` | ||||||
|  |         local amdgpu_baseurl | ||||||
|  |         if [[ $(ver $ROCM_VERSION) -ge $(ver 5.3) ]]; then | ||||||
|  |           amdgpu_baseurl="https://repo.radeon.com/amdgpu/${ROCM_VERSION}/ubuntu" | ||||||
|  |         else | ||||||
|  |           amdgpu_baseurl="https://repo.radeon.com/amdgpu/${AMDGPU_VERSIONS[$ROCM_VERSION]}/ubuntu" | ||||||
|  |         fi | ||||||
|  |         echo "deb [arch=amd64] ${amdgpu_baseurl} ${UBUNTU_VERSION_NAME} main" > /etc/apt/sources.list.d/amdgpu.list | ||||||
|  |     fi | ||||||
|  |  | ||||||
|  |     ROCM_REPO="ubuntu" | ||||||
|  |     if [[ $(ver $ROCM_VERSION) -lt $(ver 4.2) ]]; then | ||||||
|  |         ROCM_REPO="xenial" | ||||||
|  |     fi | ||||||
|  |  | ||||||
|  |     if [[ $(ver $ROCM_VERSION) -ge $(ver 5.3) ]]; then | ||||||
|  |         ROCM_REPO="${UBUNTU_VERSION_NAME}" | ||||||
|  |     fi | ||||||
|  |  | ||||||
|     # Add rocm repository |     # Add rocm repository | ||||||
|     wget -qO - http://repo.radeon.com/rocm/rocm.gpg.key | apt-key add - |     wget -qO - http://repo.radeon.com/rocm/rocm.gpg.key | apt-key add - | ||||||
|     local rocm_baseurl="http://repo.radeon.com/rocm/apt/${ROCM_VERSION}" |     local rocm_baseurl="http://repo.radeon.com/rocm/apt/${ROCM_VERSION}" | ||||||
|     echo "deb [arch=amd64] ${rocm_baseurl} ${UBUNTU_VERSION_NAME} main" > /etc/apt/sources.list.d/rocm.list |     echo "deb [arch=amd64] ${rocm_baseurl} ${ROCM_REPO} main" > /etc/apt/sources.list.d/rocm.list | ||||||
|     apt-get update --allow-insecure-repositories |     apt-get update --allow-insecure-repositories | ||||||
|  |  | ||||||
|     DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated \ |     DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated \ | ||||||
| @ -39,28 +59,38 @@ install_ubuntu() { | |||||||
|                    rocm-libs \ |                    rocm-libs \ | ||||||
|                    rccl \ |                    rccl \ | ||||||
|                    rocprofiler-dev \ |                    rocprofiler-dev \ | ||||||
|                    roctracer-dev \ |                    roctracer-dev | ||||||
|                    amd-smi-lib |  | ||||||
|  |  | ||||||
|     if [[ $(ver $ROCM_VERSION) -ge $(ver 6.1) ]]; then |  | ||||||
|         DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated rocm-llvm-dev |  | ||||||
|     fi |  | ||||||
|  |  | ||||||
|     # precompiled miopen kernels added in ROCm 3.5, renamed in ROCm 5.5 |     # precompiled miopen kernels added in ROCm 3.5, renamed in ROCm 5.5 | ||||||
|     # search for all unversioned packages |     # search for all unversioned packages | ||||||
|     # if search fails it will abort this script; use true to avoid case where search fails |     # if search fails it will abort this script; use true to avoid case where search fails | ||||||
|     MIOPENHIPGFX=$(apt-cache search --names-only miopen-hip-gfx | awk '{print $1}' | grep -F -v . || true) |     if [[ $(ver $ROCM_VERSION) -ge $(ver 5.5) ]]; then | ||||||
|     if [[ "x${MIOPENHIPGFX}" = x ]]; then |         MIOPENHIPGFX=$(apt-cache search --names-only miopen-hip-gfx | awk '{print $1}' | grep -F -v . || true) | ||||||
|       echo "miopen-hip-gfx package not available" && exit 1 |         if [[ "x${MIOPENHIPGFX}" = x ]]; then | ||||||
|  |           echo "miopen-hip-gfx package not available" && exit 1 | ||||||
|  |         else | ||||||
|  |           DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated ${MIOPENHIPGFX} | ||||||
|  |         fi | ||||||
|     else |     else | ||||||
|       DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated ${MIOPENHIPGFX} |         MIOPENKERNELS=$(apt-cache search --names-only miopenkernels | awk '{print $1}' | grep -F -v . || true) | ||||||
|  |         if [[ "x${MIOPENKERNELS}" = x ]]; then | ||||||
|  |           echo "miopenkernels package not available" && exit 1 | ||||||
|  |         else | ||||||
|  |           DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated ${MIOPENKERNELS} | ||||||
|  |         fi | ||||||
|     fi |     fi | ||||||
|  |  | ||||||
|     # ROCm 6.0 had a regression where journal_mode was enabled on the kdb files resulting in permission errors at runtime |     # ROCm 6.0 had a regression where journal_mode was enabled on the kdb files resulting in permission errors at runtime | ||||||
|     for kdb in /opt/rocm/share/miopen/db/*.kdb |     if [[ $(ver $ROCM_VERSION) -ge $(ver 6.0) ]]; then | ||||||
|     do |         for kdb in /opt/rocm/share/miopen/db/*.kdb | ||||||
|         sqlite3 $kdb "PRAGMA journal_mode=off; PRAGMA VACUUM;" |         do | ||||||
|     done |           # journal_mode=delete seems to work on some kdbs that have "wal" as initial journal_mode | ||||||
|  |           sqlite3 $kdb "PRAGMA journal_mode=delete; PRAGMA VACUUM;" | ||||||
|  |           JOURNAL_MODE=$(sqlite3 $kdb "PRAGMA journal_mode;") | ||||||
|  |           # Both "delete" and "off" work in cases where user doesn't have write permissions to directory where kdbs are installed | 
|  |           if [[ $JOURNAL_MODE != "delete" ]] && [[ $JOURNAL_MODE != "off" ]]; then echo "kdb journal_mode change failed" && exit 1; fi | ||||||
|  |         done | ||||||
|  |     fi | ||||||
|  |  | ||||||
|     # Cleanup |     # Cleanup | ||||||
|     apt-get autoclean && apt-get clean |     apt-get autoclean && apt-get clean | ||||||
| @ -77,19 +107,25 @@ install_centos() { | |||||||
|   yum install -y epel-release |   yum install -y epel-release | ||||||
|   yum install -y dkms kernel-headers-`uname -r` kernel-devel-`uname -r` |   yum install -y dkms kernel-headers-`uname -r` kernel-devel-`uname -r` | ||||||
|  |  | ||||||
|   # Add amdgpu repository |   if [[ $(ver $ROCM_VERSION) -ge $(ver 4.5) ]]; then | ||||||
|   local amdgpu_baseurl |       # Add amdgpu repository | ||||||
|   if [[ $OS_VERSION == 9 ]]; then |       local amdgpu_baseurl | ||||||
|       amdgpu_baseurl="https://repo.radeon.com/amdgpu/${ROCM_VERSION}/rhel/9.0/main/x86_64" |       if [[ $OS_VERSION == 9 ]]; then | ||||||
|   else |           amdgpu_baseurl="https://repo.radeon.com/amdgpu/${AMDGPU_VERSIONS[$ROCM_VERSION]}/rhel/9.0/main/x86_64" | ||||||
|       amdgpu_baseurl="https://repo.radeon.com/amdgpu/${ROCM_VERSION}/rhel/7.9/main/x86_64" |       else | ||||||
|  |         if [[ $(ver $ROCM_VERSION) -ge $(ver 5.3) ]]; then | ||||||
|  |           amdgpu_baseurl="https://repo.radeon.com/amdgpu/${ROCM_VERSION}/rhel/7.9/main/x86_64" | ||||||
|  |         else | ||||||
|  |           amdgpu_baseurl="https://repo.radeon.com/amdgpu/${AMDGPU_VERSIONS[$ROCM_VERSION]}/rhel/7.9/main/x86_64" | ||||||
|  |         fi | ||||||
|  |       fi | ||||||
|  |       echo "[AMDGPU]" > /etc/yum.repos.d/amdgpu.repo | ||||||
|  |       echo "name=AMDGPU" >> /etc/yum.repos.d/amdgpu.repo | ||||||
|  |       echo "baseurl=${amdgpu_baseurl}" >> /etc/yum.repos.d/amdgpu.repo | ||||||
|  |       echo "enabled=1" >> /etc/yum.repos.d/amdgpu.repo | ||||||
|  |       echo "gpgcheck=1" >> /etc/yum.repos.d/amdgpu.repo | ||||||
|  |       echo "gpgkey=http://repo.radeon.com/rocm/rocm.gpg.key" >> /etc/yum.repos.d/amdgpu.repo | ||||||
|   fi |   fi | ||||||
|   echo "[AMDGPU]" > /etc/yum.repos.d/amdgpu.repo |  | ||||||
|   echo "name=AMDGPU" >> /etc/yum.repos.d/amdgpu.repo |  | ||||||
|   echo "baseurl=${amdgpu_baseurl}" >> /etc/yum.repos.d/amdgpu.repo |  | ||||||
|   echo "enabled=1" >> /etc/yum.repos.d/amdgpu.repo |  | ||||||
|   echo "gpgcheck=1" >> /etc/yum.repos.d/amdgpu.repo |  | ||||||
|   echo "gpgkey=http://repo.radeon.com/rocm/rocm.gpg.key" >> /etc/yum.repos.d/amdgpu.repo |  | ||||||
|  |  | ||||||
|   local rocm_baseurl="http://repo.radeon.com/rocm/yum/${ROCM_VERSION}" |   local rocm_baseurl="http://repo.radeon.com/rocm/yum/${ROCM_VERSION}" | ||||||
|   echo "[ROCm]" > /etc/yum.repos.d/rocm.repo |   echo "[ROCm]" > /etc/yum.repos.d/rocm.repo | ||||||
| @ -107,23 +143,37 @@ install_centos() { | |||||||
|                    rocm-libs \ |                    rocm-libs \ | ||||||
|                    rccl \ |                    rccl \ | ||||||
|                    rocprofiler-dev \ |                    rocprofiler-dev \ | ||||||
|                    roctracer-dev \ |                    roctracer-dev | ||||||
|                    amd-smi-lib |  | ||||||
|  |  | ||||||
|   # precompiled miopen kernels; search for all unversioned packages |   # precompiled miopen kernels; search for all unversioned packages | ||||||
|   # if search fails it will abort this script; use true to avoid case where search fails |   # if search fails it will abort this script; use true to avoid case where search fails | ||||||
|   MIOPENHIPGFX=$(yum -q search miopen-hip-gfx | grep miopen-hip-gfx | awk '{print $1}'| grep -F kdb. || true) |   if [[ $(ver $ROCM_VERSION) -ge $(ver 5.5) ]]; then | ||||||
|   if [[ "x${MIOPENHIPGFX}" = x ]]; then |       MIOPENHIPGFX=$(yum -q search miopen-hip-gfx | grep miopen-hip-gfx | awk '{print $1}'| grep -F kdb. || true) | ||||||
|     echo "miopen-hip-gfx package not available" && exit 1 |       if [[ "x${MIOPENHIPGFX}" = x ]]; then | ||||||
|  |         echo "miopen-hip-gfx package not available" && exit 1 | ||||||
|  |       else | ||||||
|  |         yum install -y ${MIOPENHIPGFX} | ||||||
|  |       fi | ||||||
|   else |   else | ||||||
|     yum install -y ${MIOPENHIPGFX} |       MIOPENKERNELS=$(yum -q search miopenkernels | grep miopenkernels- | awk '{print $1}'| grep -F kdb. || true) | ||||||
|  |       if [[ "x${MIOPENKERNELS}" = x ]]; then | ||||||
|  |         echo "miopenkernels package not available" && exit 1 | ||||||
|  |       else | ||||||
|  |         yum install -y ${MIOPENKERNELS} | ||||||
|  |       fi | ||||||
|   fi |   fi | ||||||
|  |  | ||||||
|   # ROCm 6.0 had a regression where journal_mode was enabled on the kdb files resulting in permission errors at runtime |   # ROCm 6.0 had a regression where journal_mode was enabled on the kdb files resulting in permission errors at runtime | ||||||
|   for kdb in /opt/rocm/share/miopen/db/*.kdb |   if [[ $(ver $ROCM_VERSION) -ge $(ver 6.0) ]]; then | ||||||
|   do |       for kdb in /opt/rocm/share/miopen/db/*.kdb | ||||||
|       sqlite3 $kdb "PRAGMA journal_mode=off; PRAGMA VACUUM;" |       do | ||||||
|   done |         # journal_mode=delete seems to work on some kdbs that have "wal" as initial journal_mode | ||||||
|  |         sqlite3 $kdb "PRAGMA journal_mode=delete; PRAGMA VACUUM;" | ||||||
|  |         JOURNAL_MODE=$(sqlite3 $kdb "PRAGMA journal_mode;") | ||||||
|  |         # Both "delete" and "off" work in cases where user doesn't have write permissions to directory where kdbs are installed | ||||||
|  |         if [[ $JOURNAL_MODE != "delete" ]] && [[ $JOURNAL_MODE != "off" ]]; then echo "kdb journal_mode change failed" && exit 1; fi | ||||||
|  |       done | ||||||
|  |   fi | ||||||
|  |  | ||||||
|   # Cleanup |   # Cleanup | ||||||
|   yum clean all |   yum clean all | ||||||
|  | |||||||
| @ -1,150 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
| # Script used only in CD pipeline |  | ||||||
|  |  | ||||||
| ########################### |  | ||||||
| ### prereqs |  | ||||||
| ########################### |  | ||||||
| # Install Python packages depending on the base OS |  | ||||||
| ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"') |  | ||||||
| case "$ID" in |  | ||||||
|   ubuntu) |  | ||||||
|     apt-get update -y |  | ||||||
|     apt-get install -y libpciaccess-dev pkg-config |  | ||||||
|     apt-get clean |  | ||||||
|     ;; |  | ||||||
|   centos|almalinux) |  | ||||||
|     yum install -y libpciaccess-devel pkgconfig |  | ||||||
|     ;; |  | ||||||
|   *) |  | ||||||
|     echo "Unable to determine OS..." |  | ||||||
|     exit 1 |  | ||||||
|     ;; |  | ||||||
| esac |  | ||||||
| python3 -m pip install meson ninja |  | ||||||
|  |  | ||||||
| ########################### |  | ||||||
| ### clone repo |  | ||||||
| ########################### |  | ||||||
| GIT_SSL_NO_VERIFY=true git clone https://gitlab.freedesktop.org/mesa/drm.git |  | ||||||
| pushd drm |  | ||||||
|  |  | ||||||
| ########################### |  | ||||||
| ### patch |  | ||||||
| ########################### |  | ||||||
| patch -p1 <<'EOF' |  | ||||||
| diff --git a/amdgpu/amdgpu_asic_id.c b/amdgpu/amdgpu_asic_id.c |  | ||||||
| index a5007ffc..13fa07fc 100644 |  | ||||||
| --- a/amdgpu/amdgpu_asic_id.c |  | ||||||
| +++ b/amdgpu/amdgpu_asic_id.c |  | ||||||
| @@ -22,6 +22,13 @@ |  | ||||||
|   * |  | ||||||
|   */ |  | ||||||
|  |  | ||||||
| +#define _XOPEN_SOURCE 700 |  | ||||||
| +#define _LARGEFILE64_SOURCE |  | ||||||
| +#define _FILE_OFFSET_BITS 64 |  | ||||||
| +#include <ftw.h> |  | ||||||
| +#include <link.h> |  | ||||||
| +#include <limits.h> |  | ||||||
| + |  | ||||||
|  #include <ctype.h> |  | ||||||
|  #include <stdio.h> |  | ||||||
|  #include <stdlib.h> |  | ||||||
| @@ -34,6 +41,19 @@ |  | ||||||
|  #include "amdgpu_drm.h" |  | ||||||
|  #include "amdgpu_internal.h" |  | ||||||
|  |  | ||||||
| +static char *amdgpuids_path = NULL; |  | ||||||
| +static const char* amdgpuids_path_msg = NULL; |  | ||||||
| + |  | ||||||
| +static int check_for_location_of_amdgpuids(const char *filepath, const struct stat *info, const int typeflag, struct FTW *pathinfo) |  | ||||||
| +{ |  | ||||||
| +	if (typeflag == FTW_F && strstr(filepath, "amdgpu.ids")) { |  | ||||||
| +		amdgpuids_path = strdup(filepath); |  | ||||||
| +		return 1; |  | ||||||
| +	} |  | ||||||
| + |  | ||||||
| +	return 0; |  | ||||||
| +} |  | ||||||
| + |  | ||||||
|  static int parse_one_line(struct amdgpu_device *dev, const char *line) |  | ||||||
|  { |  | ||||||
|  	char *buf, *saveptr; |  | ||||||
| @@ -113,10 +133,46 @@ void amdgpu_parse_asic_ids(struct amdgpu_device *dev) |  | ||||||
|  	int line_num = 1; |  | ||||||
|  	int r = 0; |  | ||||||
|  |  | ||||||
| +	// attempt to find typical location for amdgpu.ids file |  | ||||||
|  	fp = fopen(AMDGPU_ASIC_ID_TABLE, "r"); |  | ||||||
| + |  | ||||||
| +	// if it doesn't exist, search |  | ||||||
| +	if (!fp) { |  | ||||||
| + |  | ||||||
| +	char self_path[ PATH_MAX ]; |  | ||||||
| +	ssize_t count; |  | ||||||
| +	ssize_t i; |  | ||||||
| + |  | ||||||
| +	count = readlink( "/proc/self/exe", self_path, PATH_MAX ); |  | ||||||
| +	if (count > 0) { |  | ||||||
| +		self_path[count] = '\0'; |  | ||||||
| + |  | ||||||
| +		// remove '/bin/python' from self_path |  | ||||||
| +		for (i=count; i>0; --i) { |  | ||||||
| +			if (self_path[i] == '/') break; |  | ||||||
| +			self_path[i] = '\0'; |  | ||||||
| +		} |  | ||||||
| +		self_path[i] = '\0'; |  | ||||||
| +		for (; i>0; --i) { |  | ||||||
| +			if (self_path[i] == '/') break; |  | ||||||
| +			self_path[i] = '\0'; |  | ||||||
| +		} |  | ||||||
| +		self_path[i] = '\0'; |  | ||||||
| + |  | ||||||
| +		if (1 == nftw(self_path, check_for_location_of_amdgpuids, 5, FTW_PHYS)) { |  | ||||||
| +			fp = fopen(amdgpuids_path, "r"); |  | ||||||
| +			amdgpuids_path_msg = amdgpuids_path; |  | ||||||
| +		} |  | ||||||
| +	} |  | ||||||
| + |  | ||||||
| +	} |  | ||||||
| +	else { |  | ||||||
| +		amdgpuids_path_msg = AMDGPU_ASIC_ID_TABLE; |  | ||||||
| +	} |  | ||||||
| + |  | ||||||
| +	// both hard-coded location and search have failed |  | ||||||
|  	if (!fp) { |  | ||||||
| -		fprintf(stderr, "%s: %s\n", AMDGPU_ASIC_ID_TABLE, |  | ||||||
| -			strerror(errno)); |  | ||||||
| +		fprintf(stderr, "amdgpu.ids: No such file or directory\n"); |  | ||||||
|  		return; |  | ||||||
|  	} |  | ||||||
|  |  | ||||||
| @@ -132,7 +188,7 @@ void amdgpu_parse_asic_ids(struct amdgpu_device *dev) |  | ||||||
|  			continue; |  | ||||||
|  		} |  | ||||||
|  |  | ||||||
| -		drmMsg("%s version: %s\n", AMDGPU_ASIC_ID_TABLE, line); |  | ||||||
| +		drmMsg("%s version: %s\n", amdgpuids_path_msg, line); |  | ||||||
|  		break; |  | ||||||
|  	} |  | ||||||
|  |  | ||||||
| @@ -150,7 +206,7 @@ void amdgpu_parse_asic_ids(struct amdgpu_device *dev) |  | ||||||
|  |  | ||||||
|  	if (r == -EINVAL) { |  | ||||||
|  		fprintf(stderr, "Invalid format: %s: line %d: %s\n", |  | ||||||
| -			AMDGPU_ASIC_ID_TABLE, line_num, line); |  | ||||||
| +			amdgpuids_path_msg, line_num, line); |  | ||||||
|  	} else if (r && r != -EAGAIN) { |  | ||||||
|  		fprintf(stderr, "%s: Cannot parse ASIC IDs: %s\n", |  | ||||||
|  			__func__, strerror(-r)); |  | ||||||
| EOF |  | ||||||
|  |  | ||||||
| ########################### |  | ||||||
| ### build |  | ||||||
| ########################### |  | ||||||
| meson builddir --prefix=/opt/amdgpu |  | ||||||
| pushd builddir |  | ||||||
| ninja install |  | ||||||
|  |  | ||||||
| popd |  | ||||||
| popd |  | ||||||
| @ -1,23 +1,7 @@ | |||||||
| #!/bin/bash | #!/bin/bash | ||||||
| # Script used in CI and CD pipeline |  | ||||||
|  |  | ||||||
| set -ex | set -ex | ||||||
|  |  | ||||||
| # Magma build scripts need `python` |  | ||||||
| ln -sf /usr/bin/python3 /usr/bin/python |  | ||||||
|  |  | ||||||
| ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"') |  | ||||||
| case "$ID" in |  | ||||||
|   almalinux) |  | ||||||
|     yum install -y gcc-gfortran |  | ||||||
|     ;; |  | ||||||
|   *) |  | ||||||
|     echo "No preinstalls to build magma..." |  | ||||||
|     ;; |  | ||||||
| esac |  | ||||||
|  |  | ||||||
| MKLROOT=${MKLROOT:-/opt/conda/envs/py_$ANACONDA_PYTHON_VERSION} |  | ||||||
|  |  | ||||||
| # "install" hipMAGMA into /opt/rocm/magma by copying after build | # "install" hipMAGMA into /opt/rocm/magma by copying after build | ||||||
| git clone https://bitbucket.org/icl/magma.git | git clone https://bitbucket.org/icl/magma.git | ||||||
| pushd magma | pushd magma | ||||||
| @ -27,10 +11,7 @@ git checkout a1625ff4d9bc362906bd01f805dbbe12612953f6 | |||||||
|  |  | ||||||
| cp make.inc-examples/make.inc.hip-gcc-mkl make.inc | cp make.inc-examples/make.inc.hip-gcc-mkl make.inc | ||||||
| echo 'LIBDIR += -L$(MKLROOT)/lib' >> make.inc | echo 'LIBDIR += -L$(MKLROOT)/lib' >> make.inc | ||||||
| if [[ -f "${MKLROOT}/lib/libmkl_core.a" ]]; then | echo 'LIB += -Wl,--enable-new-dtags -Wl,--rpath,/opt/rocm/lib -Wl,--rpath,$(MKLROOT)/lib -Wl,--rpath,/opt/rocm/magma/lib' >> make.inc | ||||||
|     echo 'LIB = -Wl,--start-group -lmkl_gf_lp64 -lmkl_gnu_thread -lmkl_core -Wl,--end-group -lpthread -lstdc++ -lm -lgomp -lhipblas -lhipsparse' >> make.inc |  | ||||||
| fi |  | ||||||
| echo 'LIB += -Wl,--enable-new-dtags -Wl,--rpath,/opt/rocm/lib -Wl,--rpath,$(MKLROOT)/lib -Wl,--rpath,/opt/rocm/magma/lib -ldl' >> make.inc |  | ||||||
| echo 'DEVCCFLAGS += --gpu-max-threads-per-block=256' >> make.inc | echo 'DEVCCFLAGS += --gpu-max-threads-per-block=256' >> make.inc | ||||||
| export PATH="${PATH}:/opt/rocm/bin" | export PATH="${PATH}:/opt/rocm/bin" | ||||||
| if [[ -n "$PYTORCH_ROCM_ARCH" ]]; then | if [[ -n "$PYTORCH_ROCM_ARCH" ]]; then | ||||||
| @ -44,7 +25,7 @@ done | |||||||
| # hipcc with openmp flag may cause isnan() on __device__ not to be found; depending on context, compiler may attempt to match with host definition | # hipcc with openmp flag may cause isnan() on __device__ not to be found; depending on context, compiler may attempt to match with host definition | ||||||
| sed -i 's/^FOPENMP/#FOPENMP/g' make.inc | sed -i 's/^FOPENMP/#FOPENMP/g' make.inc | ||||||
| make -f make.gen.hipMAGMA -j $(nproc) | make -f make.gen.hipMAGMA -j $(nproc) | ||||||
| LANG=C.UTF-8 make lib/libmagma.so -j $(nproc) MKLROOT="${MKLROOT}" | LANG=C.UTF-8 make lib/libmagma.so -j $(nproc) MKLROOT=/opt/conda/envs/py_$ANACONDA_PYTHON_VERSION | ||||||
| make testing/testing_dgemm -j $(nproc) MKLROOT="${MKLROOT}" | make testing/testing_dgemm -j $(nproc) MKLROOT=/opt/conda/envs/py_$ANACONDA_PYTHON_VERSION | ||||||
| popd | popd | ||||||
| mv magma /opt/rocm | mv magma /opt/rocm | ||||||
|  | |||||||
| @ -12,14 +12,11 @@ conda_reinstall() { | |||||||
|   as_jenkins conda install -q -n py_$ANACONDA_PYTHON_VERSION -y --force-reinstall $* |   as_jenkins conda install -q -n py_$ANACONDA_PYTHON_VERSION -y --force-reinstall $* | ||||||
| } | } | ||||||
|  |  | ||||||
| if [ -n "${XPU_VERSION}" ]; then | if [ -n "${ROCM_VERSION}" ]; then | ||||||
|   TRITON_REPO="https://github.com/intel/intel-xpu-backend-for-triton" |   TRITON_REPO="https://github.com/ROCmSoftwarePlatform/triton" | ||||||
|   TRITON_TEXT_FILE="triton-xpu" |   TRITON_TEXT_FILE="triton-rocm" | ||||||
| elif [ -n "${TRITON_CPU}" ]; then |  | ||||||
|   TRITON_REPO="https://github.com/triton-lang/triton-cpu" |  | ||||||
|   TRITON_TEXT_FILE="triton-cpu" |  | ||||||
| else | else | ||||||
|   TRITON_REPO="https://github.com/triton-lang/triton" |   TRITON_REPO="https://github.com/openai/triton" | ||||||
|   TRITON_TEXT_FILE="triton" |   TRITON_TEXT_FILE="triton" | ||||||
| fi | fi | ||||||
|  |  | ||||||
| @ -41,34 +38,19 @@ if [ -z "${MAX_JOBS}" ]; then | |||||||
|     export MAX_JOBS=$(nproc) |     export MAX_JOBS=$(nproc) | ||||||
| fi | fi | ||||||
|  |  | ||||||
| # Git checkout triton |  | ||||||
| mkdir /var/lib/jenkins/triton |  | ||||||
| chown -R jenkins /var/lib/jenkins/triton |  | ||||||
| chgrp -R jenkins /var/lib/jenkins/triton |  | ||||||
| pushd /var/lib/jenkins/ |  | ||||||
|  |  | ||||||
| as_jenkins git clone --recursive ${TRITON_REPO} triton |  | ||||||
| cd triton |  | ||||||
| as_jenkins git checkout ${TRITON_PINNED_COMMIT} |  | ||||||
| as_jenkins git submodule update --init --recursive |  | ||||||
| cd python |  | ||||||
|  |  | ||||||
| # TODO: remove patch setup.py once we have a proper fix for https://github.com/triton-lang/triton/issues/4527 |  | ||||||
| as_jenkins sed -i -e 's/https:\/\/tritonlang.blob.core.windows.net\/llvm-builds/https:\/\/oaitriton.blob.core.windows.net\/public\/llvm-builds/g' setup.py |  | ||||||
|  |  | ||||||
| if [ -n "${UBUNTU_VERSION}" ] && [ -n "${GCC_VERSION}" ] && [[ "${GCC_VERSION}" == "7" ]]; then | if [ -n "${UBUNTU_VERSION}" ] && [ -n "${GCC_VERSION}" ] && [[ "${GCC_VERSION}" == "7" ]]; then | ||||||
|   # Triton needs at least gcc-9 to build |   # Triton needs at least gcc-9 to build | ||||||
|   apt-get install -y g++-9 |   apt-get install -y g++-9 | ||||||
|  |  | ||||||
|   CXX=g++-9 pip_install -e . |   CXX=g++-9 pip_install "git+${TRITON_REPO}@${TRITON_PINNED_COMMIT}#subdirectory=python" | ||||||
| elif [ -n "${UBUNTU_VERSION}" ] && [ -n "${CLANG_VERSION}" ]; then | elif [ -n "${UBUNTU_VERSION}" ] && [ -n "${CLANG_VERSION}" ]; then | ||||||
|   # Triton needs <filesystem> which surprisingly is not available with clang-9 toolchain |   # Triton needs <filesystem> which surprisingly is not available with clang-9 toolchain | ||||||
|   add-apt-repository -y ppa:ubuntu-toolchain-r/test |   add-apt-repository -y ppa:ubuntu-toolchain-r/test | ||||||
|   apt-get install -y g++-9 |   apt-get install -y g++-9 | ||||||
|  |  | ||||||
|   CXX=g++-9 pip_install -e . |   CXX=g++-9 pip_install "git+${TRITON_REPO}@${TRITON_PINNED_COMMIT}#subdirectory=python" | ||||||
| else | else | ||||||
|   pip_install -e . |   pip_install "git+${TRITON_REPO}@${TRITON_PINNED_COMMIT}#subdirectory=python" | ||||||
| fi | fi | ||||||
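A minimal smoke test (not in the diff) that applies to either install path above, assuming the target Python environment is on PATH:

    python -c "import triton; print('triton', triton.__version__)"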
|  |  | ||||||
| if [ -n "${CONDA_CMAKE}" ]; then | if [ -n "${CONDA_CMAKE}" ]; then | ||||||
|  | |||||||
| @ -2,13 +2,6 @@ | |||||||
|  |  | ||||||
| set -ex | set -ex | ||||||
|  |  | ||||||
| # Since version 24.04 the system ships with a user 'ubuntu' that has id 1000 |  | ||||||
| # We need a work-around to enable id 1000 usage for this script |  | ||||||
| if [[ $UBUNTU_VERSION == 24.04 ]]; then |  | ||||||
|     # touch is used to suppress a harmless error message |  | ||||||
|     touch /var/mail/ubuntu && chown ubuntu /var/mail/ubuntu && userdel -r ubuntu |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| # Mirror the jenkins user in the container | # Mirror the jenkins user in the container | ||||||
| # the jenkins user should have the same user-id as ec2-user | # the jenkins user should have the same user-id as ec2-user | ||||||
| echo "jenkins:x:1000:1000::/var/lib/jenkins:" >> /etc/passwd | echo "jenkins:x:1000:1000::/var/lib/jenkins:" >> /etc/passwd | ||||||
|  | |||||||
| @ -5,7 +5,8 @@ set -ex | |||||||
| install_ubuntu() { | install_ubuntu() { | ||||||
|   apt-get update |   apt-get update | ||||||
|   apt-get install -y --no-install-recommends \ |   apt-get install -y --no-install-recommends \ | ||||||
|           libopencv-dev |           libopencv-dev \ | ||||||
|  |           libavcodec-dev | ||||||
|  |  | ||||||
|   # Cleanup |   # Cleanup | ||||||
|   apt-get autoclean && apt-get clean |   apt-get autoclean && apt-get clean | ||||||
| @ -18,7 +19,8 @@ install_centos() { | |||||||
|   yum --enablerepo=extras install -y epel-release |   yum --enablerepo=extras install -y epel-release | ||||||
|  |  | ||||||
|   yum install -y \ |   yum install -y \ | ||||||
|       opencv-devel |       opencv-devel \ | ||||||
|  |       ffmpeg-devel | ||||||
|  |  | ||||||
|   # Cleanup |   # Cleanup | ||||||
|   yum clean all |   yum clean all | ||||||
|  | |||||||
| @ -1,32 +1,29 @@ | |||||||
| #!/bin/bash | #!/bin/bash | ||||||
| set -xe | set -xe | ||||||
| # Script used in CI and CD pipeline |  | ||||||
|  |  | ||||||
| # Intel® software for general purpose GPU capabilities. | # Intel® software for general purpose GPU capabilities. | ||||||
| # Refer to https://www.intel.com/content/www/us/en/developer/articles/tool/pytorch-prerequisites-for-intel-gpus.html | # Refer to https://dgpu-docs.intel.com/releases/stable_647_21_20230714.html | ||||||
|  |  | ||||||
|  | # Intel® oneAPI Base Toolkit (version 2024.0.0) has been updated to include functional and security updates. | ||||||
|  | # Refer to https://www.intel.com/content/www/us/en/developer/tools/oneapi/base-toolkit-download.html | ||||||
|  |  | ||||||
| # Users should update to the latest version as it becomes available | # Users should update to the latest version as it becomes available | ||||||
|  |  | ||||||
| function install_ubuntu() { | function install_ubuntu() { | ||||||
|     . /etc/os-release |  | ||||||
|     if [[ ! " jammy " =~ " ${VERSION_CODENAME} " ]]; then |  | ||||||
|         echo "Ubuntu version ${VERSION_CODENAME} not supported" |  | ||||||
|         exit |  | ||||||
|     fi |  | ||||||
|  |  | ||||||
|     apt-get update -y |     apt-get update -y | ||||||
|     apt-get install -y gpg-agent wget |     apt-get install -y gpg-agent wget | ||||||
|     # To add the online network package repository for the GPU Driver |  | ||||||
|  |     # Set up the repository. To do this, download the key to the system keyring | ||||||
|     wget -qO - https://repositories.intel.com/gpu/intel-graphics.key \ |     wget -qO - https://repositories.intel.com/gpu/intel-graphics.key \ | ||||||
|         | gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg |         | gpg --dearmor --output /usr/share/keyrings/intel-graphics.gpg | ||||||
|     echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] \ |  | ||||||
|         https://repositories.intel.com/gpu/ubuntu ${VERSION_CODENAME}${XPU_DRIVER_VERSION} unified" \ |  | ||||||
|         | tee /etc/apt/sources.list.d/intel-gpu-${VERSION_CODENAME}.list |  | ||||||
|     # To add the online network package repository for the Intel Support Packages |     wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB \ | ||||||
|     wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB \ |     wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB \ | ||||||
|         | gpg --dearmor > /usr/share/keyrings/oneapi-archive-keyring.gpg.gpg |         | gpg --dearmor | tee /usr/share/keyrings/oneapi-archive-keyring.gpg > /dev/null | ||||||
|     echo "deb [signed-by=/usr/share/keyrings/oneapi-archive-keyring.gpg.gpg] \ |  | ||||||
|         https://apt.repos.intel.com/${XPU_REPO_NAME} all main" \ |     # Add the signed entry to APT sources and configure the APT client to use the Intel repository | ||||||
|  |     echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy/production/2328 unified" \ | ||||||
|  |         | tee /etc/apt/sources.list.d/intel-gpu-jammy.list | ||||||
|  |     echo "deb [signed-by=/usr/share/keyrings/oneapi-archive-keyring.gpg] https://apt.repos.intel.com/oneapi all main" \ | ||||||
|         | tee /etc/apt/sources.list.d/oneAPI.list |         | tee /etc/apt/sources.list.d/oneAPI.list | ||||||
|  |  | ||||||
|     # Update the packages list and repository index |     # Update the packages list and repository index | ||||||
| @ -41,62 +38,58 @@ function install_ubuntu() { | |||||||
|         libegl-mesa0 libegl1-mesa libegl1-mesa-dev libgbm1 libgl1-mesa-dev libgl1-mesa-dri \ |         libegl-mesa0 libegl1-mesa libegl1-mesa-dev libgbm1 libgl1-mesa-dev libgl1-mesa-dri \ | ||||||
|         libglapi-mesa libgles2-mesa-dev libglx-mesa0 libigdgmm12 libxatracker2 mesa-va-drivers \ |         libglapi-mesa libgles2-mesa-dev libglx-mesa0 libigdgmm12 libxatracker2 mesa-va-drivers \ | ||||||
|         mesa-vdpau-drivers mesa-vulkan-drivers va-driver-all vainfo hwinfo clinfo |         mesa-vdpau-drivers mesa-vulkan-drivers va-driver-all vainfo hwinfo clinfo | ||||||
|     if [[ "${XPU_DRIVER_TYPE,,}" == "rolling" ]]; then |  | ||||||
|         apt-get install -y intel-ocloc |  | ||||||
|     fi |  | ||||||
|     # Development Packages |     # Development Packages | ||||||
|     apt-get install -y libigc-dev intel-igc-cm libigdfcl-dev libigfxcmrt-dev level-zero-dev |     apt-get install -y libigc-dev intel-igc-cm libigdfcl-dev libigfxcmrt-dev level-zero-dev | ||||||
|     # Install Intel Support Packages |     # Install Intel® oneAPI Base Toolkit | ||||||
|     apt-get install -y ${XPU_PACKAGES} |     if [ -n "$BASEKIT_VERSION" ]; then | ||||||
|  |         apt-get install intel-basekit=$BASEKIT_VERSION -y | ||||||
|  |     else | ||||||
|  |         apt-get install intel-basekit -y | ||||||
|  |     fi | ||||||
|  |  | ||||||
|     # Cleanup |     # Cleanup | ||||||
|     apt-get autoclean && apt-get clean |     apt-get autoclean && apt-get clean | ||||||
|     rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* |     rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* | ||||||
| } | } | ||||||
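As a hedged sanity check (not in the diff), the runtime stack installed by install_ubuntu can be probed with clinfo, which the function itself installs; whether a device shows up still depends on the host kernel driver:

    clinfo 2>/dev/null | grep -i 'intel' || echo "no Intel OpenCL platform visible"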
|  |  | ||||||
| function install_rhel() { | function install_centos() { | ||||||
|     . /etc/os-release |  | ||||||
|     if [[ "${ID}" == "rhel" ]]; then |  | ||||||
|         if [[ ! " 8.8 8.9 9.0 9.2 9.3 " =~ " ${VERSION_ID} " ]]; then |  | ||||||
|             echo "RHEL version ${VERSION_ID} not supported" |  | ||||||
|             exit |  | ||||||
|         fi |  | ||||||
|     elif [[ "${ID}" == "almalinux" ]]; then |  | ||||||
|         # Workaround for almalinux8 which used by quay.io/pypa/manylinux_2_28_x86_64 |  | ||||||
|         # Workaround for almalinux8, which is used by quay.io/pypa/manylinux_2_28_x86_64 |  | ||||||
|     fi |  | ||||||
|  |  | ||||||
|     dnf install -y 'dnf-command(config-manager)' |     dnf install -y 'dnf-command(config-manager)' | ||||||
|     # To add the online network package repository for the GPU Driver |  | ||||||
|     dnf config-manager --add-repo \ |     dnf config-manager --add-repo \ | ||||||
|         https://repositories.intel.com/gpu/rhel/${VERSION_ID}${XPU_DRIVER_VERSION}/unified/intel-gpu-${VERSION_ID}.repo |         https://repositories.intel.com/gpu/rhel/8.6/production/2328/unified/intel-gpu-8.6.repo | ||||||
|     # To add the online network package repository for the Intel Support Packages |     # To add the EPEL repository needed for DKMS | ||||||
|     tee > /etc/yum.repos.d/oneAPI.repo << EOF |     dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm | ||||||
|  |         # https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm | ||||||
|  |  | ||||||
|  |     # Create the YUM repository file in the /tmp directory as a normal user | ||||||
|  |     tee > /tmp/oneAPI.repo << EOF | ||||||
| [oneAPI] | [oneAPI] | ||||||
| name=Intel for Pytorch GPU dev repository | name=Intel® oneAPI repository | ||||||
| baseurl=https://yum.repos.intel.com/${XPU_REPO_NAME} | baseurl=https://yum.repos.intel.com/oneapi | ||||||
| enabled=1 | enabled=1 | ||||||
| gpgcheck=1 | gpgcheck=1 | ||||||
| repo_gpgcheck=1 | repo_gpgcheck=1 | ||||||
| gpgkey=https://yum.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB | gpgkey=https://yum.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB | ||||||
| EOF | EOF | ||||||
|  |  | ||||||
|     # Install Intel Support Packages |     # Move the newly created oneAPI.repo file to the YUM configuration directory /etc/yum.repos.d | ||||||
|     yum install -y ${XPU_PACKAGES} |     mv /tmp/oneAPI.repo /etc/yum.repos.d | ||||||
|  |  | ||||||
|     # The xpu-smi packages |     # The xpu-smi packages | ||||||
|     dnf install -y xpu-smi |     dnf install -y flex bison xpu-smi | ||||||
|     # Compute and Media Runtimes |     # Compute and Media Runtimes | ||||||
|     dnf install --skip-broken -y \ |     dnf install -y \ | ||||||
|         intel-opencl intel-media intel-mediasdk libmfxgen1 libvpl2\ |         intel-opencl intel-media intel-mediasdk libmfxgen1 libvpl2\ | ||||||
|         level-zero intel-level-zero-gpu mesa-dri-drivers mesa-vulkan-drivers \ |         level-zero intel-level-zero-gpu mesa-dri-drivers mesa-vulkan-drivers \ | ||||||
|         mesa-vdpau-drivers libdrm mesa-libEGL mesa-libgbm mesa-libGL \ |         mesa-vdpau-drivers libdrm mesa-libEGL mesa-libgbm mesa-libGL \ | ||||||
|         mesa-libxatracker libvpl-tools intel-metrics-discovery \ |         mesa-libxatracker libvpl-tools intel-metrics-discovery \ | ||||||
|         intel-metrics-library intel-igc-core intel-igc-cm \ |         intel-metrics-library intel-igc-core intel-igc-cm \ | ||||||
|         libva libva-utils intel-gmmlib libmetee intel-gsc intel-ocloc |         libva libva-utils intel-gmmlib libmetee intel-gsc intel-ocloc hwinfo clinfo | ||||||
|     # Development packages |     # Development packages | ||||||
|     dnf install -y --refresh \ |     dnf install -y --refresh \ | ||||||
|         intel-igc-opencl-devel level-zero-devel intel-gsc-devel libmetee-devel \ |         intel-igc-opencl-devel level-zero-devel intel-gsc-devel libmetee-devel \ | ||||||
|         level-zero-devel |         level-zero-devel | ||||||
|  |     # Install Intel® oneAPI Base Toolkit | ||||||
|  |     dnf install intel-basekit -y | ||||||
|  |  | ||||||
|     # Cleanup |     # Cleanup | ||||||
|     dnf clean all |     dnf clean all | ||||||
| @ -105,48 +98,6 @@ EOF | |||||||
|     rm -rf /var/lib/yum/history |     rm -rf /var/lib/yum/history | ||||||
| } | } | ||||||
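A similar hedged check for the dnf-based path (not part of the diff), confirming that the Intel repositories added above are enabled:

    dnf repolist --enabled | grep -iE 'oneapi|intel' || echo "Intel repositories not enabled"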
|  |  | ||||||
| function install_sles() { |  | ||||||
|     . /etc/os-release |  | ||||||
|     VERSION_SP=${VERSION_ID//./sp} |  | ||||||
|     if [[ ! " 15sp4 15sp5 " =~ " ${VERSION_SP} " ]]; then |  | ||||||
|         echo "SLES version ${VERSION_ID} not supported" |  | ||||||
|         exit |  | ||||||
|     fi |  | ||||||
|  |  | ||||||
|     # To add the online network package repository for the GPU Driver |  | ||||||
|     zypper addrepo -f -r \ |  | ||||||
|         https://repositories.intel.com/gpu/sles/${VERSION_SP}${XPU_DRIVER_VERSION}/unified/intel-gpu-${VERSION_SP}.repo |  | ||||||
|     rpm --import https://repositories.intel.com/gpu/intel-graphics.key |  | ||||||
|     # To add the online network package repository for the Intel Support Packages |  | ||||||
|     zypper addrepo https://yum.repos.intel.com/${XPU_REPO_NAME} oneAPI |  | ||||||
|     rpm --import https://yum.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB |  | ||||||
|  |  | ||||||
|     # The xpu-smi packages |  | ||||||
|     zypper install -y lsb-release flex bison xpu-smi |  | ||||||
|     # Compute and Media Runtimes |  | ||||||
|     zypper install -y intel-level-zero-gpu level-zero intel-gsc intel-opencl intel-ocloc \ |  | ||||||
|         intel-media-driver libigfxcmrt7 libvpl2 libvpl-tools libmfxgen1 libmfx1 |  | ||||||
|     # Development packages |  | ||||||
|     zypper install -y libigdfcl-devel intel-igc-cm libigfxcmrt-devel level-zero-devel |  | ||||||
|  |  | ||||||
|     # Install Intel Support Packages |  | ||||||
|     zypper install -y ${XPU_PACKAGES} |  | ||||||
|  |  | ||||||
| } |  | ||||||
|  |  | ||||||
| # Default to the GPU driver LTS releases |  | ||||||
| XPU_DRIVER_VERSION="/lts/2350" |  | ||||||
| if [[ "${XPU_DRIVER_TYPE,,}" == "rolling" ]]; then |  | ||||||
|     # Use GPU driver rolling releases |  | ||||||
|     XPU_DRIVER_VERSION="" |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| XPU_REPO_NAME="intel-for-pytorch-gpu-dev" |  | ||||||
| XPU_PACKAGES="intel-for-pytorch-gpu-dev-0.5 intel-pti-dev-0.9" |  | ||||||
| if [[ "$XPU_VERSION" == "2025.0" ]]; then |  | ||||||
|     XPU_REPO_NAME="oneapi" |  | ||||||
|     XPU_PACKAGES="intel-deep-learning-essentials-2025.0" |  | ||||||
| fi |  | ||||||
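A hedged invocation example for the enclosing installer (the script name is an assumption; the variables are the ones introduced in this hunk):

    XPU_VERSION=2025.0 XPU_DRIVER_TYPE=rolling bash ./install_xpu.sh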
|  |  | ||||||
| # The installation depends on the base OS | # The installation depends on the base OS | ||||||
| ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"') | ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"') | ||||||
| @ -154,11 +105,8 @@ case "$ID" in | |||||||
|     ubuntu) |     ubuntu) | ||||||
|         install_ubuntu |         install_ubuntu | ||||||
|     ;; |     ;; | ||||||
|     rhel|almalinux) |     centos) | ||||||
|         install_rhel |         install_centos | ||||||
|     ;; |  | ||||||
|     sles) |  | ||||||
|         install_sles |  | ||||||
|     ;; |     ;; | ||||||
|     *) |     *) | ||||||
|         echo "Unable to determine OS..." |         echo "Unable to determine OS..." | ||||||
|  | |||||||
| @ -1,112 +0,0 @@ | |||||||
| ARG BASE_TARGET=base |  | ||||||
| ARG GPU_IMAGE=ubuntu:20.04 |  | ||||||
| FROM ${GPU_IMAGE} as base |  | ||||||
|  |  | ||||||
| ENV DEBIAN_FRONTEND=noninteractive |  | ||||||
|  |  | ||||||
| RUN apt-get clean && apt-get update |  | ||||||
| RUN apt-get install -y curl locales g++ git-all autoconf automake make cmake wget unzip sudo |  | ||||||
| # Just add everything as a safe.directory for git since these will be used in multiple places with git |  | ||||||
| RUN git config --global --add safe.directory '*' |  | ||||||
|  |  | ||||||
| RUN locale-gen en_US.UTF-8 |  | ||||||
|  |  | ||||||
| ENV LC_ALL en_US.UTF-8 |  | ||||||
| ENV LANG en_US.UTF-8 |  | ||||||
| ENV LANGUAGE en_US.UTF-8 |  | ||||||
|  |  | ||||||
| # Install openssl |  | ||||||
| FROM base as openssl |  | ||||||
| ADD ./common/install_openssl.sh install_openssl.sh |  | ||||||
| RUN bash ./install_openssl.sh && rm install_openssl.sh |  | ||||||
|  |  | ||||||
| # Install python |  | ||||||
| FROM base as python |  | ||||||
| ADD common/install_cpython.sh install_cpython.sh |  | ||||||
| RUN apt-get update -y && \ |  | ||||||
|     apt-get install build-essential gdb lcov libbz2-dev libffi-dev \ |  | ||||||
|         libgdbm-dev liblzma-dev libncurses5-dev libreadline6-dev \ |  | ||||||
|         libsqlite3-dev libssl-dev lzma lzma-dev tk-dev uuid-dev zlib1g-dev -y && \ |  | ||||||
|     bash ./install_cpython.sh && \ |  | ||||||
|     rm install_cpython.sh && \ |  | ||||||
|     apt-get clean |  | ||||||
|  |  | ||||||
| FROM base as conda |  | ||||||
| ADD ./common/install_conda_docker.sh install_conda.sh |  | ||||||
| RUN bash ./install_conda.sh && rm install_conda.sh |  | ||||||
|  |  | ||||||
| FROM base as cpu |  | ||||||
| # Install Anaconda |  | ||||||
| COPY --from=conda /opt/conda /opt/conda |  | ||||||
| # Install python |  | ||||||
| COPY --from=python /opt/python    /opt/python |  | ||||||
| COPY --from=python /opt/_internal /opt/_internal |  | ||||||
| ENV PATH=/opt/conda/bin:/usr/local/cuda/bin:$PATH |  | ||||||
| # Install MKL |  | ||||||
| ADD ./common/install_mkl.sh install_mkl.sh |  | ||||||
| RUN bash ./install_mkl.sh && rm install_mkl.sh |  | ||||||
|  |  | ||||||
| FROM cpu as cuda |  | ||||||
| ADD ./common/install_cuda.sh install_cuda.sh |  | ||||||
| ADD ./common/install_magma.sh install_magma.sh |  | ||||||
| ENV CUDA_HOME /usr/local/cuda |  | ||||||
|  |  | ||||||
| FROM cuda as cuda11.8 |  | ||||||
| RUN bash ./install_cuda.sh 11.8 |  | ||||||
| RUN bash ./install_magma.sh 11.8 |  | ||||||
| RUN ln -sf /usr/local/cuda-11.8 /usr/local/cuda |  | ||||||
|  |  | ||||||
| FROM cuda as cuda12.1 |  | ||||||
| RUN bash ./install_cuda.sh 12.1 |  | ||||||
| RUN bash ./install_magma.sh 12.1 |  | ||||||
| RUN ln -sf /usr/local/cuda-12.1 /usr/local/cuda |  | ||||||
|  |  | ||||||
| FROM cuda as cuda12.4 |  | ||||||
| RUN bash ./install_cuda.sh 12.4 |  | ||||||
| RUN bash ./install_magma.sh 12.4 |  | ||||||
| RUN ln -sf /usr/local/cuda-12.4 /usr/local/cuda |  | ||||||
|  |  | ||||||
| FROM cuda as cuda12.6 |  | ||||||
| RUN bash ./install_cuda.sh 12.6 |  | ||||||
| RUN bash ./install_magma.sh 12.6 |  | ||||||
| RUN ln -sf /usr/local/cuda-12.6 /usr/local/cuda |  | ||||||
|  |  | ||||||
| FROM cpu as rocm |  | ||||||
| ARG PYTORCH_ROCM_ARCH |  | ||||||
| ENV PYTORCH_ROCM_ARCH ${PYTORCH_ROCM_ARCH} |  | ||||||
| ENV MKLROOT /opt/intel |  | ||||||
| # Add the ROCM_PATH env var so that LoadHip.cmake (even with logic updated for ROCm6.0) |  | ||||||
| # can still find HIP for ROCm5.7. Not needed for ROCm6.0 and above. |  | ||||||
| # Remove below when ROCm5.7 is not in support matrix anymore. |  | ||||||
| ENV ROCM_PATH /opt/rocm |  | ||||||
| # No need to install ROCm as base docker image should have full ROCm install |  | ||||||
| #ADD ./common/install_rocm.sh install_rocm.sh |  | ||||||
| ADD ./common/install_rocm_drm.sh install_rocm_drm.sh |  | ||||||
| ADD ./common/install_rocm_magma.sh install_rocm_magma.sh |  | ||||||
| # gfortran and python are needed for building magma from source for ROCm |  | ||||||
| RUN apt-get update -y && \ |  | ||||||
|     apt-get install gfortran -y && \ |  | ||||||
|     apt-get install python -y && \ |  | ||||||
|     apt-get clean |  | ||||||
|  |  | ||||||
| RUN bash ./install_rocm_drm.sh && rm install_rocm_drm.sh |  | ||||||
| RUN bash ./install_rocm_magma.sh && rm install_rocm_magma.sh |  | ||||||
|  |  | ||||||
| # Install AOTriton |  | ||||||
| COPY ./common/common_utils.sh common_utils.sh |  | ||||||
| COPY ./aotriton_version.txt aotriton_version.txt |  | ||||||
| COPY ./common/install_aotriton.sh install_aotriton.sh |  | ||||||
| RUN bash ./install_aotriton.sh /opt/rocm && rm install_aotriton.sh aotriton_version.txt |  | ||||||
| ENV AOTRITON_INSTALLED_PREFIX /opt/rocm/aotriton |  | ||||||
|  |  | ||||||
| FROM ${BASE_TARGET} as final |  | ||||||
| COPY --from=openssl            /opt/openssl           /opt/openssl |  | ||||||
| # Install patchelf |  | ||||||
| ADD ./common/install_patchelf.sh install_patchelf.sh |  | ||||||
| RUN bash ./install_patchelf.sh && rm install_patchelf.sh |  | ||||||
| # Install Anaconda |  | ||||||
| COPY --from=conda /opt/conda /opt/conda |  | ||||||
| # Install python |  | ||||||
| COPY --from=python /opt/python    /opt/python |  | ||||||
| COPY --from=python /opt/_internal /opt/_internal |  | ||||||
| ENV PATH=/opt/conda/bin:/usr/local/cuda/bin:$PATH |  | ||||||
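For reference, a hedged build invocation for this (removed) multi-stage Dockerfile; the image tag is illustrative, while the -f path and build context match the build script shown further down in this diff:

    DOCKER_BUILDKIT=1 docker build \
        --target final \
        --build-arg BASE_TARGET=cuda12.4 \
        --build-arg GPU_IMAGE=ubuntu:20.04 \
        -t pytorch/libtorch-builder:cuda12.4 \
        -f .ci/docker/libtorch/Dockerfile .ci/docker/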
| @ -1,83 +0,0 @@ | |||||||
| #!/usr/bin/env bash |  | ||||||
| # Script used only in CD pipeline |  | ||||||
|  |  | ||||||
| set -eou pipefail |  | ||||||
|  |  | ||||||
| image="$1" |  | ||||||
| shift |  | ||||||
|  |  | ||||||
| if [ -z "${image}" ]; then |  | ||||||
|   echo "Usage: $0 IMAGE" |  | ||||||
|   exit 1 |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| DOCKER_IMAGE="pytorch/${image}" |  | ||||||
|  |  | ||||||
| TOPDIR=$(git rev-parse --show-toplevel) |  | ||||||
|  |  | ||||||
| GPU_ARCH_TYPE=${GPU_ARCH_TYPE:-cpu} |  | ||||||
| GPU_ARCH_VERSION=${GPU_ARCH_VERSION:-} |  | ||||||
|  |  | ||||||
| WITH_PUSH=${WITH_PUSH:-} |  | ||||||
|  |  | ||||||
| DOCKER=${DOCKER:-docker} |  | ||||||
|  |  | ||||||
| case ${GPU_ARCH_TYPE} in |  | ||||||
|     cpu) |  | ||||||
|         BASE_TARGET=cpu |  | ||||||
|         DOCKER_TAG=cpu |  | ||||||
|         GPU_IMAGE=ubuntu:20.04 |  | ||||||
|         DOCKER_GPU_BUILD_ARG="" |  | ||||||
|         ;; |  | ||||||
|     cuda) |  | ||||||
|         BASE_TARGET=cuda${GPU_ARCH_VERSION} |  | ||||||
|         DOCKER_TAG=cuda${GPU_ARCH_VERSION} |  | ||||||
|         GPU_IMAGE=ubuntu:20.04 |  | ||||||
|         DOCKER_GPU_BUILD_ARG="" |  | ||||||
|         ;; |  | ||||||
|     rocm) |  | ||||||
|         BASE_TARGET=rocm |  | ||||||
|         DOCKER_TAG=rocm${GPU_ARCH_VERSION} |  | ||||||
|         GPU_IMAGE=rocm/dev-ubuntu-20.04:${GPU_ARCH_VERSION}-complete |  | ||||||
|         PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx1030;gfx1100;gfx1101;gfx942" |  | ||||||
|         DOCKER_GPU_BUILD_ARG="--build-arg PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH}" |  | ||||||
|         ;; |  | ||||||
|     *) |  | ||||||
|         echo "ERROR: Unrecognized GPU_ARCH_TYPE: ${GPU_ARCH_TYPE}" |  | ||||||
|         exit 1 |  | ||||||
|         ;; |  | ||||||
| esac |  | ||||||
|  |  | ||||||
|  |  | ||||||
| ( |  | ||||||
|     set -x |  | ||||||
|     DOCKER_BUILDKIT=1 ${DOCKER} build \ |  | ||||||
|          --target final \ |  | ||||||
|         ${DOCKER_GPU_BUILD_ARG} \ |  | ||||||
|         --build-arg "GPU_IMAGE=${GPU_IMAGE}" \ |  | ||||||
|         --build-arg "BASE_TARGET=${BASE_TARGET}" \ |  | ||||||
|         -t "${DOCKER_IMAGE}" \ |  | ||||||
|         $@ \ |  | ||||||
|         -f "${TOPDIR}/.ci/docker/libtorch/Dockerfile" \ |  | ||||||
|         "${TOPDIR}/.ci/docker/" |  | ||||||
|  |  | ||||||
| ) |  | ||||||
|  |  | ||||||
| GITHUB_REF=${GITHUB_REF:-$(git symbolic-ref -q HEAD || git describe --tags --exact-match)} |  | ||||||
| GIT_BRANCH_NAME=${GITHUB_REF##*/} |  | ||||||
| GIT_COMMIT_SHA=${GITHUB_SHA:-$(git rev-parse HEAD)} |  | ||||||
| DOCKER_IMAGE_BRANCH_TAG=${DOCKER_IMAGE}-${GIT_BRANCH_NAME} |  | ||||||
| DOCKER_IMAGE_SHA_TAG=${DOCKER_IMAGE}-${GIT_COMMIT_SHA} |  | ||||||
|  |  | ||||||
| if [[ "${WITH_PUSH}" == true ]]; then |  | ||||||
|   ( |  | ||||||
|     set -x |  | ||||||
|     ${DOCKER} push "${DOCKER_IMAGE}" |  | ||||||
|     if [[ -n ${GITHUB_REF} ]]; then |  | ||||||
|         ${DOCKER} tag ${DOCKER_IMAGE} ${DOCKER_IMAGE_BRANCH_TAG} |  | ||||||
|         ${DOCKER} tag ${DOCKER_IMAGE} ${DOCKER_IMAGE_SHA_TAG} |  | ||||||
|         ${DOCKER} push "${DOCKER_IMAGE_BRANCH_TAG}" |  | ||||||
|         ${DOCKER} push "${DOCKER_IMAGE_SHA_TAG}" |  | ||||||
|     fi |  | ||||||
|   ) |  | ||||||
| fi |  | ||||||
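A hedged usage example for this (removed) wrapper script; the script filename and the image argument are assumptions, and the environment variables are the ones the script reads:

    GPU_ARCH_TYPE=rocm GPU_ARCH_VERSION=6.0 WITH_PUSH=false \
        ./build.sh libtorch-builder:rocm6.0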
| @ -25,12 +25,11 @@ ENV PATH /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/bin:/opt/conda/bin:$PATH | |||||||
| COPY requirements-ci.txt /opt/conda/requirements-ci.txt | COPY requirements-ci.txt /opt/conda/requirements-ci.txt | ||||||
| COPY ./common/install_conda.sh install_conda.sh | COPY ./common/install_conda.sh install_conda.sh | ||||||
| COPY ./common/common_utils.sh common_utils.sh | COPY ./common/common_utils.sh common_utils.sh | ||||||
| COPY ./common/install_magma_conda.sh install_magma_conda.sh | RUN bash ./install_conda.sh && rm install_conda.sh common_utils.sh /opt/conda/requirements-ci.txt | ||||||
| RUN bash ./install_conda.sh && rm install_conda.sh install_magma_conda.sh common_utils.sh /opt/conda/requirements-ci.txt |  | ||||||
|  |  | ||||||
| # Install cuda and cudnn | # Install cuda and cudnn | ||||||
| ARG CUDA_VERSION | ARG CUDA_VERSION | ||||||
| COPY ./common/install_cuda.sh install_cuda.sh | RUN wget -q https://raw.githubusercontent.com/pytorch/builder/main/common/install_cuda.sh -O install_cuda.sh | ||||||
| RUN bash ./install_cuda.sh ${CUDA_VERSION} && rm install_cuda.sh | RUN bash ./install_cuda.sh ${CUDA_VERSION} && rm install_cuda.sh | ||||||
| ENV DESIRED_CUDA ${CUDA_VERSION} | ENV DESIRED_CUDA ${CUDA_VERSION} | ||||||
| ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:$PATH | ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:$PATH | ||||||
|  | |||||||
| @ -1,207 +0,0 @@ | |||||||
| # syntax = docker/dockerfile:experimental |  | ||||||
| ARG ROCM_VERSION=3.7 |  | ||||||
| ARG BASE_CUDA_VERSION=11.8 |  | ||||||
|  |  | ||||||
| ARG GPU_IMAGE=centos:7 |  | ||||||
| FROM centos:7 as base |  | ||||||
|  |  | ||||||
| ENV LC_ALL en_US.UTF-8 |  | ||||||
| ENV LANG en_US.UTF-8 |  | ||||||
| ENV LANGUAGE en_US.UTF-8 |  | ||||||
|  |  | ||||||
| ARG DEVTOOLSET_VERSION=9 |  | ||||||
|  |  | ||||||
| # Note: This is a required patch since CentOS has reached EOL, |  | ||||||
| # otherwise any yum install step will fail |  | ||||||
| RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo |  | ||||||
| RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo |  | ||||||
| RUN sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo |  | ||||||
| RUN yum install -y wget curl perl util-linux xz bzip2 git patch which perl zlib-devel |  | ||||||
| # Just add everything as a safe.directory for git since these will be used in multiple places with git |  | ||||||
| RUN git config --global --add safe.directory '*' |  | ||||||
| RUN yum install -y yum-utils centos-release-scl |  | ||||||
| RUN yum-config-manager --enable rhel-server-rhscl-7-rpms |  | ||||||
| # Note: After running yum-config-manager --enable rhel-server-rhscl-7-rpms |  | ||||||
| # the patch is required once again. Somehow this step adds mirror.centos.org |  | ||||||
| RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo |  | ||||||
| RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo |  | ||||||
| RUN sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo |  | ||||||
| RUN yum install -y devtoolset-${DEVTOOLSET_VERSION}-gcc devtoolset-${DEVTOOLSET_VERSION}-gcc-c++ devtoolset-${DEVTOOLSET_VERSION}-gcc-gfortran devtoolset-${DEVTOOLSET_VERSION}-binutils |  | ||||||
| ENV PATH=/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH |  | ||||||
| ENV LD_LIBRARY_PATH=/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/lib64:/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/lib:$LD_LIBRARY_PATH |  | ||||||
|  |  | ||||||
| RUN yum --enablerepo=extras install -y epel-release |  | ||||||
|  |  | ||||||
| # cmake-3.18.4 from pip |  | ||||||
| RUN yum install -y python3-pip && \ |  | ||||||
|     python3 -mpip install cmake==3.18.4 && \ |  | ||||||
|     ln -s /usr/local/bin/cmake /usr/bin/cmake |  | ||||||
|  |  | ||||||
| RUN yum install -y autoconf aclocal automake make sudo |  | ||||||
|  |  | ||||||
| FROM base as openssl |  | ||||||
| # Install openssl (this must precede `build python` step) |  | ||||||
| # (In order to have a proper SSL module, Python is compiled |  | ||||||
| # against a recent openssl [see env vars above], which is linked |  | ||||||
| # statically. We delete openssl afterwards.) |  | ||||||
| ADD ./common/install_openssl.sh install_openssl.sh |  | ||||||
| RUN bash ./install_openssl.sh && rm install_openssl.sh |  | ||||||
|  |  | ||||||
| # EPEL for cmake |  | ||||||
| FROM base as patchelf |  | ||||||
| # Install patchelf |  | ||||||
| ADD ./common/install_patchelf.sh install_patchelf.sh |  | ||||||
| RUN bash ./install_patchelf.sh && rm install_patchelf.sh |  | ||||||
| RUN cp $(which patchelf) /patchelf |  | ||||||
|  |  | ||||||
| FROM patchelf as python |  | ||||||
| # build python |  | ||||||
| COPY manywheel/build_scripts /build_scripts |  | ||||||
| ADD ./common/install_cpython.sh /build_scripts/install_cpython.sh |  | ||||||
| RUN bash build_scripts/build.sh && rm -r build_scripts |  | ||||||
|  |  | ||||||
| FROM base as cuda |  | ||||||
| ARG BASE_CUDA_VERSION=10.2 |  | ||||||
| # Install CUDA |  | ||||||
| ADD ./common/install_cuda.sh install_cuda.sh |  | ||||||
| RUN bash ./install_cuda.sh ${BASE_CUDA_VERSION} && rm install_cuda.sh |  | ||||||
|  |  | ||||||
| FROM base as intel |  | ||||||
| # MKL |  | ||||||
| ADD ./common/install_mkl.sh install_mkl.sh |  | ||||||
| RUN bash ./install_mkl.sh && rm install_mkl.sh |  | ||||||
|  |  | ||||||
| FROM base as magma |  | ||||||
| ARG BASE_CUDA_VERSION=10.2 |  | ||||||
| # Install magma |  | ||||||
| ADD ./common/install_magma.sh install_magma.sh |  | ||||||
| RUN bash ./install_magma.sh ${BASE_CUDA_VERSION} && rm install_magma.sh |  | ||||||
|  |  | ||||||
| FROM base as jni |  | ||||||
| # Install java jni header |  | ||||||
| ADD ./common/install_jni.sh install_jni.sh |  | ||||||
| ADD ./java/jni.h jni.h |  | ||||||
| RUN bash ./install_jni.sh && rm install_jni.sh |  | ||||||
|  |  | ||||||
| FROM base as libpng |  | ||||||
| # Install libpng |  | ||||||
| ADD ./common/install_libpng.sh install_libpng.sh |  | ||||||
| RUN bash ./install_libpng.sh && rm install_libpng.sh |  | ||||||
|  |  | ||||||
| FROM ${GPU_IMAGE} as common |  | ||||||
| RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo |  | ||||||
| RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo |  | ||||||
| RUN sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo |  | ||||||
| ENV LC_ALL en_US.UTF-8 |  | ||||||
| ENV LANG en_US.UTF-8 |  | ||||||
| ENV LANGUAGE en_US.UTF-8 |  | ||||||
| RUN yum install -y \ |  | ||||||
|         aclocal \ |  | ||||||
|         autoconf \ |  | ||||||
|         automake \ |  | ||||||
|         bison \ |  | ||||||
|         bzip2 \ |  | ||||||
|         curl \ |  | ||||||
|         diffutils \ |  | ||||||
|         file \ |  | ||||||
|         git \ |  | ||||||
|         make \ |  | ||||||
|         patch \ |  | ||||||
|         perl \ |  | ||||||
|         unzip \ |  | ||||||
|         util-linux \ |  | ||||||
|         wget \ |  | ||||||
|         which \ |  | ||||||
|         xz \ |  | ||||||
|         yasm |  | ||||||
| RUN yum install -y \ |  | ||||||
|     https://repo.ius.io/ius-release-el7.rpm \ |  | ||||||
|     https://ossci-linux.s3.amazonaws.com/epel-release-7-14.noarch.rpm |  | ||||||
|  |  | ||||||
| RUN yum swap -y git git236-core |  | ||||||
| # git236+ would refuse to run git commands in repos owned by other users |  | ||||||
| # which causes the version check to fail, as the pytorch repo is bind-mounted into the image |  | ||||||
| # Override this behaviour by treating every folder as safe |  | ||||||
| # For more details see https://github.com/pytorch/pytorch/issues/78659#issuecomment-1144107327 |  | ||||||
| RUN git config --global --add safe.directory "*" |  | ||||||
|  |  | ||||||
| ENV SSL_CERT_FILE=/opt/_internal/certs.pem |  | ||||||
| # Install LLVM version |  | ||||||
| COPY --from=openssl            /opt/openssl                          /opt/openssl |  | ||||||
| COPY --from=python             /opt/python                           /opt/python |  | ||||||
| COPY --from=python             /opt/_internal                        /opt/_internal |  | ||||||
| COPY --from=python             /opt/python/cp39-cp39/bin/auditwheel /usr/local/bin/auditwheel |  | ||||||
| COPY --from=intel              /opt/intel                            /opt/intel |  | ||||||
| COPY --from=patchelf           /usr/local/bin/patchelf               /usr/local/bin/patchelf |  | ||||||
| COPY --from=jni                /usr/local/include/jni.h              /usr/local/include/jni.h |  | ||||||
| COPY --from=libpng             /usr/local/bin/png*                   /usr/local/bin/ |  | ||||||
| COPY --from=libpng             /usr/local/bin/libpng*                /usr/local/bin/ |  | ||||||
| COPY --from=libpng             /usr/local/include/png*               /usr/local/include/ |  | ||||||
| COPY --from=libpng             /usr/local/include/libpng*            /usr/local/include/ |  | ||||||
| COPY --from=libpng             /usr/local/lib/libpng*                /usr/local/lib/ |  | ||||||
| COPY --from=libpng             /usr/local/lib/pkgconfig              /usr/local/lib/pkgconfig |  | ||||||
|  |  | ||||||
| FROM common as cpu_final |  | ||||||
| ARG BASE_CUDA_VERSION=10.1 |  | ||||||
| ARG DEVTOOLSET_VERSION=9 |  | ||||||
| # Install Anaconda |  | ||||||
| ADD ./common/install_conda_docker.sh install_conda.sh |  | ||||||
| RUN bash ./install_conda.sh && rm install_conda.sh |  | ||||||
| ENV PATH /opt/conda/bin:$PATH |  | ||||||
| RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo |  | ||||||
| RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo |  | ||||||
| RUN sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo |  | ||||||
|  |  | ||||||
| RUN yum install -y yum-utils centos-release-scl |  | ||||||
| RUN yum-config-manager --enable rhel-server-rhscl-7-rpms |  | ||||||
| RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo |  | ||||||
| RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo |  | ||||||
| RUN sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo |  | ||||||
| RUN yum install -y devtoolset-${DEVTOOLSET_VERSION}-gcc devtoolset-${DEVTOOLSET_VERSION}-gcc-c++ devtoolset-${DEVTOOLSET_VERSION}-gcc-gfortran devtoolset-${DEVTOOLSET_VERSION}-binutils |  | ||||||
| ENV PATH=/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH |  | ||||||
| ENV LD_LIBRARY_PATH=/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/lib64:/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/lib:$LD_LIBRARY_PATH |  | ||||||
|  |  | ||||||
| # cmake is already installed inside the rocm base image, so remove if present |  | ||||||
| RUN rpm -e cmake || true |  | ||||||
| # cmake-3.18.4 from pip |  | ||||||
| RUN yum install -y python3-pip && \ |  | ||||||
|     python3 -mpip install cmake==3.18.4 && \ |  | ||||||
|     ln -s /usr/local/bin/cmake /usr/bin/cmake |  | ||||||
|  |  | ||||||
| # ninja |  | ||||||
| RUN yum install -y ninja-build |  | ||||||
|  |  | ||||||
| FROM cpu_final as cuda_final |  | ||||||
| RUN rm -rf /usr/local/cuda-${BASE_CUDA_VERSION} |  | ||||||
| COPY --from=cuda     /usr/local/cuda-${BASE_CUDA_VERSION}  /usr/local/cuda-${BASE_CUDA_VERSION} |  | ||||||
| COPY --from=magma    /usr/local/cuda-${BASE_CUDA_VERSION}  /usr/local/cuda-${BASE_CUDA_VERSION} |  | ||||||
| RUN ln -sf /usr/local/cuda-${BASE_CUDA_VERSION} /usr/local/cuda |  | ||||||
| ENV PATH=/usr/local/cuda/bin:$PATH |  | ||||||
|  |  | ||||||
| FROM cpu_final as rocm_final |  | ||||||
| ARG ROCM_VERSION=3.7 |  | ||||||
| ARG PYTORCH_ROCM_ARCH |  | ||||||
| ENV PYTORCH_ROCM_ARCH ${PYTORCH_ROCM_ARCH} |  | ||||||
| # Add the ROCM_PATH env var so that LoadHip.cmake (even with logic updated for ROCm6.0) |  | ||||||
| # can still find HIP for ROCm5.7. Not needed for ROCm6.0 and above. |  | ||||||
| # Remove below when ROCm5.7 is not in support matrix anymore. |  | ||||||
| ENV ROCM_PATH /opt/rocm |  | ||||||
| ENV MKLROOT /opt/intel |  | ||||||
| # No need to install ROCm as base docker image should have full ROCm install |  | ||||||
| #ADD ./common/install_rocm.sh install_rocm.sh |  | ||||||
| #RUN ROCM_VERSION=${ROCM_VERSION} bash ./install_rocm.sh && rm install_rocm.sh |  | ||||||
| ADD ./common/install_rocm_drm.sh install_rocm_drm.sh |  | ||||||
| RUN bash ./install_rocm_drm.sh && rm install_rocm_drm.sh |  | ||||||
| # cmake3 is needed for the MIOpen build |  | ||||||
| RUN ln -sf /usr/local/bin/cmake /usr/bin/cmake3 |  | ||||||
| ADD ./common/install_rocm_magma.sh install_rocm_magma.sh |  | ||||||
| RUN bash ./install_rocm_magma.sh && rm install_rocm_magma.sh |  | ||||||
| ADD ./common/install_miopen.sh install_miopen.sh |  | ||||||
| RUN bash ./install_miopen.sh ${ROCM_VERSION} && rm install_miopen.sh |  | ||||||
|  |  | ||||||
| # Install AOTriton |  | ||||||
| COPY ./common/common_utils.sh common_utils.sh |  | ||||||
| COPY ./aotriton_version.txt aotriton_version.txt |  | ||||||
| COPY ./common/install_aotriton.sh install_aotriton.sh |  | ||||||
| RUN bash ./install_aotriton.sh /opt/rocm && rm install_aotriton.sh aotriton_version.txt |  | ||||||
| ENV AOTRITON_INSTALLED_PREFIX /opt/rocm/aotriton |  | ||||||
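By analogy, a hedged example of building just one of the final stages of this (removed) manywheel Dockerfile; the base image, tag, Dockerfile path, and context are assumptions, while the build args mirror the ARGs declared above:

    DOCKER_BUILDKIT=1 docker build \
        --target rocm_final \
        --build-arg ROCM_VERSION=5.7 \
        --build-arg GPU_IMAGE=rocm/dev-centos-7:5.7-complete \
        --build-arg PYTORCH_ROCM_ARCH="gfx90a;gfx942" \
        -t pytorch/manylinux-builder:rocm5.7 \
        -f manywheel/Dockerfile .ci/docker/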
| @ -1,153 +0,0 @@ | |||||||
| # syntax = docker/dockerfile:experimental |  | ||||||
| ARG ROCM_VERSION=3.7 |  | ||||||
| ARG BASE_CUDA_VERSION=10.2 |  | ||||||
| ARG GPU_IMAGE=nvidia/cuda:${BASE_CUDA_VERSION}-devel-centos7 |  | ||||||
| FROM quay.io/pypa/manylinux2014_x86_64 as base |  | ||||||
|  |  | ||||||
| ENV LC_ALL en_US.UTF-8 |  | ||||||
| ENV LANG en_US.UTF-8 |  | ||||||
| ENV LANGUAGE en_US.UTF-8 |  | ||||||
|  |  | ||||||
| RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo |  | ||||||
| RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo |  | ||||||
| RUN sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo |  | ||||||
| RUN yum install -y wget curl perl util-linux xz bzip2 git patch which perl zlib-devel |  | ||||||
| RUN yum install -y yum-utils centos-release-scl sudo |  | ||||||
| RUN yum-config-manager --enable rhel-server-rhscl-7-rpms |  | ||||||
| RUN yum install -y devtoolset-7-gcc devtoolset-7-gcc-c++ devtoolset-7-gcc-gfortran devtoolset-7-binutils |  | ||||||
| ENV PATH=/opt/rh/devtoolset-7/root/usr/bin:$PATH |  | ||||||
| ENV LD_LIBRARY_PATH=/opt/rh/devtoolset-7/root/usr/lib64:/opt/rh/devtoolset-7/root/usr/lib:$LD_LIBRARY_PATH |  | ||||||
|  |  | ||||||
| # cmake |  | ||||||
| RUN yum install -y cmake3 && \ |  | ||||||
|     ln -s /usr/bin/cmake3 /usr/bin/cmake |  | ||||||
| FROM base as openssl |  | ||||||
| # Install openssl (this must precede `build python` step) |  | ||||||
| # (In order to have a proper SSL module, Python is compiled |  | ||||||
| # against a recent openssl [see env vars above], which is linked |  | ||||||
| # statically. We delete openssl afterwards.) |  | ||||||
| ADD ./common/install_openssl.sh install_openssl.sh |  | ||||||
| RUN bash ./install_openssl.sh && rm install_openssl.sh |  | ||||||
|  |  | ||||||
|  |  | ||||||
|  |  | ||||||
| # remove unnecessary python versions |  | ||||||
| RUN rm -rf /opt/python/cp26-cp26m /opt/_internal/cpython-2.6.9-ucs2 |  | ||||||
| RUN rm -rf /opt/python/cp26-cp26mu /opt/_internal/cpython-2.6.9-ucs4 |  | ||||||
| RUN rm -rf /opt/python/cp33-cp33m /opt/_internal/cpython-3.3.6 |  | ||||||
| RUN rm -rf /opt/python/cp34-cp34m /opt/_internal/cpython-3.4.6 |  | ||||||
|  |  | ||||||
| FROM base as cuda |  | ||||||
| ARG BASE_CUDA_VERSION=10.2 |  | ||||||
| # Install CUDA |  | ||||||
| ADD ./common/install_cuda.sh install_cuda.sh |  | ||||||
| RUN bash ./install_cuda.sh ${BASE_CUDA_VERSION} && rm install_cuda.sh |  | ||||||
|  |  | ||||||
| FROM base as intel |  | ||||||
| # MKL |  | ||||||
| ADD ./common/install_mkl.sh install_mkl.sh |  | ||||||
| RUN bash ./install_mkl.sh && rm install_mkl.sh |  | ||||||
|  |  | ||||||
| FROM base as magma |  | ||||||
| ARG BASE_CUDA_VERSION=10.2 |  | ||||||
| # Install magma |  | ||||||
| ADD ./common/install_magma.sh install_magma.sh |  | ||||||
| RUN bash ./install_magma.sh ${BASE_CUDA_VERSION} && rm install_magma.sh |  | ||||||
|  |  | ||||||
| FROM base as jni |  | ||||||
| # Install java jni header |  | ||||||
| ADD ./common/install_jni.sh install_jni.sh |  | ||||||
| ADD ./java/jni.h jni.h |  | ||||||
| RUN bash ./install_jni.sh && rm install_jni.sh |  | ||||||
|  |  | ||||||
| FROM base as libpng |  | ||||||
| # Install libpng |  | ||||||
| ADD ./common/install_libpng.sh install_libpng.sh |  | ||||||
| RUN bash ./install_libpng.sh && rm install_libpng.sh |  | ||||||
|  |  | ||||||
| FROM ${GPU_IMAGE} as common |  | ||||||
| RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo |  | ||||||
| RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo |  | ||||||
| RUN sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo |  | ||||||
| ENV LC_ALL en_US.UTF-8 |  | ||||||
| ENV LANG en_US.UTF-8 |  | ||||||
| ENV LANGUAGE en_US.UTF-8 |  | ||||||
| RUN yum install -y \ |  | ||||||
|         aclocal \ |  | ||||||
|         autoconf \ |  | ||||||
|         automake \ |  | ||||||
|         bison \ |  | ||||||
|         bzip2 \ |  | ||||||
|         curl \ |  | ||||||
|         diffutils \ |  | ||||||
|         file \ |  | ||||||
|         git \ |  | ||||||
|         make \ |  | ||||||
|         patch \ |  | ||||||
|         perl \ |  | ||||||
|         unzip \ |  | ||||||
|         util-linux \ |  | ||||||
|         wget \ |  | ||||||
|         which \ |  | ||||||
|         xz \ |  | ||||||
|         yasm |  | ||||||
| RUN yum install -y \ |  | ||||||
|     https://repo.ius.io/ius-release-el7.rpm \ |  | ||||||
|     https://ossci-linux.s3.amazonaws.com/epel-release-7-14.noarch.rpm |  | ||||||
|  |  | ||||||
| RUN yum swap -y git git236-core |  | ||||||
| # git236+ would refuse to run git commands in repos owned by other users |  | ||||||
| # which causes the version check to fail, as the pytorch repo is bind-mounted into the image |  | ||||||
| # Override this behaviour by treating every folder as safe |  | ||||||
| # For more details see https://github.com/pytorch/pytorch/issues/78659#issuecomment-1144107327 |  | ||||||
| RUN git config --global --add safe.directory "*" |  | ||||||
|  |  | ||||||
| ENV SSL_CERT_FILE=/opt/_internal/certs.pem |  | ||||||
| # Install LLVM version |  | ||||||
| COPY --from=openssl            /opt/openssl                          /opt/openssl |  | ||||||
| COPY --from=base               /opt/python                           /opt/python |  | ||||||
| COPY --from=base               /opt/_internal                        /opt/_internal |  | ||||||
| COPY --from=base               /usr/local/bin/auditwheel             /usr/local/bin/auditwheel |  | ||||||
| COPY --from=intel              /opt/intel                            /opt/intel |  | ||||||
| COPY --from=base               /usr/local/bin/patchelf               /usr/local/bin/patchelf |  | ||||||
| COPY --from=libpng             /usr/local/bin/png*                   /usr/local/bin/ |  | ||||||
| COPY --from=libpng             /usr/local/bin/libpng*                /usr/local/bin/ |  | ||||||
| COPY --from=libpng             /usr/local/include/png*               /usr/local/include/ |  | ||||||
| COPY --from=libpng             /usr/local/include/libpng*            /usr/local/include/ |  | ||||||
| COPY --from=libpng             /usr/local/lib/libpng*                /usr/local/lib/ |  | ||||||
| COPY --from=libpng             /usr/local/lib/pkgconfig              /usr/local/lib/pkgconfig |  | ||||||
| COPY --from=jni                /usr/local/include/jni.h              /usr/local/include/jni.h |  | ||||||
|  |  | ||||||
| FROM common as cpu_final |  | ||||||
| ARG BASE_CUDA_VERSION=10.2 |  | ||||||
| RUN yum install -y yum-utils centos-release-scl |  | ||||||
| RUN yum-config-manager --enable rhel-server-rhscl-7-rpms |  | ||||||
| RUN yum install -y devtoolset-7-gcc devtoolset-7-gcc-c++ devtoolset-7-gcc-gfortran devtoolset-7-binutils |  | ||||||
| ENV PATH=/opt/rh/devtoolset-7/root/usr/bin:$PATH |  | ||||||
| ENV LD_LIBRARY_PATH=/opt/rh/devtoolset-7/root/usr/lib64:/opt/rh/devtoolset-7/root/usr/lib:$LD_LIBRARY_PATH |  | ||||||
|  |  | ||||||
| # cmake |  | ||||||
| RUN yum install -y cmake3 && \ |  | ||||||
|     ln -s /usr/bin/cmake3 /usr/bin/cmake |  | ||||||
|  |  | ||||||
| # ninja |  | ||||||
| RUN yum install -y http://repo.okay.com.mx/centos/7/x86_64/release/okay-release-1-1.noarch.rpm |  | ||||||
| RUN yum install -y ninja-build |  | ||||||
|  |  | ||||||
| FROM cpu_final as cuda_final |  | ||||||
| RUN rm -rf /usr/local/cuda-${BASE_CUDA_VERSION} |  | ||||||
| COPY --from=cuda     /usr/local/cuda-${BASE_CUDA_VERSION}  /usr/local/cuda-${BASE_CUDA_VERSION} |  | ||||||
| COPY --from=magma    /usr/local/cuda-${BASE_CUDA_VERSION}  /usr/local/cuda-${BASE_CUDA_VERSION} |  | ||||||
|  |  | ||||||
| FROM common as rocm_final |  | ||||||
| ARG ROCM_VERSION=3.7 |  | ||||||
| # Install ROCm |  | ||||||
| ADD ./common/install_rocm.sh install_rocm.sh |  | ||||||
| RUN bash ./install_rocm.sh ${ROCM_VERSION} && rm install_rocm.sh |  | ||||||
| # cmake is already installed inside the rocm base image, but both 2 and 3 exist |  | ||||||
| # cmake3 is needed for the later MIOpen custom build, so that step is last. |  | ||||||
| RUN yum install -y cmake3 && \ |  | ||||||
|     rm -f /usr/bin/cmake && \ |  | ||||||
|     ln -s /usr/bin/cmake3 /usr/bin/cmake |  | ||||||
| ADD ./common/install_miopen.sh install_miopen.sh |  | ||||||
| RUN bash ./install_miopen.sh ${ROCM_VERSION} && rm install_miopen.sh |  | ||||||
| @ -1,174 +0,0 @@ | |||||||
| # syntax = docker/dockerfile:experimental |  | ||||||
| ARG BASE_CUDA_VERSION=11.8 |  | ||||||
| ARG GPU_IMAGE=amd64/almalinux:8 |  | ||||||
| FROM quay.io/pypa/manylinux_2_28_x86_64 as base |  | ||||||
|  |  | ||||||
| ENV LC_ALL en_US.UTF-8 |  | ||||||
| ENV LANG en_US.UTF-8 |  | ||||||
| ENV LANGUAGE en_US.UTF-8 |  | ||||||
|  |  | ||||||
| ARG DEVTOOLSET_VERSION=11 |  | ||||||
| RUN yum install -y sudo wget curl perl util-linux xz bzip2 git patch which perl zlib-devel yum-utils gcc-toolset-${DEVTOOLSET_VERSION}-toolchain |  | ||||||
| ENV PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH |  | ||||||
| ENV LD_LIBRARY_PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib64:/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib:$LD_LIBRARY_PATH |  | ||||||
|  |  | ||||||
| # cmake-3.18.4 from pip |  | ||||||
| RUN yum install -y python3-pip && \ |  | ||||||
|     python3 -mpip install cmake==3.18.4 && \ |  | ||||||
|     ln -s /usr/local/bin/cmake /usr/bin/cmake3 |  | ||||||
|  |  | ||||||
| FROM base as openssl |  | ||||||
| # Install openssl (this must precede `build python` step) |  | ||||||
| # (In order to have a proper SSL module, Python is compiled |  | ||||||
| # against a recent openssl [see env vars above], which is linked |  | ||||||
| # statically. We delete openssl afterwards.) |  | ||||||
| ADD ./common/install_openssl.sh install_openssl.sh |  | ||||||
| RUN bash ./install_openssl.sh && rm install_openssl.sh |  | ||||||
|  |  | ||||||
|  |  | ||||||
| # remove unnecessary python versions |  | ||||||
| RUN rm -rf /opt/python/cp26-cp26m /opt/_internal/cpython-2.6.9-ucs2 |  | ||||||
| RUN rm -rf /opt/python/cp26-cp26mu /opt/_internal/cpython-2.6.9-ucs4 |  | ||||||
| RUN rm -rf /opt/python/cp33-cp33m /opt/_internal/cpython-3.3.6 |  | ||||||
| RUN rm -rf /opt/python/cp34-cp34m /opt/_internal/cpython-3.4.6 |  | ||||||
|  |  | ||||||
| FROM base as cuda |  | ||||||
| ARG BASE_CUDA_VERSION=11.8 |  | ||||||
| # Install CUDA |  | ||||||
| ADD ./common/install_cuda.sh install_cuda.sh |  | ||||||
| RUN bash ./install_cuda.sh ${BASE_CUDA_VERSION} && rm install_cuda.sh |  | ||||||
|  |  | ||||||
| FROM base as intel |  | ||||||
| # MKL |  | ||||||
| ADD ./common/install_mkl.sh install_mkl.sh |  | ||||||
| RUN bash ./install_mkl.sh && rm install_mkl.sh |  | ||||||
|  |  | ||||||
| FROM base as magma |  | ||||||
| ARG BASE_CUDA_VERSION=10.2 |  | ||||||
| # Install magma |  | ||||||
| ADD ./common/install_magma.sh install_magma.sh |  | ||||||
| RUN bash ./install_magma.sh ${BASE_CUDA_VERSION} && rm install_magma.sh |  | ||||||
|  |  | ||||||
| FROM base as jni |  | ||||||
| # Install java jni header |  | ||||||
| ADD ./common/install_jni.sh install_jni.sh |  | ||||||
| ADD ./java/jni.h jni.h |  | ||||||
| RUN bash ./install_jni.sh && rm install_jni.sh |  | ||||||
|  |  | ||||||
| FROM base as libpng |  | ||||||
| # Install libpng |  | ||||||
| ADD ./common/install_libpng.sh install_libpng.sh |  | ||||||
| RUN bash ./install_libpng.sh && rm install_libpng.sh |  | ||||||
|  |  | ||||||
| FROM ${GPU_IMAGE} as common |  | ||||||
| ARG DEVTOOLSET_VERSION=11 |  | ||||||
| ENV LC_ALL en_US.UTF-8 |  | ||||||
| ENV LANG en_US.UTF-8 |  | ||||||
| ENV LANGUAGE en_US.UTF-8 |  | ||||||
| RUN yum -y install epel-release |  | ||||||
| RUN yum -y update |  | ||||||
| RUN yum install -y \ |  | ||||||
|         autoconf \ |  | ||||||
|         automake \ |  | ||||||
|         bison \ |  | ||||||
|         bzip2 \ |  | ||||||
|         curl \ |  | ||||||
|         diffutils \ |  | ||||||
|         file \ |  | ||||||
|         git \ |  | ||||||
|         make \ |  | ||||||
|         patch \ |  | ||||||
|         perl \ |  | ||||||
|         unzip \ |  | ||||||
|         util-linux \ |  | ||||||
|         wget \ |  | ||||||
|         which \ |  | ||||||
|         xz \ |  | ||||||
|         gcc-toolset-${DEVTOOLSET_VERSION}-toolchain \ |  | ||||||
|         glibc-langpack-en |  | ||||||
| RUN yum install -y \ |  | ||||||
|     https://repo.ius.io/ius-release-el7.rpm \ |  | ||||||
|     https://ossci-linux.s3.amazonaws.com/epel-release-7-14.noarch.rpm |  | ||||||
|  |  | ||||||
| RUN yum swap -y git git236-core |  | ||||||
| # git236+ would refuse to run git commands in repos owned by other users |  | ||||||
| # which causes the version check to fail, as the pytorch repo is bind-mounted into the image |  | ||||||
| # Override this behaviour by treating every folder as safe |  | ||||||
| # For more details see https://github.com/pytorch/pytorch/issues/78659#issuecomment-1144107327 |  | ||||||
| RUN git config --global --add safe.directory "*" |  | ||||||
|  |  | ||||||
| ENV SSL_CERT_FILE=/opt/_internal/certs.pem |  | ||||||
| # Install LLVM version |  | ||||||
| COPY --from=openssl            /opt/openssl                          /opt/openssl |  | ||||||
| COPY --from=base               /opt/python                           /opt/python |  | ||||||
| COPY --from=base               /opt/_internal                        /opt/_internal |  | ||||||
| COPY --from=base               /usr/local/bin/auditwheel             /usr/local/bin/auditwheel |  | ||||||
| COPY --from=intel              /opt/intel                            /opt/intel |  | ||||||
| COPY --from=base               /usr/local/bin/patchelf               /usr/local/bin/patchelf |  | ||||||
| COPY --from=libpng             /usr/local/bin/png*                   /usr/local/bin/ |  | ||||||
| COPY --from=libpng             /usr/local/bin/libpng*                /usr/local/bin/ |  | ||||||
| COPY --from=libpng             /usr/local/include/png*               /usr/local/include/ |  | ||||||
| COPY --from=libpng             /usr/local/include/libpng*            /usr/local/include/ |  | ||||||
| COPY --from=libpng             /usr/local/lib/libpng*                /usr/local/lib/ |  | ||||||
| COPY --from=libpng             /usr/local/lib/pkgconfig              /usr/local/lib/pkgconfig |  | ||||||
| COPY --from=jni                /usr/local/include/jni.h              /usr/local/include/jni.h |  | ||||||
|  |  | ||||||
| FROM common as cpu_final |  | ||||||
| ARG BASE_CUDA_VERSION=11.8 |  | ||||||
| ARG DEVTOOLSET_VERSION=11 |  | ||||||
| # Install Anaconda |  | ||||||
| ADD ./common/install_conda_docker.sh install_conda.sh |  | ||||||
| RUN bash ./install_conda.sh && rm install_conda.sh |  | ||||||
| ENV PATH /opt/conda/bin:$PATH |  | ||||||
| # Ensure the expected devtoolset is used |  | ||||||
| ENV PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH |  | ||||||
| ENV LD_LIBRARY_PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib64:/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib:$LD_LIBRARY_PATH |  | ||||||
| # Install setuptools and wheel for python 3.12/3.13 |  | ||||||
| RUN for cpython_version in "cp312-cp312" "cp313-cp313" "cp313-cp313t"; do \ |  | ||||||
|     /opt/python/${cpython_version}/bin/python -m pip install setuptools wheel; \ |  | ||||||
|     done; |  | ||||||
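For orientation (not from the diff), manylinux base images ship one CPython build per ABI tag under /opt/python, which is what the loop above iterates over; the exact set of interpreters varies with the base image version:

    ls /opt/python
    /opt/python/cp312-cp312/bin/python -m pip --version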
|  |  | ||||||
|  |  | ||||||
| # cmake-3.18.4 from pip; force in case cmake3 already exists |  | ||||||
| RUN yum install -y python3-pip && \ |  | ||||||
|     python3 -mpip install cmake==3.18.4 && \ |  | ||||||
|     ln -sf /usr/local/bin/cmake /usr/bin/cmake3 |  | ||||||
|  |  | ||||||
| FROM cpu_final as cuda_final |  | ||||||
| RUN rm -rf /usr/local/cuda-${BASE_CUDA_VERSION} |  | ||||||
| COPY --from=cuda     /usr/local/cuda-${BASE_CUDA_VERSION}  /usr/local/cuda-${BASE_CUDA_VERSION} |  | ||||||
| COPY --from=magma    /usr/local/cuda-${BASE_CUDA_VERSION}  /usr/local/cuda-${BASE_CUDA_VERSION} |  | ||||||
| RUN ln -sf /usr/local/cuda-${BASE_CUDA_VERSION} /usr/local/cuda |  | ||||||
| ENV PATH=/usr/local/cuda/bin:$PATH |  | ||||||
|  |  | ||||||
| FROM cpu_final as rocm_final |  | ||||||
| ARG ROCM_VERSION=6.0 |  | ||||||
| ARG PYTORCH_ROCM_ARCH |  | ||||||
| ENV PYTORCH_ROCM_ARCH ${PYTORCH_ROCM_ARCH} |  | ||||||
| ARG DEVTOOLSET_VERSION=11 |  | ||||||
| ENV LDFLAGS="-Wl,-rpath=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib64 -Wl,-rpath=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib" |  | ||||||
| # Somewhere in the ROCm stack, we still use the non-existing /opt/rocm/hip path; |  | ||||||
| # the workaround below helps avoid the error |  | ||||||
| ENV ROCM_PATH /opt/rocm |  | ||||||
| # cmake-3.28.4 from pip to get enable_language(HIP) |  | ||||||
| # and avoid 3.21.0 cmake+ninja issues with ninja inserting "-Wl,--no-as-needed" in LINK_FLAGS for static linker |  | ||||||
| RUN python3 -m pip install --upgrade pip && \ |  | ||||||
|     python3 -mpip install cmake==3.28.4 |  | ||||||
| ADD ./common/install_rocm_drm.sh install_rocm_drm.sh |  | ||||||
| RUN bash ./install_rocm_drm.sh && rm install_rocm_drm.sh |  | ||||||
| ENV MKLROOT /opt/intel |  | ||||||
| ADD ./common/install_rocm_magma.sh install_rocm_magma.sh |  | ||||||
| RUN bash ./install_rocm_magma.sh && rm install_rocm_magma.sh |  | ||||||
| ADD ./common/install_miopen.sh install_miopen.sh |  | ||||||
| RUN bash ./install_miopen.sh ${ROCM_VERSION} && rm install_miopen.sh |  | ||||||
|  |  | ||||||
| FROM cpu_final as xpu_final |  | ||||||
| # XPU CD uses the rolling driver |  | ||||||
| ENV XPU_DRIVER_TYPE ROLLING |  | ||||||
| # cmake-3.28.4 from pip |  | ||||||
| RUN python3 -m pip install --upgrade pip && \ |  | ||||||
|     python3 -mpip install cmake==3.28.4 |  | ||||||
| ADD ./common/install_xpu.sh install_xpu.sh |  | ||||||
| ENV XPU_VERSION 2025.0 |  | ||||||
| RUN bash ./install_xpu.sh && rm install_xpu.sh |  | ||||||
| RUN pushd /opt/_internal && tar -xJf static-libs-for-embedding-only.tar.xz && popd |  | ||||||
| @ -1,64 +0,0 @@ | |||||||
| FROM quay.io/pypa/manylinux_2_28_aarch64 as base |  | ||||||
|  |  | ||||||
| # Graviton needs GCC 10 or above for the build. GCC12 is the default version in almalinux-8. |  | ||||||
| ARG GCCTOOLSET_VERSION=11 |  | ||||||
|  |  | ||||||
| # Language variables |  | ||||||
| ENV LC_ALL=en_US.UTF-8 |  | ||||||
| ENV LANG=en_US.UTF-8 |  | ||||||
| ENV LANGUAGE=en_US.UTF-8 |  | ||||||
|  |  | ||||||
| # Install needed OS packages. This is to support all |  | ||||||
| # the binary builds (torch, vision, audio, text, data) |  | ||||||
| RUN yum -y install epel-release |  | ||||||
| RUN yum -y update |  | ||||||
| RUN yum install -y \ |  | ||||||
|   autoconf \ |  | ||||||
|   automake \ |  | ||||||
|   bison \ |  | ||||||
|   bzip2 \ |  | ||||||
|   curl \ |  | ||||||
|   diffutils \ |  | ||||||
|   file \ |  | ||||||
|   git \ |  | ||||||
|   less \ |  | ||||||
|   libffi-devel \ |  | ||||||
|   libgomp \ |  | ||||||
|   make \ |  | ||||||
|   openssl-devel \ |  | ||||||
|   patch \ |  | ||||||
|   perl \ |  | ||||||
|   unzip \ |  | ||||||
|   util-linux \ |  | ||||||
|   wget \ |  | ||||||
|   which \ |  | ||||||
|   xz \ |  | ||||||
|   yasm \ |  | ||||||
|   zstd \ |  | ||||||
|   sudo \ |  | ||||||
|   gcc-toolset-${GCCTOOLSET_VERSION}-toolchain |  | ||||||
|  |  | ||||||
| # Ensure the expected devtoolset is used |  | ||||||
| ENV PATH=/opt/rh/gcc-toolset-${GCCTOOLSET_VERSION}/root/usr/bin:$PATH |  | ||||||
| ENV LD_LIBRARY_PATH=/opt/rh/gcc-toolset-${GCCTOOLSET_VERSION}/root/usr/lib64:/opt/rh/gcc-toolset-${GCCTOOLSET_VERSION}/root/usr/lib:$LD_LIBRARY_PATH |  | ||||||
|  |  | ||||||
| # git 2.36+ refuses to run git commands in repos owned by other users, |  | ||||||
| # which causes the version check to fail, as the pytorch repo is bind-mounted into the image. |  | ||||||
| # Override this behaviour by treating every folder as safe. |  | ||||||
| # For more details see https://github.com/pytorch/pytorch/issues/78659#issuecomment-1144107327 |  | ||||||
| RUN git config --global --add safe.directory "*" |  | ||||||
|  |  | ||||||
| FROM base as openblas |  | ||||||
| # Install openblas |  | ||||||
| ADD ./common/install_openblas.sh install_openblas.sh |  | ||||||
| RUN bash ./install_openblas.sh && rm install_openblas.sh |  | ||||||
|  |  | ||||||
| FROM base as final |  | ||||||
|  |  | ||||||
| # remove unnecessary Python versions |  | ||||||
| RUN rm -rf /opt/python/cp26-cp26m /opt/_internal/cpython-2.6.9-ucs2 |  | ||||||
| RUN rm -rf /opt/python/cp26-cp26mu /opt/_internal/cpython-2.6.9-ucs4 |  | ||||||
| RUN rm -rf /opt/python/cp33-cp33m /opt/_internal/cpython-3.3.6 |  | ||||||
| RUN rm -rf /opt/python/cp34-cp34m /opt/_internal/cpython-3.4.6 |  | ||||||
| COPY --from=openblas     /opt/OpenBLAS/  /opt/OpenBLAS/ |  | ||||||
| ENV LD_LIBRARY_PATH=/opt/OpenBLAS/lib:$LD_LIBRARY_PATH |  | ||||||
| @ -1,94 +0,0 @@ | |||||||
| FROM quay.io/pypa/manylinux2014_aarch64 as base |  | ||||||
|  |  | ||||||
|  |  | ||||||
| # Graviton needs GCC 10 for the build |  | ||||||
| ARG DEVTOOLSET_VERSION=10 |  | ||||||
|  |  | ||||||
| # Language variables |  | ||||||
| ENV LC_ALL=en_US.UTF-8 |  | ||||||
| ENV LANG=en_US.UTF-8 |  | ||||||
| ENV LANGUAGE=en_US.UTF-8 |  | ||||||
|  |  | ||||||
| # Install the OS packages needed to support all |  | ||||||
| # the binary builds (torch, vision, audio, text, data) |  | ||||||
| RUN yum -y install epel-release |  | ||||||
| RUN yum -y update |  | ||||||
| RUN yum install -y \ |  | ||||||
|   autoconf \ |  | ||||||
|   automake \ |  | ||||||
|   bison \ |  | ||||||
|   bzip2 \ |  | ||||||
|   curl \ |  | ||||||
|   diffutils \ |  | ||||||
|   file \ |  | ||||||
|   git \ |  | ||||||
|   make \ |  | ||||||
|   patch \ |  | ||||||
|   perl \ |  | ||||||
|   unzip \ |  | ||||||
|   util-linux \ |  | ||||||
|   wget \ |  | ||||||
|   which \ |  | ||||||
|   xz \ |  | ||||||
|   yasm \ |  | ||||||
|   less \ |  | ||||||
|   zstd \ |  | ||||||
|   libgomp \ |  | ||||||
|   sudo \ |  | ||||||
|   devtoolset-${DEVTOOLSET_VERSION}-gcc \ |  | ||||||
|   devtoolset-${DEVTOOLSET_VERSION}-gcc-c++ \ |  | ||||||
|   devtoolset-${DEVTOOLSET_VERSION}-gcc-gfortran \ |  | ||||||
|   devtoolset-${DEVTOOLSET_VERSION}-binutils |  | ||||||
|  |  | ||||||
| # Ensure the expected devtoolset is used |  | ||||||
| ENV PATH=/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH |  | ||||||
| ENV LD_LIBRARY_PATH=/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/lib64:/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/lib:$LD_LIBRARY_PATH |  | ||||||
|  |  | ||||||
|  |  | ||||||
| # git 2.36+ refuses to run git commands in repos owned by other users, |  | ||||||
| # which causes the version check to fail, as the pytorch repo is bind-mounted into the image. |  | ||||||
| # Override this behaviour by treating every folder as safe. |  | ||||||
| # For more details see https://github.com/pytorch/pytorch/issues/78659#issuecomment-1144107327 |  | ||||||
| RUN git config --global --add safe.directory "*" |  | ||||||
|  |  | ||||||
|  |  | ||||||
| ############################################################################### |  | ||||||
| # libgfortran.a hack |  | ||||||
| # |  | ||||||
| # libgfortran.a from quay.io/pypa/manylinux2014_aarch64 is not compiled with -fPIC. |  | ||||||
| # This causes a __stack_chk_guard@@GLIBC_2.17 link error during the pytorch build. To solve it, use |  | ||||||
| # Ubuntu's libgfortran.a, which is compiled with -fPIC. |  | ||||||
| # NOTE: We need a better way to get this library, as Ubuntu's package can be removed or changed by the vendor. |  | ||||||
| ############################################################################### |  | ||||||
| RUN cd ~/ \ |  | ||||||
|   && curl -L -o ~/libgfortran-10-dev.deb http://ports.ubuntu.com/ubuntu-ports/pool/universe/g/gcc-10/libgfortran-10-dev_10.5.0-4ubuntu2_arm64.deb \ |  | ||||||
|   && ar x ~/libgfortran-10-dev.deb \ |  | ||||||
|   && tar --use-compress-program=unzstd -xvf data.tar.zst -C ~/ \ |  | ||||||
|   && cp -f ~/usr/lib/gcc/aarch64-linux-gnu/10/libgfortran.a /opt/rh/devtoolset-10/root/usr/lib/gcc/aarch64-redhat-linux/10/ |  | ||||||
|  |  | ||||||
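| # (Added sketch, assuming the devtoolset-10 gfortran installed above is first on PATH): |  | ||||||
| # sanity-check that the compiler now resolves libgfortran.a to the -fPIC copy placed in the |  | ||||||
| # devtoolset directory; -print-file-name prints the library path the driver would actually link. |  | ||||||
| RUN gfortran -print-file-name=libgfortran.a | grep -q "devtoolset-10" |  | ||||||
|  |  | ||||||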
| # install cmake |  | ||||||
| RUN yum install -y cmake3 && \ |  | ||||||
|     ln -s /usr/bin/cmake3 /usr/bin/cmake |  | ||||||
|  |  | ||||||
| FROM base as openssl |  | ||||||
| # Install openssl (this must precede `build python` step) |  | ||||||
| # (In order to have a proper SSL module, Python is compiled |  | ||||||
| # against a recent openssl [see env vars above], which is linked |  | ||||||
| # statically. We delete openssl afterwards.) |  | ||||||
| ADD ./common/install_openssl.sh install_openssl.sh |  | ||||||
| RUN bash ./install_openssl.sh && rm install_openssl.sh |  | ||||||
| ENV SSL_CERT_FILE=/opt/_internal/certs.pem |  | ||||||
|  |  | ||||||
| FROM base as openblas |  | ||||||
| # Install openblas |  | ||||||
| ADD ./common/install_openblas.sh install_openblas.sh |  | ||||||
| RUN bash ./install_openblas.sh && rm install_openblas.sh |  | ||||||
|  |  | ||||||
| FROM openssl as final |  | ||||||
| # remove unnecessary Python versions |  | ||||||
| RUN rm -rf /opt/python/cp26-cp26m /opt/_internal/cpython-2.6.9-ucs2 |  | ||||||
| RUN rm -rf /opt/python/cp26-cp26mu /opt/_internal/cpython-2.6.9-ucs4 |  | ||||||
| RUN rm -rf /opt/python/cp33-cp33m /opt/_internal/cpython-3.3.6 |  | ||||||
| RUN rm -rf /opt/python/cp34-cp34m /opt/_internal/cpython-3.4.6 |  | ||||||
| COPY --from=openblas     /opt/OpenBLAS/  /opt/OpenBLAS/ |  | ||||||
| ENV LD_LIBRARY_PATH=/opt/OpenBLAS/lib:$LD_LIBRARY_PATH |  | ||||||
| @ -1,91 +0,0 @@ | |||||||
| FROM quay.io/pypa/manylinux_2_28_aarch64 as base |  | ||||||
|  |  | ||||||
| # CUDA ARM build needs gcc 11 |  | ||||||
| ARG DEVTOOLSET_VERSION=11 |  | ||||||
|  |  | ||||||
| # Language variables |  | ||||||
| ENV LC_ALL=en_US.UTF-8 |  | ||||||
| ENV LANG=en_US.UTF-8 |  | ||||||
| ENV LANGUAGE=en_US.UTF-8 |  | ||||||
|  |  | ||||||
| # Install the OS packages needed to support all |  | ||||||
| # the binary builds (torch, vision, audio, text, data) |  | ||||||
| RUN yum -y install epel-release |  | ||||||
| RUN yum -y update |  | ||||||
| RUN yum install -y \ |  | ||||||
|   autoconf \ |  | ||||||
|   automake \ |  | ||||||
|   bison \ |  | ||||||
|   bzip2 \ |  | ||||||
|   curl \ |  | ||||||
|   diffutils \ |  | ||||||
|   file \ |  | ||||||
|   git \ |  | ||||||
|   make \ |  | ||||||
|   patch \ |  | ||||||
|   perl \ |  | ||||||
|   unzip \ |  | ||||||
|   util-linux \ |  | ||||||
|   wget \ |  | ||||||
|   which \ |  | ||||||
|   xz \ |  | ||||||
|   yasm \ |  | ||||||
|   less \ |  | ||||||
|   zstd \ |  | ||||||
|   libgomp \ |  | ||||||
|   sudo \ |  | ||||||
|   gcc-toolset-${DEVTOOLSET_VERSION}-toolchain |  | ||||||
|  |  | ||||||
| # Ensure the expected devtoolset is used |  | ||||||
| ENV PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH |  | ||||||
| ENV LD_LIBRARY_PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib64:/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib:$LD_LIBRARY_PATH |  | ||||||
|  |  | ||||||
| # git 2.36+ refuses to run git commands in repos owned by other users, |  | ||||||
| # which causes the version check to fail, as the pytorch repo is bind-mounted into the image. |  | ||||||
| # Override this behaviour by treating every folder as safe. |  | ||||||
| # For more details see https://github.com/pytorch/pytorch/issues/78659#issuecomment-1144107327 |  | ||||||
| RUN git config --global --add safe.directory "*" |  | ||||||
|  |  | ||||||
|  |  | ||||||
| FROM base as openssl |  | ||||||
| # Install openssl (this must precede `build python` step) |  | ||||||
| # (In order to have a proper SSL module, Python is compiled |  | ||||||
| # against a recent openssl [see env vars above], which is linked |  | ||||||
| # statically. We delete openssl afterwards.) |  | ||||||
| ADD ./common/install_openssl.sh install_openssl.sh |  | ||||||
| RUN bash ./install_openssl.sh && rm install_openssl.sh |  | ||||||
| ENV SSL_CERT_FILE=/opt/_internal/certs.pem |  | ||||||
|  |  | ||||||
| FROM openssl as final |  | ||||||
| # remove unnecessary Python versions |  | ||||||
| RUN rm -rf /opt/python/cp26-cp26m /opt/_internal/cpython-2.6.9-ucs2 |  | ||||||
| RUN rm -rf /opt/python/cp26-cp26mu /opt/_internal/cpython-2.6.9-ucs4 |  | ||||||
| RUN rm -rf /opt/python/cp33-cp33m /opt/_internal/cpython-3.3.6 |  | ||||||
| RUN rm -rf /opt/python/cp34-cp34m /opt/_internal/cpython-3.4.6 |  | ||||||
|  |  | ||||||
| FROM base as cuda |  | ||||||
| ARG BASE_CUDA_VERSION |  | ||||||
| # Install CUDA |  | ||||||
| ADD ./common/install_cuda_aarch64.sh install_cuda_aarch64.sh |  | ||||||
| RUN bash ./install_cuda_aarch64.sh ${BASE_CUDA_VERSION} && rm install_cuda_aarch64.sh |  | ||||||
|  |  | ||||||
| FROM base as magma |  | ||||||
| ARG BASE_CUDA_VERSION |  | ||||||
| # Install magma |  | ||||||
| ADD ./common/install_magma.sh install_magma.sh |  | ||||||
| RUN bash ./install_magma.sh ${BASE_CUDA_VERSION} && rm install_magma.sh |  | ||||||
|  |  | ||||||
| FROM base as nvpl |  | ||||||
| # Install nvpl |  | ||||||
| ADD ./common/install_nvpl.sh install_nvpl.sh |  | ||||||
| RUN bash ./install_nvpl.sh && rm install_nvpl.sh |  | ||||||
|  |  | ||||||
| FROM final as cuda_final |  | ||||||
| ARG BASE_CUDA_VERSION |  | ||||||
| RUN rm -rf /usr/local/cuda-${BASE_CUDA_VERSION} |  | ||||||
| COPY --from=cuda     /usr/local/cuda-${BASE_CUDA_VERSION}  /usr/local/cuda-${BASE_CUDA_VERSION} |  | ||||||
| COPY --from=magma    /usr/local/cuda-${BASE_CUDA_VERSION}  /usr/local/cuda-${BASE_CUDA_VERSION} |  | ||||||
| COPY --from=nvpl /opt/nvpl/lib/  /usr/local/lib/ |  | ||||||
| COPY --from=nvpl /opt/nvpl/include/  /usr/local/include/ |  | ||||||
| RUN ln -sf /usr/local/cuda-${BASE_CUDA_VERSION} /usr/local/cuda |  | ||||||
| ENV PATH=/usr/local/cuda/bin:$PATH |  | ||||||
| @ -1,71 +0,0 @@ | |||||||
| FROM centos:8 as base |  | ||||||
|  |  | ||||||
| ENV LC_ALL en_US.UTF-8 |  | ||||||
| ENV LANG en_US.UTF-8 |  | ||||||
| ENV LANGUAGE en_US.UTF-8 |  | ||||||
| ENV PATH /opt/rh/gcc-toolset-11/root/bin/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin |  | ||||||
|  |  | ||||||
| # change to a valid repo |  | ||||||
| RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-*.repo |  | ||||||
| # enable the PowerTools repo so that ninja-build can be installed |  | ||||||
| RUN sed -i 's|enabled=0|enabled=1|g' /etc/yum.repos.d/CentOS-Linux-PowerTools.repo |  | ||||||
|  |  | ||||||
| RUN yum -y update |  | ||||||
| RUN yum install -y wget curl perl util-linux xz bzip2 git patch which zlib-devel sudo |  | ||||||
| RUN yum install -y autoconf automake make cmake gdb gcc-toolset-11-gcc-c++ |  | ||||||
|  |  | ||||||
|  |  | ||||||
| FROM base as openssl |  | ||||||
| ADD ./common/install_openssl.sh install_openssl.sh |  | ||||||
| RUN bash ./install_openssl.sh && rm install_openssl.sh |  | ||||||
|  |  | ||||||
| # Install python |  | ||||||
| FROM base as python |  | ||||||
| RUN yum install -y openssl-devel zlib-devel bzip2-devel ncurses-devel sqlite-devel readline-devel tk-devel gdbm-devel libpcap-devel xz-devel libffi-devel |  | ||||||
| ADD common/install_cpython.sh install_cpython.sh |  | ||||||
| RUN bash ./install_cpython.sh && rm install_cpython.sh |  | ||||||
|  |  | ||||||
| FROM base as conda |  | ||||||
| ADD ./common/install_conda_docker.sh install_conda.sh |  | ||||||
| RUN bash ./install_conda.sh && rm install_conda.sh |  | ||||||
| RUN /opt/conda/bin/conda install -y cmake |  | ||||||
|  |  | ||||||
| FROM base as intel |  | ||||||
| # Install MKL |  | ||||||
| COPY --from=python             /opt/python                           /opt/python |  | ||||||
| COPY --from=python             /opt/_internal                        /opt/_internal |  | ||||||
| COPY --from=conda              /opt/conda                            /opt/conda |  | ||||||
| ENV PATH=/opt/conda/bin:$PATH |  | ||||||
| ADD ./common/install_mkl.sh install_mkl.sh |  | ||||||
| RUN bash ./install_mkl.sh && rm install_mkl.sh |  | ||||||
|  |  | ||||||
| FROM base as patchelf |  | ||||||
| ADD ./common/install_patchelf.sh install_patchelf.sh |  | ||||||
| RUN bash ./install_patchelf.sh && rm install_patchelf.sh |  | ||||||
| RUN cp $(which patchelf) /patchelf |  | ||||||
|  |  | ||||||
| FROM base as jni |  | ||||||
| ADD ./common/install_jni.sh install_jni.sh |  | ||||||
| ADD ./java/jni.h jni.h |  | ||||||
| RUN bash ./install_jni.sh && rm install_jni.sh |  | ||||||
|  |  | ||||||
| FROM base as libpng |  | ||||||
| ADD ./common/install_libpng.sh install_libpng.sh |  | ||||||
| RUN bash ./install_libpng.sh && rm install_libpng.sh |  | ||||||
|  |  | ||||||
| FROM base as final |  | ||||||
| COPY --from=openssl            /opt/openssl                          /opt/openssl |  | ||||||
| COPY --from=python             /opt/python                           /opt/python |  | ||||||
| COPY --from=python             /opt/_internal                        /opt/_internal |  | ||||||
| COPY --from=intel              /opt/intel                            /opt/intel |  | ||||||
| COPY --from=conda              /opt/conda                            /opt/conda |  | ||||||
| COPY --from=patchelf           /usr/local/bin/patchelf               /usr/local/bin/patchelf |  | ||||||
| COPY --from=jni                /usr/local/include/jni.h              /usr/local/include/jni.h |  | ||||||
| COPY --from=libpng             /usr/local/bin/png*                   /usr/local/bin/ |  | ||||||
| COPY --from=libpng             /usr/local/bin/libpng*                /usr/local/bin/ |  | ||||||
| COPY --from=libpng             /usr/local/include/png*               /usr/local/include/ |  | ||||||
| COPY --from=libpng             /usr/local/include/libpng*            /usr/local/include/ |  | ||||||
| COPY --from=libpng             /usr/local/lib/libpng*                /usr/local/lib/ |  | ||||||
| COPY --from=libpng             /usr/local/lib/pkgconfig              /usr/local/lib/pkgconfig |  | ||||||
|  |  | ||||||
| RUN yum install -y ninja-build |  | ||||||
| @ -1,124 +0,0 @@ | |||||||
| FROM quay.io/pypa/manylinux_2_28_s390x as base |  | ||||||
|  |  | ||||||
| # Language variables |  | ||||||
| ENV LC_ALL=C.UTF-8 |  | ||||||
| ENV LANG=C.UTF-8 |  | ||||||
| ENV LANGUAGE=C.UTF-8 |  | ||||||
|  |  | ||||||
| ARG DEVTOOLSET_VERSION=13 |  | ||||||
| # Install the OS packages needed to support all |  | ||||||
| # the binary builds (torch, vision, audio, text, data) |  | ||||||
| RUN yum -y install epel-release |  | ||||||
| RUN yum -y update |  | ||||||
| RUN yum install -y \ |  | ||||||
|   sudo \ |  | ||||||
|   autoconf \ |  | ||||||
|   automake \ |  | ||||||
|   bison \ |  | ||||||
|   bzip2 \ |  | ||||||
|   curl \ |  | ||||||
|   diffutils \ |  | ||||||
|   file \ |  | ||||||
|   git \ |  | ||||||
|   make \ |  | ||||||
|   patch \ |  | ||||||
|   perl \ |  | ||||||
|   unzip \ |  | ||||||
|   util-linux \ |  | ||||||
|   wget \ |  | ||||||
|   which \ |  | ||||||
|   xz \ |  | ||||||
|   yasm \ |  | ||||||
|   less \ |  | ||||||
|   zstd \ |  | ||||||
|   libgomp \ |  | ||||||
|   gcc-toolset-${DEVTOOLSET_VERSION}-gcc \ |  | ||||||
|   gcc-toolset-${DEVTOOLSET_VERSION}-gcc-c++ \ |  | ||||||
|   gcc-toolset-${DEVTOOLSET_VERSION}-binutils \ |  | ||||||
|   gcc-toolset-${DEVTOOLSET_VERSION}-gcc-gfortran \ |  | ||||||
|   cmake \ |  | ||||||
|   rust \ |  | ||||||
|   cargo \ |  | ||||||
|   llvm-devel \ |  | ||||||
|   libzstd-devel \ |  | ||||||
|   python3.12-devel \ |  | ||||||
|   python3.12-setuptools \ |  | ||||||
|   python3.12-pip \ |  | ||||||
|   python3-virtualenv \ |  | ||||||
|   python3.12-pyyaml \ |  | ||||||
|   python3.12-numpy \ |  | ||||||
|   python3.12-wheel \ |  | ||||||
|   python3.12-cryptography \ |  | ||||||
|   blas-devel \ |  | ||||||
|   openblas-devel \ |  | ||||||
|   lapack-devel \ |  | ||||||
|   atlas-devel \ |  | ||||||
|   libjpeg-devel \ |  | ||||||
|   libxslt-devel \ |  | ||||||
|   libxml2-devel \ |  | ||||||
|   openssl-devel \ |  | ||||||
|   valgrind |  | ||||||
|  |  | ||||||
| ENV PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH |  | ||||||
| ENV LD_LIBRARY_PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib64:/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib:$LD_LIBRARY_PATH |  | ||||||
|  |  | ||||||
| # git 2.36+ refuses to run git commands in repos owned by other users, |  | ||||||
| # which causes the version check to fail, as the pytorch repo is bind-mounted into the image. |  | ||||||
| # Override this behaviour by treating every folder as safe. |  | ||||||
| # For more details see https://github.com/pytorch/pytorch/issues/78659#issuecomment-1144107327 |  | ||||||
| RUN git config --global --add safe.directory "*" |  | ||||||
|  |  | ||||||
| # The installed Python doesn't include development components; rebuild it from scratch. |  | ||||||
| RUN /bin/rm -rf /opt/_internal /opt/python /usr/local/*/* |  | ||||||
|  |  | ||||||
| # EPEL for cmake |  | ||||||
| FROM base as patchelf |  | ||||||
| # Install patchelf |  | ||||||
| ADD ./common/install_patchelf.sh install_patchelf.sh |  | ||||||
| RUN bash ./install_patchelf.sh && rm install_patchelf.sh |  | ||||||
| RUN cp $(which patchelf) /patchelf |  | ||||||
|  |  | ||||||
| FROM patchelf as python |  | ||||||
| # build python |  | ||||||
| COPY manywheel/build_scripts /build_scripts |  | ||||||
| ADD ./common/install_cpython.sh /build_scripts/install_cpython.sh |  | ||||||
| ENV SSL_CERT_FILE= |  | ||||||
| RUN bash build_scripts/build.sh && rm -r build_scripts |  | ||||||
|  |  | ||||||
| FROM base as final |  | ||||||
| COPY --from=python             /opt/python                           /opt/python |  | ||||||
| COPY --from=python             /opt/_internal                        /opt/_internal |  | ||||||
| COPY --from=python             /opt/python/cp39-cp39/bin/auditwheel  /usr/local/bin/auditwheel |  | ||||||
| COPY --from=patchelf           /usr/local/bin/patchelf               /usr/local/bin/patchelf |  | ||||||
|  |  | ||||||
| RUN alternatives --set python /usr/bin/python3.12 |  | ||||||
| RUN alternatives --set python3 /usr/bin/python3.12 |  | ||||||
|  |  | ||||||
| RUN pip-3.12 install typing_extensions |  | ||||||
|  |  | ||||||
| ENTRYPOINT [] |  | ||||||
| CMD ["/bin/bash"] |  | ||||||
|  |  | ||||||
| # install test dependencies: |  | ||||||
| # - grpcio requires the system openssl; the bundled crypto fails to build |  | ||||||
| # - ml_dtypes 0.4.0 requires some fixes provided in later commits to build |  | ||||||
| RUN dnf install -y \ |  | ||||||
|   protobuf-devel \ |  | ||||||
|   protobuf-c-devel \ |  | ||||||
|   protobuf-lite-devel \ |  | ||||||
|   wget \ |  | ||||||
|   patch |  | ||||||
|  |  | ||||||
| RUN env GRPC_PYTHON_BUILD_SYSTEM_OPENSSL=True pip3 install grpcio==1.65.4 |  | ||||||
| RUN cd ~ && \ |  | ||||||
|   git clone https://github.com/jax-ml/ml_dtypes && \ |  | ||||||
|   cd ml_dtypes && \ |  | ||||||
|   git checkout v0.4.0 && \ |  | ||||||
|   git submodule update --init --recursive && \ |  | ||||||
|   wget https://github.com/jax-ml/ml_dtypes/commit/b969f76914d6b30676721bc92bf0f6021a0d1321.patch && \ |  | ||||||
|   wget https://github.com/jax-ml/ml_dtypes/commit/d4e6d035ecda073eab8bcf60f4eef572ee7087e6.patch && \ |  | ||||||
|   patch -p1 < b969f76914d6b30676721bc92bf0f6021a0d1321.patch && \ |  | ||||||
|   patch -p1 < d4e6d035ecda073eab8bcf60f4eef572ee7087e6.patch && \ |  | ||||||
|   python3 setup.py bdist_wheel && \ |  | ||||||
|   pip3 install dist/*.whl && \ |  | ||||||
|   rm -rf ml_dtypes |  | ||||||
| @ -1,159 +0,0 @@ | |||||||
| #!/usr/bin/env bash |  | ||||||
| # Script used only in CD pipeline |  | ||||||
|  |  | ||||||
| set -eou pipefail |  | ||||||
|  |  | ||||||
| TOPDIR=$(git rev-parse --show-toplevel) |  | ||||||
|  |  | ||||||
| image="$1" |  | ||||||
| shift |  | ||||||
|  |  | ||||||
| if [ -z "${image}" ]; then |  | ||||||
|   echo "Usage: $0 IMAGE" |  | ||||||
|   exit 1 |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| DOCKER_IMAGE="pytorch/${image}" |  | ||||||
|  |  | ||||||
| DOCKER_REGISTRY="${DOCKER_REGISTRY:-docker.io}" |  | ||||||
|  |  | ||||||
| GPU_ARCH_TYPE=${GPU_ARCH_TYPE:-cpu} |  | ||||||
| GPU_ARCH_VERSION=${GPU_ARCH_VERSION:-} |  | ||||||
| MANY_LINUX_VERSION=${MANY_LINUX_VERSION:-} |  | ||||||
| DOCKERFILE_SUFFIX=${DOCKERFILE_SUFFIX:-} |  | ||||||
| WITH_PUSH=${WITH_PUSH:-} |  | ||||||
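| # Illustrative invocation (sketch only; the image name and versions below are assumptions, |  | ||||||
| # not taken from CI configuration): |  | ||||||
| #   GPU_ARCH_TYPE=cuda-manylinux_2_28 GPU_ARCH_VERSION=12.4 WITH_PUSH=false \ |  | ||||||
| #       ./build.sh manylinux2_28-builder:cuda12.4 |  | ||||||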
|  |  | ||||||
| case ${GPU_ARCH_TYPE} in |  | ||||||
|     cpu) |  | ||||||
|         TARGET=cpu_final |  | ||||||
|         DOCKER_TAG=cpu |  | ||||||
|         GPU_IMAGE=centos:7 |  | ||||||
|         DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=9" |  | ||||||
|         ;; |  | ||||||
|     cpu-manylinux_2_28) |  | ||||||
|         TARGET=cpu_final |  | ||||||
|         DOCKER_TAG=cpu |  | ||||||
|         GPU_IMAGE=amd64/almalinux:8 |  | ||||||
|         DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=11" |  | ||||||
|         MANY_LINUX_VERSION="2_28" |  | ||||||
|         ;; |  | ||||||
|     cpu-aarch64) |  | ||||||
|         TARGET=final |  | ||||||
|         DOCKER_TAG=cpu-aarch64 |  | ||||||
|         GPU_IMAGE=arm64v8/centos:7 |  | ||||||
|         DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=10" |  | ||||||
|         MANY_LINUX_VERSION="aarch64" |  | ||||||
|         ;; |  | ||||||
|     cpu-aarch64-2_28) |  | ||||||
|         TARGET=final |  | ||||||
|         DOCKER_TAG=cpu-aarch64 |  | ||||||
|         GPU_IMAGE=arm64v8/almalinux:8 |  | ||||||
|         DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=11" |  | ||||||
|         MANY_LINUX_VERSION="2_28_aarch64" |  | ||||||
|         ;; |  | ||||||
|     cpu-cxx11-abi) |  | ||||||
|         TARGET=final |  | ||||||
|         DOCKER_TAG=cpu-cxx11-abi |  | ||||||
|         GPU_IMAGE="" |  | ||||||
|         DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=9" |  | ||||||
|         MANY_LINUX_VERSION="cxx11-abi" |  | ||||||
|         ;; |  | ||||||
|     cpu-s390x) |  | ||||||
|         TARGET=final |  | ||||||
|         DOCKER_TAG=cpu-s390x |  | ||||||
|         GPU_IMAGE=s390x/almalinux:8 |  | ||||||
|         DOCKER_GPU_BUILD_ARG="" |  | ||||||
|         MANY_LINUX_VERSION="s390x" |  | ||||||
|         ;; |  | ||||||
|     cuda) |  | ||||||
|         TARGET=cuda_final |  | ||||||
|         DOCKER_TAG=cuda${GPU_ARCH_VERSION} |  | ||||||
|         # Keep this up to date with the minimum version of CUDA we currently support |  | ||||||
|         GPU_IMAGE=centos:7 |  | ||||||
|         DOCKER_GPU_BUILD_ARG="--build-arg BASE_CUDA_VERSION=${GPU_ARCH_VERSION} --build-arg DEVTOOLSET_VERSION=9" |  | ||||||
|         ;; |  | ||||||
|     cuda-manylinux_2_28) |  | ||||||
|         TARGET=cuda_final |  | ||||||
|         DOCKER_TAG=cuda${GPU_ARCH_VERSION} |  | ||||||
|         GPU_IMAGE=amd64/almalinux:8 |  | ||||||
|         DOCKER_GPU_BUILD_ARG="--build-arg BASE_CUDA_VERSION=${GPU_ARCH_VERSION} --build-arg DEVTOOLSET_VERSION=11" |  | ||||||
|         MANY_LINUX_VERSION="2_28" |  | ||||||
|         ;; |  | ||||||
|     cuda-aarch64) |  | ||||||
|         TARGET=cuda_final |  | ||||||
|         DOCKER_TAG=cuda${GPU_ARCH_VERSION} |  | ||||||
|         GPU_IMAGE=arm64v8/centos:7 |  | ||||||
|         DOCKER_GPU_BUILD_ARG="--build-arg BASE_CUDA_VERSION=${GPU_ARCH_VERSION} --build-arg DEVTOOLSET_VERSION=11" |  | ||||||
|         MANY_LINUX_VERSION="aarch64" |  | ||||||
|         DOCKERFILE_SUFFIX="_cuda_aarch64" |  | ||||||
|         ;; |  | ||||||
|     rocm|rocm-manylinux_2_28) |  | ||||||
|         TARGET=rocm_final |  | ||||||
|         DOCKER_TAG=rocm${GPU_ARCH_VERSION} |  | ||||||
|         GPU_IMAGE=rocm/dev-centos-7:${GPU_ARCH_VERSION}-complete |  | ||||||
|         DEVTOOLSET_VERSION="9" |  | ||||||
|         if [ ${GPU_ARCH_TYPE} == "rocm-manylinux_2_28" ]; then |  | ||||||
|             MANY_LINUX_VERSION="2_28" |  | ||||||
|             DEVTOOLSET_VERSION="11" |  | ||||||
|             GPU_IMAGE=rocm/dev-almalinux-8:${GPU_ARCH_VERSION}-complete |  | ||||||
|         fi |  | ||||||
|         PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101" |  | ||||||
|         DOCKER_GPU_BUILD_ARG="--build-arg ROCM_VERSION=${GPU_ARCH_VERSION} --build-arg PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH} --build-arg DEVTOOLSET_VERSION=${DEVTOOLSET_VERSION}" |  | ||||||
|         ;; |  | ||||||
|     xpu) |  | ||||||
|         TARGET=xpu_final |  | ||||||
|         DOCKER_TAG=xpu |  | ||||||
|         GPU_IMAGE=amd64/almalinux:8 |  | ||||||
|         DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=11" |  | ||||||
|         MANY_LINUX_VERSION="2_28" |  | ||||||
|         ;; |  | ||||||
|     *) |  | ||||||
|         echo "ERROR: Unrecognized GPU_ARCH_TYPE: ${GPU_ARCH_TYPE}" |  | ||||||
|         exit 1 |  | ||||||
|         ;; |  | ||||||
| esac |  | ||||||
|  |  | ||||||
| IMAGES='' |  | ||||||
|  |  | ||||||
| if [[ -n ${MANY_LINUX_VERSION} && -z ${DOCKERFILE_SUFFIX} ]]; then |  | ||||||
|     DOCKERFILE_SUFFIX=_${MANY_LINUX_VERSION} |  | ||||||
| fi |  | ||||||
| ( |  | ||||||
|     set -x |  | ||||||
|  |  | ||||||
|     if [ "$(uname -m)" != "s390x" ]; then |  | ||||||
|         # TODO: Remove LimitNOFILE=1048576 patch once https://github.com/pytorch/test-infra/issues/5712 |  | ||||||
| # is resolved. This patch is required to fix Docker build timeouts on Amazon Linux 2023. |  | ||||||
|         sudo sed -i s/LimitNOFILE=infinity/LimitNOFILE=1048576/ /usr/lib/systemd/system/docker.service |  | ||||||
|         sudo systemctl daemon-reload |  | ||||||
|         sudo systemctl restart docker |  | ||||||
|     fi |  | ||||||
|  |  | ||||||
|     DOCKER_BUILDKIT=1 docker build  \ |  | ||||||
|         ${DOCKER_GPU_BUILD_ARG} \ |  | ||||||
|         --build-arg "GPU_IMAGE=${GPU_IMAGE}" \ |  | ||||||
|         --target "${TARGET}" \ |  | ||||||
|         -t "${DOCKER_IMAGE}" \ |  | ||||||
|         "$@" \ |  | ||||||
|         -f "${TOPDIR}/.ci/docker/manywheel/Dockerfile${DOCKERFILE_SUFFIX}" \ |  | ||||||
|         "${TOPDIR}/.ci/docker/" |  | ||||||
| ) |  | ||||||
|  |  | ||||||
| GITHUB_REF=${GITHUB_REF:-$(git symbolic-ref -q HEAD || git describe --tags --exact-match)} |  | ||||||
| GIT_BRANCH_NAME=${GITHUB_REF##*/} |  | ||||||
| GIT_COMMIT_SHA=${GITHUB_SHA:-$(git rev-parse HEAD)} |  | ||||||
| DOCKER_IMAGE_BRANCH_TAG=${DOCKER_IMAGE}-${GIT_BRANCH_NAME} |  | ||||||
| DOCKER_IMAGE_SHA_TAG=${DOCKER_IMAGE}-${GIT_COMMIT_SHA} |  | ||||||
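| # Example of the resulting tags (illustrative values only): with image=manylinux-builder:cpu |  | ||||||
| # on branch main at commit <sha>, and WITH_PUSH=true, the script would push |  | ||||||
| #   pytorch/manylinux-builder:cpu-main and pytorch/manylinux-builder:cpu-<sha> |  | ||||||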
|  |  | ||||||
| if [[ "${WITH_PUSH}" == true ]]; then |  | ||||||
|     ( |  | ||||||
|         set -x |  | ||||||
|         docker push "${DOCKER_IMAGE}" |  | ||||||
|         if [[ -n ${GITHUB_REF} ]]; then |  | ||||||
|             docker tag ${DOCKER_IMAGE} ${DOCKER_IMAGE_BRANCH_TAG} |  | ||||||
|             docker tag ${DOCKER_IMAGE} ${DOCKER_IMAGE_SHA_TAG} |  | ||||||
|             docker push "${DOCKER_IMAGE_BRANCH_TAG}" |  | ||||||
|             docker push "${DOCKER_IMAGE_SHA_TAG}" |  | ||||||
|         fi |  | ||||||
|     ) |  | ||||||
| fi |  | ||||||
| @ -1,118 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
| # Top-level build script called from Dockerfile |  | ||||||
| # Script used only in CD pipeline |  | ||||||
|  |  | ||||||
| # Stop at any error, show all commands |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| # openssl version to build, with expected sha256 hash of .tar.gz |  | ||||||
| # archive |  | ||||||
| OPENSSL_ROOT=openssl-1.1.1l |  | ||||||
| OPENSSL_HASH=0b7a3e5e59c34827fe0c3a74b7ec8baef302b98fa80088d7f9153aa16fa76bd1 |  | ||||||
| DEVTOOLS_HASH=a8ebeb4bed624700f727179e6ef771dafe47651131a00a78b342251415646acc |  | ||||||
| PATCHELF_HASH=d9afdff4baeacfbc64861454f368b7f2c15c44d245293f7587bbf726bfe722fb |  | ||||||
| CURL_ROOT=curl-7.73.0 |  | ||||||
| CURL_HASH=cf34fe0b07b800f1c01a499a6e8b2af548f6d0e044dca4a29d88a4bee146d131 |  | ||||||
| AUTOCONF_ROOT=autoconf-2.69 |  | ||||||
| AUTOCONF_HASH=954bd69b391edc12d6a4a51a2dd1476543da5c6bbf05a95b59dc0dd6fd4c2969 |  | ||||||
|  |  | ||||||
| # Dependencies for compiling Python that we want to remove from |  | ||||||
| # the final image after compiling Python |  | ||||||
| PYTHON_COMPILE_DEPS="zlib-devel bzip2-devel ncurses-devel sqlite-devel readline-devel tk-devel gdbm-devel libpcap-devel xz-devel libffi-devel" |  | ||||||
|  |  | ||||||
| if [ "$(uname -m)" != "s390x" ] ; then |  | ||||||
|     PYTHON_COMPILE_DEPS="${PYTHON_COMPILE_DEPS} db4-devel" |  | ||||||
| else |  | ||||||
|     PYTHON_COMPILE_DEPS="${PYTHON_COMPILE_DEPS} libdb-devel" |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| # Libraries that are allowed as part of the manylinux1 profile |  | ||||||
| MANYLINUX1_DEPS="glibc-devel libstdc++-devel glib2-devel libX11-devel libXext-devel libXrender-devel  mesa-libGL-devel libICE-devel libSM-devel ncurses-devel" |  | ||||||
|  |  | ||||||
| # Get build utilities |  | ||||||
| MY_DIR=$(dirname "${BASH_SOURCE[0]}") |  | ||||||
| source $MY_DIR/build_utils.sh |  | ||||||
|  |  | ||||||
| # Development tools and libraries |  | ||||||
| yum -y install bzip2 make git patch unzip bison yasm diffutils \ |  | ||||||
|     automake which file \ |  | ||||||
|     ${PYTHON_COMPILE_DEPS} |  | ||||||
|  |  | ||||||
| # Install newest autoconf |  | ||||||
| build_autoconf $AUTOCONF_ROOT $AUTOCONF_HASH |  | ||||||
| autoconf --version |  | ||||||
|  |  | ||||||
| # Compile the latest Python releases. |  | ||||||
| # (In order to have a proper SSL module, Python is compiled |  | ||||||
| # against a recent openssl [see env vars above], which is linked |  | ||||||
| # statically. We delete openssl afterwards.) |  | ||||||
| build_openssl $OPENSSL_ROOT $OPENSSL_HASH |  | ||||||
| /build_scripts/install_cpython.sh |  | ||||||
|  |  | ||||||
| PY39_BIN=/opt/python/cp39-cp39/bin |  | ||||||
|  |  | ||||||
| # Our openssl doesn't know how to find the system CA trust store |  | ||||||
| #   (https://github.com/pypa/manylinux/issues/53) |  | ||||||
| # And it's not clear how up-to-date that is anyway |  | ||||||
| # So let's just use the same one pip and everyone uses |  | ||||||
| $PY39_BIN/pip install certifi |  | ||||||
| ln -s $($PY39_BIN/python -c 'import certifi; print(certifi.where())') \ |  | ||||||
|       /opt/_internal/certs.pem |  | ||||||
| # If you modify this line you also have to modify the versions in the |  | ||||||
| # Dockerfiles: |  | ||||||
| export SSL_CERT_FILE=/opt/_internal/certs.pem |  | ||||||
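| # (Added sketch) Sanity-check that the certifi bundle symlinked above resolves to a |  | ||||||
| # non-empty file before the curl build below relies on it. |  | ||||||
| test -s "${SSL_CERT_FILE}" |  | ||||||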
|  |  | ||||||
| # Install newest curl |  | ||||||
| build_curl $CURL_ROOT $CURL_HASH |  | ||||||
| rm -rf /usr/local/include/curl /usr/local/lib/libcurl* /usr/local/lib/pkgconfig/libcurl.pc |  | ||||||
| hash -r |  | ||||||
| curl --version |  | ||||||
| curl-config --features |  | ||||||
|  |  | ||||||
| # Install patchelf (latest with unreleased bug fixes) |  | ||||||
| curl -sLOk https://nixos.org/releases/patchelf/patchelf-0.10/patchelf-0.10.tar.gz |  | ||||||
| # check_sha256sum patchelf-0.9njs2.tar.gz $PATCHELF_HASH |  | ||||||
| tar -xzf patchelf-0.10.tar.gz |  | ||||||
| (cd patchelf-0.10 && ./configure && make && make install) |  | ||||||
| rm -rf patchelf-0.10.tar.gz patchelf-0.10 |  | ||||||
|  |  | ||||||
| # Install latest pypi release of auditwheel |  | ||||||
| $PY39_BIN/pip install auditwheel |  | ||||||
| ln -s $PY39_BIN/auditwheel /usr/local/bin/auditwheel |  | ||||||
|  |  | ||||||
| # Clean up development headers and other unnecessary stuff for |  | ||||||
| # final image |  | ||||||
| yum -y erase wireless-tools gtk2 libX11 hicolor-icon-theme \ |  | ||||||
|     avahi freetype bitstream-vera-fonts \ |  | ||||||
|     ${PYTHON_COMPILE_DEPS} || true > /dev/null 2>&1 |  | ||||||
| yum -y install ${MANYLINUX1_DEPS} |  | ||||||
| yum -y clean all > /dev/null 2>&1 |  | ||||||
| yum list installed |  | ||||||
|  |  | ||||||
| # we don't need libpython*.a, and they're many megabytes |  | ||||||
| find /opt/_internal -name '*.a' -print0 | xargs -0 rm -f |  | ||||||
| # Strip what we can -- and ignore errors, because this just attempts to strip |  | ||||||
| # *everything*, including non-ELF files: |  | ||||||
| find /opt/_internal -type f -print0 \ |  | ||||||
|     | xargs -0 -n1 strip --strip-unneeded 2>/dev/null || true |  | ||||||
| # We do not need the Python test suites, or indeed the precompiled .pyc and |  | ||||||
| # .pyo files. Partially cribbed from: |  | ||||||
| #    https://github.com/docker-library/python/blob/master/3.4/slim/Dockerfile |  | ||||||
| find /opt/_internal \ |  | ||||||
|      \( -type d -a -name test -o -name tests \) \ |  | ||||||
|   -o \( -type f -a -name '*.pyc' -o -name '*.pyo' \) \ |  | ||||||
|   -print0 | xargs -0 rm -f |  | ||||||
|  |  | ||||||
| for PYTHON in /opt/python/*/bin/python; do |  | ||||||
|     # Smoke test to make sure that our Pythons work, and do indeed detect as |  | ||||||
|     # being manylinux compatible: |  | ||||||
|     $PYTHON $MY_DIR/manylinux1-check.py |  | ||||||
|     # Make sure that SSL cert checking works |  | ||||||
|     $PYTHON $MY_DIR/ssl-check.py |  | ||||||
| done |  | ||||||
|  |  | ||||||
| # Fix libc headers to remain compatible with C99 compilers. |  | ||||||
| find /usr/include/ -type f -exec sed -i 's/\bextern _*inline_*\b/extern __inline __attribute__ ((__gnu_inline__))/g' {} + |  | ||||||
|  |  | ||||||
| # Now we can delete our built SSL |  | ||||||
| rm -rf /usr/local/ssl |  | ||||||
| @ -1,91 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
| # Helper utilities for build |  | ||||||
| # Script used only in CD pipeline |  | ||||||
|  |  | ||||||
| OPENSSL_DOWNLOAD_URL=https://www.openssl.org/source/old/1.1.1/ |  | ||||||
| CURL_DOWNLOAD_URL=https://curl.askapache.com/download |  | ||||||
|  |  | ||||||
| AUTOCONF_DOWNLOAD_URL=https://ftp.gnu.org/gnu/autoconf |  | ||||||
|  |  | ||||||
|  |  | ||||||
| function check_var { |  | ||||||
|     if [ -z "$1" ]; then |  | ||||||
|         echo "required variable not defined" |  | ||||||
|         exit 1 |  | ||||||
|     fi |  | ||||||
| } |  | ||||||
|  |  | ||||||
|  |  | ||||||
| function do_openssl_build { |  | ||||||
|     ./config no-ssl2 no-shared -fPIC --prefix=/usr/local/ssl > /dev/null |  | ||||||
|     make > /dev/null |  | ||||||
|     make install > /dev/null |  | ||||||
| } |  | ||||||
|  |  | ||||||
|  |  | ||||||
| function check_sha256sum { |  | ||||||
|     local fname=$1 |  | ||||||
|     check_var ${fname} |  | ||||||
|     local sha256=$2 |  | ||||||
|     check_var ${sha256} |  | ||||||
|  |  | ||||||
|     echo "${sha256}  ${fname}" > ${fname}.sha256 |  | ||||||
|     sha256sum -c ${fname}.sha256 |  | ||||||
|     rm -f ${fname}.sha256 |  | ||||||
| } |  | ||||||
|  |  | ||||||
|  |  | ||||||
| function build_openssl { |  | ||||||
|     local openssl_fname=$1 |  | ||||||
|     check_var ${openssl_fname} |  | ||||||
|     local openssl_sha256=$2 |  | ||||||
|     check_var ${openssl_sha256} |  | ||||||
|     check_var ${OPENSSL_DOWNLOAD_URL} |  | ||||||
|     curl -sLO ${OPENSSL_DOWNLOAD_URL}/${openssl_fname}.tar.gz |  | ||||||
|     check_sha256sum ${openssl_fname}.tar.gz ${openssl_sha256} |  | ||||||
|     tar -xzf ${openssl_fname}.tar.gz |  | ||||||
|     (cd ${openssl_fname} && do_openssl_build) |  | ||||||
|     rm -rf ${openssl_fname} ${openssl_fname}.tar.gz |  | ||||||
| } |  | ||||||
|  |  | ||||||
|  |  | ||||||
| function do_curl_build { |  | ||||||
|     LIBS=-ldl ./configure --with-ssl --disable-shared > /dev/null |  | ||||||
|     make > /dev/null |  | ||||||
|     make install > /dev/null |  | ||||||
| } |  | ||||||
|  |  | ||||||
|  |  | ||||||
| function build_curl { |  | ||||||
|     local curl_fname=$1 |  | ||||||
|     check_var ${curl_fname} |  | ||||||
|     local curl_sha256=$2 |  | ||||||
|     check_var ${curl_sha256} |  | ||||||
|     check_var ${CURL_DOWNLOAD_URL} |  | ||||||
|     curl -sLO ${CURL_DOWNLOAD_URL}/${curl_fname}.tar.bz2 |  | ||||||
|     check_sha256sum ${curl_fname}.tar.bz2 ${curl_sha256} |  | ||||||
|     tar -jxf ${curl_fname}.tar.bz2 |  | ||||||
|     (cd ${curl_fname} && do_curl_build) |  | ||||||
|     rm -rf ${curl_fname} ${curl_fname}.tar.bz2 |  | ||||||
| } |  | ||||||
|  |  | ||||||
|  |  | ||||||
| function do_standard_install { |  | ||||||
|     ./configure > /dev/null |  | ||||||
|     make > /dev/null |  | ||||||
|     make install > /dev/null |  | ||||||
| } |  | ||||||
|  |  | ||||||
|  |  | ||||||
| function build_autoconf { |  | ||||||
|     local autoconf_fname=$1 |  | ||||||
|     check_var ${autoconf_fname} |  | ||||||
|     local autoconf_sha256=$2 |  | ||||||
|     check_var ${autoconf_sha256} |  | ||||||
|     check_var ${AUTOCONF_DOWNLOAD_URL} |  | ||||||
|     curl -sLO ${AUTOCONF_DOWNLOAD_URL}/${autoconf_fname}.tar.gz |  | ||||||
|     check_sha256sum ${autoconf_fname}.tar.gz ${autoconf_sha256} |  | ||||||
|     tar -zxf ${autoconf_fname}.tar.gz |  | ||||||
|     (cd ${autoconf_fname} && do_standard_install) |  | ||||||
|     rm -rf ${autoconf_fname} ${autoconf_fname}.tar.gz |  | ||||||
| } |  | ||||||
| @ -1,60 +0,0 @@ | |||||||
| # Logic copied from PEP 513 |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def is_manylinux1_compatible(): |  | ||||||
|     # Only Linux, and only x86-64 / i686 / s390x |  | ||||||
|     from distutils.util import get_platform |  | ||||||
|  |  | ||||||
|     if get_platform() not in ["linux-x86_64", "linux-i686", "linux-s390x"]: |  | ||||||
|         return False |  | ||||||
|  |  | ||||||
|     # Check for presence of _manylinux module |  | ||||||
|     try: |  | ||||||
|         import _manylinux |  | ||||||
|  |  | ||||||
|         return bool(_manylinux.manylinux1_compatible) |  | ||||||
|     except (ImportError, AttributeError): |  | ||||||
|         # Fall through to heuristic check below |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|     # Check glibc version. CentOS 5 uses glibc 2.5. |  | ||||||
|     return have_compatible_glibc(2, 5) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def have_compatible_glibc(major, minimum_minor): |  | ||||||
|     import ctypes |  | ||||||
|  |  | ||||||
|     process_namespace = ctypes.CDLL(None) |  | ||||||
|     try: |  | ||||||
|         gnu_get_libc_version = process_namespace.gnu_get_libc_version |  | ||||||
|     except AttributeError: |  | ||||||
|         # Symbol doesn't exist -> therefore, we are not linked to |  | ||||||
|         # glibc. |  | ||||||
|         return False |  | ||||||
|  |  | ||||||
|     # Call gnu_get_libc_version, which returns a string like "2.5". |  | ||||||
|     gnu_get_libc_version.restype = ctypes.c_char_p |  | ||||||
|     version_str = gnu_get_libc_version() |  | ||||||
|     # py2 / py3 compatibility: |  | ||||||
|     if not isinstance(version_str, str): |  | ||||||
|         version_str = version_str.decode("ascii") |  | ||||||
|  |  | ||||||
|     # Parse string and check against requested version. |  | ||||||
|     version = [int(piece) for piece in version_str.split(".")] |  | ||||||
|     assert len(version) == 2 |  | ||||||
|     if major != version[0]: |  | ||||||
|         return False |  | ||||||
|     if minimum_minor > version[1]: |  | ||||||
|         return False |  | ||||||
|     return True |  | ||||||
|  |  | ||||||
|  |  | ||||||
| import sys |  | ||||||
|  |  | ||||||
|  |  | ||||||
| if is_manylinux1_compatible(): |  | ||||||
|     print(f"{sys.executable} is manylinux1 compatible") |  | ||||||
|     sys.exit(0) |  | ||||||
| else: |  | ||||||
|     print(f"{sys.executable} is NOT manylinux1 compatible") |  | ||||||
|     sys.exit(1) |  | ||||||
| @ -1,31 +0,0 @@ | |||||||
| # cf. https://github.com/pypa/manylinux/issues/53 |  | ||||||
|  |  | ||||||
| import sys |  | ||||||
| from urllib.request import urlopen |  | ||||||
|  |  | ||||||
|  |  | ||||||
| GOOD_SSL = "https://google.com" |  | ||||||
| BAD_SSL = "https://self-signed.badssl.com" |  | ||||||
|  |  | ||||||
|  |  | ||||||
| print("Testing SSL certificate checking for Python:", sys.version) |  | ||||||
|  |  | ||||||
| if sys.version_info[:2] < (2, 7) or sys.version_info[:2] < (3, 4): |  | ||||||
|     print("This version never checks SSL certs; skipping tests") |  | ||||||
|     sys.exit(0) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| EXC = OSError |  | ||||||
|  |  | ||||||
| print(f"Connecting to {GOOD_SSL} should work") |  | ||||||
| urlopen(GOOD_SSL) |  | ||||||
| print("...it did, yay.") |  | ||||||
|  |  | ||||||
| print(f"Connecting to {BAD_SSL} should fail") |  | ||||||
| try: |  | ||||||
|     urlopen(BAD_SSL) |  | ||||||
|     # If we get here then we failed: |  | ||||||
|     print("...it DIDN'T!!!!!11!!1one!") |  | ||||||
|     sys.exit(1) |  | ||||||
| except EXC: |  | ||||||
|     print("...it did, yay.") |  | ||||||
| @ -5,7 +5,7 @@ | |||||||
| #Pinned versions: 1.6 | #Pinned versions: 1.6 | ||||||
| #test that import: | #test that import: | ||||||
|  |  | ||||||
| boto3==1.35.42 | boto3==1.19.12 | ||||||
| #Description: AWS SDK for python | #Description: AWS SDK for python | ||||||
| #Pinned versions: 1.19.12, 1.16.34 | #Pinned versions: 1.19.12, 1.16.34 | ||||||
| #test that import: | #test that import: | ||||||
| @ -30,14 +30,9 @@ dill==0.3.7 | |||||||
| #Pinned versions: 0.3.7 | #Pinned versions: 0.3.7 | ||||||
| #test that import: dynamo/test_replay_record.py test_dataloader.py test_datapipe.py test_serialization.py | #test that import: dynamo/test_replay_record.py test_dataloader.py test_datapipe.py test_serialization.py | ||||||
|  |  | ||||||
| expecttest==0.3.0 | expecttest==0.1.6 | ||||||
| #Description: method for writing tests where test framework auto populates | #Description: method for writing tests where test framework auto populates | ||||||
| # the expected output based on previous runs | # the expected output based on previous runs | ||||||
| #Pinned versions: 0.3.0 |  | ||||||
| #test that import: |  | ||||||
|  |  | ||||||
| fbscribelogger==0.1.7 |  | ||||||
| #Description: write to scribe from authenticated jobs on CI |  | ||||||
| #Pinned versions: 0.1.6 | #Pinned versions: 0.1.6 | ||||||
| #test that import: | #test that import: | ||||||
|  |  | ||||||
| @ -90,10 +85,10 @@ librosa>=0.6.2 ; python_version < "3.11" | |||||||
| #Pinned versions: | #Pinned versions: | ||||||
| #test that import: | #test that import: | ||||||
|  |  | ||||||
| mypy==1.13.0 | mypy==1.8.0 | ||||||
| # Pin MyPy version because new errors are likely to appear with each release | # Pin MyPy version because new errors are likely to appear with each release | ||||||
| #Description: linter | #Description: linter | ||||||
| #Pinned versions: 1.10.0 | #Pinned versions: 1.8.0 | ||||||
| #test that import: test_typing.py, test_type_hints.py | #test that import: test_typing.py, test_type_hints.py | ||||||
|  |  | ||||||
| networkx==2.8.8 | networkx==2.8.8 | ||||||
| @ -109,7 +104,7 @@ networkx==2.8.8 | |||||||
| #test that import: run_test.py, test_cpp_extensions_aot.py,test_determination.py | #test that import: run_test.py, test_cpp_extensions_aot.py,test_determination.py | ||||||
|  |  | ||||||
| numba==0.49.0 ; python_version < "3.9" | numba==0.49.0 ; python_version < "3.9" | ||||||
| numba==0.55.2 ; python_version == "3.9" | numba==0.54.1 ; python_version == "3.9" | ||||||
| numba==0.55.2 ; python_version == "3.10" | numba==0.55.2 ; python_version == "3.10" | ||||||
| #Description: Just-In-Time Compiler for Numerical Functions | #Description: Just-In-Time Compiler for Numerical Functions | ||||||
| #Pinned versions: 0.54.1, 0.49.0, <=0.49.1 | #Pinned versions: 0.54.1, 0.49.0, <=0.49.1 | ||||||
| @ -118,7 +113,7 @@ numba==0.55.2 ; python_version == "3.10" | |||||||
|  |  | ||||||
| #numpy | #numpy | ||||||
| #Description: Provides N-dimensional arrays and linear algebra | #Description: Provides N-dimensional arrays and linear algebra | ||||||
| #Pinned versions: 1.26.2 | #Pinned versions: 1.20 | ||||||
| #test that import: test_view_ops.py, test_unary_ufuncs.py, test_type_promotion.py, | #test that import: test_view_ops.py, test_unary_ufuncs.py, test_type_promotion.py, | ||||||
| #test_type_info.py, test_torch.py, test_tensorexpr_pybind.py, test_tensorexpr.py, | #test_type_info.py, test_torch.py, test_tensorexpr_pybind.py, test_tensorexpr.py, | ||||||
| #test_tensorboard.py, test_tensor_creation_ops.py, test_static_runtime.py, | #test_tensorboard.py, test_tensor_creation_ops.py, test_static_runtime.py, | ||||||
| @ -128,12 +123,6 @@ numba==0.55.2 ; python_version == "3.10" | |||||||
| #test_nn.py, test_namedtensor.py, test_linalg.py, test_jit_cuda_fuser.py, | #test_nn.py, test_namedtensor.py, test_linalg.py, test_jit_cuda_fuser.py, | ||||||
| #test_jit.py, test_indexing.py, test_datapipe.py, test_dataloader.py, | #test_jit.py, test_indexing.py, test_datapipe.py, test_dataloader.py, | ||||||
| #test_binary_ufuncs.py | #test_binary_ufuncs.py | ||||||
| numpy==1.22.4; python_version == "3.9" or python_version == "3.10" |  | ||||||
| numpy==1.26.2; python_version == "3.11" or python_version == "3.12" |  | ||||||
| numpy==2.1.2; python_version >= "3.13" |  | ||||||
|  |  | ||||||
| pandas==2.0.3; python_version < "3.13" |  | ||||||
| pandas==2.2.3; python_version >= "3.13" |  | ||||||
|  |  | ||||||
| #onnxruntime | #onnxruntime | ||||||
| #Description: scoring engine for Open Neural Network Exchange (ONNX) models | #Description: scoring engine for Open Neural Network Exchange (ONNX) models | ||||||
| @ -145,9 +134,9 @@ opt-einsum==3.3 | |||||||
| #Pinned versions: 3.3 | #Pinned versions: 3.3 | ||||||
| #test that import: test_linalg.py | #test that import: test_linalg.py | ||||||
|  |  | ||||||
| optree==0.13.0 | optree==0.9.1 | ||||||
| #Description: A library for tree manipulation | #Description: A library for tree manipulation | ||||||
| #Pinned versions: 0.13.0 | #Pinned versions: 0.9.1 | ||||||
| #test that import: test_vmap.py, test_aotdispatch.py, test_dynamic_shapes.py, | #test that import: test_vmap.py, test_aotdispatch.py, test_dynamic_shapes.py, | ||||||
| #test_pytree.py, test_ops.py, test_control_flow.py, test_modules.py, | #test_pytree.py, test_ops.py, test_control_flow.py, test_modules.py, | ||||||
| #common_utils.py, test_eager_transforms.py, test_python_dispatch.py, | #common_utils.py, test_eager_transforms.py, test_python_dispatch.py, | ||||||
| @ -158,9 +147,9 @@ optree==0.13.0 | |||||||
| #test_pointwise_ops.py, test_dtensor_ops.py, test_torchinductor.py, test_fx.py, | #test_pointwise_ops.py, test_dtensor_ops.py, test_torchinductor.py, test_fx.py, | ||||||
| #test_fake_tensor.py, test_mps.py | #test_fake_tensor.py, test_mps.py | ||||||
|  |  | ||||||
| pillow==11.0.0 | pillow==10.2.0 | ||||||
| #Description:  Python Imaging Library fork | #Description:  Python Imaging Library fork | ||||||
| #Pinned versions: 10.3.0 | #Pinned versions: 10.2.0 | ||||||
| #test that import: | #test that import: | ||||||
|  |  | ||||||
| protobuf==3.20.2 | protobuf==3.20.2 | ||||||
| @ -193,11 +182,6 @@ pytest-rerunfailures>=10.3 | |||||||
| #Pinned versions: | #Pinned versions: | ||||||
| #test that import: | #test that import: | ||||||
|  |  | ||||||
| pytest-subtests==0.13.1 |  | ||||||
| #Description: plugin for subtest support |  | ||||||
| #Pinned versions: |  | ||||||
| #test that import: |  | ||||||
|  |  | ||||||
| #pytest-benchmark | #pytest-benchmark | ||||||
| #Description: fixture for benchmarking code | #Description: fixture for benchmarking code | ||||||
| #Pinned versions: 3.2.3 | #Pinned versions: 3.2.3 | ||||||
| @ -234,7 +218,7 @@ pygments==2.15.0 | |||||||
| #test that import: | #test that import: | ||||||
|  |  | ||||||
| scikit-image==0.19.3 ; python_version < "3.10" | scikit-image==0.19.3 ; python_version < "3.10" | ||||||
| scikit-image==0.22.0 ; python_version >= "3.10" | scikit-image==0.20.0 ; python_version >= "3.10" | ||||||
| #Description: image processing routines | #Description: image processing routines | ||||||
| #Pinned versions: | #Pinned versions: | ||||||
| #test that import: test_nn.py | #test that import: test_nn.py | ||||||
| @ -244,11 +228,13 @@ scikit-image==0.22.0 ; python_version >= "3.10" | |||||||
| #Pinned versions: 0.20.3 | #Pinned versions: 0.20.3 | ||||||
| #test that import: | #test that import: | ||||||
|  |  | ||||||
| scipy==1.10.1 ; python_version <= "3.11" | scipy==1.6.3 ; python_version < "3.10" | ||||||
| scipy==1.14.1 ; python_version >= "3.12" | scipy==1.8.1 ; python_version == "3.10" | ||||||
|  | scipy==1.10.1 ; python_version == "3.11" | ||||||
|  | scipy==1.12.0 ; python_version == "3.12" | ||||||
| # Pin SciPy because of failing distribution tests (see #60347) | # Pin SciPy because of failing distribution tests (see #60347) | ||||||
| #Description: scientific python | #Description: scientific python | ||||||
| #Pinned versions: 1.10.1 | #Pinned versions: 1.6.3 | ||||||
| #test that import: test_unary_ufuncs.py, test_torch.py,test_tensor_creation_ops.py | #test that import: test_unary_ufuncs.py, test_torch.py,test_tensor_creation_ops.py | ||||||
| #test_spectral_ops.py, test_sparse_csr.py, test_reductions.py,test_nn.py | #test_spectral_ops.py, test_sparse_csr.py, test_reductions.py,test_nn.py | ||||||
| #test_linalg.py, test_binary_ufuncs.py | #test_linalg.py, test_binary_ufuncs.py | ||||||
| @ -264,7 +250,7 @@ tb-nightly==2.13.0a20230426 | |||||||
| #test that import: | #test that import: | ||||||
|  |  | ||||||
| # needed by torchgen utils | # needed by torchgen utils | ||||||
| typing-extensions>=4.10.0 | typing-extensions | ||||||
| #Description: type hints for python | #Description: type hints for python | ||||||
| #Pinned versions: | #Pinned versions: | ||||||
| #test that import: | #test that import: | ||||||
| @ -279,24 +265,25 @@ unittest-xml-reporting<=3.2.0,>=2.0.0 | |||||||
| #Pinned versions: | #Pinned versions: | ||||||
| #test that import: | #test that import: | ||||||
|  |  | ||||||
| #lintrunner is supported on aarch64-linux only from 0.12.4 version | #wheel not found on aarch64, and source build requires rust | ||||||
| lintrunner==0.12.7 | lintrunner==0.10.7 ; platform_machine == "x86_64" | ||||||
| #Description: all about linters! | #Description: all about linters! | ||||||
| #Pinned versions: 0.12.7 | #Pinned versions: 0.10.7 | ||||||
| #test that import: | #test that import: | ||||||
|  |  | ||||||
| redis>=4.0.0 | rockset==1.0.3 | ||||||
| #Description: redis database | #Description: queries Rockset | ||||||
| #test that import: anything that tests OSS caching/mocking (inductor/test_codecache.py, inductor/test_max_autotune.py) | #Pinned versions: 1.0.3 | ||||||
|  | #test that import: | ||||||
|  |  | ||||||
| ghstack==0.8.0 | ghstack==0.8.0 | ||||||
| #Description: ghstack tool | #Description: ghstack tool | ||||||
| #Pinned versions: 0.8.0 | #Pinned versions: 0.8.0 | ||||||
| #test that import: | #test that import: | ||||||
|  |  | ||||||
| jinja2==3.1.4 | jinja2==3.1.3 | ||||||
| #Description: jinja2 template engine | #Description: jinja2 template engine | ||||||
| #Pinned versions: 3.1.4 | #Pinned versions: 3.1.3 | ||||||
| #test that import: | #test that import: | ||||||
|  |  | ||||||
| pytest-cpp==2.3.0 | pytest-cpp==2.3.0 | ||||||
| @ -309,65 +296,19 @@ z3-solver==4.12.2.0 | |||||||
| #Pinned versions: | #Pinned versions: | ||||||
| #test that import: | #test that import: | ||||||
|  |  | ||||||
| tensorboard==2.13.0 ; python_version < "3.13" | tensorboard==2.13.0 | ||||||
| tensorboard==2.18.0 ; python_version >= "3.13" |  | ||||||
| #Description: Also included in .ci/docker/requirements-docs.txt | #Description: Also included in .ci/docker/requirements-docs.txt | ||||||
| #Pinned versions: | #Pinned versions: | ||||||
| #test that import: test_tensorboard | #test that import: test_tensorboard | ||||||
|  |  | ||||||
| pywavelets==1.4.1 ; python_version < "3.12" | pywavelets==1.4.1 ; python_version < "3.12" | ||||||
| pywavelets==1.7.0 ; python_version >= "3.12" | pywavelets==1.5.0 ; python_version >= "3.12" | ||||||
| #Description: This is a requirement of scikit-image, we need to pin | #Description: This is a requirement of scikit-image, we need to pin | ||||||
| # it here because 1.5.0 conflicts with numpy 1.21.2 used in CI | # it here because 1.5.0 conflicts with numpy 1.21.2 used in CI | ||||||
| #Pinned versions: 1.4.1 | #Pinned versions: 1.4.1 | ||||||
| #test that import: | #test that import: | ||||||
|  |  | ||||||
| lxml==5.3.0 | lxml==5.0.0. | ||||||
| #Description: This is a requirement of unittest-xml-reporting | #Description: This is a requirement of unittest-xml-reporting | ||||||
|  |  | ||||||
| # Python-3.9 binaries | # Python-3.9 binaries | ||||||
|  |  | ||||||
| PyGithub==2.3.0 |  | ||||||
|  |  | ||||||
| sympy==1.13.1 ; python_version >= "3.9" |  | ||||||
| #Description: Required by coremltools, also pinned in .github/requirements/pip-requirements-macOS.txt |  | ||||||
| #Pinned versions: |  | ||||||
| #test that import: |  | ||||||
|  |  | ||||||
| onnx==1.17.0 |  | ||||||
| #Description: Required by mypy and test_public_bindings.py when checking torch.onnx._internal |  | ||||||
| #Pinned versions: |  | ||||||
| #test that import: |  | ||||||
|  |  | ||||||
| onnxscript==0.1.0.dev20240817 |  | ||||||
| #Description: Required by mypy and test_public_bindings.py when checking torch.onnx._internal |  | ||||||
| #Pinned versions: |  | ||||||
| #test that import: |  | ||||||
|  |  | ||||||
| parameterized==0.8.1 |  | ||||||
| #Description: Parameterizes unittests, both the tests themselves and the entire testing class |  | ||||||
| #Pinned versions: |  | ||||||
| #test that import: |  | ||||||
|  |  | ||||||
| #Description: required for testing torch/distributed/_tools/sac_estimator.py |  | ||||||
| #Pinned versions: 1.24.0 |  | ||||||
| #test that import: test_sac_estimator.py |  | ||||||
|  |  | ||||||
| pwlf==2.2.1 ; python_version >= "3.8" |  | ||||||
| #Description: required for testing torch/distributed/_tools/sac_estimator.py |  | ||||||
| #Pinned versions: 2.2.1 |  | ||||||
| #test that import: test_sac_estimator.py |  | ||||||
|  |  | ||||||
|  |  | ||||||
| # To build PyTorch itself |  | ||||||
| astunparse |  | ||||||
| PyYAML |  | ||||||
| setuptools |  | ||||||
|  |  | ||||||
| ninja==1.11.1 ; platform_machine == "aarch64" |  | ||||||
| scons==4.5.2 ; platform_machine == "aarch64" |  | ||||||
|  |  | ||||||
| pulp==2.9.0 ; python_version >= "3.8" |  | ||||||
| #Description: required for testing ILP formulation under torch/distributed/_tools |  | ||||||
| #Pinned versions: 2.9.0 |  | ||||||
| #test that import: test_sac_ilp.py |  | ||||||
|  | |||||||
| @ -14,8 +14,7 @@ matplotlib==3.5.3 | |||||||
| #Description: This is used to generate PyTorch docs | #Description: This is used to generate PyTorch docs | ||||||
| #Pinned versions: 3.5.3 | #Pinned versions: 3.5.3 | ||||||
|  |  | ||||||
| tensorboard==2.13.0 ; python_version < "3.13" | tensorboard==2.13.0 | ||||||
| tensorboard==2.18.0 ; python_version >= "3.13" |  | ||||||
| #Description: This is used to generate PyTorch docs | #Description: This is used to generate PyTorch docs | ||||||
| #Pinned versions: 2.13.0 | #Pinned versions: 2.13.0 | ||||||
|  |  | ||||||
|  | |||||||
| @ -1 +1 @@ | |||||||
| 3.2.0 | 2.3.1 | ||||||
|  | |||||||
| @ -30,8 +30,7 @@ ARG CONDA_CMAKE | |||||||
| COPY requirements-ci.txt /opt/conda/requirements-ci.txt | COPY requirements-ci.txt /opt/conda/requirements-ci.txt | ||||||
| COPY ./common/install_conda.sh install_conda.sh | COPY ./common/install_conda.sh install_conda.sh | ||||||
| COPY ./common/common_utils.sh common_utils.sh | COPY ./common/common_utils.sh common_utils.sh | ||||||
| COPY ./common/install_magma_conda.sh install_magma_conda.sh | RUN bash ./install_conda.sh && rm install_conda.sh common_utils.sh /opt/conda/requirements-ci.txt | ||||||
| RUN bash ./install_conda.sh && rm install_conda.sh install_magma_conda.sh common_utils.sh /opt/conda/requirements-ci.txt |  | ||||||
|  |  | ||||||
| # Install gcc | # Install gcc | ||||||
| ARG GCC_VERSION | ARG GCC_VERSION | ||||||
| @ -57,7 +56,7 @@ RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi | |||||||
| RUN rm install_db.sh | RUN rm install_db.sh | ||||||
| ENV INSTALLED_DB ${DB} | ENV INSTALLED_DB ${DB} | ||||||
|  |  | ||||||
| # (optional) Install vision packages like OpenCV | # (optional) Install vision packages like OpenCV and ffmpeg | ||||||
| ARG VISION | ARG VISION | ||||||
| COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./ | COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./ | ||||||
| RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi | RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi | ||||||
| @ -81,8 +80,6 @@ RUN bash ./install_openssl.sh | |||||||
| ENV OPENSSL_DIR /opt/openssl | ENV OPENSSL_DIR /opt/openssl | ||||||
|  |  | ||||||
| ARG INDUCTOR_BENCHMARKS | ARG INDUCTOR_BENCHMARKS | ||||||
| ARG ANACONDA_PYTHON_VERSION |  | ||||||
| ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION |  | ||||||
| COPY ./common/install_inductor_benchmark_deps.sh install_inductor_benchmark_deps.sh | COPY ./common/install_inductor_benchmark_deps.sh install_inductor_benchmark_deps.sh | ||||||
| COPY ./common/common_utils.sh common_utils.sh | COPY ./common/common_utils.sh common_utils.sh | ||||||
| COPY ci_commit_pins/huggingface.txt huggingface.txt | COPY ci_commit_pins/huggingface.txt huggingface.txt | ||||||
| @ -106,14 +103,6 @@ COPY triton_version.txt triton_version.txt | |||||||
| RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi | RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi | ||||||
| RUN rm install_triton.sh common_utils.sh triton.txt triton_version.txt | RUN rm install_triton.sh common_utils.sh triton.txt triton_version.txt | ||||||
|  |  | ||||||
| ARG HALIDE |  | ||||||
| # Build and install halide |  | ||||||
| COPY ./common/install_halide.sh install_halide.sh |  | ||||||
| COPY ./common/common_utils.sh common_utils.sh |  | ||||||
| COPY ci_commit_pins/halide.txt halide.txt |  | ||||||
| RUN if [ -n "${HALIDE}" ]; then bash ./install_halide.sh; fi |  | ||||||
| RUN rm install_halide.sh common_utils.sh halide.txt |  | ||||||
|  |  | ||||||
| # Install ccache/sccache (do this last, so we get priority in PATH) | # Install ccache/sccache (do this last, so we get priority in PATH) | ||||||
| COPY ./common/install_cache.sh install_cache.sh | COPY ./common/install_cache.sh install_cache.sh | ||||||
| ENV PATH /opt/cache/bin:$PATH | ENV PATH /opt/cache/bin:$PATH | ||||||
| @ -150,7 +139,7 @@ COPY --from=pytorch/llvm:9.0.1 /opt/llvm /opt/llvm | |||||||
| ARG CUDNN_VERSION | ARG CUDNN_VERSION | ||||||
| ARG CUDA_VERSION | ARG CUDA_VERSION | ||||||
| COPY ./common/install_cudnn.sh install_cudnn.sh | COPY ./common/install_cudnn.sh install_cudnn.sh | ||||||
| RUN if [ -n "${CUDNN_VERSION}" ]; then bash install_cudnn.sh; fi | RUN if [ "${CUDNN_VERSION}" -eq 8 ]; then bash install_cudnn.sh; fi | ||||||
| RUN rm install_cudnn.sh | RUN rm install_cudnn.sh | ||||||
|  |  | ||||||
| # Install CUSPARSELT | # Install CUSPARSELT | ||||||
| @ -159,17 +148,10 @@ COPY ./common/install_cusparselt.sh install_cusparselt.sh | |||||||
| RUN bash install_cusparselt.sh | RUN bash install_cusparselt.sh | ||||||
| RUN rm install_cusparselt.sh | RUN rm install_cusparselt.sh | ||||||
|  |  | ||||||
| # Install CUDSS |  | ||||||
| ARG CUDA_VERSION |  | ||||||
| COPY ./common/install_cudss.sh install_cudss.sh |  | ||||||
| RUN bash install_cudss.sh |  | ||||||
| RUN rm install_cudss.sh |  | ||||||
|  |  | ||||||
| # Delete /usr/local/cuda-11.X/cuda-11.X symlinks | # Delete /usr/local/cuda-11.X/cuda-11.X symlinks | ||||||
| RUN if [ -h /usr/local/cuda-11.6/cuda-11.6 ]; then rm /usr/local/cuda-11.6/cuda-11.6; fi | RUN if [ -h /usr/local/cuda-11.6/cuda-11.6 ]; then rm /usr/local/cuda-11.6/cuda-11.6; fi | ||||||
| RUN if [ -h /usr/local/cuda-11.7/cuda-11.7 ]; then rm /usr/local/cuda-11.7/cuda-11.7; fi | RUN if [ -h /usr/local/cuda-11.7/cuda-11.7 ]; then rm /usr/local/cuda-11.7/cuda-11.7; fi | ||||||
| RUN if [ -h /usr/local/cuda-12.1/cuda-12.1 ]; then rm /usr/local/cuda-12.1/cuda-12.1; fi | RUN if [ -h /usr/local/cuda-12.1/cuda-12.1 ]; then rm /usr/local/cuda-12.1/cuda-12.1; fi | ||||||
| RUN if [ -h /usr/local/cuda-12.4/cuda-12.4 ]; then rm /usr/local/cuda-12.4/cuda-12.4; fi |  | ||||||
|  |  | ||||||
| USER jenkins | USER jenkins | ||||||
| CMD ["bash"] | CMD ["bash"] | ||||||
|  | |||||||
| @ -53,7 +53,7 @@ RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi | |||||||
| RUN rm install_db.sh | RUN rm install_db.sh | ||||||
| ENV INSTALLED_DB ${DB} | ENV INSTALLED_DB ${DB} | ||||||
|  |  | ||||||
| # (optional) Install vision packages like OpenCV | # (optional) Install vision packages like OpenCV and ffmpeg | ||||||
| ARG VISION | ARG VISION | ||||||
| COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./ | COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./ | ||||||
| RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi | RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi | ||||||
| @ -68,8 +68,6 @@ RUN rm install_rocm.sh | |||||||
| COPY ./common/install_rocm_magma.sh install_rocm_magma.sh | COPY ./common/install_rocm_magma.sh install_rocm_magma.sh | ||||||
| RUN bash ./install_rocm_magma.sh | RUN bash ./install_rocm_magma.sh | ||||||
| RUN rm install_rocm_magma.sh | RUN rm install_rocm_magma.sh | ||||||
| ADD ./common/install_miopen.sh install_miopen.sh |  | ||||||
| RUN bash ./install_miopen.sh ${ROCM_VERSION} && rm install_miopen.sh |  | ||||||
| ENV ROCM_PATH /opt/rocm | ENV ROCM_PATH /opt/rocm | ||||||
| ENV PATH /opt/rocm/bin:$PATH | ENV PATH /opt/rocm/bin:$PATH | ||||||
| ENV PATH /opt/rocm/hcc/bin:$PATH | ENV PATH /opt/rocm/hcc/bin:$PATH | ||||||
| @ -80,11 +78,6 @@ ENV MAGMA_HOME /opt/rocm/magma | |||||||
| ENV LANG C.UTF-8 | ENV LANG C.UTF-8 | ||||||
| ENV LC_ALL C.UTF-8 | ENV LC_ALL C.UTF-8 | ||||||
|  |  | ||||||
| # Install amdsmi |  | ||||||
| COPY ./common/install_amdsmi.sh install_amdsmi.sh |  | ||||||
| RUN bash ./install_amdsmi.sh |  | ||||||
| RUN rm install_amdsmi.sh |  | ||||||
|  |  | ||||||
| # (optional) Install non-default CMake version | # (optional) Install non-default CMake version | ||||||
| ARG CMAKE_VERSION | ARG CMAKE_VERSION | ||||||
| COPY ./common/install_cmake.sh install_cmake.sh | COPY ./common/install_cmake.sh install_cmake.sh | ||||||
| @ -102,17 +95,10 @@ ARG TRITON | |||||||
| # try to reach out to S3, which docker build runners don't have access | # try to reach out to S3, which docker build runners don't have access | ||||||
| COPY ./common/install_triton.sh install_triton.sh | COPY ./common/install_triton.sh install_triton.sh | ||||||
| COPY ./common/common_utils.sh common_utils.sh | COPY ./common/common_utils.sh common_utils.sh | ||||||
| COPY ci_commit_pins/triton.txt triton.txt | COPY ci_commit_pins/triton-rocm.txt triton-rocm.txt | ||||||
| COPY triton_version.txt triton_version.txt | COPY triton_version.txt triton_version.txt | ||||||
| RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi | RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi | ||||||
| RUN rm install_triton.sh common_utils.sh triton.txt triton_version.txt | RUN rm install_triton.sh common_utils.sh triton-rocm.txt triton_version.txt | ||||||
|  |  | ||||||
| # Install AOTriton |  | ||||||
| COPY ./aotriton_version.txt aotriton_version.txt |  | ||||||
| COPY ./common/common_utils.sh common_utils.sh |  | ||||||
| COPY ./common/install_aotriton.sh install_aotriton.sh |  | ||||||
| RUN ["/bin/bash", "-c", "./install_aotriton.sh /opt/rocm && rm -rf install_aotriton.sh aotriton_version.txt common_utils.sh"] |  | ||||||
| ENV AOTRITON_INSTALLED_PREFIX /opt/rocm/aotriton |  | ||||||
|  |  | ||||||
| # Install ccache/sccache (do this last, so we get priority in PATH) | # Install ccache/sccache (do this last, so we get priority in PATH) | ||||||
| COPY ./common/install_cache.sh install_cache.sh | COPY ./common/install_cache.sh install_cache.sh | ||||||
| @ -123,8 +109,5 @@ RUN bash ./install_cache.sh && rm install_cache.sh | |||||||
| ARG BUILD_ENVIRONMENT | ARG BUILD_ENVIRONMENT | ||||||
| ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT} | ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT} | ||||||
|  |  | ||||||
| # Install LLVM dev version (Defined in the pytorch/builder github repository) |  | ||||||
| COPY --from=pytorch/llvm:9.0.1 /opt/llvm /opt/llvm |  | ||||||
|  |  | ||||||
| USER jenkins | USER jenkins | ||||||
| CMD ["bash"] | CMD ["bash"] | ||||||
|  | |||||||
| @ -30,7 +30,6 @@ RUN bash ./install_docs_reqs.sh && rm install_docs_reqs.sh | |||||||
| ARG ANACONDA_PYTHON_VERSION | ARG ANACONDA_PYTHON_VERSION | ||||||
| ARG CONDA_CMAKE | ARG CONDA_CMAKE | ||||||
| ARG DOCS | ARG DOCS | ||||||
| ARG BUILD_ENVIRONMENT |  | ||||||
| ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION | ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION | ||||||
| ENV PATH /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/bin:/opt/conda/bin:$PATH | ENV PATH /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/bin:/opt/conda/bin:$PATH | ||||||
| ENV DOCS=$DOCS | ENV DOCS=$DOCS | ||||||
| @ -62,20 +61,15 @@ COPY ci_commit_pins/timm.txt timm.txt | |||||||
| RUN if [ -n "${INDUCTOR_BENCHMARKS}" ]; then bash ./install_inductor_benchmark_deps.sh; fi | RUN if [ -n "${INDUCTOR_BENCHMARKS}" ]; then bash ./install_inductor_benchmark_deps.sh; fi | ||||||
| RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface.txt | RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface.txt | ||||||
|  |  | ||||||
| # Install XPU Dependencies |  | ||||||
| ARG XPU_VERSION |  | ||||||
| COPY ./common/install_xpu.sh install_xpu.sh |  | ||||||
| RUN bash ./install_xpu.sh && rm install_xpu.sh |  | ||||||
|  |  | ||||||
| ARG TRITON | ARG TRITON | ||||||
| # Install triton, this needs to be done before sccache because the latter will | # Install triton, this needs to be done before sccache because the latter will | ||||||
| # try to reach out to S3, which docker build runners don't have access | # try to reach out to S3, which docker build runners don't have access | ||||||
| COPY ./common/install_triton.sh install_triton.sh | COPY ./common/install_triton.sh install_triton.sh | ||||||
| COPY ./common/common_utils.sh common_utils.sh | COPY ./common/common_utils.sh common_utils.sh | ||||||
| COPY ci_commit_pins/triton-xpu.txt triton-xpu.txt | # TODO: will add triton xpu commit | ||||||
| COPY triton_version.txt triton_version.txt | COPY ci_commit_pins/triton.txt triton.txt | ||||||
| RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi | RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi | ||||||
| RUN rm install_triton.sh common_utils.sh triton-xpu.txt triton_version.txt | RUN rm install_triton.sh common_utils.sh triton.txt | ||||||
|  |  | ||||||
| # (optional) Install database packages like LMDB and LevelDB | # (optional) Install database packages like LMDB and LevelDB | ||||||
| ARG DB | ARG DB | ||||||
| @ -84,13 +78,18 @@ RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi | |||||||
| RUN rm install_db.sh | RUN rm install_db.sh | ||||||
| ENV INSTALLED_DB ${DB} | ENV INSTALLED_DB ${DB} | ||||||
|  |  | ||||||
| # (optional) Install vision packages like OpenCV | # (optional) Install vision packages like OpenCV and ffmpeg | ||||||
| ARG VISION | ARG VISION | ||||||
| COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./ | COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./ | ||||||
| RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi | RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi | ||||||
| RUN rm install_vision.sh cache_vision_models.sh common_utils.sh | RUN rm install_vision.sh cache_vision_models.sh common_utils.sh | ||||||
| ENV INSTALLED_VISION ${VISION} | ENV INSTALLED_VISION ${VISION} | ||||||
|  |  | ||||||
|  | # Install XPU Dependencies | ||||||
|  | ARG BASEKIT_VERSION | ||||||
|  | COPY ./common/install_xpu.sh install_xpu.sh | ||||||
|  | RUN bash ./install_xpu.sh && rm install_xpu.sh | ||||||
|  |  | ||||||
| # (optional) Install non-default CMake version | # (optional) Install non-default CMake version | ||||||
| ARG CMAKE_VERSION | ARG CMAKE_VERSION | ||||||
| COPY ./common/install_cmake.sh install_cmake.sh | COPY ./common/install_cmake.sh install_cmake.sh | ||||||
|  | |||||||
| @ -36,8 +36,7 @@ ENV DOCS=$DOCS | |||||||
| COPY requirements-ci.txt requirements-docs.txt /opt/conda/ | COPY requirements-ci.txt requirements-docs.txt /opt/conda/ | ||||||
| COPY ./common/install_conda.sh install_conda.sh | COPY ./common/install_conda.sh install_conda.sh | ||||||
| COPY ./common/common_utils.sh common_utils.sh | COPY ./common/common_utils.sh common_utils.sh | ||||||
| COPY ./common/install_magma_conda.sh install_magma_conda.sh | RUN bash ./install_conda.sh && rm install_conda.sh common_utils.sh /opt/conda/requirements-ci.txt /opt/conda/requirements-docs.txt | ||||||
| RUN bash ./install_conda.sh && rm install_conda.sh install_magma_conda.sh common_utils.sh /opt/conda/requirements-ci.txt /opt/conda/requirements-docs.txt |  | ||||||
| RUN if [ -n "${UNINSTALL_DILL}" ]; then pip uninstall -y dill; fi | RUN if [ -n "${UNINSTALL_DILL}" ]; then pip uninstall -y dill; fi | ||||||
|  |  | ||||||
| # Install gcc | # Install gcc | ||||||
| @ -51,7 +50,7 @@ RUN  bash ./install_lcov.sh && rm install_lcov.sh | |||||||
|  |  | ||||||
| # Install cuda and cudnn | # Install cuda and cudnn | ||||||
| ARG CUDA_VERSION | ARG CUDA_VERSION | ||||||
| COPY ./common/install_cuda.sh install_cuda.sh | RUN wget -q https://raw.githubusercontent.com/pytorch/builder/main/common/install_cuda.sh -O install_cuda.sh | ||||||
| RUN bash ./install_cuda.sh ${CUDA_VERSION} && rm install_cuda.sh | RUN bash ./install_cuda.sh ${CUDA_VERSION} && rm install_cuda.sh | ||||||
| ENV DESIRED_CUDA ${CUDA_VERSION} | ENV DESIRED_CUDA ${CUDA_VERSION} | ||||||
| ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:$PATH | ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:$PATH | ||||||
| @ -81,13 +80,26 @@ RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi | |||||||
| RUN rm install_db.sh | RUN rm install_db.sh | ||||||
| ENV INSTALLED_DB ${DB} | ENV INSTALLED_DB ${DB} | ||||||
|  |  | ||||||
| # (optional) Install vision packages like OpenCV | # (optional) Install vision packages like OpenCV and ffmpeg | ||||||
| ARG VISION | ARG VISION | ||||||
| COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./ | COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./ | ||||||
| RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi | RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi | ||||||
| RUN rm install_vision.sh cache_vision_models.sh common_utils.sh | RUN rm install_vision.sh cache_vision_models.sh common_utils.sh | ||||||
| ENV INSTALLED_VISION ${VISION} | ENV INSTALLED_VISION ${VISION} | ||||||
|  |  | ||||||
|  | # (optional) Install Android NDK | ||||||
|  | ARG ANDROID | ||||||
|  | ARG ANDROID_NDK | ||||||
|  | ARG GRADLE_VERSION | ||||||
|  | COPY ./common/install_android.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./ | ||||||
|  | COPY ./android/AndroidManifest.xml AndroidManifest.xml | ||||||
|  | COPY ./android/build.gradle build.gradle | ||||||
|  | RUN if [ -n "${ANDROID}" ]; then bash ./install_android.sh; fi | ||||||
|  | RUN rm install_android.sh cache_vision_models.sh common_utils.sh | ||||||
|  | RUN rm AndroidManifest.xml | ||||||
|  | RUN rm build.gradle | ||||||
|  | ENV INSTALLED_ANDROID ${ANDROID} | ||||||
|  |  | ||||||
| # (optional) Install Vulkan SDK | # (optional) Install Vulkan SDK | ||||||
| ARG VULKAN_SDK_VERSION | ARG VULKAN_SDK_VERSION | ||||||
| COPY ./common/install_vulkan_sdk.sh install_vulkan_sdk.sh | COPY ./common/install_vulkan_sdk.sh install_vulkan_sdk.sh | ||||||
| @ -135,13 +147,6 @@ COPY ci_commit_pins/triton.txt triton.txt | |||||||
| RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi | RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi | ||||||
| RUN rm install_triton.sh common_utils.sh triton.txt | RUN rm install_triton.sh common_utils.sh triton.txt | ||||||
|  |  | ||||||
| ARG TRITON_CPU |  | ||||||
| COPY ./common/install_triton.sh install_triton.sh |  | ||||||
| COPY ./common/common_utils.sh common_utils.sh |  | ||||||
| COPY ci_commit_pins/triton-cpu.txt triton-cpu.txt |  | ||||||
| RUN if [ -n "${TRITON_CPU}" ]; then bash ./install_triton.sh; fi |  | ||||||
| RUN rm install_triton.sh common_utils.sh triton-cpu.txt |  | ||||||
|  |  | ||||||
| ARG EXECUTORCH | ARG EXECUTORCH | ||||||
| # Build and install executorch | # Build and install executorch | ||||||
| COPY ./common/install_executorch.sh install_executorch.sh | COPY ./common/install_executorch.sh install_executorch.sh | ||||||
| @ -150,14 +155,6 @@ COPY ci_commit_pins/executorch.txt executorch.txt | |||||||
| RUN if [ -n "${EXECUTORCH}" ]; then bash ./install_executorch.sh; fi | RUN if [ -n "${EXECUTORCH}" ]; then bash ./install_executorch.sh; fi | ||||||
| RUN rm install_executorch.sh common_utils.sh executorch.txt | RUN rm install_executorch.sh common_utils.sh executorch.txt | ||||||
|  |  | ||||||
| ARG HALIDE |  | ||||||
| # Build and install halide |  | ||||||
| COPY ./common/install_halide.sh install_halide.sh |  | ||||||
| COPY ./common/common_utils.sh common_utils.sh |  | ||||||
| COPY ci_commit_pins/halide.txt halide.txt |  | ||||||
| RUN if [ -n "${HALIDE}" ]; then bash ./install_halide.sh; fi |  | ||||||
| RUN rm install_halide.sh common_utils.sh halide.txt |  | ||||||
|  |  | ||||||
| ARG ONNX | ARG ONNX | ||||||
| # Install ONNX dependencies | # Install ONNX dependencies | ||||||
| COPY ./common/install_onnx.sh ./common/common_utils.sh ./ | COPY ./common/install_onnx.sh ./common/common_utils.sh ./ | ||||||
| @ -172,11 +169,9 @@ RUN rm install_acl.sh | |||||||
| ENV INSTALLED_ACL ${ACL} | ENV INSTALLED_ACL ${ACL} | ||||||
|  |  | ||||||
| # Install ccache/sccache (do this last, so we get priority in PATH) | # Install ccache/sccache (do this last, so we get priority in PATH) | ||||||
| ARG SKIP_SCCACHE_INSTALL |  | ||||||
| COPY ./common/install_cache.sh install_cache.sh | COPY ./common/install_cache.sh install_cache.sh | ||||||
| ENV PATH /opt/cache/bin:$PATH | ENV PATH /opt/cache/bin:$PATH | ||||||
| RUN if [ -z "${SKIP_SCCACHE_INSTALL}" ]; then bash ./install_cache.sh; fi | RUN bash ./install_cache.sh && rm install_cache.sh | ||||||
| RUN rm install_cache.sh |  | ||||||
|  |  | ||||||
| # Add jni.h for java host build | # Add jni.h for java host build | ||||||
| COPY ./common/install_jni.sh install_jni.sh | COPY ./common/install_jni.sh install_jni.sh | ||||||
| @ -193,9 +188,7 @@ ARG BUILD_ENVIRONMENT | |||||||
| ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT} | ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT} | ||||||
|  |  | ||||||
| # Install LLVM dev version (Defined in the pytorch/builder github repository) | # Install LLVM dev version (Defined in the pytorch/builder github repository) | ||||||
| ARG SKIP_LLVM_SRC_BUILD_INSTALL |  | ||||||
| COPY --from=pytorch/llvm:9.0.1 /opt/llvm /opt/llvm | COPY --from=pytorch/llvm:9.0.1 /opt/llvm /opt/llvm | ||||||
| RUN if [ -n "${SKIP_LLVM_SRC_BUILD_INSTALL}" ]; then set -eu; rm -rf /opt/llvm; fi |  | ||||||
|  |  | ||||||
| # AWS specific CUDA build guidance | # AWS specific CUDA build guidance | ||||||
| ENV TORCH_CUDA_ARCH_LIST Maxwell | ENV TORCH_CUDA_ARCH_LIST Maxwell | ||||||
|  | |||||||
| @ -1,10 +0,0 @@ | |||||||
| #!/usr/bin/env bash |  | ||||||
|  |  | ||||||
| # This is mostly just a shim to manywheel/build.sh |  | ||||||
| # TODO: Make this a dedicated script to build just libtorch |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| SCRIPTPATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" |  | ||||||
|  |  | ||||||
| USE_CUSPARSELT=0 BUILD_PYTHONLESS=1 DESIRED_PYTHON="3.9" ${SCRIPTPATH}/../manywheel/build.sh |  | ||||||
							
								
								
									
.ci/magma/.gitignore (vendored)
							| @ -1,2 +0,0 @@ | |||||||
| output/ |  | ||||||
| magma-cuda*/ |  | ||||||
| @ -1,48 +0,0 @@ | |||||||
| SHELL=/usr/bin/env bash |  | ||||||
|  |  | ||||||
| DOCKER_CMD ?= docker |  | ||||||
| DESIRED_CUDA ?= 11.8 |  | ||||||
| DESIRED_CUDA_SHORT = $(subst .,,$(DESIRED_CUDA)) |  | ||||||
| PACKAGE_NAME = magma-cuda |  | ||||||
| CUDA_ARCH_LIST ?= -gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90 |  | ||||||
|  |  | ||||||
| DOCKER_RUN = set -eou pipefail; ${DOCKER_CMD} run --rm -i \ |  | ||||||
| 	-v $(shell git rev-parse --show-toplevel)/.ci:/builder \ |  | ||||||
| 	-w /builder \ |  | ||||||
| 	-e PACKAGE_NAME=${PACKAGE_NAME}${DESIRED_CUDA_SHORT} \ |  | ||||||
| 	-e DESIRED_CUDA=${DESIRED_CUDA} \ |  | ||||||
| 	-e CUDA_ARCH_LIST="${CUDA_ARCH_LIST}" \ |  | ||||||
| 	"pytorch/manylinux-builder:cuda${DESIRED_CUDA}-main" \ |  | ||||||
| 	magma/build_magma.sh |  | ||||||
|  |  | ||||||
| .PHONY: all |  | ||||||
| all: magma-cuda126 |  | ||||||
| all: magma-cuda124 |  | ||||||
| all: magma-cuda121 |  | ||||||
| all: magma-cuda118 |  | ||||||
|  |  | ||||||
| .PHONY: |  | ||||||
| clean: |  | ||||||
| 	$(RM) -r magma-* |  | ||||||
| 	$(RM) -r output |  | ||||||
|  |  | ||||||
| .PHONY: magma-cuda126 |  | ||||||
| magma-cuda126: DESIRED_CUDA := 12.6 |  | ||||||
| magma-cuda126: |  | ||||||
| 	$(DOCKER_RUN) |  | ||||||
|  |  | ||||||
| .PHONY: magma-cuda124 |  | ||||||
| magma-cuda124: DESIRED_CUDA := 12.4 |  | ||||||
| magma-cuda124: |  | ||||||
| 	$(DOCKER_RUN) |  | ||||||
|  |  | ||||||
| .PHONY: magma-cuda121 |  | ||||||
| magma-cuda121: DESIRED_CUDA := 12.1 |  | ||||||
| magma-cuda121: |  | ||||||
| 	$(DOCKER_RUN) |  | ||||||
|  |  | ||||||
| .PHONY: magma-cuda118 |  | ||||||
| magma-cuda118: DESIRED_CUDA := 11.8 |  | ||||||
| magma-cuda118: CUDA_ARCH_LIST += -gencode arch=compute_37,code=sm_37 |  | ||||||
| magma-cuda118: |  | ||||||
| 	$(DOCKER_RUN) |  | ||||||
| @ -1,50 +0,0 @@ | |||||||
| # Magma |  | ||||||
|  |  | ||||||
| This folder contains the scripts and configurations to build magma, statically linked for various versions of CUDA. |  | ||||||
|  |  | ||||||
| ## Building |  | ||||||
|  |  | ||||||
| Look in the `Makefile` for available targets to build. To build any target, for example `magma-cuda118`, run |  | ||||||
|  |  | ||||||
| ``` |  | ||||||
| # Using `docker` |  | ||||||
| make magma-cuda118 |  | ||||||
|  |  | ||||||
| # Using `podman` |  | ||||||
| DOCKER_CMD=podman make magma-cuda118 |  | ||||||
| ``` |  | ||||||
|  |  | ||||||
| This spins up a `pytorch/manylinux-cuda<version>` docker container, which has the required `devtoolset` and CUDA versions installed. |  | ||||||
| Within the container, it runs `build_magma.sh` with the correct environment variables set, which packages the necessary files |  | ||||||
| into a tarball with the following structure: |  | ||||||
|  |  | ||||||
| ``` |  | ||||||
| . |  | ||||||
| ├── include       # header files |  | ||||||
| ├── lib           # libmagma.a |  | ||||||
| ├── info |  | ||||||
| │   ├── licenses  # license file |  | ||||||
| │   └── recipe    # build script and patches |  | ||||||
| ``` |  | ||||||
|  |  | ||||||
| More specifically, `build_magma.sh` copies over the relevant files from the `package_files` directory depending on the CUDA version. |  | ||||||
| The resulting tarballs are placed in the `output` folder. |  | ||||||
|  |  | ||||||
|  |  | ||||||
| ## Pushing |  | ||||||
|  |  | ||||||
| Packages can be uploaded to an S3 bucket using: |  | ||||||
|  |  | ||||||
| ``` |  | ||||||
| aws s3 cp output/*/magma-cuda*.bz2 <bucket-with-path> |  | ||||||
| ``` |  | ||||||
|  |  | ||||||
| If you do not have upload permissions, please ping @seemethere or @soumith to gain access. |  | ||||||
|  |  | ||||||
| ## New versions |  | ||||||
|  |  | ||||||
| New CUDA versions can be added by creating a new make target with the next desired version. For CUDA version NN.n, the target should be named `magma-cudaNNn`. |  | ||||||
|  |  | ||||||
| Make sure to edit the appropriate environment variables (e.g., DESIRED_CUDA, CUDA_ARCH_LIST) in the `Makefile` accordingly. Remember also to check `build_magma.sh` to ensure the logic for copying over the files remains correct. |  | ||||||
|  |  | ||||||
| New patches can be added by editing `Makefile` and `build_magma.sh` the same way `getrf_nbparam.patch` is implemented. |  | ||||||
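As a rough sketch of the two notes above (this example is not part of the original README; the CUDA version 12.8 and the target name are placeholders chosen purely for illustration), a new entry would mirror the existing targets in the `Makefile`:

```
# Hypothetical new version: register it with the aggregate target ...
all: magma-cuda128

# ... and define it like the existing ones, overriding DESIRED_CUDA for this
# target (extra -gencode flags can be appended to CUDA_ARCH_LIST if needed).
.PHONY: magma-cuda128
magma-cuda128: DESIRED_CUDA := 12.8
magma-cuda128:
	$(DOCKER_RUN)
```

A new patch would be wired in the same two places where `getrf_nbparam.patch` already appears in `build_magma.sh`: one `patch` invocation against `${PACKAGE_FILES}/<name>.patch` in the build step, and one `cp` line that copies it into `${PACKAGE_RECIPE}` so it ships with the packaged recipe.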
| @ -1,50 +0,0 @@ | |||||||
| #!/usr/bin/env bash |  | ||||||
|  |  | ||||||
| set -eou pipefail |  | ||||||
|  |  | ||||||
| # Environment variables |  | ||||||
| # The script expects DESIRED_CUDA and PACKAGE_NAME to be set |  | ||||||
| ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" |  | ||||||
| MAGMA_VERSION=2.6.1 |  | ||||||
|  |  | ||||||
| # Folders for the build |  | ||||||
| PACKAGE_FILES=${ROOT_DIR}/magma/package_files # source patches and metadata |  | ||||||
| PACKAGE_DIR=${ROOT_DIR}/magma/${PACKAGE_NAME} # build workspace |  | ||||||
| PACKAGE_OUTPUT=${ROOT_DIR}/magma/output # where tarballs are stored |  | ||||||
| PACKAGE_BUILD=${PACKAGE_DIR}/build # where the content of the tarball is prepared |  | ||||||
| PACKAGE_RECIPE=${PACKAGE_BUILD}/info/recipe |  | ||||||
| PACKAGE_LICENSE=${PACKAGE_BUILD}/info/licenses |  | ||||||
| mkdir -p ${PACKAGE_DIR} ${PACKAGE_OUTPUT}/linux-64 ${PACKAGE_BUILD} ${PACKAGE_RECIPE} ${PACKAGE_LICENSE} |  | ||||||
|  |  | ||||||
| # Fetch magma sources and verify checksum |  | ||||||
| pushd ${PACKAGE_DIR} |  | ||||||
| curl -LO http://icl.utk.edu/projectsfiles/magma/downloads/magma-${MAGMA_VERSION}.tar.gz |  | ||||||
| tar zxf magma-${MAGMA_VERSION}.tar.gz |  | ||||||
| sha256sum --check < ${PACKAGE_FILES}/magma-${MAGMA_VERSION}.sha256 |  | ||||||
| popd |  | ||||||
|  |  | ||||||
| # Apply patches and build |  | ||||||
| pushd ${PACKAGE_DIR}/magma-${MAGMA_VERSION} |  | ||||||
| patch < ${PACKAGE_FILES}/CMake.patch |  | ||||||
| patch < ${PACKAGE_FILES}/cmakelists.patch |  | ||||||
| patch -p0 < ${PACKAGE_FILES}/thread_queue.patch |  | ||||||
| patch -p1 < ${PACKAGE_FILES}/getrf_shfl.patch |  | ||||||
| patch -p1 < ${PACKAGE_FILES}/getrf_nbparam.patch |  | ||||||
| # The build.sh script expects to be executed from the sources root folder |  | ||||||
| INSTALL_DIR=${PACKAGE_BUILD} ${PACKAGE_FILES}/build.sh |  | ||||||
| popd |  | ||||||
|  |  | ||||||
| # Package recipe, license and tarball |  | ||||||
| # Folder and package name are backward compatible for the build workflow |  | ||||||
| cp ${PACKAGE_FILES}/build.sh ${PACKAGE_RECIPE}/build.sh |  | ||||||
| cp ${PACKAGE_FILES}/thread_queue.patch ${PACKAGE_RECIPE}/thread_queue.patch |  | ||||||
| cp ${PACKAGE_FILES}/cmakelists.patch ${PACKAGE_RECIPE}/cmakelists.patch |  | ||||||
| cp ${PACKAGE_FILES}/getrf_shfl.patch ${PACKAGE_RECIPE}/getrf_shfl.patch |  | ||||||
| cp ${PACKAGE_FILES}/getrf_nbparam.patch ${PACKAGE_RECIPE}/getrf_nbparam.patch |  | ||||||
| cp ${PACKAGE_FILES}/CMake.patch ${PACKAGE_RECIPE}/CMake.patch |  | ||||||
| cp ${PACKAGE_FILES}/magma-${MAGMA_VERSION}.sha256 ${PACKAGE_RECIPE}/magma-${MAGMA_VERSION}.sha256 |  | ||||||
| cp ${PACKAGE_DIR}/magma-${MAGMA_VERSION}/COPYRIGHT ${PACKAGE_LICENSE}/COPYRIGHT |  | ||||||
| pushd ${PACKAGE_BUILD} |  | ||||||
| tar cjf ${PACKAGE_OUTPUT}/linux-64/${PACKAGE_NAME}-${MAGMA_VERSION}-1.tar.bz2 include lib info |  | ||||||
| echo Built in ${PACKAGE_OUTPUT}/linux-64/${PACKAGE_NAME}-${MAGMA_VERSION}-1.tar.bz2 |  | ||||||
| popd |  | ||||||
| @ -1,40 +0,0 @@ | |||||||
| --- CMake.src.cuda	2023-03-29 10:05:32.136954140 +0000 |  | ||||||
| +++ CMake.src.cuda	2023-03-29 10:05:50.281318043 +0000 |  | ||||||
| @@ -283,10 +283,10 @@ |  | ||||||
|  magmablas/zgeadd.cu |  | ||||||
|  magmablas/zgeadd2.cu |  | ||||||
|  magmablas/zgeam.cu |  | ||||||
| -magmablas/zgemm_fermi.cu |  | ||||||
| +#magmablas/zgemm_fermi.cu |  | ||||||
|  magmablas/zgemm_reduce.cu |  | ||||||
|  magmablas/zgemv_conj.cu |  | ||||||
| -magmablas/zgemv_fermi.cu |  | ||||||
| +#magmablas/zgemv_fermi.cu |  | ||||||
|  magmablas/zgerbt.cu |  | ||||||
|  magmablas/zgerbt_kernels.cu |  | ||||||
|  magmablas/zgetmatrix_transpose.cpp |  | ||||||
| @@ -1009,18 +1009,18 @@ |  | ||||||
|  magmablas/sgeam.cu |  | ||||||
|  magmablas/dgeam.cu |  | ||||||
|  magmablas/cgeam.cu |  | ||||||
| -magmablas/sgemm_fermi.cu |  | ||||||
| -magmablas/dgemm_fermi.cu |  | ||||||
| -magmablas/cgemm_fermi.cu |  | ||||||
| +#magmablas/sgemm_fermi.cu |  | ||||||
| +#magmablas/dgemm_fermi.cu |  | ||||||
| +#magmablas/cgemm_fermi.cu |  | ||||||
|  magmablas/sgemm_reduce.cu |  | ||||||
|  magmablas/dgemm_reduce.cu |  | ||||||
|  magmablas/cgemm_reduce.cu |  | ||||||
|  magmablas/sgemv_conj.cu |  | ||||||
|  magmablas/dgemv_conj.cu |  | ||||||
|  magmablas/cgemv_conj.cu |  | ||||||
| -magmablas/sgemv_fermi.cu |  | ||||||
| -magmablas/dgemv_fermi.cu |  | ||||||
| -magmablas/cgemv_fermi.cu |  | ||||||
| +#magmablas/sgemv_fermi.cu |  | ||||||
| +#magmablas/dgemv_fermi.cu |  | ||||||
| +#magmablas/cgemv_fermi.cu |  | ||||||
|  magmablas/sgerbt.cu |  | ||||||
|  magmablas/dgerbt.cu |  | ||||||
|  magmablas/cgerbt.cu |  | ||||||
| @ -1,12 +0,0 @@ | |||||||
| CUDA__VERSION=$(nvcc --version|sed -n 4p|cut -f5 -d" "|cut -f1 -d",") |  | ||||||
| if [ "$CUDA__VERSION" != "$DESIRED_CUDA" ]; then |  | ||||||
|     echo "CUDA Version is not $DESIRED_CUDA. CUDA Version found: $CUDA__VERSION" |  | ||||||
|     exit 1 |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| mkdir build |  | ||||||
| cd build |  | ||||||
| cmake .. -DUSE_FORTRAN=OFF -DGPU_TARGET="All" -DCMAKE_INSTALL_PREFIX="$INSTALL_DIR" -DCUDA_ARCH_LIST="$CUDA_ARCH_LIST" |  | ||||||
| make -j$(getconf _NPROCESSORS_CONF) |  | ||||||
| make install |  | ||||||
| cd .. |  | ||||||
| @ -1,388 +0,0 @@ | |||||||
| diff --git a/CMakeLists.txt b/CMakeLists.txt |  | ||||||
| index d5d8d87d..8a507334 100644 |  | ||||||
| --- a/CMakeLists.txt |  | ||||||
| +++ b/CMakeLists.txt |  | ||||||
| @@ -3,7 +3,7 @@ cmake_minimum_required( VERSION 2.8.1 ) |  | ||||||
|  # ---------------------------------------- |  | ||||||
|  # to disable Fortran, set this to "off" |  | ||||||
|  # see also -DADD_ below |  | ||||||
| -option( USE_FORTRAN "Fortran is required for some tester checks, but can be disabled with reduced functionality" ON ) |  | ||||||
| +option( USE_FORTRAN "Fortran is required for some tester checks, but can be disabled with reduced functionality" OFF ) |  | ||||||
|  |  | ||||||
|  if (USE_FORTRAN) |  | ||||||
|      project( MAGMA C CXX Fortran ) |  | ||||||
| @@ -75,6 +75,8 @@ else() |  | ||||||
|      message( WARNING "The compiler ${CMAKE_CXX_COMPILER} doesn't support the -std=c++11 flag. Some code may not compile.") |  | ||||||
|  endif() |  | ||||||
|  |  | ||||||
| +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -static-libstdc++ -fno-exceptions") |  | ||||||
| + |  | ||||||
|  CHECK_C_COMPILER_FLAG("-std=c99" COMPILER_SUPPORTS_C99) |  | ||||||
|  if (COMPILER_SUPPORTS_C99) |  | ||||||
|      set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c99") |  | ||||||
| @@ -101,15 +103,15 @@ endif() |  | ||||||
|  |  | ||||||
|  |  | ||||||
|  # ---------------------------------------- |  | ||||||
| -# locate OpenMP |  | ||||||
| -find_package( OpenMP ) |  | ||||||
| -if (OPENMP_FOUND) |  | ||||||
| -    message( STATUS "Found OpenMP" ) |  | ||||||
| -    message( STATUS "    OpenMP_C_FLAGS   ${OpenMP_C_FLAGS}" ) |  | ||||||
| -    message( STATUS "    OpenMP_CXX_FLAGS ${OpenMP_CXX_FLAGS}" ) |  | ||||||
| -    set( CMAKE_C_FLAGS   "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}" ) |  | ||||||
| -    set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}" ) |  | ||||||
| -endif() |  | ||||||
| +# # locate OpenMP |  | ||||||
| +# find_package( OpenMP ) |  | ||||||
| +# if (OPENMP_FOUND) |  | ||||||
| +#     message( STATUS "Found OpenMP" ) |  | ||||||
| +#     message( STATUS "    OpenMP_C_FLAGS   ${OpenMP_C_FLAGS}" ) |  | ||||||
| +#     message( STATUS "    OpenMP_CXX_FLAGS ${OpenMP_CXX_FLAGS}" ) |  | ||||||
| +#     set( CMAKE_C_FLAGS   "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}" ) |  | ||||||
| +#     set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}" ) |  | ||||||
| +# endif() |  | ||||||
|  |  | ||||||
|  if (MAGMA_ENABLE_CUDA) |  | ||||||
|    # ---------------------------------------- |  | ||||||
| @@ -132,7 +134,7 @@ if (MAGMA_ENABLE_CUDA) |  | ||||||
|      set( NV_SM    "" ) |  | ||||||
|      set( NV_COMP  "" ) |  | ||||||
|  |  | ||||||
| -    set(CUDA_SEPARABLE_COMPILATION ON) |  | ||||||
| +    set(CUDA_SEPARABLE_COMPILATION OFF) |  | ||||||
|  |  | ||||||
|      # nvcc >= 6.5 supports -std=c++11, so propagate CXXFLAGS to NVCCFLAGS. |  | ||||||
|      # Older nvcc didn't support -std=c++11, so previously we disabled propagation. |  | ||||||
| @@ -294,11 +296,18 @@ if (MAGMA_ENABLE_CUDA) |  | ||||||
|          message( STATUS "    compile for CUDA arch 8.0 (Ampere)" ) |  | ||||||
|      endif() |  | ||||||
|  |  | ||||||
| +    if ( ${GPU_TARGET} MATCHES "All") |  | ||||||
| +        set( MIN_ARCH 370) |  | ||||||
| +        SET( NV_SM ${CUDA_ARCH_LIST}) |  | ||||||
| +        SET( NV_COMP "") |  | ||||||
| +    endif() |  | ||||||
| + |  | ||||||
|      if (NOT MIN_ARCH) |  | ||||||
|          message( FATAL_ERROR "GPU_TARGET must contain one or more of Fermi, Kepler, Maxwell, Pascal, Volta, Turing, Ampere, or valid sm_[0-9][0-9]" ) |  | ||||||
|      endif() |  | ||||||
|  |  | ||||||
| -    set( CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} -Xcompiler -fPIC ${NV_SM} ${NV_COMP} ${FORTRAN_CONVENTION} ) |  | ||||||
| +    set( CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} -DHAVE_CUBLAS -Xfatbin -compress-all -Xcompiler -fPIC -std=c++11 ${NV_SM} ${NV_COMP} ${FORTRAN_CONVENTION} ) |  | ||||||
| +    MESSAGE(STATUS "CUDA_NVCC_FLAGS: ${CUDA_NVCC_FLAGS}") |  | ||||||
|      #add_definitions( "-DMAGMA_HAVE_CUDA -DMAGMA_CUDA_ARCH_MIN=${MIN_ARCH}" ) |  | ||||||
|      set(MAGMA_HAVE_CUDA "1") |  | ||||||
|      set(MAGMA_CUDA_ARCH_MIN "${MIN_ARCH}") |  | ||||||
| @@ -413,7 +422,7 @@ set_property(CACHE BLA_VENDOR PROPERTY STRINGS |  | ||||||
|  set( LAPACK_LIBRARIES "" CACHE STRING "Libraries for LAPACK and BLAS, to manually override search" ) |  | ||||||
|  if (LAPACK_LIBRARIES STREQUAL "") |  | ||||||
|      message( STATUS "Searching for BLAS and LAPACK. To override, set LAPACK_LIBRARIES using ccmake." ) |  | ||||||
| -    find_package( LAPACK ) |  | ||||||
| +    # find_package( LAPACK ) |  | ||||||
|      # force showing updated LAPACK_LIBRARIES in ccmake / cmake-gui. |  | ||||||
|      set( LAPACK_LIBRARIES ${LAPACK_LIBRARIES} CACHE STRING "Libraries for LAPACK and BLAS, to manually override search" FORCE ) |  | ||||||
|  else() |  | ||||||
| @@ -552,12 +561,12 @@ if (WIN32) |  | ||||||
|      #message( "libmagma_all_f   ${libmagma_all_f}"   ) |  | ||||||
|  |  | ||||||
|      # on Windows, Fortran files aren't compiled if listed here... |  | ||||||
| -    cuda_add_library( magma ${libmagma_all_cpp} ) |  | ||||||
| +    cuda_add_library( magma STATIC ${libmagma_all_cpp} OPTIONS --compiler-options "-fPIC") |  | ||||||
|      target_link_libraries( magma |  | ||||||
|          ${LAPACK_LIBRARIES} |  | ||||||
|          ${CUDA_CUDART_LIBRARY} |  | ||||||
|          ${CUDA_CUBLAS_LIBRARIES} |  | ||||||
| -        ${CUDA_cusparse_LIBRARY} |  | ||||||
| +        # ${CUDA_cusparse_LIBRARY} |  | ||||||
|      ) |  | ||||||
|  |  | ||||||
|      # no Fortran files at the moment (how to test libmagma_all_f is not empty?), |  | ||||||
| @@ -575,13 +584,13 @@ if (WIN32) |  | ||||||
|  else() |  | ||||||
|      # Unix doesn't seem to have a problem with mixing C, CUDA, and Fortran files |  | ||||||
|      if (MAGMA_ENABLE_CUDA) |  | ||||||
| -      cuda_add_library( magma ${libmagma_all} ) |  | ||||||
| +      cuda_add_library( magma STATIC ${libmagma_all} OPTIONS --compiler-options "-fPIC") |  | ||||||
|        target_link_libraries( magma |  | ||||||
|          ${blas_fix} |  | ||||||
|          ${LAPACK_LIBRARIES} |  | ||||||
|          ${CUDA_CUDART_LIBRARY} |  | ||||||
|          ${CUDA_CUBLAS_LIBRARIES} |  | ||||||
| -        ${CUDA_cusparse_LIBRARY} |  | ||||||
| +        # ${CUDA_cusparse_LIBRARY} |  | ||||||
|  	) |  | ||||||
|      else() |  | ||||||
|        find_package( hipBLAS ) |  | ||||||
| @@ -614,138 +623,139 @@ else() |  | ||||||
|      endif() |  | ||||||
|  endif() |  | ||||||
|  add_custom_target( lib DEPENDS magma ) |  | ||||||
| - |  | ||||||
| - |  | ||||||
| -# ---------------------------------------- |  | ||||||
| -# compile lapacktest library |  | ||||||
| -# If use fortran, compile only Fortran files, not magma_[sdcz]_no_fortran.cpp |  | ||||||
| -# else,           compile only C++     files, not Fortran files |  | ||||||
| -if (USE_FORTRAN) |  | ||||||
| -    foreach( filename ${liblapacktest_all} ) |  | ||||||
| -        if (filename MATCHES "\\.(f|f90|F90)$") |  | ||||||
| -            list( APPEND liblapacktest_all_f ${filename} ) |  | ||||||
| -        endif() |  | ||||||
| -    endforeach() |  | ||||||
| -    add_library( lapacktest ${liblapacktest_all_f} ) |  | ||||||
| -else() |  | ||||||
| -    # alternatively, use only C/C++/CUDA files, including magma_[sdcz]_no_fortran.cpp |  | ||||||
| -    foreach( filename ${liblapacktest_all} ) |  | ||||||
| -        if (filename MATCHES "\\.(c|cu|cpp)$") |  | ||||||
| -            list( APPEND liblapacktest_all_cpp ${filename} ) |  | ||||||
| -        endif() |  | ||||||
| -    endforeach() |  | ||||||
| -    add_library( lapacktest ${liblapacktest_all_cpp} ) |  | ||||||
| -endif() |  | ||||||
| -target_link_libraries( lapacktest |  | ||||||
| -    ${blas_fix} |  | ||||||
| -    ${LAPACK_LIBRARIES} |  | ||||||
| -) |  | ||||||
| - |  | ||||||
| - |  | ||||||
| -# ---------------------------------------- |  | ||||||
| -# compile tester library |  | ||||||
| -add_library( tester ${libtest_all} ) |  | ||||||
| -target_link_libraries( tester |  | ||||||
| -    magma |  | ||||||
| -    lapacktest |  | ||||||
| -    ${blas_fix} |  | ||||||
| -    ${LAPACK_LIBRARIES} |  | ||||||
| -) |  | ||||||
| +set_target_properties(magma PROPERTIES POSITION_INDEPENDENT_CODE ON) |  | ||||||
| + |  | ||||||
| + |  | ||||||
| +# # ---------------------------------------- |  | ||||||
| +# # compile lapacktest library |  | ||||||
| +# # If use fortran, compile only Fortran files, not magma_[sdcz]_no_fortran.cpp |  | ||||||
| +# # else,           compile only C++     files, not Fortran files |  | ||||||
| +# if (USE_FORTRAN) |  | ||||||
| +#     foreach( filename ${liblapacktest_all} ) |  | ||||||
| +#         if (filename MATCHES "\\.(f|f90|F90)$") |  | ||||||
| +#             list( APPEND liblapacktest_all_f ${filename} ) |  | ||||||
| +#         endif() |  | ||||||
| +#     endforeach() |  | ||||||
| +#     add_library( lapacktest ${liblapacktest_all_f} ) |  | ||||||
| +# else() |  | ||||||
| +#     # alternatively, use only C/C++/CUDA files, including magma_[sdcz]_no_fortran.cpp |  | ||||||
| +#     foreach( filename ${liblapacktest_all} ) |  | ||||||
| +#         if (filename MATCHES "\\.(c|cu|cpp)$") |  | ||||||
| +#             list( APPEND liblapacktest_all_cpp ${filename} ) |  | ||||||
| +#         endif() |  | ||||||
| +#     endforeach() |  | ||||||
| +#     add_library( lapacktest ${liblapacktest_all_cpp} ) |  | ||||||
| +# endif() |  | ||||||
| +# target_link_libraries( lapacktest |  | ||||||
| +#     ${blas_fix} |  | ||||||
| +#     ${LAPACK_LIBRARIES} |  | ||||||
| +# ) |  | ||||||
| + |  | ||||||
| + |  | ||||||
| +# # ---------------------------------------- |  | ||||||
| +# # compile tester library |  | ||||||
| +# add_library( tester ${libtest_all} ) |  | ||||||
| +# target_link_libraries( tester |  | ||||||
| +#     magma |  | ||||||
| +#     lapacktest |  | ||||||
| +#     ${blas_fix} |  | ||||||
| +#     ${LAPACK_LIBRARIES} |  | ||||||
| +# ) |  | ||||||
|  |  | ||||||
|  |  | ||||||
|  # ---------------------------------------- |  | ||||||
|  # compile MAGMA sparse library |  | ||||||
|  |  | ||||||
|  # sparse doesn't have Fortran at the moment, so no need for above shenanigans |  | ||||||
| -if (MAGMA_ENABLE_CUDA) |  | ||||||
| -  include_directories( sparse/include ) |  | ||||||
| -  include_directories( sparse/control ) |  | ||||||
| -else() |  | ||||||
| -  include_directories( sparse_hip/include ) |  | ||||||
| -  include_directories( sparse_hip/control ) |  | ||||||
| -endif() |  | ||||||
| -include_directories( testing ) |  | ||||||
| - |  | ||||||
| -if (MAGMA_ENABLE_CUDA) |  | ||||||
| -  cuda_add_library( magma_sparse ${libsparse_all} ) |  | ||||||
| -  target_link_libraries( magma_sparse |  | ||||||
| -    magma |  | ||||||
| -    ${blas_fix} |  | ||||||
| -    ${LAPACK_LIBRARIES} |  | ||||||
| -    ${CUDA_CUDART_LIBRARY} |  | ||||||
| -    ${CUDA_CUBLAS_LIBRARIES} |  | ||||||
| -    ${CUDA_cusparse_LIBRARY} |  | ||||||
| -    ) |  | ||||||
| -else() |  | ||||||
| -  add_library( magma_sparse ${libsparse_all} ) |  | ||||||
| -  target_link_libraries( magma_sparse |  | ||||||
| -    magma |  | ||||||
| -    ${blas_fix} |  | ||||||
| -    ${LAPACK_LIBRARIES} |  | ||||||
| -    hip::device |  | ||||||
| -    roc::hipblas |  | ||||||
| -    roc::hipsparse |  | ||||||
| -    ) |  | ||||||
| -endif() |  | ||||||
| -add_custom_target( sparse-lib DEPENDS magma_sparse ) |  | ||||||
| - |  | ||||||
| - |  | ||||||
| -# ---------------------------------------- |  | ||||||
| -# compile each tester |  | ||||||
| - |  | ||||||
| -# save testers to testing/ |  | ||||||
| -# save tester lib files to testing_lib/ to avoid cluttering lib/ |  | ||||||
| -set( CMAKE_RUNTIME_OUTPUT_DIRECTORY testing ) |  | ||||||
| -set( CMAKE_ARCHIVE_OUTPUT_DIRECTORY testing_lib ) |  | ||||||
| -set( CMAKE_LIBRARY_OUTPUT_DIRECTORY testing_lib ) |  | ||||||
| - |  | ||||||
| -# skip Fortran testers, which require an extra file from CUDA |  | ||||||
| -foreach( filename ${testing_all} ) |  | ||||||
| -    if (filename MATCHES "\\.(c|cu|cpp)$") |  | ||||||
| -        list( APPEND testing_all_cpp ${filename} ) |  | ||||||
| -    endif() |  | ||||||
| -endforeach() |  | ||||||
| -foreach( TEST ${testing_all_cpp} ) |  | ||||||
| -    string( REGEX REPLACE "\\.(cpp|f90|F90)" "" EXE ${TEST} ) |  | ||||||
| -    string( REGEX REPLACE "testing/" "" EXE ${EXE} ) |  | ||||||
| -    #message( "${TEST} --> ${EXE}" ) |  | ||||||
| -    add_executable( ${EXE} ${TEST} ) |  | ||||||
| -    target_link_libraries( ${EXE} tester lapacktest magma ) |  | ||||||
| -    list( APPEND testing ${EXE} ) |  | ||||||
| -endforeach() |  | ||||||
| -add_custom_target( testing DEPENDS ${testing} ) |  | ||||||
| - |  | ||||||
| - |  | ||||||
| -# ---------------------------------------- |  | ||||||
| -# compile each sparse tester |  | ||||||
| - |  | ||||||
| -if (MAGMA_ENABLE_CUDA) |  | ||||||
| -  set(SPARSE_TEST_DIR "sparse/testing") |  | ||||||
| -else() |  | ||||||
| -  set(SPARSE_TEST_DIR "sparse_hip/testing") |  | ||||||
| -endif() |  | ||||||
| - |  | ||||||
| - |  | ||||||
| -set( CMAKE_RUNTIME_OUTPUT_DIRECTORY "${SPARSE_TEST_DIR}" ) |  | ||||||
| -cmake_policy( SET CMP0037 OLD) |  | ||||||
| -foreach( TEST ${sparse_testing_all} ) |  | ||||||
| -    string( REGEX REPLACE "\\.(cpp|f90|F90)"     "" EXE ${TEST} ) |  | ||||||
| -    string( REGEX REPLACE "${SPARSE_TEST_DIR}/" "" EXE ${EXE} ) |  | ||||||
| -    #message( "${TEST} --> ${EXE}" ) |  | ||||||
| -    add_executable( ${EXE} ${TEST} ) |  | ||||||
| -    target_link_libraries( ${EXE} magma_sparse magma ) |  | ||||||
| -    list( APPEND sparse-testing ${EXE} ) |  | ||||||
| -endforeach() |  | ||||||
| -add_custom_target( sparse-testing DEPENDS ${sparse-testing} ) |  | ||||||
| +# if (MAGMA_ENABLE_CUDA) |  | ||||||
| +#   include_directories( sparse/include ) |  | ||||||
| +#   include_directories( sparse/control ) |  | ||||||
| +# else() |  | ||||||
| +#   include_directories( sparse_hip/include ) |  | ||||||
| +#   include_directories( sparse_hip/control ) |  | ||||||
| +# endif() |  | ||||||
| +# include_directories( testing ) |  | ||||||
| + |  | ||||||
| +# if (MAGMA_ENABLE_CUDA) |  | ||||||
| +#   cuda_add_library( magma_sparse ${libsparse_all} ) |  | ||||||
| +#   target_link_libraries( magma_sparse |  | ||||||
| +#     magma |  | ||||||
| +#     ${blas_fix} |  | ||||||
| +#     ${LAPACK_LIBRARIES} |  | ||||||
| +#     ${CUDA_CUDART_LIBRARY} |  | ||||||
| +#     ${CUDA_CUBLAS_LIBRARIES} |  | ||||||
| +#     ${CUDA_cusparse_LIBRARY} |  | ||||||
| +#     ) |  | ||||||
| +# else() |  | ||||||
| +#   add_library( magma_sparse ${libsparse_all} ) |  | ||||||
| +#   target_link_libraries( magma_sparse |  | ||||||
| +#     magma |  | ||||||
| +#     ${blas_fix} |  | ||||||
| +#     ${LAPACK_LIBRARIES} |  | ||||||
| +#     hip::device |  | ||||||
| +#     roc::hipblas |  | ||||||
| +#     roc::hipsparse |  | ||||||
| +#     ) |  | ||||||
| +# endif() |  | ||||||
| +# add_custom_target( sparse-lib DEPENDS magma_sparse ) |  | ||||||
| + |  | ||||||
| + |  | ||||||
| +# # ---------------------------------------- |  | ||||||
| +# # compile each tester |  | ||||||
| + |  | ||||||
| +# # save testers to testing/ |  | ||||||
| +# # save tester lib files to testing_lib/ to avoid cluttering lib/ |  | ||||||
| +# set( CMAKE_RUNTIME_OUTPUT_DIRECTORY testing ) |  | ||||||
| +# set( CMAKE_ARCHIVE_OUTPUT_DIRECTORY testing_lib ) |  | ||||||
| +# set( CMAKE_LIBRARY_OUTPUT_DIRECTORY testing_lib ) |  | ||||||
| + |  | ||||||
| +# # skip Fortran testers, which require an extra file from CUDA |  | ||||||
| +# foreach( filename ${testing_all} ) |  | ||||||
| +#     if (filename MATCHES "\\.(c|cu|cpp)$") |  | ||||||
| +#         list( APPEND testing_all_cpp ${filename} ) |  | ||||||
| +#     endif() |  | ||||||
| +# endforeach() |  | ||||||
| +# foreach( TEST ${testing_all_cpp} ) |  | ||||||
| +#     string( REGEX REPLACE "\\.(cpp|f90|F90)" "" EXE ${TEST} ) |  | ||||||
| +#     string( REGEX REPLACE "testing/" "" EXE ${EXE} ) |  | ||||||
| +#     #message( "${TEST} --> ${EXE}" ) |  | ||||||
| +#     add_executable( ${EXE} ${TEST} ) |  | ||||||
| +#     target_link_libraries( ${EXE} tester lapacktest magma ) |  | ||||||
| +#     list( APPEND testing ${EXE} ) |  | ||||||
| +# endforeach() |  | ||||||
| +# add_custom_target( testing DEPENDS ${testing} ) |  | ||||||
| + |  | ||||||
| + |  | ||||||
| +# # ---------------------------------------- |  | ||||||
| +# # compile each sparse tester |  | ||||||
| + |  | ||||||
| +# if (MAGMA_ENABLE_CUDA) |  | ||||||
| +#   set(SPARSE_TEST_DIR "sparse/testing") |  | ||||||
| +# else() |  | ||||||
| +#   set(SPARSE_TEST_DIR "sparse_hip/testing") |  | ||||||
| +# endif() |  | ||||||
| + |  | ||||||
| + |  | ||||||
| +# set( CMAKE_RUNTIME_OUTPUT_DIRECTORY "${SPARSE_TEST_DIR}" ) |  | ||||||
| +# cmake_policy( SET CMP0037 OLD) |  | ||||||
| +# foreach( TEST ${sparse_testing_all} ) |  | ||||||
| +#     string( REGEX REPLACE "\\.(cpp|f90|F90)"     "" EXE ${TEST} ) |  | ||||||
| +#     string( REGEX REPLACE "${SPARSE_TEST_DIR}/" "" EXE ${EXE} ) |  | ||||||
| +#     #message( "${TEST} --> ${EXE}" ) |  | ||||||
| +#     add_executable( ${EXE} ${TEST} ) |  | ||||||
| +#     target_link_libraries( ${EXE} magma_sparse magma ) |  | ||||||
| +#     list( APPEND sparse-testing ${EXE} ) |  | ||||||
| +# endforeach() |  | ||||||
| +# add_custom_target( sparse-testing DEPENDS ${sparse-testing} ) |  | ||||||
|  |  | ||||||
|  |  | ||||||
|  # ---------------------------------------- |  | ||||||
|  # what to install |  | ||||||
| -install( TARGETS magma magma_sparse ${blas_fix} |  | ||||||
| +install( TARGETS magma ${blas_fix} |  | ||||||
|           RUNTIME DESTINATION bin |  | ||||||
|           LIBRARY DESTINATION lib |  | ||||||
|           ARCHIVE DESTINATION lib ) |  | ||||||
| -file( GLOB headers include/*.h sparse/include/*.h "${CMAKE_BINARY_DIR}/include/*.h" ) |  | ||||||
| +file( GLOB headers include/*.h "${CMAKE_BINARY_DIR}/include/*.h" ) |  | ||||||
|  if (USE_FORTRAN) |  | ||||||
|      install( FILES ${headers} ${modules} |  | ||||||
|               DESTINATION include ) |  | ||||||
| @@ -769,9 +779,9 @@ else() |  | ||||||
|      "${blas_fix_lib} ${LAPACK_LIBS} hip::device roc::hipblas roc::hipsparse" ) |  | ||||||
|  endif() |  | ||||||
|  set( MAGMA_REQUIRED "" ) |  | ||||||
| -configure_file( "${pkgconfig}.in" "${pkgconfig}" @ONLY ) |  | ||||||
| -install( FILES "${CMAKE_BINARY_DIR}/${pkgconfig}" |  | ||||||
| -         DESTINATION lib/pkgconfig ) |  | ||||||
| +# configure_file( "${pkgconfig}.in" "${pkgconfig}" @ONLY ) |  | ||||||
| +# install( FILES "${CMAKE_BINARY_DIR}/${pkgconfig}" |  | ||||||
| +#          DESTINATION lib/pkgconfig ) |  | ||||||
|  |  | ||||||
|  # ---------------------------------------- |  | ||||||
|  get_directory_property( compile_definitions COMPILE_DEFINITIONS ) |  | ||||||
| @ -1,40 +0,0 @@ | |||||||
| diff --git a/control/get_batched_crossover.cpp b/control/get_batched_crossover.cpp |  | ||||||
| index 4ec57306..912f8608 100644 |  | ||||||
| --- a/control/get_batched_crossover.cpp |  | ||||||
| +++ b/control/get_batched_crossover.cpp |  | ||||||
| @@ -119,7 +119,7 @@ void magma_get_spotrf_batched_nbparam(magma_int_t n, magma_int_t *nb, magma_int_ |  | ||||||
|  void magma_get_zgetrf_batched_nbparam(magma_int_t n, magma_int_t *nb, magma_int_t *recnb) |  | ||||||
|  { |  | ||||||
|      *nb    = 64; |  | ||||||
| -    *recnb = 32; |  | ||||||
| +    *recnb = 16; |  | ||||||
|      return; |  | ||||||
|  } |  | ||||||
|   |  | ||||||
| @@ -127,7 +127,7 @@ void magma_get_zgetrf_batched_nbparam(magma_int_t n, magma_int_t *nb, magma_int_ |  | ||||||
|  void magma_get_cgetrf_batched_nbparam(magma_int_t n, magma_int_t *nb, magma_int_t *recnb) |  | ||||||
|  { |  | ||||||
|      *nb    = 128; |  | ||||||
| -    *recnb =  32; |  | ||||||
| +    *recnb =  16; |  | ||||||
|      return; |  | ||||||
|  } |  | ||||||
|   |  | ||||||
| @@ -135,7 +135,7 @@ void magma_get_cgetrf_batched_nbparam(magma_int_t n, magma_int_t *nb, magma_int_ |  | ||||||
|  void magma_get_dgetrf_batched_nbparam(magma_int_t n, magma_int_t *nb, magma_int_t *recnb) |  | ||||||
|  { |  | ||||||
|      *nb    = 128; |  | ||||||
| -    *recnb =  32; |  | ||||||
| +    *recnb =  16; |  | ||||||
|      return; |  | ||||||
|  } |  | ||||||
|   |  | ||||||
| @@ -143,7 +143,7 @@ void magma_get_dgetrf_batched_nbparam(magma_int_t n, magma_int_t *nb, magma_int_ |  | ||||||
|  void magma_get_sgetrf_batched_nbparam(magma_int_t n, magma_int_t *nb, magma_int_t *recnb) |  | ||||||
|  { |  | ||||||
|      *nb    = 128; |  | ||||||
| -    *recnb =  32; |  | ||||||
| +    *recnb =  16; |  | ||||||
|      return; |  | ||||||
|  } |  | ||||||
|   |  | ||||||
| @ -1,15 +0,0 @@ | |||||||
| diff --git a/src/zgetrf_batched.cpp b/src/zgetrf_batched.cpp |  | ||||||
| index 24a65a90..884d9352 100644 |  | ||||||
| --- a/src/zgetrf_batched.cpp |  | ||||||
| +++ b/src/zgetrf_batched.cpp |  | ||||||
| @@ -116,7 +116,9 @@ magma_zgetrf_batched( |  | ||||||
|              return magma_zgetrf_batched_smallsq_noshfl( m, dA_array, ldda, ipiv_array, info_array, batchCount, queue ); |  | ||||||
|          } |  | ||||||
|          else{ |  | ||||||
| -            return magma_zgetrf_batched_smallsq_shfl( m, dA_array, ldda, ipiv_array, info_array, batchCount, queue ); |  | ||||||
| +            // magma_cgetrf_batched_smallsq_shfl is broken, therefore let's call noshfl version for arch < 700 |  | ||||||
| +            // return magma_zgetrf_batched_smallsq_shfl( m, dA_array, ldda, ipiv_array, info_array, batchCount, queue ); |  | ||||||
| +            return magma_zgetrf_batched_smallsq_noshfl( m, dA_array, ldda, ipiv_array, info_array, batchCount, queue ); |  | ||||||
|          } |  | ||||||
|          #else |  | ||||||
|          return magma_zgetrf_batched_smallsq_noshfl( m, dA_array, ldda, ipiv_array, info_array, batchCount, queue ); |  | ||||||
| @ -1 +0,0 @@ | |||||||
| 6cd83808c6e8bc7a44028e05112b3ab4e579bcc73202ed14733f66661127e213  magma-2.6.1.tar.gz |  | ||||||
| @ -1,20 +0,0 @@ | |||||||
| --- control/thread_queue.cpp	2016-08-30 06:37:49.000000000 -0700 |  | ||||||
| +++ control/thread_queue.cpp	2016-10-10 19:47:28.911580965 -0700 |  | ||||||
| @@ -15,7 +15,7 @@ |  | ||||||
|  { |  | ||||||
|      if ( err != 0 ) { |  | ||||||
|          fprintf( stderr, "Error: %s (%d)\n", strerror(err), err ); |  | ||||||
| -        throw std::exception(); |  | ||||||
| +        // throw std::exception(); |  | ||||||
|      } |  | ||||||
|  } |  | ||||||
|   |  | ||||||
| @@ -172,7 +172,7 @@ |  | ||||||
|      check( pthread_mutex_lock( &mutex )); |  | ||||||
|      if ( quit_flag ) { |  | ||||||
|          fprintf( stderr, "Error: push_task() called after quit()\n" ); |  | ||||||
| -        throw std::exception(); |  | ||||||
| +        // throw std::exception(); |  | ||||||
|      } |  | ||||||
|      q.push( task ); |  | ||||||
|      ntask += 1; |  | ||||||
| @ -1,21 +0,0 @@ | |||||||
| The MIT License (MIT) |  | ||||||
|  |  | ||||||
| Copyright (c) 2016 manylinux |  | ||||||
|  |  | ||||||
| Permission is hereby granted, free of charge, to any person obtaining a copy |  | ||||||
| of this software and associated documentation files (the "Software"), to deal |  | ||||||
| in the Software without restriction, including without limitation the rights |  | ||||||
| to use, copy, modify, merge, publish, distribute, sublicense, and/or sell |  | ||||||
| copies of the Software, and to permit persons to whom the Software is |  | ||||||
| furnished to do so, subject to the following conditions: |  | ||||||
|  |  | ||||||
| The above copyright notice and this permission notice shall be included in all |  | ||||||
| copies or substantial portions of the Software. |  | ||||||
|  |  | ||||||
| THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |  | ||||||
| IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |  | ||||||
| FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |  | ||||||
| AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |  | ||||||
| LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |  | ||||||
| OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |  | ||||||
| SOFTWARE. |  | ||||||
| @ -1,28 +0,0 @@ | |||||||
| #!/usr/bin/env bash |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| SCRIPTPATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" |  | ||||||
|  |  | ||||||
| case "${GPU_ARCH_TYPE:-BLANK}" in |  | ||||||
|     BLANK) |  | ||||||
|         # Legacy behavior for CircleCI |  | ||||||
|         bash "${SCRIPTPATH}/build_cuda.sh" |  | ||||||
|         ;; |  | ||||||
|     cuda) |  | ||||||
|         bash "${SCRIPTPATH}/build_cuda.sh" |  | ||||||
|         ;; |  | ||||||
|     rocm) |  | ||||||
|         bash "${SCRIPTPATH}/build_rocm.sh" |  | ||||||
|         ;; |  | ||||||
|     cpu | cpu-cxx11-abi | cpu-s390x) |  | ||||||
|         bash "${SCRIPTPATH}/build_cpu.sh" |  | ||||||
|         ;; |  | ||||||
|     xpu) |  | ||||||
|         bash "${SCRIPTPATH}/build_xpu.sh" |  | ||||||
|         ;; |  | ||||||
|     *) |  | ||||||
|         echo "Un-recognized GPU_ARCH_TYPE '${GPU_ARCH_TYPE}', exiting..." |  | ||||||
|         exit 1 |  | ||||||
|         ;; |  | ||||||
| esac |  | ||||||
| @ -1,498 +0,0 @@ | |||||||
| #!/usr/bin/env bash |  | ||||||
| # meant to be called only from the neighboring build.sh and build_cpu.sh scripts |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
| SOURCE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )" |  | ||||||
|  |  | ||||||
| source ${SOURCE_DIR}/set_desired_python.sh |  | ||||||
|  |  | ||||||
|  |  | ||||||
| if [[ -n "$BUILD_PYTHONLESS" && -z "$LIBTORCH_VARIANT" ]]; then |  | ||||||
|     echo "BUILD_PYTHONLESS is set, so need LIBTORCH_VARIANT to also be set" |  | ||||||
|     echo "LIBTORCH_VARIANT should be one of shared-with-deps shared-without-deps static-with-deps static-without-deps" |  | ||||||
|     exit 1 |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| # Function to retry functions that sometimes timeout or have flaky failures |  | ||||||
| retry () { |  | ||||||
|     $*  || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*) |  | ||||||
| } |  | ||||||
|  |  | ||||||
| PLATFORM="manylinux2014_x86_64" |  | ||||||
| # TODO move this into the Docker images |  | ||||||
| OS_NAME=$(awk -F= '/^NAME/{print $2}' /etc/os-release) |  | ||||||
| if [[ "$OS_NAME" == *"CentOS Linux"* ]]; then |  | ||||||
|     retry yum install -q -y zip openssl |  | ||||||
| elif [[ "$OS_NAME" == *"AlmaLinux"* ]]; then |  | ||||||
|     retry yum install -q -y zip openssl |  | ||||||
|     PLATFORM="manylinux_2_28_x86_64" |  | ||||||
| elif [[ "$OS_NAME" == *"Red Hat Enterprise Linux"* ]]; then |  | ||||||
|     retry dnf install -q -y zip openssl |  | ||||||
| elif [[ "$OS_NAME" == *"Ubuntu"* ]]; then |  | ||||||
|     # TODO: Remove this once nvidia package repos are back online |  | ||||||
|     # Comment out nvidia repositories to prevent them from getting apt-get updated, see https://github.com/pytorch/pytorch/issues/74968 |  | ||||||
|     # shellcheck disable=SC2046 |  | ||||||
|     sed -i 's/.*nvidia.*/# &/' $(find /etc/apt/ -type f -name "*.list") |  | ||||||
|  |  | ||||||
|     retry apt-get update |  | ||||||
|     retry apt-get -y install zip openssl |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| # The package name is what we pass to 'pip install' when testing the built wheel. |  | ||||||
| # This is the env variable that setup.py uses to name the package. Note that |  | ||||||
| # pip 'normalizes' the name first by changing every '-' to '_' |  | ||||||
| if [[ -z "$TORCH_PACKAGE_NAME" ]]; then |  | ||||||
|     TORCH_PACKAGE_NAME='torch' |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| if [[ -z "$TORCH_NO_PYTHON_PACKAGE_NAME" ]]; then |  | ||||||
|     TORCH_NO_PYTHON_PACKAGE_NAME='torch_no_python' |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| TORCH_PACKAGE_NAME="$(echo $TORCH_PACKAGE_NAME | tr '-' '_')" |  | ||||||
| TORCH_NO_PYTHON_PACKAGE_NAME="$(echo $TORCH_NO_PYTHON_PACKAGE_NAME | tr '-' '_')" |  | ||||||
| echo "Expecting the built wheels to all be called '$TORCH_PACKAGE_NAME' or '$TORCH_NO_PYTHON_PACKAGE_NAME'" |  | ||||||
|  |  | ||||||
| # Version: setup.py uses $PYTORCH_BUILD_VERSION.post$PYTORCH_BUILD_NUMBER if |  | ||||||
| # PYTORCH_BUILD_NUMBER > 1 |  | ||||||
| build_version="$PYTORCH_BUILD_VERSION" |  | ||||||
| build_number="$PYTORCH_BUILD_NUMBER" |  | ||||||
| if [[ -n "$OVERRIDE_PACKAGE_VERSION" ]]; then |  | ||||||
|     # This will be the *exact* version, since build_number<1 |  | ||||||
|     build_version="$OVERRIDE_PACKAGE_VERSION" |  | ||||||
|     build_number=0 |  | ||||||
| fi |  | ||||||
| if [[ -z "$build_version" ]]; then |  | ||||||
|     build_version=1.0.0 |  | ||||||
| fi |  | ||||||
| if [[ -z "$build_number" ]]; then |  | ||||||
|     build_number=1 |  | ||||||
| fi |  | ||||||
| export PYTORCH_BUILD_VERSION=$build_version |  | ||||||
| export PYTORCH_BUILD_NUMBER=$build_number |  | ||||||
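| # Illustrative sketch only, not part of the original script: echo the version |  | ||||||
| # string the way setup.py is expected to compose it from the two variables above, |  | ||||||
| # purely so it is easy to spot in the build logs (the version values are examples). |  | ||||||
| if [[ "$PYTORCH_BUILD_NUMBER" -gt 1 ]]; then |  | ||||||
|     echo "Expecting final wheel version ${PYTORCH_BUILD_VERSION}.post${PYTORCH_BUILD_NUMBER}"  # e.g. 2.5.0.post2 |  | ||||||
| else |  | ||||||
|     echo "Expecting final wheel version ${PYTORCH_BUILD_VERSION}"  # e.g. 2.5.0 |  | ||||||
| fi |  | ||||||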
|  |  | ||||||
| export CMAKE_LIBRARY_PATH="/opt/intel/lib:/lib:$CMAKE_LIBRARY_PATH" |  | ||||||
| export CMAKE_INCLUDE_PATH="/opt/intel/include:$CMAKE_INCLUDE_PATH" |  | ||||||
|  |  | ||||||
| if [[ -e /opt/openssl ]]; then |  | ||||||
|     export OPENSSL_ROOT_DIR=/opt/openssl |  | ||||||
|     export CMAKE_INCLUDE_PATH="/opt/openssl/include":$CMAKE_INCLUDE_PATH |  | ||||||
| fi |  | ||||||
|  |  | ||||||
|  |  | ||||||
|  |  | ||||||
| mkdir -p /tmp/$WHEELHOUSE_DIR |  | ||||||
|  |  | ||||||
| export PATCHELF_BIN=/usr/local/bin/patchelf |  | ||||||
| patchelf_version=$($PATCHELF_BIN --version) |  | ||||||
| echo "patchelf version: " $patchelf_version |  | ||||||
| if [[ "$patchelf_version" == "patchelf 0.9" ]]; then |  | ||||||
|     echo "Your patchelf version is too old. Please use version >= 0.10." |  | ||||||
|     exit 1 |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| ######################################################## |  | ||||||
| # Compile wheels as well as libtorch |  | ||||||
| ####################################################### |  | ||||||
| if [[ -z "$PYTORCH_ROOT" ]]; then |  | ||||||
|     echo "Need to set PYTORCH_ROOT env variable" |  | ||||||
|     exit 1 |  | ||||||
| fi |  | ||||||
| pushd "$PYTORCH_ROOT" |  | ||||||
| python setup.py clean |  | ||||||
| retry pip install -qr requirements.txt |  | ||||||
| case ${DESIRED_PYTHON} in |  | ||||||
|   cp31*) |  | ||||||
|     retry pip install -q --pre numpy==2.1.0 |  | ||||||
|     ;; |  | ||||||
|   # Should catch 3.9 and below (cp31* above matches 3.10+) |  | ||||||
|   *) |  | ||||||
|     retry pip install -q --pre numpy==2.0.2 |  | ||||||
|     ;; |  | ||||||
| esac |  | ||||||
|  |  | ||||||
| if [[ "$DESIRED_DEVTOOLSET" == *"cxx11-abi"* ]]; then |  | ||||||
|     export _GLIBCXX_USE_CXX11_ABI=1 |  | ||||||
| else |  | ||||||
|     export _GLIBCXX_USE_CXX11_ABI=0 |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| if [[ "$DESIRED_CUDA" == *"rocm"* ]]; then |  | ||||||
|     echo "Calling build_amd.py at $(date)" |  | ||||||
|     python tools/amd_build/build_amd.py |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| # This value comes from binary_linux_build.sh (and should only be set to true |  | ||||||
| # for master / release branches) |  | ||||||
| BUILD_DEBUG_INFO=${BUILD_DEBUG_INFO:=0} |  | ||||||
|  |  | ||||||
| if [[ $BUILD_DEBUG_INFO == "1" ]]; then |  | ||||||
|     echo "Building wheel and debug info" |  | ||||||
| else |  | ||||||
|     echo "BUILD_DEBUG_INFO was not set, skipping debug info" |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| if [[ "$DISABLE_RCCL" = 1 ]]; then |  | ||||||
|     echo "Disabling NCCL/RCCL in pyTorch" |  | ||||||
|     USE_RCCL=0 |  | ||||||
|     USE_NCCL=0 |  | ||||||
|     USE_KINETO=0 |  | ||||||
| else |  | ||||||
|     USE_RCCL=1 |  | ||||||
|     USE_NCCL=1 |  | ||||||
|     USE_KINETO=1 |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| echo "Calling setup.py bdist at $(date)" |  | ||||||
|  |  | ||||||
| if [[ "$USE_SPLIT_BUILD" == "true" ]]; then |  | ||||||
|     echo "Calling setup.py bdist_wheel for split build (BUILD_LIBTORCH_WHL)" |  | ||||||
|     time EXTRA_CAFFE2_CMAKE_FLAGS=${EXTRA_CAFFE2_CMAKE_FLAGS[@]} \ |  | ||||||
|     BUILD_LIBTORCH_WHL=1 BUILD_PYTHON_ONLY=0 \ |  | ||||||
|     BUILD_LIBTORCH_CPU_WITH_DEBUG=$BUILD_DEBUG_INFO \ |  | ||||||
|     USE_NCCL=${USE_NCCL} USE_RCCL=${USE_RCCL} USE_KINETO=${USE_KINETO} \ |  | ||||||
|     python setup.py bdist_wheel -d /tmp/$WHEELHOUSE_DIR |  | ||||||
|     echo "Finished setup.py bdist_wheel for split build (BUILD_LIBTORCH_WHL)" |  | ||||||
|     echo "Calling setup.py bdist_wheel for split build (BUILD_PYTHON_ONLY)" |  | ||||||
|     time EXTRA_CAFFE2_CMAKE_FLAGS=${EXTRA_CAFFE2_CMAKE_FLAGS[@]} \ |  | ||||||
|     BUILD_LIBTORCH_WHL=0 BUILD_PYTHON_ONLY=1 \ |  | ||||||
|     BUILD_LIBTORCH_CPU_WITH_DEBUG=$BUILD_DEBUG_INFO \ |  | ||||||
|     USE_NCCL=${USE_NCCL} USE_RCCL=${USE_RCCL} USE_KINETO=${USE_KINETO} \ |  | ||||||
|     python setup.py bdist_wheel -d /tmp/$WHEELHOUSE_DIR --cmake |  | ||||||
|     echo "Finished setup.py bdist_wheel for split build (BUILD_PYTHON_ONLY)" |  | ||||||
| else |  | ||||||
|     time CMAKE_ARGS=${CMAKE_ARGS[@]} \ |  | ||||||
|         EXTRA_CAFFE2_CMAKE_FLAGS=${EXTRA_CAFFE2_CMAKE_FLAGS[@]} \ |  | ||||||
|         BUILD_LIBTORCH_CPU_WITH_DEBUG=$BUILD_DEBUG_INFO \ |  | ||||||
|         USE_NCCL=${USE_NCCL} USE_RCCL=${USE_RCCL} USE_KINETO=${USE_KINETO} \ |  | ||||||
|         python setup.py bdist_wheel -d /tmp/$WHEELHOUSE_DIR |  | ||||||
| fi |  | ||||||
| echo "Finished setup.py bdist at $(date)" |  | ||||||
|  |  | ||||||
| # Build libtorch packages |  | ||||||
| if [[ -n "$BUILD_PYTHONLESS" ]]; then |  | ||||||
|     # Now build pythonless libtorch |  | ||||||
|     # Note - just use whichever python we happen to be on |  | ||||||
|     python setup.py clean |  | ||||||
|  |  | ||||||
|     if [[ $LIBTORCH_VARIANT = *"static"* ]]; then |  | ||||||
|         STATIC_CMAKE_FLAG="-DTORCH_STATIC=1" |  | ||||||
|     fi |  | ||||||
|  |  | ||||||
|     mkdir -p build |  | ||||||
|     pushd build |  | ||||||
|     echo "Calling tools/build_libtorch.py at $(date)" |  | ||||||
|     time CMAKE_ARGS=${CMAKE_ARGS[@]} \ |  | ||||||
|          EXTRA_CAFFE2_CMAKE_FLAGS="${EXTRA_CAFFE2_CMAKE_FLAGS[@]} $STATIC_CMAKE_FLAG" \ |  | ||||||
|          python ../tools/build_libtorch.py |  | ||||||
|     echo "Finished tools/build_libtorch.py at $(date)" |  | ||||||
|     popd |  | ||||||
|  |  | ||||||
|     mkdir -p libtorch/{lib,bin,include,share} |  | ||||||
|     cp -r build/build/lib libtorch/ |  | ||||||
|  |  | ||||||
|     # for now, the headers for the libtorch package will just be copied in |  | ||||||
|     # from one of the wheels (this is from when this script built multiple |  | ||||||
|     # wheels at once) |  | ||||||
|     ANY_WHEEL=$(ls /tmp/$WHEELHOUSE_DIR/torch*.whl | head -n1) |  | ||||||
|     unzip -d any_wheel $ANY_WHEEL |  | ||||||
|     if [[ -d any_wheel/torch/include ]]; then |  | ||||||
|         cp -r any_wheel/torch/include libtorch/ |  | ||||||
|     else |  | ||||||
|         cp -r any_wheel/torch/lib/include libtorch/ |  | ||||||
|     fi |  | ||||||
|     cp -r any_wheel/torch/share/cmake libtorch/share/ |  | ||||||
|     rm -rf any_wheel |  | ||||||
|  |  | ||||||
|     echo $PYTORCH_BUILD_VERSION > libtorch/build-version |  | ||||||
|     echo "$(pushd $PYTORCH_ROOT && git rev-parse HEAD)" > libtorch/build-hash |  | ||||||
|  |  | ||||||
|     mkdir -p /tmp/$LIBTORCH_HOUSE_DIR |  | ||||||
|  |  | ||||||
|     if [[ "$DESIRED_DEVTOOLSET" == *"cxx11-abi"* ]]; then |  | ||||||
|         LIBTORCH_ABI="cxx11-abi-" |  | ||||||
|     else |  | ||||||
|         LIBTORCH_ABI= |  | ||||||
|     fi |  | ||||||
|  |  | ||||||
|     zip -rq /tmp/$LIBTORCH_HOUSE_DIR/libtorch-$LIBTORCH_ABI$LIBTORCH_VARIANT-$PYTORCH_BUILD_VERSION.zip libtorch |  | ||||||
|     cp /tmp/$LIBTORCH_HOUSE_DIR/libtorch-$LIBTORCH_ABI$LIBTORCH_VARIANT-$PYTORCH_BUILD_VERSION.zip \ |  | ||||||
|        /tmp/$LIBTORCH_HOUSE_DIR/libtorch-$LIBTORCH_ABI$LIBTORCH_VARIANT-latest.zip |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| popd |  | ||||||
|  |  | ||||||
| ####################################################################### |  | ||||||
| # ADD DEPENDENCIES INTO THE WHEEL |  | ||||||
| # |  | ||||||
| # auditwheel repair does not handle these wheels correctly, so we do its work |  | ||||||
| # manually: copy the dependency libs into the wheel, patchelf the DT_NEEDED |  | ||||||
| # entries, and fix the RECORD entries ourselves |  | ||||||
| ###################################################################### |  | ||||||
|  |  | ||||||
| fname_with_sha256() { |  | ||||||
|     HASH=$(sha256sum $1 | cut -c1-8) |  | ||||||
|     DIRNAME=$(dirname $1) |  | ||||||
|     BASENAME=$(basename $1) |  | ||||||
|     # Do not rename libnvrtc-builtins.so, as it is dynamically loaded |  | ||||||
|     # by libnvrtc.so. |  | ||||||
|     # Similarly, don't mangle the libcudnn and libcublas library names. |  | ||||||
|     if [[ $BASENAME == "libnvrtc-builtins.s"* || $BASENAME == "libcudnn"* || $BASENAME == "libcublas"*  ]]; then |  | ||||||
|         echo $1 |  | ||||||
|     else |  | ||||||
|         INITNAME=$(echo $BASENAME | cut -f1 -d".") |  | ||||||
|         ENDNAME=$(echo $BASENAME | cut -f 2- -d".") |  | ||||||
|         echo "$DIRNAME/$INITNAME-$HASH.$ENDNAME" |  | ||||||
|     fi |  | ||||||
| } |  | ||||||
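| # Illustrative examples only (the hash fragment below is hypothetical): |  | ||||||
| #   fname_with_sha256 /usr/lib64/libgomp.so.1  ->  /usr/lib64/libgomp-1a2b3c4d.so.1 |  | ||||||
| #   fname_with_sha256 torch/lib/libcudnn.so.9  ->  torch/lib/libcudnn.so.9   (excluded names pass through unchanged) |  | ||||||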
|  |  | ||||||
| fname_without_so_number() { |  | ||||||
|     LINKNAME=$(echo $1 | sed -e 's/\.so.*/.so/g') |  | ||||||
|     echo "$LINKNAME" |  | ||||||
| } |  | ||||||
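| # Illustrative example only: fname_without_so_number libamdhip64.so.6  ->  libamdhip64.so |  | ||||||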
|  |  | ||||||
| make_wheel_record() { |  | ||||||
|     FPATH=$1 |  | ||||||
|     if echo $FPATH | grep RECORD >/dev/null 2>&1; then |  | ||||||
|         # the RECORD file itself is listed without a hash or size |  | ||||||
|         echo "\"$FPATH\",," |  | ||||||
|     else |  | ||||||
|         HASH=$(openssl dgst -sha256 -binary $FPATH | openssl base64 | sed -e 's/+/-/g' | sed -e 's/\//_/g' | sed -e 's/=//g') |  | ||||||
|         FSIZE=$(ls -nl $FPATH | awk '{print $5}') |  | ||||||
|         echo "\"$FPATH\",sha256=$HASH,$FSIZE" |  | ||||||
|     fi |  | ||||||
| } |  | ||||||
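| # Illustrative RECORD entries as produced above (hash and size values are made up): |  | ||||||
| #   "torch/lib/libtorch_cpu.so",sha256=9f2dKq1Vexample_only_hashXw,123456789 |  | ||||||
| #   "torch-${PYTORCH_BUILD_VERSION}.dist-info/RECORD",, |  | ||||||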
|  |  | ||||||
| replace_needed_sofiles() { |  | ||||||
|     find $1 -name '*.so*' | while read sofile; do |  | ||||||
|         origname=$2 |  | ||||||
|         patchedname=$3 |  | ||||||
|         if [[ "$origname" != "$patchedname" ]] || [[ "$DESIRED_CUDA" == *"rocm"* ]]; then |  | ||||||
|             set +e |  | ||||||
|             origname=$($PATCHELF_BIN --print-needed $sofile | grep "$origname.*") |  | ||||||
|             ERRCODE=$? |  | ||||||
|             set -e |  | ||||||
|             if [ "$ERRCODE" -eq "0" ]; then |  | ||||||
|                 echo "patching $sofile entry $origname to $patchedname" |  | ||||||
|                 $PATCHELF_BIN --replace-needed $origname $patchedname $sofile |  | ||||||
|             fi |  | ||||||
|         fi |  | ||||||
|     done |  | ||||||
| } |  | ||||||
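| # Illustrative call only (the hashed name is hypothetical): rewrite every DT_NEEDED |  | ||||||
| # entry for libgomp.so.1 under torch/ to the hashed copy shipped in torch/lib: |  | ||||||
| #   replace_needed_sofiles torch libgomp.so.1 libgomp-1a2b3c4d.so.1 |  | ||||||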
|  |  | ||||||
| echo 'Built this wheel:' |  | ||||||
| ls /tmp/$WHEELHOUSE_DIR |  | ||||||
| mkdir -p "/$WHEELHOUSE_DIR" |  | ||||||
| mv /tmp/$WHEELHOUSE_DIR/torch*linux*.whl /$WHEELHOUSE_DIR/ |  | ||||||
|  |  | ||||||
| if [[ "$USE_SPLIT_BUILD" == "true" ]]; then |  | ||||||
|     mv /tmp/$WHEELHOUSE_DIR/torch_no_python*.whl /$WHEELHOUSE_DIR/ || true |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| if [[ -n "$BUILD_PYTHONLESS" ]]; then |  | ||||||
|     mkdir -p /$LIBTORCH_HOUSE_DIR |  | ||||||
|     mv /tmp/$LIBTORCH_HOUSE_DIR/*.zip /$LIBTORCH_HOUSE_DIR |  | ||||||
|     rm -rf /tmp/$LIBTORCH_HOUSE_DIR |  | ||||||
| fi |  | ||||||
| rm -rf /tmp/$WHEELHOUSE_DIR |  | ||||||
| rm -rf /tmp_dir |  | ||||||
| mkdir /tmp_dir |  | ||||||
| pushd /tmp_dir |  | ||||||
|  |  | ||||||
| for pkg in /$WHEELHOUSE_DIR/torch_no_python*.whl /$WHEELHOUSE_DIR/torch*linux*.whl /$LIBTORCH_HOUSE_DIR/libtorch*.zip; do |  | ||||||
|  |  | ||||||
|     # if the glob didn't match anything |  | ||||||
|     if [[ ! -e $pkg ]]; then |  | ||||||
|         continue |  | ||||||
|     fi |  | ||||||
|  |  | ||||||
|     rm -rf tmp |  | ||||||
|     mkdir -p tmp |  | ||||||
|     cd tmp |  | ||||||
|     cp $pkg . |  | ||||||
|  |  | ||||||
|     unzip -q $(basename $pkg) |  | ||||||
|     rm -f $(basename $pkg) |  | ||||||
|  |  | ||||||
|     if [[ -d torch ]]; then |  | ||||||
|         PREFIX=torch |  | ||||||
|     else |  | ||||||
|         PREFIX=libtorch |  | ||||||
|     fi |  | ||||||
|  |  | ||||||
|     if [[ $pkg != *"without-deps"* ]]; then |  | ||||||
|         # copy the needed dependent .so files into the wheel and tag them with their hash |  | ||||||
|         patched=() |  | ||||||
|         for filepath in "${DEPS_LIST[@]}"; do |  | ||||||
|             filename=$(basename $filepath) |  | ||||||
|             destpath=$PREFIX/lib/$filename |  | ||||||
|             if [[ "$filepath" != "$destpath" ]]; then |  | ||||||
|                 cp $filepath $destpath |  | ||||||
|             fi |  | ||||||
|  |  | ||||||
|             # ROCm workaround for roctracer dlopens |  | ||||||
|             if [[ "$DESIRED_CUDA" == *"rocm"* ]]; then |  | ||||||
|                 patchedpath=$(fname_without_so_number $destpath) |  | ||||||
|             # Keep the so number for XPU dependencies |  | ||||||
|             elif [[ "$DESIRED_CUDA" == *"xpu"* ]]; then |  | ||||||
|                 patchedpath=$destpath |  | ||||||
|             else |  | ||||||
|                 patchedpath=$(fname_with_sha256 $destpath) |  | ||||||
|             fi |  | ||||||
|             patchedname=$(basename $patchedpath) |  | ||||||
|             if [[ "$destpath" != "$patchedpath" ]]; then |  | ||||||
|                 mv $destpath $patchedpath |  | ||||||
|             fi |  | ||||||
|             patched+=("$patchedname") |  | ||||||
|             echo "Copied $filepath to $patchedpath" |  | ||||||
|         done |  | ||||||
|  |  | ||||||
|         echo "patching to fix the so names to the hashed names" |  | ||||||
|         for ((i=0;i<${#DEPS_LIST[@]};++i)); do |  | ||||||
|             replace_needed_sofiles $PREFIX ${DEPS_SONAME[i]} ${patched[i]} |  | ||||||
|             # do the same for caffe2, if it exists |  | ||||||
|             if [[ -d caffe2 ]]; then |  | ||||||
|                 replace_needed_sofiles caffe2 ${DEPS_SONAME[i]} ${patched[i]} |  | ||||||
|             fi |  | ||||||
|         done |  | ||||||
|  |  | ||||||
|         # copy over needed auxiliary files |  | ||||||
|         for ((i=0;i<${#DEPS_AUX_SRCLIST[@]};++i)); do |  | ||||||
|             srcpath=${DEPS_AUX_SRCLIST[i]} |  | ||||||
|             dstpath=$PREFIX/${DEPS_AUX_DSTLIST[i]} |  | ||||||
|             mkdir -p $(dirname $dstpath) |  | ||||||
|             cp $srcpath $dstpath |  | ||||||
|         done |  | ||||||
|     fi |  | ||||||
|  |  | ||||||
|     # set RPATH of _C.so and similar to $ORIGIN, $ORIGIN/lib |  | ||||||
|     find $PREFIX -maxdepth 1 -type f -name "*.so*" | while read sofile; do |  | ||||||
|         echo "Setting rpath of $sofile to ${C_SO_RPATH:-'$ORIGIN:$ORIGIN/lib'}" |  | ||||||
|         $PATCHELF_BIN --set-rpath ${C_SO_RPATH:-'$ORIGIN:$ORIGIN/lib'} ${FORCE_RPATH:-} $sofile |  | ||||||
|         $PATCHELF_BIN --print-rpath $sofile |  | ||||||
|     done |  | ||||||
|  |  | ||||||
|     # set RPATH of lib/ files to $ORIGIN |  | ||||||
|     find $PREFIX/lib -maxdepth 1 -type f -name "*.so*" | while read sofile; do |  | ||||||
|         echo "Setting rpath of $sofile to ${LIB_SO_RPATH:-'$ORIGIN'}" |  | ||||||
|         $PATCHELF_BIN --set-rpath ${LIB_SO_RPATH:-'$ORIGIN'} ${FORCE_RPATH:-} $sofile |  | ||||||
|         $PATCHELF_BIN --print-rpath $sofile |  | ||||||
|     done |  | ||||||
|  |  | ||||||
|     # create the manylinux_2_28 tag; this needs to happen before regenerating the RECORD file |  | ||||||
|     if [[ $PLATFORM == "manylinux_2_28_x86_64" && $GPU_ARCH_TYPE != "cpu-s390x" && $GPU_ARCH_TYPE != "xpu" ]]; then |  | ||||||
|         wheel_file=$(echo $(basename $pkg) | sed -e 's/-cp.*$/.dist-info\/WHEEL/g') |  | ||||||
|         sed -i -e s#linux_x86_64#"${PLATFORM}"# $wheel_file; |  | ||||||
|     fi |  | ||||||
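|     # Illustrative effect only: inside the .dist-info/WHEEL file the sed above turns a tag such as |  | ||||||
|     #   Tag: cp311-cp311-linux_x86_64   into   Tag: cp311-cp311-manylinux_2_28_x86_64 |  | ||||||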
|  |  | ||||||
|     # regenerate the RECORD file with new hashes |  | ||||||
|     record_file=$(echo $(basename $pkg) | sed -e 's/-cp.*$/.dist-info\/RECORD/g') |  | ||||||
|     if [[ -e $record_file ]]; then |  | ||||||
|         echo "Generating new record file $record_file" |  | ||||||
|         : > "$record_file" |  | ||||||
|         # generate a RECORD entry for every file in the wheel |  | ||||||
|         find * -type f | while read fname; do |  | ||||||
|             make_wheel_record "$fname" >>"$record_file" |  | ||||||
|         done |  | ||||||
|     fi |  | ||||||
|  |  | ||||||
|     if [[ $BUILD_DEBUG_INFO == "1" ]]; then |  | ||||||
|         pushd "$PREFIX/lib" |  | ||||||
|  |  | ||||||
|         # Duplicate library into debug lib |  | ||||||
|         cp libtorch_cpu.so libtorch_cpu.so.dbg |  | ||||||
|  |  | ||||||
|         # Keep debug symbols on debug lib |  | ||||||
|         strip --only-keep-debug libtorch_cpu.so.dbg |  | ||||||
|  |  | ||||||
|         # Remove debug info from release lib |  | ||||||
|         strip --strip-debug libtorch_cpu.so |  | ||||||
|  |  | ||||||
|         objcopy libtorch_cpu.so --add-gnu-debuglink=libtorch_cpu.so.dbg |  | ||||||
|  |  | ||||||
|         # Zip up debug info |  | ||||||
|         mkdir -p /tmp/debug |  | ||||||
|         mv libtorch_cpu.so.dbg /tmp/debug/libtorch_cpu.so.dbg |  | ||||||
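|         # The .gnu_debuglink section ends with a 4-byte CRC32 of the debug file; |  | ||||||
|         # dump the section and keep those last 4 bytes so the debug zip can be |  | ||||||
|         # matched back to this exact libtorch_cpu.so. |  | ||||||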
|         CRC32=$(objcopy --dump-section .gnu_debuglink=>(tail -c4 | od -t x4 -An | xargs echo) libtorch_cpu.so) |  | ||||||
|  |  | ||||||
|         pushd /tmp |  | ||||||
|         PKG_NAME=$(basename "$pkg" | sed 's/\.whl$//g') |  | ||||||
|         zip /tmp/debug-whl-libtorch-"$PKG_NAME"-"$CRC32".zip /tmp/debug/libtorch_cpu.so.dbg |  | ||||||
|         cp /tmp/debug-whl-libtorch-"$PKG_NAME"-"$CRC32".zip "$PYTORCH_FINAL_PACKAGE_DIR" |  | ||||||
|         popd |  | ||||||
|  |  | ||||||
|         popd |  | ||||||
|     fi |  | ||||||
|  |  | ||||||
|     # Rename wheel for Manylinux 2_28 |  | ||||||
|     if [[ $PLATFORM == "manylinux_2_28_x86_64" && $GPU_ARCH_TYPE != "cpu-s390x" && $GPU_ARCH_TYPE != "xpu" ]]; then |  | ||||||
|         pkg_name=$(echo $(basename $pkg) | sed -e s#linux_x86_64#"${PLATFORM}"#) |  | ||||||
|         zip -rq $pkg_name $PREIX* |  | ||||||
|         rm -f $pkg |  | ||||||
|         mv $pkg_name $(dirname $pkg)/$pkg_name |  | ||||||
|     else |  | ||||||
|         # zip the wheel back up |  | ||||||
|         zip -rq $(basename $pkg) $PREIX* |  | ||||||
|         # remove original wheel |  | ||||||
|         rm -f $pkg |  | ||||||
|         mv $(basename $pkg) $pkg |  | ||||||
|     fi |  | ||||||
|  |  | ||||||
|     cd .. |  | ||||||
|     rm -rf tmp |  | ||||||
| done |  | ||||||
|  |  | ||||||
| # Copy wheels to host machine for persistence before testing |  | ||||||
| if [[ -n "$PYTORCH_FINAL_PACKAGE_DIR" ]]; then |  | ||||||
|     mkdir -p "$PYTORCH_FINAL_PACKAGE_DIR" || true |  | ||||||
|     if [[ -n "$BUILD_PYTHONLESS" ]]; then |  | ||||||
|         cp /$LIBTORCH_HOUSE_DIR/libtorch*.zip "$PYTORCH_FINAL_PACKAGE_DIR" |  | ||||||
|     else |  | ||||||
|         cp /$WHEELHOUSE_DIR/torch*.whl "$PYTORCH_FINAL_PACKAGE_DIR" |  | ||||||
|     fi |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| # remove build-time toolchains and CUDA installs before testing |  | ||||||
| rm -rf /opt/rh |  | ||||||
| if ls /usr/local/cuda* >/dev/null 2>&1; then |  | ||||||
|     rm -rf /usr/local/cuda* |  | ||||||
| fi |  | ||||||
|  |  | ||||||
|  |  | ||||||
| # Test that all the wheels work |  | ||||||
| if [[ -z "$BUILD_PYTHONLESS" ]]; then |  | ||||||
|   export OMP_NUM_THREADS=4 # otherwise the tests take too long on NUMA machines |  | ||||||
|   pushd $PYTORCH_ROOT/test |  | ||||||
|  |  | ||||||
|   # Install the wheel for this Python version |  | ||||||
|   if [[ "$USE_SPLIT_BUILD" == "true" ]]; then |  | ||||||
|     pip uninstall -y "$TORCH_NO_PYTHON_PACKAGE_NAME" || true |  | ||||||
|   fi |  | ||||||
|  |  | ||||||
|   pip uninstall -y "$TORCH_PACKAGE_NAME" |  | ||||||
|  |  | ||||||
|   if [[ "$USE_SPLIT_BUILD" == "true" ]]; then |  | ||||||
|     pip install "$TORCH_NO_PYTHON_PACKAGE_NAME" --no-index -f /$WHEELHOUSE_DIR --no-dependencies -v |  | ||||||
|   fi |  | ||||||
|  |  | ||||||
|   pip install "$TORCH_PACKAGE_NAME" --no-index -f /$WHEELHOUSE_DIR --no-dependencies -v |  | ||||||
|  |  | ||||||
|   # Print info on the libraries installed in this wheel |  | ||||||
|   # Rather than adjusting the find command to skip non-library files with an embedded *.so* in their name, |  | ||||||
|   # we simply add || true to the ldd command, since this is only for reporting purposes. |  | ||||||
|   installed_libraries=($(find "$pydir/lib/python${py_majmin}/site-packages/torch/" -name '*.so*')) |  | ||||||
|   echo "The wheel installed all of the libraries: ${installed_libraries[@]}" |  | ||||||
|   for installed_lib in "${installed_libraries[@]}"; do |  | ||||||
|       ldd "$installed_lib" || true |  | ||||||
|   done |  | ||||||
|  |  | ||||||
|   # Run the tests |  | ||||||
|   echo "$(date) :: Running tests" |  | ||||||
|   pushd "$PYTORCH_ROOT" |  | ||||||
|  |  | ||||||
|  |  | ||||||
|   LD_LIBRARY_PATH=/usr/local/nvidia/lib64 \ |  | ||||||
|           "${PYTORCH_ROOT}/.ci/pytorch/run_tests.sh" manywheel "${py_majmin}" "$DESIRED_CUDA" |  | ||||||
|   popd |  | ||||||
|   echo "$(date) :: Finished tests" |  | ||||||
| fi |  | ||||||
| @ -1,60 +0,0 @@ | |||||||
| #!/usr/bin/env bash |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| export TH_BINARY_BUILD=1 |  | ||||||
| export USE_CUDA=0 |  | ||||||
|  |  | ||||||
| # Keep an array of cmake variables to add to |  | ||||||
| if [[ -z "$CMAKE_ARGS" ]]; then |  | ||||||
|     # These are passed to tools/build_pytorch_libs.sh::build() |  | ||||||
|     CMAKE_ARGS=() |  | ||||||
| fi |  | ||||||
| if [[ -z "$EXTRA_CAFFE2_CMAKE_FLAGS" ]]; then |  | ||||||
|     # These are passed to tools/build_pytorch_libs.sh::build_caffe2() |  | ||||||
|     EXTRA_CAFFE2_CMAKE_FLAGS=() |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| WHEELHOUSE_DIR="wheelhousecpu" |  | ||||||
| LIBTORCH_HOUSE_DIR="libtorch_housecpu" |  | ||||||
| if [[ -z "$PYTORCH_FINAL_PACKAGE_DIR" ]]; then |  | ||||||
|     if [[ -z "$BUILD_PYTHONLESS" ]]; then |  | ||||||
|         PYTORCH_FINAL_PACKAGE_DIR="/remote/wheelhousecpu" |  | ||||||
|     else |  | ||||||
|         PYTORCH_FINAL_PACKAGE_DIR="/remote/libtorch_housecpu" |  | ||||||
|     fi |  | ||||||
| fi |  | ||||||
| mkdir -p "$PYTORCH_FINAL_PACKAGE_DIR" || true |  | ||||||
|  |  | ||||||
| OS_NAME=$(awk -F= '/^NAME/{print $2}' /etc/os-release) |  | ||||||
| if [[ "$OS_NAME" == *"CentOS Linux"* ]]; then |  | ||||||
|     LIBGOMP_PATH="/usr/lib64/libgomp.so.1" |  | ||||||
| elif [[ "$OS_NAME" == *"Red Hat Enterprise Linux"* ]]; then |  | ||||||
|     LIBGOMP_PATH="/usr/lib64/libgomp.so.1" |  | ||||||
| elif [[ "$OS_NAME" == *"AlmaLinux"* ]]; then |  | ||||||
|     LIBGOMP_PATH="/usr/lib64/libgomp.so.1" |  | ||||||
| elif [[ "$OS_NAME" == *"Ubuntu"* ]]; then |  | ||||||
|     if [[ "$(uname -m)" == "s390x" ]]; then |  | ||||||
|         LIBGOMP_PATH="/usr/lib/s390x-linux-gnu/libgomp.so.1" |  | ||||||
|     else |  | ||||||
|         LIBGOMP_PATH="/usr/lib/x86_64-linux-gnu/libgomp.so.1" |  | ||||||
|     fi |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| DEPS_LIST=( |  | ||||||
|     "$LIBGOMP_PATH" |  | ||||||
| ) |  | ||||||
|  |  | ||||||
| DEPS_SONAME=( |  | ||||||
|     "libgomp.so.1" |  | ||||||
| ) |  | ||||||
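| # DEPS_LIST holds the full paths that build_common.sh copies into the wheel's lib/ |  | ||||||
| # directory, and DEPS_SONAME the matching DT_NEEDED names it rewrites with patchelf. |  | ||||||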
|  |  | ||||||
| rm -rf /usr/local/cuda* |  | ||||||
|  |  | ||||||
| SOURCE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )" |  | ||||||
| if [[ -z "$BUILD_PYTHONLESS" ]]; then |  | ||||||
|     BUILD_SCRIPT=build_common.sh |  | ||||||
| else |  | ||||||
|     BUILD_SCRIPT=build_libtorch.sh |  | ||||||
| fi |  | ||||||
| source ${SOURCE_DIR}/${BUILD_SCRIPT} |  | ||||||
Some files were not shown because too many files have changed in this diff