mirror of
				https://github.com/pytorch/pytorch.git
				synced 2025-10-30 03:34:56 +08:00 
			
		
		
		
	Compare commits
	
		
			37 Commits
		
	
	
		
			v2.5.1
			...
			v2.0.0-rc3
		
	
	| Author | SHA1 | Date | |
|---|---|---|---|
| c04134cdb1 | |||
| 72d0863ab2 | |||
| 1bd334dc25 | |||
| 93e13cd429 | |||
| 4e4d4b0afe | |||
| c4fa850827 | |||
| 36ead09873 | |||
| 66d23dbad7 | |||
| e2fff58844 | |||
| 735333a7ff | |||
| 6017488801 | |||
| e51e5e721c | |||
| 91739a0279 | |||
| 531f097b6f | |||
| 00eb7b0d78 | |||
| 2180f342c4 | |||
| a90b4f09ac | |||
| 1211ceeaa4 | |||
| beaa5c5908 | |||
| 4bd5c1e4f4 | |||
| f3c97a4e43 | |||
| 30cf0e70f7 | |||
| 96f627dcde | |||
| 6f11e6d6a1 | |||
| fcec27f7d5 | |||
| cddcb1e526 | |||
| 0553b46df1 | |||
| b45d7697a5 | |||
| 7ebb309457 | |||
| cedfcdab46 | |||
| 0b21e62406 | |||
| 91994c999f | |||
| 0b82f58866 | |||
| 1f7ab1c823 | |||
| 52a27dd0ee | |||
| e0c728c545 | |||
| dbcd11f3a7 | 
| @ -1,4 +0,0 @@ | ||||
| # We do not use this library in our Bazel build. It contains an | ||||
| # infinitely recursing symlink that makes Bazel very unhappy. | ||||
| third_party/ittapi/ | ||||
| third_party/opentelemetry-cpp | ||||
							
								
								
									
										7
									
								
								.bazelrc
									
									
									
									
									
								
							
							
						
						
									
										7
									
								
								.bazelrc
									
									
									
									
									
								
							| @ -69,6 +69,10 @@ build --per_file_copt='^//.*\.(cpp|cc)$'@-Werror=all | ||||
| # The following warnings come from -Wall. We downgrade them from error | ||||
| # to warnings here. | ||||
| # | ||||
| # sign-compare has a tremendous amount of violations in the | ||||
| # codebase. It will be a lot of work to fix them, just disable it for | ||||
| # now. | ||||
| build --per_file_copt='^//.*\.(cpp|cc)$'@-Wno-sign-compare | ||||
| # We intentionally use #pragma unroll, which is compiler specific. | ||||
| build --per_file_copt='^//.*\.(cpp|cc)$'@-Wno-error=unknown-pragmas | ||||
|  | ||||
| @ -96,9 +100,6 @@ build --per_file_copt='^//.*\.(cpp|cc)$'@-Wno-unused-parameter | ||||
| # likely want to have this disabled for the most part. | ||||
| build --per_file_copt='^//.*\.(cpp|cc)$'@-Wno-missing-field-initializers | ||||
|  | ||||
| build --per_file_copt='^//.*\.(cpp|cc)$'@-Wno-unused-function | ||||
| build --per_file_copt='^//.*\.(cpp|cc)$'@-Wno-unused-variable | ||||
|  | ||||
| build --per_file_copt='//:aten/src/ATen/RegisterCompositeExplicitAutograd\.cpp$'@-Wno-error=unused-function | ||||
| build --per_file_copt='//:aten/src/ATen/RegisterCompositeImplicitAutograd\.cpp$'@-Wno-error=unused-function | ||||
| build --per_file_copt='//:aten/src/ATen/RegisterMkldnnCPU\.cpp$'@-Wno-error=unused-function | ||||
|  | ||||
| @ -1 +1 @@ | ||||
| 6.1.1 | ||||
| 4.2.1 | ||||
|  | ||||
| @ -14,7 +14,6 @@ | ||||
|  | ||||
| [cxx] | ||||
|   cxxflags = -std=c++17 | ||||
|   ldflags = -Wl,--no-undefined | ||||
|   should_remap_host_platform = true | ||||
|   cpp = /usr/bin/clang | ||||
|   cc = /usr/bin/clang | ||||
|  | ||||
| @ -1,7 +1,7 @@ | ||||
| # Docker images for GitHub CI and CD | ||||
| # Docker images for Jenkins | ||||
|  | ||||
| This directory contains everything needed to build the Docker images | ||||
| that are used in our CI. | ||||
| that are used in our CI | ||||
|  | ||||
| The Dockerfiles located in subdirectories are parameterized to | ||||
| conditionally run build stages depending on build arguments passed to | ||||
| @ -12,20 +12,13 @@ each image as the `BUILD_ENVIRONMENT` environment variable. | ||||
|  | ||||
| See `build.sh` for valid build environments (it's the giant switch). | ||||
|  | ||||
| ## Docker CI builds | ||||
| Docker builds are now defined with `.circleci/cimodel/data/simple/docker_definitions.py` | ||||
|  | ||||
| ## Contents | ||||
|  | ||||
| * `build.sh` -- dispatch script to launch all builds | ||||
| * `common` -- scripts used to execute individual Docker build stages | ||||
| * `ubuntu` -- Dockerfile for Ubuntu image for CPU build and test jobs | ||||
| * `ubuntu-cuda` -- Dockerfile for Ubuntu image with CUDA support for nvidia-docker | ||||
| * `ubuntu-rocm` -- Dockerfile for Ubuntu image with ROCm support | ||||
| * `ubuntu-xpu` -- Dockerfile for Ubuntu image with XPU support | ||||
|  | ||||
| ### Docker CD builds | ||||
|  | ||||
| * `conda` - Dockerfile and build.sh to build Docker images used in nightly conda builds | ||||
| * `manywheel` - Dockerfile and build.sh to build Docker images used in nightly manywheel builds | ||||
| * `libtorch` - Dockerfile and build.sh to build Docker images used in nightly libtorch builds | ||||
|  | ||||
| ## Usage | ||||
|  | ||||
|  | ||||
| @ -53,7 +53,7 @@ dependencies { | ||||
|     implementation 'androidx.appcompat:appcompat:1.0.0' | ||||
|     implementation 'com.facebook.fbjni:fbjni-java-only:0.2.2' | ||||
|     implementation 'com.google.code.findbugs:jsr305:3.0.1' | ||||
|     implementation 'com.facebook.soloader:nativeloader:0.10.5' | ||||
|     implementation 'com.facebook.soloader:nativeloader:0.10.4' | ||||
|  | ||||
|     implementation 'junit:junit:' + rootProject.junitVersion | ||||
|     implementation 'androidx.test:core:' + rootProject.coreVersion | ||||
|  | ||||
| @ -1,5 +0,0 @@ | ||||
| 0.7b | ||||
| manylinux_2_17 | ||||
| rocm6.2 | ||||
| 9be04068c3c0857a4cfd17d7e39e71d0423ebac2 | ||||
| 3e9e1959d23b93d78a08fcc5f868125dc3854dece32fd9458be9ef4467982291 | ||||
| @ -46,7 +46,9 @@ if [[ "$image" == *xla* ]]; then | ||||
|   exit 0 | ||||
| fi | ||||
|  | ||||
| if [[ "$image" == *-focal* ]]; then | ||||
| if [[ "$image" == *-bionic* ]]; then | ||||
|   UBUNTU_VERSION=18.04 | ||||
| elif [[ "$image" == *-focal* ]]; then | ||||
|   UBUNTU_VERSION=20.04 | ||||
| elif [[ "$image" == *-jammy* ]]; then | ||||
|   UBUNTU_VERSION=22.04 | ||||
| @ -71,11 +73,6 @@ if [[ "$image" == *cuda* && "$UBUNTU_VERSION" != "22.04" ]]; then | ||||
|   DOCKERFILE="${OS}-cuda/Dockerfile" | ||||
| elif [[ "$image" == *rocm* ]]; then | ||||
|   DOCKERFILE="${OS}-rocm/Dockerfile" | ||||
| elif [[ "$image" == *xpu* ]]; then | ||||
|   DOCKERFILE="${OS}-xpu/Dockerfile" | ||||
| elif [[ "$image" == *cuda*linter* ]]; then | ||||
|   # Use a separate Dockerfile for linter to keep a small image size | ||||
|   DOCKERFILE="linter-cuda/Dockerfile" | ||||
| elif [[ "$image" == *linter* ]]; then | ||||
|   # Use a separate Dockerfile for linter to keep a small image size | ||||
|   DOCKERFILE="linter/Dockerfile" | ||||
| @ -84,18 +81,18 @@ fi | ||||
| # CMake 3.18 is needed to support CUDA17 language variant | ||||
| CMAKE_VERSION=3.18.5 | ||||
|  | ||||
| _UCX_COMMIT=7bb2722ff2187a0cad557ae4a6afa090569f83fb | ||||
| _UCC_COMMIT=20eae37090a4ce1b32bcce6144ccad0b49943e0b | ||||
| _UCX_COMMIT=31e74cac7bee0ef66bef2af72e7d86d9c282e5ab | ||||
| _UCC_COMMIT=1c7a7127186e7836f73aafbd7697bbc274a77eee | ||||
|  | ||||
| # It's annoying to rename jobs every time you want to rewrite a | ||||
| # configuration, so we hardcode everything here rather than do it | ||||
| # from scratch | ||||
| case "$image" in | ||||
|   pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9) | ||||
|     CUDA_VERSION=12.4.1 | ||||
|     CUDNN_VERSION=9 | ||||
|   pytorch-linux-bionic-cuda11.6-cudnn8-py3-gcc7) | ||||
|     CUDA_VERSION=11.6.2 | ||||
|     CUDNN_VERSION=8 | ||||
|     ANACONDA_PYTHON_VERSION=3.10 | ||||
|     GCC_VERSION=9 | ||||
|     GCC_VERSION=7 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
| @ -103,13 +100,12 @@ case "$image" in | ||||
|     UCX_COMMIT=${_UCX_COMMIT} | ||||
|     UCC_COMMIT=${_UCC_COMMIT} | ||||
|     CONDA_CMAKE=yes | ||||
|     TRITON=yes | ||||
|     ;; | ||||
|   pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9) | ||||
|     CUDA_VERSION=12.1.1 | ||||
|     CUDNN_VERSION=9 | ||||
|   pytorch-linux-bionic-cuda11.7-cudnn8-py3-gcc7) | ||||
|     CUDA_VERSION=11.7.0 | ||||
|     CUDNN_VERSION=8 | ||||
|     ANACONDA_PYTHON_VERSION=3.10 | ||||
|     GCC_VERSION=9 | ||||
|     GCC_VERSION=7 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
| @ -117,73 +113,12 @@ case "$image" in | ||||
|     UCX_COMMIT=${_UCX_COMMIT} | ||||
|     UCC_COMMIT=${_UCC_COMMIT} | ||||
|     CONDA_CMAKE=yes | ||||
|     TRITON=yes | ||||
|     ;; | ||||
|   pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9-inductor-benchmarks) | ||||
|     CUDA_VERSION=12.4.1 | ||||
|     CUDNN_VERSION=9 | ||||
|     ANACONDA_PYTHON_VERSION=3.10 | ||||
|     GCC_VERSION=9 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     KATEX=yes | ||||
|     UCX_COMMIT=${_UCX_COMMIT} | ||||
|     UCC_COMMIT=${_UCC_COMMIT} | ||||
|     CONDA_CMAKE=yes | ||||
|     TRITON=yes | ||||
|     INDUCTOR_BENCHMARKS=yes | ||||
|     ;; | ||||
|   pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9-inductor-benchmarks) | ||||
|     CUDA_VERSION=12.1.1 | ||||
|     CUDNN_VERSION=9 | ||||
|     ANACONDA_PYTHON_VERSION=3.10 | ||||
|     GCC_VERSION=9 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     KATEX=yes | ||||
|     UCX_COMMIT=${_UCX_COMMIT} | ||||
|     UCC_COMMIT=${_UCC_COMMIT} | ||||
|     CONDA_CMAKE=yes | ||||
|     TRITON=yes | ||||
|     INDUCTOR_BENCHMARKS=yes | ||||
|     ;; | ||||
|   pytorch-linux-focal-cuda12.1-cudnn9-py3.12-gcc9-inductor-benchmarks) | ||||
|     CUDA_VERSION=12.1.1 | ||||
|     CUDNN_VERSION=9 | ||||
|     ANACONDA_PYTHON_VERSION=3.12 | ||||
|     GCC_VERSION=9 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     KATEX=yes | ||||
|     UCX_COMMIT=${_UCX_COMMIT} | ||||
|     UCC_COMMIT=${_UCC_COMMIT} | ||||
|     CONDA_CMAKE=yes | ||||
|     TRITON=yes | ||||
|     INDUCTOR_BENCHMARKS=yes | ||||
|     ;; | ||||
|   pytorch-linux-focal-cuda12.4-cudnn9-py3.12-gcc9-inductor-benchmarks) | ||||
|     CUDA_VERSION=12.4.1 | ||||
|     CUDNN_VERSION=9 | ||||
|     ANACONDA_PYTHON_VERSION=3.12 | ||||
|     GCC_VERSION=9 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     KATEX=yes | ||||
|     UCX_COMMIT=${_UCX_COMMIT} | ||||
|     UCC_COMMIT=${_UCC_COMMIT} | ||||
|     CONDA_CMAKE=yes | ||||
|     TRITON=yes | ||||
|     INDUCTOR_BENCHMARKS=yes | ||||
|     ;; | ||||
|   pytorch-linux-focal-cuda11.8-cudnn9-py3-gcc9) | ||||
|   pytorch-linux-bionic-cuda11.8-cudnn8-py3-gcc7) | ||||
|     CUDA_VERSION=11.8.0 | ||||
|     CUDNN_VERSION=9 | ||||
|     CUDNN_VERSION=8 | ||||
|     ANACONDA_PYTHON_VERSION=3.10 | ||||
|     GCC_VERSION=9 | ||||
|     GCC_VERSION=7 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
| @ -191,194 +126,116 @@ case "$image" in | ||||
|     UCX_COMMIT=${_UCX_COMMIT} | ||||
|     UCC_COMMIT=${_UCC_COMMIT} | ||||
|     CONDA_CMAKE=yes | ||||
|     TRITON=yes | ||||
|     ;; | ||||
|   pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9) | ||||
|     CUDA_VERSION=12.4.1 | ||||
|     CUDNN_VERSION=9 | ||||
|     ANACONDA_PYTHON_VERSION=3.10 | ||||
|     GCC_VERSION=9 | ||||
|   pytorch-linux-focal-py3-clang7-asan) | ||||
|     ANACONDA_PYTHON_VERSION=3.9 | ||||
|     CLANG_VERSION=7 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     KATEX=yes | ||||
|     UCX_COMMIT=${_UCX_COMMIT} | ||||
|     UCC_COMMIT=${_UCC_COMMIT} | ||||
|     CONDA_CMAKE=yes | ||||
|     TRITON=yes | ||||
|     ;; | ||||
|   pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9) | ||||
|     CUDA_VERSION=12.1.1 | ||||
|     CUDNN_VERSION=9 | ||||
|     ANACONDA_PYTHON_VERSION=3.10 | ||||
|     GCC_VERSION=9 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     KATEX=yes | ||||
|     UCX_COMMIT=${_UCX_COMMIT} | ||||
|     UCC_COMMIT=${_UCC_COMMIT} | ||||
|     CONDA_CMAKE=yes | ||||
|     TRITON=yes | ||||
|     ;; | ||||
|   pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9) | ||||
|     CUDA_VERSION=12.4.1 | ||||
|     CUDNN_VERSION=9 | ||||
|     ANACONDA_PYTHON_VERSION=3.10 | ||||
|     GCC_VERSION=9 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     KATEX=yes | ||||
|     UCX_COMMIT=${_UCX_COMMIT} | ||||
|     UCC_COMMIT=${_UCC_COMMIT} | ||||
|     CONDA_CMAKE=yes | ||||
|     TRITON=yes | ||||
|     ;; | ||||
|   pytorch-linux-focal-py3-clang10-onnx) | ||||
|     ANACONDA_PYTHON_VERSION=3.9 | ||||
|     ANACONDA_PYTHON_VERSION=3.8 | ||||
|     CLANG_VERSION=10 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     CONDA_CMAKE=yes | ||||
|     ONNX=yes | ||||
|     ;; | ||||
|   pytorch-linux-focal-py3-clang9-android-ndk-r21e) | ||||
|     ANACONDA_PYTHON_VERSION=3.9 | ||||
|     CLANG_VERSION=9 | ||||
|   pytorch-linux-focal-py3-clang7-android-ndk-r19c) | ||||
|     ANACONDA_PYTHON_VERSION=3.7 | ||||
|     CLANG_VERSION=7 | ||||
|     LLVMDEV=yes | ||||
|     PROTOBUF=yes | ||||
|     ANDROID=yes | ||||
|     ANDROID_NDK_VERSION=r21e | ||||
|     ANDROID_NDK_VERSION=r19c | ||||
|     GRADLE_VERSION=6.8.3 | ||||
|     NINJA_VERSION=1.9.0 | ||||
|     ;; | ||||
|   pytorch-linux-focal-py3.9-clang10) | ||||
|     ANACONDA_PYTHON_VERSION=3.9 | ||||
|     CLANG_VERSION=10 | ||||
|   pytorch-linux-bionic-py3.8-clang9) | ||||
|     ANACONDA_PYTHON_VERSION=3.8 | ||||
|     CLANG_VERSION=9 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     VULKAN_SDK_VERSION=1.2.162.1 | ||||
|     SWIFTSHADER=yes | ||||
|     CONDA_CMAKE=yes | ||||
|     TRITON=yes | ||||
|     ;; | ||||
|   pytorch-linux-focal-py3.11-clang10) | ||||
|   pytorch-linux-bionic-py3.11-clang9) | ||||
|     ANACONDA_PYTHON_VERSION=3.11 | ||||
|     CLANG_VERSION=10 | ||||
|     CLANG_VERSION=9 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     VULKAN_SDK_VERSION=1.2.162.1 | ||||
|     SWIFTSHADER=yes | ||||
|     CONDA_CMAKE=yes | ||||
|     TRITON=yes | ||||
|     ;; | ||||
|   pytorch-linux-focal-py3.9-gcc9) | ||||
|     ANACONDA_PYTHON_VERSION=3.9 | ||||
|   pytorch-linux-bionic-py3.8-gcc9) | ||||
|     ANACONDA_PYTHON_VERSION=3.8 | ||||
|     GCC_VERSION=9 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     CONDA_CMAKE=yes | ||||
|     TRITON=yes | ||||
|     ;; | ||||
|   pytorch-linux-focal-rocm-n-1-py3) | ||||
|     ANACONDA_PYTHON_VERSION=3.10 | ||||
|     ANACONDA_PYTHON_VERSION=3.8 | ||||
|     GCC_VERSION=9 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     ROCM_VERSION=6.1 | ||||
|     ROCM_VERSION=5.3 | ||||
|     NINJA_VERSION=1.9.0 | ||||
|     CONDA_CMAKE=yes | ||||
|     TRITON=yes | ||||
|     ;; | ||||
|   pytorch-linux-focal-rocm-n-py3) | ||||
|     ANACONDA_PYTHON_VERSION=3.10 | ||||
|     ANACONDA_PYTHON_VERSION=3.8 | ||||
|     GCC_VERSION=9 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     ROCM_VERSION=6.2 | ||||
|     ROCM_VERSION=5.4.2 | ||||
|     NINJA_VERSION=1.9.0 | ||||
|     CONDA_CMAKE=yes | ||||
|     TRITON=yes | ||||
|     ;; | ||||
|   pytorch-linux-jammy-xpu-2024.0-py3) | ||||
|     ANACONDA_PYTHON_VERSION=3.9 | ||||
|     GCC_VERSION=11 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     XPU_VERSION=0.5 | ||||
|     NINJA_VERSION=1.9.0 | ||||
|     CONDA_CMAKE=yes | ||||
|     TRITON=yes | ||||
|     ;; | ||||
|     pytorch-linux-jammy-py3.9-gcc11-inductor-benchmarks) | ||||
|     ANACONDA_PYTHON_VERSION=3.9 | ||||
|     GCC_VERSION=11 | ||||
|   pytorch-linux-focal-py3.8-gcc7) | ||||
|     ANACONDA_PYTHON_VERSION=3.8 | ||||
|     GCC_VERSION=7 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     KATEX=yes | ||||
|     CONDA_CMAKE=yes | ||||
|     TRITON=yes | ||||
|     DOCS=yes | ||||
|     INDUCTOR_BENCHMARKS=yes | ||||
|     ;; | ||||
|   pytorch-linux-jammy-cuda11.8-cudnn9-py3.9-clang12) | ||||
|     ANACONDA_PYTHON_VERSION=3.9 | ||||
|   pytorch-linux-jammy-cuda11.6-cudnn8-py3.8-clang12) | ||||
|     ANACONDA_PYTHON_VERSION=3.8 | ||||
|     CUDA_VERSION=11.6 | ||||
|     CUDNN_VERSION=8 | ||||
|     CLANG_VERSION=12 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     ;; | ||||
|   pytorch-linux-jammy-cuda11.7-cudnn8-py3.8-clang12) | ||||
|     ANACONDA_PYTHON_VERSION=3.8 | ||||
|     CUDA_VERSION=11.7 | ||||
|     CUDNN_VERSION=8 | ||||
|     CLANG_VERSION=12 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     ;; | ||||
|   pytorch-linux-jammy-cuda11.8-cudnn8-py3.8-clang12) | ||||
|     ANACONDA_PYTHON_VERSION=3.8 | ||||
|     CUDA_VERSION=11.8 | ||||
|     CUDNN_VERSION=9 | ||||
|     CUDNN_VERSION=8 | ||||
|     CLANG_VERSION=12 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     TRITON=yes | ||||
|     ;; | ||||
|   pytorch-linux-jammy-py3-clang12-asan) | ||||
|     ANACONDA_PYTHON_VERSION=3.9 | ||||
|     CLANG_VERSION=12 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     CONDA_CMAKE=yes | ||||
|     TRITON=yes | ||||
|     ;; | ||||
|   pytorch-linux-jammy-py3-clang15-asan) | ||||
|     ANACONDA_PYTHON_VERSION=3.10 | ||||
|     CLANG_VERSION=15 | ||||
|     CONDA_CMAKE=yes | ||||
|     VISION=yes | ||||
|     ;; | ||||
|   pytorch-linux-jammy-py3.9-gcc11) | ||||
|     ANACONDA_PYTHON_VERSION=3.9 | ||||
|     GCC_VERSION=11 | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     KATEX=yes | ||||
|     CONDA_CMAKE=yes | ||||
|     TRITON=yes | ||||
|     DOCS=yes | ||||
|     UNINSTALL_DILL=yes | ||||
|     ;; | ||||
|   pytorch-linux-jammy-py3-clang12-executorch) | ||||
|     ANACONDA_PYTHON_VERSION=3.10 | ||||
|     CLANG_VERSION=12 | ||||
|     CONDA_CMAKE=yes | ||||
|     EXECUTORCH=yes | ||||
|     ;; | ||||
|   pytorch-linux-jammy-py3.12-halide) | ||||
|     CUDA_VERSION=12.4 | ||||
|     ANACONDA_PYTHON_VERSION=3.12 | ||||
|     GCC_VERSION=11 | ||||
|     CONDA_CMAKE=yes | ||||
|     HALIDE=yes | ||||
|     ;; | ||||
|   pytorch-linux-focal-linter) | ||||
|     # TODO: Use 3.9 here because of this issue https://github.com/python/mypy/issues/13627. | ||||
| @ -387,42 +244,6 @@ case "$image" in | ||||
|     ANACONDA_PYTHON_VERSION=3.9 | ||||
|     CONDA_CMAKE=yes | ||||
|     ;; | ||||
|   pytorch-linux-jammy-cuda11.8-cudnn9-py3.9-linter) | ||||
|     ANACONDA_PYTHON_VERSION=3.9 | ||||
|     CUDA_VERSION=11.8 | ||||
|     CONDA_CMAKE=yes | ||||
|     ;; | ||||
|   pytorch-linux-jammy-aarch64-py3.10-gcc11) | ||||
|     ANACONDA_PYTHON_VERSION=3.10 | ||||
|     GCC_VERSION=11 | ||||
|     ACL=yes | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     CONDA_CMAKE=yes | ||||
|     # snadampal: skipping sccache due to the following issue | ||||
|     # https://github.com/pytorch/pytorch/issues/121559 | ||||
|     SKIP_SCCACHE_INSTALL=yes | ||||
|     # snadampal: skipping llvm src build install because the current version | ||||
|     # from pytorch/llvm:9.0.1 is x86 specific | ||||
|     SKIP_LLVM_SRC_BUILD_INSTALL=yes | ||||
|     ;; | ||||
|   pytorch-linux-jammy-aarch64-py3.10-gcc11-inductor-benchmarks) | ||||
|     ANACONDA_PYTHON_VERSION=3.10 | ||||
|     GCC_VERSION=11 | ||||
|     ACL=yes | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     CONDA_CMAKE=yes | ||||
|     # snadampal: skipping sccache due to the following issue | ||||
|     # https://github.com/pytorch/pytorch/issues/121559 | ||||
|     SKIP_SCCACHE_INSTALL=yes | ||||
|     # snadampal: skipping llvm src build install because the current version | ||||
|     # from pytorch/llvm:9.0.1 is x86 specific | ||||
|     SKIP_LLVM_SRC_BUILD_INSTALL=yes | ||||
|     INDUCTOR_BENCHMARKS=yes | ||||
|     ;; | ||||
|   *) | ||||
|     # Catch-all for builds that are not hardcoded. | ||||
|     PROTOBUF=yes | ||||
| @ -439,10 +260,6 @@ case "$image" in | ||||
|     if [[ "$image" == *rocm* ]]; then | ||||
|       extract_version_from_image_name rocm ROCM_VERSION | ||||
|       NINJA_VERSION=1.9.0 | ||||
|       TRITON=yes | ||||
|       # To ensure that any ROCm config will build using conda cmake | ||||
|       # and thus have LAPACK/MKL enabled | ||||
|       CONDA_CMAKE=yes | ||||
|     fi | ||||
|     if [[ "$image" == *centos7* ]]; then | ||||
|       NINJA_VERSION=1.10.2 | ||||
| @ -470,17 +287,20 @@ tmp_tag=$(basename "$(mktemp -u)" | tr '[:upper:]' '[:lower:]') | ||||
| #when using cudnn version 8 install it separately from cuda | ||||
| if [[ "$image" == *cuda*  && ${OS} == "ubuntu" ]]; then | ||||
|   IMAGE_NAME="nvidia/cuda:${CUDA_VERSION}-cudnn${CUDNN_VERSION}-devel-ubuntu${UBUNTU_VERSION}" | ||||
|   if [[ ${CUDNN_VERSION} == 9 ]]; then | ||||
|   if [[ ${CUDNN_VERSION} == 8 ]]; then | ||||
|     IMAGE_NAME="nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}" | ||||
|   fi | ||||
| fi | ||||
|  | ||||
| # Build image | ||||
| # TODO: build-arg THRIFT is not turned on for any image, remove it once we confirm | ||||
| # it's no longer needed. | ||||
| docker build \ | ||||
|        --no-cache \ | ||||
|        --progress=plain \ | ||||
|        --build-arg "BUILD_ENVIRONMENT=${image}" \ | ||||
|        --build-arg "PROTOBUF=${PROTOBUF:-}" \ | ||||
|        --build-arg "THRIFT=${THRIFT:-}" \ | ||||
|        --build-arg "LLVMDEV=${LLVMDEV:-}" \ | ||||
|        --build-arg "DB=${DB:-}" \ | ||||
|        --build-arg "VISION=${VISION:-}" \ | ||||
| @ -503,27 +323,17 @@ docker build \ | ||||
|        --build-arg "NINJA_VERSION=${NINJA_VERSION:-}" \ | ||||
|        --build-arg "KATEX=${KATEX:-}" \ | ||||
|        --build-arg "ROCM_VERSION=${ROCM_VERSION:-}" \ | ||||
|        --build-arg "PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH:-gfx906;gfx90a}" \ | ||||
|        --build-arg "PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH:-gfx906}" \ | ||||
|        --build-arg "IMAGE_NAME=${IMAGE_NAME}" \ | ||||
|        --build-arg "UCX_COMMIT=${UCX_COMMIT}" \ | ||||
|        --build-arg "UCC_COMMIT=${UCC_COMMIT}" \ | ||||
|        --build-arg "CONDA_CMAKE=${CONDA_CMAKE}" \ | ||||
|        --build-arg "TRITON=${TRITON}" \ | ||||
|        --build-arg "ONNX=${ONNX}" \ | ||||
|        --build-arg "DOCS=${DOCS}" \ | ||||
|        --build-arg "INDUCTOR_BENCHMARKS=${INDUCTOR_BENCHMARKS}" \ | ||||
|        --build-arg "EXECUTORCH=${EXECUTORCH}" \ | ||||
|        --build-arg "HALIDE=${HALIDE}" \ | ||||
|        --build-arg "XPU_VERSION=${XPU_VERSION}" \ | ||||
|        --build-arg "ACL=${ACL:-}" \ | ||||
|        --build-arg "SKIP_SCCACHE_INSTALL=${SKIP_SCCACHE_INSTALL:-}" \ | ||||
|        --build-arg "SKIP_LLVM_SRC_BUILD_INSTALL=${SKIP_LLVM_SRC_BUILD_INSTALL:-}" \ | ||||
|        -f $(dirname ${DOCKERFILE})/Dockerfile \ | ||||
|        -t "$tmp_tag" \ | ||||
|        "$@" \ | ||||
|        . | ||||
|  | ||||
| # NVIDIA dockers for RC releases use tag names like `11.0-cudnn9-devel-ubuntu18.04-rc`, | ||||
| # NVIDIA dockers for RC releases use tag names like `11.0-cudnn8-devel-ubuntu18.04-rc`, | ||||
| # for this case we will set UBUNTU_VERSION to `18.04-rc` so that the Dockerfile could | ||||
| # find the correct image. As a result, here we have to replace the | ||||
| #   "$UBUNTU_VERSION" == "18.04-rc" | ||||
|  | ||||
							
								
								
									
										60
									
								
								.ci/docker/build_docker.sh
									
									
									
									
									
										Executable file
									
								
							
							
						
						
									
										60
									
								
								.ci/docker/build_docker.sh
									
									
									
									
									
										Executable file
									
								
							| @ -0,0 +1,60 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| retry () { | ||||
|     $*  || (sleep 1 && $*) || (sleep 2 && $*) | ||||
| } | ||||
|  | ||||
| # If UPSTREAM_BUILD_ID is set (see trigger job), then we can | ||||
| # use it to tag this build with the same ID used to tag all other | ||||
| # base image builds. Also, we can try and pull the previous | ||||
| # image first, to avoid rebuilding layers that haven't changed. | ||||
|  | ||||
| #until we find a way to reliably reuse previous build, this last_tag is not in use | ||||
| # last_tag="$(( CIRCLE_BUILD_NUM - 1 ))" | ||||
| tag="${DOCKER_TAG}" | ||||
|  | ||||
|  | ||||
| registry="308535385114.dkr.ecr.us-east-1.amazonaws.com" | ||||
| image="${registry}/pytorch/${IMAGE_NAME}" | ||||
|  | ||||
| login() { | ||||
|   aws ecr get-authorization-token --region us-east-1 --output text --query 'authorizationData[].authorizationToken' | | ||||
|     base64 -d | | ||||
|     cut -d: -f2 | | ||||
|     docker login -u AWS --password-stdin "$1" | ||||
| } | ||||
|  | ||||
|  | ||||
| # Only run these steps if not on github actions | ||||
| if [[ -z "${GITHUB_ACTIONS}" ]]; then | ||||
|   # Retry on timeouts (can happen on job stampede). | ||||
|   retry login "${registry}" | ||||
|   # Logout on exit | ||||
|   trap "docker logout ${registry}" EXIT | ||||
| fi | ||||
|  | ||||
| # Try to pull the previous image (perhaps we can reuse some layers) | ||||
| # if [ -n "${last_tag}" ]; then | ||||
| #   docker pull "${image}:${last_tag}" || true | ||||
| # fi | ||||
|  | ||||
| # Build new image | ||||
| ./build.sh ${IMAGE_NAME} -t "${image}:${tag}" | ||||
|  | ||||
| # Only push if `DOCKER_SKIP_PUSH` = false | ||||
| if [ "${DOCKER_SKIP_PUSH:-true}" = "false" ]; then | ||||
|   # Only push if docker image doesn't exist already. | ||||
|   # ECR image tags are immutable so this will avoid pushing if only just testing if the docker jobs work | ||||
|   # NOTE: The only workflow that should push these images should be the docker-builds.yml workflow | ||||
|   if ! docker manifest inspect "${image}:${tag}" >/dev/null 2>/dev/null; then | ||||
|     docker push "${image}:${tag}" | ||||
|   fi | ||||
| fi | ||||
|  | ||||
| if [ -z "${DOCKER_SKIP_S3_UPLOAD:-}" ]; then | ||||
|   trap "rm -rf ${IMAGE_NAME}:${tag}.tar" EXIT | ||||
|   docker save -o "${IMAGE_NAME}:${tag}.tar" "${image}:${tag}" | ||||
|   aws s3 cp "${IMAGE_NAME}:${tag}.tar" "s3://ossci-linux-build/pytorch/base/${IMAGE_NAME}:${tag}.tar" --acl public-read | ||||
| fi | ||||
| @ -62,11 +62,11 @@ RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi | ||||
| RUN rm install_db.sh | ||||
| ENV INSTALLED_DB ${DB} | ||||
|  | ||||
| # (optional) Install vision packages like OpenCV | ||||
| # (optional) Install vision packages like OpenCV and ffmpeg | ||||
| ARG VISION | ||||
| COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./ | ||||
| COPY ./common/install_vision.sh install_vision.sh | ||||
| RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi | ||||
| RUN rm install_vision.sh cache_vision_models.sh common_utils.sh | ||||
| RUN rm install_vision.sh | ||||
| ENV INSTALLED_VISION ${VISION} | ||||
|  | ||||
| # Install rocm | ||||
| @ -77,9 +77,6 @@ RUN rm install_rocm.sh | ||||
| COPY ./common/install_rocm_magma.sh install_rocm_magma.sh | ||||
| RUN bash ./install_rocm_magma.sh | ||||
| RUN rm install_rocm_magma.sh | ||||
| COPY ./common/install_amdsmi.sh install_amdsmi.sh | ||||
| RUN bash ./install_amdsmi.sh | ||||
| RUN rm install_amdsmi.sh | ||||
| ENV PATH /opt/rocm/bin:$PATH | ||||
| ENV PATH /opt/rocm/hcc/bin:$PATH | ||||
| ENV PATH /opt/rocm/hip/bin:$PATH | ||||
| @ -101,25 +98,6 @@ COPY ./common/install_ninja.sh install_ninja.sh | ||||
| RUN if [ -n "${NINJA_VERSION}" ]; then bash ./install_ninja.sh; fi | ||||
| RUN rm install_ninja.sh | ||||
|  | ||||
| ARG TRITON | ||||
| # Install triton, this needs to be done before sccache because the latter will | ||||
| # try to reach out to S3, which docker build runners don't have access | ||||
| ENV CMAKE_C_COMPILER cc | ||||
| ENV CMAKE_CXX_COMPILER c++ | ||||
| COPY ./common/install_triton.sh install_triton.sh | ||||
| COPY ./common/common_utils.sh common_utils.sh | ||||
| COPY ci_commit_pins/triton.txt triton.txt | ||||
| COPY triton_version.txt triton_version.txt | ||||
| RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi | ||||
| RUN rm install_triton.sh common_utils.sh triton.txt triton_version.txt | ||||
|  | ||||
| # Install AOTriton (Early fail) | ||||
| COPY ./aotriton_version.txt aotriton_version.txt | ||||
| COPY ./common/common_utils.sh common_utils.sh | ||||
| COPY ./common/install_aotriton.sh install_aotriton.sh | ||||
| RUN ["/bin/bash", "-c", "./install_aotriton.sh /opt/rocm && rm -rf install_aotriton.sh aotriton_version.txt common_utils.sh"] | ||||
| ENV AOTRITON_INSTALLED_PREFIX /opt/rocm/aotriton | ||||
|  | ||||
| # Install ccache/sccache (do this last, so we get priority in PATH) | ||||
| COPY ./common/install_cache.sh install_cache.sh | ||||
| ENV PATH /opt/cache/bin:$PATH | ||||
|  | ||||
| @ -1 +0,0 @@ | ||||
| cd1c833b079adb324871dcbbe75b43d42ffc0ade | ||||
| @ -1 +0,0 @@ | ||||
| 461c12871f336fe6f57b55d6a297f13ef209161b | ||||
| @ -1 +0,0 @@ | ||||
| 243e186efbf7fb93328dd6b34927a4e8c8f24395 | ||||
| @ -1 +0,0 @@ | ||||
| ac3470188b914c5d7a5058a7e28b9eb685a62427 | ||||
| @ -1 +0,0 @@ | ||||
| 91b14bf5593cf58a8541f3e6b9125600a867d4ef | ||||
| @ -1 +0,0 @@ | ||||
| 5fe38ffd73c2ac6ed6323b554205186696631c6f | ||||
| @ -1,18 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh" | ||||
|  | ||||
| # Cache the test models at ~/.cache/torch/hub/ | ||||
| IMPORT_SCRIPT_FILENAME="/tmp/torchvision_import_script.py" | ||||
| as_jenkins echo 'import torchvision; torchvision.models.mobilenet_v2(pretrained=True); torchvision.models.mobilenet_v3_large(pretrained=True);' > "${IMPORT_SCRIPT_FILENAME}" | ||||
|  | ||||
| pip_install --pre torch torchvision --index-url https://download.pytorch.org/whl/nightly/cpu | ||||
| # Very weird quoting behavior here https://github.com/conda/conda/issues/10972, | ||||
| # so echo the command to a file and run the file instead | ||||
| conda_run python "${IMPORT_SCRIPT_FILENAME}" | ||||
|  | ||||
| # Cleaning up | ||||
| conda_run pip uninstall -y torch torchvision | ||||
| rm "${IMPORT_SCRIPT_FILENAME}" || true | ||||
| @ -13,7 +13,7 @@ as_jenkins() { | ||||
|   # NB: Pass on PATH and LD_LIBRARY_PATH to sudo invocation | ||||
|   # NB: This must be run from a directory that jenkins has access to, | ||||
|   # works around https://github.com/conda/conda-package-handling/pull/34 | ||||
|   $SUDO -E -H -u jenkins env -u SUDO_UID -u SUDO_GID -u SUDO_COMMAND -u SUDO_USER env "PATH=$PATH" "LD_LIBRARY_PATH=$LD_LIBRARY_PATH" $* | ||||
|   $SUDO -H -u jenkins env -u SUDO_UID -u SUDO_GID -u SUDO_COMMAND -u SUDO_USER env "PATH=$PATH" "LD_LIBRARY_PATH=$LD_LIBRARY_PATH" $* | ||||
| } | ||||
|  | ||||
| conda_install() { | ||||
| @ -30,7 +30,3 @@ conda_run() { | ||||
| pip_install() { | ||||
|   as_jenkins conda run -n py_$ANACONDA_PYTHON_VERSION pip install --progress-bar off $* | ||||
| } | ||||
|  | ||||
| get_pinned_commit() { | ||||
|   cat "${1}".txt | ||||
| } | ||||
|  | ||||
| @ -1,16 +0,0 @@ | ||||
| set -euo pipefail | ||||
|  | ||||
| readonly version=v24.04 | ||||
| readonly src_host=https://review.mlplatform.org/ml | ||||
| readonly src_repo=ComputeLibrary | ||||
|  | ||||
| # Clone ACL | ||||
| [[ ! -d ${src_repo} ]] && git clone ${src_host}/${src_repo}.git | ||||
| cd ${src_repo} | ||||
|  | ||||
| git checkout $version | ||||
|  | ||||
| # Build with scons | ||||
| scons -j8  Werror=0 debug=0 neon=1 opencl=0 embed_kernels=0 \ | ||||
|   os=linux arch=armv8a build=native multi_isa=1 \ | ||||
|   fixed_format_kernels=1 openmp=1 cppthreads=0 | ||||
| @ -1,5 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| cd /opt/rocm/share/amd_smi && pip install . | ||||
| @ -107,6 +107,3 @@ chgrp -R jenkins /var/lib/jenkins/.gradle | ||||
| popd | ||||
|  | ||||
| rm -rf /var/lib/jenkins/.gradle/daemon | ||||
|  | ||||
| # Cache vision models used by the test | ||||
| source "$(dirname "${BASH_SOURCE[0]}")/cache_vision_models.sh" | ||||
|  | ||||
| @ -1,23 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh" | ||||
|  | ||||
| TARBALL='aotriton.tar.gz' | ||||
| # This read command alwasy returns with exit code 1 | ||||
| read -d "\n" VER MANYLINUX ROCMBASE PINNED_COMMIT SHA256 < aotriton_version.txt || true | ||||
| ARCH=$(uname -m) | ||||
| AOTRITON_INSTALL_PREFIX="$1" | ||||
| AOTRITON_URL="https://github.com/ROCm/aotriton/releases/download/${VER}/aotriton-${VER}-${MANYLINUX}_${ARCH}-${ROCMBASE}-shared.tar.gz" | ||||
|  | ||||
| cd "${AOTRITON_INSTALL_PREFIX}" | ||||
| # Must use -L to follow redirects | ||||
| curl -L --retry 3 -o "${TARBALL}" "${AOTRITON_URL}" | ||||
| ACTUAL_SHA256=$(sha256sum "${TARBALL}" | cut -d " " -f 1) | ||||
| if [ "${SHA256}" != "${ACTUAL_SHA256}" ]; then | ||||
|   echo -n "Error: The SHA256 of downloaded tarball is ${ACTUAL_SHA256}," | ||||
|   echo " which does not match the expected value ${SHA256}." | ||||
|   exit | ||||
| fi | ||||
| tar xf "${TARBALL}" && rm -rf "${TARBALL}" | ||||
| @ -3,13 +3,16 @@ | ||||
| set -ex | ||||
|  | ||||
| install_ubuntu() { | ||||
|   # NVIDIA dockers for RC releases use tag names like `11.0-cudnn9-devel-ubuntu18.04-rc`, | ||||
|   # NVIDIA dockers for RC releases use tag names like `11.0-cudnn8-devel-ubuntu18.04-rc`, | ||||
|   # for this case we will set UBUNTU_VERSION to `18.04-rc` so that the Dockerfile could | ||||
|   # find the correct image. As a result, here we have to check for | ||||
|   #   "$UBUNTU_VERSION" == "18.04"* | ||||
|   # instead of | ||||
|   #   "$UBUNTU_VERSION" == "18.04" | ||||
|   if [[ "$UBUNTU_VERSION" == "20.04"* ]]; then | ||||
|   if [[ "$UBUNTU_VERSION" == "18.04"* ]]; then | ||||
|     cmake3="cmake=3.10*" | ||||
|     maybe_libiomp_dev="libiomp-dev" | ||||
|   elif [[ "$UBUNTU_VERSION" == "20.04"* ]]; then | ||||
|     cmake3="cmake=3.16*" | ||||
|     maybe_libiomp_dev="" | ||||
|   elif [[ "$UBUNTU_VERSION" == "22.04"* ]]; then | ||||
| @ -20,9 +23,7 @@ install_ubuntu() { | ||||
|     maybe_libiomp_dev="libiomp-dev" | ||||
|   fi | ||||
|  | ||||
|   if [[ "$CLANG_VERSION" == 15 ]]; then | ||||
|     maybe_libomp_dev="libomp-15-dev" | ||||
|   elif [[ "$CLANG_VERSION" == 12 ]]; then | ||||
|   if [[ "$CLANG_VERSION" == 12 ]]; then | ||||
|     maybe_libomp_dev="libomp-12-dev" | ||||
|   elif [[ "$CLANG_VERSION" == 10 ]]; then | ||||
|     maybe_libomp_dev="libomp-10-dev" | ||||
| @ -30,13 +31,10 @@ install_ubuntu() { | ||||
|     maybe_libomp_dev="" | ||||
|   fi | ||||
|  | ||||
|   # HACK: UCC testing relies on libnccl library from NVIDIA repo, and version 2.16 crashes | ||||
|   # See https://github.com/pytorch/pytorch/pull/105260#issuecomment-1673399729 | ||||
|   if [[ "$UBUNTU_VERSION" == "20.04"* && "$CUDA_VERSION" == "11.8"* ]]; then | ||||
|     maybe_libnccl_dev="libnccl2=2.15.5-1+cuda11.8 libnccl-dev=2.15.5-1+cuda11.8 --allow-downgrades --allow-change-held-packages" | ||||
|   else | ||||
|     maybe_libnccl_dev="" | ||||
|   fi | ||||
|   # TODO: Remove this once nvidia package repos are back online | ||||
|   # Comment out nvidia repositories to prevent them from getting apt-get updated, see https://github.com/pytorch/pytorch/issues/74968 | ||||
|   # shellcheck disable=SC2046 | ||||
|   sed -i 's/.*nvidia.*/# &/' $(find /etc/apt/ -type f -name "*.list") | ||||
|  | ||||
|   # Install common dependencies | ||||
|   apt-get update | ||||
| @ -61,12 +59,10 @@ install_ubuntu() { | ||||
|     ${maybe_libiomp_dev} \ | ||||
|     libyaml-dev \ | ||||
|     libz-dev \ | ||||
|     libjemalloc2 \ | ||||
|     libjpeg-dev \ | ||||
|     libasound2-dev \ | ||||
|     libsndfile-dev \ | ||||
|     ${maybe_libomp_dev} \ | ||||
|     ${maybe_libnccl_dev} \ | ||||
|     software-properties-common \ | ||||
|     wget \ | ||||
|     sudo \ | ||||
| @ -75,13 +71,26 @@ install_ubuntu() { | ||||
|     libtool \ | ||||
|     vim \ | ||||
|     unzip \ | ||||
|     gpg-agent \ | ||||
|     gdb | ||||
|  | ||||
|   # Should resolve issues related to various apt package repository cert issues | ||||
|   # see: https://github.com/pytorch/pytorch/issues/65931 | ||||
|   apt-get install -y libgnutls30 | ||||
|  | ||||
|   # cuda-toolkit does not work with gcc-11.2.0 which is default in Ubunutu 22.04 | ||||
|   # see: https://github.com/NVlabs/instant-ngp/issues/119 | ||||
|   if [[ "$UBUNTU_VERSION" == "22.04"* ]]; then | ||||
|     apt-get install -y g++-10 | ||||
|     update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 30 | ||||
|     update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-10 30 | ||||
|     update-alternatives --install /usr/bin/gcov gcov /usr/bin/gcov-10 30 | ||||
|  | ||||
|     # https://www.spinics.net/lists/libreoffice/msg07549.html | ||||
|     sudo rm -rf /usr/lib/gcc/x86_64-linux-gnu/11 | ||||
|     wget https://github.com/gcc-mirror/gcc/commit/2b2d97fc545635a0f6aa9c9ee3b017394bc494bf.patch -O noexecpt.patch | ||||
|     sudo patch  /usr/include/c++/10/bits/range_access.h noexecpt.patch | ||||
|   fi | ||||
|  | ||||
|   # Cleanup package manager | ||||
|   apt-get autoclean && apt-get clean | ||||
|   rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* | ||||
| @ -113,6 +122,7 @@ install_centos() { | ||||
|     glibc-devel \ | ||||
|     glibc-headers \ | ||||
|     glog-devel \ | ||||
|     hiredis-devel \ | ||||
|     libstdc++-devel \ | ||||
|     libsndfile-devel \ | ||||
|     make \ | ||||
| @ -152,7 +162,7 @@ wget https://ossci-linux.s3.amazonaws.com/valgrind-${VALGRIND_VERSION}.tar.bz2 | ||||
| tar -xjf valgrind-${VALGRIND_VERSION}.tar.bz2 | ||||
| cd valgrind-${VALGRIND_VERSION} | ||||
| ./configure --prefix=/usr/local | ||||
| make -j$[$(nproc) - 2] | ||||
| make -j6 | ||||
| sudo make install | ||||
| cd ../../ | ||||
| rm -rf valgrind_build | ||||
|  | ||||
| @ -36,11 +36,14 @@ if [ -n "$ROCM_VERSION" ]; then | ||||
|   curl --retry 3 http://repo.radeon.com/misc/.sccache_amd/sccache -o /opt/cache/bin/sccache | ||||
| else | ||||
|   ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"') | ||||
|   # TODO: Install the pre-built binary from S3 as building from source | ||||
|   # https://github.com/pytorch/sccache has started failing mysteriously | ||||
|   # in which sccache server couldn't start with the following error: | ||||
|   #   sccache: error: Invalid argument (os error 22) | ||||
|   install_binary | ||||
|   case "$ID" in | ||||
|     ubuntu) | ||||
|       install_ubuntu | ||||
|       ;; | ||||
|     *) | ||||
|       install_binary | ||||
|       ;; | ||||
|   esac | ||||
| fi | ||||
| chmod a+x /opt/cache/bin/sccache | ||||
|  | ||||
|  | ||||
| @ -4,7 +4,10 @@ set -ex | ||||
|  | ||||
| if [ -n "$CLANG_VERSION" ]; then | ||||
|  | ||||
|   if [[ $CLANG_VERSION == 9 && $UBUNTU_VERSION == 18.04 ]]; then | ||||
|   if [[ $CLANG_VERSION == 7 && $UBUNTU_VERSION == 16.04 ]]; then | ||||
|     wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add - | ||||
|     sudo apt-add-repository "deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-7 main" | ||||
|   elif [[ $CLANG_VERSION == 9 && $UBUNTU_VERSION == 18.04 ]]; then | ||||
|     sudo apt-get update | ||||
|     # gpg-agent is not available by default on 18.04 | ||||
|     sudo apt-get install  -y --no-install-recommends gpg-agent | ||||
| @ -25,11 +28,11 @@ if [ -n "$CLANG_VERSION" ]; then | ||||
|   fi | ||||
|  | ||||
|   # Use update-alternatives to make this version the default | ||||
|   # TODO: Decide if overriding gcc as well is a good idea | ||||
|   # update-alternatives --install /usr/bin/gcc gcc /usr/bin/clang-"$CLANG_VERSION" 50 | ||||
|   # update-alternatives --install /usr/bin/g++ g++ /usr/bin/clang++-"$CLANG_VERSION" 50 | ||||
|   update-alternatives --install /usr/bin/clang clang /usr/bin/clang-"$CLANG_VERSION" 50 | ||||
|   update-alternatives --install /usr/bin/clang++ clang++ /usr/bin/clang++-"$CLANG_VERSION" 50 | ||||
|   # Override cc/c++ to clang as well | ||||
|   update-alternatives --install /usr/bin/cc cc /usr/bin/clang 50 | ||||
|   update-alternatives --install /usr/bin/c++ c++ /usr/bin/clang++ 50 | ||||
|  | ||||
|   # clang's packaging is a little messed up (the runtime libs aren't | ||||
|   # added into the linker path), so give it a little help | ||||
|  | ||||
| @ -5,17 +5,16 @@ set -ex | ||||
| # Optionally install conda | ||||
| if [ -n "$ANACONDA_PYTHON_VERSION" ]; then | ||||
|   BASE_URL="https://repo.anaconda.com/miniconda" | ||||
|   CONDA_FILE="Miniconda3-latest-Linux-x86_64.sh" | ||||
|   if [[ $(uname -m) == "aarch64" ]] || [[ "$BUILD_ENVIRONMENT" == *xpu* ]]; then | ||||
|     BASE_URL="https://github.com/conda-forge/miniforge/releases/latest/download" | ||||
|     CONDA_FILE="Miniforge3-Linux-$(uname -m).sh" | ||||
|   fi | ||||
|  | ||||
|   MAJOR_PYTHON_VERSION=$(echo "$ANACONDA_PYTHON_VERSION" | cut -d . -f 1) | ||||
|   MINOR_PYTHON_VERSION=$(echo "$ANACONDA_PYTHON_VERSION" | cut -d . -f 2) | ||||
|  | ||||
|   case "$MAJOR_PYTHON_VERSION" in | ||||
|     3);; | ||||
|     2) | ||||
|       CONDA_FILE="Miniconda2-latest-Linux-x86_64.sh" | ||||
|     ;; | ||||
|     3) | ||||
|       CONDA_FILE="Miniconda3-latest-Linux-x86_64.sh" | ||||
|     ;; | ||||
|     *) | ||||
|       echo "Unsupported ANACONDA_PYTHON_VERSION: $ANACONDA_PYTHON_VERSION" | ||||
|       exit 1 | ||||
| @ -47,45 +46,28 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then | ||||
|   # Uncomment the below when resolved to track the latest conda update | ||||
|   # as_jenkins conda update -y -n base conda | ||||
|  | ||||
|   if [[ $(uname -m) == "aarch64" ]]; then | ||||
|     export SYSROOT_DEP="sysroot_linux-aarch64=2.17" | ||||
|   else | ||||
|     export SYSROOT_DEP="sysroot_linux-64=2.17" | ||||
|   fi | ||||
|  | ||||
|   # Install correct Python version | ||||
|   # Also ensure sysroot is using a modern GLIBC to match system compilers | ||||
|   as_jenkins conda create -n py_$ANACONDA_PYTHON_VERSION -y\ | ||||
|              python="$ANACONDA_PYTHON_VERSION" \ | ||||
|              ${SYSROOT_DEP} | ||||
|  | ||||
|   # libstdcxx from conda default channels are too old, we need GLIBCXX_3.4.30 | ||||
|   # which is provided in libstdcxx 12 and up. | ||||
|   conda_install libstdcxx-ng=12.3.0 -c conda-forge | ||||
|   as_jenkins conda create -n py_$ANACONDA_PYTHON_VERSION -y python="$ANACONDA_PYTHON_VERSION" | ||||
|  | ||||
|   # Install PyTorch conda deps, as per https://github.com/pytorch/pytorch README | ||||
|   if [[ $(uname -m) == "aarch64" ]]; then | ||||
|     CONDA_COMMON_DEPS="astunparse pyyaml setuptools openblas==0.3.25=*openmp* ninja==1.11.1 scons==4.5.2" | ||||
|  | ||||
|     if [ "$ANACONDA_PYTHON_VERSION" = "3.8" ]; then | ||||
|       NUMPY_VERSION=1.24.4 | ||||
|     else | ||||
|       NUMPY_VERSION=1.26.2 | ||||
|     fi | ||||
|   CONDA_COMMON_DEPS="astunparse pyyaml mkl=2021.4.0 mkl-include=2021.4.0 setuptools" | ||||
|   if [ "$ANACONDA_PYTHON_VERSION" = "3.11" ]; then | ||||
|     # Install llvm-8 as it is required to compile llvmlite-0.30.0 from source | ||||
|     # TODO: Stop using `-c malfet` | ||||
|     conda_install numpy=1.23.5 ${CONDA_COMMON_DEPS} llvmdev=8.0.0 -c malfet | ||||
|   elif [ "$ANACONDA_PYTHON_VERSION" = "3.10" ]; then | ||||
|     # Install llvm-8 as it is required to compile llvmlite-0.30.0 from source | ||||
|     conda_install numpy=1.21.2 ${CONDA_COMMON_DEPS} llvmdev=8.0.0 | ||||
|   elif [ "$ANACONDA_PYTHON_VERSION" = "3.9" ]; then | ||||
|     # Install llvm-8 as it is required to compile llvmlite-0.30.0 from source | ||||
|     conda_install numpy=1.19.2 ${CONDA_COMMON_DEPS} llvmdev=8.0.0 | ||||
|   elif [ "$ANACONDA_PYTHON_VERSION" = "3.8" ]; then | ||||
|     # Install llvm-8 as it is required to compile llvmlite-0.30.0 from source | ||||
|     conda_install numpy=1.18.5 ${CONDA_COMMON_DEPS} llvmdev=8.0.0 | ||||
|   else | ||||
|     CONDA_COMMON_DEPS="astunparse pyyaml mkl=2021.4.0 mkl-include=2021.4.0 setuptools" | ||||
|  | ||||
|     if [ "$ANACONDA_PYTHON_VERSION" = "3.11" ] || [ "$ANACONDA_PYTHON_VERSION" = "3.12" ] || [ "$ANACONDA_PYTHON_VERSION" = "3.13" ]; then | ||||
|       NUMPY_VERSION=1.26.0 | ||||
|     else | ||||
|       NUMPY_VERSION=1.21.2 | ||||
|     fi | ||||
|     # Install `typing-extensions` for 3.7 | ||||
|     conda_install numpy=1.18.5 ${CONDA_COMMON_DEPS} typing-extensions | ||||
|   fi | ||||
|   conda_install ${CONDA_COMMON_DEPS} | ||||
|  | ||||
|   # Install llvm-8 as it is required to compile llvmlite-0.30.0 from source | ||||
|   # and libpython-static for torch deploy | ||||
|   conda_install llvmdev=8.0.0 "libpython-static=${ANACONDA_PYTHON_VERSION}" | ||||
|  | ||||
|   # Use conda cmake in some cases. Conda cmake will be newer than our supported | ||||
|   # min version (3.5 for xenial and 3.10 for bionic), so we only do it in those | ||||
| @ -103,15 +85,13 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then | ||||
|  | ||||
|   # Install some other packages, including those needed for Python test reporting | ||||
|   pip_install -r /opt/conda/requirements-ci.txt | ||||
|   pip_install numpy=="$NUMPY_VERSION" | ||||
|   pip_install -U scikit-learn | ||||
|  | ||||
|   if [ -n "$DOCS" ]; then | ||||
|     apt-get update | ||||
|     apt-get -y install expect-dev | ||||
|  | ||||
|     # We are currently building docs with python 3.8 (min support version) | ||||
|     pip_install -r /opt/conda/requirements-docs.txt | ||||
|   # Update scikit-learn to a python-3.8 compatible version | ||||
|   if [[ $(python -c "import sys; print(int(sys.version_info >= (3, 8)))") == "1" ]]; then | ||||
|     pip_install -U scikit-learn | ||||
|   else | ||||
|     # Pinned scikit-learn due to https://github.com/scikit-learn/scikit-learn/issues/14485 (affects gcc 5.5 only) | ||||
|     pip_install scikit-learn==0.20.3 | ||||
|   fi | ||||
|  | ||||
|   popd | ||||
|  | ||||
| @ -1,20 +0,0 @@ | ||||
| #!/bin/bash | ||||
| # Script used only in CD pipeline | ||||
| set -ex | ||||
|  | ||||
| # Anaconda | ||||
| # Latest anaconda is using openssl-3 which is incompatible with all currently published versions of git | ||||
| # Which are using openssl-1.1.1, see https://anaconda.org/anaconda/git/files?version=2.40.1 for example | ||||
| MINICONDA_URL=https://repo.anaconda.com/miniconda/Miniconda3-py311_23.5.2-0-Linux-x86_64.sh | ||||
| wget -q $MINICONDA_URL | ||||
| # NB: Manually invoke bash per https://github.com/conda/conda/issues/10431 | ||||
| bash $(basename "$MINICONDA_URL") -b -p /opt/conda | ||||
| rm $(basename "$MINICONDA_URL") | ||||
| export PATH=/opt/conda/bin:$PATH | ||||
| # See https://github.com/pytorch/builder/issues/1473 | ||||
| # Pin conda to 23.5.2 as it's the last one compatible with openssl-1.1.1 | ||||
| conda install -y conda=23.5.2 conda-build anaconda-client git ninja | ||||
| # The cmake version here needs to match with the minimum version of cmake | ||||
| # supported by PyTorch (3.18). There is only 3.18.2 on anaconda | ||||
| /opt/conda/bin/pip3 install cmake==3.18.2 | ||||
| conda remove -y --force patchelf | ||||
| @ -1,96 +0,0 @@ | ||||
| #!/bin/bash | ||||
| # Script used only in CD pipeline | ||||
| set -uex -o pipefail | ||||
|  | ||||
| PYTHON_DOWNLOAD_URL=https://www.python.org/ftp/python | ||||
| PYTHON_DOWNLOAD_GITHUB_BRANCH=https://github.com/python/cpython/archive/refs/heads | ||||
| GET_PIP_URL=https://bootstrap.pypa.io/get-pip.py | ||||
|  | ||||
| # Python versions to be installed in /opt/$VERSION_NO | ||||
| CPYTHON_VERSIONS=${CPYTHON_VERSIONS:-"3.8.1 3.9.0 3.10.1 3.11.0 3.12.0 3.13.0"} | ||||
|  | ||||
| function check_var { | ||||
|     if [ -z "$1" ]; then | ||||
|         echo "required variable not defined" | ||||
|         exit 1 | ||||
|     fi | ||||
| } | ||||
|  | ||||
| function do_cpython_build { | ||||
|     local py_ver=$1 | ||||
|     local py_folder=$2 | ||||
|     check_var $py_ver | ||||
|     check_var $py_folder | ||||
|     tar -xzf Python-$py_ver.tgz | ||||
|     pushd $py_folder | ||||
|  | ||||
|     local prefix="/opt/_internal/cpython-${py_ver}" | ||||
|     mkdir -p ${prefix}/lib | ||||
|     if [[ -n $(which patchelf) ]]; then | ||||
|         local shared_flags="--enable-shared" | ||||
|     else | ||||
|         local shared_flags="--disable-shared" | ||||
|     fi | ||||
|     if [[ -z  "${WITH_OPENSSL+x}" ]]; then | ||||
|         local openssl_flags="" | ||||
|     else | ||||
|         local openssl_flags="--with-openssl=${WITH_OPENSSL} --with-openssl-rpath=auto" | ||||
|     fi | ||||
|  | ||||
|     # -Wformat added for https://bugs.python.org/issue17547 on Python 2.6 | ||||
|     CFLAGS="-Wformat" ./configure --prefix=${prefix} ${openssl_flags} ${shared_flags} > /dev/null | ||||
|  | ||||
|     make -j40 > /dev/null | ||||
|     make install > /dev/null | ||||
|  | ||||
|     if [[ "${shared_flags}" == "--enable-shared" ]]; then | ||||
|         patchelf --set-rpath '$ORIGIN/../lib' ${prefix}/bin/python3 | ||||
|     fi | ||||
|  | ||||
|     popd | ||||
|     rm -rf $py_folder | ||||
|     # Some python's install as bin/python3. Make them available as | ||||
|     # bin/python. | ||||
|     if [ -e ${prefix}/bin/python3 ]; then | ||||
|         ln -s python3 ${prefix}/bin/python | ||||
|     fi | ||||
|     ${prefix}/bin/python get-pip.py | ||||
|     if [ -e ${prefix}/bin/pip3 ] && [ ! -e ${prefix}/bin/pip ]; then | ||||
|         ln -s pip3 ${prefix}/bin/pip | ||||
|     fi | ||||
|     # install setuptools since python 3.12 is required to use distutils | ||||
|     ${prefix}/bin/pip install wheel==0.34.2 setuptools==68.2.2 | ||||
|     local abi_tag=$(${prefix}/bin/python -c "from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag; print('{0}{1}-{2}'.format(get_abbr_impl(), get_impl_ver(), get_abi_tag()))") | ||||
|     ln -s ${prefix} /opt/python/${abi_tag} | ||||
| } | ||||
|  | ||||
| function build_cpython { | ||||
|     local py_ver=$1 | ||||
|     check_var $py_ver | ||||
|     check_var $PYTHON_DOWNLOAD_URL | ||||
|     local py_ver_folder=$py_ver | ||||
|     if [ "$py_ver" = "3.13.0" ]; then | ||||
|         PY_VER_SHORT="3.13" | ||||
|         check_var $PYTHON_DOWNLOAD_GITHUB_BRANCH | ||||
|         wget $PYTHON_DOWNLOAD_GITHUB_BRANCH/$PY_VER_SHORT.tar.gz -O Python-$py_ver.tgz | ||||
|         do_cpython_build $py_ver cpython-$PY_VER_SHORT | ||||
|     else | ||||
|         wget -q $PYTHON_DOWNLOAD_URL/$py_ver_folder/Python-$py_ver.tgz | ||||
|         do_cpython_build $py_ver Python-$py_ver | ||||
|     fi | ||||
|  | ||||
|     rm -f Python-$py_ver.tgz | ||||
| } | ||||
|  | ||||
| function build_cpythons { | ||||
|     check_var $GET_PIP_URL | ||||
|     curl -sLO $GET_PIP_URL | ||||
|     for py_ver in $@; do | ||||
|         build_cpython $py_ver | ||||
|     done | ||||
|     rm -f get-pip.py | ||||
| } | ||||
|  | ||||
| mkdir -p /opt/python | ||||
| mkdir -p /opt/_internal | ||||
| build_cpythons $CPYTHON_VERSIONS | ||||
| @ -1,250 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| NCCL_VERSION=v2.21.5-1 | ||||
| CUDNN_VERSION=9.1.0.70 | ||||
|  | ||||
| function install_cusparselt_040 { | ||||
|     # cuSparseLt license: https://docs.nvidia.com/cuda/cusparselt/license.html | ||||
|     mkdir tmp_cusparselt && pushd tmp_cusparselt | ||||
|     wget -q https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-x86_64/libcusparse_lt-linux-x86_64-0.4.0.7-archive.tar.xz | ||||
|     tar xf libcusparse_lt-linux-x86_64-0.4.0.7-archive.tar.xz | ||||
|     cp -a libcusparse_lt-linux-x86_64-0.4.0.7-archive/include/* /usr/local/cuda/include/ | ||||
|     cp -a libcusparse_lt-linux-x86_64-0.4.0.7-archive/lib/* /usr/local/cuda/lib64/ | ||||
|     popd | ||||
|     rm -rf tmp_cusparselt | ||||
| } | ||||
|  | ||||
| function install_cusparselt_052 { | ||||
|     # cuSparseLt license: https://docs.nvidia.com/cuda/cusparselt/license.html | ||||
|     mkdir tmp_cusparselt && pushd tmp_cusparselt | ||||
|     wget -q https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-x86_64/libcusparse_lt-linux-x86_64-0.5.2.1-archive.tar.xz | ||||
|     tar xf libcusparse_lt-linux-x86_64-0.5.2.1-archive.tar.xz | ||||
|     cp -a libcusparse_lt-linux-x86_64-0.5.2.1-archive/include/* /usr/local/cuda/include/ | ||||
|     cp -a libcusparse_lt-linux-x86_64-0.5.2.1-archive/lib/* /usr/local/cuda/lib64/ | ||||
|     popd | ||||
|     rm -rf tmp_cusparselt | ||||
| } | ||||
|  | ||||
| function install_cusparselt_062 { | ||||
|     # cuSparseLt license: https://docs.nvidia.com/cuda/cusparselt/license.html | ||||
|     mkdir tmp_cusparselt && pushd tmp_cusparselt | ||||
|     wget -q https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-x86_64/libcusparse_lt-linux-x86_64-0.6.2.3-archive.tar.xz | ||||
|     tar xf libcusparse_lt-linux-x86_64-0.6.2.3-archive.tar.xz | ||||
|     cp -a libcusparse_lt-linux-x86_64-0.6.2.3-archive/include/* /usr/local/cuda/include/ | ||||
|     cp -a libcusparse_lt-linux-x86_64-0.6.2.3-archive/lib/* /usr/local/cuda/lib64/ | ||||
|     popd | ||||
|     rm -rf tmp_cusparselt | ||||
| } | ||||
|  | ||||
| function install_118 { | ||||
|     echo "Installing CUDA 11.8 and cuDNN ${CUDNN_VERSION} and NCCL ${NCCL_VERSION} and cuSparseLt-0.4.0" | ||||
|     rm -rf /usr/local/cuda-11.8 /usr/local/cuda | ||||
|     # install CUDA 11.8.0 in the same container | ||||
|     wget -q https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run | ||||
|     chmod +x cuda_11.8.0_520.61.05_linux.run | ||||
|     ./cuda_11.8.0_520.61.05_linux.run --toolkit --silent | ||||
|     rm -f cuda_11.8.0_520.61.05_linux.run | ||||
|     rm -f /usr/local/cuda && ln -s /usr/local/cuda-11.8 /usr/local/cuda | ||||
|  | ||||
|     # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement | ||||
|     mkdir tmp_cudnn && cd tmp_cudnn | ||||
|     wget -q https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-${CUDNN_VERSION}_cuda11-archive.tar.xz -O cudnn-linux-x86_64-${CUDNN_VERSION}_cuda11-archive.tar.xz | ||||
|     tar xf cudnn-linux-x86_64-${CUDNN_VERSION}_cuda11-archive.tar.xz | ||||
|     cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda11-archive/include/* /usr/local/cuda/include/ | ||||
|     cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda11-archive/lib/* /usr/local/cuda/lib64/ | ||||
|     cd .. | ||||
|     rm -rf tmp_cudnn | ||||
|  | ||||
|     # NCCL license: https://docs.nvidia.com/deeplearning/nccl/#licenses | ||||
|     # Follow build: https://github.com/NVIDIA/nccl/tree/master?tab=readme-ov-file#build | ||||
|     git clone -b $NCCL_VERSION --depth 1 https://github.com/NVIDIA/nccl.git | ||||
|     cd nccl && make -j src.build | ||||
|     cp -a build/include/* /usr/local/cuda/include/ | ||||
|     cp -a build/lib/* /usr/local/cuda/lib64/ | ||||
|     cd .. | ||||
|     rm -rf nccl | ||||
|  | ||||
|     install_cusparselt_040 | ||||
|  | ||||
|     ldconfig | ||||
| } | ||||
|  | ||||
| function install_121 { | ||||
|     echo "Installing CUDA 12.1 and cuDNN ${CUDNN_VERSION} and NCCL ${NCCL_VERSION} and cuSparseLt-0.5.2" | ||||
|     rm -rf /usr/local/cuda-12.1 /usr/local/cuda | ||||
|     # install CUDA 12.1.0 in the same container | ||||
|     wget -q https://developer.download.nvidia.com/compute/cuda/12.1.1/local_installers/cuda_12.1.1_530.30.02_linux.run | ||||
|     chmod +x cuda_12.1.1_530.30.02_linux.run | ||||
|     ./cuda_12.1.1_530.30.02_linux.run --toolkit --silent | ||||
|     rm -f cuda_12.1.1_530.30.02_linux.run | ||||
|     rm -f /usr/local/cuda && ln -s /usr/local/cuda-12.1 /usr/local/cuda | ||||
|  | ||||
|     # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement | ||||
|     mkdir tmp_cudnn && cd tmp_cudnn | ||||
|     wget -q https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz -O cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz | ||||
|     tar xf cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz | ||||
|     cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive/include/* /usr/local/cuda/include/ | ||||
|     cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive/lib/* /usr/local/cuda/lib64/ | ||||
|     cd .. | ||||
|     rm -rf tmp_cudnn | ||||
|  | ||||
|     # NCCL license: https://docs.nvidia.com/deeplearning/nccl/#licenses | ||||
|     # Follow build: https://github.com/NVIDIA/nccl/tree/master?tab=readme-ov-file#build | ||||
|     git clone -b $NCCL_VERSION --depth 1 https://github.com/NVIDIA/nccl.git | ||||
|     cd nccl && make -j src.build | ||||
|     cp -a build/include/* /usr/local/cuda/include/ | ||||
|     cp -a build/lib/* /usr/local/cuda/lib64/ | ||||
|     cd .. | ||||
|     rm -rf nccl | ||||
|  | ||||
|     install_cusparselt_052 | ||||
|  | ||||
|     ldconfig | ||||
| } | ||||
|  | ||||
| function install_124 { | ||||
|   echo "Installing CUDA 12.4.1 and cuDNN ${CUDNN_VERSION} and NCCL ${NCCL_VERSION} and cuSparseLt-0.5.2" | ||||
|   rm -rf /usr/local/cuda-12.4 /usr/local/cuda | ||||
|   # install CUDA 12.4.1 in the same container | ||||
|   wget -q https://developer.download.nvidia.com/compute/cuda/12.4.1/local_installers/cuda_12.4.1_550.54.15_linux.run | ||||
|   chmod +x cuda_12.4.1_550.54.15_linux.run | ||||
|   ./cuda_12.4.1_550.54.15_linux.run --toolkit --silent | ||||
|   rm -f cuda_12.4.1_550.54.15_linux.run | ||||
|   rm -f /usr/local/cuda && ln -s /usr/local/cuda-12.4 /usr/local/cuda | ||||
|  | ||||
|   # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement | ||||
|   mkdir tmp_cudnn && cd tmp_cudnn | ||||
|   wget -q https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz -O cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz | ||||
|   tar xf cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz | ||||
|   cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive/include/* /usr/local/cuda/include/ | ||||
|   cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive/lib/* /usr/local/cuda/lib64/ | ||||
|   cd .. | ||||
|   rm -rf tmp_cudnn | ||||
|  | ||||
|   # NCCL license: https://docs.nvidia.com/deeplearning/nccl/#licenses | ||||
|   # Follow build: https://github.com/NVIDIA/nccl/tree/master?tab=readme-ov-file#build | ||||
|   git clone -b $NCCL_VERSION --depth 1 https://github.com/NVIDIA/nccl.git | ||||
|   cd nccl && make -j src.build | ||||
|   cp -a build/include/* /usr/local/cuda/include/ | ||||
|   cp -a build/lib/* /usr/local/cuda/lib64/ | ||||
|   cd .. | ||||
|   rm -rf nccl | ||||
|  | ||||
|   install_cusparselt_062 | ||||
|  | ||||
|   ldconfig | ||||
| } | ||||
|  | ||||
| function prune_118 { | ||||
|     echo "Pruning CUDA 11.8 and cuDNN" | ||||
|     ##################################################################################### | ||||
|     # CUDA 11.8 prune static libs | ||||
|     ##################################################################################### | ||||
|     export NVPRUNE="/usr/local/cuda-11.8/bin/nvprune" | ||||
|     export CUDA_LIB_DIR="/usr/local/cuda-11.8/lib64" | ||||
|  | ||||
|     export GENCODE="-gencode arch=compute_35,code=sm_35 -gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90" | ||||
|     export GENCODE_CUDNN="-gencode arch=compute_35,code=sm_35 -gencode arch=compute_37,code=sm_37 -gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90" | ||||
|  | ||||
|     if [[ -n "$OVERRIDE_GENCODE" ]]; then | ||||
|         export GENCODE=$OVERRIDE_GENCODE | ||||
|     fi | ||||
|  | ||||
|     # all CUDA libs except CuDNN and CuBLAS (cudnn and cublas need arch 3.7 included) | ||||
|     ls $CUDA_LIB_DIR/ | grep "\.a" | grep -v "culibos" | grep -v "cudart" | grep -v "cudnn" | grep -v "cublas" | grep -v "metis"  \ | ||||
|       | xargs -I {} bash -c \ | ||||
|                 "echo {} && $NVPRUNE $GENCODE $CUDA_LIB_DIR/{} -o $CUDA_LIB_DIR/{}" | ||||
|  | ||||
|     # prune CuDNN and CuBLAS | ||||
|     $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublas_static.a -o $CUDA_LIB_DIR/libcublas_static.a | ||||
|     $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublasLt_static.a -o $CUDA_LIB_DIR/libcublasLt_static.a | ||||
|  | ||||
|     ##################################################################################### | ||||
|     # CUDA 11.8 prune visual tools | ||||
|     ##################################################################################### | ||||
|     export CUDA_BASE="/usr/local/cuda-11.8/" | ||||
|     rm -rf $CUDA_BASE/libnvvp $CUDA_BASE/nsightee_plugins $CUDA_BASE/nsight-compute-2022.3.0 $CUDA_BASE/nsight-systems-2022.4.2/ | ||||
| } | ||||
|  | ||||
| function prune_121 { | ||||
|   echo "Pruning CUDA 12.1" | ||||
|   ##################################################################################### | ||||
|   # CUDA 12.1 prune static libs | ||||
|   ##################################################################################### | ||||
|     export NVPRUNE="/usr/local/cuda-12.1/bin/nvprune" | ||||
|     export CUDA_LIB_DIR="/usr/local/cuda-12.1/lib64" | ||||
|  | ||||
|     export GENCODE="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90" | ||||
|     export GENCODE_CUDNN="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90" | ||||
|  | ||||
|     if [[ -n "$OVERRIDE_GENCODE" ]]; then | ||||
|         export GENCODE=$OVERRIDE_GENCODE | ||||
|     fi | ||||
|  | ||||
|     # all CUDA libs except CuDNN and CuBLAS | ||||
|     ls $CUDA_LIB_DIR/ | grep "\.a" | grep -v "culibos" | grep -v "cudart" | grep -v "cudnn" | grep -v "cublas" | grep -v "metis"  \ | ||||
|       | xargs -I {} bash -c \ | ||||
|                 "echo {} && $NVPRUNE $GENCODE $CUDA_LIB_DIR/{} -o $CUDA_LIB_DIR/{}" | ||||
|  | ||||
|     # prune CuDNN and CuBLAS | ||||
|     $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublas_static.a -o $CUDA_LIB_DIR/libcublas_static.a | ||||
|     $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublasLt_static.a -o $CUDA_LIB_DIR/libcublasLt_static.a | ||||
|  | ||||
|     ##################################################################################### | ||||
|     # CUDA 12.1 prune visual tools | ||||
|     ##################################################################################### | ||||
|     export CUDA_BASE="/usr/local/cuda-12.1/" | ||||
|     rm -rf $CUDA_BASE/libnvvp $CUDA_BASE/nsightee_plugins $CUDA_BASE/nsight-compute-2023.1.0 $CUDA_BASE/nsight-systems-2023.1.2/ | ||||
| } | ||||
|  | ||||
# Prune CUDA 12.4 static libraries down to the gencode lists below and
# delete the bundled visual tools, to shrink the final image.
function prune_124 {
  echo "Pruning CUDA 12.4"
  #####################################################################################
  # CUDA 12.4 prune static libs
  #####################################################################################
  export NVPRUNE="/usr/local/cuda-12.4/bin/nvprune"
  export CUDA_LIB_DIR="/usr/local/cuda-12.4/lib64"

  export GENCODE="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90"
  export GENCODE_CUDNN="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90"

  # Callers may override either gencode list through the environment.
  if [[ -n "$OVERRIDE_GENCODE" ]]; then
      export GENCODE=$OVERRIDE_GENCODE
  fi
  if [[ -n "$OVERRIDE_GENCODE_CUDNN" ]]; then
      export GENCODE_CUDNN=$OVERRIDE_GENCODE_CUDNN
  fi

  # all CUDA libs except CuDNN and CuBLAS
  ls $CUDA_LIB_DIR/ | grep "\.a" | grep -v "culibos" | grep -v "cudart" | grep -v "cudnn" | grep -v "cublas" | grep -v "metis"  \
      | xargs -I {} bash -c \
                "echo {} && $NVPRUNE $GENCODE $CUDA_LIB_DIR/{} -o $CUDA_LIB_DIR/{}"

  # prune CuDNN and CuBLAS
  $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublas_static.a -o $CUDA_LIB_DIR/libcublas_static.a
  $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublasLt_static.a -o $CUDA_LIB_DIR/libcublasLt_static.a

  #####################################################################################
  # CUDA 12.4 prune visual tools
  #####################################################################################
  export CUDA_BASE="/usr/local/cuda-12.4/"
  rm -rf $CUDA_BASE/libnvvp $CUDA_BASE/nsightee_plugins $CUDA_BASE/nsight-compute-2024.1.0 $CUDA_BASE/nsight-systems-2023.4.4/
}
|  | ||||
# Dispatch on each requested CUDA version: install the toolkit, then prune it.
while [ "$#" -gt 0 ]; do
    case "$1" in
        11.8)
            install_118
            prune_118
            ;;
        12.1)
            install_121
            prune_121
            ;;
        12.4)
            install_124
            prune_124
            ;;
        *)
            echo "bad argument $1"
            exit 1
            ;;
    esac
    shift
done
| @ -1,93 +0,0 @@ | ||||
| #!/bin/bash | ||||
| # Script used only in CD pipeline | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| NCCL_VERSION=v2.21.5-1 | ||||
|  | ||||
# Download cuSparseLt 0.5.2 (linux-sbsa redist) and copy its headers and
# libraries into the CUDA toolkit tree.
# cuSparseLt license: https://docs.nvidia.com/cuda/cusparselt/license.html
function install_cusparselt_052 {
    local archive="libcusparse_lt-linux-sbsa-0.5.2.1-archive"
    mkdir tmp_cusparselt && pushd tmp_cusparselt
    wget -q "https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-sbsa/${archive}.tar.xz"
    tar xf "${archive}.tar.xz"
    cp -a "${archive}"/include/* /usr/local/cuda/include/
    cp -a "${archive}"/lib/* /usr/local/cuda/lib64/
    popd
    rm -rf tmp_cusparselt
}
|  | ||||
# Install CUDA 12.4.1 toolkit, cuDNN 9.1, NCCL ${NCCL_VERSION} (built from
# source) and cuSparseLt 0.5.2 for linux-sbsa under /usr/local/cuda-12.4,
# then refresh the shared-library cache.
function install_124 {
  echo "Installing CUDA 12.4.1 and cuDNN 9.1 and NCCL ${NCCL_VERSION} and cuSparseLt-0.5.2"
  rm -rf /usr/local/cuda-12.4 /usr/local/cuda
  # install CUDA 12.4.1 in the same container
  wget -q https://developer.download.nvidia.com/compute/cuda/12.4.1/local_installers/cuda_12.4.1_550.54.15_linux_sbsa.run
  chmod +x cuda_12.4.1_550.54.15_linux_sbsa.run
  ./cuda_12.4.1_550.54.15_linux_sbsa.run --toolkit --silent
  rm -f cuda_12.4.1_550.54.15_linux_sbsa.run
  # Repoint the /usr/local/cuda symlink at the fresh 12.4 install.
  rm -f /usr/local/cuda && ln -s /usr/local/cuda-12.4 /usr/local/cuda

  # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
  mkdir tmp_cudnn && cd tmp_cudnn
  wget -q https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-sbsa/cudnn-linux-sbsa-9.1.0.70_cuda12-archive.tar.xz -O cudnn-linux-sbsa-9.1.0.70_cuda12-archive.tar.xz
  tar xf cudnn-linux-sbsa-9.1.0.70_cuda12-archive.tar.xz
  cp -a cudnn-linux-sbsa-9.1.0.70_cuda12-archive/include/* /usr/local/cuda/include/
  cp -a cudnn-linux-sbsa-9.1.0.70_cuda12-archive/lib/* /usr/local/cuda/lib64/
  cd ..
  rm -rf tmp_cudnn

  # NCCL license: https://docs.nvidia.com/deeplearning/nccl/#licenses
  # Follow build: https://github.com/NVIDIA/nccl/tree/master?tab=readme-ov-file#build
  git clone -b ${NCCL_VERSION} --depth 1 https://github.com/NVIDIA/nccl.git
  cd nccl && make -j src.build
  cp -a build/include/* /usr/local/cuda/include/
  cp -a build/lib/* /usr/local/cuda/lib64/
  cd ..
  rm -rf nccl

  install_cusparselt_052

  # Refresh the dynamic linker cache so the new CUDA libs are found.
  ldconfig
}
|  | ||||
# Prune CUDA 12.4 static libraries down to the supported gencode lists and
# delete the bundled visual tools, to shrink the final image.
function prune_124 {
  echo "Pruning CUDA 12.4"
  #####################################################################################
  # CUDA 12.4 prune static libs
  #####################################################################################
  export NVPRUNE="/usr/local/cuda-12.4/bin/nvprune"
  export CUDA_LIB_DIR="/usr/local/cuda-12.4/lib64"

  export GENCODE="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90"
  export GENCODE_CUDNN="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90"

  # Honor both environment overrides, matching the x86_64 prune_124
  # (previously only OVERRIDE_GENCODE was respected here).
  if [[ -n "$OVERRIDE_GENCODE" ]]; then
      export GENCODE=$OVERRIDE_GENCODE
  fi
  if [[ -n "$OVERRIDE_GENCODE_CUDNN" ]]; then
      export GENCODE_CUDNN=$OVERRIDE_GENCODE_CUDNN
  fi

  # all CUDA libs except CuDNN and CuBLAS
  ls $CUDA_LIB_DIR/ | grep "\.a" | grep -v "culibos" | grep -v "cudart" | grep -v "cudnn" | grep -v "cublas" | grep -v "metis"  \
      | xargs -I {} bash -c \
                "echo {} && $NVPRUNE $GENCODE $CUDA_LIB_DIR/{} -o $CUDA_LIB_DIR/{}"

  # prune CuDNN and CuBLAS
  $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublas_static.a -o $CUDA_LIB_DIR/libcublas_static.a
  $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublasLt_static.a -o $CUDA_LIB_DIR/libcublasLt_static.a

  #####################################################################################
  # CUDA 12.4 prune visual tools
  #####################################################################################
  export CUDA_BASE="/usr/local/cuda-12.4/"
  rm -rf $CUDA_BASE/libnvvp $CUDA_BASE/nsightee_plugins $CUDA_BASE/nsight-compute-2024.1.0 $CUDA_BASE/nsight-systems-2023.4.4/
}
|  | ||||
# Dispatch on each requested CUDA version: install the toolkit, then prune it.
while [ "$#" -gt 0 ]; do
    case "$1" in
        12.4)
            install_124
            prune_124
            ;;
        *)
            echo "bad argument $1"
            exit 1
            ;;
    esac
    shift
done
| @ -1,22 +1,27 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| if [[ -n "${CUDNN_VERSION}" ]]; then | ||||
| if [[ ${CUDNN_VERSION} == 8 ]]; then | ||||
|     # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement | ||||
|     mkdir tmp_cudnn | ||||
|     pushd tmp_cudnn | ||||
|     if [[ ${CUDA_VERSION:0:2} == "12" ]]; then | ||||
|         CUDNN_NAME="cudnn-linux-x86_64-9.1.0.70_cuda12-archive" | ||||
|     elif [[ ${CUDA_VERSION:0:2} == "11" ]]; then | ||||
|         CUDNN_NAME="cudnn-linux-x86_64-9.1.0.70_cuda11-archive" | ||||
|     mkdir tmp_cudnn && cd tmp_cudnn | ||||
|     CUDNN_NAME="cudnn-linux-x86_64-8.3.2.44_cuda11.5-archive" | ||||
|     if [[ ${CUDA_VERSION:0:4} == "11.7" ]]; then | ||||
|         CUDNN_NAME="cudnn-linux-x86_64-8.5.0.96_cuda11-archive" | ||||
|         curl --retry 3 -OLs https://ossci-linux.s3.amazonaws.com/${CUDNN_NAME}.tar.xz | ||||
|     elif [[ ${CUDA_VERSION:0:4} == "11.8" ]]; then | ||||
|         CUDNN_NAME="cudnn-linux-x86_64-8.7.0.84_cuda11-archive" | ||||
|         curl --retry 3 -OLs https://developer.download.nvidia.com/compute/redist/cudnn/v8.7.0/local_installers/11.8/${CUDNN_NAME}.tar.xz | ||||
|     else | ||||
|         print "Unsupported CUDA version ${CUDA_VERSION}" | ||||
|         exit 1 | ||||
|         curl --retry 3 -OLs https://developer.download.nvidia.com/compute/redist/cudnn/v8.3.2/local_installers/11.5/${CUDNN_NAME}.tar.xz | ||||
|     fi | ||||
|     curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/${CUDNN_NAME}.tar.xz | ||||
|  | ||||
|     tar xf ${CUDNN_NAME}.tar.xz | ||||
|     cp -a ${CUDNN_NAME}/include/* /usr/include/ | ||||
|     cp -a ${CUDNN_NAME}/include/* /usr/local/cuda/include/ | ||||
|     cp -a ${CUDNN_NAME}/include/* /usr/include/x86_64-linux-gnu/ | ||||
|  | ||||
|     cp -a ${CUDNN_NAME}/lib/* /usr/local/cuda/lib64/ | ||||
|     popd | ||||
|     cp -a ${CUDNN_NAME}/lib/* /usr/lib/x86_64-linux-gnu/ | ||||
|     cd .. | ||||
|     rm -rf tmp_cudnn | ||||
|     ldconfig | ||||
| fi | ||||
|  | ||||
| @ -1,25 +0,0 @@ | ||||
#!/bin/bash
# Install NVIDIA cuDSS into the CUDA toolkit tree (only for CUDA 12.1-12.4).
# cudss license: https://docs.nvidia.com/cuda/cudss/license.html

set -ex

mkdir tmp_cudss && cd tmp_cudss

if [[ ${CUDA_VERSION:0:4} =~ ^12\.[1-4]$ ]]; then
    # NVIDIA's redist layout uses x86_64 for amd64 hosts, sbsa otherwise.
    export TARGETARCH=${TARGETARCH:-$(uname -m)}
    case "${TARGETARCH}" in
        amd64|x86_64) arch_path='x86_64' ;;
        *)            arch_path='sbsa'   ;;
    esac
    CUDSS_NAME="libcudss-linux-${arch_path}-0.3.0.9_cuda12-archive"
    curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cudss/redist/libcudss/linux-${arch_path}/${CUDSS_NAME}.tar.xz

    # only for cuda 12
    tar xf ${CUDSS_NAME}.tar.xz
    cp -a ${CUDSS_NAME}/include/* /usr/local/cuda/include/
    cp -a ${CUDSS_NAME}/lib/* /usr/local/cuda/lib64/
fi

cd ..
rm -rf tmp_cudss
ldconfig
| @ -1,34 +0,0 @@ | ||||
#!/bin/bash
# Install the cuSPARSELt release matching ${CUDA_VERSION} into the CUDA
# toolkit tree. Fails fast with a clear message for unsupported versions
# (previously an unmatched CUDA_VERSION left CUSPARSELT_NAME unset and the
# script died confusingly at the tar step).
# cuSPARSELt license: https://docs.nvidia.com/cuda/cusparselt/license.html

set -ex

mkdir tmp_cusparselt && cd tmp_cusparselt

# Resolve the redist arch directory once (x86_64 for amd64 hosts, sbsa for
# arm64 server builds) instead of repeating it per branch.
arch_path='sbsa'
export TARGETARCH=${TARGETARCH:-$(uname -m)}
if [ "${TARGETARCH}" = 'amd64' ] || [ "${TARGETARCH}" = 'x86_64' ]; then
    arch_path='x86_64'
fi

if [[ ${CUDA_VERSION:0:4} =~ ^12\.[2-4]$ ]]; then
    CUSPARSELT_NAME="libcusparse_lt-linux-${arch_path}-0.6.2.3-archive"
elif [[ ${CUDA_VERSION:0:4} == "12.1" ]]; then
    CUSPARSELT_NAME="libcusparse_lt-linux-${arch_path}-0.5.2.1-archive"
elif [[ ${CUDA_VERSION:0:4} == "11.8" ]]; then
    # CUDA 11.8 only ships an x86_64 archive.
    arch_path='x86_64'
    CUSPARSELT_NAME="libcusparse_lt-linux-x86_64-0.4.0.7-archive"
else
    echo "Unsupported CUDA version ${CUDA_VERSION} for cuSPARSELt" >&2
    exit 1
fi
curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-${arch_path}/${CUSPARSELT_NAME}.tar.xz

tar xf ${CUSPARSELT_NAME}.tar.xz
cp -a ${CUSPARSELT_NAME}/include/* /usr/local/cuda/include/
cp -a ${CUSPARSELT_NAME}/lib/* /usr/local/cuda/lib64/
cd ..
rm -rf tmp_cusparselt
ldconfig
| @ -4,6 +4,11 @@ set -ex | ||||
|  | ||||
| install_ubuntu() { | ||||
|   apt-get update | ||||
|   apt-get install -y --no-install-recommends \ | ||||
|           libhiredis-dev \ | ||||
|           libleveldb-dev \ | ||||
|           liblmdb-dev \ | ||||
|           libsnappy-dev | ||||
|  | ||||
|   # Cleanup | ||||
|   apt-get autoclean && apt-get clean | ||||
| @ -15,6 +20,12 @@ install_centos() { | ||||
|   # See http://fedoraproject.org/wiki/EPEL | ||||
|   yum --enablerepo=extras install -y epel-release | ||||
|  | ||||
|   yum install -y \ | ||||
|       hiredis-devel \ | ||||
|       leveldb-devel \ | ||||
|       lmdb-devel \ | ||||
|       snappy-devel | ||||
|  | ||||
|   # Cleanup | ||||
|   yum clean all | ||||
|   rm -rf /var/cache/yum | ||||
|  | ||||
| @ -7,7 +7,7 @@ if [ -n "$KATEX" ]; then | ||||
|   # Ignore error if gpg-agent doesn't exist (for Ubuntu 16.04) | ||||
|   apt-get install -y gpg-agent || : | ||||
|  | ||||
|   curl --retry 3 -sL https://deb.nodesource.com/setup_16.x | sudo -E bash - | ||||
|   curl --retry 3 -sL https://deb.nodesource.com/setup_12.x | sudo -E bash - | ||||
|   sudo apt-get install -y nodejs | ||||
|  | ||||
|   curl --retry 3 -sS https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add - | ||||
|  | ||||
| @ -1,65 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh" | ||||
|  | ||||
# Check out pytorch/executorch at the CI-pinned commit (with submodules)
# and make the tree writable by the jenkins user.
clone_executorch() {
  EXECUTORCH_PINNED_COMMIT=$(get_pinned_commit executorch)

  # Fetch, pin to the CI commit, and pull submodules.
  git clone https://github.com/pytorch/executorch.git
  pushd executorch
  git checkout "${EXECUTORCH_PINNED_COMMIT}"
  git submodule update --init
  popd

  chown -R jenkins executorch
}
|  | ||||
# Install buck2 at the version pinned inside the executorch checkout.
install_buck2() {
  pushd executorch/.ci/docker
  # Both the version pin and the installer live in the executorch repo.
  BUCK2_VERSION=$(< ci_commit_pins/buck2.txt)
  source common/install_buck.sh
  popd
}
|  | ||||
# Install the conda-level dependencies (e.g. flatbuffer) pinned by the
# executorch repo's CI environment file.
install_conda_dependencies() {
  pushd executorch/.ci/docker
  conda_install --file conda-env-ci.txt
  popd
}
|  | ||||
# Install the pip-level dependencies for ExecuTorch CI.
install_pip_dependencies() {
  pushd executorch/.ci/docker
  # ExecuTorch only needs CPU wheels; install them up front so the much
  # larger CUDA builds are never pulled in as transitive dependencies.
  pip_install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
  # Then everything else pinned for CI.
  pip_install -r requirements-ci.txt
  popd
}
|  | ||||
# Build and initialize ExecuTorch itself (pybind bindings, XNNPACK backend,
# quantized kernels) inside the checkout.
setup_executorch() {
  pushd executorch
  # The Vulkan delegate build needs swiftshader and the Vulkan SDK.
  as_jenkins bash .ci/scripts/setup-vulkan-linux-deps.sh

  export PYTHON_EXECUTABLE=python
  export EXECUTORCH_BUILD_PYBIND=ON
  export CMAKE_ARGS="-DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON"

  as_jenkins .ci/scripts/setup-linux.sh cmake
  popd
}
|  | ||||
# Driver: clone at the pinned commit, install the toolchain and
# dependencies, then build/initialize ExecuTorch.
clone_executorch
install_buck2
install_conda_dependencies
install_pip_dependencies
setup_executorch
| @ -7,10 +7,17 @@ if [ -n "$GCC_VERSION" ]; then | ||||
|   # Need the official toolchain repo to get alternate packages | ||||
|   add-apt-repository ppa:ubuntu-toolchain-r/test | ||||
|   apt-get update | ||||
|   apt-get install -y g++-$GCC_VERSION | ||||
|   update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-"$GCC_VERSION" 50 | ||||
|   update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-"$GCC_VERSION" 50 | ||||
|   update-alternatives --install /usr/bin/gcov gcov /usr/bin/gcov-"$GCC_VERSION" 50 | ||||
|   if [[ "$UBUNTU_VERSION" == "16.04" && "${GCC_VERSION:0:1}" == "5" ]]; then | ||||
|     apt-get install -y g++-5=5.4.0-6ubuntu1~16.04.12 | ||||
|     update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-5 50 | ||||
|     update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-5 50 | ||||
|     update-alternatives --install /usr/bin/gcov gcov /usr/bin/gcov-5 50 | ||||
|   else | ||||
|     apt-get install -y g++-$GCC_VERSION | ||||
|     update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-"$GCC_VERSION" 50 | ||||
|     update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-"$GCC_VERSION" 50 | ||||
|     update-alternatives --install /usr/bin/gcov gcov /usr/bin/gcov-"$GCC_VERSION" 50 | ||||
|   fi | ||||
|  | ||||
|  | ||||
|   # Cleanup package manager | ||||
|  | ||||
| @ -1,46 +0,0 @@ | ||||
#!/bin/bash
# Build LLVM 16 from source, then build Halide at the CI-pinned commit and
# install it into the active conda environment; finish with an import check.
set -ex

source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh"

COMMIT=$(get_pinned_commit halide)
test -n "$COMMIT"

# activate conda to populate CONDA_PREFIX
test -n "$ANACONDA_PYTHON_VERSION"
eval "$(conda shell.bash hook)"
conda activate py_$ANACONDA_PYTHON_VERSION

if [ -n "${UBUNTU_VERSION}" ];then
    apt update
    apt-get install -y lld liblld-15-dev libpng-dev libjpeg-dev libgl-dev \
                  libopenblas-dev libeigen3-dev libatlas-base-dev libzstd-dev
fi

conda_install numpy scipy imageio cmake ninja

# Halide needs LLVM with clang and the X86/NVPTX backends; build it here.
git clone --depth 1 --branch release/16.x --recursive https://github.com/llvm/llvm-project.git
cmake -DCMAKE_BUILD_TYPE=Release \
        -DLLVM_ENABLE_PROJECTS="clang" \
        -DLLVM_TARGETS_TO_BUILD="X86;NVPTX" \
        -DLLVM_ENABLE_TERMINFO=OFF -DLLVM_ENABLE_ASSERTIONS=ON \
        -DLLVM_ENABLE_EH=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_BUILD_32_BITS=OFF \
        -S llvm-project/llvm -B llvm-build -G Ninja
cmake --build llvm-build
cmake --install llvm-build --prefix llvm-install
# Point the Halide build at the freshly installed LLVM.
export LLVM_ROOT=`pwd`/llvm-install
export LLVM_CONFIG=$LLVM_ROOT/bin/llvm-config

git clone https://github.com/halide/Halide.git
pushd Halide
git checkout ${COMMIT} && git submodule update --init --recursive
pip_install -r requirements.txt
cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -S . -B build
cmake --build build
# Ensure the conda env exposes a lib/python3 path for the install step.
test -e ${CONDA_PREFIX}/lib/python3 || ln -s python${ANACONDA_PYTHON_VERSION} ${CONDA_PREFIX}/lib/python3
cmake --install build --prefix ${CONDA_PREFIX}
chown -R jenkins ${CONDA_PREFIX}
popd
rm -rf Halide llvm-build llvm-project llvm-install

python -c "import halide"  # check for errors
| @ -1,26 +0,0 @@ | ||||
#!/bin/bash
# Install the HuggingFace transformers and timm benchmark dependencies at
# their CI-pinned commits.

set -ex

source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh"

# Install transformers at the pinned commit.
function install_huggingface() {
  # Fix: the original declared "local version" but assigned "commit",
  # leaving a dead local and leaking "commit" into the global scope.
  local commit
  commit=$(get_pinned_commit huggingface)
  pip_install pandas==2.0.3
  pip_install "git+https://github.com/huggingface/transformers@${commit}"
}

# Install timm at the pinned commit, then drop build-only packages.
function install_timm() {
  local commit
  commit=$(get_pinned_commit timm)
  pip_install pandas==2.0.3
  pip_install "git+https://github.com/huggingface/pytorch-image-models@${commit}"
  # Clean up
  conda_run pip uninstall -y cmake torch torchvision triton
}

# Pango is needed for weasyprint which is needed for doctr
conda_install pango
install_huggingface
install_timm
| @ -1,23 +0,0 @@ | ||||
#!/bin/bash
# Script used only in CD pipeline
# Build libpng from source and install it system-wide.

set -ex

LIBPNG_VERSION=1.6.37

mkdir -p libpng
pushd libpng

wget "http://download.sourceforge.net/libpng/libpng-${LIBPNG_VERSION}.tar.gz"
tar -xvzf "libpng-${LIBPNG_VERSION}.tar.gz"

pushd "libpng-${LIBPNG_VERSION}"
./configure
make
make install
popd

popd
rm -rf libpng
| @ -1,29 +0,0 @@ | ||||
#!/usr/bin/env bash
# Script used only in CD pipeline
# Download a prebuilt MAGMA conda archive for the given CUDA version and
# unpack it under /usr/local/cuda-<version>/magma.

set -eou pipefail

# NOTE(review): this value is shadowed by the assignment inside do_install
# below, so 2.6.1 is what actually gets installed -- confirm whether this
# outer default is still intentional.
MAGMA_VERSION="2.5.2"

# $1: dotted CUDA version, e.g. 11.8.
function do_install() {
    cuda_version=$1
    cuda_version_nodot=${1/./}

    MAGMA_VERSION="2.6.1"
    magma_archive="magma-cuda${cuda_version_nodot}-${MAGMA_VERSION}-1.tar.bz2"

    cuda_dir="/usr/local/cuda-${cuda_version}"
    (
        set -x
        tmp_dir=$(mktemp -d)
        pushd ${tmp_dir}
        curl -OLs https://anaconda.org/pytorch/magma-cuda${cuda_version_nodot}/${MAGMA_VERSION}/download/linux-64/${magma_archive}
        tar -xvf "${magma_archive}"
        mkdir -p "${cuda_dir}/magma"
        mv include "${cuda_dir}/magma/include"
        mv lib "${cuda_dir}/magma/lib"
        popd
    )
}

do_install $1
| @ -1,169 +0,0 @@ | ||||
#!/bin/bash
# Script used only in CD pipeline
# Build and install a custom MIOpen for the requested ROCm version.
# Usage: <script> <ROCM_VERSION>  (dotted form, e.g. 5.6 or 5.6.1)

set -ex

ROCM_VERSION=$1

if [[ -z $ROCM_VERSION ]]; then
    echo "missing ROCM_VERSION"
    exit 1;
fi

# Detect the host distro; the package-manager commands below differ.
IS_UBUNTU=0
ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"')
case "$ID" in
  ubuntu)
    IS_UBUNTU=1
    ;;
  centos)
    IS_UBUNTU=0
    ;;
  *)
    echo "Unable to determine OS..."
    exit 1
    ;;
esac

# To make version comparison easier, create an integer representation.
# e.g. 5.6.1 -> 50601, 5.6 -> 50600 (missing patchlevel defaults to 0).
save_IFS="$IFS"
IFS=. ROCM_VERSION_ARRAY=(${ROCM_VERSION})
IFS="$save_IFS"
if [[ ${#ROCM_VERSION_ARRAY[@]} == 2 ]]; then
    ROCM_VERSION_MAJOR=${ROCM_VERSION_ARRAY[0]}
    ROCM_VERSION_MINOR=${ROCM_VERSION_ARRAY[1]}
    ROCM_VERSION_PATCH=0
elif [[ ${#ROCM_VERSION_ARRAY[@]} == 3 ]]; then
    ROCM_VERSION_MAJOR=${ROCM_VERSION_ARRAY[0]}
    ROCM_VERSION_MINOR=${ROCM_VERSION_ARRAY[1]}
    ROCM_VERSION_PATCH=${ROCM_VERSION_ARRAY[2]}
else
    echo "Unhandled ROCM_VERSION ${ROCM_VERSION}"
    exit 1
fi
ROCM_INT=$(($ROCM_VERSION_MAJOR * 10000 + $ROCM_VERSION_MINOR * 100 + $ROCM_VERSION_PATCH))

# Install custom MIOpen + COMgr for ROCm >= 4.0.1
if [[ $ROCM_INT -lt 40001 ]]; then
    echo "ROCm version < 4.0.1; will not install custom MIOpen"
    exit 0
fi
| # Function to retry functions that sometimes timeout or have flaky failures | ||||
# Function to retry functions that sometimes timeout or have flaky failures.
# Use "$@" (not $*) so arguments containing spaces are re-executed intact,
# matching the retry helpers in the other CI install scripts.
retry () {
    "$@"  || (sleep 1 && "$@") || (sleep 2 && "$@") || (sleep 4 && "$@") || (sleep 8 && "$@")
}
|  | ||||
# Build custom MIOpen to use comgr for offline compilation.

## Need a sanitized ROCM_VERSION without patchlevel; patchlevel version 0 must be added to paths.
ROCM_DOTS=$(echo ${ROCM_VERSION} | tr -d -c '.' | wc -c)
if [[ ${ROCM_DOTS} == 1 ]]; then
    ROCM_VERSION_NOPATCH="${ROCM_VERSION}"
    ROCM_INSTALL_PATH="/opt/rocm-${ROCM_VERSION}.0"
else
    ROCM_VERSION_NOPATCH="${ROCM_VERSION%.*}"
    ROCM_INSTALL_PATH="/opt/rocm-${ROCM_VERSION}"
fi

# MIOPEN_USE_HIP_KERNELS is a Workaround for COMgr issues
MIOPEN_CMAKE_COMMON_FLAGS="
-DMIOPEN_USE_COMGR=ON
-DMIOPEN_BUILD_DRIVER=OFF
"
# Pull MIOpen repo and set DMIOPEN_EMBED_DB based on ROCm version.
# Branches that need no patches exit 0 early and keep the distro package.
if [[ $ROCM_INT -ge 60200 ]] && [[ $ROCM_INT -lt 60300 ]]; then
    MIOPEN_BRANCH="release/rocm-rel-6.2-staging"
elif [[ $ROCM_INT -ge 60100 ]] && [[ $ROCM_INT -lt 60200 ]]; then
    echo "ROCm 6.1 MIOpen does not need any patches, do not build from source"
    exit 0
elif [[ $ROCM_INT -ge 60000 ]] && [[ $ROCM_INT -lt 60100 ]]; then
    echo "ROCm 6.0 MIOpen does not need any patches, do not build from source"
    exit 0
elif [[ $ROCM_INT -ge 50700 ]] && [[ $ROCM_INT -lt 60000 ]]; then
    echo "ROCm 5.7 MIOpen does not need any patches, do not build from source"
    exit 0
elif [[ $ROCM_INT -ge 50600 ]] && [[ $ROCM_INT -lt 50700 ]]; then
    MIOPEN_BRANCH="release/rocm-rel-5.6-staging"
elif [[ $ROCM_INT -ge 50500 ]] && [[ $ROCM_INT -lt 50600 ]]; then
    MIOPEN_BRANCH="release/rocm-rel-5.5-gfx11"
elif [[ $ROCM_INT -ge 50400 ]] && [[ $ROCM_INT -lt 50500 ]]; then
    MIOPEN_CMAKE_DB_FLAGS="-DMIOPEN_EMBED_DB=gfx900_56;gfx906_60;gfx90878;gfx90a6e;gfx1030_36 -DMIOPEN_USE_MLIR=Off"
    MIOPEN_BRANCH="release/rocm-rel-5.4-staging"
elif [[ $ROCM_INT -ge 50300 ]] && [[ $ROCM_INT -lt 50400 ]]; then
    MIOPEN_CMAKE_DB_FLAGS="-DMIOPEN_EMBED_DB=gfx900_56;gfx906_60;gfx90878;gfx90a6e;gfx1030_36 -DMIOPEN_USE_MLIR=Off"
    MIOPEN_BRANCH="release/rocm-rel-5.3-staging"
elif [[ $ROCM_INT -ge 50200 ]] && [[ $ROCM_INT -lt 50300 ]]; then
    MIOPEN_CMAKE_DB_FLAGS="-DMIOPEN_EMBED_DB=gfx900_56;gfx906_60;gfx90878;gfx90a6e;gfx1030_36 -DMIOPEN_USE_MLIR=Off"
    MIOPEN_BRANCH="release/rocm-rel-5.2-staging"
elif [[ $ROCM_INT -ge 50100 ]] && [[ $ROCM_INT -lt 50200 ]]; then
    MIOPEN_CMAKE_DB_FLAGS="-DMIOPEN_EMBED_DB=gfx900_56;gfx906_60;gfx90878;gfx90a6e;gfx1030_36"
    MIOPEN_BRANCH="release/rocm-rel-5.1-staging"
elif [[ $ROCM_INT -ge 50000 ]] && [[ $ROCM_INT -lt 50100 ]]; then
    MIOPEN_CMAKE_DB_FLAGS="-DMIOPEN_EMBED_DB=gfx900_56;gfx906_60;gfx90878;gfx90a6e;gfx1030_36"
    MIOPEN_BRANCH="release/rocm-rel-5.0-staging"
else
    echo "Unhandled ROCM_VERSION ${ROCM_VERSION}"
    exit 1
fi


# Remove the distro-packaged MIOpen so the locally built one replaces it.
if [[ ${IS_UBUNTU} == 1 ]]; then
  apt-get remove -y miopen-hip
else
  yum remove -y miopen-hip
fi

git clone https://github.com/ROCm/MIOpen -b ${MIOPEN_BRANCH}
pushd MIOpen
# remove .git to save disk space since CI runner was running out
rm -rf .git
# Don't build CK to save docker build time
if [[ $ROCM_INT -ge 60200 ]]; then
    sed -i '/composable_kernel/d' requirements.txt
fi
# Don't build MLIR to save docker build time
# since we are disabling MLIR backend for MIOpen anyway
if [[ $ROCM_INT -ge 50400 ]] && [[ $ROCM_INT -lt 50500 ]]; then
    sed -i '/rocMLIR/d' requirements.txt
elif [[ $ROCM_INT -ge 50200 ]] && [[ $ROCM_INT -lt 50400 ]]; then
    sed -i '/llvm-project-mlir/d' requirements.txt
fi
## MIOpen minimum requirements
cmake -P install_deps.cmake --minimum

# clean up since CI runner was running out of disk space
rm -rf /tmp/*
if [[ ${IS_UBUNTU} == 1 ]]; then
  apt-get autoclean && apt-get clean
  rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
else
  yum clean all
  rm -rf /var/cache/yum
  rm -rf /var/lib/yum/yumdb
  rm -rf /var/lib/yum/history
fi

## Build MIOpen
mkdir -p build
cd build
PKG_CONFIG_PATH=/usr/local/lib/pkgconfig CXX=${ROCM_INSTALL_PATH}/llvm/bin/clang++ cmake .. \
    ${MIOPEN_CMAKE_COMMON_FLAGS} \
    ${MIOPEN_CMAKE_DB_FLAGS} \
    -DCMAKE_PREFIX_PATH="${ROCM_INSTALL_PATH}/hip;${ROCM_INSTALL_PATH}"
make MIOpen -j $(nproc)

# Build MIOpen package
make -j $(nproc) package

# clean up since CI runner was running out of disk space
rm -rf /usr/local/cget

# Install the freshly built package with the distro's package manager.
if [[ ${IS_UBUNTU} == 1 ]]; then
  sudo dpkg -i miopen-hip*.deb
else
  yum install -y miopen-*.rpm
fi

popd
rm -rf MIOpen
| @ -1,16 +0,0 @@ | ||||
#!/bin/bash
set -ex

# Fetch Intel MKL (static libs + headers) via pip wheels and unpack the
# payload into /opt/intel.
MKL_VERSION=2024.2.0

MKLROOT=/opt/intel
mkdir -p ${MKLROOT}
pushd /tmp

python3 -mpip install wheel
python3 -mpip download -d . mkl-static==${MKL_VERSION}
for pkg in mkl_static mkl_include; do
    python3 -m wheel unpack ${pkg}-${MKL_VERSION}-py2.py3-none-manylinux1_x86_64.whl
done
mv mkl_static-${MKL_VERSION}/mkl_static-${MKL_VERSION}.data/data/lib ${MKLROOT}
mv mkl_include-${MKL_VERSION}/mkl_include-${MKL_VERSION}.data/data/include ${MKLROOT}
| @ -1,13 +0,0 @@ | ||||
#!/bin/bash
# Script used only in CD pipeline
# Download and decompress the four MNIST archives into /usr/local/mnist/.

set -ex

MNIST_DIR=/usr/local/mnist
mkdir -p "${MNIST_DIR}"
cd "${MNIST_DIR}"

# Quote expansions per ShellCheck SC2086; set -e aborts on any failed step.
for img in train-images-idx3-ubyte.gz train-labels-idx1-ubyte.gz t10k-images-idx3-ubyte.gz t10k-labels-idx1-ubyte.gz; do
  wget -q "https://ossci-datasets.s3.amazonaws.com/mnist/${img}"
  gzip -d "${img}"
done
| @ -1,20 +0,0 @@ | ||||
#!/bin/bash
# Install NVPL (NVIDIA Performance Libraries) BLAS and LAPACK redists for
# linux-sbsa into /opt/nvpl/{lib,include}.

set -ex

# Download one NVPL redist archive ($1 = package name, $2 = version) and
# copy its lib/ and include/ trees into /opt/nvpl.
function install_nvpl_package {
    local name=$1
    local version=$2
    local archive="${name}-linux-sbsa-${version}-archive"

    wget "https://developer.download.nvidia.com/compute/nvpl/redist/${name}/linux-sbsa/${archive}.tar.xz"
    tar xf "${archive}.tar.xz"
    cp -r "${archive}"/lib/* /opt/nvpl/lib/
    cp -r "${archive}"/include/* /opt/nvpl/include/
}

function install_nvpl {
    mkdir -p /opt/nvpl/lib /opt/nvpl/include

    # Same archives/versions as before, deduplicated through the helper.
    install_nvpl_package nvpl_blas 0.3.0
    install_nvpl_package nvpl_lapack 0.2.3.1
}

install_nvpl
| @ -1,52 +0,0 @@ | ||||
#!/bin/bash
# Install the Python dependencies the ONNX CI jobs need and pre-cache the
# transformers models the ONNX tests load.

set -ex

source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh"

# NOTE(review): retry is defined but not referenced below -- confirm whether
# a caller that sources this script still needs it.
retry () {
    "$@" || (sleep 10 && "$@") || (sleep 20 && "$@") || (sleep 40 && "$@")
}

# A bunch of custom pip dependencies for ONNX
pip_install \
  beartype==0.15.0 \
  filelock==3.9.0 \
  flatbuffers==2.0 \
  mock==5.0.1 \
  ninja==1.10.2 \
  networkx==2.5 \
  numpy==1.24.2

# ONNXRuntime should be installed before installing
# onnx-weekly. Otherwise, onnx-weekly could be
# overwritten by onnx.
pip_install \
  parameterized==0.8.1 \
  pytest-cov==4.0.0 \
  pytest-subtests==0.10.0 \
  tabulate==0.9.0 \
  transformers==4.36.2

pip_install coloredlogs packaging

pip_install onnxruntime==1.18.1
pip_install onnx==1.16.2
pip_install onnxscript==0.1.0.dev20240831 --no-deps
# required by onnxscript
pip_install ml_dtypes

# Cache the transformers model to be used later by ONNX tests. We need to run the transformers
# package to download the model. By default, the model is cached at ~/.cache/huggingface/hub/
IMPORT_SCRIPT_FILENAME="/tmp/onnx_import_script.py"
as_jenkins echo 'import transformers; transformers.AutoModel.from_pretrained("sshleifer/tiny-gpt2"); transformers.AutoTokenizer.from_pretrained("sshleifer/tiny-gpt2"); transformers.AutoModelForSpeechSeq2Seq.from_pretrained("openai/whisper-large-v3");' > "${IMPORT_SCRIPT_FILENAME}"

# Need a PyTorch version for transformers to work
pip_install --pre torch --index-url https://download.pytorch.org/whl/nightly/cpu
# Very weird quoting behavior here https://github.com/conda/conda/issues/10972,
# so echo the command to a file and run the file instead
conda_run python "${IMPORT_SCRIPT_FILENAME}"

# Cleaning up
conda_run pip uninstall -y torch
rm "${IMPORT_SCRIPT_FILENAME}" || true
| @ -1,22 +0,0 @@ | ||||
| #!/bin/bash | ||||
| # Script used only in CD pipeline | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| cd / | ||||
| git clone https://github.com/OpenMathLib/OpenBLAS.git -b v0.3.25 --depth 1 --shallow-submodules | ||||
|  | ||||
|  | ||||
| OPENBLAS_BUILD_FLAGS=" | ||||
| NUM_THREADS=128 | ||||
| USE_OPENMP=1 | ||||
| NO_SHARED=0 | ||||
| DYNAMIC_ARCH=1 | ||||
| TARGET=ARMV8 | ||||
| CFLAGS=-O3 | ||||
| " | ||||
|  | ||||
| OPENBLAS_CHECKOUT_DIR="OpenBLAS" | ||||
|  | ||||
| make -j8 ${OPENBLAS_BUILD_FLAGS} -C ${OPENBLAS_CHECKOUT_DIR} | ||||
| make -j8 ${OPENBLAS_BUILD_FLAGS} install -C ${OPENBLAS_CHECKOUT_DIR} | ||||
| @ -9,8 +9,7 @@ tar xf "${OPENSSL}.tar.gz" | ||||
| cd "${OPENSSL}" | ||||
| ./config --prefix=/opt/openssl -d '-Wl,--enable-new-dtags,-rpath,$(LIBRPATH)' | ||||
| # NOTE: openssl install errors out when built with the -j option | ||||
| NPROC=$[$(nproc) - 2] | ||||
| make -j${NPROC}; make install_sw | ||||
| make -j6; make install_sw | ||||
| # Link the ssl libraries to the /usr/lib folder. | ||||
| sudo ln -s /opt/openssl/lib/lib* /usr/lib | ||||
| cd .. | ||||
|  | ||||
| @ -1,16 +0,0 @@ | ||||
| #!/bin/bash | ||||
| # Script used only in CD pipeline | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| # Pin the version to latest release 0.17.2, building newer commit starts | ||||
| # to fail on the current image | ||||
| git clone -b 0.17.2 --single-branch https://github.com/NixOS/patchelf | ||||
| cd patchelf | ||||
| sed -i 's/serial/parallel/g' configure.ac | ||||
| ./bootstrap.sh | ||||
| ./configure | ||||
| make | ||||
| make install | ||||
| cd .. | ||||
| rm -rf patchelf | ||||
| @ -2,18 +2,55 @@ | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| pb_dir="/usr/temp_pb_install_dir" | ||||
| mkdir -p $pb_dir | ||||
| # This function installs protobuf 3.17 | ||||
| install_protobuf_317() { | ||||
|   pb_dir="/usr/temp_pb_install_dir" | ||||
|   mkdir -p $pb_dir | ||||
|  | ||||
| # On the nvidia/cuda:9-cudnn7-devel-centos7 image we need this symlink or | ||||
| # else it will fail with | ||||
| #   g++: error: ./../lib64/crti.o: No such file or directory | ||||
| ln -s /usr/lib64 "$pb_dir/lib64" | ||||
|   # On the nvidia/cuda:9-cudnn7-devel-centos7 image we need this symlink or | ||||
|   # else it will fail with | ||||
|   #   g++: error: ./../lib64/crti.o: No such file or directory | ||||
|   ln -s /usr/lib64 "$pb_dir/lib64" | ||||
|  | ||||
| curl -LO "https://github.com/protocolbuffers/protobuf/releases/download/v3.17.3/protobuf-all-3.17.3.tar.gz" --retry 3 | ||||
|   curl -LO "https://github.com/protocolbuffers/protobuf/releases/download/v3.17.3/protobuf-all-3.17.3.tar.gz" --retry 3 | ||||
|   tar -xvz -C "$pb_dir" --strip-components 1 -f protobuf-all-3.17.3.tar.gz | ||||
|   # -j6 to balance memory usage and speed. | ||||
|   # naked `-j` seems to use too much memory. | ||||
|   pushd "$pb_dir" && ./configure && make -j6 && make -j6 check && sudo make -j6 install && sudo ldconfig | ||||
|   popd | ||||
|   rm -rf $pb_dir | ||||
| } | ||||
|  | ||||
| tar -xvz --no-same-owner -C "$pb_dir" --strip-components 1 -f protobuf-all-3.17.3.tar.gz | ||||
| NPROC=$[$(nproc) - 2] | ||||
| pushd "$pb_dir" && ./configure && make -j${NPROC} && make -j${NPROC} check && sudo make -j${NRPOC} install && sudo ldconfig | ||||
| popd | ||||
| rm -rf $pb_dir | ||||
| install_ubuntu() { | ||||
|   # Ubuntu 14.04 has cmake 2.8.12 as the default option, so we will | ||||
|   # install cmake3 here and use cmake3. | ||||
|   apt-get update | ||||
|   if [[ "$UBUNTU_VERSION" == 14.04 ]]; then | ||||
|     apt-get install -y --no-install-recommends cmake3 | ||||
|   fi | ||||
|  | ||||
|   # Cleanup | ||||
|   apt-get autoclean && apt-get clean | ||||
|   rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* | ||||
|  | ||||
|   install_protobuf_317 | ||||
| } | ||||
|  | ||||
| install_centos() { | ||||
|   install_protobuf_317 | ||||
| } | ||||
|  | ||||
| # Install base packages depending on the base OS | ||||
| ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"') | ||||
| case "$ID" in | ||||
|   ubuntu) | ||||
|     install_ubuntu | ||||
|     ;; | ||||
|   centos) | ||||
|     install_centos | ||||
|     ;; | ||||
|   *) | ||||
|     echo "Unable to determine OS..." | ||||
|     exit 1 | ||||
|     ;; | ||||
| esac | ||||
|  | ||||
| @ -6,6 +6,9 @@ ver() { | ||||
|     printf "%3d%03d%03d%03d" $(echo "$1" | tr '.' ' '); | ||||
| } | ||||
|  | ||||
| # Map ROCm version to AMDGPU version | ||||
| declare -A AMDGPU_VERSIONS=( ["5.0"]="21.50" ["5.1.1"]="22.10.1" ["5.2"]="22.20" ) | ||||
|  | ||||
| install_ubuntu() { | ||||
|     apt-get update | ||||
|     if [[ $UBUNTU_VERSION == 18.04 ]]; then | ||||
| @ -23,14 +26,31 @@ install_ubuntu() { | ||||
|     apt-get install -y libc++1 | ||||
|     apt-get install -y libc++abi1 | ||||
|  | ||||
|     # Add amdgpu repository | ||||
|     UBUNTU_VERSION_NAME=`cat /etc/os-release | grep UBUNTU_CODENAME | awk -F= '{print $2}'` | ||||
|     echo "deb [arch=amd64] https://repo.radeon.com/amdgpu/${ROCM_VERSION}/ubuntu ${UBUNTU_VERSION_NAME} main" > /etc/apt/sources.list.d/amdgpu.list | ||||
|     if [[ $(ver $ROCM_VERSION) -ge $(ver 4.5) ]]; then | ||||
|         # Add amdgpu repository | ||||
|         UBUNTU_VERSION_NAME=`cat /etc/os-release | grep UBUNTU_CODENAME | awk -F= '{print $2}'` | ||||
|         local amdgpu_baseurl | ||||
|         if [[ $(ver $ROCM_VERSION) -ge $(ver 5.3) ]]; then | ||||
|           amdgpu_baseurl="https://repo.radeon.com/amdgpu/${ROCM_VERSION}/ubuntu" | ||||
|         else | ||||
|           amdgpu_baseurl="https://repo.radeon.com/amdgpu/${AMDGPU_VERSIONS[$ROCM_VERSION]}/ubuntu" | ||||
|         fi | ||||
|         echo "deb [arch=amd64] ${amdgpu_baseurl} ${UBUNTU_VERSION_NAME} main" > /etc/apt/sources.list.d/amdgpu.list | ||||
|     fi | ||||
|  | ||||
|     ROCM_REPO="ubuntu" | ||||
|     if [[ $(ver $ROCM_VERSION) -lt $(ver 4.2) ]]; then | ||||
|         ROCM_REPO="xenial" | ||||
|     fi | ||||
|  | ||||
|     if [[ $(ver $ROCM_VERSION) -ge $(ver 5.3) ]]; then | ||||
|         ROCM_REPO="${UBUNTU_VERSION_NAME}" | ||||
|     fi | ||||
|  | ||||
|     # Add rocm repository | ||||
|     wget -qO - http://repo.radeon.com/rocm/rocm.gpg.key | apt-key add - | ||||
|     local rocm_baseurl="http://repo.radeon.com/rocm/apt/${ROCM_VERSION}" | ||||
|     echo "deb [arch=amd64] ${rocm_baseurl} ${UBUNTU_VERSION_NAME} main" > /etc/apt/sources.list.d/rocm.list | ||||
|     echo "deb [arch=amd64] ${rocm_baseurl} ${ROCM_REPO} main" > /etc/apt/sources.list.d/rocm.list | ||||
|     apt-get update --allow-insecure-repositories | ||||
|  | ||||
|     DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated \ | ||||
| @ -39,29 +59,17 @@ install_ubuntu() { | ||||
|                    rocm-libs \ | ||||
|                    rccl \ | ||||
|                    rocprofiler-dev \ | ||||
|                    roctracer-dev \ | ||||
|                    amd-smi-lib | ||||
|                    roctracer-dev | ||||
|  | ||||
|     if [[ $(ver $ROCM_VERSION) -ge $(ver 6.1) ]]; then | ||||
|         DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated rocm-llvm-dev | ||||
|     fi | ||||
|  | ||||
|     # precompiled miopen kernels added in ROCm 3.5, renamed in ROCm 5.5 | ||||
|     # search for all unversioned packages | ||||
|     # precompiled miopen kernels added in ROCm 3.5; search for all unversioned packages | ||||
|     # if search fails it will abort this script; use true to avoid case where search fails | ||||
|     MIOPENHIPGFX=$(apt-cache search --names-only miopen-hip-gfx | awk '{print $1}' | grep -F -v . || true) | ||||
|     if [[ "x${MIOPENHIPGFX}" = x ]]; then | ||||
|       echo "miopen-hip-gfx package not available" && exit 1 | ||||
|     MIOPENKERNELS=$(apt-cache search --names-only miopenkernels | awk '{print $1}' | grep -F -v . || true) | ||||
|     if [[ "x${MIOPENKERNELS}" = x ]]; then | ||||
|       echo "miopenkernels package not available" | ||||
|     else | ||||
|       DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated ${MIOPENHIPGFX} | ||||
|       DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated ${MIOPENKERNELS} | ||||
|     fi | ||||
|  | ||||
|     # ROCm 6.0 had a regression where journal_mode was enabled on the kdb files resulting in permission errors at runtime | ||||
|     for kdb in /opt/rocm/share/miopen/db/*.kdb | ||||
|     do | ||||
|         sqlite3 $kdb "PRAGMA journal_mode=off; PRAGMA VACUUM;" | ||||
|     done | ||||
|  | ||||
|     # Cleanup | ||||
|     apt-get autoclean && apt-get clean | ||||
|     rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* | ||||
| @ -77,19 +85,25 @@ install_centos() { | ||||
|   yum install -y epel-release | ||||
|   yum install -y dkms kernel-headers-`uname -r` kernel-devel-`uname -r` | ||||
|  | ||||
|   # Add amdgpu repository | ||||
|   local amdgpu_baseurl | ||||
|   if [[ $OS_VERSION == 9 ]]; then | ||||
|       amdgpu_baseurl="https://repo.radeon.com/amdgpu/${ROCM_VERSION}/rhel/9.0/main/x86_64" | ||||
|   else | ||||
|       amdgpu_baseurl="https://repo.radeon.com/amdgpu/${ROCM_VERSION}/rhel/7.9/main/x86_64" | ||||
|   if [[ $(ver $ROCM_VERSION) -ge $(ver 4.5) ]]; then | ||||
|       # Add amdgpu repository | ||||
|       local amdgpu_baseurl | ||||
|       if [[ $OS_VERSION == 9 ]]; then | ||||
|           amdgpu_baseurl="https://repo.radeon.com/amdgpu/${AMDGPU_VERSIONS[$ROCM_VERSION]}/rhel/9.0/main/x86_64" | ||||
|       else | ||||
|         if [[ $(ver $ROCM_VERSION) -ge $(ver 5.3) ]]; then | ||||
|           amdgpu_baseurl="https://repo.radeon.com/amdgpu/${ROCM_VERSION}/rhel/7.9/main/x86_64" | ||||
|         else | ||||
|           amdgpu_baseurl="https://repo.radeon.com/amdgpu/${AMDGPU_VERSIONS[$ROCM_VERSION]}/rhel/7.9/main/x86_64" | ||||
|         fi | ||||
|       fi | ||||
|       echo "[AMDGPU]" > /etc/yum.repos.d/amdgpu.repo | ||||
|       echo "name=AMDGPU" >> /etc/yum.repos.d/amdgpu.repo | ||||
|       echo "baseurl=${amdgpu_baseurl}" >> /etc/yum.repos.d/amdgpu.repo | ||||
|       echo "enabled=1" >> /etc/yum.repos.d/amdgpu.repo | ||||
|       echo "gpgcheck=1" >> /etc/yum.repos.d/amdgpu.repo | ||||
|       echo "gpgkey=http://repo.radeon.com/rocm/rocm.gpg.key" >> /etc/yum.repos.d/amdgpu.repo | ||||
|   fi | ||||
|   echo "[AMDGPU]" > /etc/yum.repos.d/amdgpu.repo | ||||
|   echo "name=AMDGPU" >> /etc/yum.repos.d/amdgpu.repo | ||||
|   echo "baseurl=${amdgpu_baseurl}" >> /etc/yum.repos.d/amdgpu.repo | ||||
|   echo "enabled=1" >> /etc/yum.repos.d/amdgpu.repo | ||||
|   echo "gpgcheck=1" >> /etc/yum.repos.d/amdgpu.repo | ||||
|   echo "gpgkey=http://repo.radeon.com/rocm/rocm.gpg.key" >> /etc/yum.repos.d/amdgpu.repo | ||||
|  | ||||
|   local rocm_baseurl="http://repo.radeon.com/rocm/yum/${ROCM_VERSION}" | ||||
|   echo "[ROCm]" > /etc/yum.repos.d/rocm.repo | ||||
| @ -107,23 +121,7 @@ install_centos() { | ||||
|                    rocm-libs \ | ||||
|                    rccl \ | ||||
|                    rocprofiler-dev \ | ||||
|                    roctracer-dev \ | ||||
|                    amd-smi-lib | ||||
|  | ||||
|   # precompiled miopen kernels; search for all unversioned packages | ||||
|   # if search fails it will abort this script; use true to avoid case where search fails | ||||
|   MIOPENHIPGFX=$(yum -q search miopen-hip-gfx | grep miopen-hip-gfx | awk '{print $1}'| grep -F kdb. || true) | ||||
|   if [[ "x${MIOPENHIPGFX}" = x ]]; then | ||||
|     echo "miopen-hip-gfx package not available" && exit 1 | ||||
|   else | ||||
|     yum install -y ${MIOPENHIPGFX} | ||||
|   fi | ||||
|  | ||||
|   # ROCm 6.0 had a regression where journal_mode was enabled on the kdb files resulting in permission errors at runtime | ||||
|   for kdb in /opt/rocm/share/miopen/db/*.kdb | ||||
|   do | ||||
|       sqlite3 $kdb "PRAGMA journal_mode=off; PRAGMA VACUUM;" | ||||
|   done | ||||
|                    roctracer-dev | ||||
|  | ||||
|   # Cleanup | ||||
|   yum clean all | ||||
|  | ||||
| @ -1,150 +0,0 @@ | ||||
| #!/bin/bash | ||||
| # Script used only in CD pipeline | ||||
|  | ||||
| ########################### | ||||
| ### prereqs | ||||
| ########################### | ||||
| # Install Python packages depending on the base OS | ||||
| ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"') | ||||
| case "$ID" in | ||||
|   ubuntu) | ||||
|     apt-get update -y | ||||
|     apt-get install -y libpciaccess-dev pkg-config | ||||
|     apt-get clean | ||||
|     ;; | ||||
|   centos) | ||||
|     yum install -y libpciaccess-devel pkgconfig | ||||
|     ;; | ||||
|   *) | ||||
|     echo "Unable to determine OS..." | ||||
|     exit 1 | ||||
|     ;; | ||||
| esac | ||||
| python3 -m pip install meson ninja | ||||
|  | ||||
| ########################### | ||||
| ### clone repo | ||||
| ########################### | ||||
| GIT_SSL_NO_VERIFY=true git clone https://gitlab.freedesktop.org/mesa/drm.git | ||||
| pushd drm | ||||
|  | ||||
| ########################### | ||||
| ### patch | ||||
| ########################### | ||||
| patch -p1 <<'EOF' | ||||
| diff --git a/amdgpu/amdgpu_asic_id.c b/amdgpu/amdgpu_asic_id.c | ||||
| index a5007ffc..13fa07fc 100644 | ||||
| --- a/amdgpu/amdgpu_asic_id.c | ||||
| +++ b/amdgpu/amdgpu_asic_id.c | ||||
| @@ -22,6 +22,13 @@ | ||||
|   * | ||||
|   */ | ||||
|  | ||||
| +#define _XOPEN_SOURCE 700 | ||||
| +#define _LARGEFILE64_SOURCE | ||||
| +#define _FILE_OFFSET_BITS 64 | ||||
| +#include <ftw.h> | ||||
| +#include <link.h> | ||||
| +#include <limits.h> | ||||
| + | ||||
|  #include <ctype.h> | ||||
|  #include <stdio.h> | ||||
|  #include <stdlib.h> | ||||
| @@ -34,6 +41,19 @@ | ||||
|  #include "amdgpu_drm.h" | ||||
|  #include "amdgpu_internal.h" | ||||
|  | ||||
| +static char *amdgpuids_path = NULL; | ||||
| +static const char* amdgpuids_path_msg = NULL; | ||||
| + | ||||
| +static int check_for_location_of_amdgpuids(const char *filepath, const struct stat *info, const int typeflag, struct FTW *pathinfo) | ||||
| +{ | ||||
| +	if (typeflag == FTW_F && strstr(filepath, "amdgpu.ids")) { | ||||
| +		amdgpuids_path = strdup(filepath); | ||||
| +		return 1; | ||||
| +	} | ||||
| + | ||||
| +	return 0; | ||||
| +} | ||||
| + | ||||
|  static int parse_one_line(struct amdgpu_device *dev, const char *line) | ||||
|  { | ||||
|  	char *buf, *saveptr; | ||||
| @@ -113,10 +133,46 @@ void amdgpu_parse_asic_ids(struct amdgpu_device *dev) | ||||
|  	int line_num = 1; | ||||
|  	int r = 0; | ||||
|  | ||||
| +	// attempt to find typical location for amdgpu.ids file | ||||
|  	fp = fopen(AMDGPU_ASIC_ID_TABLE, "r"); | ||||
| + | ||||
| +	// if it doesn't exist, search | ||||
| +	if (!fp) { | ||||
| + | ||||
| +	char self_path[ PATH_MAX ]; | ||||
| +	ssize_t count; | ||||
| +	ssize_t i; | ||||
| + | ||||
| +	count = readlink( "/proc/self/exe", self_path, PATH_MAX ); | ||||
| +	if (count > 0) { | ||||
| +		self_path[count] = '\0'; | ||||
| + | ||||
| +		// remove '/bin/python' from self_path | ||||
| +		for (i=count; i>0; --i) { | ||||
| +			if (self_path[i] == '/') break; | ||||
| +			self_path[i] = '\0'; | ||||
| +		} | ||||
| +		self_path[i] = '\0'; | ||||
| +		for (; i>0; --i) { | ||||
| +			if (self_path[i] == '/') break; | ||||
| +			self_path[i] = '\0'; | ||||
| +		} | ||||
| +		self_path[i] = '\0'; | ||||
| + | ||||
| +		if (1 == nftw(self_path, check_for_location_of_amdgpuids, 5, FTW_PHYS)) { | ||||
| +			fp = fopen(amdgpuids_path, "r"); | ||||
| +			amdgpuids_path_msg = amdgpuids_path; | ||||
| +		} | ||||
| +	} | ||||
| + | ||||
| +	} | ||||
| +	else { | ||||
| +		amdgpuids_path_msg = AMDGPU_ASIC_ID_TABLE; | ||||
| +	} | ||||
| + | ||||
| +	// both hard-coded location and search have failed | ||||
|  	if (!fp) { | ||||
| -		fprintf(stderr, "%s: %s\n", AMDGPU_ASIC_ID_TABLE, | ||||
| -			strerror(errno)); | ||||
| +		fprintf(stderr, "amdgpu.ids: No such file or directory\n"); | ||||
|  		return; | ||||
|  	} | ||||
|  | ||||
| @@ -132,7 +188,7 @@ void amdgpu_parse_asic_ids(struct amdgpu_device *dev) | ||||
|  			continue; | ||||
|  		} | ||||
|  | ||||
| -		drmMsg("%s version: %s\n", AMDGPU_ASIC_ID_TABLE, line); | ||||
| +		drmMsg("%s version: %s\n", amdgpuids_path_msg, line); | ||||
|  		break; | ||||
|  	} | ||||
|  | ||||
| @@ -150,7 +206,7 @@ void amdgpu_parse_asic_ids(struct amdgpu_device *dev) | ||||
|  | ||||
|  	if (r == -EINVAL) { | ||||
|  		fprintf(stderr, "Invalid format: %s: line %d: %s\n", | ||||
| -			AMDGPU_ASIC_ID_TABLE, line_num, line); | ||||
| +			amdgpuids_path_msg, line_num, line); | ||||
|  	} else if (r && r != -EAGAIN) { | ||||
|  		fprintf(stderr, "%s: Cannot parse ASIC IDs: %s\n", | ||||
|  			__func__, strerror(-r)); | ||||
| EOF | ||||
|  | ||||
| ########################### | ||||
| ### build | ||||
| ########################### | ||||
| meson builddir --prefix=/opt/amdgpu | ||||
| pushd builddir | ||||
| ninja install | ||||
|  | ||||
| popd | ||||
| popd | ||||
| @ -1,24 +1,15 @@ | ||||
| #!/bin/bash | ||||
| # Script used in CI and CD pipeline | ||||
|  | ||||
| set -ex | ||||
|  | ||||
|  | ||||
| MKLROOT=${MKLROOT:-/opt/conda/envs/py_$ANACONDA_PYTHON_VERSION} | ||||
|  | ||||
| # "install" hipMAGMA into /opt/rocm/magma by copying after build | ||||
| git clone https://bitbucket.org/icl/magma.git | ||||
| pushd magma | ||||
|  | ||||
| # Version 2.7.2 + ROCm related updates | ||||
| git checkout a1625ff4d9bc362906bd01f805dbbe12612953f6 | ||||
|  | ||||
| # Fixes memory leaks of magma found while executing linalg UTs | ||||
| git checkout 5959b8783e45f1809812ed96ae762f38ee701972 | ||||
| cp make.inc-examples/make.inc.hip-gcc-mkl make.inc | ||||
| echo 'LIBDIR += -L$(MKLROOT)/lib' >> make.inc | ||||
| if [[ -f "${MKLROOT}/lib/libmkl_core.a" ]]; then | ||||
|     echo 'LIB = -Wl,--start-group -lmkl_gf_lp64 -lmkl_gnu_thread -lmkl_core -Wl,--end-group -lpthread -lstdc++ -lm -lgomp -lhipblas -lhipsparse' >> make.inc | ||||
| fi | ||||
| echo 'LIB += -Wl,--enable-new-dtags -Wl,--rpath,/opt/rocm/lib -Wl,--rpath,$(MKLROOT)/lib -Wl,--rpath,/opt/rocm/magma/lib -ldl' >> make.inc | ||||
| echo 'LIB += -Wl,--enable-new-dtags -Wl,--rpath,/opt/rocm/lib -Wl,--rpath,$(MKLROOT)/lib -Wl,--rpath,/opt/rocm/magma/lib' >> make.inc | ||||
| echo 'DEVCCFLAGS += --gpu-max-threads-per-block=256' >> make.inc | ||||
| export PATH="${PATH}:/opt/rocm/bin" | ||||
| if [[ -n "$PYTORCH_ROCM_ARCH" ]]; then | ||||
| @ -27,12 +18,12 @@ else | ||||
|   amdgpu_targets=`rocm_agent_enumerator | grep -v gfx000 | sort -u | xargs` | ||||
| fi | ||||
| for arch in $amdgpu_targets; do | ||||
|   echo "DEVCCFLAGS += --offload-arch=$arch" >> make.inc | ||||
|   echo "DEVCCFLAGS += --amdgpu-target=$arch" >> make.inc | ||||
| done | ||||
| # hipcc with openmp flag may cause isnan() on __device__ not to be found; depending on context, compiler may attempt to match with host definition | ||||
| sed -i 's/^FOPENMP/#FOPENMP/g' make.inc | ||||
| make -f make.gen.hipMAGMA -j $(nproc) | ||||
| LANG=C.UTF-8 make lib/libmagma.so -j $(nproc) MKLROOT="${MKLROOT}" | ||||
| make testing/testing_dgemm -j $(nproc) MKLROOT="${MKLROOT}" | ||||
| LANG=C.UTF-8 make lib/libmagma.so -j $(nproc) MKLROOT=/opt/conda/envs/py_$ANACONDA_PYTHON_VERSION | ||||
| make testing/testing_dgemm -j $(nproc) MKLROOT=/opt/conda/envs/py_$ANACONDA_PYTHON_VERSION | ||||
| popd | ||||
| mv magma /opt/rocm | ||||
|  | ||||
							
								
								
									
										14
									
								
								.ci/docker/common/install_thrift.sh
									
									
									
									
									
										Executable file
									
								
							
							
						
						
									
										14
									
								
								.ci/docker/common/install_thrift.sh
									
									
									
									
									
										Executable file
									
								
							| @ -0,0 +1,14 @@ | ||||
| apt-get update | ||||
| apt-get install -y sudo wget libboost-dev libboost-test-dev libboost-program-options-dev libboost-filesystem-dev libboost-thread-dev libevent-dev automake libtool flex bison pkg-config g++ libssl-dev | ||||
| wget https://www-us.apache.org/dist/thrift/0.12.0/thrift-0.12.0.tar.gz | ||||
| tar -xvf thrift-0.12.0.tar.gz | ||||
| cd thrift-0.12.0 | ||||
| for file in ./compiler/cpp/Makefile*; do | ||||
|   sed -i 's/\-Werror//' $file | ||||
| done | ||||
| ./bootstrap.sh | ||||
| ./configure --without-php --without-java --without-python --without-nodejs --without-go --without-ruby | ||||
| sudo make | ||||
| sudo make install | ||||
| cd .. | ||||
| rm thrift-0.12.0.tar.gz | ||||
| @ -1,83 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh" | ||||
|  | ||||
| get_conda_version() { | ||||
|   as_jenkins conda list -n py_$ANACONDA_PYTHON_VERSION | grep -w $* | head -n 1 | awk '{print $2}' | ||||
| } | ||||
|  | ||||
| conda_reinstall() { | ||||
|   as_jenkins conda install -q -n py_$ANACONDA_PYTHON_VERSION -y --force-reinstall $* | ||||
| } | ||||
|  | ||||
| if [ -n "${XPU_VERSION}" ]; then | ||||
|   TRITON_REPO="https://github.com/intel/intel-xpu-backend-for-triton" | ||||
|   TRITON_TEXT_FILE="triton-xpu" | ||||
| else | ||||
|   TRITON_REPO="https://github.com/openai/triton" | ||||
|   TRITON_TEXT_FILE="triton" | ||||
| fi | ||||
|  | ||||
| # The logic here is copied from .ci/pytorch/common_utils.sh | ||||
| TRITON_PINNED_COMMIT=$(get_pinned_commit ${TRITON_TEXT_FILE}) | ||||
|  | ||||
| if [ -n "${UBUNTU_VERSION}" ];then | ||||
|     apt update | ||||
|     apt-get install -y gpg-agent | ||||
| fi | ||||
|  | ||||
| if [ -n "${CONDA_CMAKE}" ]; then | ||||
|   # Keep the current cmake and numpy version here, so we can reinstall them later | ||||
|   CMAKE_VERSION=$(get_conda_version cmake) | ||||
|   NUMPY_VERSION=$(get_conda_version numpy) | ||||
| fi | ||||
|  | ||||
| if [ -z "${MAX_JOBS}" ]; then | ||||
|     export MAX_JOBS=$(nproc) | ||||
| fi | ||||
|  | ||||
| # Git checkout triton | ||||
| mkdir /var/lib/jenkins/triton | ||||
| chown -R jenkins /var/lib/jenkins/triton | ||||
| chgrp -R jenkins /var/lib/jenkins/triton | ||||
| pushd /var/lib/jenkins/ | ||||
|  | ||||
| as_jenkins git clone ${TRITON_REPO} triton | ||||
| cd triton | ||||
| as_jenkins git checkout ${TRITON_PINNED_COMMIT} | ||||
| cd python | ||||
|  | ||||
| # TODO: remove patch setup.py once we have a proper fix for https://github.com/triton-lang/triton/issues/4527 | ||||
| as_jenkins sed -i -e 's/https:\/\/tritonlang.blob.core.windows.net\/llvm-builds/https:\/\/oaitriton.blob.core.windows.net\/public\/llvm-builds/g' setup.py | ||||
|  | ||||
| if [ -n "${UBUNTU_VERSION}" ] && [ -n "${GCC_VERSION}" ] && [[ "${GCC_VERSION}" == "7" ]]; then | ||||
|   # Triton needs at least gcc-9 to build | ||||
|   apt-get install -y g++-9 | ||||
|  | ||||
|   CXX=g++-9 pip_install -e . | ||||
| elif [ -n "${UBUNTU_VERSION}" ] && [ -n "${CLANG_VERSION}" ]; then | ||||
|   # Triton needs <filesystem> which surprisingly is not available with clang-9 toolchain | ||||
|   add-apt-repository -y ppa:ubuntu-toolchain-r/test | ||||
|   apt-get install -y g++-9 | ||||
|  | ||||
|   CXX=g++-9 pip_install -e . | ||||
| else | ||||
|   pip_install -e . | ||||
| fi | ||||
|  | ||||
| if [ -n "${CONDA_CMAKE}" ]; then | ||||
|   # TODO: This is to make sure that the same cmake and numpy version from install conda | ||||
|   # script is used. Without this step, the newer cmake version (3.25.2) downloaded by | ||||
|   # triton build step via pip will fail to detect conda MKL. Once that issue is fixed, | ||||
|   # this can be removed. | ||||
|   # | ||||
|   # The correct numpy version also needs to be set here because conda claims that it | ||||
|   # causes inconsistent environment.  Without this, conda will attempt to install the | ||||
|   # latest numpy version, which fails ASAN tests with the following import error: Numba | ||||
|   # needs NumPy 1.20 or less. | ||||
|   conda_reinstall cmake="${CMAKE_VERSION}" | ||||
|   # Note that we install numpy with pip as conda might not have the version we want | ||||
|   pip_install --force-reinstall numpy=="${NUMPY_VERSION}" | ||||
| fi | ||||
| @ -36,12 +36,7 @@ function install_ucc() { | ||||
|   git submodule update --init --recursive | ||||
|  | ||||
|   ./autogen.sh | ||||
|   # We only run distributed tests on Tesla M60 and A10G | ||||
|   NVCC_GENCODE="-gencode=arch=compute_52,code=sm_52 -gencode=arch=compute_86,code=compute_86" | ||||
|   ./configure --prefix=$UCC_HOME          \ | ||||
|     --with-ucx=$UCX_HOME                  \ | ||||
|     --with-cuda=$with_cuda                \ | ||||
|     --with-nvcc-gencode="${NVCC_GENCODE}" | ||||
|   ./configure --prefix=$UCC_HOME --with-ucx=$UCX_HOME --with-cuda=$with_cuda | ||||
|   time make -j | ||||
|   sudo make install | ||||
|  | ||||
|  | ||||
| @ -5,7 +5,8 @@ set -ex | ||||
| install_ubuntu() { | ||||
|   apt-get update | ||||
|   apt-get install -y --no-install-recommends \ | ||||
|           libopencv-dev | ||||
|           libopencv-dev \ | ||||
|           libavcodec-dev | ||||
|  | ||||
|   # Cleanup | ||||
|   apt-get autoclean && apt-get clean | ||||
| @ -18,7 +19,8 @@ install_centos() { | ||||
|   yum --enablerepo=extras install -y epel-release | ||||
|  | ||||
|   yum install -y \ | ||||
|       opencv-devel | ||||
|       opencv-devel \ | ||||
|       ffmpeg-devel | ||||
|  | ||||
|   # Cleanup | ||||
|   yum clean all | ||||
| @ -41,6 +43,3 @@ case "$ID" in | ||||
|     exit 1 | ||||
|     ;; | ||||
| esac | ||||
|  | ||||
| # Cache vision models used by the test | ||||
| source "$(dirname "${BASH_SOURCE[0]}")/cache_vision_models.sh" | ||||
|  | ||||
| @ -1,161 +0,0 @@ | ||||
| #!/bin/bash | ||||
| set -xe | ||||
| # Script used in CI and CD pipeline | ||||
|  | ||||
| # Intel® software for general purpose GPU capabilities. | ||||
| # Refer to https://www.intel.com/content/www/us/en/developer/articles/tool/pytorch-prerequisites-for-intel-gpus.html | ||||
|  | ||||
| # Users should update to the latest version as it becomes available | ||||
|  | ||||
| function install_ubuntu() { | ||||
|     . /etc/os-release | ||||
|     if [[ ! " jammy " =~ " ${VERSION_CODENAME} " ]]; then | ||||
|         echo "Ubuntu version ${VERSION_CODENAME} not supported" | ||||
|         exit | ||||
|     fi | ||||
|  | ||||
|     apt-get update -y | ||||
|     apt-get install -y gpg-agent wget | ||||
|     # To add the online network package repository for the GPU Driver | ||||
|     wget -qO - https://repositories.intel.com/gpu/intel-graphics.key \ | ||||
|         | gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg | ||||
|     echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] \ | ||||
|         https://repositories.intel.com/gpu/ubuntu ${VERSION_CODENAME}${XPU_DRIVER_VERSION} unified" \ | ||||
|         | tee /etc/apt/sources.list.d/intel-gpu-${VERSION_CODENAME}.list | ||||
|     # To add the online network network package repository for the Intel Support Packages | ||||
|     wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB \ | ||||
|         | gpg --dearmor > /usr/share/keyrings/intel-for-pytorch-gpu-dev-keyring.gpg | ||||
|     echo "deb [signed-by=/usr/share/keyrings/intel-for-pytorch-gpu-dev-keyring.gpg] \ | ||||
|         https://apt.repos.intel.com/intel-for-pytorch-gpu-dev all main" \ | ||||
|         | tee /etc/apt/sources.list.d/intel-for-pytorch-gpu-dev.list | ||||
|  | ||||
|     # Update the packages list and repository index | ||||
|     apt-get update | ||||
|  | ||||
|     # The xpu-smi packages | ||||
|     apt-get install -y flex bison xpu-smi | ||||
|     # Compute and Media Runtimes | ||||
|     apt-get install -y \ | ||||
|         intel-opencl-icd intel-level-zero-gpu level-zero \ | ||||
|         intel-media-va-driver-non-free libmfx1 libmfxgen1 libvpl2 \ | ||||
|         libegl-mesa0 libegl1-mesa libegl1-mesa-dev libgbm1 libgl1-mesa-dev libgl1-mesa-dri \ | ||||
|         libglapi-mesa libgles2-mesa-dev libglx-mesa0 libigdgmm12 libxatracker2 mesa-va-drivers \ | ||||
|         mesa-vdpau-drivers mesa-vulkan-drivers va-driver-all vainfo hwinfo clinfo | ||||
|     # Development Packages | ||||
|     apt-get install -y libigc-dev intel-igc-cm libigdfcl-dev libigfxcmrt-dev level-zero-dev | ||||
|     # Install Intel Support Packages | ||||
|     if [ -n "$XPU_VERSION" ]; then | ||||
|         apt-get install -y intel-for-pytorch-gpu-dev-${XPU_VERSION} intel-pti-dev | ||||
|     else | ||||
|         apt-get install -y intel-for-pytorch-gpu-dev intel-pti-dev | ||||
|     fi | ||||
|  | ||||
|     # Cleanup | ||||
|     apt-get autoclean && apt-get clean | ||||
|     rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* | ||||
| } | ||||
|  | ||||
| function install_rhel() { | ||||
|     . /etc/os-release | ||||
|     if [[ "${ID}" == "rhel" ]]; then | ||||
|         if [[ ! " 8.6 8.8 8.9 9.0 9.2 9.3 " =~ " ${VERSION_ID} " ]]; then | ||||
|             echo "RHEL version ${VERSION_ID} not supported" | ||||
|             exit | ||||
|         fi | ||||
|     elif [[ "${ID}" == "almalinux" ]]; then | ||||
|         # Workaround for almalinux8 which used by quay.io/pypa/manylinux_2_28_x86_64 | ||||
|         VERSION_ID="8.6" | ||||
|     fi | ||||
|  | ||||
|     dnf install -y 'dnf-command(config-manager)' | ||||
|     # To add the online network package repository for the GPU Driver | ||||
|     dnf config-manager --add-repo \ | ||||
|         https://repositories.intel.com/gpu/rhel/${VERSION_ID}${XPU_DRIVER_VERSION}/unified/intel-gpu-${VERSION_ID}.repo | ||||
|     # To add the online network network package repository for the Intel Support Packages | ||||
|     tee > /etc/yum.repos.d/intel-for-pytorch-gpu-dev.repo << EOF | ||||
| [intel-for-pytorch-gpu-dev] | ||||
| name=Intel for Pytorch GPU dev repository | ||||
| baseurl=https://yum.repos.intel.com/intel-for-pytorch-gpu-dev | ||||
| enabled=1 | ||||
| gpgcheck=1 | ||||
| repo_gpgcheck=1 | ||||
| gpgkey=https://yum.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB | ||||
| EOF | ||||
|  | ||||
|     # The xpu-smi packages | ||||
|     dnf install -y xpu-smi | ||||
|     # Compute and Media Runtimes | ||||
|     dnf install --skip-broken -y \ | ||||
|         intel-opencl intel-media intel-mediasdk libmfxgen1 libvpl2\ | ||||
|         level-zero intel-level-zero-gpu mesa-dri-drivers mesa-vulkan-drivers \ | ||||
|         mesa-vdpau-drivers libdrm mesa-libEGL mesa-libgbm mesa-libGL \ | ||||
|         mesa-libxatracker libvpl-tools intel-metrics-discovery \ | ||||
|         intel-metrics-library intel-igc-core intel-igc-cm \ | ||||
|         libva libva-utils intel-gmmlib libmetee intel-gsc intel-ocloc | ||||
|     # Development packages | ||||
|     dnf install -y --refresh \ | ||||
|         intel-igc-opencl-devel level-zero-devel intel-gsc-devel libmetee-devel \ | ||||
|         level-zero-devel | ||||
|     # Install Intel Support Packages | ||||
|     yum install -y intel-for-pytorch-gpu-dev intel-pti-dev | ||||
|  | ||||
|     # Cleanup | ||||
|     dnf clean all | ||||
|     rm -rf /var/cache/yum | ||||
|     rm -rf /var/lib/yum/yumdb | ||||
|     rm -rf /var/lib/yum/history | ||||
| } | ||||
|  | ||||
| function install_sles() { | ||||
|     . /etc/os-release | ||||
|     VERSION_SP=${VERSION_ID//./sp} | ||||
|     if [[ ! " 15sp4 15sp5 " =~ " ${VERSION_SP} " ]]; then | ||||
|         echo "SLES version ${VERSION_ID} not supported" | ||||
|         exit | ||||
|     fi | ||||
|  | ||||
|     # To add the online network package repository for the GPU Driver | ||||
|     zypper addrepo -f -r \ | ||||
|         https://repositories.intel.com/gpu/sles/${VERSION_SP}${XPU_DRIVER_VERSION}/unified/intel-gpu-${VERSION_SP}.repo | ||||
|     rpm --import https://repositories.intel.com/gpu/intel-graphics.key | ||||
|     # To add the online network network package repository for the Intel Support Packages | ||||
|     zypper addrepo https://yum.repos.intel.com/intel-for-pytorch-gpu-dev intel-for-pytorch-gpu-dev | ||||
|     rpm --import https://yum.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB | ||||
|  | ||||
|     # The xpu-smi packages | ||||
|     zypper install -y lsb-release flex bison xpu-smi | ||||
|     # Compute and Media Runtimes | ||||
|     zypper install -y intel-level-zero-gpu level-zero intel-gsc intel-opencl intel-ocloc \ | ||||
|         intel-media-driver libigfxcmrt7 libvpl2 libvpl-tools libmfxgen1 libmfx1 | ||||
|     # Development packages | ||||
|     zypper install -y libigdfcl-devel intel-igc-cm libigfxcmrt-devel level-zero-devel | ||||
|  | ||||
|     # Install Intel Support Packages | ||||
|     zypper install -y intel-for-pytorch-gpu-dev intel-pti-dev | ||||
|  | ||||
| } | ||||
|  | ||||
| # Default use GPU driver LTS releases | ||||
| XPU_DRIVER_VERSION="/lts/2350" | ||||
| if [[ "${XPU_DRIVER_TYPE,,}" == "rolling" ]]; then | ||||
|     # Use GPU driver rolling releases | ||||
|     XPU_DRIVER_VERSION="" | ||||
| fi | ||||
|  | ||||
| # The installation depends on the base OS | ||||
| ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"') | ||||
| case "$ID" in | ||||
|     ubuntu) | ||||
|         install_ubuntu | ||||
|     ;; | ||||
|     rhel|almalinux) | ||||
|         install_rhel | ||||
|     ;; | ||||
|     sles) | ||||
|         install_sles | ||||
|     ;; | ||||
|     *) | ||||
|         echo "Unable to determine OS..." | ||||
|         exit 1 | ||||
|     ;; | ||||
| esac | ||||
| @ -1,100 +0,0 @@ | ||||
| ARG CUDA_VERSION=10.2 | ||||
| ARG BASE_TARGET=cuda${CUDA_VERSION} | ||||
| FROM centos:7 as base | ||||
|  | ||||
| ENV LC_ALL en_US.UTF-8 | ||||
| ENV LANG en_US.UTF-8 | ||||
| ENV LANGUAGE en_US.UTF-8 | ||||
|  | ||||
| ARG DEVTOOLSET_VERSION=9 | ||||
| RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo | ||||
| RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo | ||||
| RUN sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo | ||||
| RUN yum update -y | ||||
| RUN yum install -y wget curl perl util-linux xz bzip2 git patch which unzip | ||||
| # Just add everything as a safe.directory for git since these will be used in multiple places with git | ||||
| RUN git config --global --add safe.directory '*' | ||||
| RUN yum install -y yum-utils centos-release-scl | ||||
| RUN yum-config-manager --enable rhel-server-rhscl-7-rpms | ||||
| RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo | ||||
| RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo | ||||
| RUN sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo | ||||
| RUN yum install -y devtoolset-${DEVTOOLSET_VERSION}-gcc devtoolset-${DEVTOOLSET_VERSION}-gcc-c++ devtoolset-${DEVTOOLSET_VERSION}-gcc-gfortran devtoolset-${DEVTOOLSET_VERSION}-binutils | ||||
| # EPEL for cmake | ||||
| RUN yum --enablerepo=extras install -y epel-release | ||||
|  | ||||
| # cmake | ||||
| RUN yum install -y cmake3 && \ | ||||
|     ln -s /usr/bin/cmake3 /usr/bin/cmake | ||||
| ENV PATH=/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH | ||||
| ENV LD_LIBRARY_PATH=/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/lib64:/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/lib:$LD_LIBRARY_PATH | ||||
|  | ||||
| RUN yum install -y autoconf aclocal automake make sudo | ||||
| RUN rm -rf /usr/local/cuda-* | ||||
|  | ||||
| FROM base as patchelf | ||||
| # Install patchelf | ||||
| ADD ./common/install_patchelf.sh install_patchelf.sh | ||||
| RUN bash ./install_patchelf.sh && rm install_patchelf.sh && cp $(which patchelf) /patchelf | ||||
|  | ||||
| FROM base as openssl | ||||
| # Install openssl | ||||
| ADD ./common/install_openssl.sh install_openssl.sh | ||||
| RUN bash ./install_openssl.sh && rm install_openssl.sh | ||||
|  | ||||
| FROM base as conda | ||||
| # Install Anaconda | ||||
| ADD ./common/install_conda_docker.sh install_conda.sh | ||||
| RUN bash ./install_conda.sh && rm install_conda.sh | ||||
|  | ||||
| # Install CUDA | ||||
| FROM base as cuda | ||||
| ARG CUDA_VERSION=10.2 | ||||
| RUN rm -rf /usr/local/cuda-* | ||||
| ADD ./common/install_cuda.sh install_cuda.sh | ||||
| ENV CUDA_HOME=/usr/local/cuda-${CUDA_VERSION} | ||||
| # Preserve CUDA_VERSION for the builds | ||||
| ENV CUDA_VERSION=${CUDA_VERSION} | ||||
| # Make things in our path by default | ||||
| ENV PATH=/usr/local/cuda-${CUDA_VERSION}/bin:$PATH | ||||
|  | ||||
| FROM cuda as cuda11.8 | ||||
| RUN bash ./install_cuda.sh 11.8 | ||||
| ENV DESIRED_CUDA=11.8 | ||||
|  | ||||
| FROM cuda as cuda12.1 | ||||
| RUN bash ./install_cuda.sh 12.1 | ||||
| ENV DESIRED_CUDA=12.1 | ||||
|  | ||||
| FROM cuda as cuda12.4 | ||||
| RUN bash ./install_cuda.sh 12.4 | ||||
| ENV DESIRED_CUDA=12.4 | ||||
|  | ||||
| # Install MNIST test data | ||||
| FROM base as mnist | ||||
| ADD ./common/install_mnist.sh install_mnist.sh | ||||
| RUN bash ./install_mnist.sh | ||||
|  | ||||
| FROM base as all_cuda | ||||
| COPY --from=cuda11.8  /usr/local/cuda-11.8 /usr/local/cuda-11.8 | ||||
| COPY --from=cuda12.1  /usr/local/cuda-12.1 /usr/local/cuda-12.1 | ||||
| COPY --from=cuda12.4  /usr/local/cuda-12.4 /usr/local/cuda-12.4 | ||||
|  | ||||
| # Final step | ||||
| FROM ${BASE_TARGET} as final | ||||
| COPY --from=openssl            /opt/openssl           /opt/openssl | ||||
| COPY --from=patchelf           /patchelf              /usr/local/bin/patchelf | ||||
| COPY --from=conda              /opt/conda             /opt/conda | ||||
|  | ||||
| # Add jni.h for java host build. | ||||
| COPY ./common/install_jni.sh install_jni.sh | ||||
| COPY ./java/jni.h jni.h | ||||
| RUN bash ./install_jni.sh && rm install_jni.sh | ||||
|  | ||||
| ENV  PATH /opt/conda/bin:$PATH | ||||
| COPY --from=mnist  /usr/local/mnist /usr/local/mnist | ||||
| RUN rm -rf /usr/local/cuda | ||||
| RUN chmod o+rw /usr/local | ||||
| RUN touch /.condarc && \ | ||||
|     chmod o+rw /.condarc && \ | ||||
|     chmod -R o+rw /opt/conda | ||||
| @ -1,76 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
| # Script used only in CD pipeline | ||||
|  | ||||
| set -eou pipefail | ||||
|  | ||||
| image="$1" | ||||
| shift | ||||
|  | ||||
| if [ -z "${image}" ]; then | ||||
|   echo "Usage: $0 IMAGE" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| DOCKER_IMAGE_NAME="pytorch/${image}" | ||||
|  | ||||
|  | ||||
| export DOCKER_BUILDKIT=1 | ||||
| TOPDIR=$(git rev-parse --show-toplevel) | ||||
|  | ||||
| CUDA_VERSION=${CUDA_VERSION:-12.1} | ||||
|  | ||||
| case ${CUDA_VERSION} in | ||||
|   cpu) | ||||
|     BASE_TARGET=base | ||||
|     DOCKER_TAG=cpu | ||||
|     ;; | ||||
|   all) | ||||
|     BASE_TARGET=all_cuda | ||||
|     DOCKER_TAG=latest | ||||
|     ;; | ||||
|   *) | ||||
|     BASE_TARGET=cuda${CUDA_VERSION} | ||||
|     DOCKER_TAG=cuda${CUDA_VERSION} | ||||
|     ;; | ||||
| esac | ||||
|  | ||||
|  | ||||
| ( | ||||
|   set -x | ||||
|   docker build \ | ||||
|     --target final \ | ||||
|     --progress plain \ | ||||
|     --build-arg "BASE_TARGET=${BASE_TARGET}" \ | ||||
|     --build-arg "CUDA_VERSION=${CUDA_VERSION}" \ | ||||
|     --build-arg "DEVTOOLSET_VERSION=9" \ | ||||
|     -t ${DOCKER_IMAGE_NAME} \ | ||||
|     $@ \ | ||||
|     -f "${TOPDIR}/.ci/docker/conda/Dockerfile" \ | ||||
|     ${TOPDIR}/.ci/docker/ | ||||
| ) | ||||
|  | ||||
| if [[ "${DOCKER_TAG}" =~ ^cuda* ]]; then | ||||
|   # Test that we're using the right CUDA compiler | ||||
|   ( | ||||
|     set -x | ||||
|     docker run --rm "${DOCKER_IMAGE_NAME}" nvcc --version | grep "cuda_${CUDA_VERSION}" | ||||
|   ) | ||||
| fi | ||||
|  | ||||
| GITHUB_REF=${GITHUB_REF:-$(git symbolic-ref -q HEAD || git describe --tags --exact-match)} | ||||
| GIT_BRANCH_NAME=${GITHUB_REF##*/} | ||||
| GIT_COMMIT_SHA=${GITHUB_SHA:-$(git rev-parse HEAD)} | ||||
| DOCKER_IMAGE_BRANCH_TAG=${DOCKER_IMAGE_NAME}-${GIT_BRANCH_NAME} | ||||
| DOCKER_IMAGE_SHA_TAG=${DOCKER_IMAGE_NAME}-${GIT_COMMIT_SHA} | ||||
| if [[ "${WITH_PUSH:-}" == true ]]; then | ||||
|   ( | ||||
|     set -x | ||||
|     docker push "${DOCKER_IMAGE_NAME}" | ||||
|     if [[ -n ${GITHUB_REF} ]]; then | ||||
|         docker tag ${DOCKER_IMAGE_NAME} ${DOCKER_IMAGE_BRANCH_TAG} | ||||
|         docker tag ${DOCKER_IMAGE_NAME} ${DOCKER_IMAGE_SHA_TAG} | ||||
|         docker push "${DOCKER_IMAGE_BRANCH_TAG}" | ||||
|         docker push "${DOCKER_IMAGE_SHA_TAG}" | ||||
|     fi | ||||
|   ) | ||||
| fi | ||||
| @ -1,107 +0,0 @@ | ||||
| ARG BASE_TARGET=base | ||||
| ARG GPU_IMAGE=ubuntu:20.04 | ||||
| FROM ${GPU_IMAGE} as base | ||||
|  | ||||
| ENV DEBIAN_FRONTEND=noninteractive | ||||
|  | ||||
| RUN apt-get clean && apt-get update | ||||
| RUN apt-get install -y curl locales g++ git-all autoconf automake make cmake wget unzip sudo | ||||
| # Just add everything as a safe.directory for git since these will be used in multiple places with git | ||||
| RUN git config --global --add safe.directory '*' | ||||
|  | ||||
| RUN locale-gen en_US.UTF-8 | ||||
|  | ||||
| ENV LC_ALL en_US.UTF-8 | ||||
| ENV LANG en_US.UTF-8 | ||||
| ENV LANGUAGE en_US.UTF-8 | ||||
|  | ||||
| # Install openssl | ||||
| FROM base as openssl | ||||
| ADD ./common/install_openssl.sh install_openssl.sh | ||||
| RUN bash ./install_openssl.sh && rm install_openssl.sh | ||||
|  | ||||
| # Install python | ||||
| FROM base as python | ||||
| ADD common/install_cpython.sh install_cpython.sh | ||||
| RUN apt-get update -y && \ | ||||
|     apt-get install build-essential gdb lcov libbz2-dev libffi-dev \ | ||||
|         libgdbm-dev liblzma-dev libncurses5-dev libreadline6-dev \ | ||||
|         libsqlite3-dev libssl-dev lzma lzma-dev tk-dev uuid-dev zlib1g-dev -y && \ | ||||
|     bash ./install_cpython.sh && \ | ||||
|     rm install_cpython.sh && \ | ||||
|     apt-get clean | ||||
|  | ||||
| FROM base as conda | ||||
| ADD ./common/install_conda_docker.sh install_conda.sh | ||||
| RUN bash ./install_conda.sh && rm install_conda.sh | ||||
|  | ||||
| FROM base as cpu | ||||
| # Install Anaconda | ||||
| COPY --from=conda /opt/conda /opt/conda | ||||
| # Install python | ||||
| COPY --from=python /opt/python    /opt/python | ||||
| COPY --from=python /opt/_internal /opt/_internal | ||||
| ENV PATH=/opt/conda/bin:/usr/local/cuda/bin:$PATH | ||||
| # Install MKL | ||||
| ADD ./common/install_mkl.sh install_mkl.sh | ||||
| RUN bash ./install_mkl.sh && rm install_mkl.sh | ||||
|  | ||||
| FROM cpu as cuda | ||||
| ADD ./common/install_cuda.sh install_cuda.sh | ||||
| ADD ./common/install_magma.sh install_magma.sh | ||||
| ENV CUDA_HOME /usr/local/cuda | ||||
|  | ||||
| FROM cuda as cuda11.8 | ||||
| RUN bash ./install_cuda.sh 11.8 | ||||
| RUN bash ./install_magma.sh 11.8 | ||||
| RUN ln -sf /usr/local/cuda-11.8 /usr/local/cuda | ||||
|  | ||||
| FROM cuda as cuda12.1 | ||||
| RUN bash ./install_cuda.sh 12.1 | ||||
| RUN bash ./install_magma.sh 12.1 | ||||
| RUN ln -sf /usr/local/cuda-12.1 /usr/local/cuda | ||||
|  | ||||
| FROM cuda as cuda12.4 | ||||
| RUN bash ./install_cuda.sh 12.4 | ||||
| RUN bash ./install_magma.sh 12.4 | ||||
| RUN ln -sf /usr/local/cuda-12.4 /usr/local/cuda | ||||
|  | ||||
| FROM cpu as rocm | ||||
| ARG PYTORCH_ROCM_ARCH | ||||
| ENV PYTORCH_ROCM_ARCH ${PYTORCH_ROCM_ARCH} | ||||
| ENV MKLROOT /opt/intel | ||||
| # Adding ROCM_PATH env var so that LoadHip.cmake (even with logic updated for ROCm6.0) | ||||
| # find HIP works for ROCm5.7. Not needed for ROCm6.0 and above. | ||||
| # Remove below when ROCm5.7 is not in support matrix anymore. | ||||
| ENV ROCM_PATH /opt/rocm | ||||
| # No need to install ROCm as base docker image should have full ROCm install | ||||
| #ADD ./common/install_rocm.sh install_rocm.sh | ||||
| ADD ./common/install_rocm_drm.sh install_rocm_drm.sh | ||||
| ADD ./common/install_rocm_magma.sh install_rocm_magma.sh | ||||
| # gfortran and python needed for building magma from source for ROCm | ||||
| RUN apt-get update -y && \ | ||||
|     apt-get install gfortran -y && \ | ||||
|     apt-get install python -y && \ | ||||
|     apt-get clean | ||||
|  | ||||
| RUN bash ./install_rocm_drm.sh && rm install_rocm_drm.sh | ||||
| RUN bash ./install_rocm_magma.sh && rm install_rocm_magma.sh | ||||
|  | ||||
| # Install AOTriton | ||||
| COPY ./common/common_utils.sh common_utils.sh | ||||
| COPY ./aotriton_version.txt aotriton_version.txt | ||||
| COPY ./common/install_aotriton.sh install_aotriton.sh | ||||
| RUN bash ./install_aotriton.sh /opt/rocm && rm install_aotriton.sh aotriton_version.txt | ||||
| ENV AOTRITON_INSTALLED_PREFIX /opt/rocm/aotriton | ||||
|  | ||||
| FROM ${BASE_TARGET} as final | ||||
| COPY --from=openssl            /opt/openssl           /opt/openssl | ||||
| # Install patchelf | ||||
| ADD ./common/install_patchelf.sh install_patchelf.sh | ||||
| RUN bash ./install_patchelf.sh && rm install_patchelf.sh | ||||
| # Install Anaconda | ||||
| COPY --from=conda /opt/conda /opt/conda | ||||
| # Install python | ||||
| COPY --from=python /opt/python    /opt/python | ||||
| COPY --from=python /opt/_internal /opt/_internal | ||||
| ENV PATH=/opt/conda/bin:/usr/local/cuda/bin:$PATH | ||||
| @ -1,93 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
| # Script used only in CD pipeline | ||||
|  | ||||
| set -eou pipefail | ||||
|  | ||||
| image="$1" | ||||
| shift | ||||
|  | ||||
| if [ -z "${image}" ]; then | ||||
|   echo "Usage: $0 IMAGE" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| DOCKER_IMAGE="pytorch/${image}" | ||||
|  | ||||
| TOPDIR=$(git rev-parse --show-toplevel) | ||||
|  | ||||
| GPU_ARCH_TYPE=${GPU_ARCH_TYPE:-cpu} | ||||
| GPU_ARCH_VERSION=${GPU_ARCH_VERSION:-} | ||||
|  | ||||
| WITH_PUSH=${WITH_PUSH:-} | ||||
|  | ||||
| DOCKER=${DOCKER:-docker} | ||||
|  | ||||
| case ${GPU_ARCH_TYPE} in | ||||
|     cpu) | ||||
|         BASE_TARGET=cpu | ||||
|         DOCKER_TAG=cpu | ||||
|         GPU_IMAGE=ubuntu:20.04 | ||||
|         DOCKER_GPU_BUILD_ARG="" | ||||
|         ;; | ||||
|     cuda) | ||||
|         BASE_TARGET=cuda${GPU_ARCH_VERSION} | ||||
|         DOCKER_TAG=cuda${GPU_ARCH_VERSION} | ||||
|         GPU_IMAGE=ubuntu:20.04 | ||||
|         DOCKER_GPU_BUILD_ARG="" | ||||
|         ;; | ||||
|     rocm) | ||||
|         BASE_TARGET=rocm | ||||
|         DOCKER_TAG=rocm${GPU_ARCH_VERSION} | ||||
|         GPU_IMAGE=rocm/dev-ubuntu-20.04:${GPU_ARCH_VERSION}-complete | ||||
|         PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx1030;gfx1100" | ||||
|         ROCM_REGEX="([0-9]+)\.([0-9]+)[\.]?([0-9]*)" | ||||
|         if [[ $GPU_ARCH_VERSION =~ $ROCM_REGEX ]]; then | ||||
|             ROCM_VERSION_INT=$((${BASH_REMATCH[1]}*10000 + ${BASH_REMATCH[2]}*100 + ${BASH_REMATCH[3]:-0})) | ||||
|         else | ||||
|             echo "ERROR: rocm regex failed" | ||||
|             exit 1 | ||||
|         fi | ||||
|         if [[ $ROCM_VERSION_INT -ge 60000 ]]; then | ||||
|             PYTORCH_ROCM_ARCH+=";gfx942" | ||||
|         fi | ||||
|         DOCKER_GPU_BUILD_ARG="--build-arg PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH}" | ||||
|         ;; | ||||
|     *) | ||||
|         echo "ERROR: Unrecognized GPU_ARCH_TYPE: ${GPU_ARCH_TYPE}" | ||||
|         exit 1 | ||||
|         ;; | ||||
| esac | ||||
|  | ||||
|  | ||||
| ( | ||||
|     set -x | ||||
|     DOCKER_BUILDKIT=1 ${DOCKER} build \ | ||||
|          --target final \ | ||||
|         ${DOCKER_GPU_BUILD_ARG} \ | ||||
|         --build-arg "GPU_IMAGE=${GPU_IMAGE}" \ | ||||
|         --build-arg "BASE_TARGET=${BASE_TARGET}" \ | ||||
|         -t "${DOCKER_IMAGE}" \ | ||||
|         $@ \ | ||||
|         -f "${TOPDIR}/.ci/docker/libtorch/Dockerfile" \ | ||||
|         "${TOPDIR}/.ci/docker/" | ||||
|  | ||||
| ) | ||||
|  | ||||
| GITHUB_REF=${GITHUB_REF:-$(git symbolic-ref -q HEAD || git describe --tags --exact-match)} | ||||
| GIT_BRANCH_NAME=${GITHUB_REF##*/} | ||||
| GIT_COMMIT_SHA=${GITHUB_SHA:-$(git rev-parse HEAD)} | ||||
| DOCKER_IMAGE_BRANCH_TAG=${DOCKER_IMAGE}-${GIT_BRANCH_NAME} | ||||
| DOCKER_IMAGE_SHA_TAG=${DOCKER_IMAGE}-${GIT_COMMIT_SHA} | ||||
|  | ||||
| if [[ "${WITH_PUSH}" == true ]]; then | ||||
|   ( | ||||
|     set -x | ||||
|     ${DOCKER} push "${DOCKER_IMAGE}" | ||||
|     if [[ -n ${GITHUB_REF} ]]; then | ||||
|         ${DOCKER} tag ${DOCKER_IMAGE} ${DOCKER_IMAGE_BRANCH_TAG} | ||||
|         ${DOCKER} tag ${DOCKER_IMAGE} ${DOCKER_IMAGE_SHA_TAG} | ||||
|         ${DOCKER} push "${DOCKER_IMAGE_BRANCH_TAG}" | ||||
|         ${DOCKER} push "${DOCKER_IMAGE_SHA_TAG}" | ||||
|     fi | ||||
|   ) | ||||
| fi | ||||
| @ -1,44 +0,0 @@ | ||||
| ARG UBUNTU_VERSION | ||||
|  | ||||
| FROM ubuntu:${UBUNTU_VERSION} | ||||
|  | ||||
| ARG UBUNTU_VERSION | ||||
|  | ||||
| ENV DEBIAN_FRONTEND noninteractive | ||||
|  | ||||
| # Install common dependencies (so that this step can be cached separately) | ||||
| COPY ./common/install_base.sh install_base.sh | ||||
| RUN bash ./install_base.sh && rm install_base.sh | ||||
|  | ||||
| # Install missing libomp-dev | ||||
| RUN apt-get update && apt-get install -y --no-install-recommends libomp-dev && apt-get autoclean && apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* | ||||
|  | ||||
| # Install user | ||||
| COPY ./common/install_user.sh install_user.sh | ||||
| RUN bash ./install_user.sh && rm install_user.sh | ||||
|  | ||||
| # Install conda and other packages (e.g., numpy, pytest) | ||||
| ARG ANACONDA_PYTHON_VERSION | ||||
| ARG CONDA_CMAKE | ||||
| ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION | ||||
| ENV PATH /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/bin:/opt/conda/bin:$PATH | ||||
| COPY requirements-ci.txt /opt/conda/requirements-ci.txt | ||||
| COPY ./common/install_conda.sh install_conda.sh | ||||
| COPY ./common/common_utils.sh common_utils.sh | ||||
| RUN bash ./install_conda.sh && rm install_conda.sh common_utils.sh /opt/conda/requirements-ci.txt | ||||
|  | ||||
| # Install cuda and cudnn | ||||
| ARG CUDA_VERSION | ||||
| COPY ./common/install_cuda.sh install_cuda.sh | ||||
| RUN bash ./install_cuda.sh ${CUDA_VERSION} && rm install_cuda.sh | ||||
| ENV DESIRED_CUDA ${CUDA_VERSION} | ||||
| ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:$PATH | ||||
|  | ||||
| # Note that Docker build forbids copying file outside the build context | ||||
| COPY ./common/install_linter.sh install_linter.sh | ||||
| COPY ./common/common_utils.sh common_utils.sh | ||||
| RUN bash ./install_linter.sh | ||||
| RUN rm install_linter.sh common_utils.sh | ||||
|  | ||||
| USER jenkins | ||||
| CMD ["bash"] | ||||
| @ -1,202 +0,0 @@ | ||||
| # syntax = docker/dockerfile:experimental | ||||
| ARG ROCM_VERSION=3.7 | ||||
| ARG BASE_CUDA_VERSION=11.8 | ||||
|  | ||||
| ARG GPU_IMAGE=centos:7 | ||||
| FROM centos:7 as base | ||||
|  | ||||
| ENV LC_ALL en_US.UTF-8 | ||||
| ENV LANG en_US.UTF-8 | ||||
| ENV LANGUAGE en_US.UTF-8 | ||||
|  | ||||
| ARG DEVTOOLSET_VERSION=9 | ||||
| # Note: This is required patch since CentOS have reached EOL | ||||
| # otherwise any yum install setp will fail | ||||
| RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo | ||||
| RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo | ||||
| RUN sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo | ||||
| RUN yum install -y wget curl perl util-linux xz bzip2 git patch which perl zlib-devel | ||||
| # Just add everything as a safe.directory for git since these will be used in multiple places with git | ||||
| RUN git config --global --add safe.directory '*' | ||||
| RUN yum install -y yum-utils centos-release-scl | ||||
| RUN yum-config-manager --enable rhel-server-rhscl-7-rpms | ||||
| # Note: After running yum-config-manager --enable rhel-server-rhscl-7-rpms | ||||
| # patch is required once again. Somehow this steps adds mirror.centos.org | ||||
| RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo | ||||
| RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo | ||||
| RUN sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo | ||||
| RUN yum install -y devtoolset-${DEVTOOLSET_VERSION}-gcc devtoolset-${DEVTOOLSET_VERSION}-gcc-c++ devtoolset-${DEVTOOLSET_VERSION}-gcc-gfortran devtoolset-${DEVTOOLSET_VERSION}-binutils | ||||
| ENV PATH=/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH | ||||
| ENV LD_LIBRARY_PATH=/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/lib64:/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/lib:$LD_LIBRARY_PATH | ||||
|  | ||||
| RUN yum --enablerepo=extras install -y epel-release | ||||
|  | ||||
| # cmake-3.18.4 from pip | ||||
| RUN yum install -y python3-pip && \ | ||||
|     python3 -mpip install cmake==3.18.4 && \ | ||||
|     ln -s /usr/local/bin/cmake /usr/bin/cmake | ||||
|  | ||||
| RUN yum install -y autoconf aclocal automake make sudo | ||||
|  | ||||
| FROM base as openssl | ||||
| # Install openssl (this must precede `build python` step) | ||||
| # (In order to have a proper SSL module, Python is compiled | ||||
| # against a recent openssl [see env vars above], which is linked | ||||
| # statically. We delete openssl afterwards.) | ||||
| ADD ./common/install_openssl.sh install_openssl.sh | ||||
| RUN bash ./install_openssl.sh && rm install_openssl.sh | ||||
|  | ||||
| # EPEL for cmake | ||||
| FROM base as patchelf | ||||
| # Install patchelf | ||||
| ADD ./common/install_patchelf.sh install_patchelf.sh | ||||
| RUN bash ./install_patchelf.sh && rm install_patchelf.sh | ||||
| RUN cp $(which patchelf) /patchelf | ||||
|  | ||||
| FROM patchelf as python | ||||
| # build python | ||||
| COPY manywheel/build_scripts /build_scripts | ||||
| ADD ./common/install_cpython.sh /build_scripts/install_cpython.sh | ||||
| RUN bash build_scripts/build.sh && rm -r build_scripts | ||||
|  | ||||
| FROM base as cuda | ||||
| ARG BASE_CUDA_VERSION=10.2 | ||||
| # Install CUDA | ||||
| ADD ./common/install_cuda.sh install_cuda.sh | ||||
| RUN bash ./install_cuda.sh ${BASE_CUDA_VERSION} && rm install_cuda.sh | ||||
|  | ||||
| FROM base as intel | ||||
| # MKL | ||||
| ADD ./common/install_mkl.sh install_mkl.sh | ||||
| RUN bash ./install_mkl.sh && rm install_mkl.sh | ||||
|  | ||||
| FROM base as magma | ||||
| ARG BASE_CUDA_VERSION=10.2 | ||||
| # Install magma | ||||
| ADD ./common/install_magma.sh install_magma.sh | ||||
| RUN bash ./install_magma.sh ${BASE_CUDA_VERSION} && rm install_magma.sh | ||||
|  | ||||
| FROM base as jni | ||||
| # Install java jni header | ||||
| ADD ./common/install_jni.sh install_jni.sh | ||||
| ADD ./java/jni.h jni.h | ||||
| RUN bash ./install_jni.sh && rm install_jni.sh | ||||
|  | ||||
| FROM base as libpng | ||||
| # Install libpng | ||||
| ADD ./common/install_libpng.sh install_libpng.sh | ||||
| RUN bash ./install_libpng.sh && rm install_libpng.sh | ||||
|  | ||||
| FROM ${GPU_IMAGE} as common | ||||
| RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo | ||||
| RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo | ||||
| RUN sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo | ||||
| ENV LC_ALL en_US.UTF-8 | ||||
| ENV LANG en_US.UTF-8 | ||||
| ENV LANGUAGE en_US.UTF-8 | ||||
| RUN yum install -y \ | ||||
|         aclocal \ | ||||
|         autoconf \ | ||||
|         automake \ | ||||
|         bison \ | ||||
|         bzip2 \ | ||||
|         curl \ | ||||
|         diffutils \ | ||||
|         file \ | ||||
|         git \ | ||||
|         make \ | ||||
|         patch \ | ||||
|         perl \ | ||||
|         unzip \ | ||||
|         util-linux \ | ||||
|         wget \ | ||||
|         which \ | ||||
|         xz \ | ||||
|         yasm | ||||
| RUN yum install -y \ | ||||
|     https://repo.ius.io/ius-release-el7.rpm \ | ||||
|     https://ossci-linux.s3.amazonaws.com/epel-release-7-14.noarch.rpm | ||||
|  | ||||
| RUN yum swap -y git git236-core | ||||
| # git236+ would refuse to run git commands in repos owned by other users | ||||
| # Which causes version check to fail, as pytorch repo is bind-mounted into the image | ||||
| # Override this behaviour by treating every folder as safe | ||||
| # For more details see https://github.com/pytorch/pytorch/issues/78659#issuecomment-1144107327 | ||||
| RUN git config --global --add safe.directory "*" | ||||
|  | ||||
| ENV SSL_CERT_FILE=/opt/_internal/certs.pem | ||||
| # Install LLVM version | ||||
| COPY --from=openssl            /opt/openssl                          /opt/openssl | ||||
| COPY --from=python             /opt/python                           /opt/python | ||||
| COPY --from=python             /opt/_internal                        /opt/_internal | ||||
| COPY --from=python             /opt/python/cp39-cp39/bin/auditwheel /usr/local/bin/auditwheel | ||||
| COPY --from=intel              /opt/intel                            /opt/intel | ||||
| COPY --from=patchelf           /usr/local/bin/patchelf               /usr/local/bin/patchelf | ||||
| COPY --from=jni                /usr/local/include/jni.h              /usr/local/include/jni.h | ||||
| COPY --from=libpng             /usr/local/bin/png*                   /usr/local/bin/ | ||||
| COPY --from=libpng             /usr/local/bin/libpng*                /usr/local/bin/ | ||||
| COPY --from=libpng             /usr/local/include/png*               /usr/local/include/ | ||||
| COPY --from=libpng             /usr/local/include/libpng*            /usr/local/include/ | ||||
| COPY --from=libpng             /usr/local/lib/libpng*                /usr/local/lib/ | ||||
| COPY --from=libpng             /usr/local/lib/pkgconfig              /usr/local/lib/pkgconfig | ||||
|  | ||||
| FROM common as cpu_final | ||||
| ARG BASE_CUDA_VERSION=10.1 | ||||
| ARG DEVTOOLSET_VERSION=9 | ||||
| RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo | ||||
| RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo | ||||
| RUN sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo | ||||
|  | ||||
| RUN yum install -y yum-utils centos-release-scl | ||||
| RUN yum-config-manager --enable rhel-server-rhscl-7-rpms | ||||
| RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo | ||||
| RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo | ||||
| RUN sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo | ||||
| RUN yum install -y devtoolset-${DEVTOOLSET_VERSION}-gcc devtoolset-${DEVTOOLSET_VERSION}-gcc-c++ devtoolset-${DEVTOOLSET_VERSION}-gcc-gfortran devtoolset-${DEVTOOLSET_VERSION}-binutils | ||||
| ENV PATH=/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH | ||||
| ENV LD_LIBRARY_PATH=/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/lib64:/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/lib:$LD_LIBRARY_PATH | ||||
|  | ||||
| # cmake is already installed inside the rocm base image, so remove if present | ||||
| RUN rpm -e cmake || true | ||||
| # cmake-3.18.4 from pip | ||||
| RUN yum install -y python3-pip && \ | ||||
|     python3 -mpip install cmake==3.18.4 && \ | ||||
|     ln -s /usr/local/bin/cmake /usr/bin/cmake | ||||
|  | ||||
| # ninja | ||||
| RUN yum install -y ninja-build | ||||
|  | ||||
| FROM cpu_final as cuda_final | ||||
| RUN rm -rf /usr/local/cuda-${BASE_CUDA_VERSION} | ||||
| COPY --from=cuda     /usr/local/cuda-${BASE_CUDA_VERSION}  /usr/local/cuda-${BASE_CUDA_VERSION} | ||||
| COPY --from=magma    /usr/local/cuda-${BASE_CUDA_VERSION}  /usr/local/cuda-${BASE_CUDA_VERSION} | ||||
| RUN ln -sf /usr/local/cuda-${BASE_CUDA_VERSION} /usr/local/cuda | ||||
| ENV PATH=/usr/local/cuda/bin:$PATH | ||||
|  | ||||
| FROM cpu_final as rocm_final | ||||
| ARG ROCM_VERSION=3.7 | ||||
| ARG PYTORCH_ROCM_ARCH | ||||
| ENV PYTORCH_ROCM_ARCH ${PYTORCH_ROCM_ARCH} | ||||
| # Adding ROCM_PATH env var so that LoadHip.cmake (even with logic updated for ROCm6.0) | ||||
| # find HIP works for ROCm5.7. Not needed for ROCm6.0 and above. | ||||
| # Remove below when ROCm5.7 is not in support matrix anymore. | ||||
| ENV ROCM_PATH /opt/rocm | ||||
| ENV MKLROOT /opt/intel | ||||
| # No need to install ROCm as base docker image should have full ROCm install | ||||
| #ADD ./common/install_rocm.sh install_rocm.sh | ||||
| #RUN ROCM_VERSION=${ROCM_VERSION} bash ./install_rocm.sh && rm install_rocm.sh | ||||
| ADD ./common/install_rocm_drm.sh install_rocm_drm.sh | ||||
| RUN bash ./install_rocm_drm.sh && rm install_rocm_drm.sh | ||||
| # cmake3 is needed for the MIOpen build | ||||
| RUN ln -sf /usr/local/bin/cmake /usr/bin/cmake3 | ||||
| ADD ./common/install_rocm_magma.sh install_rocm_magma.sh | ||||
| RUN bash ./install_rocm_magma.sh && rm install_rocm_magma.sh | ||||
| ADD ./common/install_miopen.sh install_miopen.sh | ||||
| RUN bash ./install_miopen.sh ${ROCM_VERSION} && rm install_miopen.sh | ||||
|  | ||||
| # Install AOTriton | ||||
| COPY ./common/common_utils.sh common_utils.sh | ||||
| COPY ./aotriton_version.txt aotriton_version.txt | ||||
| COPY ./common/install_aotriton.sh install_aotriton.sh | ||||
| RUN bash ./install_aotriton.sh /opt/rocm && rm install_aotriton.sh aotriton_version.txt | ||||
| ENV AOTRITON_INSTALLED_PREFIX /opt/rocm/aotriton | ||||
| @ -1,153 +0,0 @@ | ||||
| # syntax = docker/dockerfile:experimental | ||||
| ARG ROCM_VERSION=3.7 | ||||
| ARG BASE_CUDA_VERSION=10.2 | ||||
| ARG GPU_IMAGE=nvidia/cuda:${BASE_CUDA_VERSION}-devel-centos7 | ||||
| FROM quay.io/pypa/manylinux2014_x86_64 as base | ||||
|  | ||||
| ENV LC_ALL en_US.UTF-8 | ||||
| ENV LANG en_US.UTF-8 | ||||
| ENV LANGUAGE en_US.UTF-8 | ||||
|  | ||||
| RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo | ||||
| RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo | ||||
| RUN sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo | ||||
| RUN yum install -y wget curl perl util-linux xz bzip2 git patch which perl zlib-devel | ||||
| RUN yum install -y yum-utils centos-release-scl sudo | ||||
| RUN yum-config-manager --enable rhel-server-rhscl-7-rpms | ||||
| RUN yum install -y devtoolset-7-gcc devtoolset-7-gcc-c++ devtoolset-7-gcc-gfortran devtoolset-7-binutils | ||||
| ENV PATH=/opt/rh/devtoolset-7/root/usr/bin:$PATH | ||||
| ENV LD_LIBRARY_PATH=/opt/rh/devtoolset-7/root/usr/lib64:/opt/rh/devtoolset-7/root/usr/lib:$LD_LIBRARY_PATH | ||||
|  | ||||
| # cmake | ||||
| RUN yum install -y cmake3 && \ | ||||
|     ln -s /usr/bin/cmake3 /usr/bin/cmake | ||||
| FROM base as openssl | ||||
| # Install openssl (this must precede `build python` step) | ||||
| # (In order to have a proper SSL module, Python is compiled | ||||
| # against a recent openssl [see env vars above], which is linked | ||||
| # statically. We delete openssl afterwards.) | ||||
| ADD ./common/install_openssl.sh install_openssl.sh | ||||
| RUN bash ./install_openssl.sh && rm install_openssl.sh | ||||
|  | ||||
|  | ||||
|  | ||||
| # remove unncessary python versions | ||||
| RUN rm -rf /opt/python/cp26-cp26m /opt/_internal/cpython-2.6.9-ucs2 | ||||
| RUN rm -rf /opt/python/cp26-cp26mu /opt/_internal/cpython-2.6.9-ucs4 | ||||
| RUN rm -rf /opt/python/cp33-cp33m /opt/_internal/cpython-3.3.6 | ||||
| RUN rm -rf /opt/python/cp34-cp34m /opt/_internal/cpython-3.4.6 | ||||
|  | ||||
| FROM base as cuda | ||||
| ARG BASE_CUDA_VERSION=10.2 | ||||
| # Install CUDA | ||||
| ADD ./common/install_cuda.sh install_cuda.sh | ||||
| RUN bash ./install_cuda.sh ${BASE_CUDA_VERSION} && rm install_cuda.sh | ||||
|  | ||||
| FROM base as intel | ||||
| # MKL | ||||
| ADD ./common/install_mkl.sh install_mkl.sh | ||||
| RUN bash ./install_mkl.sh && rm install_mkl.sh | ||||
|  | ||||
| FROM base as magma | ||||
| ARG BASE_CUDA_VERSION=10.2 | ||||
| # Install magma | ||||
| ADD ./common/install_magma.sh install_magma.sh | ||||
| RUN bash ./install_magma.sh ${BASE_CUDA_VERSION} && rm install_magma.sh | ||||
|  | ||||
| FROM base as jni | ||||
| # Install java jni header | ||||
| ADD ./common/install_jni.sh install_jni.sh | ||||
| ADD ./java/jni.h jni.h | ||||
| RUN bash ./install_jni.sh && rm install_jni.sh | ||||
|  | ||||
| FROM base as libpng | ||||
| # Install libpng | ||||
| ADD ./common/install_libpng.sh install_libpng.sh | ||||
| RUN bash ./install_libpng.sh && rm install_libpng.sh | ||||
|  | ||||
| FROM ${GPU_IMAGE} as common | ||||
| RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo | ||||
| RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo | ||||
| RUN sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo | ||||
| ENV LC_ALL en_US.UTF-8 | ||||
| ENV LANG en_US.UTF-8 | ||||
| ENV LANGUAGE en_US.UTF-8 | ||||
| RUN yum install -y \ | ||||
|         aclocal \ | ||||
|         autoconf \ | ||||
|         automake \ | ||||
|         bison \ | ||||
|         bzip2 \ | ||||
|         curl \ | ||||
|         diffutils \ | ||||
|         file \ | ||||
|         git \ | ||||
|         make \ | ||||
|         patch \ | ||||
|         perl \ | ||||
|         unzip \ | ||||
|         util-linux \ | ||||
|         wget \ | ||||
|         which \ | ||||
|         xz \ | ||||
|         yasm | ||||
| RUN yum install -y \ | ||||
|     https://repo.ius.io/ius-release-el7.rpm \ | ||||
|     https://ossci-linux.s3.amazonaws.com/epel-release-7-14.noarch.rpm | ||||
|  | ||||
| RUN yum swap -y git git236-core | ||||
| # git236+ would refuse to run git commands in repos owned by other users | ||||
| # Which causes version check to fail, as pytorch repo is bind-mounted into the image | ||||
| # Override this behaviour by treating every folder as safe | ||||
| # For more details see https://github.com/pytorch/pytorch/issues/78659#issuecomment-1144107327 | ||||
| RUN git config --global --add safe.directory "*" | ||||
|  | ||||
| ENV SSL_CERT_FILE=/opt/_internal/certs.pem | ||||
| # Install LLVM version | ||||
| COPY --from=openssl            /opt/openssl                          /opt/openssl | ||||
| COPY --from=base               /opt/python                           /opt/python | ||||
| COPY --from=base               /opt/_internal                        /opt/_internal | ||||
| COPY --from=base               /usr/local/bin/auditwheel             /usr/local/bin/auditwheel | ||||
| COPY --from=intel              /opt/intel                            /opt/intel | ||||
| COPY --from=base               /usr/local/bin/patchelf               /usr/local/bin/patchelf | ||||
| COPY --from=libpng             /usr/local/bin/png*                   /usr/local/bin/ | ||||
| COPY --from=libpng             /usr/local/bin/libpng*                /usr/local/bin/ | ||||
| COPY --from=libpng             /usr/local/include/png*               /usr/local/include/ | ||||
| COPY --from=libpng             /usr/local/include/libpng*            /usr/local/include/ | ||||
| COPY --from=libpng             /usr/local/lib/libpng*                /usr/local/lib/ | ||||
| COPY --from=libpng             /usr/local/lib/pkgconfig              /usr/local/lib/pkgconfig | ||||
| COPY --from=jni                /usr/local/include/jni.h              /usr/local/include/jni.h | ||||
|  | ||||
| FROM common as cpu_final | ||||
| ARG BASE_CUDA_VERSION=10.2 | ||||
| RUN yum install -y yum-utils centos-release-scl | ||||
| RUN yum-config-manager --enable rhel-server-rhscl-7-rpms | ||||
| RUN yum install -y devtoolset-7-gcc devtoolset-7-gcc-c++ devtoolset-7-gcc-gfortran devtoolset-7-binutils | ||||
| ENV PATH=/opt/rh/devtoolset-7/root/usr/bin:$PATH | ||||
| ENV LD_LIBRARY_PATH=/opt/rh/devtoolset-7/root/usr/lib64:/opt/rh/devtoolset-7/root/usr/lib:$LD_LIBRARY_PATH | ||||
|  | ||||
| # cmake | ||||
| RUN yum install -y cmake3 && \ | ||||
|     ln -s /usr/bin/cmake3 /usr/bin/cmake | ||||
|  | ||||
| # ninja | ||||
| RUN yum install -y http://repo.okay.com.mx/centos/7/x86_64/release/okay-release-1-1.noarch.rpm | ||||
| RUN yum install -y ninja-build | ||||
|  | ||||
| FROM cpu_final as cuda_final | ||||
| RUN rm -rf /usr/local/cuda-${BASE_CUDA_VERSION} | ||||
| COPY --from=cuda     /usr/local/cuda-${BASE_CUDA_VERSION}  /usr/local/cuda-${BASE_CUDA_VERSION} | ||||
| COPY --from=magma    /usr/local/cuda-${BASE_CUDA_VERSION}  /usr/local/cuda-${BASE_CUDA_VERSION} | ||||
|  | ||||
| FROM common as rocm_final | ||||
| ARG ROCM_VERSION=3.7 | ||||
| # Install ROCm | ||||
| ADD ./common/install_rocm.sh install_rocm.sh | ||||
| RUN bash ./install_rocm.sh ${ROCM_VERSION} && rm install_rocm.sh | ||||
| # cmake is already installed inside the rocm base image, but both 2 and 3 exist | ||||
| # cmake3 is needed for the later MIOpen custom build, so that step is last. | ||||
| RUN yum install -y cmake3 && \ | ||||
|     rm -f /usr/bin/cmake && \ | ||||
|     ln -s /usr/bin/cmake3 /usr/bin/cmake | ||||
| ADD ./common/install_miopen.sh install_miopen.sh | ||||
| RUN bash ./install_miopen.sh ${ROCM_VERSION} && rm install_miopen.sh | ||||
| @ -1,157 +0,0 @@ | ||||
| # syntax = docker/dockerfile:experimental | ||||
| ARG ROCM_VERSION=3.7 | ||||
| ARG BASE_CUDA_VERSION=11.8 | ||||
| ARG GPU_IMAGE=amd64/almalinux:8 | ||||
| FROM quay.io/pypa/manylinux_2_28_x86_64 as base | ||||
|  | ||||
| ENV LC_ALL en_US.UTF-8 | ||||
| ENV LANG en_US.UTF-8 | ||||
| ENV LANGUAGE en_US.UTF-8 | ||||
|  | ||||
| ARG DEVTOOLSET_VERSION=11 | ||||
| RUN yum install -y sudo wget curl perl util-linux xz bzip2 git patch which perl zlib-devel yum-utils gcc-toolset-${DEVTOOLSET_VERSION}-toolchain | ||||
| ENV PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH | ||||
| ENV LD_LIBRARY_PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib64:/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib:$LD_LIBRARY_PATH | ||||
|  | ||||
| # cmake-3.18.4 from pip | ||||
| RUN yum install -y python3-pip && \ | ||||
|     python3 -mpip install cmake==3.18.4 && \ | ||||
|     ln -s /usr/local/bin/cmake /usr/bin/cmake3 | ||||
|  | ||||
| FROM base as openssl | ||||
| # Install openssl (this must precede `build python` step) | ||||
| # (In order to have a proper SSL module, Python is compiled | ||||
| # against a recent openssl [see env vars above], which is linked | ||||
| # statically. We delete openssl afterwards.) | ||||
| ADD ./common/install_openssl.sh install_openssl.sh | ||||
| RUN bash ./install_openssl.sh && rm install_openssl.sh | ||||
|  | ||||
|  | ||||
| # remove unncessary python versions | ||||
| RUN rm -rf /opt/python/cp26-cp26m /opt/_internal/cpython-2.6.9-ucs2 | ||||
| RUN rm -rf /opt/python/cp26-cp26mu /opt/_internal/cpython-2.6.9-ucs4 | ||||
| RUN rm -rf /opt/python/cp33-cp33m /opt/_internal/cpython-3.3.6 | ||||
| RUN rm -rf /opt/python/cp34-cp34m /opt/_internal/cpython-3.4.6 | ||||
|  | ||||
| FROM base as cuda | ||||
| ARG BASE_CUDA_VERSION=11.8 | ||||
| # Install CUDA | ||||
| ADD ./common/install_cuda.sh install_cuda.sh | ||||
| RUN bash ./install_cuda.sh ${BASE_CUDA_VERSION} && rm install_cuda.sh | ||||
|  | ||||
| FROM base as intel | ||||
| # MKL | ||||
| ADD ./common/install_mkl.sh install_mkl.sh | ||||
| RUN bash ./install_mkl.sh && rm install_mkl.sh | ||||
|  | ||||
| FROM base as magma | ||||
| ARG BASE_CUDA_VERSION=10.2 | ||||
| # Install magma | ||||
| ADD ./common/install_magma.sh install_magma.sh | ||||
| RUN bash ./install_magma.sh ${BASE_CUDA_VERSION} && rm install_magma.sh | ||||
|  | ||||
| FROM base as jni | ||||
| # Install java jni header | ||||
| ADD ./common/install_jni.sh install_jni.sh | ||||
| ADD ./java/jni.h jni.h | ||||
| RUN bash ./install_jni.sh && rm install_jni.sh | ||||
|  | ||||
| FROM base as libpng | ||||
| # Install libpng | ||||
| ADD ./common/install_libpng.sh install_libpng.sh | ||||
| RUN bash ./install_libpng.sh && rm install_libpng.sh | ||||
|  | ||||
| FROM ${GPU_IMAGE} as common | ||||
| ARG DEVTOOLSET_VERSION=11 | ||||
| ENV LC_ALL en_US.UTF-8 | ||||
| ENV LANG en_US.UTF-8 | ||||
| ENV LANGUAGE en_US.UTF-8 | ||||
| RUN yum -y install epel-release | ||||
| RUN yum -y update | ||||
| RUN yum install -y \ | ||||
|         autoconf \ | ||||
|         automake \ | ||||
|         bison \ | ||||
|         bzip2 \ | ||||
|         curl \ | ||||
|         diffutils \ | ||||
|         file \ | ||||
|         git \ | ||||
|         make \ | ||||
|         patch \ | ||||
|         perl \ | ||||
|         unzip \ | ||||
|         util-linux \ | ||||
|         wget \ | ||||
|         which \ | ||||
|         xz \ | ||||
|         gcc-toolset-${DEVTOOLSET_VERSION}-toolchain \ | ||||
|         glibc-langpack-en | ||||
| RUN yum install -y \ | ||||
|     https://repo.ius.io/ius-release-el7.rpm \ | ||||
|     https://ossci-linux.s3.amazonaws.com/epel-release-7-14.noarch.rpm | ||||
|  | ||||
| RUN yum swap -y git git236-core | ||||
| # git236+ would refuse to run git commands in repos owned by other users | ||||
| # Which causes version check to fail, as pytorch repo is bind-mounted into the image | ||||
| # Override this behaviour by treating every folder as safe | ||||
| # For more details see https://github.com/pytorch/pytorch/issues/78659#issuecomment-1144107327 | ||||
| RUN git config --global --add safe.directory "*" | ||||
|  | ||||
| ENV SSL_CERT_FILE=/opt/_internal/certs.pem | ||||
| # Install LLVM version | ||||
| COPY --from=openssl            /opt/openssl                          /opt/openssl | ||||
| COPY --from=base               /opt/python                           /opt/python | ||||
| COPY --from=base               /opt/_internal                        /opt/_internal | ||||
| COPY --from=base               /usr/local/bin/auditwheel             /usr/local/bin/auditwheel | ||||
| COPY --from=intel              /opt/intel                            /opt/intel | ||||
| COPY --from=base               /usr/local/bin/patchelf               /usr/local/bin/patchelf | ||||
| COPY --from=libpng             /usr/local/bin/png*                   /usr/local/bin/ | ||||
| COPY --from=libpng             /usr/local/bin/libpng*                /usr/local/bin/ | ||||
| COPY --from=libpng             /usr/local/include/png*               /usr/local/include/ | ||||
| COPY --from=libpng             /usr/local/include/libpng*            /usr/local/include/ | ||||
| COPY --from=libpng             /usr/local/lib/libpng*                /usr/local/lib/ | ||||
| COPY --from=libpng             /usr/local/lib/pkgconfig              /usr/local/lib/pkgconfig | ||||
| COPY --from=jni                /usr/local/include/jni.h              /usr/local/include/jni.h | ||||
|  | ||||
| FROM common as cpu_final | ||||
| ARG BASE_CUDA_VERSION=11.8 | ||||
| ARG DEVTOOLSET_VERSION=11 | ||||
| # Ensure the expected devtoolset is used | ||||
| ENV PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH | ||||
| ENV LD_LIBRARY_PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib64:/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib:$LD_LIBRARY_PATH | ||||
|  | ||||
| # cmake-3.18.4 from pip | ||||
| RUN yum install -y python3-pip && \ | ||||
|     python3 -mpip install cmake==3.18.4 && \ | ||||
|     ln -s /usr/local/bin/cmake /usr/bin/cmake3 | ||||
|  | ||||
| FROM cpu_final as cuda_final | ||||
| RUN rm -rf /usr/local/cuda-${BASE_CUDA_VERSION} | ||||
| COPY --from=cuda     /usr/local/cuda-${BASE_CUDA_VERSION}  /usr/local/cuda-${BASE_CUDA_VERSION} | ||||
| COPY --from=magma    /usr/local/cuda-${BASE_CUDA_VERSION}  /usr/local/cuda-${BASE_CUDA_VERSION} | ||||
|  | ||||
| FROM common as rocm_final | ||||
| ARG ROCM_VERSION=3.7 | ||||
| # Install ROCm | ||||
| ADD ./common/install_rocm.sh install_rocm.sh | ||||
| RUN bash ./install_rocm.sh ${ROCM_VERSION} && rm install_rocm.sh | ||||
| # cmake is already installed inside the rocm base image, but both 2 and 3 exist | ||||
| # cmake3 is needed for the later MIOpen custom build, so that step is last. | ||||
| RUN yum install -y cmake3 && \ | ||||
|     rm -f /usr/bin/cmake && \ | ||||
|     ln -s /usr/bin/cmake3 /usr/bin/cmake | ||||
| ADD ./common/install_miopen.sh install_miopen.sh | ||||
| RUN bash ./install_miopen.sh ${ROCM_VERSION} && rm install_miopen.sh | ||||
|  | ||||
| FROM cpu_final as xpu_final | ||||
| # XPU CD use rolling driver | ||||
| ENV XPU_DRIVER_TYPE ROLLING | ||||
| # cmake-3.28.4 from pip | ||||
| RUN python3 -m pip install --upgrade pip && \ | ||||
|     python3 -mpip install cmake==3.28.4 | ||||
| # Install setuptools and wheel for python 3.13 | ||||
| RUN /opt/python/cp313-cp313/bin/python -m pip install setuptools wheel | ||||
| ADD ./common/install_xpu.sh install_xpu.sh | ||||
| RUN bash ./install_xpu.sh && rm install_xpu.sh | ||||
| RUN pushd /opt/_internal && tar -xJf static-libs-for-embedding-only.tar.xz && popd | ||||
| @ -1,57 +0,0 @@ | ||||
| FROM quay.io/pypa/manylinux_2_28_aarch64 as base | ||||
|  | ||||
| # Graviton needs GCC 10 or above for the build. GCC12 is the default version in almalinux-8. | ||||
| ARG GCCTOOLSET_VERSION=11 | ||||
|  | ||||
| # Language variabes | ||||
| ENV LC_ALL=en_US.UTF-8 | ||||
| ENV LANG=en_US.UTF-8 | ||||
| ENV LANGUAGE=en_US.UTF-8 | ||||
|  | ||||
| # Installed needed OS packages. This is to support all | ||||
| # the binary builds (torch, vision, audio, text, data) | ||||
| RUN yum -y install epel-release | ||||
| RUN yum -y update | ||||
| RUN yum install -y \ | ||||
|   autoconf \ | ||||
|   automake \ | ||||
|   bison \ | ||||
|   bzip2 \ | ||||
|   curl \ | ||||
|   diffutils \ | ||||
|   file \ | ||||
|   git \ | ||||
|   less \ | ||||
|   libffi-devel \ | ||||
|   libgomp \ | ||||
|   make \ | ||||
|   openssl-devel \ | ||||
|   patch \ | ||||
|   perl \ | ||||
|   unzip \ | ||||
|   util-linux \ | ||||
|   wget \ | ||||
|   which \ | ||||
|   xz \ | ||||
|   yasm \ | ||||
|   zstd \ | ||||
|   sudo \ | ||||
|   gcc-toolset-${GCCTOOLSET_VERSION}-toolchain | ||||
|  | ||||
| # Ensure the expected devtoolset is used | ||||
| ENV PATH=/opt/rh/gcc-toolset-${GCCTOOLSET_VERSION}/root/usr/bin:$PATH | ||||
| ENV LD_LIBRARY_PATH=/opt/rh/gcc-toolset-${GCCTOOLSET_VERSION}/root/usr/lib64:/opt/rh/gcc-toolset-${GCCTOOLSET_VERSION}/root/usr/lib:$LD_LIBRARY_PATH | ||||
|  | ||||
| # git236+ would refuse to run git commands in repos owned by other users | ||||
| # Which causes version check to fail, as pytorch repo is bind-mounted into the image | ||||
| # Override this behaviour by treating every folder as safe | ||||
| # For more details see https://github.com/pytorch/pytorch/issues/78659#issuecomment-1144107327 | ||||
| RUN git config --global --add safe.directory "*" | ||||
|  | ||||
| FROM base as final | ||||
|  | ||||
| # remove unncessary python versions | ||||
| RUN rm -rf /opt/python/cp26-cp26m /opt/_internal/cpython-2.6.9-ucs2 | ||||
| RUN rm -rf /opt/python/cp26-cp26mu /opt/_internal/cpython-2.6.9-ucs4 | ||||
| RUN rm -rf /opt/python/cp33-cp33m /opt/_internal/cpython-3.3.6 | ||||
| RUN rm -rf /opt/python/cp34-cp34m /opt/_internal/cpython-3.4.6 | ||||
| @ -1,94 +0,0 @@ | ||||
| FROM quay.io/pypa/manylinux2014_aarch64 as base | ||||
|  | ||||
|  | ||||
| # Graviton needs GCC 10 for the build | ||||
| ARG DEVTOOLSET_VERSION=10 | ||||
|  | ||||
| # Language variabes | ||||
| ENV LC_ALL=en_US.UTF-8 | ||||
| ENV LANG=en_US.UTF-8 | ||||
| ENV LANGUAGE=en_US.UTF-8 | ||||
|  | ||||
| # Installed needed OS packages. This is to support all | ||||
| # the binary builds (torch, vision, audio, text, data) | ||||
| RUN yum -y install epel-release | ||||
| RUN yum -y update | ||||
| RUN yum install -y \ | ||||
|   autoconf \ | ||||
|   automake \ | ||||
|   bison \ | ||||
|   bzip2 \ | ||||
|   curl \ | ||||
|   diffutils \ | ||||
|   file \ | ||||
|   git \ | ||||
|   make \ | ||||
|   patch \ | ||||
|   perl \ | ||||
|   unzip \ | ||||
|   util-linux \ | ||||
|   wget \ | ||||
|   which \ | ||||
|   xz \ | ||||
|   yasm \ | ||||
|   less \ | ||||
|   zstd \ | ||||
|   libgomp \ | ||||
|   sudo \ | ||||
|   devtoolset-${DEVTOOLSET_VERSION}-gcc \ | ||||
|   devtoolset-${DEVTOOLSET_VERSION}-gcc-c++ \ | ||||
|   devtoolset-${DEVTOOLSET_VERSION}-gcc-gfortran \ | ||||
|   devtoolset-${DEVTOOLSET_VERSION}-binutils | ||||
|  | ||||
| # Ensure the expected devtoolset is used | ||||
| ENV PATH=/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH | ||||
| ENV LD_LIBRARY_PATH=/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/lib64:/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/lib:$LD_LIBRARY_PATH | ||||
|  | ||||
|  | ||||
| # git236+ would refuse to run git commands in repos owned by other users | ||||
| # Which causes version check to fail, as pytorch repo is bind-mounted into the image | ||||
| # Override this behaviour by treating every folder as safe | ||||
| # For more details see https://github.com/pytorch/pytorch/issues/78659#issuecomment-1144107327 | ||||
| RUN git config --global --add safe.directory "*" | ||||
|  | ||||
|  | ||||
| ############################################################################### | ||||
| # libglfortran.a hack | ||||
| # | ||||
| # libgfortran.a from quay.io/pypa/manylinux2014_aarch64 is not compiled with -fPIC. | ||||
| # This causes __stack_chk_guard@@GLIBC_2.17 on pytorch build. To solve, get | ||||
| # ubuntu's libgfortran.a which is compiled with -fPIC | ||||
| # NOTE: Need a better way to get this library as Ubuntu's package can be removed by the vender, or changed | ||||
| ############################################################################### | ||||
| RUN cd ~/ \ | ||||
|   && curl -L -o ~/libgfortran-10-dev.deb http://ports.ubuntu.com/ubuntu-ports/pool/universe/g/gcc-10/libgfortran-10-dev_10.5.0-1ubuntu1_arm64.deb \ | ||||
|   && ar x ~/libgfortran-10-dev.deb \ | ||||
|   && tar --use-compress-program=unzstd -xvf data.tar.zst -C ~/ \ | ||||
|   && cp -f ~/usr/lib/gcc/aarch64-linux-gnu/10/libgfortran.a /opt/rh/devtoolset-10/root/usr/lib/gcc/aarch64-redhat-linux/10/ | ||||
|  | ||||
| # install cmake | ||||
| RUN yum install -y cmake3 && \ | ||||
|     ln -s /usr/bin/cmake3 /usr/bin/cmake | ||||
|  | ||||
| FROM base as openssl | ||||
| # Install openssl (this must precede `build python` step) | ||||
| # (In order to have a proper SSL module, Python is compiled | ||||
| # against a recent openssl [see env vars above], which is linked | ||||
| # statically. We delete openssl afterwards.) | ||||
| ADD ./common/install_openssl.sh install_openssl.sh | ||||
| RUN bash ./install_openssl.sh && rm install_openssl.sh | ||||
| ENV SSL_CERT_FILE=/opt/_internal/certs.pem | ||||
|  | ||||
| FROM base as openblas | ||||
| # Install openblas | ||||
| ADD ./common/install_openblas.sh install_openblas.sh | ||||
| RUN bash ./install_openblas.sh && rm install_openblas.sh | ||||
|  | ||||
| FROM openssl as final | ||||
| # remove unncessary python versions | ||||
| RUN rm -rf /opt/python/cp26-cp26m /opt/_internal/cpython-2.6.9-ucs2 | ||||
| RUN rm -rf /opt/python/cp26-cp26mu /opt/_internal/cpython-2.6.9-ucs4 | ||||
| RUN rm -rf /opt/python/cp33-cp33m /opt/_internal/cpython-3.3.6 | ||||
| RUN rm -rf /opt/python/cp34-cp34m /opt/_internal/cpython-3.4.6 | ||||
| COPY --from=openblas     /opt/OpenBLAS/  /opt/OpenBLAS/ | ||||
| ENV LD_LIBRARY_PATH=/opt/OpenBLAS/lib:$LD_LIBRARY_PATH | ||||
| @ -1,91 +0,0 @@ | ||||
| FROM quay.io/pypa/manylinux_2_28_aarch64 as base | ||||
|  | ||||
| # Cuda ARM build needs gcc 11 | ||||
| ARG DEVTOOLSET_VERSION=11 | ||||
|  | ||||
| # Language variables | ||||
| ENV LC_ALL=en_US.UTF-8 | ||||
| ENV LANG=en_US.UTF-8 | ||||
| ENV LANGUAGE=en_US.UTF-8 | ||||
|  | ||||
| # Installed needed OS packages. This is to support all | ||||
| # the binary builds (torch, vision, audio, text, data) | ||||
| RUN yum -y install epel-release | ||||
| RUN yum -y update | ||||
| RUN yum install -y \ | ||||
|   autoconf \ | ||||
|   automake \ | ||||
|   bison \ | ||||
|   bzip2 \ | ||||
|   curl \ | ||||
|   diffutils \ | ||||
|   file \ | ||||
|   git \ | ||||
|   make \ | ||||
|   patch \ | ||||
|   perl \ | ||||
|   unzip \ | ||||
|   util-linux \ | ||||
|   wget \ | ||||
|   which \ | ||||
|   xz \ | ||||
|   yasm \ | ||||
|   less \ | ||||
|   zstd \ | ||||
|   libgomp \ | ||||
|   sudo \ | ||||
|   gcc-toolset-${DEVTOOLSET_VERSION}-toolchain | ||||
|  | ||||
| # Ensure the expected devtoolset is used | ||||
| ENV PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH | ||||
| ENV LD_LIBRARY_PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib64:/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib:$LD_LIBRARY_PATH | ||||
|  | ||||
| # git236+ would refuse to run git commands in repos owned by other users | ||||
| # Which causes version check to fail, as pytorch repo is bind-mounted into the image | ||||
| # Override this behaviour by treating every folder as safe | ||||
| # For more details see https://github.com/pytorch/pytorch/issues/78659#issuecomment-1144107327 | ||||
| RUN git config --global --add safe.directory "*" | ||||
|  | ||||
|  | ||||
| FROM base as openssl | ||||
| # Install openssl (this must precede `build python` step) | ||||
| # (In order to have a proper SSL module, Python is compiled | ||||
| # against a recent openssl [see env vars above], which is linked | ||||
| # statically. We delete openssl afterwards.) | ||||
| ADD ./common/install_openssl.sh install_openssl.sh | ||||
| RUN bash ./install_openssl.sh && rm install_openssl.sh | ||||
| ENV SSL_CERT_FILE=/opt/_internal/certs.pem | ||||
|  | ||||
| FROM openssl as final | ||||
| # remove unncessary python versions | ||||
| RUN rm -rf /opt/python/cp26-cp26m /opt/_internal/cpython-2.6.9-ucs2 | ||||
| RUN rm -rf /opt/python/cp26-cp26mu /opt/_internal/cpython-2.6.9-ucs4 | ||||
| RUN rm -rf /opt/python/cp33-cp33m /opt/_internal/cpython-3.3.6 | ||||
| RUN rm -rf /opt/python/cp34-cp34m /opt/_internal/cpython-3.4.6 | ||||
|  | ||||
| FROM base as cuda | ||||
| ARG BASE_CUDA_VERSION | ||||
| # Install CUDA | ||||
| ADD ./common/install_cuda_aarch64.sh install_cuda_aarch64.sh | ||||
| RUN bash ./install_cuda_aarch64.sh ${BASE_CUDA_VERSION} && rm install_cuda_aarch64.sh | ||||
|  | ||||
| FROM base as magma | ||||
| ARG BASE_CUDA_VERSION | ||||
| # Install magma | ||||
| ADD ./common/install_magma.sh install_magma.sh | ||||
| RUN bash ./install_magma.sh ${BASE_CUDA_VERSION} && rm install_magma.sh | ||||
|  | ||||
| FROM base as nvpl | ||||
| # Install nvpl | ||||
| ADD ./common/install_nvpl.sh install_nvpl.sh | ||||
| RUN bash ./install_nvpl.sh && rm install_nvpl.sh | ||||
|  | ||||
| FROM final as cuda_final | ||||
| ARG BASE_CUDA_VERSION | ||||
| RUN rm -rf /usr/local/cuda-${BASE_CUDA_VERSION} | ||||
| COPY --from=cuda     /usr/local/cuda-${BASE_CUDA_VERSION}  /usr/local/cuda-${BASE_CUDA_VERSION} | ||||
| COPY --from=magma    /usr/local/cuda-${BASE_CUDA_VERSION}  /usr/local/cuda-${BASE_CUDA_VERSION} | ||||
| COPY --from=nvpl /opt/nvpl/lib/  /usr/local/lib/ | ||||
| COPY --from=nvpl /opt/nvpl/include/  /usr/local/include/ | ||||
| RUN ln -sf /usr/local/cuda-${BASE_CUDA_VERSION} /usr/local/cuda | ||||
| ENV PATH=/usr/local/cuda/bin:$PATH | ||||
| @ -1,71 +0,0 @@ | ||||
| FROM centos:8 as base | ||||
|  | ||||
| ENV LC_ALL en_US.UTF-8 | ||||
| ENV LANG en_US.UTF-8 | ||||
| ENV LANGUAGE en_US.UTF-8 | ||||
| ENV PATH /opt/rh/gcc-toolset-11/root/bin/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin | ||||
|  | ||||
| # change to a valid repo | ||||
| RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-*.repo | ||||
| # enable to install ninja-build | ||||
| RUN sed -i 's|enabled=0|enabled=1|g' /etc/yum.repos.d/CentOS-Linux-PowerTools.repo | ||||
|  | ||||
| RUN yum -y update | ||||
| RUN yum install -y wget curl perl util-linux xz bzip2 git patch which zlib-devel sudo | ||||
| RUN yum install -y autoconf automake make cmake gdb gcc-toolset-11-gcc-c++ | ||||
|  | ||||
|  | ||||
| FROM base as openssl | ||||
| ADD ./common/install_openssl.sh install_openssl.sh | ||||
| RUN bash ./install_openssl.sh && rm install_openssl.sh | ||||
|  | ||||
| # Install python | ||||
| FROM base as python | ||||
| RUN yum install -y openssl-devel zlib-devel bzip2-devel ncurses-devel sqlite-devel readline-devel tk-devel gdbm-devel libpcap-devel xz-devel libffi-devel | ||||
| ADD common/install_cpython.sh install_cpython.sh | ||||
| RUN bash ./install_cpython.sh && rm install_cpython.sh | ||||
|  | ||||
| FROM base as conda | ||||
| ADD ./common/install_conda_docker.sh install_conda.sh | ||||
| RUN bash ./install_conda.sh && rm install_conda.sh | ||||
| RUN /opt/conda/bin/conda install -y cmake | ||||
|  | ||||
| FROM base as intel | ||||
| # Install MKL | ||||
| COPY --from=python             /opt/python                           /opt/python | ||||
| COPY --from=python             /opt/_internal                        /opt/_internal | ||||
| COPY --from=conda              /opt/conda                            /opt/conda | ||||
| ENV PATH=/opt/conda/bin:$PATH | ||||
| ADD ./common/install_mkl.sh install_mkl.sh | ||||
| RUN bash ./install_mkl.sh && rm install_mkl.sh | ||||
|  | ||||
| FROM base as patchelf | ||||
| ADD ./common/install_patchelf.sh install_patchelf.sh | ||||
| RUN bash ./install_patchelf.sh && rm install_patchelf.sh | ||||
| RUN cp $(which patchelf) /patchelf | ||||
|  | ||||
| FROM base as jni | ||||
| ADD ./common/install_jni.sh install_jni.sh | ||||
| ADD ./java/jni.h jni.h | ||||
| RUN bash ./install_jni.sh && rm install_jni.sh | ||||
|  | ||||
| FROM base as libpng | ||||
| ADD ./common/install_libpng.sh install_libpng.sh | ||||
| RUN bash ./install_libpng.sh && rm install_libpng.sh | ||||
|  | ||||
| FROM base as final | ||||
| COPY --from=openssl            /opt/openssl                          /opt/openssl | ||||
| COPY --from=python             /opt/python                           /opt/python | ||||
| COPY --from=python             /opt/_internal                        /opt/_internal | ||||
| COPY --from=intel              /opt/intel                            /opt/intel | ||||
| COPY --from=conda              /opt/conda                            /opt/conda | ||||
| COPY --from=patchelf           /usr/local/bin/patchelf               /usr/local/bin/patchelf | ||||
| COPY --from=jni                /usr/local/include/jni.h              /usr/local/include/jni.h | ||||
| COPY --from=libpng             /usr/local/bin/png*                   /usr/local/bin/ | ||||
| COPY --from=libpng             /usr/local/bin/libpng*                /usr/local/bin/ | ||||
| COPY --from=libpng             /usr/local/include/png*               /usr/local/include/ | ||||
| COPY --from=libpng             /usr/local/include/libpng*            /usr/local/include/ | ||||
| COPY --from=libpng             /usr/local/lib/libpng*                /usr/local/lib/ | ||||
| COPY --from=libpng             /usr/local/lib/pkgconfig              /usr/local/lib/pkgconfig | ||||
|  | ||||
| RUN yum install -y ninja-build | ||||
| @ -1,73 +0,0 @@ | ||||
| FROM --platform=linux/s390x docker.io/ubuntu:24.04 as base | ||||
|  | ||||
| # Language variables | ||||
| ENV LC_ALL=C.UTF-8 | ||||
| ENV LANG=C.UTF-8 | ||||
| ENV LANGUAGE=C.UTF-8 | ||||
|  | ||||
| # Installed needed OS packages. This is to support all | ||||
| # the binary builds (torch, vision, audio, text, data) | ||||
| RUN apt update ; apt upgrade -y | ||||
| RUN apt install -y \ | ||||
|   build-essential \ | ||||
|   autoconf \ | ||||
|   automake \ | ||||
|   bzip2 \ | ||||
|   curl \ | ||||
|   diffutils \ | ||||
|   file \ | ||||
|   git \ | ||||
|   make \ | ||||
|   patch \ | ||||
|   perl \ | ||||
|   unzip \ | ||||
|   util-linux \ | ||||
|   wget \ | ||||
|   which \ | ||||
|   xz-utils \ | ||||
|   less \ | ||||
|   zstd \ | ||||
|   cmake \ | ||||
|   python3 \ | ||||
|   python3-dev \ | ||||
|   python3-setuptools \ | ||||
|   python3-yaml \ | ||||
|   python3-typing-extensions \ | ||||
|   libblas-dev \ | ||||
|   libopenblas-dev \ | ||||
|   liblapack-dev \ | ||||
|   libatlas-base-dev | ||||
|  | ||||
| # git236+ would refuse to run git commands in repos owned by other users | ||||
| # Which causes version check to fail, as pytorch repo is bind-mounted into the image | ||||
| # Override this behaviour by treating every folder as safe | ||||
| # For more details see https://github.com/pytorch/pytorch/issues/78659#issuecomment-1144107327 | ||||
| RUN git config --global --add safe.directory "*" | ||||
|  | ||||
| FROM base as openssl | ||||
| # Install openssl (this must precede `build python` step) | ||||
| # (In order to have a proper SSL module, Python is compiled | ||||
| # against a recent openssl [see env vars above], which is linked | ||||
| # statically. We delete openssl afterwards.) | ||||
| ADD ./common/install_openssl.sh install_openssl.sh | ||||
| RUN bash ./install_openssl.sh && rm install_openssl.sh | ||||
| ENV SSL_CERT_FILE=/opt/_internal/certs.pem | ||||
|  | ||||
| # EPEL for cmake | ||||
| FROM base as patchelf | ||||
| # Install patchelf | ||||
| ADD ./common/install_patchelf.sh install_patchelf.sh | ||||
| RUN bash ./install_patchelf.sh && rm install_patchelf.sh | ||||
| RUN cp $(which patchelf) /patchelf | ||||
|  | ||||
| FROM patchelf as python | ||||
| # build python | ||||
| COPY manywheel/build_scripts /build_scripts | ||||
| ADD ./common/install_cpython.sh /build_scripts/install_cpython.sh | ||||
| RUN bash build_scripts/build.sh && rm -r build_scripts | ||||
|  | ||||
| FROM openssl as final | ||||
| COPY --from=python             /opt/python                           /opt/python | ||||
| COPY --from=python             /opt/_internal                        /opt/_internal | ||||
| COPY --from=python             /opt/python/cp39-cp39/bin/auditwheel /usr/local/bin/auditwheel | ||||
| COPY --from=patchelf           /usr/local/bin/patchelf               /usr/local/bin/patchelf | ||||
| @ -1,154 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
| # Script used only in CD pipeline | ||||
|  | ||||
| set -eou pipefail | ||||
|  | ||||
| TOPDIR=$(git rev-parse --show-toplevel) | ||||
|  | ||||
| image="$1" | ||||
| shift | ||||
|  | ||||
| if [ -z "${image}" ]; then | ||||
|   echo "Usage: $0 IMAGE" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| DOCKER_IMAGE="pytorch/${image}" | ||||
|  | ||||
| DOCKER_REGISTRY="${DOCKER_REGISTRY:-docker.io}" | ||||
|  | ||||
| GPU_ARCH_TYPE=${GPU_ARCH_TYPE:-cpu} | ||||
| GPU_ARCH_VERSION=${GPU_ARCH_VERSION:-} | ||||
| MANY_LINUX_VERSION=${MANY_LINUX_VERSION:-} | ||||
| DOCKERFILE_SUFFIX=${DOCKERFILE_SUFFIX:-} | ||||
| WITH_PUSH=${WITH_PUSH:-} | ||||
|  | ||||
| case ${GPU_ARCH_TYPE} in | ||||
|     cpu) | ||||
|         TARGET=cpu_final | ||||
|         DOCKER_TAG=cpu | ||||
|         GPU_IMAGE=centos:7 | ||||
|         DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=9" | ||||
|         ;; | ||||
|     cpu-manylinux_2_28) | ||||
|         TARGET=cpu_final | ||||
|         DOCKER_TAG=cpu | ||||
|         GPU_IMAGE=amd64/almalinux:8 | ||||
|         DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=11" | ||||
|         MANY_LINUX_VERSION="2_28" | ||||
|         ;; | ||||
|     cpu-aarch64) | ||||
|         TARGET=final | ||||
|         DOCKER_TAG=cpu-aarch64 | ||||
|         GPU_IMAGE=arm64v8/centos:7 | ||||
|         DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=10" | ||||
|         MANY_LINUX_VERSION="aarch64" | ||||
|         ;; | ||||
|     cpu-aarch64-2_28) | ||||
|         TARGET=final | ||||
|         DOCKER_TAG=cpu-aarch64 | ||||
|         GPU_IMAGE=arm64v8/almalinux:8 | ||||
|         DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=11" | ||||
|         MANY_LINUX_VERSION="2_28_aarch64" | ||||
|         ;; | ||||
|     cpu-cxx11-abi) | ||||
|         TARGET=final | ||||
|         DOCKER_TAG=cpu-cxx11-abi | ||||
|         GPU_IMAGE="" | ||||
|         DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=9" | ||||
|         MANY_LINUX_VERSION="cxx11-abi" | ||||
|         ;; | ||||
|     cpu-s390x) | ||||
|         TARGET=final | ||||
|         DOCKER_TAG=cpu-s390x | ||||
|         GPU_IMAGE=redhat/ubi9 | ||||
|         DOCKER_GPU_BUILD_ARG="" | ||||
|         MANY_LINUX_VERSION="s390x" | ||||
|         ;; | ||||
|     cuda) | ||||
|         TARGET=cuda_final | ||||
|         DOCKER_TAG=cuda${GPU_ARCH_VERSION} | ||||
|         # Keep this up to date with the minimum version of CUDA we currently support | ||||
|         GPU_IMAGE=centos:7 | ||||
|         DOCKER_GPU_BUILD_ARG="--build-arg BASE_CUDA_VERSION=${GPU_ARCH_VERSION} --build-arg DEVTOOLSET_VERSION=9" | ||||
|         ;; | ||||
|     cuda-manylinux_2_28) | ||||
|         TARGET=cuda_final | ||||
|         DOCKER_TAG=cuda${GPU_ARCH_VERSION} | ||||
|         GPU_IMAGE=amd64/almalinux:8 | ||||
|         DOCKER_GPU_BUILD_ARG="--build-arg BASE_CUDA_VERSION=${GPU_ARCH_VERSION} --build-arg DEVTOOLSET_VERSION=11" | ||||
|         MANY_LINUX_VERSION="2_28" | ||||
|         ;; | ||||
|     cuda-aarch64) | ||||
|         TARGET=cuda_final | ||||
|         DOCKER_TAG=cuda${GPU_ARCH_VERSION} | ||||
|         GPU_IMAGE=arm64v8/centos:7 | ||||
|         DOCKER_GPU_BUILD_ARG="--build-arg BASE_CUDA_VERSION=${GPU_ARCH_VERSION} --build-arg DEVTOOLSET_VERSION=11" | ||||
|         MANY_LINUX_VERSION="aarch64" | ||||
|         DOCKERFILE_SUFFIX="_cuda_aarch64" | ||||
|         ;; | ||||
|     rocm) | ||||
|         TARGET=rocm_final | ||||
|         DOCKER_TAG=rocm${GPU_ARCH_VERSION} | ||||
|         GPU_IMAGE=rocm/dev-centos-7:${GPU_ARCH_VERSION}-complete | ||||
|         PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx1030;gfx1100" | ||||
|         ROCM_REGEX="([0-9]+)\.([0-9]+)[\.]?([0-9]*)" | ||||
|         if [[ $GPU_ARCH_VERSION =~ $ROCM_REGEX ]]; then | ||||
|             ROCM_VERSION_INT=$((${BASH_REMATCH[1]}*10000 + ${BASH_REMATCH[2]}*100 + ${BASH_REMATCH[3]:-0})) | ||||
|         else | ||||
|             echo "ERROR: rocm regex failed" | ||||
|             exit 1 | ||||
|         fi | ||||
|         if [[ $ROCM_VERSION_INT -ge 60000 ]]; then | ||||
|             PYTORCH_ROCM_ARCH+=";gfx942" | ||||
|         fi | ||||
|         DOCKER_GPU_BUILD_ARG="--build-arg ROCM_VERSION=${GPU_ARCH_VERSION} --build-arg PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH} --build-arg DEVTOOLSET_VERSION=9" | ||||
|         ;; | ||||
|     xpu) | ||||
|         TARGET=xpu_final | ||||
|         DOCKER_TAG=xpu | ||||
|         GPU_IMAGE=amd64/almalinux:8 | ||||
|         DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=11" | ||||
|         MANY_LINUX_VERSION="2_28" | ||||
|         ;; | ||||
|     *) | ||||
|         echo "ERROR: Unrecognized GPU_ARCH_TYPE: ${GPU_ARCH_TYPE}" | ||||
|         exit 1 | ||||
|         ;; | ||||
| esac | ||||
|  | ||||
| IMAGES='' | ||||
|  | ||||
| if [[ -n ${MANY_LINUX_VERSION} && -z ${DOCKERFILE_SUFFIX} ]]; then | ||||
|     DOCKERFILE_SUFFIX=_${MANY_LINUX_VERSION} | ||||
| fi | ||||
| ( | ||||
|     set -x | ||||
|     DOCKER_BUILDKIT=1 docker build \ | ||||
|         ${DOCKER_GPU_BUILD_ARG} \ | ||||
|         --build-arg "GPU_IMAGE=${GPU_IMAGE}" \ | ||||
|         --target "${TARGET}" \ | ||||
|         -t "${DOCKER_IMAGE}" \ | ||||
|         $@ \ | ||||
|         -f "${TOPDIR}/.ci/docker/manywheel/Dockerfile${DOCKERFILE_SUFFIX}" \ | ||||
|         "${TOPDIR}/.ci/docker/" | ||||
| ) | ||||
|  | ||||
| GITHUB_REF=${GITHUB_REF:-$(git symbolic-ref -q HEAD || git describe --tags --exact-match)} | ||||
| GIT_BRANCH_NAME="2.5" | ||||
| GIT_COMMIT_SHA=${GITHUB_SHA:-$(git rev-parse HEAD)} | ||||
| DOCKER_IMAGE_BRANCH_TAG=${DOCKER_IMAGE}-${GIT_BRANCH_NAME} | ||||
| DOCKER_IMAGE_SHA_TAG=${DOCKER_IMAGE}-${GIT_COMMIT_SHA} | ||||
|  | ||||
| if [[ "${WITH_PUSH}" == true ]]; then | ||||
|     ( | ||||
|         set -x | ||||
|         docker push "${DOCKER_IMAGE}" | ||||
|         if [[ -n ${GITHUB_REF} ]]; then | ||||
|             docker tag ${DOCKER_IMAGE} ${DOCKER_IMAGE_BRANCH_TAG} | ||||
|             docker tag ${DOCKER_IMAGE} ${DOCKER_IMAGE_SHA_TAG} | ||||
|             docker push "${DOCKER_IMAGE_BRANCH_TAG}" | ||||
|             docker push "${DOCKER_IMAGE_SHA_TAG}" | ||||
|         fi | ||||
|     ) | ||||
| fi | ||||
| @ -1,131 +0,0 @@ | ||||
| #!/bin/bash | ||||
| # Top-level build script called from Dockerfile | ||||
| # Script used only in CD pipeline | ||||
|  | ||||
| # Stop at any error, show all commands | ||||
| set -ex | ||||
|  | ||||
| # openssl version to build, with expected sha256 hash of .tar.gz | ||||
| # archive | ||||
| OPENSSL_ROOT=openssl-1.1.1l | ||||
| OPENSSL_HASH=0b7a3e5e59c34827fe0c3a74b7ec8baef302b98fa80088d7f9153aa16fa76bd1 | ||||
| DEVTOOLS_HASH=a8ebeb4bed624700f727179e6ef771dafe47651131a00a78b342251415646acc | ||||
| PATCHELF_HASH=d9afdff4baeacfbc64861454f368b7f2c15c44d245293f7587bbf726bfe722fb | ||||
| CURL_ROOT=curl-7.73.0 | ||||
| CURL_HASH=cf34fe0b07b800f1c01a499a6e8b2af548f6d0e044dca4a29d88a4bee146d131 | ||||
| AUTOCONF_ROOT=autoconf-2.69 | ||||
| AUTOCONF_HASH=954bd69b391edc12d6a4a51a2dd1476543da5c6bbf05a95b59dc0dd6fd4c2969 | ||||
|  | ||||
| # Get build utilities | ||||
| MY_DIR=$(dirname "${BASH_SOURCE[0]}") | ||||
| source $MY_DIR/build_utils.sh | ||||
|  | ||||
| if [ "$(uname -m)" != "s390x" ] ; then | ||||
|     # Dependencies for compiling Python that we want to remove from | ||||
|     # the final image after compiling Python | ||||
|     PYTHON_COMPILE_DEPS="zlib-devel bzip2-devel ncurses-devel sqlite-devel readline-devel tk-devel gdbm-devel db4-devel libpcap-devel xz-devel libffi-devel" | ||||
|  | ||||
|     # Libraries that are allowed as part of the manylinux1 profile | ||||
|     MANYLINUX1_DEPS="glibc-devel libstdc++-devel glib2-devel libX11-devel libXext-devel libXrender-devel  mesa-libGL-devel libICE-devel libSM-devel ncurses-devel" | ||||
|  | ||||
|     # Development tools and libraries | ||||
|     yum -y install bzip2 make git patch unzip bison yasm diffutils \ | ||||
|         automake which file cmake28 \ | ||||
|         kernel-devel-`uname -r` \ | ||||
|         ${PYTHON_COMPILE_DEPS} | ||||
| else | ||||
|     # Dependencies for compiling Python that we want to remove from | ||||
|     # the final image after compiling Python | ||||
|     PYTHON_COMPILE_DEPS="zlib1g-dev libbz2-dev libncurses-dev libsqlite3-dev libdb-dev libpcap-dev liblzma-dev libffi-dev" | ||||
|  | ||||
|     # Libraries that are allowed as part of the manylinux1 profile | ||||
|     MANYLINUX1_DEPS="libglib2.0-dev libX11-dev libncurses-dev" | ||||
|  | ||||
|     # Development tools and libraries | ||||
|     apt install -y bzip2 make git patch unzip diffutils \ | ||||
|         automake which file cmake \ | ||||
|         linux-headers-virtual \ | ||||
|         ${PYTHON_COMPILE_DEPS} | ||||
| fi | ||||
|  | ||||
| # Install newest autoconf | ||||
| build_autoconf $AUTOCONF_ROOT $AUTOCONF_HASH | ||||
| autoconf --version | ||||
|  | ||||
| # Compile the latest Python releases. | ||||
| # (In order to have a proper SSL module, Python is compiled | ||||
| # against a recent openssl [see env vars above], which is linked | ||||
| # statically. We delete openssl afterwards.) | ||||
| build_openssl $OPENSSL_ROOT $OPENSSL_HASH | ||||
| /build_scripts/install_cpython.sh | ||||
|  | ||||
| PY39_BIN=/opt/python/cp39-cp39/bin | ||||
|  | ||||
| # Our openssl doesn't know how to find the system CA trust store | ||||
| #   (https://github.com/pypa/manylinux/issues/53) | ||||
| # And it's not clear how up-to-date that is anyway | ||||
| # So let's just use the same one pip and everyone uses | ||||
| $PY39_BIN/pip install certifi | ||||
| ln -s $($PY39_BIN/python -c 'import certifi; print(certifi.where())') \ | ||||
|       /opt/_internal/certs.pem | ||||
| # If you modify this line you also have to modify the versions in the | ||||
| # Dockerfiles: | ||||
| export SSL_CERT_FILE=/opt/_internal/certs.pem | ||||
|  | ||||
| # Install newest curl | ||||
| build_curl $CURL_ROOT $CURL_HASH | ||||
| rm -rf /usr/local/include/curl /usr/local/lib/libcurl* /usr/local/lib/pkgconfig/libcurl.pc | ||||
| hash -r | ||||
| curl --version | ||||
| curl-config --features | ||||
|  | ||||
| # Install patchelf (latest with unreleased bug fixes) | ||||
| curl -sLOk https://nixos.org/releases/patchelf/patchelf-0.10/patchelf-0.10.tar.gz | ||||
| # check_sha256sum patchelf-0.9njs2.tar.gz $PATCHELF_HASH | ||||
| tar -xzf patchelf-0.10.tar.gz | ||||
| (cd patchelf-0.10 && ./configure && make && make install) | ||||
| rm -rf patchelf-0.10.tar.gz patchelf-0.10 | ||||
|  | ||||
| # Install latest pypi release of auditwheel | ||||
| $PY39_BIN/pip install auditwheel | ||||
| ln -s $PY39_BIN/auditwheel /usr/local/bin/auditwheel | ||||
|  | ||||
| # Clean up development headers and other unnecessary stuff for | ||||
| # final image | ||||
| if [ "$(uname -m)" != "s390x" ] ; then | ||||
|     yum -y erase wireless-tools gtk2 libX11 hicolor-icon-theme \ | ||||
|         avahi freetype bitstream-vera-fonts \ | ||||
|         ${PYTHON_COMPILE_DEPS} || true > /dev/null 2>&1 | ||||
|     yum -y install ${MANYLINUX1_DEPS} | ||||
|     yum -y clean all > /dev/null 2>&1 | ||||
|     yum list installed | ||||
| else | ||||
|     apt purge -y ${PYTHON_COMPILE_DEPS} || true > /dev/null 2>&1 | ||||
| fi | ||||
| # we don't need libpython*.a, and they're many megabytes | ||||
| find /opt/_internal -name '*.a' -print0 | xargs -0 rm -f | ||||
| # Strip what we can -- and ignore errors, because this just attempts to strip | ||||
| # *everything*, including non-ELF files: | ||||
| find /opt/_internal -type f -print0 \ | ||||
|     | xargs -0 -n1 strip --strip-unneeded 2>/dev/null || true | ||||
| # We do not need the Python test suites, or indeed the precompiled .pyc and | ||||
| # .pyo files. Partially cribbed from: | ||||
| #    https://github.com/docker-library/python/blob/master/3.4/slim/Dockerfile | ||||
| find /opt/_internal \ | ||||
|      \( -type d -a -name test -o -name tests \) \ | ||||
|   -o \( -type f -a -name '*.pyc' -o -name '*.pyo' \) \ | ||||
|   -print0 | xargs -0 rm -f | ||||
|  | ||||
| for PYTHON in /opt/python/*/bin/python; do | ||||
|     # Smoke test to make sure that our Pythons work, and do indeed detect as | ||||
|     # being manylinux compatible: | ||||
|     $PYTHON $MY_DIR/manylinux1-check.py | ||||
|     # Make sure that SSL cert checking works | ||||
|     $PYTHON $MY_DIR/ssl-check.py | ||||
| done | ||||
|  | ||||
| # Fix libc headers to remain compatible with C99 compilers. | ||||
| find /usr/include/ -type f -exec sed -i 's/\bextern _*inline_*\b/extern __inline __attribute__ ((__gnu_inline__))/g' {} + | ||||
|  | ||||
| # Now we can delete our built SSL | ||||
| rm -rf /usr/local/ssl | ||||
| @ -1,91 +0,0 @@ | ||||
| #!/bin/bash | ||||
| # Helper utilities for build | ||||
| # Script used only in CD pipeline | ||||
|  | ||||
| OPENSSL_DOWNLOAD_URL=https://www.openssl.org/source/old/1.1.1/ | ||||
| CURL_DOWNLOAD_URL=https://curl.askapache.com/download | ||||
|  | ||||
| AUTOCONF_DOWNLOAD_URL=https://ftp.gnu.org/gnu/autoconf | ||||
|  | ||||
|  | ||||
| function check_var { | ||||
|     if [ -z "$1" ]; then | ||||
|         echo "required variable not defined" | ||||
|         exit 1 | ||||
|     fi | ||||
| } | ||||
|  | ||||
|  | ||||
| function do_openssl_build { | ||||
|     ./config no-ssl2 no-shared -fPIC --prefix=/usr/local/ssl > /dev/null | ||||
|     make > /dev/null | ||||
|     make install > /dev/null | ||||
| } | ||||
|  | ||||
|  | ||||
| function check_sha256sum { | ||||
|     local fname=$1 | ||||
|     check_var ${fname} | ||||
|     local sha256=$2 | ||||
|     check_var ${sha256} | ||||
|  | ||||
|     echo "${sha256}  ${fname}" > ${fname}.sha256 | ||||
|     sha256sum -c ${fname}.sha256 | ||||
|     rm -f ${fname}.sha256 | ||||
| } | ||||
|  | ||||
|  | ||||
| function build_openssl { | ||||
|     local openssl_fname=$1 | ||||
|     check_var ${openssl_fname} | ||||
|     local openssl_sha256=$2 | ||||
|     check_var ${openssl_sha256} | ||||
|     check_var ${OPENSSL_DOWNLOAD_URL} | ||||
|     curl -sLO ${OPENSSL_DOWNLOAD_URL}/${openssl_fname}.tar.gz | ||||
|     check_sha256sum ${openssl_fname}.tar.gz ${openssl_sha256} | ||||
|     tar -xzf ${openssl_fname}.tar.gz | ||||
|     (cd ${openssl_fname} && do_openssl_build) | ||||
|     rm -rf ${openssl_fname} ${openssl_fname}.tar.gz | ||||
| } | ||||
|  | ||||
|  | ||||
| function do_curl_build { | ||||
|     LIBS=-ldl ./configure --with-ssl --disable-shared > /dev/null | ||||
|     make > /dev/null | ||||
|     make install > /dev/null | ||||
| } | ||||
|  | ||||
|  | ||||
| function build_curl { | ||||
|     local curl_fname=$1 | ||||
|     check_var ${curl_fname} | ||||
|     local curl_sha256=$2 | ||||
|     check_var ${curl_sha256} | ||||
|     check_var ${CURL_DOWNLOAD_URL} | ||||
|     curl -sLO ${CURL_DOWNLOAD_URL}/${curl_fname}.tar.bz2 | ||||
|     check_sha256sum ${curl_fname}.tar.bz2 ${curl_sha256} | ||||
|     tar -jxf ${curl_fname}.tar.bz2 | ||||
|     (cd ${curl_fname} && do_curl_build) | ||||
|     rm -rf ${curl_fname} ${curl_fname}.tar.bz2 | ||||
| } | ||||
|  | ||||
|  | ||||
| function do_standard_install { | ||||
|     ./configure > /dev/null | ||||
|     make > /dev/null | ||||
|     make install > /dev/null | ||||
| } | ||||
|  | ||||
|  | ||||
| function build_autoconf { | ||||
|     local autoconf_fname=$1 | ||||
|     check_var ${autoconf_fname} | ||||
|     local autoconf_sha256=$2 | ||||
|     check_var ${autoconf_sha256} | ||||
|     check_var ${AUTOCONF_DOWNLOAD_URL} | ||||
|     curl -sLO ${AUTOCONF_DOWNLOAD_URL}/${autoconf_fname}.tar.gz | ||||
|     check_sha256sum ${autoconf_fname}.tar.gz ${autoconf_sha256} | ||||
|     tar -zxf ${autoconf_fname}.tar.gz | ||||
|     (cd ${autoconf_fname} && do_standard_install) | ||||
|     rm -rf ${autoconf_fname} ${autoconf_fname}.tar.gz | ||||
| } | ||||
| @ -1,60 +0,0 @@ | ||||
| # Logic copied from PEP 513 | ||||
|  | ||||
|  | ||||
| def is_manylinux1_compatible(): | ||||
|     # Only Linux, and only x86-64 / i686 | ||||
|     from distutils.util import get_platform | ||||
|  | ||||
|     if get_platform() not in ["linux-x86_64", "linux-i686", "linux-s390x"]: | ||||
|         return False | ||||
|  | ||||
|     # Check for presence of _manylinux module | ||||
|     try: | ||||
|         import _manylinux | ||||
|  | ||||
|         return bool(_manylinux.manylinux1_compatible) | ||||
|     except (ImportError, AttributeError): | ||||
|         # Fall through to heuristic check below | ||||
|         pass | ||||
|  | ||||
|     # Check glibc version. CentOS 5 uses glibc 2.5. | ||||
|     return have_compatible_glibc(2, 5) | ||||
|  | ||||
|  | ||||
| def have_compatible_glibc(major, minimum_minor): | ||||
|     import ctypes | ||||
|  | ||||
|     process_namespace = ctypes.CDLL(None) | ||||
|     try: | ||||
|         gnu_get_libc_version = process_namespace.gnu_get_libc_version | ||||
|     except AttributeError: | ||||
|         # Symbol doesn't exist -> therefore, we are not linked to | ||||
|         # glibc. | ||||
|         return False | ||||
|  | ||||
|     # Call gnu_get_libc_version, which returns a string like "2.5". | ||||
|     gnu_get_libc_version.restype = ctypes.c_char_p | ||||
|     version_str = gnu_get_libc_version() | ||||
|     # py2 / py3 compatibility: | ||||
|     if not isinstance(version_str, str): | ||||
|         version_str = version_str.decode("ascii") | ||||
|  | ||||
|     # Parse string and check against requested version. | ||||
|     version = [int(piece) for piece in version_str.split(".")] | ||||
|     assert len(version) == 2 | ||||
|     if major != version[0]: | ||||
|         return False | ||||
|     if minimum_minor > version[1]: | ||||
|         return False | ||||
|     return True | ||||
|  | ||||
|  | ||||
| import sys | ||||
|  | ||||
|  | ||||
| if is_manylinux1_compatible(): | ||||
|     print(f"{sys.executable} is manylinux1 compatible") | ||||
|     sys.exit(0) | ||||
| else: | ||||
|     print(f"{sys.executable} is NOT manylinux1 compatible") | ||||
|     sys.exit(1) | ||||
| @ -1,35 +0,0 @@ | ||||
| # cf. https://github.com/pypa/manylinux/issues/53 | ||||
|  | ||||
| GOOD_SSL = "https://google.com" | ||||
| BAD_SSL = "https://self-signed.badssl.com" | ||||
|  | ||||
| import sys | ||||
|  | ||||
|  | ||||
| print("Testing SSL certificate checking for Python:", sys.version) | ||||
|  | ||||
| if sys.version_info[:2] < (2, 7) or sys.version_info[:2] < (3, 4): | ||||
|     print("This version never checks SSL certs; skipping tests") | ||||
|     sys.exit(0) | ||||
|  | ||||
| if sys.version_info[0] >= 3: | ||||
|     from urllib.request import urlopen | ||||
|  | ||||
|     EXC = OSError | ||||
| else: | ||||
|     from urllib import urlopen | ||||
|  | ||||
|     EXC = IOError | ||||
|  | ||||
| print(f"Connecting to {GOOD_SSL} should work") | ||||
| urlopen(GOOD_SSL) | ||||
| print("...it did, yay.") | ||||
|  | ||||
| print(f"Connecting to {BAD_SSL} should fail") | ||||
| try: | ||||
|     urlopen(BAD_SSL) | ||||
|     # If we get here then we failed: | ||||
|     print("...it DIDN'T!!!!!11!!1one!") | ||||
|     sys.exit(1) | ||||
| except EXC: | ||||
|     print("...it did, yay.") | ||||
| @ -15,7 +15,7 @@ click | ||||
| #Pinned versions: | ||||
| #test that import: | ||||
|  | ||||
| coremltools==5.0b5 ; python_version < "3.12" | ||||
| coremltools==5.0b5 | ||||
| #Description: Apple framework for ML integration | ||||
| #Pinned versions: 5.0b5 | ||||
| #test that import: | ||||
| @ -25,20 +25,10 @@ coremltools==5.0b5 ; python_version < "3.12" | ||||
| #Pinned versions: | ||||
| #test that import: | ||||
|  | ||||
| dill==0.3.7 | ||||
| #Description: dill extends pickle with serializing and de-serializing for most built-ins | ||||
| #Pinned versions: 0.3.7 | ||||
| #test that import: dynamo/test_replay_record.py test_dataloader.py test_datapipe.py test_serialization.py | ||||
|  | ||||
| expecttest==0.2.1 | ||||
| expecttest==0.1.3 | ||||
| #Description: method for writing tests where test framework auto populates | ||||
| # the expected output based on previous runs | ||||
| #Pinned versions: 0.2.1 | ||||
| #test that import: | ||||
|  | ||||
| fbscribelogger==0.1.6 | ||||
| #Description: write to scribe from authenticated jobs on CI | ||||
| #Pinned versions: 0.1.6 | ||||
| #Pinned versions: 0.1.3 | ||||
| #test that import: | ||||
|  | ||||
| flatbuffers==2.0 | ||||
| @ -57,11 +47,6 @@ junitparser==2.1.1 | ||||
| #Pinned versions: 2.1.1 | ||||
| #test that import: | ||||
|  | ||||
| lark==0.12.0 | ||||
| #Description: parser | ||||
| #Pinned versions: 0.12.0 | ||||
| #test that import: | ||||
|  | ||||
| librosa>=0.6.2 ; python_version < "3.11" | ||||
| #Description: A python package for music and audio analysis | ||||
| #Pinned versions: >=0.6.2 | ||||
| @ -77,11 +62,11 @@ librosa>=0.6.2 ; python_version < "3.11" | ||||
| #mkl-devel | ||||
| # see mkl | ||||
|  | ||||
| #mock | ||||
| #mock # breaks ci/circleci: docker-pytorch-linux-xenial-py3-clang5-android-ndk-r19c | ||||
| #Description: A testing library that allows you to replace parts of your | ||||
| #system under test with mock objects | ||||
| #Pinned versions: | ||||
| #test that import: test_modules.py, test_nn.py, | ||||
| #test that import: test_module_init.py, test_modules.py, test_nn.py, | ||||
| #test_testing.py | ||||
|  | ||||
| #MonkeyType # breaks pytorch-xla-linux-bionic-py3.7-clang8 | ||||
| @ -90,16 +75,16 @@ librosa>=0.6.2 ; python_version < "3.11" | ||||
| #Pinned versions: | ||||
| #test that import: | ||||
|  | ||||
| mypy==1.10.0 | ||||
| mypy==0.960 | ||||
| # Pin MyPy version because new errors are likely to appear with each release | ||||
| #Description: linter | ||||
| #Pinned versions: 1.10.0 | ||||
| #Pinned versions: 0.960 | ||||
| #test that import: test_typing.py, test_type_hints.py | ||||
|  | ||||
| networkx==2.8.8 | ||||
| networkx==2.6.3 | ||||
| #Description: creation, manipulation, and study of | ||||
| #the structure, dynamics, and functions of complex networks | ||||
| #Pinned versions: 2.8.8 | ||||
| #Pinned versions: 2.6.3 (latest version that works with Python 3.7+) | ||||
| #test that import: functorch | ||||
|  | ||||
| #ninja | ||||
| @ -109,7 +94,7 @@ networkx==2.8.8 | ||||
| #test that import: run_test.py, test_cpp_extensions_aot.py,test_determination.py | ||||
|  | ||||
| numba==0.49.0 ; python_version < "3.9" | ||||
| numba==0.55.2 ; python_version == "3.9" | ||||
| numba==0.54.1 ; python_version == "3.9" | ||||
| numba==0.55.2 ; python_version == "3.10" | ||||
| #Description: Just-In-Time Compiler for Numerical Functions | ||||
| #Pinned versions: 0.54.1, 0.49.0, <=0.49.1 | ||||
| @ -139,22 +124,9 @@ opt-einsum==3.3 | ||||
| #Pinned versions: 3.3 | ||||
| #test that import: test_linalg.py | ||||
|  | ||||
| optree==0.12.1 | ||||
| #Description: A library for tree manipulation | ||||
| #Pinned versions: 0.12.1 | ||||
| #test that import: test_vmap.py, test_aotdispatch.py, test_dynamic_shapes.py, | ||||
| #test_pytree.py, test_ops.py, test_control_flow.py, test_modules.py, | ||||
| #common_utils.py, test_eager_transforms.py, test_python_dispatch.py, | ||||
| #test_expanded_weights.py, test_decomp.py, test_overrides.py, test_masked.py, | ||||
| #test_ops.py, test_prims.py, test_subclass.py, test_functionalization.py, | ||||
| #test_schema_check.py, test_profiler_tree.py, test_meta.py, test_torchxla_num_output.py, | ||||
| #test_utils.py, test_proxy_tensor.py, test_memory_profiler.py, test_view_ops.py, | ||||
| #test_pointwise_ops.py, test_dtensor_ops.py, test_torchinductor.py, test_fx.py, | ||||
| #test_fake_tensor.py, test_mps.py | ||||
|  | ||||
| pillow==10.3.0 | ||||
| #pillow | ||||
| #Description:  Python Imaging Library fork | ||||
| #Pinned versions: 10.3.0 | ||||
| #Pinned versions: | ||||
| #test that import: | ||||
|  | ||||
| protobuf==3.20.2 | ||||
| @ -167,22 +139,27 @@ psutil | ||||
| #Pinned versions: | ||||
| #test that import: test_profiler.py, test_openmp.py, test_dataloader.py | ||||
|  | ||||
| pytest==7.3.2 | ||||
| pytest | ||||
| #Description: testing framework | ||||
| #Pinned versions: | ||||
| #test that import: test_typing.py, test_cpp_extensions_aot.py, run_test.py | ||||
|  | ||||
| pytest-xdist==3.3.1 | ||||
| pytest-xdist | ||||
| #Description: plugin for running pytest in parallel | ||||
| #Pinned versions: | ||||
| #test that import: | ||||
|  | ||||
| pytest-shard | ||||
| #Description: plugin spliting up tests in pytest | ||||
| #Pinned versions: | ||||
| #test that import: | ||||
|  | ||||
| pytest-flakefinder==1.1.0 | ||||
| #Description: plugin for rerunning tests a fixed number of times in pytest | ||||
| #Pinned versions: 1.1.0 | ||||
| #test that import: | ||||
|  | ||||
| pytest-rerunfailures>=10.3 | ||||
| pytest-rerunfailures | ||||
| #Description: plugin for rerunning failure tests in pytest | ||||
| #Pinned versions: | ||||
| #test that import: | ||||
| @ -202,7 +179,7 @@ xdoctest==1.1.0 | ||||
| #Pinned versions: 1.1.0 | ||||
| #test that import: | ||||
|  | ||||
| pygments==2.15.0 | ||||
| pygments==2.12.0 | ||||
| #Description: support doctest highlighting | ||||
| #Pinned versions: 2.12.0 | ||||
| #test that import: the doctests | ||||
| @ -222,8 +199,7 @@ pygments==2.15.0 | ||||
| #Pinned versions: 10.9.0 | ||||
| #test that import: | ||||
|  | ||||
| scikit-image==0.19.3 ; python_version < "3.10" | ||||
| scikit-image==0.22.0 ; python_version >= "3.10" | ||||
| scikit-image | ||||
| #Description: image processing routines | ||||
| #Pinned versions: | ||||
| #test that import: test_nn.py | ||||
| @ -233,11 +209,12 @@ scikit-image==0.22.0 ; python_version >= "3.10" | ||||
| #Pinned versions: 0.20.3 | ||||
| #test that import: | ||||
|  | ||||
| scipy==1.10.1 ; python_version <= "3.11" | ||||
| scipy==1.12.0 ; python_version == "3.12" | ||||
| scipy==1.6.3 ; python_version < "3.10" | ||||
| scipy==1.8.1 ; python_version == "3.10" | ||||
| scipy==1.9.3 ; python_version == "3.11" | ||||
| # Pin SciPy because of failing distribution tests (see #60347) | ||||
| #Description: scientific python | ||||
| #Pinned versions: 1.10.1 | ||||
| #Pinned versions: 1.6.3 | ||||
| #test that import: test_unary_ufuncs.py, test_torch.py,test_tensor_creation_ops.py | ||||
| #test_spectral_ops.py, test_sparse_csr.py, test_reductions.py,test_nn.py | ||||
| #test_linalg.py, test_binary_ufuncs.py | ||||
| @ -247,13 +224,12 @@ scipy==1.12.0 ; python_version == "3.12" | ||||
| #Pinned versions: | ||||
| #test that import: | ||||
|  | ||||
| tb-nightly==2.13.0a20230426 | ||||
| tb-nightly | ||||
| #Description: TensorBoard | ||||
| #Pinned versions: | ||||
| #test that import: | ||||
|  | ||||
| # needed by torchgen utils | ||||
| typing-extensions | ||||
| #typing-extensions | ||||
| #Description: type hints for python | ||||
| #Pinned versions: | ||||
| #test that import: | ||||
| @ -268,72 +244,17 @@ unittest-xml-reporting<=3.2.0,>=2.0.0 | ||||
| #Pinned versions: | ||||
| #test that import: | ||||
|  | ||||
| #lintrunner is supported on aarch64-linux only from 0.12.4 version | ||||
| lintrunner==0.12.5 | ||||
| #Description: all about linters! | ||||
| #Pinned versions: 0.12.5 | ||||
| lintrunner==0.9.2 | ||||
| #Description: all about linters | ||||
| #Pinned versions: 0.9.2 | ||||
| #test that import: | ||||
|  | ||||
| redis>=4.0.0 | ||||
| #Description: redis database | ||||
| #test that import: anything that tests OSS caching/mocking (inductor/test_codecache.py, inductor/test_max_autotune.py) | ||||
|  | ||||
| rockset==1.0.3 | ||||
| #Description: queries Rockset | ||||
| #Pinned versions: 1.0.3 | ||||
| #test that import: | ||||
|  | ||||
| ghstack==0.8.0 | ||||
| ghstack==0.7.1 | ||||
| #Description: ghstack tool | ||||
| #Pinned versions: 0.8.0 | ||||
| #test that import: | ||||
|  | ||||
| jinja2==3.1.4 | ||||
| #Description: jinja2 template engine | ||||
| #Pinned versions: 3.1.4 | ||||
| #test that import: | ||||
|  | ||||
| pytest-cpp==2.3.0 | ||||
| #Description: This is used by pytest to invoke C++ tests | ||||
| #Pinned versions: 2.3.0 | ||||
| #test that import: | ||||
|  | ||||
| z3-solver==4.12.2.0 | ||||
| #Description: The Z3 Theorem Prover Project | ||||
| #Pinned versions: | ||||
| #test that import: | ||||
|  | ||||
| tensorboard==2.13.0 | ||||
| #Description: Also included in .ci/docker/requirements-docs.txt | ||||
| #Pinned versions: | ||||
| #test that import: test_tensorboard | ||||
|  | ||||
| pywavelets==1.4.1 ; python_version < "3.12" | ||||
| pywavelets==1.5.0 ; python_version >= "3.12" | ||||
| #Description: This is a requirement of scikit-image, we need to pin | ||||
| # it here because 1.5.0 conflicts with numpy 1.21.2 used in CI | ||||
| #Pinned versions: 1.4.1 | ||||
| #test that import: | ||||
|  | ||||
| lxml==5.0.0 | ||||
| #Description: This is a requirement of unittest-xml-reporting | ||||
|  | ||||
| # Python-3.9 binaries | ||||
|  | ||||
| PyGithub==2.3.0 | ||||
|  | ||||
| sympy==1.12.1 ; python_version == "3.8" | ||||
| sympy==1.13.1 ; python_version >= "3.9" | ||||
| #Description: Required by coremltools, also pinned in .github/requirements/pip-requirements-macOS.txt | ||||
| #Pinned versions: | ||||
| #test that import: | ||||
|  | ||||
| onnx==1.16.1 | ||||
| #Description: Required by mypy and test_public_bindings.py when checking torch.onnx._internal | ||||
| #Pinned versions: | ||||
| #test that import: | ||||
|  | ||||
| onnxscript==0.1.0.dev20240817 | ||||
| #Description: Required by mypy and test_public_bindings.py when checking torch.onnx._internal | ||||
| #Pinned versions: | ||||
| #Pinned versions: 0.7.1 | ||||
| #test that import: | ||||
|  | ||||
| @ -1,49 +0,0 @@ | ||||
| sphinx==5.3.0 | ||||
| #Description: This is used to generate PyTorch docs | ||||
| #Pinned versions: 5.3.0 | ||||
| -e git+https://github.com/pytorch/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme | ||||
|  | ||||
| # TODO: sphinxcontrib.katex 0.9.0 adds a local KaTeX server to speed up pre-rendering | ||||
| # but it doesn't seem to work and hangs around idly. The initial thought is probably | ||||
| # something related to Docker setup. We can investigate this later | ||||
| sphinxcontrib.katex==0.8.6 | ||||
| #Description: This is used to generate PyTorch docs | ||||
| #Pinned versions: 0.8.6 | ||||
|  | ||||
| matplotlib==3.5.3 | ||||
| #Description: This is used to generate PyTorch docs | ||||
| #Pinned versions: 3.5.3 | ||||
|  | ||||
| tensorboard==2.13.0 | ||||
| #Description: This is used to generate PyTorch docs | ||||
| #Pinned versions: 2.13.0 | ||||
|  | ||||
| breathe==4.34.0 | ||||
| #Description: This is used to generate PyTorch C++ docs | ||||
| #Pinned versions: 4.34.0 | ||||
|  | ||||
| exhale==0.2.3 | ||||
| #Description: This is used to generate PyTorch C++ docs | ||||
| #Pinned versions: 0.2.3 | ||||
|  | ||||
| docutils==0.16 | ||||
| #Description: This is used to generate PyTorch C++ docs | ||||
| #Pinned versions: 0.16 | ||||
|  | ||||
| bs4==0.0.1 | ||||
| #Description: This is used to generate PyTorch C++ docs | ||||
| #Pinned versions: 0.0.1 | ||||
|  | ||||
| IPython==8.12.0 | ||||
| #Description: This is used to generate PyTorch functorch docs | ||||
| #Pinned versions: 8.12.0 | ||||
|  | ||||
| myst-nb==0.17.2 | ||||
| #Description: This is used to generate PyTorch functorch docs | ||||
| #Pinned versions: 0.13.2 | ||||
|  | ||||
| # The following are required to build torch.distributed.elastic.rendezvous.etcd* docs | ||||
| python-etcd==0.4.5 | ||||
| sphinx-copybutton==0.5.0 | ||||
| sphinx-panels==0.4.1 | ||||
| myst-parser==0.18.1 | ||||
| @ -1 +0,0 @@ | ||||
| 3.1.0 | ||||
| @ -56,11 +56,11 @@ RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi | ||||
| RUN rm install_db.sh | ||||
| ENV INSTALLED_DB ${DB} | ||||
|  | ||||
| # (optional) Install vision packages like OpenCV | ||||
| # (optional) Install vision packages like OpenCV and ffmpeg | ||||
| ARG VISION | ||||
| COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./ | ||||
| COPY ./common/install_vision.sh install_vision.sh | ||||
| RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi | ||||
| RUN rm install_vision.sh cache_vision_models.sh common_utils.sh | ||||
| RUN rm install_vision.sh | ||||
| ENV INSTALLED_VISION ${VISION} | ||||
|  | ||||
| # (optional) Install UCC | ||||
| @ -79,38 +79,12 @@ ENV OPENSSL_ROOT_DIR /opt/openssl | ||||
| RUN bash ./install_openssl.sh | ||||
| ENV OPENSSL_DIR /opt/openssl | ||||
|  | ||||
| ARG INDUCTOR_BENCHMARKS | ||||
| COPY ./common/install_inductor_benchmark_deps.sh install_inductor_benchmark_deps.sh | ||||
| COPY ./common/common_utils.sh common_utils.sh | ||||
| COPY ci_commit_pins/huggingface.txt huggingface.txt | ||||
| COPY ci_commit_pins/timm.txt timm.txt | ||||
| RUN if [ -n "${INDUCTOR_BENCHMARKS}" ]; then bash ./install_inductor_benchmark_deps.sh; fi | ||||
| RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface.txt | ||||
|  | ||||
| # (optional) Install non-default CMake version | ||||
| ARG CMAKE_VERSION | ||||
| COPY ./common/install_cmake.sh install_cmake.sh | ||||
| RUN if [ -n "${CMAKE_VERSION}" ]; then bash ./install_cmake.sh; fi | ||||
| RUN rm install_cmake.sh | ||||
|  | ||||
| ARG TRITON | ||||
| # Install triton, this needs to be done before sccache because the latter will | ||||
| # try to reach out to S3, which docker build runners don't have access | ||||
| COPY ./common/install_triton.sh install_triton.sh | ||||
| COPY ./common/common_utils.sh common_utils.sh | ||||
| COPY ci_commit_pins/triton.txt triton.txt | ||||
| COPY triton_version.txt triton_version.txt | ||||
| RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi | ||||
| RUN rm install_triton.sh common_utils.sh triton.txt triton_version.txt | ||||
|  | ||||
| ARG HALIDE | ||||
| # Build and install halide | ||||
| COPY ./common/install_halide.sh install_halide.sh | ||||
| COPY ./common/common_utils.sh common_utils.sh | ||||
| COPY ci_commit_pins/halide.txt halide.txt | ||||
| RUN if [ -n "${HALIDE}" ]; then bash ./install_halide.sh; fi | ||||
| RUN rm install_halide.sh common_utils.sh halide.txt | ||||
|  | ||||
| # Install ccache/sccache (do this last, so we get priority in PATH) | ||||
| COPY ./common/install_cache.sh install_cache.sh | ||||
| ENV PATH /opt/cache/bin:$PATH | ||||
| @ -147,26 +121,12 @@ COPY --from=pytorch/llvm:9.0.1 /opt/llvm /opt/llvm | ||||
| ARG CUDNN_VERSION | ||||
| ARG CUDA_VERSION | ||||
| COPY ./common/install_cudnn.sh install_cudnn.sh | ||||
| RUN if [ -n "${CUDNN_VERSION}" ]; then bash install_cudnn.sh; fi | ||||
| RUN if [ "${CUDNN_VERSION}" -eq 8 ]; then bash install_cudnn.sh; fi | ||||
| RUN rm install_cudnn.sh | ||||
|  | ||||
| # Install CUSPARSELT | ||||
| ARG CUDA_VERSION | ||||
| COPY ./common/install_cusparselt.sh install_cusparselt.sh | ||||
| RUN bash install_cusparselt.sh | ||||
| RUN rm install_cusparselt.sh | ||||
|  | ||||
| # Install CUDSS | ||||
| ARG CUDA_VERSION | ||||
| COPY ./common/install_cudss.sh install_cudss.sh | ||||
| RUN bash install_cudss.sh | ||||
| RUN rm install_cudss.sh | ||||
|  | ||||
| # Delete /usr/local/cuda-11.X/cuda-11.X symlinks | ||||
| RUN if [ -h /usr/local/cuda-11.6/cuda-11.6 ]; then rm /usr/local/cuda-11.6/cuda-11.6; fi | ||||
| RUN if [ -h /usr/local/cuda-11.7/cuda-11.7 ]; then rm /usr/local/cuda-11.7/cuda-11.7; fi | ||||
| RUN if [ -h /usr/local/cuda-12.1/cuda-12.1 ]; then rm /usr/local/cuda-12.1/cuda-12.1; fi | ||||
| RUN if [ -h /usr/local/cuda-12.4/cuda-12.4 ]; then rm /usr/local/cuda-12.4/cuda-12.4; fi | ||||
|  | ||||
| USER jenkins | ||||
| CMD ["bash"] | ||||
|  | ||||
| @ -53,11 +53,11 @@ RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi | ||||
| RUN rm install_db.sh | ||||
| ENV INSTALLED_DB ${DB} | ||||
|  | ||||
| # (optional) Install vision packages like OpenCV | ||||
| # (optional) Install vision packages like OpenCV and ffmpeg | ||||
| ARG VISION | ||||
| COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./ | ||||
| COPY ./common/install_vision.sh install_vision.sh | ||||
| RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi | ||||
| RUN rm install_vision.sh cache_vision_models.sh common_utils.sh | ||||
| RUN rm install_vision.sh | ||||
| ENV INSTALLED_VISION ${VISION} | ||||
|  | ||||
| # Install rocm | ||||
| @ -68,9 +68,6 @@ RUN rm install_rocm.sh | ||||
| COPY ./common/install_rocm_magma.sh install_rocm_magma.sh | ||||
| RUN bash ./install_rocm_magma.sh | ||||
| RUN rm install_rocm_magma.sh | ||||
| ADD ./common/install_miopen.sh install_miopen.sh | ||||
| RUN bash ./install_miopen.sh ${ROCM_VERSION} && rm install_miopen.sh | ||||
| ENV ROCM_PATH /opt/rocm | ||||
| ENV PATH /opt/rocm/bin:$PATH | ||||
| ENV PATH /opt/rocm/hcc/bin:$PATH | ||||
| ENV PATH /opt/rocm/hip/bin:$PATH | ||||
| @ -80,11 +77,6 @@ ENV MAGMA_HOME /opt/rocm/magma | ||||
| ENV LANG C.UTF-8 | ||||
| ENV LC_ALL C.UTF-8 | ||||
|  | ||||
| # Install amdsmi | ||||
| COPY ./common/install_amdsmi.sh install_amdsmi.sh | ||||
| RUN bash ./install_amdsmi.sh | ||||
| RUN rm install_amdsmi.sh | ||||
|  | ||||
| # (optional) Install non-default CMake version | ||||
| ARG CMAKE_VERSION | ||||
| COPY ./common/install_cmake.sh install_cmake.sh | ||||
| @ -97,23 +89,6 @@ COPY ./common/install_ninja.sh install_ninja.sh | ||||
| RUN if [ -n "${NINJA_VERSION}" ]; then bash ./install_ninja.sh; fi | ||||
| RUN rm install_ninja.sh | ||||
|  | ||||
| ARG TRITON | ||||
| # Install triton, this needs to be done before sccache because the latter will | ||||
| # try to reach out to S3, which docker build runners don't have access | ||||
| COPY ./common/install_triton.sh install_triton.sh | ||||
| COPY ./common/common_utils.sh common_utils.sh | ||||
| COPY ci_commit_pins/triton.txt triton.txt | ||||
| COPY triton_version.txt triton_version.txt | ||||
| RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi | ||||
| RUN rm install_triton.sh common_utils.sh triton.txt triton_version.txt | ||||
|  | ||||
| # Install AOTriton | ||||
| COPY ./aotriton_version.txt aotriton_version.txt | ||||
| COPY ./common/common_utils.sh common_utils.sh | ||||
| COPY ./common/install_aotriton.sh install_aotriton.sh | ||||
| RUN ["/bin/bash", "-c", "./install_aotriton.sh /opt/rocm && rm -rf install_aotriton.sh aotriton_version.txt common_utils.sh"] | ||||
| ENV AOTRITON_INSTALLED_PREFIX /opt/rocm/aotriton | ||||
|  | ||||
| # Install ccache/sccache (do this last, so we get priority in PATH) | ||||
| COPY ./common/install_cache.sh install_cache.sh | ||||
| ENV PATH /opt/cache/bin:$PATH | ||||
| @ -123,8 +98,5 @@ RUN bash ./install_cache.sh && rm install_cache.sh | ||||
| ARG BUILD_ENVIRONMENT | ||||
| ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT} | ||||
|  | ||||
| # Install LLVM dev version (Defined in the pytorch/builder github repository) | ||||
| COPY --from=pytorch/llvm:9.0.1 /opt/llvm /opt/llvm | ||||
|  | ||||
| USER jenkins | ||||
| CMD ["bash"] | ||||
|  | ||||
| @ -1,119 +0,0 @@ | ||||
| ARG UBUNTU_VERSION | ||||
|  | ||||
| FROM ubuntu:${UBUNTU_VERSION} | ||||
|  | ||||
| ARG UBUNTU_VERSION | ||||
|  | ||||
| ENV DEBIAN_FRONTEND noninteractive | ||||
|  | ||||
| ARG CLANG_VERSION | ||||
|  | ||||
| # Install common dependencies (so that this step can be cached separately) | ||||
| COPY ./common/install_base.sh install_base.sh | ||||
| RUN bash ./install_base.sh && rm install_base.sh | ||||
|  | ||||
| # Install clang | ||||
| ARG LLVMDEV | ||||
| COPY ./common/install_clang.sh install_clang.sh | ||||
| RUN bash ./install_clang.sh && rm install_clang.sh | ||||
|  | ||||
| # Install user | ||||
| COPY ./common/install_user.sh install_user.sh | ||||
| RUN bash ./install_user.sh && rm install_user.sh | ||||
|  | ||||
| # Install katex | ||||
| ARG KATEX | ||||
| COPY ./common/install_docs_reqs.sh install_docs_reqs.sh | ||||
| RUN bash ./install_docs_reqs.sh && rm install_docs_reqs.sh | ||||
|  | ||||
| # Install conda and other packages (e.g., numpy, pytest) | ||||
| ARG ANACONDA_PYTHON_VERSION | ||||
| ARG CONDA_CMAKE | ||||
| ARG DOCS | ||||
| ARG BUILD_ENVIRONMENT | ||||
| ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION | ||||
| ENV PATH /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/bin:/opt/conda/bin:$PATH | ||||
| ENV DOCS=$DOCS | ||||
| COPY requirements-ci.txt requirements-docs.txt /opt/conda/ | ||||
| COPY ./common/install_conda.sh install_conda.sh | ||||
| COPY ./common/common_utils.sh common_utils.sh | ||||
| RUN bash ./install_conda.sh && rm install_conda.sh common_utils.sh /opt/conda/requirements-ci.txt /opt/conda/requirements-docs.txt | ||||
|  | ||||
| # Install gcc | ||||
| ARG GCC_VERSION | ||||
| COPY ./common/install_gcc.sh install_gcc.sh | ||||
| RUN bash ./install_gcc.sh && rm install_gcc.sh | ||||
|  | ||||
| # Install lcov for C++ code coverage | ||||
| COPY ./common/install_lcov.sh install_lcov.sh | ||||
| RUN  bash ./install_lcov.sh && rm install_lcov.sh | ||||
|  | ||||
| COPY ./common/install_openssl.sh install_openssl.sh | ||||
| RUN bash ./install_openssl.sh | ||||
| ENV OPENSSL_ROOT_DIR /opt/openssl | ||||
| ENV OPENSSL_DIR /opt/openssl | ||||
| RUN rm install_openssl.sh | ||||
|  | ||||
| ARG INDUCTOR_BENCHMARKS | ||||
| COPY ./common/install_inductor_benchmark_deps.sh install_inductor_benchmark_deps.sh | ||||
| COPY ./common/common_utils.sh common_utils.sh | ||||
| COPY ci_commit_pins/huggingface.txt huggingface.txt | ||||
| COPY ci_commit_pins/timm.txt timm.txt | ||||
| RUN if [ -n "${INDUCTOR_BENCHMARKS}" ]; then bash ./install_inductor_benchmark_deps.sh; fi | ||||
| RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface.txt | ||||
|  | ||||
| # Install XPU Dependencies | ||||
| ARG XPU_VERSION | ||||
| COPY ./common/install_xpu.sh install_xpu.sh | ||||
| RUN bash ./install_xpu.sh && rm install_xpu.sh | ||||
|  | ||||
| ARG TRITON | ||||
| # Install triton, this needs to be done before sccache because the latter will | ||||
| # try to reach out to S3, which docker build runners don't have access | ||||
| COPY ./common/install_triton.sh install_triton.sh | ||||
| COPY ./common/common_utils.sh common_utils.sh | ||||
| COPY ci_commit_pins/triton-xpu.txt triton-xpu.txt | ||||
| COPY triton_version.txt triton_version.txt | ||||
| RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi | ||||
| RUN rm install_triton.sh common_utils.sh triton-xpu.txt triton_version.txt | ||||
|  | ||||
| # (optional) Install database packages like LMDB and LevelDB | ||||
| ARG DB | ||||
| COPY ./common/install_db.sh install_db.sh | ||||
| RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi | ||||
| RUN rm install_db.sh | ||||
| ENV INSTALLED_DB ${DB} | ||||
|  | ||||
| # (optional) Install vision packages like OpenCV | ||||
| ARG VISION | ||||
| COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./ | ||||
| RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi | ||||
| RUN rm install_vision.sh cache_vision_models.sh common_utils.sh | ||||
| ENV INSTALLED_VISION ${VISION} | ||||
|  | ||||
| # (optional) Install non-default CMake version | ||||
| ARG CMAKE_VERSION | ||||
| COPY ./common/install_cmake.sh install_cmake.sh | ||||
| RUN if [ -n "${CMAKE_VERSION}" ]; then bash ./install_cmake.sh; fi | ||||
| RUN rm install_cmake.sh | ||||
|  | ||||
| # (optional) Install non-default Ninja version | ||||
| ARG NINJA_VERSION | ||||
| COPY ./common/install_ninja.sh install_ninja.sh | ||||
| RUN if [ -n "${NINJA_VERSION}" ]; then bash ./install_ninja.sh; fi | ||||
| RUN rm install_ninja.sh | ||||
|  | ||||
| # Install ccache/sccache (do this last, so we get priority in PATH) | ||||
| COPY ./common/install_cache.sh install_cache.sh | ||||
| ENV PATH /opt/cache/bin:$PATH | ||||
| RUN bash ./install_cache.sh && rm install_cache.sh | ||||
|  | ||||
| # Include BUILD_ENVIRONMENT environment variable in image | ||||
| ARG BUILD_ENVIRONMENT | ||||
| ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT} | ||||
|  | ||||
| # Install LLVM dev version (Defined in the pytorch/builder github repository) | ||||
| COPY --from=pytorch/llvm:9.0.1 /opt/llvm /opt/llvm | ||||
|  | ||||
| USER jenkins | ||||
| CMD ["bash"] | ||||
| @ -17,6 +17,13 @@ ARG LLVMDEV | ||||
| COPY ./common/install_clang.sh install_clang.sh | ||||
| RUN bash ./install_clang.sh && rm install_clang.sh | ||||
|  | ||||
| # (optional) Install thrift. | ||||
| ARG THRIFT | ||||
| COPY ./common/install_thrift.sh install_thrift.sh | ||||
| RUN if [ -n "${THRIFT}" ]; then bash ./install_thrift.sh; fi | ||||
| RUN rm install_thrift.sh | ||||
| ENV INSTALLED_THRIFT ${THRIFT} | ||||
|  | ||||
| # Install user | ||||
| COPY ./common/install_user.sh install_user.sh | ||||
| RUN bash ./install_user.sh && rm install_user.sh | ||||
| @ -29,15 +36,12 @@ RUN bash ./install_docs_reqs.sh && rm install_docs_reqs.sh | ||||
| # Install conda and other packages (e.g., numpy, pytest) | ||||
| ARG ANACONDA_PYTHON_VERSION | ||||
| ARG CONDA_CMAKE | ||||
| ARG DOCS | ||||
| ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION | ||||
| ENV PATH /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/bin:/opt/conda/bin:$PATH | ||||
| ENV DOCS=$DOCS | ||||
| COPY requirements-ci.txt requirements-docs.txt /opt/conda/ | ||||
| COPY requirements-ci.txt /opt/conda/requirements-ci.txt | ||||
| COPY ./common/install_conda.sh install_conda.sh | ||||
| COPY ./common/common_utils.sh common_utils.sh | ||||
| RUN bash ./install_conda.sh && rm install_conda.sh common_utils.sh /opt/conda/requirements-ci.txt /opt/conda/requirements-docs.txt | ||||
| RUN if [ -n "${UNINSTALL_DILL}" ]; then pip uninstall -y dill; fi | ||||
| RUN bash ./install_conda.sh && rm install_conda.sh common_utils.sh /opt/conda/requirements-ci.txt | ||||
|  | ||||
| # Install gcc | ||||
| ARG GCC_VERSION | ||||
| @ -50,7 +54,7 @@ RUN  bash ./install_lcov.sh && rm install_lcov.sh | ||||
|  | ||||
| # Install cuda and cudnn | ||||
| ARG CUDA_VERSION | ||||
| COPY ./common/install_cuda.sh install_cuda.sh | ||||
| RUN wget -q https://raw.githubusercontent.com/pytorch/builder/main/common/install_cuda.sh -O install_cuda.sh | ||||
| RUN bash ./install_cuda.sh ${CUDA_VERSION} && rm install_cuda.sh | ||||
| ENV DESIRED_CUDA ${CUDA_VERSION} | ||||
| ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:$PATH | ||||
| @ -80,22 +84,22 @@ RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi | ||||
| RUN rm install_db.sh | ||||
| ENV INSTALLED_DB ${DB} | ||||
|  | ||||
| # (optional) Install vision packages like OpenCV | ||||
| # (optional) Install vision packages like OpenCV and ffmpeg | ||||
| ARG VISION | ||||
| COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./ | ||||
| COPY ./common/install_vision.sh install_vision.sh | ||||
| RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi | ||||
| RUN rm install_vision.sh cache_vision_models.sh common_utils.sh | ||||
| RUN rm install_vision.sh | ||||
| ENV INSTALLED_VISION ${VISION} | ||||
|  | ||||
| # (optional) Install Android NDK | ||||
| ARG ANDROID | ||||
| ARG ANDROID_NDK | ||||
| ARG GRADLE_VERSION | ||||
| COPY ./common/install_android.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./ | ||||
| COPY ./common/install_android.sh install_android.sh | ||||
| COPY ./android/AndroidManifest.xml AndroidManifest.xml | ||||
| COPY ./android/build.gradle build.gradle | ||||
| RUN if [ -n "${ANDROID}" ]; then bash ./install_android.sh; fi | ||||
| RUN rm install_android.sh cache_vision_models.sh common_utils.sh | ||||
| RUN rm install_android.sh | ||||
| RUN rm AndroidManifest.xml | ||||
| RUN rm build.gradle | ||||
| ENV INSTALLED_ANDROID ${ANDROID} | ||||
| @ -130,58 +134,10 @@ ENV OPENSSL_ROOT_DIR /opt/openssl | ||||
| ENV OPENSSL_DIR /opt/openssl | ||||
| RUN rm install_openssl.sh | ||||
|  | ||||
| ARG INDUCTOR_BENCHMARKS | ||||
| COPY ./common/install_inductor_benchmark_deps.sh install_inductor_benchmark_deps.sh | ||||
| COPY ./common/common_utils.sh common_utils.sh | ||||
| COPY ci_commit_pins/huggingface.txt huggingface.txt | ||||
| COPY ci_commit_pins/timm.txt timm.txt | ||||
| RUN if [ -n "${INDUCTOR_BENCHMARKS}" ]; then bash ./install_inductor_benchmark_deps.sh; fi | ||||
| RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface.txt | ||||
|  | ||||
| ARG TRITON | ||||
| # Install triton, this needs to be done before sccache because the latter will | ||||
| # try to reach out to S3, which docker build runners don't have access | ||||
| COPY ./common/install_triton.sh install_triton.sh | ||||
| COPY ./common/common_utils.sh common_utils.sh | ||||
| COPY ci_commit_pins/triton.txt triton.txt | ||||
| RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi | ||||
| RUN rm install_triton.sh common_utils.sh triton.txt | ||||
|  | ||||
| ARG EXECUTORCH | ||||
| # Build and install executorch | ||||
| COPY ./common/install_executorch.sh install_executorch.sh | ||||
| COPY ./common/common_utils.sh common_utils.sh | ||||
| COPY ci_commit_pins/executorch.txt executorch.txt | ||||
| RUN if [ -n "${EXECUTORCH}" ]; then bash ./install_executorch.sh; fi | ||||
| RUN rm install_executorch.sh common_utils.sh executorch.txt | ||||
|  | ||||
| ARG HALIDE | ||||
| # Build and install halide | ||||
| COPY ./common/install_halide.sh install_halide.sh | ||||
| COPY ./common/common_utils.sh common_utils.sh | ||||
| COPY ci_commit_pins/halide.txt halide.txt | ||||
| RUN if [ -n "${HALIDE}" ]; then bash ./install_halide.sh; fi | ||||
| RUN rm install_halide.sh common_utils.sh halide.txt | ||||
|  | ||||
| ARG ONNX | ||||
| # Install ONNX dependencies | ||||
| COPY ./common/install_onnx.sh ./common/common_utils.sh ./ | ||||
| RUN if [ -n "${ONNX}" ]; then bash ./install_onnx.sh; fi | ||||
| RUN rm install_onnx.sh common_utils.sh | ||||
|  | ||||
| # (optional) Build ACL | ||||
| ARG ACL | ||||
| COPY ./common/install_acl.sh install_acl.sh | ||||
| RUN if [ -n "${ACL}" ]; then bash ./install_acl.sh; fi | ||||
| RUN rm install_acl.sh | ||||
| ENV INSTALLED_ACL ${ACL} | ||||
|  | ||||
| # Install ccache/sccache (do this last, so we get priority in PATH) | ||||
| ARG SKIP_SCCACHE_INSTALL | ||||
| COPY ./common/install_cache.sh install_cache.sh | ||||
| ENV PATH /opt/cache/bin:$PATH | ||||
| RUN if [ -z "${SKIP_SCCACHE_INSTALL}" ]; then bash ./install_cache.sh; fi | ||||
| RUN rm install_cache.sh | ||||
| RUN bash ./install_cache.sh && rm install_cache.sh | ||||
|  | ||||
| # Add jni.h for java host build | ||||
| COPY ./common/install_jni.sh install_jni.sh | ||||
| @ -198,9 +154,7 @@ ARG BUILD_ENVIRONMENT | ||||
| ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT} | ||||
|  | ||||
| # Install LLVM dev version (Defined in the pytorch/builder github repository) | ||||
| ARG SKIP_LLVM_SRC_BUILD_INSTALL | ||||
| COPY --from=pytorch/llvm:9.0.1 /opt/llvm /opt/llvm | ||||
| RUN if [ -n "${SKIP_LLVM_SRC_BUILD_INSTALL}" ]; then set -eu; rm -rf /opt/llvm; fi | ||||
|  | ||||
| # AWS specific CUDA build guidance | ||||
| ENV TORCH_CUDA_ARCH_LIST Maxwell | ||||
|  | ||||
| @ -1,9 +1,5 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| source "$(dirname "${BASH_SOURCE[0]}")/../pytorch/common_utils.sh" | ||||
|  | ||||
| LOCAL_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) | ||||
| ROOT_DIR=$(cd "$LOCAL_DIR"/../.. && pwd) | ||||
| TEST_DIR="$ROOT_DIR/test" | ||||
|  | ||||
| @ -3,27 +3,72 @@ | ||||
| # shellcheck source=./common.sh | ||||
| source "$(dirname "${BASH_SOURCE[0]}")/common.sh" | ||||
|  | ||||
| # Workaround for dind-rootless userid mapping (https://github.com/pytorch/ci-infra/issues/96) | ||||
| WORKSPACE_ORIGINAL_OWNER_ID=$(stat -c '%u' "/var/lib/jenkins/workspace") | ||||
| cleanup_workspace() { | ||||
|   echo "sudo may print the following warning message that can be ignored. The chown command will still run." | ||||
|   echo "    sudo: setrlimit(RLIMIT_STACK): Operation not permitted" | ||||
|   echo "For more details refer to https://github.com/sudo-project/sudo/issues/42" | ||||
|   sudo chown -R "$WORKSPACE_ORIGINAL_OWNER_ID" /var/lib/jenkins/workspace | ||||
| } | ||||
| # Disable shellcheck SC2064 as we want to parse the original owner immediately. | ||||
| # shellcheck disable=SC2064 | ||||
| trap_add cleanup_workspace EXIT | ||||
| sudo chown -R jenkins /var/lib/jenkins/workspace | ||||
| git config --global --add safe.directory /var/lib/jenkins/workspace | ||||
| if [[ ${BUILD_ENVIRONMENT} == *onnx* ]]; then | ||||
|   pip install click mock tabulate networkx==2.0 | ||||
|   pip -q install --user "file:///var/lib/jenkins/workspace/third_party/onnx#egg=onnx" | ||||
| fi | ||||
|  | ||||
| # Skip tests in environments where they are not built/applicable | ||||
| if [[ "${BUILD_ENVIRONMENT}" == *-android* ]]; then | ||||
|   echo 'Skipping tests' | ||||
|   exit 0 | ||||
| fi | ||||
| if [[ "${BUILD_ENVIRONMENT}" == *-rocm* ]]; then | ||||
|   # temporary to locate some kernel issues on the CI nodes | ||||
|   export HSAKMT_DEBUG_LEVEL=4 | ||||
| fi | ||||
| # These additional packages are needed for circleci ROCm builds. | ||||
| if [[ $BUILD_ENVIRONMENT == *rocm* ]]; then | ||||
|     # Need networkx 2.0 because bellmand_ford was moved in 2.1 . Scikit-image by | ||||
|     # defaults installs the most recent networkx version, so we install this lower | ||||
|     # version explicitly before scikit-image pulls it in as a dependency | ||||
|     pip install networkx==2.0 | ||||
|     # click - onnx | ||||
|     pip install --progress-bar off click protobuf tabulate virtualenv mock typing-extensions | ||||
| fi | ||||
|  | ||||
| ################################################################################ | ||||
| # Python tests # | ||||
| ################################################################################ | ||||
| if [[ "$BUILD_ENVIRONMENT" == *cmake* ]]; then | ||||
|   exit 0 | ||||
| fi | ||||
|  | ||||
| # If pip is installed as root, we must use sudo. | ||||
| # CircleCI docker images could install conda as jenkins user, or use the OS's python package. | ||||
| PIP=$(which pip) | ||||
| PIP_USER=$(stat --format '%U' $PIP) | ||||
| CURRENT_USER=$(id -u -n) | ||||
| if [[ "$PIP_USER" = root && "$CURRENT_USER" != root ]]; then | ||||
|   MAYBE_SUDO=sudo | ||||
| fi | ||||
|  | ||||
| # Uninstall pre-installed hypothesis and coverage to use an older version as newer | ||||
| # versions remove the timeout parameter from settings which ideep/conv_transpose_test.py uses | ||||
| $MAYBE_SUDO pip -q uninstall -y hypothesis | ||||
| $MAYBE_SUDO pip -q uninstall -y coverage | ||||
|  | ||||
| # "pip install hypothesis==3.44.6" from official server is unreliable on | ||||
| # CircleCI, so we host a copy on S3 instead | ||||
| $MAYBE_SUDO pip -q install attrs==18.1.0 -f https://s3.amazonaws.com/ossci-linux/wheels/attrs-18.1.0-py2.py3-none-any.whl | ||||
| $MAYBE_SUDO pip -q install coverage==4.5.1 -f https://s3.amazonaws.com/ossci-linux/wheels/coverage-4.5.1-cp36-cp36m-macosx_10_12_x86_64.whl | ||||
| $MAYBE_SUDO pip -q install hypothesis==4.57.1 | ||||
|  | ||||
| ############## | ||||
| # ONNX tests # | ||||
| ############## | ||||
| if [[ "$BUILD_ENVIRONMENT" == *onnx* ]]; then | ||||
|   # TODO: This can be removed later once vision is also part of the Docker image | ||||
|   pip install -q --user --no-use-pep517 "git+https://github.com/pytorch/vision.git@$(cat .github/ci_commit_pins/vision.txt)" | ||||
|   pip install -q --user transformers==4.25.1 | ||||
|   pip install -q --user ninja flatbuffers==2.0 numpy==1.22.4 onnxruntime==1.14.0 beartype==0.10.4 | ||||
|   # TODO: change this when onnx 1.13.1 is released. | ||||
|   pip install --no-use-pep517 'onnx @ git+https://github.com/onnx/onnx@e192ba01e438d22ca2dedd7956e28e3551626c91' | ||||
|   # TODO: change this when onnx-script is on testPypi | ||||
|   pip install 'onnx-script @ git+https://github.com/microsoft/onnx-script@a71e35bcd72537bf7572536ee57250a0c0488bf6' | ||||
|   # numba requires numpy <= 1.20, onnxruntime requires numpy >= 1.21. | ||||
|   # We don't actually need it for our tests, but it's imported if it's present, so uninstall. | ||||
|   pip uninstall -q --yes numba | ||||
|   # JIT C++ extensions require ninja, so put it into PATH. | ||||
|   export PATH="/var/lib/jenkins/.local/bin:$PATH" | ||||
|   # NB: ONNX test is fast (~15m) so it's ok to retry it few more times to avoid any flaky issue, we | ||||
|   # need to bring this to the standard PyTorch run_test eventually. The issue will be tracked in | ||||
|   # https://github.com/pytorch/pytorch/issues/98626 | ||||
|   "$ROOT_DIR/scripts/onnx/test.sh" | ||||
| fi | ||||
|  | ||||
| @ -1 +1,42 @@ | ||||
| This directory contains scripts for our continuous integration. | ||||
|  | ||||
| One important thing to keep in mind when reading the scripts here is | ||||
| that they are all based off of Docker images, which we build for each of | ||||
| the various system configurations we want to run on Jenkins.  This means | ||||
| it is very easy to run these tests yourself: | ||||
|  | ||||
| 1. Figure out what Docker image you want.  The general template for our | ||||
|    images look like: | ||||
|    ``registry.pytorch.org/pytorch/pytorch-$BUILD_ENVIRONMENT:$DOCKER_VERSION``, | ||||
|    where ``$BUILD_ENVIRONMENT`` is one of the build environments | ||||
|    enumerated in | ||||
|    [pytorch-dockerfiles](https://github.com/pytorch/pytorch/blob/master/.ci/docker/build.sh). The dockerfile used by jenkins can be found under the `.ci` [directory](https://github.com/pytorch/pytorch/blob/master/.ci/docker) | ||||
|  | ||||
| 2. Run ``docker run -it -u jenkins $DOCKER_IMAGE``, clone PyTorch and | ||||
|    run one of the scripts in this directory. | ||||
|  | ||||
| The Docker images are designed so that any "reasonable" build commands | ||||
| will work; if you look in [build.sh](build.sh) you will see that it is a | ||||
| very simple script.  This is intentional.  Idiomatic build instructions | ||||
| should work inside all of our Docker images.  You can tweak the commands | ||||
| however you need (e.g., in case you want to rebuild with DEBUG, or rerun | ||||
| the build with higher verbosity, etc.). | ||||
|  | ||||
| We have to do some work to make this so.  Here is a summary of the | ||||
| mechanisms we use: | ||||
|  | ||||
| - We install binaries to directories like `/usr/local/bin` which | ||||
|   are automatically part of your PATH. | ||||
|  | ||||
| - We add entries to the PATH using Docker ENV variables (so | ||||
|   they apply when you enter Docker) and `/etc/environment` (so they | ||||
|   continue to apply even if you sudo), instead of modifying | ||||
|   `PATH` in our build scripts. | ||||
|  | ||||
| - We use `/etc/ld.so.conf.d` to register directories containing | ||||
|   shared libraries, instead of modifying `LD_LIBRARY_PATH` in our | ||||
|   build scripts. | ||||
|  | ||||
| - We reroute well known paths like `/usr/bin/gcc` to alternate | ||||
|   implementations with `update-alternatives`, instead of setting | ||||
|   `CC` and `CXX` in our implementations. | ||||
|  | ||||
							
								
								
									
										42
									
								
								.ci/pytorch/build-asan.sh
									
									
									
									
									
										Executable file
									
								
							
							
						
						
									
										42
									
								
								.ci/pytorch/build-asan.sh
									
									
									
									
									
										Executable file
									
								
							| @ -0,0 +1,42 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| # Required environment variable: $BUILD_ENVIRONMENT | ||||
| # (This is set by default in the Docker images we build, so you don't | ||||
| # need to set it yourself. | ||||
|  | ||||
| # shellcheck source=./common.sh | ||||
| source "$(dirname "${BASH_SOURCE[0]}")/common.sh" | ||||
| # shellcheck source=./common-build.sh | ||||
| source "$(dirname "${BASH_SOURCE[0]}")/common-build.sh" | ||||
|  | ||||
| echo "Clang version:" | ||||
| clang --version | ||||
|  | ||||
| python tools/stats/export_test_times.py | ||||
|  | ||||
| # detect_leaks=0: Python is very leaky, so we need suppress it | ||||
| # symbolize=1: Gives us much better errors when things go wrong | ||||
| export ASAN_OPTIONS=detect_leaks=0:detect_stack_use_after_return=1:symbolize=1:detect_odr_violation=0 | ||||
| if [ -n "$(which conda)" ]; then | ||||
|   export CMAKE_PREFIX_PATH=/opt/conda | ||||
| fi | ||||
|  | ||||
| # TODO: Make the ASAN flags a centralized env var and unify with USE_ASAN option | ||||
| CC="clang" CXX="clang++" LDSHARED="clang --shared" \ | ||||
|   CFLAGS="-fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all -fsanitize-address-use-after-scope -shared-libasan" \ | ||||
|   USE_ASAN=1 USE_CUDA=0 USE_MKLDNN=0 \ | ||||
|   python setup.py bdist_wheel | ||||
|   pip_install_whl "$(echo dist/*.whl)" | ||||
|  | ||||
| # Test building via the sdist source tarball | ||||
| python setup.py sdist | ||||
| mkdir -p /tmp/tmp | ||||
| pushd /tmp/tmp | ||||
| tar zxf "$(dirname "${BASH_SOURCE[0]}")/../../dist/"*.tar.gz | ||||
| cd torch-* | ||||
| python setup.py build --cmake-only | ||||
| popd | ||||
|  | ||||
| print_sccache_stats | ||||
|  | ||||
| assert_git_not_dirty | ||||
							
								
								
									
										29
									
								
								.ci/pytorch/build-tsan.sh
									
									
									
									
									
										Executable file
									
								
							
							
						
						
									
										29
									
								
								.ci/pytorch/build-tsan.sh
									
									
									
									
									
										Executable file
									
								
							| @ -0,0 +1,29 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| # Required environment variable: $BUILD_ENVIRONMENT | ||||
| # (This is set by default in the Docker images we build, so you don't | ||||
| # need to set it yourself. | ||||
|  | ||||
| # shellcheck source=./common.sh | ||||
| source "$(dirname "${BASH_SOURCE[0]}")/common.sh" | ||||
| # shellcheck source=./common-build.sh | ||||
| source "$(dirname "${BASH_SOURCE[0]}")/common-build.sh" | ||||
|  | ||||
| echo "Clang version:" | ||||
| clang --version | ||||
|  | ||||
| python tools/stats/export_test_times.py | ||||
|  | ||||
| if [ -n "$(which conda)" ]; then | ||||
|   export CMAKE_PREFIX_PATH=/opt/conda | ||||
| fi | ||||
|  | ||||
| CC="clang" CXX="clang++" LDSHARED="clang --shared" \ | ||||
|   CFLAGS="-fsanitize=thread" \ | ||||
|   USE_TSAN=1 USE_CUDA=0 USE_MKLDNN=0 \ | ||||
|   python setup.py bdist_wheel | ||||
|   pip_install_whl "$(echo dist/*.whl)" | ||||
|  | ||||
| print_sccache_stats | ||||
|  | ||||
| assert_git_not_dirty | ||||
| @ -11,6 +11,14 @@ source "$(dirname "${BASH_SOURCE[0]}")/common.sh" | ||||
| # shellcheck source=./common-build.sh | ||||
| source "$(dirname "${BASH_SOURCE[0]}")/common-build.sh" | ||||
|  | ||||
| if [[ "$BUILD_ENVIRONMENT" == *-clang7-asan* ]]; then | ||||
|   exec "$(dirname "${BASH_SOURCE[0]}")/build-asan.sh" "$@" | ||||
| fi | ||||
|  | ||||
| if [[ "$BUILD_ENVIRONMENT" == *-clang7-tsan* ]]; then | ||||
|   exec "$(dirname "${BASH_SOURCE[0]}")/build-tsan.sh" "$@" | ||||
| fi | ||||
|  | ||||
| if [[ "$BUILD_ENVIRONMENT" == *-mobile-*build* ]]; then | ||||
|   exec "$(dirname "${BASH_SOURCE[0]}")/build-mobile.sh" "$@" | ||||
| fi | ||||
| @ -28,8 +36,6 @@ echo "Environment variables:" | ||||
| env | ||||
|  | ||||
| if [[ "$BUILD_ENVIRONMENT" == *cuda* ]]; then | ||||
|   # Use jemalloc during compilation to mitigate https://github.com/pytorch/pytorch/issues/116289 | ||||
|   export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.2 | ||||
|   echo "NVCC version:" | ||||
|   nvcc --version | ||||
| fi | ||||
| @ -38,24 +44,30 @@ if [[ "$BUILD_ENVIRONMENT" == *cuda11* ]]; then | ||||
|   if [[ "$BUILD_ENVIRONMENT" != *cuda11.3* && "$BUILD_ENVIRONMENT" != *clang* ]]; then | ||||
|     # TODO: there is a linking issue when building with UCC using clang, | ||||
|     # disable it for now and to be fix later. | ||||
|     # TODO: disable UCC temporarily to enable CUDA 12.1 in CI | ||||
|     export USE_UCC=1 | ||||
|     export USE_SYSTEM_UCC=1 | ||||
|   fi | ||||
| fi | ||||
|  | ||||
| if [[ ${BUILD_ENVIRONMENT} == *"parallelnative"* ]]; then | ||||
| if [[ ${BUILD_ENVIRONMENT} == *"caffe2"* ]]; then | ||||
|   echo "Caffe2 build is ON" | ||||
|   export BUILD_CAFFE2=ON | ||||
| fi | ||||
|  | ||||
| if [[ ${BUILD_ENVIRONMENT} == *"paralleltbb"* ]]; then | ||||
|   export ATEN_THREADING=TBB | ||||
|   export USE_TBB=1 | ||||
| elif [[ ${BUILD_ENVIRONMENT} == *"parallelnative"* ]]; then | ||||
|   export ATEN_THREADING=NATIVE | ||||
| fi | ||||
|  | ||||
| # Enable LLVM dependency for TensorExpr testing | ||||
| export USE_LLVM=/opt/llvm | ||||
| export LLVM_DIR=/opt/llvm/lib/cmake/llvm | ||||
|  | ||||
| if [[ "$BUILD_ENVIRONMENT" == *executorch* ]]; then | ||||
|   # To build test_edge_op_registration | ||||
|   export BUILD_EXECUTORCH=ON | ||||
|   export USE_CUDA=0 | ||||
| if [[ "$BUILD_ENVIRONMENT" == *rocm* ]]; then | ||||
|   export USE_LLVM=/opt/rocm/llvm | ||||
|   export LLVM_DIR=/opt/rocm/llvm/lib/cmake/llvm | ||||
| else | ||||
|   export USE_LLVM=/opt/llvm | ||||
|   export LLVM_DIR=/opt/llvm/lib/cmake/llvm | ||||
| fi | ||||
|  | ||||
| if ! which conda; then | ||||
| @ -68,35 +80,7 @@ if ! which conda; then | ||||
|     export USE_MKLDNN=0 | ||||
|   fi | ||||
| else | ||||
|   # CMAKE_PREFIX_PATH precedences | ||||
|   # 1. $CONDA_PREFIX, if defined. This follows the pytorch official build instructions. | ||||
|   # 2. /opt/conda/envs/py_${ANACONDA_PYTHON_VERSION}, if ANACONDA_PYTHON_VERSION defined. | ||||
|   #    This is for CI, which defines ANACONDA_PYTHON_VERSION but not CONDA_PREFIX. | ||||
|   # 3. $(conda info --base). The fallback value of pytorch official build | ||||
|   #    instructions actually refers to this. | ||||
|   #    Commonly this is /opt/conda/ | ||||
|   if [[ -v CONDA_PREFIX ]]; then | ||||
|     export CMAKE_PREFIX_PATH=${CONDA_PREFIX} | ||||
|   elif [[ -v ANACONDA_PYTHON_VERSION ]]; then | ||||
|     export CMAKE_PREFIX_PATH="/opt/conda/envs/py_${ANACONDA_PYTHON_VERSION}" | ||||
|   else | ||||
|     # already checked by `! which conda` | ||||
|     CMAKE_PREFIX_PATH="$(conda info --base)" | ||||
|     export CMAKE_PREFIX_PATH | ||||
|   fi | ||||
|  | ||||
|   # Workaround required for MKL library linkage | ||||
|   # https://github.com/pytorch/pytorch/issues/119557 | ||||
|   if [ "$ANACONDA_PYTHON_VERSION" = "3.12" ]; then | ||||
|     export CMAKE_LIBRARY_PATH="/opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/lib/" | ||||
|     export CMAKE_INCLUDE_PATH="/opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/include/" | ||||
|   fi | ||||
| fi | ||||
|  | ||||
| if [[ "$BUILD_ENVIRONMENT" == *aarch64* ]]; then | ||||
|   export USE_MKLDNN=1 | ||||
|   export USE_MKLDNN_ACL=1 | ||||
|   export ACL_ROOT_DIR=/ComputeLibrary | ||||
|   export CMAKE_PREFIX_PATH=/opt/conda | ||||
| fi | ||||
|  | ||||
| if [[ "$BUILD_ENVIRONMENT" == *libtorch* ]]; then | ||||
| @ -168,13 +152,6 @@ if [[ "$BUILD_ENVIRONMENT" == *rocm* ]]; then | ||||
|   python tools/amd_build/build_amd.py | ||||
| fi | ||||
|  | ||||
| if [[ "$BUILD_ENVIRONMENT" == *xpu* ]]; then | ||||
|   # shellcheck disable=SC1091 | ||||
|   source /opt/intel/oneapi/compiler/latest/env/vars.sh | ||||
|   # XPU kineto feature dependencies are not fully ready, disable kineto build as temp WA | ||||
|   export USE_KINETO=0 | ||||
| fi | ||||
|  | ||||
| # sccache will fail for CUDA builds if all cores are used for compiling | ||||
| # gcc 7 with sccache seems to have intermittent OOM issue if all cores are used | ||||
| if [ -z "$MAX_JOBS" ]; then | ||||
| @ -189,27 +166,11 @@ if [[ "$BUILD_ENVIRONMENT" == *cuda* && -z "$TORCH_CUDA_ARCH_LIST" ]]; then | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| # We only build FlashAttention files for CUDA 8.0+, and they require large amounts of | ||||
| # memory to build and will OOM | ||||
| if [[ "$BUILD_ENVIRONMENT" == *cuda* ]] && [[ "$TORCH_CUDA_ARCH_LIST" == *"8.6"* || "$TORCH_CUDA_ARCH_LIST" == *"8.0"* ]]; then | ||||
|   echo "WARNING: FlashAttention files require large amounts of memory to build and will OOM" | ||||
|   echo "Setting MAX_JOBS=(nproc-2)/3 to reduce memory usage" | ||||
|   export MAX_JOBS="$(( $(nproc --ignore=2) / 3 ))" | ||||
| fi | ||||
|  | ||||
| if [[ "${BUILD_ENVIRONMENT}" == *clang* ]]; then | ||||
|   export CC=clang | ||||
|   export CXX=clang++ | ||||
| fi | ||||
|  | ||||
| if [[ "$BUILD_ENVIRONMENT" == *-clang*-asan* ]]; then | ||||
|   export LDSHARED="clang --shared" | ||||
|   export USE_CUDA=0 | ||||
|   export USE_ASAN=1 | ||||
|   export UBSAN_FLAGS="-fno-sanitize-recover=all;-fno-sanitize=float-divide-by-zero;-fno-sanitize=float-cast-overflow" | ||||
|   unset USE_LLVM | ||||
| fi | ||||
|  | ||||
| if [[ "${BUILD_ENVIRONMENT}" == *no-ops* ]]; then | ||||
|   export USE_PER_OPERATOR_HEADERS=0 | ||||
| fi | ||||
| @ -226,45 +187,20 @@ if [[ "${BUILD_ENVIRONMENT}" != *android* && "${BUILD_ENVIRONMENT}" != *cuda* ]] | ||||
|   export BUILD_STATIC_RUNTIME_BENCHMARK=ON | ||||
| fi | ||||
|  | ||||
| if [[ "$BUILD_ENVIRONMENT" == *-debug* ]]; then | ||||
|   export CMAKE_BUILD_TYPE=RelWithAssert | ||||
| fi | ||||
|  | ||||
| # Do not change workspace permissions for ROCm CI jobs | ||||
| # as it can leave workspace with bad permissions for cancelled jobs | ||||
| if [[ "$BUILD_ENVIRONMENT" != *rocm* ]]; then | ||||
|   # Workaround for dind-rootless userid mapping (https://github.com/pytorch/ci-infra/issues/96) | ||||
|   WORKSPACE_ORIGINAL_OWNER_ID=$(stat -c '%u' "/var/lib/jenkins/workspace") | ||||
|   cleanup_workspace() { | ||||
|     echo "sudo may print the following warning message that can be ignored. The chown command will still run." | ||||
|     echo "    sudo: setrlimit(RLIMIT_STACK): Operation not permitted" | ||||
|     echo "For more details refer to https://github.com/sudo-project/sudo/issues/42" | ||||
|     sudo chown -R "$WORKSPACE_ORIGINAL_OWNER_ID" /var/lib/jenkins/workspace | ||||
|   } | ||||
|   # Disable shellcheck SC2064 as we want to parse the original owner immediately. | ||||
|   # shellcheck disable=SC2064 | ||||
|   trap_add cleanup_workspace EXIT | ||||
|   sudo chown -R jenkins /var/lib/jenkins/workspace | ||||
|   git config --global --add safe.directory /var/lib/jenkins/workspace | ||||
| fi | ||||
|  | ||||
| if [[ "$BUILD_ENVIRONMENT" == *-bazel-* ]]; then | ||||
|   set -e | ||||
|  | ||||
|   get_bazel | ||||
|   install_sccache_nvcc_for_bazel | ||||
|  | ||||
|   # Leave 1 CPU free and use only up to 80% of memory to reduce the change of crashing | ||||
|   # the runner | ||||
|   BAZEL_MEM_LIMIT="--local_ram_resources=HOST_RAM*.8" | ||||
|   BAZEL_CPU_LIMIT="--local_cpu_resources=HOST_CPUS-1" | ||||
|  | ||||
|   if [[ "$CUDA_VERSION" == "cpu" ]]; then | ||||
|     # Build torch, the Python module, and tests for CPU-only | ||||
|     tools/bazel build --config=no-tty "${BAZEL_MEM_LIMIT}" "${BAZEL_CPU_LIMIT}" --config=cpu-only :torch :torch/_C.so :all_tests | ||||
|   else | ||||
|     tools/bazel build --config=no-tty "${BAZEL_MEM_LIMIT}" "${BAZEL_CPU_LIMIT}" //... | ||||
|   fi | ||||
|   tools/bazel build --config=no-tty "${BAZEL_MEM_LIMIT}" "${BAZEL_CPU_LIMIT}" //... | ||||
|   # Build torch, the Python module, and tests for CPU-only | ||||
|   tools/bazel build --config=no-tty "${BAZEL_MEM_LIMIT}" "${BAZEL_CPU_LIMIT}" --config=cpu-only :torch :_C.so :all_tests | ||||
|  | ||||
| else | ||||
|   # check that setup.py would fail with bad arguments | ||||
|   echo "The next three invocations are expected to fail with invalid command error messages." | ||||
| @ -273,36 +209,16 @@ else | ||||
|   ( ! get_exit_code python setup.py clean bad_argument ) | ||||
|  | ||||
|   if [[ "$BUILD_ENVIRONMENT" != *libtorch* ]]; then | ||||
|  | ||||
|     # rocm builds fail when WERROR=1 | ||||
|     # XLA test build fails when WERROR=1 | ||||
|     # set only when building other architectures | ||||
|     # or building non-XLA tests. | ||||
|     if [[ "$BUILD_ENVIRONMENT" != *rocm*  && | ||||
|           "$BUILD_ENVIRONMENT" != *xla* ]]; then | ||||
|       if [[ "$BUILD_ENVIRONMENT" != *py3.8* ]]; then | ||||
|         # Install numpy-2.0.2 for builds which are backward compatible with 1.X | ||||
|         python -mpip install --pre numpy==2.0.2 | ||||
|       fi | ||||
|  | ||||
|       WERROR=1 python setup.py clean | ||||
|  | ||||
|       if [[ "$USE_SPLIT_BUILD" == "true" ]]; then | ||||
|         BUILD_LIBTORCH_WHL=1 BUILD_PYTHON_ONLY=0 python setup.py bdist_wheel | ||||
|         BUILD_LIBTORCH_WHL=0 BUILD_PYTHON_ONLY=1 python setup.py bdist_wheel --cmake | ||||
|       else | ||||
|         WERROR=1 python setup.py bdist_wheel | ||||
|       fi | ||||
|       WERROR=1 python setup.py bdist_wheel | ||||
|     else | ||||
|       python setup.py clean | ||||
|       if [[ "$BUILD_ENVIRONMENT" == *xla* ]]; then | ||||
|         source .ci/pytorch/install_cache_xla.sh | ||||
|       fi | ||||
|       if [[ "$USE_SPLIT_BUILD" == "true" ]]; then | ||||
|         echo "USE_SPLIT_BUILD cannot be used with xla or rocm" | ||||
|         exit 1 | ||||
|       else | ||||
|         python setup.py bdist_wheel | ||||
|       fi | ||||
|       python setup.py bdist_wheel | ||||
|     fi | ||||
|     pip_install_whl "$(echo dist/*.whl)" | ||||
|  | ||||
| @ -341,10 +257,9 @@ else | ||||
|     CUSTOM_OP_TEST="$PWD/test/custom_operator" | ||||
|     python --version | ||||
|     SITE_PACKAGES="$(python -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())')" | ||||
|  | ||||
|     mkdir -p "$CUSTOM_OP_BUILD" | ||||
|     pushd "$CUSTOM_OP_BUILD" | ||||
|     cmake "$CUSTOM_OP_TEST" -DCMAKE_PREFIX_PATH="$SITE_PACKAGES/torch;$SITE_PACKAGES" -DPython_EXECUTABLE="$(which python)" \ | ||||
|     cmake "$CUSTOM_OP_TEST" -DCMAKE_PREFIX_PATH="$SITE_PACKAGES/torch" -DPYTHON_EXECUTABLE="$(which python)" \ | ||||
|           -DCMAKE_MODULE_PATH="$CUSTOM_TEST_MODULE_PATH" -DUSE_ROCM="$CUSTOM_TEST_USE_ROCM" | ||||
|     make VERBOSE=1 | ||||
|     popd | ||||
| @ -357,7 +272,7 @@ else | ||||
|     SITE_PACKAGES="$(python -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())')" | ||||
|     mkdir -p "$JIT_HOOK_BUILD" | ||||
|     pushd "$JIT_HOOK_BUILD" | ||||
|     cmake "$JIT_HOOK_TEST" -DCMAKE_PREFIX_PATH="$SITE_PACKAGES/torch;$SITE_PACKAGES" -DPython_EXECUTABLE="$(which python)" \ | ||||
|     cmake "$JIT_HOOK_TEST" -DCMAKE_PREFIX_PATH="$SITE_PACKAGES/torch" -DPYTHON_EXECUTABLE="$(which python)" \ | ||||
|           -DCMAKE_MODULE_PATH="$CUSTOM_TEST_MODULE_PATH" -DUSE_ROCM="$CUSTOM_TEST_USE_ROCM" | ||||
|     make VERBOSE=1 | ||||
|     popd | ||||
| @ -369,7 +284,7 @@ else | ||||
|     python --version | ||||
|     mkdir -p "$CUSTOM_BACKEND_BUILD" | ||||
|     pushd "$CUSTOM_BACKEND_BUILD" | ||||
|     cmake "$CUSTOM_BACKEND_TEST" -DCMAKE_PREFIX_PATH="$SITE_PACKAGES/torch;$SITE_PACKAGES" -DPython_EXECUTABLE="$(which python)" \ | ||||
|     cmake "$CUSTOM_BACKEND_TEST" -DCMAKE_PREFIX_PATH="$SITE_PACKAGES/torch" -DPYTHON_EXECUTABLE="$(which python)" \ | ||||
|           -DCMAKE_MODULE_PATH="$CUSTOM_TEST_MODULE_PATH" -DUSE_ROCM="$CUSTOM_TEST_USE_ROCM" | ||||
|     make VERBOSE=1 | ||||
|     popd | ||||
| @ -400,8 +315,4 @@ if [[ "$BUILD_ENVIRONMENT" != *libtorch* && "$BUILD_ENVIRONMENT" != *bazel* ]]; | ||||
|   python tools/stats/export_test_times.py | ||||
| fi | ||||
|  | ||||
| # snadampal: skipping it till sccache support added for aarch64 | ||||
| # https://github.com/pytorch/pytorch/issues/121559 | ||||
| if [[ "$BUILD_ENVIRONMENT" != *aarch64* ]]; then | ||||
|   print_sccache_stats | ||||
| fi | ||||
| print_sccache_stats | ||||
|  | ||||
| @ -31,7 +31,7 @@ if [[ "$BUILD_ENVIRONMENT" != *win-* ]]; then | ||||
|             # as though sccache still gets used even when the sscache server isn't started | ||||
|             # explicitly | ||||
|             echo "Skipping sccache server initialization, setting environment variables" | ||||
|             export SCCACHE_IDLE_TIMEOUT=0 | ||||
|             export SCCACHE_IDLE_TIMEOUT=1200 | ||||
|             export SCCACHE_ERROR_LOG=~/sccache_error.log | ||||
|             export RUST_LOG=sccache::server=error | ||||
|         elif [[ "${BUILD_ENVIRONMENT}" == *rocm* ]]; then | ||||
| @ -39,12 +39,11 @@ if [[ "$BUILD_ENVIRONMENT" != *win-* ]]; then | ||||
|         else | ||||
|             # increasing SCCACHE_IDLE_TIMEOUT so that extension_backend_test.cpp can build after this PR: | ||||
|             # https://github.com/pytorch/pytorch/pull/16645 | ||||
|             SCCACHE_ERROR_LOG=~/sccache_error.log SCCACHE_IDLE_TIMEOUT=0 RUST_LOG=sccache::server=error sccache --start-server | ||||
|             SCCACHE_ERROR_LOG=~/sccache_error.log SCCACHE_IDLE_TIMEOUT=1200 RUST_LOG=sccache::server=error sccache --start-server | ||||
|         fi | ||||
|  | ||||
|         # Report sccache stats for easier debugging. It's ok if this commands | ||||
|         # timeouts and fails on MacOS | ||||
|         sccache --zero-stats || true | ||||
|         # Report sccache stats for easier debugging | ||||
|         sccache --zero-stats | ||||
|     fi | ||||
|  | ||||
|     if which ccache > /dev/null; then | ||||
|  | ||||
| @ -22,3 +22,7 @@ fi | ||||
| # TODO: Renable libtorch testing for MacOS, see https://github.com/pytorch/pytorch/issues/62598 | ||||
| # shellcheck disable=SC2034 | ||||
| BUILD_TEST_LIBTORCH=0 | ||||
|  | ||||
| retry () { | ||||
|   "$@"  || (sleep 1 && "$@") || (sleep 2 && "$@") | ||||
| } | ||||
|  | ||||
| @ -43,7 +43,7 @@ function assert_git_not_dirty() { | ||||
|     # TODO: we should add an option to `build_amd.py` that reverts the repo to | ||||
|     #       an unmodified state. | ||||
|     if [[ "$BUILD_ENVIRONMENT" != *rocm* ]] && [[ "$BUILD_ENVIRONMENT" != *xla* ]] ; then | ||||
|         git_status=$(git status --porcelain | grep -v '?? third_party' || true) | ||||
|         git_status=$(git status --porcelain) | ||||
|         if [[ $git_status ]]; then | ||||
|             echo "Build left local git repository checkout dirty" | ||||
|             echo "git status --porcelain:" | ||||
| @ -56,29 +56,9 @@ function assert_git_not_dirty() { | ||||
| function pip_install_whl() { | ||||
|   # This is used to install PyTorch and other build artifacts wheel locally | ||||
|   # without using any network connection | ||||
|  | ||||
|   # Convert the input arguments into an array | ||||
|   local args=("$@") | ||||
|  | ||||
|   # Check if the first argument contains multiple paths separated by spaces | ||||
|   if [[ "${args[0]}" == *" "* ]]; then | ||||
|     # Split the string by spaces into an array | ||||
|     IFS=' ' read -r -a paths <<< "${args[0]}" | ||||
|     # Loop through each path and install individually | ||||
|     for path in "${paths[@]}"; do | ||||
|       echo "Installing $path" | ||||
|       python3 -mpip install --no-index --no-deps "$path" | ||||
|     done | ||||
|   else | ||||
|     # Loop through each argument and install individually | ||||
|     for path in "${args[@]}"; do | ||||
|       echo "Installing $path" | ||||
|       python3 -mpip install --no-index --no-deps "$path" | ||||
|     done | ||||
|   fi | ||||
|   python3 -mpip install --no-index --no-deps "$@" | ||||
| } | ||||
|  | ||||
|  | ||||
| function pip_install() { | ||||
|   # retry 3 times | ||||
|   # old versions of pip don't have the "--progress-bar" flag | ||||
| @ -100,34 +80,19 @@ function get_exit_code() { | ||||
| } | ||||
|  | ||||
| function get_bazel() { | ||||
|   # Download and use the cross-platform, dependency-free Python | ||||
|   # version of Bazelisk to fetch the platform specific version of | ||||
|   # Bazel to use from .bazelversion. | ||||
|   retry curl --location --output tools/bazel \ | ||||
|     https://raw.githubusercontent.com/bazelbuild/bazelisk/v1.16.0/bazelisk.py | ||||
|   shasum --algorithm=1 --check \ | ||||
|     <(echo 'd4369c3d293814d3188019c9f7527a948972d9f8  tools/bazel') | ||||
|   chmod u+x tools/bazel | ||||
| } | ||||
|   if [[ $(uname) == "Darwin" ]]; then | ||||
|     # download bazel version | ||||
|     retry curl https://github.com/bazelbuild/bazel/releases/download/4.2.1/bazel-4.2.1-darwin-x86_64  -Lo tools/bazel | ||||
|     # verify content | ||||
|     echo '74d93848f0c9d592e341e48341c53c87e3cb304a54a2a1ee9cff3df422f0b23c  tools/bazel' | shasum -a 256 -c >/dev/null | ||||
|   else | ||||
|     # download bazel version | ||||
|     retry curl https://ossci-linux.s3.amazonaws.com/bazel-4.2.1-linux-x86_64 -o tools/bazel | ||||
|     # verify content | ||||
|     echo '1a4f3a3ce292307bceeb44f459883859c793436d564b95319aacb8af1f20557c  tools/bazel' | shasum -a 256 -c >/dev/null | ||||
|   fi | ||||
|  | ||||
| # This function is bazel specific because of the bug | ||||
| # in the bazel that requires some special paths massaging | ||||
| # as a workaround. See | ||||
| # https://github.com/bazelbuild/bazel/issues/10167 | ||||
| function install_sccache_nvcc_for_bazel() { | ||||
|   sudo mv /usr/local/cuda/bin/nvcc /usr/local/cuda/bin/nvcc-real | ||||
|  | ||||
|   # Write the `/usr/local/cuda/bin/nvcc` | ||||
|   cat << EOF | sudo tee /usr/local/cuda/bin/nvcc | ||||
| #!/bin/sh | ||||
| if [ \$(env -u LD_PRELOAD ps -p \$PPID -o comm=) != sccache ]; then | ||||
|   exec sccache /usr/local/cuda/bin/nvcc "\$@" | ||||
| else | ||||
|   exec external/local_cuda/cuda/bin/nvcc-real "\$@" | ||||
| fi | ||||
| EOF | ||||
|  | ||||
|   sudo chmod +x /usr/local/cuda/bin/nvcc | ||||
|   chmod +x tools/bazel | ||||
| } | ||||
|  | ||||
| function install_monkeytype { | ||||
| @ -140,65 +105,21 @@ function get_pinned_commit() { | ||||
|   cat .github/ci_commit_pins/"${1}".txt | ||||
| } | ||||
|  | ||||
| function install_torchaudio() { | ||||
|   local commit | ||||
|   commit=$(get_pinned_commit audio) | ||||
|   if [[ "$1" == "cuda" ]]; then | ||||
|     # TODO: This is better to be passed as a parameter from _linux-test workflow | ||||
|     # so that it can be consistent with what is set in build | ||||
|     TORCH_CUDA_ARCH_LIST="8.0;8.6" pip_install --no-use-pep517 --user "git+https://github.com/pytorch/audio.git@${commit}" | ||||
|   else | ||||
|     pip_install --no-use-pep517 --user "git+https://github.com/pytorch/audio.git@${commit}" | ||||
|   fi | ||||
|  | ||||
| } | ||||
|  | ||||
| function install_torchtext() { | ||||
|   local data_commit | ||||
|   local text_commit | ||||
|   data_commit=$(get_pinned_commit data) | ||||
|   text_commit=$(get_pinned_commit text) | ||||
|   pip_install --no-use-pep517 --user "git+https://github.com/pytorch/data.git@${data_commit}" | ||||
|   pip_install --no-use-pep517 --user "git+https://github.com/pytorch/text.git@${text_commit}" | ||||
|   local commit | ||||
|   commit=$(get_pinned_commit text) | ||||
|   pip_install --no-use-pep517 --user "git+https://github.com/pytorch/text.git@${commit}" | ||||
| } | ||||
|  | ||||
| function install_torchvision() { | ||||
|   local orig_preload | ||||
|   local commit | ||||
|   commit=$(get_pinned_commit vision) | ||||
|   orig_preload=${LD_PRELOAD} | ||||
|   if [ -n "${LD_PRELOAD}" ]; then | ||||
|     # Silence dlerror to work-around glibc ASAN bug, see https://sourceware.org/bugzilla/show_bug.cgi?id=27653#c9 | ||||
|     echo 'char* dlerror(void) { return "";}'|gcc -fpic -shared -o "${HOME}/dlerror.so" -x c - | ||||
|     LD_PRELOAD=${orig_preload}:${HOME}/dlerror.so | ||||
|   fi | ||||
|   pip_install --no-use-pep517 --user "git+https://github.com/pytorch/vision.git@${commit}" | ||||
|   if [ -n "${LD_PRELOAD}" ]; then | ||||
|     LD_PRELOAD=${orig_preload} | ||||
|   fi | ||||
| } | ||||
|  | ||||
| function install_tlparse() { | ||||
|   pip_install --user "tlparse==0.3.25" | ||||
|   PATH="$(python -m site --user-base)/bin:$PATH" | ||||
| } | ||||
|  | ||||
| function install_torchrec_and_fbgemm() { | ||||
|   local torchrec_commit | ||||
|   torchrec_commit=$(get_pinned_commit torchrec) | ||||
|   local fbgemm_commit | ||||
|   fbgemm_commit=$(get_pinned_commit fbgemm) | ||||
|   pip_uninstall torchrec-nightly | ||||
|   pip_uninstall fbgemm-gpu-nightly | ||||
|   pip_install setuptools-git-versioning scikit-build pyre-extensions | ||||
|   # See https://github.com/pytorch/pytorch/issues/106971 | ||||
|   CUDA_PATH=/usr/local/cuda-12.1 pip_install --no-use-pep517 --user "git+https://github.com/pytorch/FBGEMM.git@${fbgemm_commit}#egg=fbgemm-gpu&subdirectory=fbgemm_gpu" | ||||
|   pip_install --no-use-pep517 --user "git+https://github.com/pytorch/torchrec.git@${torchrec_commit}" | ||||
| } | ||||
|  | ||||
| function clone_pytorch_xla() { | ||||
|   if [[ ! -d ./xla ]]; then | ||||
|     git clone --recursive -b r2.5 https://github.com/pytorch/xla.git | ||||
|     git clone --recursive -b r2.0 --quiet https://github.com/pytorch/xla.git | ||||
|     pushd xla | ||||
|     # pin the xla hash so that we don't get broken by changes to xla | ||||
|     git checkout "$(cat ../.github/ci_commit_pins/xla.txt)" | ||||
| @ -208,12 +129,86 @@ function clone_pytorch_xla() { | ||||
|   fi | ||||
| } | ||||
|  | ||||
| function checkout_install_torchbench() { | ||||
| function install_filelock() { | ||||
|   pip_install filelock | ||||
| } | ||||
|  | ||||
| function install_triton() { | ||||
|   local commit | ||||
|   commit=$(get_pinned_commit torchbench) | ||||
|   if [[ "${TEST_CONFIG}" == *rocm* ]]; then | ||||
|     echo "skipping triton due to rocm" | ||||
|   else | ||||
|     commit=$(get_pinned_commit triton) | ||||
|     if [[ "${BUILD_ENVIRONMENT}" == *gcc7* ]]; then | ||||
|       # Trition needs gcc-9 to build | ||||
|       sudo apt-get install -y g++-9 | ||||
|       CXX=g++-9 pip_install --user "git+https://github.com/openai/triton@${commit}#subdirectory=python" | ||||
|     elif [[ "${BUILD_ENVIRONMENT}" == *clang* ]]; then | ||||
|       # Trition needs <filesystem> which surprisingly is not available with clang-9 toolchain | ||||
|       sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test | ||||
|       sudo apt-get install -y g++-9 | ||||
|       CXX=g++-9 pip_install --user "git+https://github.com/openai/triton@${commit}#subdirectory=python" | ||||
|     else | ||||
|       pip_install --user "git+https://github.com/openai/triton@${commit}#subdirectory=python" | ||||
|     fi | ||||
|     pip_install --user jinja2 | ||||
|   fi | ||||
| } | ||||
|  | ||||
| function setup_torchdeploy_deps(){ | ||||
|   conda install -y -n "py_${ANACONDA_PYTHON_VERSION}" "libpython-static=${ANACONDA_PYTHON_VERSION}" | ||||
|   local CC | ||||
|   local CXX | ||||
|   CC="$(which gcc)" | ||||
|   CXX="$(which g++)" | ||||
|   export CC | ||||
|   export CXX | ||||
|   pip install --upgrade pip | ||||
| } | ||||
|  | ||||
| function checkout_install_torchdeploy() { | ||||
|   local commit | ||||
|   commit=$(get_pinned_commit multipy) | ||||
|   setup_torchdeploy_deps | ||||
|   pushd .. | ||||
|   git clone --recurse-submodules https://github.com/pytorch/multipy.git | ||||
|   pushd multipy | ||||
|   git checkout "${commit}" | ||||
|   python multipy/runtime/example/generate_examples.py | ||||
|   pip install -e . --install-option="--cudatests" | ||||
|   popd | ||||
|   popd | ||||
| } | ||||
|  | ||||
| function test_torch_deploy(){ | ||||
|  pushd .. | ||||
|  pushd multipy | ||||
|  ./multipy/runtime/build/test_deploy | ||||
|  ./multipy/runtime/build/test_deploy_gpu | ||||
|  popd | ||||
|  popd | ||||
| } | ||||
|  | ||||
| function install_huggingface() { | ||||
|   local commit | ||||
|   commit=$(get_pinned_commit huggingface) | ||||
|   pip_install pandas | ||||
|   pip_install scipy | ||||
|   pip_install "git+https://github.com/huggingface/transformers.git@${commit}#egg=transformers" | ||||
| } | ||||
|  | ||||
| function install_timm() { | ||||
|   local commit | ||||
|   commit=$(get_pinned_commit timm) | ||||
|   pip_install pandas | ||||
|   pip_install scipy | ||||
|   pip_install "git+https://github.com/rwightman/pytorch-image-models@${commit}" | ||||
| } | ||||
|  | ||||
| function checkout_install_torchbench() { | ||||
|   git clone https://github.com/pytorch/benchmark torchbench | ||||
|   pushd torchbench | ||||
|   git checkout "$commit" | ||||
|   git checkout no_torchaudio | ||||
|  | ||||
|   if [ "$1" ]; then | ||||
|     python install.py --continue_on_fail models "$@" | ||||
| @ -222,11 +217,13 @@ function checkout_install_torchbench() { | ||||
|     # to install and test other models | ||||
|     python install.py --continue_on_fail | ||||
|   fi | ||||
|   echo "Print all dependencies after TorchBench is installed" | ||||
|   python -mpip freeze | ||||
|   popd | ||||
| } | ||||
|  | ||||
| function test_functorch() { | ||||
|   python test/run_test.py --functorch --verbose | ||||
| } | ||||
|  | ||||
| function print_sccache_stats() { | ||||
|   echo 'PyTorch Build Statistics' | ||||
|   sccache --show-stats | ||||
|  | ||||
| @ -1,11 +1,10 @@ | ||||
| from datetime import datetime, timedelta | ||||
| from tempfile import mkdtemp | ||||
|  | ||||
| from cryptography import x509 | ||||
| from cryptography.hazmat.primitives import hashes, serialization | ||||
| from cryptography.hazmat.primitives import serialization | ||||
| from cryptography.hazmat.primitives.asymmetric import rsa | ||||
| from cryptography import x509 | ||||
| from cryptography.x509.oid import NameOID | ||||
|  | ||||
| from cryptography.hazmat.primitives import hashes | ||||
|  | ||||
| temp_dir = mkdtemp() | ||||
| print(temp_dir) | ||||
| @ -17,43 +16,37 @@ def genrsa(path): | ||||
|         key_size=2048, | ||||
|     ) | ||||
|     with open(path, "wb") as f: | ||||
|         f.write( | ||||
|             key.private_bytes( | ||||
|                 encoding=serialization.Encoding.PEM, | ||||
|                 format=serialization.PrivateFormat.TraditionalOpenSSL, | ||||
|                 encryption_algorithm=serialization.NoEncryption(), | ||||
|             ) | ||||
|         ) | ||||
|         f.write(key.private_bytes( | ||||
|             encoding=serialization.Encoding.PEM, | ||||
|             format=serialization.PrivateFormat.TraditionalOpenSSL, | ||||
|             encryption_algorithm=serialization.NoEncryption(), | ||||
|         )) | ||||
|     return key | ||||
|  | ||||
|  | ||||
| def create_cert(path, C, ST, L, O, key): | ||||
|     subject = issuer = x509.Name( | ||||
|         [ | ||||
|             x509.NameAttribute(NameOID.COUNTRY_NAME, C), | ||||
|             x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, ST), | ||||
|             x509.NameAttribute(NameOID.LOCALITY_NAME, L), | ||||
|             x509.NameAttribute(NameOID.ORGANIZATION_NAME, O), | ||||
|         ] | ||||
|     ) | ||||
|     cert = ( | ||||
|         x509.CertificateBuilder() | ||||
|         .subject_name(subject) | ||||
|         .issuer_name(issuer) | ||||
|         .public_key(key.public_key()) | ||||
|         .serial_number(x509.random_serial_number()) | ||||
|         .not_valid_before(datetime.utcnow()) | ||||
|         .not_valid_after( | ||||
|             # Our certificate will be valid for 10 days | ||||
|             datetime.utcnow() | ||||
|             + timedelta(days=10) | ||||
|         ) | ||||
|         .add_extension( | ||||
|             x509.BasicConstraints(ca=True, path_length=None), | ||||
|             critical=True, | ||||
|         ) | ||||
|         .sign(key, hashes.SHA256()) | ||||
|     ) | ||||
|     subject = issuer = x509.Name([ | ||||
|         x509.NameAttribute(NameOID.COUNTRY_NAME, C), | ||||
|         x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, ST), | ||||
|         x509.NameAttribute(NameOID.LOCALITY_NAME, L), | ||||
|         x509.NameAttribute(NameOID.ORGANIZATION_NAME, O), | ||||
|     ]) | ||||
|     cert = x509.CertificateBuilder().subject_name( | ||||
|         subject | ||||
|     ).issuer_name( | ||||
|         issuer | ||||
|     ).public_key( | ||||
|         key.public_key() | ||||
|     ).serial_number( | ||||
|         x509.random_serial_number() | ||||
|     ).not_valid_before( | ||||
|         datetime.utcnow() | ||||
|     ).not_valid_after( | ||||
|         # Our certificate will be valid for 10 days | ||||
|         datetime.utcnow() + timedelta(days=10) | ||||
|     ).add_extension( | ||||
|         x509.BasicConstraints(ca=True, path_length=None), critical=True, | ||||
|     ).sign(key, hashes.SHA256()) | ||||
|     # Write our certificate out to disk. | ||||
|     with open(path, "wb") as f: | ||||
|         f.write(cert.public_bytes(serialization.Encoding.PEM)) | ||||
| @ -61,65 +54,43 @@ def create_cert(path, C, ST, L, O, key): | ||||
|  | ||||
|  | ||||
| def create_req(path, C, ST, L, O, key): | ||||
|     csr = ( | ||||
|         x509.CertificateSigningRequestBuilder() | ||||
|         .subject_name( | ||||
|             x509.Name( | ||||
|                 [ | ||||
|                     # Provide various details about who we are. | ||||
|                     x509.NameAttribute(NameOID.COUNTRY_NAME, C), | ||||
|                     x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, ST), | ||||
|                     x509.NameAttribute(NameOID.LOCALITY_NAME, L), | ||||
|                     x509.NameAttribute(NameOID.ORGANIZATION_NAME, O), | ||||
|                 ] | ||||
|             ) | ||||
|         ) | ||||
|         .sign(key, hashes.SHA256()) | ||||
|     ) | ||||
|     csr = x509.CertificateSigningRequestBuilder().subject_name(x509.Name([ | ||||
|         # Provide various details about who we are. | ||||
|         x509.NameAttribute(NameOID.COUNTRY_NAME, C), | ||||
|         x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, ST), | ||||
|         x509.NameAttribute(NameOID.LOCALITY_NAME, L), | ||||
|         x509.NameAttribute(NameOID.ORGANIZATION_NAME, O), | ||||
|     ])).sign(key, hashes.SHA256()) | ||||
|     with open(path, "wb") as f: | ||||
|         f.write(csr.public_bytes(serialization.Encoding.PEM)) | ||||
|     return csr | ||||
|  | ||||
|  | ||||
| def sign_certificate_request(path, csr_cert, ca_cert, private_ca_key): | ||||
|     cert = ( | ||||
|         x509.CertificateBuilder() | ||||
|         .subject_name(csr_cert.subject) | ||||
|         .issuer_name(ca_cert.subject) | ||||
|         .public_key(csr_cert.public_key()) | ||||
|         .serial_number(x509.random_serial_number()) | ||||
|         .not_valid_before(datetime.utcnow()) | ||||
|         .not_valid_after( | ||||
|             # Our certificate will be valid for 10 days | ||||
|             datetime.utcnow() | ||||
|             + timedelta(days=10) | ||||
|             # Sign our certificate with our private key | ||||
|         ) | ||||
|         .sign(private_ca_key, hashes.SHA256()) | ||||
|     ) | ||||
|     cert = x509.CertificateBuilder().subject_name( | ||||
|         csr_cert.subject | ||||
|     ).issuer_name( | ||||
|         ca_cert.subject | ||||
|     ).public_key( | ||||
|         csr_cert.public_key() | ||||
|     ).serial_number( | ||||
|         x509.random_serial_number() | ||||
|     ).not_valid_before( | ||||
|         datetime.utcnow() | ||||
|     ).not_valid_after( | ||||
|         # Our certificate will be valid for 10 days | ||||
|         datetime.utcnow() + timedelta(days=10) | ||||
|         # Sign our certificate with our private key | ||||
|     ).sign(private_ca_key, hashes.SHA256()) | ||||
|     with open(path, "wb") as f: | ||||
|         f.write(cert.public_bytes(serialization.Encoding.PEM)) | ||||
|     return cert | ||||
|  | ||||
|  | ||||
| ca_key = genrsa(temp_dir + "/ca.key") | ||||
| ca_cert = create_cert( | ||||
|     temp_dir + "/ca.pem", | ||||
|     "US", | ||||
|     "New York", | ||||
|     "New York", | ||||
|     "Gloo Certificate Authority", | ||||
|     ca_key, | ||||
| ) | ||||
| ca_cert = create_cert(temp_dir + "/ca.pem", u"US", u"New York", u"New York", u"Gloo Certificate Authority", ca_key) | ||||
|  | ||||
| pkey = genrsa(temp_dir + "/pkey.key") | ||||
| csr = create_req( | ||||
|     temp_dir + "/csr.csr", | ||||
|     "US", | ||||
|     "California", | ||||
|     "San Francisco", | ||||
|     "Gloo Testing Company", | ||||
|     pkey, | ||||
| ) | ||||
| csr = create_req(temp_dir + "/csr.csr", u"US", u"California", u"San Francisco", u"Gloo Testing Company", pkey) | ||||
|  | ||||
| cert = sign_certificate_request(temp_dir + "/cert.pem", csr, ca_cert, ca_key) | ||||
|  | ||||
| @ -6,4 +6,5 @@ source "$(dirname "${BASH_SOURCE[0]}")/common.sh" | ||||
| echo "Testing pytorch docs" | ||||
|  | ||||
| cd docs | ||||
| TERM=vt100 make doctest | ||||
| pip_install -r requirements.txt | ||||
| make doctest | ||||
|  | ||||
| @ -1,40 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| # This is where the local pytorch install in the docker image is located | ||||
| pt_checkout="/var/lib/jenkins/workspace" | ||||
| source "$pt_checkout/.ci/pytorch/common_utils.sh" | ||||
| echo "functorch_doc_push_script.sh: Invoked with $*" | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| version=${DOCS_VERSION:-nightly} | ||||
| echo "version: $version" | ||||
|  | ||||
| # Build functorch docs | ||||
| pushd $pt_checkout/functorch/docs | ||||
| make html | ||||
| popd | ||||
|  | ||||
| git clone https://github.com/pytorch/functorch -b gh-pages --depth 1 functorch_ghpages | ||||
| pushd functorch_ghpages | ||||
|  | ||||
| if [ "$version" == "main" ]; then | ||||
|   version=nightly | ||||
| fi | ||||
|  | ||||
| git rm -rf "$version" || true | ||||
| mv "$pt_checkout/functorch/docs/build/html" "$version" | ||||
|  | ||||
| git add "$version" || true | ||||
| git status | ||||
| git config user.email "soumith+bot@pytorch.org" | ||||
| git config user.name "pytorchbot" | ||||
| # If there aren't changes, don't make a commit; push is no-op | ||||
| git commit -m "Generate Python docs from pytorch/pytorch@${GITHUB_SHA}" || true | ||||
| git status | ||||
|  | ||||
| if [[ "${WITH_PUSH:-}" == true ]]; then | ||||
|   git push -u origin gh-pages | ||||
| fi | ||||
|  | ||||
| popd | ||||
| @ -1,37 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| # Script for installing sccache on the xla build job, which uses xla's docker | ||||
| # image and doesn't have sccache installed on it.  This is mostly copied from | ||||
| # .ci/docker/install_cache.sh.  Changes are: removing checks that will always | ||||
| # return the same thing, ex checks for for rocm, CUDA, and changing the path | ||||
| # where sccache is installed, and not changing /etc/environment. | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| install_binary() { | ||||
|   echo "Downloading sccache binary from S3 repo" | ||||
|   curl --retry 3 https://s3.amazonaws.com/ossci-linux/sccache -o /tmp/cache/bin/sccache | ||||
| } | ||||
|  | ||||
| mkdir -p /tmp/cache/bin | ||||
| mkdir -p /tmp/cache/lib | ||||
| export PATH="/tmp/cache/bin:$PATH" | ||||
|  | ||||
| install_binary | ||||
| chmod a+x /tmp/cache/bin/sccache | ||||
|  | ||||
| function write_sccache_stub() { | ||||
|   # Unset LD_PRELOAD for ps because of asan + ps issues | ||||
|   # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90589 | ||||
|   # shellcheck disable=SC2086 | ||||
|   # shellcheck disable=SC2059 | ||||
|   printf "#!/bin/sh\nif [ \$(env -u LD_PRELOAD ps -p \$PPID -o comm=) != sccache ]; then\n  exec sccache $(which $1) \"\$@\"\nelse\n  exec $(which $1) \"\$@\"\nfi" > "/tmp/cache/bin/$1" | ||||
|   chmod a+x "/tmp/cache/bin/$1" | ||||
| } | ||||
|  | ||||
| write_sccache_stub cc | ||||
| write_sccache_stub c++ | ||||
| write_sccache_stub gcc | ||||
| write_sccache_stub g++ | ||||
| write_sccache_stub clang | ||||
| write_sccache_stub clang++ | ||||
| @ -40,14 +40,8 @@ cross_compile_arm64() { | ||||
|   USE_DISTRIBUTED=0 CMAKE_OSX_ARCHITECTURES=arm64 MACOSX_DEPLOYMENT_TARGET=11.0 USE_MKLDNN=OFF USE_QNNPACK=OFF WERROR=1 BUILD_TEST=OFF USE_PYTORCH_METAL=1 python setup.py bdist_wheel | ||||
| } | ||||
|  | ||||
| compile_arm64() { | ||||
|   # Compilation for arm64 | ||||
|   # TODO: Compile with OpenMP support (but this causes CI regressions as cross-compilation were done with OpenMP disabled) | ||||
|   USE_DISTRIBUTED=0 USE_OPENMP=1 MACOSX_DEPLOYMENT_TARGET=11.0 WERROR=1 BUILD_TEST=OFF USE_PYTORCH_METAL=1 python setup.py bdist_wheel | ||||
| } | ||||
|  | ||||
| compile_x86_64() { | ||||
|   USE_DISTRIBUTED=0 WERROR=1 python setup.py bdist_wheel --plat-name=macosx_10_9_x86_64 | ||||
|   USE_DISTRIBUTED=0 WERROR=1 python setup.py bdist_wheel | ||||
| } | ||||
|  | ||||
| build_lite_interpreter() { | ||||
| @ -68,14 +62,8 @@ build_lite_interpreter() { | ||||
|     "${CPP_BUILD}/caffe2/build/bin/test_lite_interpreter_runtime" | ||||
| } | ||||
|  | ||||
| print_cmake_info | ||||
|  | ||||
| if [[ ${BUILD_ENVIRONMENT} = *arm64* ]]; then | ||||
|   if [[ $(uname -m) == "arm64" ]]; then | ||||
|     compile_arm64 | ||||
|   else | ||||
|     cross_compile_arm64 | ||||
|   fi | ||||
|   cross_compile_arm64 | ||||
| elif [[ ${BUILD_ENVIRONMENT} = *lite-interpreter* ]]; then | ||||
|   export BUILD_LITE_INTERPRETER=1 | ||||
|   build_lite_interpreter | ||||
|  | ||||
| @ -9,25 +9,6 @@ sysctl -a | grep machdep.cpu | ||||
|  | ||||
| # These are required for both the build job and the test job. | ||||
| # In the latter to test cpp extensions. | ||||
| export MACOSX_DEPLOYMENT_TARGET=11.1 | ||||
| export MACOSX_DEPLOYMENT_TARGET=10.9 | ||||
| export CXX=clang++ | ||||
| export CC=clang | ||||
|  | ||||
| print_cmake_info() { | ||||
|   CMAKE_EXEC=$(which cmake) | ||||
|   echo "$CMAKE_EXEC" | ||||
|  | ||||
|   CONDA_INSTALLATION_DIR=$(dirname "$CMAKE_EXEC") | ||||
|   # Print all libraries under cmake rpath for debugging | ||||
|   ls -la "$CONDA_INSTALLATION_DIR/../lib" | ||||
|  | ||||
|   export CMAKE_EXEC | ||||
|   # Explicitly add conda env lib folder to cmake rpath to address the flaky issue | ||||
|   # where cmake dependencies couldn't be found. This seems to point to how conda | ||||
|   # links $CMAKE_EXEC to its package cache when cloning a new environment | ||||
|   install_name_tool -add_rpath @executable_path/../lib "${CMAKE_EXEC}" || true | ||||
|   # Adding the rpath will invalidate cmake signature, so signing it again here | ||||
|   # to trust the executable. EXC_BAD_ACCESS (SIGKILL (Code Signature Invalid)) | ||||
|   # with an exit code 137 otherwise | ||||
|   codesign -f -s - "${CMAKE_EXEC}" || true | ||||
| } | ||||
|  | ||||
| @ -25,7 +25,6 @@ setup_test_python() { | ||||
|   # using the address associated with the loopback interface. | ||||
|   export GLOO_SOCKET_IFNAME=lo0 | ||||
|   echo "Ninja version: $(ninja --version)" | ||||
|   echo "Python version: $(which python) ($(python --version))" | ||||
|  | ||||
|   # Increase default limit on open file handles from 256 to 1024 | ||||
|   ulimit -n 1024 | ||||
| @ -71,19 +70,37 @@ test_libtorch() { | ||||
|     VERBOSE=1 DEBUG=1 python "$BUILD_LIBTORCH_PY" | ||||
|     popd | ||||
|  | ||||
|     MNIST_DIR="${PWD}/test/cpp/api/mnist" | ||||
|     python tools/download_mnist.py --quiet -d "${MNIST_DIR}" | ||||
|     python tools/download_mnist.py --quiet -d test/cpp/api/mnist | ||||
|  | ||||
|     # Unfortunately it seems like the test can't load from miniconda3 | ||||
|     # without these paths being set | ||||
|     export DYLD_LIBRARY_PATH="$DYLD_LIBRARY_PATH:$PWD/miniconda3/lib" | ||||
|     export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$PWD/miniconda3/lib" | ||||
|     TORCH_CPP_TEST_MNIST_PATH="${MNIST_DIR}" CPP_TESTS_DIR="${CPP_BUILD}/caffe2/bin" python test/run_test.py --cpp --verbose -i cpp/test_api | ||||
|     TORCH_CPP_TEST_MNIST_PATH="test/cpp/api/mnist" "$CPP_BUILD"/caffe2/bin/test_api | ||||
|  | ||||
|     assert_git_not_dirty | ||||
|   fi | ||||
| } | ||||
|  | ||||
| print_cmake_info() { | ||||
|   CMAKE_EXEC=$(which cmake) | ||||
|   echo "$CMAKE_EXEC" | ||||
|  | ||||
|   CONDA_INSTALLATION_DIR=$(dirname "$CMAKE_EXEC") | ||||
|   # Print all libraries under cmake rpath for debugging | ||||
|   ls -la "$CONDA_INSTALLATION_DIR/../lib" | ||||
|  | ||||
|   export CMAKE_EXEC | ||||
|   # Explicitly add conda env lib folder to cmake rpath to address the flaky issue | ||||
|   # where cmake dependencies couldn't be found. This seems to point to how conda | ||||
|   # links $CMAKE_EXEC to its package cache when cloning a new environment | ||||
|   install_name_tool -add_rpath @executable_path/../lib "${CMAKE_EXEC}" || true | ||||
|   # Adding the rpath will invalidate cmake signature, so signing it again here | ||||
|   # to trust the executable. EXC_BAD_ACCESS (SIGKILL (Code Signature Invalid)) | ||||
|   # with an exit code 137 otherwise | ||||
|   codesign -f -s - "${CMAKE_EXEC}" || true | ||||
| } | ||||
|  | ||||
| test_custom_backend() { | ||||
|   print_cmake_info | ||||
|  | ||||
| @ -149,9 +166,9 @@ test_jit_hooks() { | ||||
|   assert_git_not_dirty | ||||
| } | ||||
|  | ||||
| install_tlparse | ||||
|  | ||||
| if [[ $NUM_TEST_SHARDS -gt 1 ]]; then | ||||
| if [[ "${TEST_CONFIG}" == *functorch* ]]; then | ||||
|   test_functorch | ||||
| elif [[ $NUM_TEST_SHARDS -gt 1 ]]; then | ||||
|   test_python_shard "${SHARD_NUMBER}" | ||||
|   if [[ "${SHARD_NUMBER}" == 1 ]]; then | ||||
|     test_libtorch | ||||
|  | ||||
| @ -8,7 +8,6 @@ | ||||
| source "$(dirname "${BASH_SOURCE[0]}")/common.sh" | ||||
|  | ||||
| echo "Testing pytorch" | ||||
| time python test/run_test.py --include test_cuda_multigpu test_cuda_primary_ctx --verbose | ||||
|  | ||||
| # Disabling tests to see if they solve timeout issues; see https://github.com/pytorch/pytorch/issues/70015 | ||||
| # python tools/download_mnist.py --quiet -d test/cpp/api/mnist | ||||
| @ -18,9 +17,7 @@ time python test/run_test.py --verbose -i distributed/test_c10d_gloo | ||||
| time python test/run_test.py --verbose -i distributed/test_c10d_nccl | ||||
| time python test/run_test.py --verbose -i distributed/test_c10d_spawn_gloo | ||||
| time python test/run_test.py --verbose -i distributed/test_c10d_spawn_nccl | ||||
| time python test/run_test.py --verbose -i distributed/test_compute_comm_reordering | ||||
| time python test/run_test.py --verbose -i distributed/test_store | ||||
| time python test/run_test.py --verbose -i distributed/test_symmetric_memory | ||||
| time python test/run_test.py --verbose -i distributed/test_pg_wrapper | ||||
| time python test/run_test.py --verbose -i distributed/rpc/cuda/test_tensorpipe_agent | ||||
| # FSDP tests | ||||
| @ -30,33 +27,23 @@ time python test/run_test.py --verbose -i distributed/checkpoint/test_checkpoint | ||||
| time python test/run_test.py --verbose -i distributed/checkpoint/test_file_system_checkpoint | ||||
| time python test/run_test.py --verbose -i distributed/_shard/sharding_spec/test_sharding_spec | ||||
| time python test/run_test.py --verbose -i distributed/_shard/sharding_plan/test_sharding_plan | ||||
| time python test/run_test.py --verbose -i distributed/_shard/sharded_tensor/test_megatron_prototype | ||||
| time python test/run_test.py --verbose -i distributed/_shard/sharded_tensor/test_sharded_tensor | ||||
| time python test/run_test.py --verbose -i distributed/_shard/sharded_tensor/test_sharded_tensor_reshard | ||||
|  | ||||
| # functional collective tests | ||||
| time python test/run_test.py --verbose -i distributed/test_functional_api | ||||
|  | ||||
| # DTensor tests | ||||
| time python test/run_test.py --verbose -i distributed/_tensor/test_random_ops | ||||
| time python test/run_test.py --verbose -i distributed/_tensor/test_dtensor_compile | ||||
|  | ||||
| # DeviceMesh test | ||||
| time python test/run_test.py --verbose -i distributed/test_device_mesh | ||||
|  | ||||
| # DTensor/TP tests | ||||
| time python test/run_test.py --verbose -i distributed/tensor/parallel/test_tp_examples | ||||
| time python test/run_test.py --verbose -i distributed/tensor/parallel/test_tp_random_state | ||||
|  | ||||
| # FSDP2 tests | ||||
| time python test/run_test.py --verbose -i distributed/_composable/fsdp/test_fully_shard_training -- -k test_2d_mlp_with_nd_mesh | ||||
|  | ||||
| # ND composability tests | ||||
| time python test/run_test.py --verbose -i distributed/_composable/test_composability/test_2d_composability | ||||
| time python test/run_test.py --verbose -i distributed/_composable/test_composability/test_pp_composability | ||||
|  | ||||
| time python test/run_test.py --verbose -i distributed/_shard/sharded_tensor/ops/test_chunk | ||||
| time python test/run_test.py --verbose -i distributed/_shard/sharded_tensor/ops/test_elementwise_ops | ||||
| time python test/run_test.py --verbose -i distributed/_shard/sharded_tensor/ops/test_embedding | ||||
| time python test/run_test.py --verbose -i distributed/_shard/sharded_tensor/ops/test_embedding_bag | ||||
| time python test/run_test.py --verbose -i distributed/_shard/sharded_tensor/ops/test_binary_cmp | ||||
| time python test/run_test.py --verbose -i distributed/_shard/sharded_tensor/ops/test_init | ||||
| time python test/run_test.py --verbose -i distributed/_shard/sharded_tensor/ops/test_linear | ||||
| time python test/run_test.py --verbose -i distributed/_shard/sharded_tensor/ops/test_math_ops | ||||
| time python test/run_test.py --verbose -i distributed/_shard/sharded_tensor/ops/test_matrix_ops | ||||
| time python test/run_test.py --verbose -i distributed/_shard/sharded_tensor/ops/test_softmax | ||||
| time python test/run_test.py --verbose -i distributed/_shard/sharded_optim/test_sharded_optim | ||||
| time python test/run_test.py --verbose -i distributed/_shard/test_partial_tensor | ||||
| time python test/run_test.py --verbose -i distributed/_shard/test_replicated_tensor | ||||
| # Other tests | ||||
| time python test/run_test.py --verbose -i test_cuda_primary_ctx | ||||
| time python test/run_test.py --verbose -i test_optim -- -k test_forloop_goes_right_direction_multigpu | ||||
| time python test/run_test.py --verbose -i test_optim -- -k test_mixed_device_dtype | ||||
| time python test/run_test.py --verbose -i test_foreach -- -k test_tensors_grouping | ||||
| time python test/run_test.py --verbose -i test_optim -- -k optimizers_with_varying_tensors | ||||
| assert_git_not_dirty | ||||
|  | ||||
Some files were not shown because too many files have changed in this diff Show More
		Reference in New Issue
	
	Block a user
	