mirror of
				https://github.com/pytorch/pytorch.git
				synced 2025-10-31 04:04:57 +08:00 
			
		
		
		
	Compare commits
	
		
			254 Commits
		
	
	
		
	
	| Author | SHA1 | Date | |
|---|---|---|---|
| b2cbcf710b | |||
| f0f20f7e97 | |||
| bfe0079b72 | |||
| 4b985e6f80 | |||
| 5731b486c8 | |||
| aa58af8b43 | |||
| 193f62fde9 | |||
| 709ddf7a9d | |||
| 0455344777 | |||
| 513ce5f69a | |||
| 9039131a89 | |||
| 520182dbff | |||
| a34692c0a3 | |||
| 89bdd9c18f | |||
| 1c58aacbc8 | |||
| 605dfd8fb4 | |||
| fe2e6f0c51 | |||
| 1ad4e6f228 | |||
| 69d63b2318 | |||
| fdc4d6fe96 | |||
| 61d7bb3e79 | |||
| a6ebd56f7b | |||
| 58b8704f28 | |||
| 6c31e02971 | |||
| fba24252bd | |||
| a1fad03fa8 | |||
| 8c4683c978 | |||
| bf6aae1468 | |||
| 2c1851f04e | |||
| dfc9bfc883 | |||
| f3df7deab8 | |||
| 2423d89d0c | |||
| c3679bed35 | |||
| ec3829795d | |||
| 29571c5c06 | |||
| 75c4176b05 | |||
| 6bc8db1d32 | |||
| f063027d54 | |||
| ffc6bf8149 | |||
| 96e8df6a3a | |||
| b07ea91c4c | |||
| 49a8e061b6 | |||
| a4be5cb50e | |||
| c3d099ddd1 | |||
| 745b55d14a | |||
| 1eedb0a962 | |||
| d0e2ab617d | |||
| 4a5a87168e | |||
| 7260eaeca0 | |||
| fddb1bcdea | |||
| c88c90a897 | |||
| 316c0d3e6b | |||
| d962dba0c4 | |||
| 9c4cf866c2 | |||
| e8956c9fe6 | |||
| 91aba7baac | |||
| 2784b3f1b7 | |||
| c04f70bb30 | |||
| 434f60ce33 | |||
| 054d214c50 | |||
| c4bf4005d1 | |||
| 41e9f9cb7c | |||
| 3afdbecb23 | |||
| 059f9fb30b | |||
| ace6decc99 | |||
| 59ef88ea5b | |||
| ee996cd63c | |||
| 42a4df9447 | |||
| ceab3121de | |||
| 35bb0d3638 | |||
| 5f3f14e5e4 | |||
| 00e19ae97a | |||
| 2ce734cee9 | |||
| a2f6eb33d0 | |||
| 62704db5c3 | |||
| 2d7c135757 | |||
| d6115439be | |||
| d98d00487d | |||
| 538258bc13 | |||
| 46e42ae85d | |||
| 03979a599e | |||
| 973a1362b9 | |||
| c047bddbca | |||
| 01bc2a8165 | |||
| b5c006acac | |||
| 8ea4c72eb2 | |||
| ab609d6aa6 | |||
| e20fb5e975 | |||
| c3fe9075a9 | |||
| 803c5b8640 | |||
| 7a42470bcb | |||
| 7535b23a25 | |||
| 29c9f8c782 | |||
| 236e06f9f9 | |||
| 5db5865614 | |||
| a7e20ef7e4 | |||
| b56939dae1 | |||
| f9322c26b2 | |||
| eb54ca7abe | |||
| 544f950d14 | |||
| 7f61324268 | |||
| b90aa18569 | |||
| 44fdf24967 | |||
| 2b83e4f8d7 | |||
| 84cd062fb2 | |||
| a9e6356271 | |||
| 9db567f17d | |||
| 85fa66be04 | |||
| 65ce2bf465 | |||
| 074b46b7d9 | |||
| e0f1bf14a4 | |||
| 05681b6838 | |||
| 05064f2827 | |||
| 8aff6caf67 | |||
| 3ce6f61416 | |||
| aeca9845a6 | |||
| b98b3127f7 | |||
| 7718024d2b | |||
| f0378912a0 | |||
| a86909d251 | |||
| 8fe5b93667 | |||
| 5e6cfb7db5 | |||
| 106c6a49f5 | |||
| abcd329359 | |||
| 0e71a88f9b | |||
| eb5883f8aa | |||
| 72d17d95d7 | |||
| 4c7f22dee2 | |||
| 98984422eb | |||
| bc938184de | |||
| 8ffd109a00 | |||
| 451462dbff | |||
| 0c6f1ca064 | |||
| 85d3ee1d67 | |||
| 89d5391bbf | |||
| 6415c45da5 | |||
| 95c248751b | |||
| a4c3f29047 | |||
| 62e566b345 | |||
| 83d19620f6 | |||
| 1e34870796 | |||
| 276b5238ef | |||
| 41189b0da4 | |||
| e782918b8e | |||
| e9db1b0597 | |||
| eafbd20f23 | |||
| 5772c13f56 | |||
| 9f96d4b61b | |||
| 9575b1afad | |||
| dffbd3a1e2 | |||
| 7124efa81b | |||
| 31da9ee711 | |||
| 0ceaabaf71 | |||
| 0e780a7d69 | |||
| abb313b466 | |||
| aa1c78c7e9 | |||
| 466c167b71 | |||
| 14495ce288 | |||
| 76f7b3e560 | |||
| fdc9a1404e | |||
| 2cf220956a | |||
| 480ae51f85 | |||
| 6850e42266 | |||
| e2b941a1b4 | |||
| e39f136c35 | |||
| 7b375c3682 | |||
| 161c18ed0b | |||
| 1930698140 | |||
| fc3d2b26cd | |||
| 980bb54361 | |||
| 53f1f75061 | |||
| 5a0068cc69 | |||
| e3ca4e79e1 | |||
| c9e74449f3 | |||
| 8a890b72dc | |||
| 4eee2e7a6d | |||
| 8497930766 | |||
| d4e3fd613c | |||
| 7b82ed2d59 | |||
| 93fdd0237d | |||
| faddb0f30c | |||
| 120ca23a1f | |||
| f75d724482 | |||
| aa54bcb6d2 | |||
| 94f22eb6b2 | |||
| f85c35872b | |||
| 22906be8f0 | |||
| cfb9ccab6c | |||
| 6b8ec2b371 | |||
| 3fe72e0c2e | |||
| 68c725a094 | |||
| 404d640c39 | |||
| 979429ca89 | |||
| f93a6a4d31 | |||
| eab1595ce2 | |||
| e4b5645f83 | |||
| f7754c6dc5 | |||
| 5f0b65bee7 | |||
| 4ca8705035 | |||
| ded5bdb0de | |||
| a5ad02d05d | |||
| c1ef214046 | |||
| db376fb643 | |||
| 4f0497c747 | |||
| 154f27455a | |||
| 3aa45cae77 | |||
| b61600f6cc | |||
| 1e86387871 | |||
| f064dac588 | |||
| 6bbef2a06b | |||
| e7c5e06772 | |||
| 0bc5e26067 | |||
| a944cce5b8 | |||
| 250cdb2ac7 | |||
| 4ac77fc6bd | |||
| 027f35d9e6 | |||
| eb146b10db | |||
| 9851c7313d | |||
| 3f3b226ffc | |||
| d8e2e1fe50 | |||
| 16247987a1 | |||
| 16a2a1aad3 | |||
| 102d8e5a63 | |||
| 29e2e2afb6 | |||
| b2ad16f01d | |||
| 99d9b369f4 | |||
| 781189f25d | |||
| 02cd4dbcf4 | |||
| 35a0e0f018 | |||
| 12434504a2 | |||
| 8a591da3e7 | |||
| 6cbb1437c1 | |||
| 28b0ad4f46 | |||
| b435d84261 | |||
| 8963623494 | |||
| 074b420641 | |||
| 1e5ecc4277 | |||
| 26f7dd286b | |||
| 69b1999586 | |||
| 8ae1963a61 | |||
| c74396e890 | |||
| f8f41dcb24 | |||
| 15eb10df02 | |||
| f8875e8277 | |||
| d33804f8b6 | |||
| a136a7d623 | |||
| a3922acc06 | |||
| 0bf59db6cc | |||
| 83b355bad5 | |||
| e3eaa22126 | |||
| 0c074352ab | |||
| 781a33f5d8 | |||
| 406f510f89 | |||
| 9e753d1f20 | 
| @ -1,4 +1,4 @@ | ||||
| # Docker images for GitHub CI | ||||
| # Docker images for GitHub CI and CD | ||||
|  | ||||
| This directory contains everything needed to build the Docker images | ||||
| that are used in our CI. | ||||
| @ -12,7 +12,7 @@ each image as the `BUILD_ENVIRONMENT` environment variable. | ||||
|  | ||||
| See `build.sh` for valid build environments (it's the giant switch). | ||||
|  | ||||
| ## Contents | ||||
| ## Docker CI builds | ||||
|  | ||||
| * `build.sh` -- dispatch script to launch all builds | ||||
| * `common` -- scripts used to execute individual Docker build stages | ||||
| @ -21,6 +21,12 @@ See `build.sh` for valid build environments (it's the giant switch). | ||||
| * `ubuntu-rocm` -- Dockerfile for Ubuntu image with ROCm support | ||||
| * `ubuntu-xpu` -- Dockerfile for Ubuntu image with XPU support | ||||
|  | ||||
| ### Docker CD builds | ||||
|  | ||||
| * `conda` - Dockerfile and build.sh to build Docker images used in nightly conda builds | ||||
| * `manywheel` - Dockerfile and build.sh to build Docker images used in nightly manywheel builds | ||||
| * `libtorch` - Dockerfile and build.sh to build Docker images used in nightly libtorch builds | ||||
|  | ||||
| ## Usage | ||||
|  | ||||
| ```bash | ||||
|  | ||||
| @ -407,6 +407,22 @@ case "$image" in | ||||
|     # from pytorch/llvm:9.0.1 is x86 specific | ||||
|     SKIP_LLVM_SRC_BUILD_INSTALL=yes | ||||
|     ;; | ||||
|   pytorch-linux-jammy-aarch64-py3.10-gcc11-inductor-benchmarks) | ||||
|     ANACONDA_PYTHON_VERSION=3.10 | ||||
|     GCC_VERSION=11 | ||||
|     ACL=yes | ||||
|     PROTOBUF=yes | ||||
|     DB=yes | ||||
|     VISION=yes | ||||
|     CONDA_CMAKE=yes | ||||
|     # snadampal: skipping sccache due to the following issue | ||||
|     # https://github.com/pytorch/pytorch/issues/121559 | ||||
|     SKIP_SCCACHE_INSTALL=yes | ||||
|     # snadampal: skipping llvm src build install because the current version | ||||
|     # from pytorch/llvm:9.0.1 is x86 specific | ||||
|     SKIP_LLVM_SRC_BUILD_INSTALL=yes | ||||
|     INDUCTOR_BENCHMARKS=yes | ||||
|     ;; | ||||
|   *) | ||||
|     # Catch-all for builds that are not hardcoded. | ||||
|     PROTOBUF=yes | ||||
|  | ||||
| @ -1 +1 @@ | ||||
| 9d859653ae916d0a72f6b2b5c5925bed38832140 | ||||
| 48da61aa34b73ea8e2ee815a6a79eea817e361db | ||||
|  | ||||
							
								
								
									
										5
									
								
								.ci/docker/common/aotriton_version.txt
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										5
									
								
								.ci/docker/common/aotriton_version.txt
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,5 @@ | ||||
| 0.6b | ||||
| manylinux_2_17 | ||||
| rocm6.1 | ||||
| 04b5df8c8123f90cba3ede7e971e6fbc6040d506 | ||||
| 77c29fa3f3b614e187d7213d745e989a92708cee2bc6020419ab49019af399d1 | ||||
							
								
								
									
										20
									
								
								.ci/docker/common/install_conda_docker.sh
									
									
									
									
									
										Executable file
									
								
							
							
						
						
									
										20
									
								
								.ci/docker/common/install_conda_docker.sh
									
									
									
									
									
										Executable file
									
								
							| @ -0,0 +1,20 @@ | ||||
| #!/bin/bash | ||||
| # Script used only in CD pipeline | ||||
| set -ex | ||||
|  | ||||
| # Anaconda | ||||
| # Latest anaconda is using openssl-3 which is incompatible with all currently published versions of git | ||||
| # Which are using openssl-1.1.1, see https://anaconda.org/anaconda/git/files?version=2.40.1 for example | ||||
| MINICONDA_URL=https://repo.anaconda.com/miniconda/Miniconda3-py311_23.5.2-0-Linux-x86_64.sh | ||||
| wget -q $MINICONDA_URL | ||||
| # NB: Manually invoke bash per https://github.com/conda/conda/issues/10431 | ||||
| bash $(basename "$MINICONDA_URL") -b -p /opt/conda | ||||
| rm $(basename "$MINICONDA_URL") | ||||
| export PATH=/opt/conda/bin:$PATH | ||||
| # See https://github.com/pytorch/builder/issues/1473 | ||||
| # Pin conda to 23.5.2 as it's the last one compatible with openssl-1.1.1 | ||||
| conda install -y conda=23.5.2 conda-build anaconda-client git ninja | ||||
| # The cmake version here needs to match with the minimum version of cmake | ||||
| # supported by PyTorch (3.18). There is only 3.18.2 on anaconda | ||||
| /opt/conda/bin/pip3 install cmake==3.18.2 | ||||
| conda remove -y --force patchelf | ||||
							
								
								
									
										95
									
								
								.ci/docker/common/install_cpython.sh
									
									
									
									
									
										Executable file
									
								
							
							
						
						
									
										95
									
								
								.ci/docker/common/install_cpython.sh
									
									
									
									
									
										Executable file
									
								
							| @ -0,0 +1,95 @@ | ||||
| #!/bin/bash | ||||
| # Script used only in CD pipeline | ||||
| set -uex -o pipefail | ||||
|  | ||||
| PYTHON_DOWNLOAD_URL=https://www.python.org/ftp/python | ||||
| PYTHON_DOWNLOAD_GITHUB_BRANCH=https://github.com/python/cpython/archive/refs/heads | ||||
| GET_PIP_URL=https://bootstrap.pypa.io/get-pip.py | ||||
|  | ||||
| # Python versions to be installed in /opt/$VERSION_NO | ||||
| CPYTHON_VERSIONS=${CPYTHON_VERSIONS:-"3.8.1 3.9.0 3.10.1 3.11.0 3.12.0 3.13.0"} | ||||
|  | ||||
| function check_var { | ||||
|     if [ -z "$1" ]; then | ||||
|         echo "required variable not defined" | ||||
|         exit 1 | ||||
|     fi | ||||
| } | ||||
|  | ||||
| function do_cpython_build { | ||||
|     local py_ver=$1 | ||||
|     local py_folder=$2 | ||||
|     check_var $py_ver | ||||
|     check_var $py_folder | ||||
|     tar -xzf Python-$py_ver.tgz | ||||
|     pushd $py_folder | ||||
|  | ||||
|     local prefix="/opt/_internal/cpython-${py_ver}" | ||||
|     mkdir -p ${prefix}/lib | ||||
|     if [[ -n $(which patchelf) ]]; then | ||||
|         local shared_flags="--enable-shared" | ||||
|     else | ||||
|         local shared_flags="--disable-shared" | ||||
|     fi | ||||
|     if [[ -z  "${WITH_OPENSSL+x}" ]]; then | ||||
|         local openssl_flags="" | ||||
|     else | ||||
|         local openssl_flags="--with-openssl=${WITH_OPENSSL} --with-openssl-rpath=auto" | ||||
|     fi | ||||
|  | ||||
|     # -Wformat added for https://bugs.python.org/issue17547 on Python 2.6 | ||||
|     CFLAGS="-Wformat" ./configure --prefix=${prefix} ${openssl_flags} ${shared_flags} > /dev/null | ||||
|  | ||||
|     make -j40 > /dev/null | ||||
|     make install > /dev/null | ||||
|  | ||||
|     if [[ "${shared_flags}" == "--enable-shared" ]]; then | ||||
|         patchelf --set-rpath '$ORIGIN/../lib' ${prefix}/bin/python3 | ||||
|     fi | ||||
|  | ||||
|     popd | ||||
|     rm -rf $py_folder | ||||
|     # Some python's install as bin/python3. Make them available as | ||||
|     # bin/python. | ||||
|     if [ -e ${prefix}/bin/python3 ]; then | ||||
|         ln -s python3 ${prefix}/bin/python | ||||
|     fi | ||||
|     ${prefix}/bin/python get-pip.py | ||||
|     if [ -e ${prefix}/bin/pip3 ] && [ ! -e ${prefix}/bin/pip ]; then | ||||
|         ln -s pip3 ${prefix}/bin/pip | ||||
|     fi | ||||
|     ${prefix}/bin/pip install wheel==0.34.2 | ||||
|     local abi_tag=$(${prefix}/bin/python -c "from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag; print('{0}{1}-{2}'.format(get_abbr_impl(), get_impl_ver(), get_abi_tag()))") | ||||
|     ln -s ${prefix} /opt/python/${abi_tag} | ||||
| } | ||||
|  | ||||
| function build_cpython { | ||||
|     local py_ver=$1 | ||||
|     check_var $py_ver | ||||
|     check_var $PYTHON_DOWNLOAD_URL | ||||
|     local py_ver_folder=$py_ver | ||||
|     if [ "$py_ver" = "3.13.0" ]; then | ||||
|         PY_VER_SHORT="3.13" | ||||
|         check_var $PYTHON_DOWNLOAD_GITHUB_BRANCH | ||||
|         wget $PYTHON_DOWNLOAD_GITHUB_BRANCH/$PY_VER_SHORT.tar.gz -O Python-$py_ver.tgz | ||||
|         do_cpython_build $py_ver cpython-$PY_VER_SHORT | ||||
|     else | ||||
|         wget -q $PYTHON_DOWNLOAD_URL/$py_ver_folder/Python-$py_ver.tgz | ||||
|         do_cpython_build $py_ver Python-$py_ver | ||||
|     fi | ||||
|  | ||||
|     rm -f Python-$py_ver.tgz | ||||
| } | ||||
|  | ||||
| function build_cpythons { | ||||
|     check_var $GET_PIP_URL | ||||
|     curl -sLO $GET_PIP_URL | ||||
|     for py_ver in $@; do | ||||
|         build_cpython $py_ver | ||||
|     done | ||||
|     rm -f get-pip.py | ||||
| } | ||||
|  | ||||
| mkdir -p /opt/python | ||||
| mkdir -p /opt/_internal | ||||
| build_cpythons $CPYTHON_VERSIONS | ||||
							
								
								
									
										239
									
								
								.ci/docker/common/install_cuda.sh
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										239
									
								
								.ci/docker/common/install_cuda.sh
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,239 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| NCCL_VERSION=v2.21.5-1 | ||||
| CUDNN_VERSION=9.1.0.70 | ||||
|  | ||||
| function install_cusparselt_040 { | ||||
|     # cuSparseLt license: https://docs.nvidia.com/cuda/cusparselt/license.html | ||||
|     mkdir tmp_cusparselt && pushd tmp_cusparselt | ||||
|     wget -q https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-x86_64/libcusparse_lt-linux-x86_64-0.4.0.7-archive.tar.xz | ||||
|     tar xf libcusparse_lt-linux-x86_64-0.4.0.7-archive.tar.xz | ||||
|     cp -a libcusparse_lt-linux-x86_64-0.4.0.7-archive/include/* /usr/local/cuda/include/ | ||||
|     cp -a libcusparse_lt-linux-x86_64-0.4.0.7-archive/lib/* /usr/local/cuda/lib64/ | ||||
|     popd | ||||
|     rm -rf tmp_cusparselt | ||||
| } | ||||
|  | ||||
| function install_cusparselt_052 { | ||||
|     # cuSparseLt license: https://docs.nvidia.com/cuda/cusparselt/license.html | ||||
|     mkdir tmp_cusparselt && pushd tmp_cusparselt | ||||
|     wget -q https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-x86_64/libcusparse_lt-linux-x86_64-0.5.2.1-archive.tar.xz | ||||
|     tar xf libcusparse_lt-linux-x86_64-0.5.2.1-archive.tar.xz | ||||
|     cp -a libcusparse_lt-linux-x86_64-0.5.2.1-archive/include/* /usr/local/cuda/include/ | ||||
|     cp -a libcusparse_lt-linux-x86_64-0.5.2.1-archive/lib/* /usr/local/cuda/lib64/ | ||||
|     popd | ||||
|     rm -rf tmp_cusparselt | ||||
| } | ||||
|  | ||||
| function install_118 { | ||||
|     echo "Installing CUDA 11.8 and cuDNN ${CUDNN_VERSION} and NCCL ${NCCL_VERSION} and cuSparseLt-0.4.0" | ||||
|     rm -rf /usr/local/cuda-11.8 /usr/local/cuda | ||||
|     # install CUDA 11.8.0 in the same container | ||||
|     wget -q https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run | ||||
|     chmod +x cuda_11.8.0_520.61.05_linux.run | ||||
|     ./cuda_11.8.0_520.61.05_linux.run --toolkit --silent | ||||
|     rm -f cuda_11.8.0_520.61.05_linux.run | ||||
|     rm -f /usr/local/cuda && ln -s /usr/local/cuda-11.8 /usr/local/cuda | ||||
|  | ||||
|     # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement | ||||
|     mkdir tmp_cudnn && cd tmp_cudnn | ||||
|     wget -q https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-${CUDNN_VERSION}_cuda11-archive.tar.xz -O cudnn-linux-x86_64-${CUDNN_VERSION}_cuda11-archive.tar.xz | ||||
|     tar xf cudnn-linux-x86_64-${CUDNN_VERSION}_cuda11-archive.tar.xz | ||||
|     cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda11-archive/include/* /usr/local/cuda/include/ | ||||
|     cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda11-archive/lib/* /usr/local/cuda/lib64/ | ||||
|     cd .. | ||||
|     rm -rf tmp_cudnn | ||||
|  | ||||
|     # NCCL license: https://docs.nvidia.com/deeplearning/nccl/#licenses | ||||
|     # Follow build: https://github.com/NVIDIA/nccl/tree/master?tab=readme-ov-file#build | ||||
|     git clone -b $NCCL_VERSION --depth 1 https://github.com/NVIDIA/nccl.git | ||||
|     cd nccl && make -j src.build | ||||
|     cp -a build/include/* /usr/local/cuda/include/ | ||||
|     cp -a build/lib/* /usr/local/cuda/lib64/ | ||||
|     cd .. | ||||
|     rm -rf nccl | ||||
|  | ||||
|     install_cusparselt_040 | ||||
|  | ||||
|     ldconfig | ||||
| } | ||||
|  | ||||
| function install_121 { | ||||
|     echo "Installing CUDA 12.1 and cuDNN ${CUDNN_VERSION} and NCCL ${NCCL_VERSION} and cuSparseLt-0.5.2" | ||||
|     rm -rf /usr/local/cuda-12.1 /usr/local/cuda | ||||
|     # install CUDA 12.1.0 in the same container | ||||
|     wget -q https://developer.download.nvidia.com/compute/cuda/12.1.1/local_installers/cuda_12.1.1_530.30.02_linux.run | ||||
|     chmod +x cuda_12.1.1_530.30.02_linux.run | ||||
|     ./cuda_12.1.1_530.30.02_linux.run --toolkit --silent | ||||
|     rm -f cuda_12.1.1_530.30.02_linux.run | ||||
|     rm -f /usr/local/cuda && ln -s /usr/local/cuda-12.1 /usr/local/cuda | ||||
|  | ||||
|     # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement | ||||
|     mkdir tmp_cudnn && cd tmp_cudnn | ||||
|     wget -q https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz -O cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz | ||||
|     tar xf cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz | ||||
|     cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive/include/* /usr/local/cuda/include/ | ||||
|     cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive/lib/* /usr/local/cuda/lib64/ | ||||
|     cd .. | ||||
|     rm -rf tmp_cudnn | ||||
|  | ||||
|     # NCCL license: https://docs.nvidia.com/deeplearning/nccl/#licenses | ||||
|     # Follow build: https://github.com/NVIDIA/nccl/tree/master?tab=readme-ov-file#build | ||||
|     git clone -b $NCCL_VERSION --depth 1 https://github.com/NVIDIA/nccl.git | ||||
|     cd nccl && make -j src.build | ||||
|     cp -a build/include/* /usr/local/cuda/include/ | ||||
|     cp -a build/lib/* /usr/local/cuda/lib64/ | ||||
|     cd .. | ||||
|     rm -rf nccl | ||||
|  | ||||
|     install_cusparselt_052 | ||||
|  | ||||
|     ldconfig | ||||
| } | ||||
|  | ||||
| function install_124 { | ||||
|   echo "Installing CUDA 12.4 and cuDNN ${CUDNN_VERSION} and NCCL ${NCCL_VERSION} and cuSparseLt-0.5.2" | ||||
|   rm -rf /usr/local/cuda-12.4 /usr/local/cuda | ||||
|   # install CUDA 12.4.0 in the same container | ||||
|   wget -q https://developer.download.nvidia.com/compute/cuda/12.4.0/local_installers/cuda_12.4.0_550.54.14_linux.run | ||||
|   chmod +x cuda_12.4.0_550.54.14_linux.run | ||||
|   ./cuda_12.4.0_550.54.14_linux.run --toolkit --silent | ||||
|   rm -f cuda_12.4.0_550.54.14_linux.run | ||||
|   rm -f /usr/local/cuda && ln -s /usr/local/cuda-12.4 /usr/local/cuda | ||||
|  | ||||
|   # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement | ||||
|   mkdir tmp_cudnn && cd tmp_cudnn | ||||
|   wget -q https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz -O cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz | ||||
|   tar xf cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive.tar.xz | ||||
|   cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive/include/* /usr/local/cuda/include/ | ||||
|   cp -a cudnn-linux-x86_64-${CUDNN_VERSION}_cuda12-archive/lib/* /usr/local/cuda/lib64/ | ||||
|   cd .. | ||||
|   rm -rf tmp_cudnn | ||||
|  | ||||
|   # NCCL license: https://docs.nvidia.com/deeplearning/nccl/#licenses | ||||
|   # Follow build: https://github.com/NVIDIA/nccl/tree/master?tab=readme-ov-file#build | ||||
|   git clone -b $NCCL_VERSION --depth 1 https://github.com/NVIDIA/nccl.git | ||||
|   cd nccl && make -j src.build | ||||
|   cp -a build/include/* /usr/local/cuda/include/ | ||||
|   cp -a build/lib/* /usr/local/cuda/lib64/ | ||||
|   cd .. | ||||
|   rm -rf nccl | ||||
|  | ||||
|   install_cusparselt_052 | ||||
|  | ||||
|   ldconfig | ||||
| } | ||||
|  | ||||
| function prune_118 { | ||||
|     echo "Pruning CUDA 11.8 and cuDNN" | ||||
|     ##################################################################################### | ||||
|     # CUDA 11.8 prune static libs | ||||
|     ##################################################################################### | ||||
|     export NVPRUNE="/usr/local/cuda-11.8/bin/nvprune" | ||||
|     export CUDA_LIB_DIR="/usr/local/cuda-11.8/lib64" | ||||
|  | ||||
|     export GENCODE="-gencode arch=compute_35,code=sm_35 -gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90" | ||||
|     export GENCODE_CUDNN="-gencode arch=compute_35,code=sm_35 -gencode arch=compute_37,code=sm_37 -gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90" | ||||
|  | ||||
|     if [[ -n "$OVERRIDE_GENCODE" ]]; then | ||||
|         export GENCODE=$OVERRIDE_GENCODE | ||||
|     fi | ||||
|  | ||||
|     # all CUDA libs except CuDNN and CuBLAS (cudnn and cublas need arch 3.7 included) | ||||
|     ls $CUDA_LIB_DIR/ | grep "\.a" | grep -v "culibos" | grep -v "cudart" | grep -v "cudnn" | grep -v "cublas" | grep -v "metis"  \ | ||||
|       | xargs -I {} bash -c \ | ||||
|                 "echo {} && $NVPRUNE $GENCODE $CUDA_LIB_DIR/{} -o $CUDA_LIB_DIR/{}" | ||||
|  | ||||
|     # prune CuDNN and CuBLAS | ||||
|     $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublas_static.a -o $CUDA_LIB_DIR/libcublas_static.a | ||||
|     $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublasLt_static.a -o $CUDA_LIB_DIR/libcublasLt_static.a | ||||
|  | ||||
|     ##################################################################################### | ||||
|     # CUDA 11.8 prune visual tools | ||||
|     ##################################################################################### | ||||
|     export CUDA_BASE="/usr/local/cuda-11.8/" | ||||
|     rm -rf $CUDA_BASE/libnvvp $CUDA_BASE/nsightee_plugins $CUDA_BASE/nsight-compute-2022.3.0 $CUDA_BASE/nsight-systems-2022.4.2/ | ||||
| } | ||||
|  | ||||
| function prune_121 { | ||||
|   echo "Pruning CUDA 12.1" | ||||
|   ##################################################################################### | ||||
|   # CUDA 12.1 prune static libs | ||||
|   ##################################################################################### | ||||
|     export NVPRUNE="/usr/local/cuda-12.1/bin/nvprune" | ||||
|     export CUDA_LIB_DIR="/usr/local/cuda-12.1/lib64" | ||||
|  | ||||
|     export GENCODE="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90" | ||||
|     export GENCODE_CUDNN="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90" | ||||
|  | ||||
|     if [[ -n "$OVERRIDE_GENCODE" ]]; then | ||||
|         export GENCODE=$OVERRIDE_GENCODE | ||||
|     fi | ||||
|  | ||||
|     # all CUDA libs except CuDNN and CuBLAS | ||||
|     ls $CUDA_LIB_DIR/ | grep "\.a" | grep -v "culibos" | grep -v "cudart" | grep -v "cudnn" | grep -v "cublas" | grep -v "metis"  \ | ||||
|       | xargs -I {} bash -c \ | ||||
|                 "echo {} && $NVPRUNE $GENCODE $CUDA_LIB_DIR/{} -o $CUDA_LIB_DIR/{}" | ||||
|  | ||||
|     # prune CuDNN and CuBLAS | ||||
|     $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublas_static.a -o $CUDA_LIB_DIR/libcublas_static.a | ||||
|     $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublasLt_static.a -o $CUDA_LIB_DIR/libcublasLt_static.a | ||||
|  | ||||
|     ##################################################################################### | ||||
|     # CUDA 12.1 prune visual tools | ||||
|     ##################################################################################### | ||||
|     export CUDA_BASE="/usr/local/cuda-12.1/" | ||||
|     rm -rf $CUDA_BASE/libnvvp $CUDA_BASE/nsightee_plugins $CUDA_BASE/nsight-compute-2023.1.0 $CUDA_BASE/nsight-systems-2023.1.2/ | ||||
| } | ||||
|  | ||||
| function prune_124 { | ||||
|   echo "Pruning CUDA 12.4" | ||||
|   ##################################################################################### | ||||
|   # CUDA 12.4 prune static libs | ||||
|   ##################################################################################### | ||||
|   export NVPRUNE="/usr/local/cuda-12.4/bin/nvprune" | ||||
|   export CUDA_LIB_DIR="/usr/local/cuda-12.4/lib64" | ||||
|  | ||||
|   export GENCODE="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90" | ||||
|   export GENCODE_CUDNN="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90" | ||||
|  | ||||
|   if [[ -n "$OVERRIDE_GENCODE" ]]; then | ||||
|       export GENCODE=$OVERRIDE_GENCODE | ||||
|   fi | ||||
|   if [[ -n "$OVERRIDE_GENCODE_CUDNN" ]]; then | ||||
|       export GENCODE_CUDNN=$OVERRIDE_GENCODE_CUDNN | ||||
|   fi | ||||
|  | ||||
|   # all CUDA libs except CuDNN and CuBLAS | ||||
|   ls $CUDA_LIB_DIR/ | grep "\.a" | grep -v "culibos" | grep -v "cudart" | grep -v "cudnn" | grep -v "cublas" | grep -v "metis"  \ | ||||
|       | xargs -I {} bash -c \ | ||||
|                 "echo {} && $NVPRUNE $GENCODE $CUDA_LIB_DIR/{} -o $CUDA_LIB_DIR/{}" | ||||
|  | ||||
|   # prune CuDNN and CuBLAS | ||||
|   $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublas_static.a -o $CUDA_LIB_DIR/libcublas_static.a | ||||
|   $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublasLt_static.a -o $CUDA_LIB_DIR/libcublasLt_static.a | ||||
|  | ||||
|   ##################################################################################### | ||||
|   # CUDA 12.1 prune visual tools | ||||
|   ##################################################################################### | ||||
|   export CUDA_BASE="/usr/local/cuda-12.4/" | ||||
|   rm -rf $CUDA_BASE/libnvvp $CUDA_BASE/nsightee_plugins $CUDA_BASE/nsight-compute-2024.1.0 $CUDA_BASE/nsight-systems-2023.4.4/ | ||||
| } | ||||
|  | ||||
| # idiomatic parameter and option handling in sh | ||||
| while test $# -gt 0 | ||||
| do | ||||
|     case "$1" in | ||||
|     11.8) install_118; prune_118 | ||||
|         ;; | ||||
|     12.1) install_121; prune_121 | ||||
|         ;; | ||||
|     12.4) install_124; prune_124 | ||||
|         ;; | ||||
|     *) echo "bad argument $1"; exit 1 | ||||
|         ;; | ||||
|     esac | ||||
|     shift | ||||
| done | ||||
							
								
								
									
										93
									
								
								.ci/docker/common/install_cuda_aarch64.sh
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										93
									
								
								.ci/docker/common/install_cuda_aarch64.sh
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,93 @@ | ||||
| #!/bin/bash | ||||
| # Script used only in CD pipeline | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| NCCL_VERSION=v2.21.5-1 | ||||
|  | ||||
| function install_cusparselt_052 { | ||||
|     # cuSparseLt license: https://docs.nvidia.com/cuda/cusparselt/license.html | ||||
|     mkdir tmp_cusparselt && pushd tmp_cusparselt | ||||
|     wget -q https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-sbsa/libcusparse_lt-linux-sbsa-0.5.2.1-archive.tar.xz | ||||
|     tar xf libcusparse_lt-linux-sbsa-0.5.2.1-archive.tar.xz | ||||
|     cp -a libcusparse_lt-linux-sbsa-0.5.2.1-archive/include/* /usr/local/cuda/include/ | ||||
|     cp -a libcusparse_lt-linux-sbsa-0.5.2.1-archive/lib/* /usr/local/cuda/lib64/ | ||||
|     popd | ||||
|     rm -rf tmp_cusparselt | ||||
| } | ||||
|  | ||||
| function install_124 { | ||||
|   echo "Installing CUDA 12.4 and cuDNN 9.1 and NCCL ${NCCL_VERSION} and cuSparseLt-0.5.2" | ||||
|   rm -rf /usr/local/cuda-12.4 /usr/local/cuda | ||||
|   # install CUDA 12.4.0 in the same container | ||||
|   wget -q https://developer.download.nvidia.com/compute/cuda/12.4.0/local_installers/cuda_12.4.0_550.54.14_linux_sbsa.run | ||||
|   chmod +x cuda_12.4.0_550.54.14_linux_sbsa.run | ||||
|   ./cuda_12.4.0_550.54.14_linux_sbsa.run --toolkit --silent | ||||
|   rm -f cuda_12.4.0_550.54.14_linux_sbsa.run | ||||
|   rm -f /usr/local/cuda && ln -s /usr/local/cuda-12.4 /usr/local/cuda | ||||
|  | ||||
|   # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement | ||||
|   mkdir tmp_cudnn && cd tmp_cudnn | ||||
|   wget -q https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-sbsa/cudnn-linux-sbsa-9.1.0.70_cuda12-archive.tar.xz -O cudnn-linux-sbsa-9.1.0.70_cuda12-archive.tar.xz | ||||
|   tar xf cudnn-linux-sbsa-9.1.0.70_cuda12-archive.tar.xz | ||||
|   cp -a cudnn-linux-sbsa-9.1.0.70_cuda12-archive/include/* /usr/local/cuda/include/ | ||||
|   cp -a cudnn-linux-sbsa-9.1.0.70_cuda12-archive/lib/* /usr/local/cuda/lib64/ | ||||
|   cd .. | ||||
|   rm -rf tmp_cudnn | ||||
|  | ||||
|   # NCCL license: https://docs.nvidia.com/deeplearning/nccl/#licenses | ||||
|   # Follow build: https://github.com/NVIDIA/nccl/tree/master?tab=readme-ov-file#build | ||||
|   git clone -b ${NCCL_VERSION} --depth 1 https://github.com/NVIDIA/nccl.git | ||||
|   cd nccl && make -j src.build | ||||
|   cp -a build/include/* /usr/local/cuda/include/ | ||||
|   cp -a build/lib/* /usr/local/cuda/lib64/ | ||||
|   cd .. | ||||
|   rm -rf nccl | ||||
|  | ||||
|   install_cusparselt_052 | ||||
|  | ||||
|   ldconfig | ||||
| } | ||||
|  | ||||
# Shrinks the CUDA 12.4 installation: nvprune strips static libraries down to
# the GPU architectures listed in GENCODE/GENCODE_CUDNN, then bundled visual
# profiling tools (nvvp, Nsight Compute/Systems) are deleted.
function prune_124 {
  echo "Pruning CUDA 12.4"
  #####################################################################################
  # CUDA 12.4 prune static libs
  #####################################################################################
  export NVPRUNE="/usr/local/cuda-12.4/bin/nvprune"
  export CUDA_LIB_DIR="/usr/local/cuda-12.4/lib64"

  export GENCODE="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90"
  export GENCODE_CUDNN="-gencode arch=compute_50,code=sm_50 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_90,code=sm_90"

  # Callers may narrow the architecture list via OVERRIDE_GENCODE.
  if [[ -n "$OVERRIDE_GENCODE" ]]; then
      export GENCODE=$OVERRIDE_GENCODE
  fi

  # all CUDA libs except CuDNN and CuBLAS (and a few nvprune-incompatible ones),
  # pruned in place
  ls $CUDA_LIB_DIR/ | grep "\.a" | grep -v "culibos" | grep -v "cudart" | grep -v "cudnn" | grep -v "cublas" | grep -v "metis"  \
      | xargs -I {} bash -c \
                "echo {} && $NVPRUNE $GENCODE $CUDA_LIB_DIR/{} -o $CUDA_LIB_DIR/{}"

  # prune CuDNN and CuBLAS (note: only the cuBLAS archives are pruned here)
  $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublas_static.a -o $CUDA_LIB_DIR/libcublas_static.a
  $NVPRUNE $GENCODE_CUDNN $CUDA_LIB_DIR/libcublasLt_static.a -o $CUDA_LIB_DIR/libcublasLt_static.a

  #####################################################################################
  # CUDA 12.4 prune visual tools
  #####################################################################################
  export CUDA_BASE="/usr/local/cuda-12.4/"
  rm -rf $CUDA_BASE/libnvvp $CUDA_BASE/nsightee_plugins $CUDA_BASE/nsight-compute-2024.1.0 $CUDA_BASE/nsight-systems-2023.4.4/
}
|  | ||||
# Dispatch on each CUDA version given on the command line; unknown
# arguments abort the script. POSIX `[` is used so this also works under sh.
while [ "$#" -gt 0 ]; do
    case "$1" in
        12.4)
            install_124
            prune_124
            ;;
        *)
            echo "bad argument $1"
            exit 1
            ;;
    esac
    shift
done
							
								
								
									
										23
									
								
								.ci/docker/common/install_libpng.sh
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										23
									
								
								.ci/docker/common/install_libpng.sh
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,23 @@ | ||||
#!/bin/bash
# Script used only in CD pipeline
#
# Builds libpng from source (configure / make / make install into the default
# prefix, /usr/local) and removes the build tree afterwards.

set -ex

LIBPNG_VERSION=1.6.37

mkdir -p libpng
pushd libpng

# Fetch over HTTPS: the previous plain-http URL was susceptible to in-transit
# tampering and there is no checksum verification here.
wget https://download.sourceforge.net/libpng/libpng-$LIBPNG_VERSION.tar.gz
tar -xvzf libpng-$LIBPNG_VERSION.tar.gz

pushd libpng-$LIBPNG_VERSION

./configure
make
make install

popd

popd
rm -rf libpng
							
								
								
									
										29
									
								
								.ci/docker/common/install_magma.sh
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										29
									
								
								.ci/docker/common/install_magma.sh
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,29 @@ | ||||
#!/usr/bin/env bash
# Script used only in CD pipeline
#
# Downloads a prebuilt MAGMA archive for the requested CUDA version from the
# pytorch anaconda channel and unpacks its include/ and lib/ trees into
# /usr/local/cuda-<version>/magma.
#
# Usage: install_magma.sh <cuda_version>   e.g. install_magma.sh 12.4

set -eou pipefail

# MAGMA release published on the pytorch anaconda channel.
# (A stale "2.5.2" assignment used to precede this; it was always overwritten.)
MAGMA_VERSION="2.6.1"

function do_install() {
    cuda_version=$1
    cuda_version_nodot=${1/./}      # e.g. 12.4 -> 124, used in package names

    magma_archive="magma-cuda${cuda_version_nodot}-${MAGMA_VERSION}-1.tar.bz2"

    cuda_dir="/usr/local/cuda-${cuda_version}"
    (
        set -x
        tmp_dir=$(mktemp -d)
        pushd "${tmp_dir}"
        curl -OLs https://anaconda.org/pytorch/magma-cuda${cuda_version_nodot}/${MAGMA_VERSION}/download/linux-64/${magma_archive}
        tar -xvf "${magma_archive}"
        mkdir -p "${cuda_dir}/magma"
        mv include "${cuda_dir}/magma/include"
        mv lib "${cuda_dir}/magma/lib"
        popd
        # Scratch dir is no longer needed once the trees have been moved out.
        rm -rf "${tmp_dir}"
    )
}

# Fail with a usage message instead of tripping `set -u` on a missing arg.
if [[ -z "${1:-}" ]]; then
    echo "usage: $0 <cuda_version>" >&2
    exit 1
fi

do_install "$1"
							
								
								
									
										134
									
								
								.ci/docker/common/install_miopen.sh
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										134
									
								
								.ci/docker/common/install_miopen.sh
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,134 @@ | ||||
#!/bin/bash
# Script used only in CD pipeline
#
# Builds and installs a custom MIOpen (with COMgr-based offline compilation)
# for the given ROCm version, replacing the distro miopen-hip package.
# Exits early (success) for ROCm versions whose stock MIOpen needs no patches.
#
# Usage: install_miopen.sh <rocm_version>   e.g. install_miopen.sh 5.6

set -ex

ROCM_VERSION=$1

if [[ -z $ROCM_VERSION ]]; then
    echo "missing ROCM_VERSION"
    exit 1;
fi

# To make version comparison easier, create an integer representation.
# e.g. "5.4.2" -> 50402, "6.1" -> 60100 (missing patchlevel treated as 0).
save_IFS="$IFS"
IFS=. ROCM_VERSION_ARRAY=(${ROCM_VERSION})
IFS="$save_IFS"
if [[ ${#ROCM_VERSION_ARRAY[@]} == 2 ]]; then
    ROCM_VERSION_MAJOR=${ROCM_VERSION_ARRAY[0]}
    ROCM_VERSION_MINOR=${ROCM_VERSION_ARRAY[1]}
    ROCM_VERSION_PATCH=0
elif [[ ${#ROCM_VERSION_ARRAY[@]} == 3 ]]; then
    ROCM_VERSION_MAJOR=${ROCM_VERSION_ARRAY[0]}
    ROCM_VERSION_MINOR=${ROCM_VERSION_ARRAY[1]}
    ROCM_VERSION_PATCH=${ROCM_VERSION_ARRAY[2]}
else
    echo "Unhandled ROCM_VERSION ${ROCM_VERSION}"
    exit 1
fi
ROCM_INT=$(($ROCM_VERSION_MAJOR * 10000 + $ROCM_VERSION_MINOR * 100 + $ROCM_VERSION_PATCH))

# Install custom MIOpen + COMgr for ROCm >= 4.0.1
if [[ $ROCM_INT -lt 40001 ]]; then
    echo "ROCm version < 4.0.1; will not install custom MIOpen"
    exit 0
fi

# Function to retry functions that sometimes timeout or have flaky failures
# NOTE(review): retry() is currently unused in this script.
retry () {
    $*  || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*)
}

# Build custom MIOpen to use comgr for offline compilation.

## Need a sanitized ROCM_VERSION without patchlevel; patchlevel version 0 must be added to paths.
ROCM_DOTS=$(echo ${ROCM_VERSION} | tr -d -c '.' | wc -c)
if [[ ${ROCM_DOTS} == 1 ]]; then
    ROCM_VERSION_NOPATCH="${ROCM_VERSION}"
    ROCM_INSTALL_PATH="/opt/rocm-${ROCM_VERSION}.0"
else
    ROCM_VERSION_NOPATCH="${ROCM_VERSION%.*}"
    ROCM_INSTALL_PATH="/opt/rocm-${ROCM_VERSION}"
fi

# MIOPEN_USE_HIP_KERNELS is a Workaround for COMgr issues
MIOPEN_CMAKE_COMMON_FLAGS="
-DMIOPEN_USE_COMGR=ON
-DMIOPEN_BUILD_DRIVER=OFF
"
# Pull MIOpen repo and set DMIOPEN_EMBED_DB based on ROCm version.
# Recent ROCm releases (>= 5.7) need no patches, so the stock package is kept.
if [[ $ROCM_INT -ge 60100 ]] && [[ $ROCM_INT -lt 60200 ]]; then
    echo "ROCm 6.1 MIOpen does not need any patches, do not build from source"
    exit 0
elif [[ $ROCM_INT -ge 60000 ]] && [[ $ROCM_INT -lt 60100 ]]; then
    echo "ROCm 6.0 MIOpen does not need any patches, do not build from source"
    exit 0
elif [[ $ROCM_INT -ge 50700 ]] && [[ $ROCM_INT -lt 60000 ]]; then
    echo "ROCm 5.7 MIOpen does not need any patches, do not build from source"
    exit 0
elif [[ $ROCM_INT -ge 50600 ]] && [[ $ROCM_INT -lt 50700 ]]; then
    MIOPEN_BRANCH="release/rocm-rel-5.6-staging"
elif [[ $ROCM_INT -ge 50500 ]] && [[ $ROCM_INT -lt 50600 ]]; then
    MIOPEN_BRANCH="release/rocm-rel-5.5-gfx11"
elif [[ $ROCM_INT -ge 50400 ]] && [[ $ROCM_INT -lt 50500 ]]; then
    MIOPEN_CMAKE_DB_FLAGS="-DMIOPEN_EMBED_DB=gfx900_56;gfx906_60;gfx90878;gfx90a6e;gfx1030_36 -DMIOPEN_USE_MLIR=Off"
    MIOPEN_BRANCH="release/rocm-rel-5.4-staging"
elif [[ $ROCM_INT -ge 50300 ]] && [[ $ROCM_INT -lt 50400 ]]; then
    MIOPEN_CMAKE_DB_FLAGS="-DMIOPEN_EMBED_DB=gfx900_56;gfx906_60;gfx90878;gfx90a6e;gfx1030_36 -DMIOPEN_USE_MLIR=Off"
    MIOPEN_BRANCH="release/rocm-rel-5.3-staging"
elif [[ $ROCM_INT -ge 50200 ]] && [[ $ROCM_INT -lt 50300 ]]; then
    MIOPEN_CMAKE_DB_FLAGS="-DMIOPEN_EMBED_DB=gfx900_56;gfx906_60;gfx90878;gfx90a6e;gfx1030_36 -DMIOPEN_USE_MLIR=Off"
    MIOPEN_BRANCH="release/rocm-rel-5.2-staging"
elif [[ $ROCM_INT -ge 50100 ]] && [[ $ROCM_INT -lt 50200 ]]; then
    MIOPEN_CMAKE_DB_FLAGS="-DMIOPEN_EMBED_DB=gfx900_56;gfx906_60;gfx90878;gfx90a6e;gfx1030_36"
    MIOPEN_BRANCH="release/rocm-rel-5.1-staging"
elif [[ $ROCM_INT -ge 50000 ]] && [[ $ROCM_INT -lt 50100 ]]; then
    MIOPEN_CMAKE_DB_FLAGS="-DMIOPEN_EMBED_DB=gfx900_56;gfx906_60;gfx90878;gfx90a6e;gfx1030_36"
    MIOPEN_BRANCH="release/rocm-rel-5.0-staging"
else
    echo "Unhandled ROCM_VERSION ${ROCM_VERSION}"
    exit 1
fi

# Replace the distro package with our source build below.
yum remove -y miopen-hip

git clone https://github.com/ROCm/MIOpen -b ${MIOPEN_BRANCH}
pushd MIOpen
# remove .git to save disk space since CI runner was running out
rm -rf .git
# Don't build MLIR to save docker build time
# since we are disabling MLIR backend for MIOpen anyway
if [[ $ROCM_INT -ge 50400 ]] && [[ $ROCM_INT -lt 50500 ]]; then
    sed -i '/rocMLIR/d' requirements.txt
elif [[ $ROCM_INT -ge 50200 ]] && [[ $ROCM_INT -lt 50400 ]]; then
    sed -i '/llvm-project-mlir/d' requirements.txt
fi
## MIOpen minimum requirements
cmake -P install_deps.cmake --minimum

# clean up since CI runner was running out of disk space
rm -rf /tmp/*
yum clean all
rm -rf /var/cache/yum
rm -rf /var/lib/yum/yumdb
rm -rf /var/lib/yum/history

## Build MIOpen
mkdir -p build
cd build
PKG_CONFIG_PATH=/usr/local/lib/pkgconfig CXX=${ROCM_INSTALL_PATH}/llvm/bin/clang++ cmake .. \
    ${MIOPEN_CMAKE_COMMON_FLAGS} \
    ${MIOPEN_CMAKE_DB_FLAGS} \
    -DCMAKE_PREFIX_PATH="${ROCM_INSTALL_PATH}/hip;${ROCM_INSTALL_PATH}"
make MIOpen -j $(nproc)

# Build MIOpen package
make -j $(nproc) package

# clean up since CI runner was running out of disk space
rm -rf /usr/local/cget

# Install the freshly built package in place of the removed distro one.
yum install -y miopen-*.rpm

popd
rm -rf MIOpen
							
								
								
									
										16
									
								
								.ci/docker/common/install_mkl.sh
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										16
									
								
								.ci/docker/common/install_mkl.sh
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,16 @@ | ||||
#!/bin/bash
# Installs Intel MKL static libraries and headers into /opt/intel by
# downloading the mkl-static / mkl-include wheels and unpacking them.
set -ex

# MKL
MKL_VERSION=2024.2.0

MKLROOT=/opt/intel
mkdir -p ${MKLROOT}
pushd /tmp

python3 -mpip install wheel
# Downloading mkl-static also pulls in the mkl_include wheel unpacked below.
python3 -mpip download -d . mkl-static==${MKL_VERSION}
python3 -m wheel unpack mkl_static-${MKL_VERSION}-py2.py3-none-manylinux1_x86_64.whl
python3 -m wheel unpack mkl_include-${MKL_VERSION}-py2.py3-none-manylinux1_x86_64.whl
mv mkl_static-${MKL_VERSION}/mkl_static-${MKL_VERSION}.data/data/lib ${MKLROOT}
mv mkl_include-${MKL_VERSION}/mkl_include-${MKL_VERSION}.data/data/include ${MKLROOT}

# Remove the downloaded wheels and leftover unpack dirs, and balance pushd.
rm -rf mkl_static-${MKL_VERSION}* mkl_include-${MKL_VERSION}*
popd
							
								
								
									
										13
									
								
								.ci/docker/common/install_mnist.sh
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										13
									
								
								.ci/docker/common/install_mnist.sh
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,13 @@ | ||||
#!/bin/bash
# Script used only in CD pipeline

set -ex

# Fetch the four MNIST archives from the OSS CI S3 mirror and leave the
# decompressed idx files in /usr/local/mnist/.
mkdir -p /usr/local/mnist/

cd /usr/local/mnist

mnist_files=(
  train-images-idx3-ubyte.gz
  train-labels-idx1-ubyte.gz
  t10k-images-idx3-ubyte.gz
  t10k-labels-idx1-ubyte.gz
)

for img in "${mnist_files[@]}"; do
  wget -q "https://ossci-datasets.s3.amazonaws.com/mnist/${img}"
  gzip -d "${img}"
done
							
								
								
									
										22
									
								
								.ci/docker/common/install_openblas.sh
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										22
									
								
								.ci/docker/common/install_openblas.sh
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,22 @@ | ||||
#!/bin/bash
# Script used only in CD pipeline
#
# Builds OpenBLAS v0.3.25 from source for ARMv8 (DYNAMIC_ARCH runtime
# dispatch, OpenMP threading) and installs it into the default prefix.

set -ex

cd /
git clone https://github.com/OpenMathLib/OpenBLAS.git -b v0.3.25 --depth 1 --shallow-submodules


# Deliberately expanded unquoted below so each NAME=VALUE word becomes a
# separate make variable assignment.
OPENBLAS_BUILD_FLAGS="
NUM_THREADS=128
USE_OPENMP=1
NO_SHARED=0
DYNAMIC_ARCH=1
TARGET=ARMV8
CFLAGS=-O3
"

OPENBLAS_CHECKOUT_DIR="OpenBLAS"

make -j8 ${OPENBLAS_BUILD_FLAGS} -C ${OPENBLAS_CHECKOUT_DIR}
make -j8 ${OPENBLAS_BUILD_FLAGS} install -C ${OPENBLAS_CHECKOUT_DIR}
							
								
								
									
										16
									
								
								.ci/docker/common/install_patchelf.sh
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										16
									
								
								.ci/docker/common/install_patchelf.sh
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,16 @@ | ||||
#!/bin/bash
# Script used only in CD pipeline
#
# Builds patchelf 0.17.2 from source and installs it into the default prefix.

set -ex

# Pin the version to latest release 0.17.2, building newer commit starts
# to fail on the current image
git clone -b 0.17.2 --single-branch https://github.com/NixOS/patchelf
cd patchelf
# NOTE(review): presumably flips automake's serial-tests option to
# parallel-tests in configure.ac; the blanket s/serial/parallel/ would also
# rewrite any other occurrence of "serial" — confirm against that file.
sed -i 's/serial/parallel/g' configure.ac
./bootstrap.sh
./configure
make
make install
cd ..
rm -rf patchelf
							
								
								
									
										150
									
								
								.ci/docker/common/install_rocm_drm.sh
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										150
									
								
								.ci/docker/common/install_rocm_drm.sh
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,150 @@ | ||||
#!/bin/bash
# Script used only in CD pipeline
#
# Builds libdrm from source — with a patch that makes amdgpu_parse_asic_ids()
# fall back to searching for amdgpu.ids near the running executable when the
# compiled-in table path is missing — and installs it under /opt/amdgpu.
# NOTE(review): unlike sibling scripts, there is no `set -ex` here, so failed
# steps (e.g. the patch not applying) will not abort the build — confirm
# whether that is intentional.

###########################
### prereqs
###########################
# Install Python packages depending on the base OS
ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"')
case "$ID" in
  ubuntu)
    apt-get update -y
    apt-get install -y libpciaccess-dev pkg-config
    apt-get clean
    ;;
  centos)
    yum install -y libpciaccess-devel pkgconfig
    ;;
  *)
    echo "Unable to determine OS..."
    exit 1
    ;;
esac
# meson/ninja drive the libdrm build below.
python3 -m pip install meson ninja

###########################
### clone repo
###########################
GIT_SSL_NO_VERIFY=true git clone https://gitlab.freedesktop.org/mesa/drm.git
pushd drm

###########################
### patch
###########################
# The quoted heredoc is applied verbatim by patch(1); do not edit or
# re-indent its contents.
patch -p1 <<'EOF'
diff --git a/amdgpu/amdgpu_asic_id.c b/amdgpu/amdgpu_asic_id.c
index a5007ffc..13fa07fc 100644
--- a/amdgpu/amdgpu_asic_id.c
+++ b/amdgpu/amdgpu_asic_id.c
@@ -22,6 +22,13 @@
  *
  */
 
+#define _XOPEN_SOURCE 700
+#define _LARGEFILE64_SOURCE
+#define _FILE_OFFSET_BITS 64
+#include <ftw.h>
+#include <link.h>
+#include <limits.h>
+
 #include <ctype.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -34,6 +41,19 @@
 #include "amdgpu_drm.h"
 #include "amdgpu_internal.h"
 
+static char *amdgpuids_path = NULL;
+static const char* amdgpuids_path_msg = NULL;
+
+static int check_for_location_of_amdgpuids(const char *filepath, const struct stat *info, const int typeflag, struct FTW *pathinfo)
+{
+	if (typeflag == FTW_F && strstr(filepath, "amdgpu.ids")) {
+		amdgpuids_path = strdup(filepath);
+		return 1;
+	}
+
+	return 0;
+}
+
 static int parse_one_line(struct amdgpu_device *dev, const char *line)
 {
 	char *buf, *saveptr;
@@ -113,10 +133,46 @@ void amdgpu_parse_asic_ids(struct amdgpu_device *dev)
 	int line_num = 1;
 	int r = 0;
 
+	// attempt to find typical location for amdgpu.ids file
 	fp = fopen(AMDGPU_ASIC_ID_TABLE, "r");
+
+	// if it doesn't exist, search
+	if (!fp) {
+
+	char self_path[ PATH_MAX ];
+	ssize_t count;
+	ssize_t i;
+
+	count = readlink( "/proc/self/exe", self_path, PATH_MAX );
+	if (count > 0) {
+		self_path[count] = '\0';
+
+		// remove '/bin/python' from self_path
+		for (i=count; i>0; --i) {
+			if (self_path[i] == '/') break;
+			self_path[i] = '\0';
+		}
+		self_path[i] = '\0';
+		for (; i>0; --i) {
+			if (self_path[i] == '/') break;
+			self_path[i] = '\0';
+		}
+		self_path[i] = '\0';
+
+		if (1 == nftw(self_path, check_for_location_of_amdgpuids, 5, FTW_PHYS)) {
+			fp = fopen(amdgpuids_path, "r");
+			amdgpuids_path_msg = amdgpuids_path;
+		}
+	}
+
+	}
+	else {
+		amdgpuids_path_msg = AMDGPU_ASIC_ID_TABLE;
+	}
+
+	// both hard-coded location and search have failed
 	if (!fp) {
-		fprintf(stderr, "%s: %s\n", AMDGPU_ASIC_ID_TABLE,
-			strerror(errno));
+		fprintf(stderr, "amdgpu.ids: No such file or directory\n");
 		return;
 	}
 
@@ -132,7 +188,7 @@ void amdgpu_parse_asic_ids(struct amdgpu_device *dev)
 			continue;
 		}
 
-		drmMsg("%s version: %s\n", AMDGPU_ASIC_ID_TABLE, line);
+		drmMsg("%s version: %s\n", amdgpuids_path_msg, line);
 		break;
 	}
 
@@ -150,7 +206,7 @@ void amdgpu_parse_asic_ids(struct amdgpu_device *dev)
 
 	if (r == -EINVAL) {
 		fprintf(stderr, "Invalid format: %s: line %d: %s\n",
-			AMDGPU_ASIC_ID_TABLE, line_num, line);
+			amdgpuids_path_msg, line_num, line);
 	} else if (r && r != -EAGAIN) {
 		fprintf(stderr, "%s: Cannot parse ASIC IDs: %s\n",
 			__func__, strerror(-r));
EOF

###########################
### build
###########################
meson builddir --prefix=/opt/amdgpu
pushd builddir
ninja install

popd
popd
| @ -1,7 +1,11 @@ | ||||
| #!/bin/bash | ||||
| # Script used in CI and CD pipeline | ||||
|  | ||||
| set -ex | ||||
|  | ||||
|  | ||||
| MKLROOT=${MKLROOT:-/opt/conda/envs/py_$ANACONDA_PYTHON_VERSION} | ||||
|  | ||||
| # "install" hipMAGMA into /opt/rocm/magma by copying after build | ||||
| git clone https://bitbucket.org/icl/magma.git | ||||
| pushd magma | ||||
| @ -11,7 +15,10 @@ git checkout a1625ff4d9bc362906bd01f805dbbe12612953f6 | ||||
|  | ||||
| cp make.inc-examples/make.inc.hip-gcc-mkl make.inc | ||||
| echo 'LIBDIR += -L$(MKLROOT)/lib' >> make.inc | ||||
| echo 'LIB += -Wl,--enable-new-dtags -Wl,--rpath,/opt/rocm/lib -Wl,--rpath,$(MKLROOT)/lib -Wl,--rpath,/opt/rocm/magma/lib' >> make.inc | ||||
| if [[ -f "${MKLROOT}/lib/libmkl_core.a" ]]; then | ||||
|     echo 'LIB = -Wl,--start-group -lmkl_gf_lp64 -lmkl_gnu_thread -lmkl_core -Wl,--end-group -lpthread -lstdc++ -lm -lgomp -lhipblas -lhipsparse' >> make.inc | ||||
| fi | ||||
| echo 'LIB += -Wl,--enable-new-dtags -Wl,--rpath,/opt/rocm/lib -Wl,--rpath,$(MKLROOT)/lib -Wl,--rpath,/opt/rocm/magma/lib -ldl' >> make.inc | ||||
| echo 'DEVCCFLAGS += --gpu-max-threads-per-block=256' >> make.inc | ||||
| export PATH="${PATH}:/opt/rocm/bin" | ||||
| if [[ -n "$PYTORCH_ROCM_ARCH" ]]; then | ||||
| @ -25,7 +32,7 @@ done | ||||
| # hipcc with openmp flag may cause isnan() on __device__ not to be found; depending on context, compiler may attempt to match with host definition | ||||
| sed -i 's/^FOPENMP/#FOPENMP/g' make.inc | ||||
| make -f make.gen.hipMAGMA -j $(nproc) | ||||
| LANG=C.UTF-8 make lib/libmagma.so -j $(nproc) MKLROOT=/opt/conda/envs/py_$ANACONDA_PYTHON_VERSION | ||||
| make testing/testing_dgemm -j $(nproc) MKLROOT=/opt/conda/envs/py_$ANACONDA_PYTHON_VERSION | ||||
| LANG=C.UTF-8 make lib/libmagma.so -j $(nproc) MKLROOT="${MKLROOT}" | ||||
| make testing/testing_dgemm -j $(nproc) MKLROOT="${MKLROOT}" | ||||
| popd | ||||
| mv magma /opt/rocm | ||||
|  | ||||
| @ -1,6 +1,6 @@ | ||||
| #!/bin/bash | ||||
| set -xe | ||||
|  | ||||
| # Script used in CI and CD pipeline | ||||
|  | ||||
| # Intel® software for general purpose GPU capabilities. | ||||
| # Refer to https://www.intel.com/content/www/us/en/developer/articles/tool/pytorch-prerequisites-for-intel-gpus.html | ||||
| @ -8,19 +8,23 @@ set -xe | ||||
| # Users should update to the latest version as it becomes available | ||||
|  | ||||
| function install_ubuntu() { | ||||
|     . /etc/os-release | ||||
|     if [[ ! " jammy " =~ " ${VERSION_CODENAME} " ]]; then | ||||
|         echo "Ubuntu version ${VERSION_CODENAME} not supported" | ||||
|         exit | ||||
|     fi | ||||
|  | ||||
|     apt-get update -y | ||||
|     apt-get install -y gpg-agent wget | ||||
|  | ||||
|     # Set up the repository. To do this, download the key to the system keyring | ||||
|     # To add the online network package repository for the GPU Driver LTS releases | ||||
|     wget -qO - https://repositories.intel.com/gpu/intel-graphics.key \ | ||||
|         | gpg --dearmor --output /usr/share/keyrings/intel-graphics.gpg | ||||
|     wget -qO - https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB \ | ||||
|         | gpg --dearmor --output /usr/share/keyrings/intel-for-pytorch-gpu-dev-keyring.gpg | ||||
|  | ||||
|     # Add the signed entry to APT sources and configure the APT client to use the Intel repository | ||||
|         | gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg | ||||
|     echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] \ | ||||
|         https://repositories.intel.com/gpu/ubuntu jammy/lts/2350 unified" \ | ||||
|         | tee /etc/apt/sources.list.d/intel-gpu-jammy.list | ||||
|         https://repositories.intel.com/gpu/ubuntu ${VERSION_CODENAME}/lts/2350 unified" \ | ||||
|         | tee /etc/apt/sources.list.d/intel-gpu-${VERSION_CODENAME}.list | ||||
|     # To add the online network network package repository for the Intel Support Packages | ||||
|     wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB \ | ||||
|         | gpg --dearmor > /usr/share/keyrings/intel-for-pytorch-gpu-dev-keyring.gpg | ||||
|     echo "deb [signed-by=/usr/share/keyrings/intel-for-pytorch-gpu-dev-keyring.gpg] \ | ||||
|         https://apt.repos.intel.com/intel-for-pytorch-gpu-dev all main" \ | ||||
|         | tee /etc/apt/sources.list.d/intel-for-pytorch-gpu-dev.list | ||||
| @ -97,6 +101,86 @@ EOF | ||||
|     rm -rf /var/lib/yum/history | ||||
| } | ||||
|  | ||||
| function install_rhel() { | ||||
|     . /etc/os-release | ||||
|     if [[ "${ID}" == "rhel" ]]; then | ||||
|         if [[ ! " 8.6 8.8 8.9 9.0 9.2 9.3 " =~ " ${VERSION_ID} " ]]; then | ||||
|             echo "RHEL version ${VERSION_ID} not supported" | ||||
|             exit | ||||
|         fi | ||||
|     elif [[ "${ID}" == "almalinux" ]]; then | ||||
|         # Workaround for almalinux8 which used by quay.io/pypa/manylinux_2_28_x86_64 | ||||
|         VERSION_ID="8.6" | ||||
|     fi | ||||
|  | ||||
|     dnf install -y 'dnf-command(config-manager)' | ||||
|     # To add the online network package repository for the GPU Driver LTS releases | ||||
|     dnf config-manager --add-repo \ | ||||
|         https://repositories.intel.com/gpu/rhel/${VERSION_ID}/lts/2350/unified/intel-gpu-${VERSION_ID}.repo | ||||
|     # To add the online network network package repository for the Intel Support Packages | ||||
|     tee > /etc/yum.repos.d/intel-for-pytorch-gpu-dev.repo << EOF | ||||
| [intel-for-pytorch-gpu-dev] | ||||
| name=Intel for Pytorch GPU dev repository | ||||
| baseurl=https://yum.repos.intel.com/intel-for-pytorch-gpu-dev | ||||
| enabled=1 | ||||
| gpgcheck=1 | ||||
| repo_gpgcheck=1 | ||||
| gpgkey=https://yum.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB | ||||
| EOF | ||||
|  | ||||
|     # The xpu-smi packages | ||||
|     dnf install -y xpu-smi | ||||
|     # Compute and Media Runtimes | ||||
|     dnf install -y \ | ||||
|         intel-opencl intel-media intel-mediasdk libmfxgen1 libvpl2\ | ||||
|         level-zero intel-level-zero-gpu mesa-dri-drivers mesa-vulkan-drivers \ | ||||
|         mesa-vdpau-drivers libdrm mesa-libEGL mesa-libgbm mesa-libGL \ | ||||
|         mesa-libxatracker libvpl-tools intel-metrics-discovery \ | ||||
|         intel-metrics-library intel-igc-core intel-igc-cm \ | ||||
|         libva libva-utils intel-gmmlib libmetee intel-gsc intel-ocloc | ||||
|     # Development packages | ||||
|     dnf install -y --refresh \ | ||||
|         intel-igc-opencl-devel level-zero-devel intel-gsc-devel libmetee-devel \ | ||||
|         level-zero-devel | ||||
|     # Install Intel Support Packages | ||||
|     yum install -y intel-for-pytorch-gpu-dev intel-pti-dev | ||||
|  | ||||
|     # Cleanup | ||||
|     dnf clean all | ||||
|     rm -rf /var/cache/yum | ||||
|     rm -rf /var/lib/yum/yumdb | ||||
|     rm -rf /var/lib/yum/history | ||||
| } | ||||
|  | ||||
| function install_sles() { | ||||
|     . /etc/os-release | ||||
|     VERSION_SP=${VERSION_ID//./sp} | ||||
|     if [[ ! " 15sp4 15sp5 " =~ " ${VERSION_SP} " ]]; then | ||||
|         echo "SLES version ${VERSION_ID} not supported" | ||||
|         exit | ||||
|     fi | ||||
|  | ||||
|     # To add the online network package repository for the GPU Driver LTS releases | ||||
|     zypper addrepo -f -r \ | ||||
|         https://repositories.intel.com/gpu/sles/${VERSION_SP}/lts/2350/unified/intel-gpu-${VERSION_SP}.repo | ||||
|     rpm --import https://repositories.intel.com/gpu/intel-graphics.key | ||||
|     # To add the online network network package repository for the Intel Support Packages | ||||
|     zypper addrepo https://yum.repos.intel.com/intel-for-pytorch-gpu-dev intel-for-pytorch-gpu-dev | ||||
|     rpm --import https://yum.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB | ||||
|  | ||||
|     # The xpu-smi packages | ||||
|     zypper install -y lsb-release flex bison xpu-smi | ||||
|     # Compute and Media Runtimes | ||||
|     zypper install -y intel-level-zero-gpu level-zero intel-gsc intel-opencl intel-ocloc \ | ||||
|         intel-media-driver libigfxcmrt7 libvpl2 libvpl-tools libmfxgen1 libmfx1 | ||||
|     # Development packages | ||||
|     zypper install -y libigdfcl-devel intel-igc-cm libigfxcmrt-devel level-zero-devel | ||||
|  | ||||
|     # Install Intel Support Packages | ||||
|     zypper install -y intel-for-pytorch-gpu-dev intel-pti-dev | ||||
|  | ||||
| } | ||||
|  | ||||
|  | ||||
| # The installation depends on the base OS | ||||
| ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"') | ||||
| @ -107,6 +191,12 @@ case "$ID" in | ||||
|     centos) | ||||
|         install_centos | ||||
|     ;; | ||||
|     rhel|almalinux) | ||||
|         install_rhel | ||||
|     ;; | ||||
|     sles) | ||||
|         install_sles | ||||
|     ;; | ||||
|     *) | ||||
|         echo "Unable to determine OS..." | ||||
|         exit 1 | ||||
|  | ||||
							
								
								
									
										101
									
								
								.ci/docker/conda/Dockerfile
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										101
									
								
								.ci/docker/conda/Dockerfile
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,101 @@ | ||||
| ARG CUDA_VERSION=10.2 | ||||
| ARG BASE_TARGET=cuda${CUDA_VERSION} | ||||
| FROM centos:7 as base | ||||
|  | ||||
| ENV LC_ALL en_US.UTF-8 | ||||
| ENV LANG en_US.UTF-8 | ||||
| ENV LANGUAGE en_US.UTF-8 | ||||
|  | ||||
| ARG DEVTOOLSET_VERSION=9 | ||||
| RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo | ||||
| RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo | ||||
| RUN sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo | ||||
| RUN yum update -y | ||||
| RUN yum install -y wget curl perl util-linux xz bzip2 git patch which unzip | ||||
| # Just add everything as a safe.directory for git since these will be used in multiple places with git | ||||
| RUN git config --global --add safe.directory '*' | ||||
| RUN yum install -y yum-utils centos-release-scl | ||||
| RUN yum-config-manager --enable rhel-server-rhscl-7-rpms | ||||
| RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo | ||||
| RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo | ||||
| RUN sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo | ||||
| RUN yum install -y devtoolset-${DEVTOOLSET_VERSION}-gcc devtoolset-${DEVTOOLSET_VERSION}-gcc-c++ devtoolset-${DEVTOOLSET_VERSION}-gcc-gfortran devtoolset-${DEVTOOLSET_VERSION}-binutils | ||||
| # EPEL for cmake | ||||
| RUN wget http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm && \ | ||||
|     rpm -ivh epel-release-latest-7.noarch.rpm && \ | ||||
|     rm -f epel-release-latest-7.noarch.rpm | ||||
| # cmake | ||||
| RUN yum install -y cmake3 && \ | ||||
|     ln -s /usr/bin/cmake3 /usr/bin/cmake | ||||
| ENV PATH=/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH | ||||
| ENV LD_LIBRARY_PATH=/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/lib64:/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/lib:$LD_LIBRARY_PATH | ||||
|  | ||||
| RUN yum install -y autoconf aclocal automake make sudo | ||||
| RUN rm -rf /usr/local/cuda-* | ||||
|  | ||||
| FROM base as patchelf | ||||
| # Install patchelf | ||||
| ADD ./common/install_patchelf.sh install_patchelf.sh | ||||
| RUN bash ./install_patchelf.sh && rm install_patchelf.sh && cp $(which patchelf) /patchelf | ||||
|  | ||||
| FROM base as openssl | ||||
| # Install openssl | ||||
| ADD ./common/install_openssl.sh install_openssl.sh | ||||
| RUN bash ./install_openssl.sh && rm install_openssl.sh | ||||
|  | ||||
| FROM base as conda | ||||
| # Install Anaconda | ||||
| ADD ./common/install_conda_docker.sh install_conda.sh | ||||
| RUN bash ./install_conda.sh && rm install_conda.sh | ||||
|  | ||||
| # Install CUDA | ||||
| FROM base as cuda | ||||
| ARG CUDA_VERSION=10.2 | ||||
| RUN rm -rf /usr/local/cuda-* | ||||
| ADD ./common/install_cuda.sh install_cuda.sh | ||||
| ENV CUDA_HOME=/usr/local/cuda-${CUDA_VERSION} | ||||
| # Preserve CUDA_VERSION for the builds | ||||
| ENV CUDA_VERSION=${CUDA_VERSION} | ||||
| # Make things in our path by default | ||||
| ENV PATH=/usr/local/cuda-${CUDA_VERSION}/bin:$PATH | ||||
|  | ||||
| FROM cuda as cuda11.8 | ||||
| RUN bash ./install_cuda.sh 11.8 | ||||
| ENV DESIRED_CUDA=11.8 | ||||
|  | ||||
| FROM cuda as cuda12.1 | ||||
| RUN bash ./install_cuda.sh 12.1 | ||||
| ENV DESIRED_CUDA=12.1 | ||||
|  | ||||
| FROM cuda as cuda12.4 | ||||
| RUN bash ./install_cuda.sh 12.4 | ||||
| ENV DESIRED_CUDA=12.4 | ||||
|  | ||||
| # Install MNIST test data | ||||
| FROM base as mnist | ||||
| ADD ./common/install_mnist.sh install_mnist.sh | ||||
| RUN bash ./install_mnist.sh | ||||
|  | ||||
| FROM base as all_cuda | ||||
| COPY --from=cuda11.8  /usr/local/cuda-11.8 /usr/local/cuda-11.8 | ||||
| COPY --from=cuda12.1  /usr/local/cuda-12.1 /usr/local/cuda-12.1 | ||||
| COPY --from=cuda12.4  /usr/local/cuda-12.4 /usr/local/cuda-12.4 | ||||
|  | ||||
| # Final step | ||||
| FROM ${BASE_TARGET} as final | ||||
| COPY --from=openssl            /opt/openssl           /opt/openssl | ||||
| COPY --from=patchelf           /patchelf              /usr/local/bin/patchelf | ||||
| COPY --from=conda              /opt/conda             /opt/conda | ||||
|  | ||||
| # Add jni.h for java host build. | ||||
| COPY ./common/install_jni.sh install_jni.sh | ||||
| COPY ./java/jni.h jni.h | ||||
| RUN bash ./install_jni.sh && rm install_jni.sh | ||||
|  | ||||
| ENV  PATH /opt/conda/bin:$PATH | ||||
| COPY --from=mnist  /usr/local/mnist /usr/local/mnist | ||||
| RUN rm -rf /usr/local/cuda | ||||
| RUN chmod o+rw /usr/local | ||||
| RUN touch /.condarc && \ | ||||
|     chmod o+rw /.condarc && \ | ||||
|     chmod -R o+rw /opt/conda | ||||
							
								
								
									
										76
									
								
								.ci/docker/conda/build.sh
									
									
									
									
									
										Executable file
									
								
							
							
						
						
									
										76
									
								
								.ci/docker/conda/build.sh
									
									
									
									
									
										Executable file
									
								
							| @ -0,0 +1,76 @@ | ||||
| #!/usr/bin/env bash | ||||
| # Script used only in CD pipeline | ||||
|  | ||||
| set -eou pipefail | ||||
|  | ||||
| image="$1" | ||||
| shift | ||||
|  | ||||
| if [ -z "${image}" ]; then | ||||
|   echo "Usage: $0 IMAGE" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| DOCKER_IMAGE_NAME="pytorch/${image}" | ||||
|  | ||||
|  | ||||
| export DOCKER_BUILDKIT=1 | ||||
| TOPDIR=$(git rev-parse --show-toplevel) | ||||
|  | ||||
| CUDA_VERSION=${CUDA_VERSION:-12.1} | ||||
|  | ||||
| case ${CUDA_VERSION} in | ||||
|   cpu) | ||||
|     BASE_TARGET=base | ||||
|     DOCKER_TAG=cpu | ||||
|     ;; | ||||
|   all) | ||||
|     BASE_TARGET=all_cuda | ||||
|     DOCKER_TAG=latest | ||||
|     ;; | ||||
|   *) | ||||
|     BASE_TARGET=cuda${CUDA_VERSION} | ||||
|     DOCKER_TAG=cuda${CUDA_VERSION} | ||||
|     ;; | ||||
| esac | ||||
|  | ||||
|  | ||||
| ( | ||||
|   set -x | ||||
|   docker build \ | ||||
|     --target final \ | ||||
|     --progress plain \ | ||||
|     --build-arg "BASE_TARGET=${BASE_TARGET}" \ | ||||
|     --build-arg "CUDA_VERSION=${CUDA_VERSION}" \ | ||||
|     --build-arg "DEVTOOLSET_VERSION=9" \ | ||||
|     -t ${DOCKER_IMAGE_NAME} \ | ||||
|     $@ \ | ||||
|     -f "${TOPDIR}/.ci/docker/conda/Dockerfile" \ | ||||
|     ${TOPDIR}/.ci/docker/ | ||||
| ) | ||||
|  | ||||
| if [[ "${DOCKER_TAG}" =~ ^cuda* ]]; then | ||||
|   # Test that we're using the right CUDA compiler | ||||
|   ( | ||||
|     set -x | ||||
|     docker run --rm "${DOCKER_IMAGE_NAME}" nvcc --version | grep "cuda_${CUDA_VERSION}" | ||||
|   ) | ||||
| fi | ||||
|  | ||||
| GITHUB_REF=${GITHUB_REF:-$(git symbolic-ref -q HEAD || git describe --tags --exact-match)} | ||||
| GIT_BRANCH_NAME=${GITHUB_REF##*/} | ||||
| GIT_COMMIT_SHA=${GITHUB_SHA:-$(git rev-parse HEAD)} | ||||
| DOCKER_IMAGE_BRANCH_TAG=${DOCKER_IMAGE_NAME}-${GIT_BRANCH_NAME} | ||||
| DOCKER_IMAGE_SHA_TAG=${DOCKER_IMAGE_NAME}-${GIT_COMMIT_SHA} | ||||
| if [[ "${WITH_PUSH:-}" == true ]]; then | ||||
|   ( | ||||
|     set -x | ||||
|     docker push "${DOCKER_IMAGE_NAME}" | ||||
|     if [[ -n ${GITHUB_REF} ]]; then | ||||
|         docker tag ${DOCKER_IMAGE_NAME} ${DOCKER_IMAGE_BRANCH_TAG} | ||||
|         docker tag ${DOCKER_IMAGE_NAME} ${DOCKER_IMAGE_SHA_TAG} | ||||
|         docker push "${DOCKER_IMAGE_BRANCH_TAG}" | ||||
|         docker push "${DOCKER_IMAGE_SHA_TAG}" | ||||
|     fi | ||||
|   ) | ||||
| fi | ||||
							
								
								
									
										107
									
								
								.ci/docker/libtorch/Dockerfile
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										107
									
								
								.ci/docker/libtorch/Dockerfile
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,107 @@ | ||||
| ARG BASE_TARGET=base | ||||
| ARG GPU_IMAGE=ubuntu:20.04 | ||||
| FROM ${GPU_IMAGE} as base | ||||
|  | ||||
| ENV DEBIAN_FRONTEND=noninteractive | ||||
|  | ||||
| RUN apt-get clean && apt-get update | ||||
| RUN apt-get install -y curl locales g++ git-all autoconf automake make cmake wget unzip sudo | ||||
| # Just add everything as a safe.directory for git since these will be used in multiple places with git | ||||
| RUN git config --global --add safe.directory '*' | ||||
|  | ||||
| RUN locale-gen en_US.UTF-8 | ||||
|  | ||||
| ENV LC_ALL en_US.UTF-8 | ||||
| ENV LANG en_US.UTF-8 | ||||
| ENV LANGUAGE en_US.UTF-8 | ||||
|  | ||||
| # Install openssl | ||||
| FROM base as openssl | ||||
| ADD ./common/install_openssl.sh install_openssl.sh | ||||
| RUN bash ./install_openssl.sh && rm install_openssl.sh | ||||
|  | ||||
| # Install python | ||||
| FROM base as python | ||||
| ADD common/install_cpython.sh install_cpython.sh | ||||
| RUN apt-get update -y && \ | ||||
|     apt-get install build-essential gdb lcov libbz2-dev libffi-dev \ | ||||
|         libgdbm-dev liblzma-dev libncurses5-dev libreadline6-dev \ | ||||
|         libsqlite3-dev libssl-dev lzma lzma-dev tk-dev uuid-dev zlib1g-dev -y && \ | ||||
|     bash ./install_cpython.sh && \ | ||||
|     rm install_cpython.sh && \ | ||||
|     apt-get clean | ||||
|  | ||||
| FROM base as conda | ||||
| ADD ./common/install_conda_docker.sh install_conda.sh | ||||
| RUN bash ./install_conda.sh && rm install_conda.sh | ||||
|  | ||||
| FROM base as cpu | ||||
| # Install Anaconda | ||||
| COPY --from=conda /opt/conda /opt/conda | ||||
| # Install python | ||||
| COPY --from=python /opt/python    /opt/python | ||||
| COPY --from=python /opt/_internal /opt/_internal | ||||
| ENV PATH=/opt/conda/bin:/usr/local/cuda/bin:$PATH | ||||
| # Install MKL | ||||
| ADD ./common/install_mkl.sh install_mkl.sh | ||||
| RUN bash ./install_mkl.sh && rm install_mkl.sh | ||||
|  | ||||
| FROM cpu as cuda | ||||
| ADD ./common/install_cuda.sh install_cuda.sh | ||||
| ADD ./common/install_magma.sh install_magma.sh | ||||
| ENV CUDA_HOME /usr/local/cuda | ||||
|  | ||||
| FROM cuda as cuda11.8 | ||||
| RUN bash ./install_cuda.sh 11.8 | ||||
| RUN bash ./install_magma.sh 11.8 | ||||
| RUN ln -sf /usr/local/cuda-11.8 /usr/local/cuda | ||||
|  | ||||
| FROM cuda as cuda12.1 | ||||
| RUN bash ./install_cuda.sh 12.1 | ||||
| RUN bash ./install_magma.sh 12.1 | ||||
| RUN ln -sf /usr/local/cuda-12.1 /usr/local/cuda | ||||
|  | ||||
| FROM cuda as cuda12.4 | ||||
| RUN bash ./install_cuda.sh 12.4 | ||||
| RUN bash ./install_magma.sh 12.4 | ||||
| RUN ln -sf /usr/local/cuda-12.4 /usr/local/cuda | ||||
|  | ||||
| FROM cpu as rocm | ||||
| ARG PYTORCH_ROCM_ARCH | ||||
| ENV PYTORCH_ROCM_ARCH ${PYTORCH_ROCM_ARCH} | ||||
| ENV MKLROOT /opt/intel | ||||
| # Adding ROCM_PATH env var so that LoadHip.cmake (even with logic updated for ROCm6.0) | ||||
| # find HIP works for ROCm5.7. Not needed for ROCm6.0 and above. | ||||
| # Remove below when ROCm5.7 is not in support matrix anymore. | ||||
| ENV ROCM_PATH /opt/rocm | ||||
| # No need to install ROCm as base docker image should have full ROCm install | ||||
| #ADD ./common/install_rocm.sh install_rocm.sh | ||||
| ADD ./common/install_rocm_drm.sh install_rocm_drm.sh | ||||
| ADD ./common/install_rocm_magma.sh install_rocm_magma.sh | ||||
| # gfortran and python needed for building magma from source for ROCm | ||||
| RUN apt-get update -y && \ | ||||
|     apt-get install gfortran -y && \ | ||||
|     apt-get install python -y && \ | ||||
|     apt-get clean | ||||
|  | ||||
| RUN bash ./install_rocm_drm.sh && rm install_rocm_drm.sh | ||||
| RUN bash ./install_rocm_magma.sh && rm install_rocm_magma.sh | ||||
|  | ||||
| # Install AOTriton | ||||
| COPY ./common/common_utils.sh common_utils.sh | ||||
| COPY ./common/aotriton_version.txt aotriton_version.txt | ||||
| COPY ./common/install_aotriton.sh install_aotriton.sh | ||||
| RUN bash ./install_aotriton.sh /opt/rocm && rm install_aotriton.sh aotriton_version.txt | ||||
| ENV AOTRITON_INSTALLED_PREFIX /opt/rocm/aotriton | ||||
|  | ||||
| FROM ${BASE_TARGET} as final | ||||
| COPY --from=openssl            /opt/openssl           /opt/openssl | ||||
| # Install patchelf | ||||
| ADD ./common/install_patchelf.sh install_patchelf.sh | ||||
| RUN bash ./install_patchelf.sh && rm install_patchelf.sh | ||||
| # Install Anaconda | ||||
| COPY --from=conda /opt/conda /opt/conda | ||||
| # Install python | ||||
| COPY --from=python /opt/python    /opt/python | ||||
| COPY --from=python /opt/_internal /opt/_internal | ||||
| ENV PATH=/opt/conda/bin:/usr/local/cuda/bin:$PATH | ||||
							
								
								
									
										93
									
								
								.ci/docker/libtorch/build.sh
									
									
									
									
									
										Executable file
									
								
							
							
						
						
									
										93
									
								
								.ci/docker/libtorch/build.sh
									
									
									
									
									
										Executable file
									
								
							| @ -0,0 +1,93 @@ | ||||
| #!/usr/bin/env bash | ||||
| # Script used only in CD pipeline | ||||
|  | ||||
| set -eou pipefail | ||||
|  | ||||
| image="$1" | ||||
| shift | ||||
|  | ||||
| if [ -z "${image}" ]; then | ||||
|   echo "Usage: $0 IMAGE" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| DOCKER_IMAGE="pytorch/${image}" | ||||
|  | ||||
| TOPDIR=$(git rev-parse --show-toplevel) | ||||
|  | ||||
| GPU_ARCH_TYPE=${GPU_ARCH_TYPE:-cpu} | ||||
| GPU_ARCH_VERSION=${GPU_ARCH_VERSION:-} | ||||
|  | ||||
| WITH_PUSH=${WITH_PUSH:-} | ||||
|  | ||||
| DOCKER=${DOCKER:-docker} | ||||
|  | ||||
| case ${GPU_ARCH_TYPE} in | ||||
|     cpu) | ||||
|         BASE_TARGET=cpu | ||||
|         DOCKER_TAG=cpu | ||||
|         GPU_IMAGE=ubuntu:20.04 | ||||
|         DOCKER_GPU_BUILD_ARG="" | ||||
|         ;; | ||||
|     cuda) | ||||
|         BASE_TARGET=cuda${GPU_ARCH_VERSION} | ||||
|         DOCKER_TAG=cuda${GPU_ARCH_VERSION} | ||||
|         GPU_IMAGE=ubuntu:20.04 | ||||
|         DOCKER_GPU_BUILD_ARG="" | ||||
|         ;; | ||||
|     rocm) | ||||
|         BASE_TARGET=rocm | ||||
|         DOCKER_TAG=rocm${GPU_ARCH_VERSION} | ||||
|         GPU_IMAGE=rocm/dev-ubuntu-20.04:${GPU_ARCH_VERSION}-complete | ||||
|         PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx1030;gfx1100" | ||||
|         ROCM_REGEX="([0-9]+)\.([0-9]+)[\.]?([0-9]*)" | ||||
|         if [[ $GPU_ARCH_VERSION =~ $ROCM_REGEX ]]; then | ||||
|             ROCM_VERSION_INT=$((${BASH_REMATCH[1]}*10000 + ${BASH_REMATCH[2]}*100 + ${BASH_REMATCH[3]:-0})) | ||||
|         else | ||||
|             echo "ERROR: rocm regex failed" | ||||
|             exit 1 | ||||
|         fi | ||||
|         if [[ $ROCM_VERSION_INT -ge 60000 ]]; then | ||||
|             PYTORCH_ROCM_ARCH+=";gfx942" | ||||
|         fi | ||||
|         DOCKER_GPU_BUILD_ARG="--build-arg PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH}" | ||||
|         ;; | ||||
|     *) | ||||
|         echo "ERROR: Unrecognized GPU_ARCH_TYPE: ${GPU_ARCH_TYPE}" | ||||
|         exit 1 | ||||
|         ;; | ||||
| esac | ||||
|  | ||||
|  | ||||
| ( | ||||
|     set -x | ||||
|     DOCKER_BUILDKIT=1 ${DOCKER} build \ | ||||
|          --target final \ | ||||
|         ${DOCKER_GPU_BUILD_ARG} \ | ||||
|         --build-arg "GPU_IMAGE=${GPU_IMAGE}" \ | ||||
|         --build-arg "BASE_TARGET=${BASE_TARGET}" \ | ||||
|         -t "${DOCKER_IMAGE}" \ | ||||
|         $@ \ | ||||
|         -f "${TOPDIR}/.ci/docker/libtorch/Dockerfile" \ | ||||
|         "${TOPDIR}/.ci/docker/" | ||||
|  | ||||
| ) | ||||
|  | ||||
| GITHUB_REF=${GITHUB_REF:-$(git symbolic-ref -q HEAD || git describe --tags --exact-match)} | ||||
| GIT_BRANCH_NAME=${GITHUB_REF##*/} | ||||
| GIT_COMMIT_SHA=${GITHUB_SHA:-$(git rev-parse HEAD)} | ||||
| DOCKER_IMAGE_BRANCH_TAG=${DOCKER_IMAGE}-${GIT_BRANCH_NAME} | ||||
| DOCKER_IMAGE_SHA_TAG=${DOCKER_IMAGE}-${GIT_COMMIT_SHA} | ||||
|  | ||||
| if [[ "${WITH_PUSH}" == true ]]; then | ||||
|   ( | ||||
|     set -x | ||||
|     ${DOCKER} push "${DOCKER_IMAGE}" | ||||
|     if [[ -n ${GITHUB_REF} ]]; then | ||||
|         ${DOCKER} tag ${DOCKER_IMAGE} ${DOCKER_IMAGE_BRANCH_TAG} | ||||
|         ${DOCKER} tag ${DOCKER_IMAGE} ${DOCKER_IMAGE_SHA_TAG} | ||||
|         ${DOCKER} push "${DOCKER_IMAGE_BRANCH_TAG}" | ||||
|         ${DOCKER} push "${DOCKER_IMAGE_SHA_TAG}" | ||||
|     fi | ||||
|   ) | ||||
| fi | ||||
							
								
								
									
										203
									
								
								.ci/docker/manywheel/Dockerfile
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										203
									
								
								.ci/docker/manywheel/Dockerfile
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,203 @@ | ||||
| # syntax = docker/dockerfile:experimental | ||||
| ARG ROCM_VERSION=3.7 | ||||
| ARG BASE_CUDA_VERSION=11.8 | ||||
|  | ||||
| ARG GPU_IMAGE=centos:7 | ||||
| FROM centos:7 as base | ||||
|  | ||||
| ENV LC_ALL en_US.UTF-8 | ||||
| ENV LANG en_US.UTF-8 | ||||
| ENV LANGUAGE en_US.UTF-8 | ||||
|  | ||||
| ARG DEVTOOLSET_VERSION=9 | ||||
| # Note: This is required patch since CentOS have reached EOL | ||||
| # otherwise any yum install setp will fail | ||||
| RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo | ||||
| RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo | ||||
| RUN sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo | ||||
| RUN yum install -y wget curl perl util-linux xz bzip2 git patch which perl zlib-devel | ||||
| # Just add everything as a safe.directory for git since these will be used in multiple places with git | ||||
| RUN git config --global --add safe.directory '*' | ||||
| RUN yum install -y yum-utils centos-release-scl | ||||
| RUN yum-config-manager --enable rhel-server-rhscl-7-rpms | ||||
| # Note: After running yum-config-manager --enable rhel-server-rhscl-7-rpms | ||||
| # patch is required once again. Somehow this steps adds mirror.centos.org | ||||
| RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo | ||||
| RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo | ||||
| RUN sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo | ||||
| RUN yum install -y devtoolset-${DEVTOOLSET_VERSION}-gcc devtoolset-${DEVTOOLSET_VERSION}-gcc-c++ devtoolset-${DEVTOOLSET_VERSION}-gcc-gfortran devtoolset-${DEVTOOLSET_VERSION}-binutils | ||||
| ENV PATH=/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH | ||||
| ENV LD_LIBRARY_PATH=/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/lib64:/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/lib:$LD_LIBRARY_PATH | ||||
|  | ||||
| RUN wget http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm && \ | ||||
|     rpm -ivh epel-release-latest-7.noarch.rpm && \ | ||||
|     rm -f epel-release-latest-7.noarch.rpm | ||||
|  | ||||
| # cmake-3.18.4 from pip | ||||
| RUN yum install -y python3-pip && \ | ||||
|     python3 -mpip install cmake==3.18.4 && \ | ||||
|     ln -s /usr/local/bin/cmake /usr/bin/cmake | ||||
|  | ||||
| RUN yum install -y autoconf aclocal automake make sudo | ||||
|  | ||||
| FROM base as openssl | ||||
| # Install openssl (this must precede `build python` step) | ||||
| # (In order to have a proper SSL module, Python is compiled | ||||
| # against a recent openssl [see env vars above], which is linked | ||||
| # statically. We delete openssl afterwards.) | ||||
| ADD ./common/install_openssl.sh install_openssl.sh | ||||
| RUN bash ./install_openssl.sh && rm install_openssl.sh | ||||
|  | ||||
| # EPEL for cmake | ||||
| FROM base as patchelf | ||||
| # Install patchelf | ||||
| ADD ./common/install_patchelf.sh install_patchelf.sh | ||||
| RUN bash ./install_patchelf.sh && rm install_patchelf.sh | ||||
| RUN cp $(which patchelf) /patchelf | ||||
|  | ||||
| FROM patchelf as python | ||||
| # build python | ||||
| COPY manywheel/build_scripts /build_scripts | ||||
| ADD ./common/install_cpython.sh /build_scripts/install_cpython.sh | ||||
| RUN bash build_scripts/build.sh && rm -r build_scripts | ||||
|  | ||||
| FROM base as cuda | ||||
| ARG BASE_CUDA_VERSION=10.2 | ||||
| # Install CUDA | ||||
| ADD ./common/install_cuda.sh install_cuda.sh | ||||
| RUN bash ./install_cuda.sh ${BASE_CUDA_VERSION} && rm install_cuda.sh | ||||
|  | ||||
| FROM base as intel | ||||
| # MKL | ||||
| ADD ./common/install_mkl.sh install_mkl.sh | ||||
| RUN bash ./install_mkl.sh && rm install_mkl.sh | ||||
|  | ||||
| FROM base as magma | ||||
| ARG BASE_CUDA_VERSION=10.2 | ||||
| # Install magma | ||||
| ADD ./common/install_magma.sh install_magma.sh | ||||
| RUN bash ./install_magma.sh ${BASE_CUDA_VERSION} && rm install_magma.sh | ||||
|  | ||||
| FROM base as jni | ||||
| # Install java jni header | ||||
| ADD ./common/install_jni.sh install_jni.sh | ||||
| ADD ./java/jni.h jni.h | ||||
| RUN bash ./install_jni.sh && rm install_jni.sh | ||||
|  | ||||
| FROM base as libpng | ||||
| # Install libpng | ||||
| ADD ./common/install_libpng.sh install_libpng.sh | ||||
| RUN bash ./install_libpng.sh && rm install_libpng.sh | ||||
|  | ||||
| FROM ${GPU_IMAGE} as common | ||||
| RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo | ||||
| RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo | ||||
| RUN sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo | ||||
| ENV LC_ALL en_US.UTF-8 | ||||
| ENV LANG en_US.UTF-8 | ||||
| ENV LANGUAGE en_US.UTF-8 | ||||
| RUN yum install -y \ | ||||
|         aclocal \ | ||||
|         autoconf \ | ||||
|         automake \ | ||||
|         bison \ | ||||
|         bzip2 \ | ||||
|         curl \ | ||||
|         diffutils \ | ||||
|         file \ | ||||
|         git \ | ||||
|         make \ | ||||
|         patch \ | ||||
|         perl \ | ||||
|         unzip \ | ||||
|         util-linux \ | ||||
|         wget \ | ||||
|         which \ | ||||
|         xz \ | ||||
|         yasm | ||||
| RUN yum install -y \ | ||||
|     https://repo.ius.io/ius-release-el7.rpm \ | ||||
|     https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm | ||||
| RUN yum swap -y git git236-core | ||||
| # git236+ would refuse to run git commands in repos owned by other users | ||||
| # Which causes version check to fail, as pytorch repo is bind-mounted into the image | ||||
| # Override this behaviour by treating every folder as safe | ||||
| # For more details see https://github.com/pytorch/pytorch/issues/78659#issuecomment-1144107327 | ||||
| RUN git config --global --add safe.directory "*" | ||||
|  | ||||
| ENV SSL_CERT_FILE=/opt/_internal/certs.pem | ||||
| # Install LLVM version | ||||
| COPY --from=openssl            /opt/openssl                          /opt/openssl | ||||
| COPY --from=python             /opt/python                           /opt/python | ||||
| COPY --from=python             /opt/_internal                        /opt/_internal | ||||
| COPY --from=python             /opt/python/cp39-cp39/bin/auditwheel /usr/local/bin/auditwheel | ||||
| COPY --from=intel              /opt/intel                            /opt/intel | ||||
| COPY --from=patchelf           /usr/local/bin/patchelf               /usr/local/bin/patchelf | ||||
| COPY --from=jni                /usr/local/include/jni.h              /usr/local/include/jni.h | ||||
| COPY --from=libpng             /usr/local/bin/png*                   /usr/local/bin/ | ||||
| COPY --from=libpng             /usr/local/bin/libpng*                /usr/local/bin/ | ||||
| COPY --from=libpng             /usr/local/include/png*               /usr/local/include/ | ||||
| COPY --from=libpng             /usr/local/include/libpng*            /usr/local/include/ | ||||
| COPY --from=libpng             /usr/local/lib/libpng*                /usr/local/lib/ | ||||
| COPY --from=libpng             /usr/local/lib/pkgconfig              /usr/local/lib/pkgconfig | ||||
|  | ||||
| FROM common as cpu_final | ||||
| ARG BASE_CUDA_VERSION=10.1 | ||||
| ARG DEVTOOLSET_VERSION=9 | ||||
| RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo | ||||
| RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo | ||||
| RUN sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo | ||||
|  | ||||
| RUN yum install -y yum-utils centos-release-scl | ||||
| RUN yum-config-manager --enable rhel-server-rhscl-7-rpms | ||||
| RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo | ||||
| RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo | ||||
| RUN sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo | ||||
| RUN yum install -y devtoolset-${DEVTOOLSET_VERSION}-gcc devtoolset-${DEVTOOLSET_VERSION}-gcc-c++ devtoolset-${DEVTOOLSET_VERSION}-gcc-gfortran devtoolset-${DEVTOOLSET_VERSION}-binutils | ||||
| ENV PATH=/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH | ||||
| ENV LD_LIBRARY_PATH=/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/lib64:/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/lib:$LD_LIBRARY_PATH | ||||
|  | ||||
| # cmake is already installed inside the rocm base image, so remove if present | ||||
| RUN rpm -e cmake || true | ||||
| # cmake-3.18.4 from pip | ||||
| RUN yum install -y python3-pip && \ | ||||
|     python3 -mpip install cmake==3.18.4 && \ | ||||
|     ln -s /usr/local/bin/cmake /usr/bin/cmake | ||||
|  | ||||
| # ninja | ||||
| RUN yum install -y ninja-build | ||||
|  | ||||
| FROM cpu_final as cuda_final | ||||
| RUN rm -rf /usr/local/cuda-${BASE_CUDA_VERSION} | ||||
| COPY --from=cuda     /usr/local/cuda-${BASE_CUDA_VERSION}  /usr/local/cuda-${BASE_CUDA_VERSION} | ||||
| COPY --from=magma    /usr/local/cuda-${BASE_CUDA_VERSION}  /usr/local/cuda-${BASE_CUDA_VERSION} | ||||
| RUN ln -sf /usr/local/cuda-${BASE_CUDA_VERSION} /usr/local/cuda | ||||
| ENV PATH=/usr/local/cuda/bin:$PATH | ||||
|  | ||||
| FROM cpu_final as rocm_final | ||||
| ARG ROCM_VERSION=3.7 | ||||
| ARG PYTORCH_ROCM_ARCH | ||||
| ENV PYTORCH_ROCM_ARCH ${PYTORCH_ROCM_ARCH} | ||||
| # Adding ROCM_PATH env var so that LoadHip.cmake (even with logic updated for ROCm6.0) | ||||
| # find HIP works for ROCm5.7. Not needed for ROCm6.0 and above. | ||||
| # Remove below when ROCm5.7 is not in support matrix anymore. | ||||
| ENV ROCM_PATH /opt/rocm | ||||
| ENV MKLROOT /opt/intel | ||||
| # No need to install ROCm as base docker image should have full ROCm install | ||||
| #ADD ./common/install_rocm.sh install_rocm.sh | ||||
| #RUN ROCM_VERSION=${ROCM_VERSION} bash ./install_rocm.sh && rm install_rocm.sh | ||||
| ADD ./common/install_rocm_drm.sh install_rocm_drm.sh | ||||
| RUN bash ./install_rocm_drm.sh && rm install_rocm_drm.sh | ||||
| # cmake3 is needed for the MIOpen build | ||||
| RUN ln -sf /usr/local/bin/cmake /usr/bin/cmake3 | ||||
| ADD ./common/install_rocm_magma.sh install_rocm_magma.sh | ||||
| RUN bash ./install_rocm_magma.sh && rm install_rocm_magma.sh | ||||
| ADD ./common/install_miopen.sh install_miopen.sh | ||||
| RUN bash ./install_miopen.sh ${ROCM_VERSION} && rm install_miopen.sh | ||||
|  | ||||
| # Install AOTriton | ||||
| COPY ./common/common_utils.sh common_utils.sh | ||||
| COPY ./common/aotriton_version.txt aotriton_version.txt | ||||
| COPY ./common/install_aotriton.sh install_aotriton.sh | ||||
| RUN bash ./install_aotriton.sh /opt/rocm && rm install_aotriton.sh aotriton_version.txt | ||||
| ENV AOTRITON_INSTALLED_PREFIX /opt/rocm/aotriton | ||||
							
								
								
									
										152
									
								
								.ci/docker/manywheel/Dockerfile_2014
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										152
									
								
								.ci/docker/manywheel/Dockerfile_2014
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,152 @@ | ||||
| # syntax = docker/dockerfile:experimental | ||||
| ARG ROCM_VERSION=3.7 | ||||
| ARG BASE_CUDA_VERSION=10.2 | ||||
| ARG GPU_IMAGE=nvidia/cuda:${BASE_CUDA_VERSION}-devel-centos7 | ||||
| FROM quay.io/pypa/manylinux2014_x86_64 as base | ||||
|  | ||||
| ENV LC_ALL en_US.UTF-8 | ||||
| ENV LANG en_US.UTF-8 | ||||
| ENV LANGUAGE en_US.UTF-8 | ||||
|  | ||||
| RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo | ||||
| RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo | ||||
| RUN sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo | ||||
| RUN yum install -y wget curl perl util-linux xz bzip2 git patch which perl zlib-devel | ||||
| RUN yum install -y yum-utils centos-release-scl sudo | ||||
| RUN yum-config-manager --enable rhel-server-rhscl-7-rpms | ||||
| RUN yum install -y devtoolset-7-gcc devtoolset-7-gcc-c++ devtoolset-7-gcc-gfortran devtoolset-7-binutils | ||||
| ENV PATH=/opt/rh/devtoolset-7/root/usr/bin:$PATH | ||||
| ENV LD_LIBRARY_PATH=/opt/rh/devtoolset-7/root/usr/lib64:/opt/rh/devtoolset-7/root/usr/lib:$LD_LIBRARY_PATH | ||||
|  | ||||
| # cmake | ||||
| RUN yum install -y cmake3 && \ | ||||
|     ln -s /usr/bin/cmake3 /usr/bin/cmake | ||||
| FROM base as openssl | ||||
| # Install openssl (this must precede `build python` step) | ||||
| # (In order to have a proper SSL module, Python is compiled | ||||
| # against a recent openssl [see env vars above], which is linked | ||||
| # statically. We delete openssl afterwards.) | ||||
| ADD ./common/install_openssl.sh install_openssl.sh | ||||
| RUN bash ./install_openssl.sh && rm install_openssl.sh | ||||
|  | ||||
|  | ||||
|  | ||||
| # remove unncessary python versions | ||||
| RUN rm -rf /opt/python/cp26-cp26m /opt/_internal/cpython-2.6.9-ucs2 | ||||
| RUN rm -rf /opt/python/cp26-cp26mu /opt/_internal/cpython-2.6.9-ucs4 | ||||
| RUN rm -rf /opt/python/cp33-cp33m /opt/_internal/cpython-3.3.6 | ||||
| RUN rm -rf /opt/python/cp34-cp34m /opt/_internal/cpython-3.4.6 | ||||
|  | ||||
| FROM base as cuda | ||||
| ARG BASE_CUDA_VERSION=10.2 | ||||
| # Install CUDA | ||||
| ADD ./common/install_cuda.sh install_cuda.sh | ||||
| RUN bash ./install_cuda.sh ${BASE_CUDA_VERSION} && rm install_cuda.sh | ||||
|  | ||||
| FROM base as intel | ||||
| # MKL | ||||
| ADD ./common/install_mkl.sh install_mkl.sh | ||||
| RUN bash ./install_mkl.sh && rm install_mkl.sh | ||||
|  | ||||
| FROM base as magma | ||||
| ARG BASE_CUDA_VERSION=10.2 | ||||
| # Install magma | ||||
| ADD ./common/install_magma.sh install_magma.sh | ||||
| RUN bash ./install_magma.sh ${BASE_CUDA_VERSION} && rm install_magma.sh | ||||
|  | ||||
| FROM base as jni | ||||
| # Install java jni header | ||||
| ADD ./common/install_jni.sh install_jni.sh | ||||
| ADD ./java/jni.h jni.h | ||||
| RUN bash ./install_jni.sh && rm install_jni.sh | ||||
|  | ||||
| FROM base as libpng | ||||
| # Install libpng | ||||
| ADD ./common/install_libpng.sh install_libpng.sh | ||||
| RUN bash ./install_libpng.sh && rm install_libpng.sh | ||||
|  | ||||
| FROM ${GPU_IMAGE} as common | ||||
| RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo | ||||
| RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo | ||||
| RUN sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo | ||||
| ENV LC_ALL en_US.UTF-8 | ||||
| ENV LANG en_US.UTF-8 | ||||
| ENV LANGUAGE en_US.UTF-8 | ||||
| RUN yum install -y \ | ||||
|         aclocal \ | ||||
|         autoconf \ | ||||
|         automake \ | ||||
|         bison \ | ||||
|         bzip2 \ | ||||
|         curl \ | ||||
|         diffutils \ | ||||
|         file \ | ||||
|         git \ | ||||
|         make \ | ||||
|         patch \ | ||||
|         perl \ | ||||
|         unzip \ | ||||
|         util-linux \ | ||||
|         wget \ | ||||
|         which \ | ||||
|         xz \ | ||||
|         yasm | ||||
| RUN yum install -y \ | ||||
|     https://repo.ius.io/ius-release-el7.rpm \ | ||||
|     https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm | ||||
| RUN yum swap -y git git236-core | ||||
| # git236+ would refuse to run git commands in repos owned by other users | ||||
| # Which causes version check to fail, as pytorch repo is bind-mounted into the image | ||||
| # Override this behaviour by treating every folder as safe | ||||
| # For more details see https://github.com/pytorch/pytorch/issues/78659#issuecomment-1144107327 | ||||
| RUN git config --global --add safe.directory "*" | ||||
|  | ||||
| ENV SSL_CERT_FILE=/opt/_internal/certs.pem | ||||
| # Install LLVM version | ||||
| COPY --from=openssl            /opt/openssl                          /opt/openssl | ||||
| COPY --from=base               /opt/python                           /opt/python | ||||
| COPY --from=base               /opt/_internal                        /opt/_internal | ||||
| COPY --from=base               /usr/local/bin/auditwheel             /usr/local/bin/auditwheel | ||||
| COPY --from=intel              /opt/intel                            /opt/intel | ||||
| COPY --from=base               /usr/local/bin/patchelf               /usr/local/bin/patchelf | ||||
| COPY --from=libpng             /usr/local/bin/png*                   /usr/local/bin/ | ||||
| COPY --from=libpng             /usr/local/bin/libpng*                /usr/local/bin/ | ||||
| COPY --from=libpng             /usr/local/include/png*               /usr/local/include/ | ||||
| COPY --from=libpng             /usr/local/include/libpng*            /usr/local/include/ | ||||
| COPY --from=libpng             /usr/local/lib/libpng*                /usr/local/lib/ | ||||
| COPY --from=libpng             /usr/local/lib/pkgconfig              /usr/local/lib/pkgconfig | ||||
| COPY --from=jni                /usr/local/include/jni.h              /usr/local/include/jni.h | ||||
|  | ||||
| FROM common as cpu_final | ||||
| ARG BASE_CUDA_VERSION=10.2 | ||||
| RUN yum install -y yum-utils centos-release-scl | ||||
| RUN yum-config-manager --enable rhel-server-rhscl-7-rpms | ||||
| RUN yum install -y devtoolset-7-gcc devtoolset-7-gcc-c++ devtoolset-7-gcc-gfortran devtoolset-7-binutils | ||||
| ENV PATH=/opt/rh/devtoolset-7/root/usr/bin:$PATH | ||||
| ENV LD_LIBRARY_PATH=/opt/rh/devtoolset-7/root/usr/lib64:/opt/rh/devtoolset-7/root/usr/lib:$LD_LIBRARY_PATH | ||||
|  | ||||
| # cmake | ||||
| RUN yum install -y cmake3 && \ | ||||
|     ln -s /usr/bin/cmake3 /usr/bin/cmake | ||||
|  | ||||
| # ninja | ||||
| RUN yum install -y http://repo.okay.com.mx/centos/7/x86_64/release/okay-release-1-1.noarch.rpm | ||||
| RUN yum install -y ninja-build | ||||
|  | ||||
| FROM cpu_final as cuda_final | ||||
| RUN rm -rf /usr/local/cuda-${BASE_CUDA_VERSION} | ||||
| COPY --from=cuda     /usr/local/cuda-${BASE_CUDA_VERSION}  /usr/local/cuda-${BASE_CUDA_VERSION} | ||||
| COPY --from=magma    /usr/local/cuda-${BASE_CUDA_VERSION}  /usr/local/cuda-${BASE_CUDA_VERSION} | ||||
|  | ||||
| FROM common as rocm_final | ||||
| ARG ROCM_VERSION=3.7 | ||||
| # Install ROCm | ||||
| ADD ./common/install_rocm.sh install_rocm.sh | ||||
| RUN bash ./install_rocm.sh ${ROCM_VERSION} && rm install_rocm.sh | ||||
| # cmake is already installed inside the rocm base image, but both 2 and 3 exist | ||||
| # cmake3 is needed for the later MIOpen custom build, so that step is last. | ||||
| RUN yum install -y cmake3 && \ | ||||
|     rm -f /usr/bin/cmake && \ | ||||
|     ln -s /usr/bin/cmake3 /usr/bin/cmake | ||||
| ADD ./common/install_miopen.sh install_miopen.sh | ||||
| RUN bash ./install_miopen.sh ${ROCM_VERSION} && rm install_miopen.sh | ||||
							
								
								
									
										153
									
								
								.ci/docker/manywheel/Dockerfile_2_28
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										153
									
								
								.ci/docker/manywheel/Dockerfile_2_28
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,153 @@ | ||||
# syntax = docker/dockerfile:experimental
# manylinux_2_28 (AlmaLinux 8 based) builder image for x86_64 PyTorch wheels.
ARG ROCM_VERSION=3.7
ARG BASE_CUDA_VERSION=11.8
ARG GPU_IMAGE=amd64/almalinux:8
FROM quay.io/pypa/manylinux_2_28_x86_64 as base

ENV LC_ALL=en_US.UTF-8
ENV LANG=en_US.UTF-8
ENV LANGUAGE=en_US.UTF-8

ARG DEVTOOLSET_VERSION=11
RUN yum install -y sudo wget curl perl util-linux xz bzip2 git patch which perl zlib-devel yum-utils gcc-toolset-${DEVTOOLSET_VERSION}-toolchain
# Put the gcc-toolset toolchain ahead of the system compilers.
ENV PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH
ENV LD_LIBRARY_PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib64:/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib:$LD_LIBRARY_PATH

# cmake-3.18.4 from pip
RUN yum install -y python3-pip && \
    python3 -mpip install cmake==3.18.4 && \
    ln -s /usr/local/bin/cmake /usr/bin/cmake3

FROM base as openssl
# Install openssl (this must precede `build python` step)
# (In order to have a proper SSL module, Python is compiled
# against a recent openssl [see env vars above], which is linked
# statically. We delete openssl afterwards.)
ADD ./common/install_openssl.sh install_openssl.sh
RUN bash ./install_openssl.sh && rm install_openssl.sh


# remove unnecessary python versions
RUN rm -rf /opt/python/cp26-cp26m /opt/_internal/cpython-2.6.9-ucs2
RUN rm -rf /opt/python/cp26-cp26mu /opt/_internal/cpython-2.6.9-ucs4
RUN rm -rf /opt/python/cp33-cp33m /opt/_internal/cpython-3.3.6
RUN rm -rf /opt/python/cp34-cp34m /opt/_internal/cpython-3.4.6

FROM base as cuda
ARG BASE_CUDA_VERSION=11.8
# Install CUDA
ADD ./common/install_cuda.sh install_cuda.sh
RUN bash ./install_cuda.sh ${BASE_CUDA_VERSION} && rm install_cuda.sh

FROM base as intel
# MKL
ADD ./common/install_mkl.sh install_mkl.sh
RUN bash ./install_mkl.sh && rm install_mkl.sh

FROM base as magma
# Default must match the `cuda` stage above: magma is installed into
# /usr/local/cuda-${BASE_CUDA_VERSION}, and cuda_final copies that exact
# path — a mismatched default (previously 10.2) makes that COPY miss.
ARG BASE_CUDA_VERSION=11.8
# Install magma
ADD ./common/install_magma.sh install_magma.sh
RUN bash ./install_magma.sh ${BASE_CUDA_VERSION} && rm install_magma.sh

FROM base as jni
# Install java jni header
ADD ./common/install_jni.sh install_jni.sh
ADD ./java/jni.h jni.h
RUN bash ./install_jni.sh && rm install_jni.sh

FROM base as libpng
# Install libpng
ADD ./common/install_libpng.sh install_libpng.sh
RUN bash ./install_libpng.sh && rm install_libpng.sh

FROM ${GPU_IMAGE} as common
ARG DEVTOOLSET_VERSION=11
ENV LC_ALL=en_US.UTF-8
ENV LANG=en_US.UTF-8
ENV LANGUAGE=en_US.UTF-8
RUN yum -y install epel-release
RUN yum -y update
RUN yum install -y \
        autoconf \
        automake \
        bison \
        bzip2 \
        curl \
        diffutils \
        file \
        git \
        make \
        patch \
        perl \
        unzip \
        util-linux \
        wget \
        which \
        xz \
        gcc-toolset-${DEVTOOLSET_VERSION}-toolchain \
        glibc-langpack-en

# NOTE(review): these are EL7 repos/packages but ${GPU_IMAGE} defaults to
# AlmaLinux 8 — confirm the ius-release-el7 install and the git236 swap
# actually succeed on EL8, or drop this block like the aarch64 2_28 variant.
RUN yum install -y \
    https://repo.ius.io/ius-release-el7.rpm \
    https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
RUN yum swap -y git git236-core
# git236+ would refuse to run git commands in repos owned by other users
# Which causes version check to fail, as pytorch repo is bind-mounted into the image
# Override this behaviour by treating every folder as safe
# For more details see https://github.com/pytorch/pytorch/issues/78659#issuecomment-1144107327
RUN git config --global --add safe.directory "*"

ENV SSL_CERT_FILE=/opt/_internal/certs.pem
# Collect the artifacts produced by the helper stages above.
COPY --from=openssl            /opt/openssl                          /opt/openssl
COPY --from=base               /opt/python                           /opt/python
COPY --from=base               /opt/_internal                        /opt/_internal
COPY --from=base               /usr/local/bin/auditwheel             /usr/local/bin/auditwheel
COPY --from=intel              /opt/intel                            /opt/intel
COPY --from=base               /usr/local/bin/patchelf               /usr/local/bin/patchelf
COPY --from=libpng             /usr/local/bin/png*                   /usr/local/bin/
COPY --from=libpng             /usr/local/bin/libpng*                /usr/local/bin/
COPY --from=libpng             /usr/local/include/png*               /usr/local/include/
COPY --from=libpng             /usr/local/include/libpng*            /usr/local/include/
COPY --from=libpng             /usr/local/lib/libpng*                /usr/local/lib/
COPY --from=libpng             /usr/local/lib/pkgconfig              /usr/local/lib/pkgconfig
COPY --from=jni                /usr/local/include/jni.h              /usr/local/include/jni.h

FROM common as cpu_final
ARG BASE_CUDA_VERSION=11.8
ARG DEVTOOLSET_VERSION=11
# Ensure the expected devtoolset is used
ENV PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH
ENV LD_LIBRARY_PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib64:/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib:$LD_LIBRARY_PATH

# cmake-3.18.4 from pip
RUN yum install -y python3-pip && \
    python3 -mpip install cmake==3.18.4 && \
    ln -s /usr/local/bin/cmake /usr/bin/cmake3

FROM cpu_final as cuda_final
# ARG does not cross FROM boundaries: without this redeclaration the
# ${BASE_CUDA_VERSION} expansions below would be empty in this stage.
ARG BASE_CUDA_VERSION=11.8
RUN rm -rf /usr/local/cuda-${BASE_CUDA_VERSION}
COPY --from=cuda     /usr/local/cuda-${BASE_CUDA_VERSION}  /usr/local/cuda-${BASE_CUDA_VERSION}
COPY --from=magma    /usr/local/cuda-${BASE_CUDA_VERSION}  /usr/local/cuda-${BASE_CUDA_VERSION}

FROM common as rocm_final
ARG ROCM_VERSION=3.7
# Install ROCm
ADD ./common/install_rocm.sh install_rocm.sh
RUN bash ./install_rocm.sh ${ROCM_VERSION} && rm install_rocm.sh
# cmake is already installed inside the rocm base image, but both 2 and 3 exist
# cmake3 is needed for the later MIOpen custom build, so that step is last.
RUN yum install -y cmake3 && \
    rm -f /usr/bin/cmake && \
    ln -s /usr/bin/cmake3 /usr/bin/cmake
ADD ./common/install_miopen.sh install_miopen.sh
RUN bash ./install_miopen.sh ${ROCM_VERSION} && rm install_miopen.sh

FROM cpu_final as xpu_final
# cmake-3.28.4 from pip
RUN python3 -m pip install --upgrade pip && \
    python3 -mpip install cmake==3.28.4
ADD ./common/install_xpu.sh install_xpu.sh
RUN bash ./install_xpu.sh && rm install_xpu.sh
RUN pushd /opt/_internal && tar -xJf static-libs-for-embedding-only.tar.xz && popd
							
								
								
									
										57
									
								
								.ci/docker/manywheel/Dockerfile_2_28_aarch64
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										57
									
								
								.ci/docker/manywheel/Dockerfile_2_28_aarch64
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,57 @@ | ||||
FROM quay.io/pypa/manylinux_2_28_aarch64 as base

# Graviton needs GCC 10 or newer; AlmaLinux 8 defaults to an older system GCC,
# so we pull in a gcc-toolset instead.
ARG GCCTOOLSET_VERSION=11

# Language variables
ENV LC_ALL=en_US.UTF-8
ENV LANG=en_US.UTF-8
ENV LANGUAGE=en_US.UTF-8

# Install the OS packages needed by all the binary builds
# (torch, vision, audio, text, data). Package list kept alphabetical.
RUN yum install -y epel-release
RUN yum update -y
RUN yum install -y \
  autoconf \
  automake \
  bison \
  bzip2 \
  curl \
  diffutils \
  file \
  gcc-toolset-${GCCTOOLSET_VERSION}-toolchain \
  git \
  less \
  libffi-devel \
  libgomp \
  make \
  openssl-devel \
  patch \
  perl \
  sudo \
  unzip \
  util-linux \
  wget \
  which \
  xz \
  yasm \
  zstd

# Make the gcc-toolset compilers and runtime libraries take precedence.
ENV PATH=/opt/rh/gcc-toolset-${GCCTOOLSET_VERSION}/root/usr/bin:$PATH
ENV LD_LIBRARY_PATH=/opt/rh/gcc-toolset-${GCCTOOLSET_VERSION}/root/usr/lib64:/opt/rh/gcc-toolset-${GCCTOOLSET_VERSION}/root/usr/lib:$LD_LIBRARY_PATH

# git 2.36+ refuses to operate on repos owned by another user, which breaks the
# version check when the pytorch repo is bind-mounted into this image.
# Treat every directory as safe to override that behaviour.
# See https://github.com/pytorch/pytorch/issues/78659#issuecomment-1144107327
RUN git config --global --add safe.directory "*"

FROM base as final

# Drop Python builds we no longer ship wheels for.
RUN rm -rf /opt/python/cp26-cp26m /opt/_internal/cpython-2.6.9-ucs2
RUN rm -rf /opt/python/cp26-cp26mu /opt/_internal/cpython-2.6.9-ucs4
RUN rm -rf /opt/python/cp33-cp33m /opt/_internal/cpython-3.3.6
RUN rm -rf /opt/python/cp34-cp34m /opt/_internal/cpython-3.4.6
							
								
								
									
										94
									
								
								.ci/docker/manywheel/Dockerfile_aarch64
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										94
									
								
								.ci/docker/manywheel/Dockerfile_aarch64
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,94 @@ | ||||
FROM quay.io/pypa/manylinux2014_aarch64 as base


# Graviton needs GCC 10 for the build
ARG DEVTOOLSET_VERSION=10

# Language variables
ENV LC_ALL=en_US.UTF-8
ENV LANG=en_US.UTF-8
ENV LANGUAGE=en_US.UTF-8

# Install the OS packages needed by all the binary builds
# (torch, vision, audio, text, data). Package list kept alphabetical.
RUN yum install -y epel-release
RUN yum update -y
RUN yum install -y \
  autoconf \
  automake \
  bison \
  bzip2 \
  curl \
  devtoolset-${DEVTOOLSET_VERSION}-binutils \
  devtoolset-${DEVTOOLSET_VERSION}-gcc \
  devtoolset-${DEVTOOLSET_VERSION}-gcc-c++ \
  devtoolset-${DEVTOOLSET_VERSION}-gcc-gfortran \
  diffutils \
  file \
  git \
  less \
  libgomp \
  make \
  patch \
  perl \
  sudo \
  unzip \
  util-linux \
  wget \
  which \
  xz \
  yasm \
  zstd

# Make the devtoolset compilers and runtime libraries take precedence.
ENV PATH=/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH
ENV LD_LIBRARY_PATH=/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/lib64:/opt/rh/devtoolset-${DEVTOOLSET_VERSION}/root/usr/lib:$LD_LIBRARY_PATH


# git 2.36+ refuses to operate on repos owned by another user, which breaks the
# version check when the pytorch repo is bind-mounted into this image.
# Treat every directory as safe to override that behaviour.
# See https://github.com/pytorch/pytorch/issues/78659#issuecomment-1144107327
RUN git config --global --add safe.directory "*"


###############################################################################
# libgfortran.a hack
#
# libgfortran.a from quay.io/pypa/manylinux2014_aarch64 is not compiled with
# -fPIC, which causes __stack_chk_guard@@GLIBC_2.17 failures in the pytorch
# build. Work around it by substituting Ubuntu's libgfortran.a, which is
# compiled with -fPIC.
# NOTE: Need a better way to obtain this library — the Ubuntu package can be
# removed or changed by the vendor at any time.
###############################################################################
RUN cd ~/ \
  && curl -L -o ~/libgfortran-10-dev.deb http://ports.ubuntu.com/ubuntu-ports/pool/universe/g/gcc-10/libgfortran-10-dev_10.5.0-1ubuntu1_arm64.deb \
  && ar x ~/libgfortran-10-dev.deb \
  && tar --use-compress-program=unzstd -xvf data.tar.zst -C ~/ \
  && cp -f ~/usr/lib/gcc/aarch64-linux-gnu/10/libgfortran.a /opt/rh/devtoolset-10/root/usr/lib/gcc/aarch64-redhat-linux/10/

# install cmake
RUN yum install -y cmake3 && \
    ln -s /usr/bin/cmake3 /usr/bin/cmake

FROM base as openssl
# Install openssl (this must precede `build python` step)
# (In order to have a proper SSL module, Python is compiled
# against a recent openssl [see env vars above], which is linked
# statically. We delete openssl afterwards.)
ADD ./common/install_openssl.sh install_openssl.sh
RUN bash ./install_openssl.sh && rm install_openssl.sh
ENV SSL_CERT_FILE=/opt/_internal/certs.pem

FROM base as openblas
# Install openblas
ADD ./common/install_openblas.sh install_openblas.sh
RUN bash ./install_openblas.sh && rm install_openblas.sh

FROM openssl as final
# Drop Python builds we no longer ship wheels for.
RUN rm -rf /opt/python/cp26-cp26m /opt/_internal/cpython-2.6.9-ucs2
RUN rm -rf /opt/python/cp26-cp26mu /opt/_internal/cpython-2.6.9-ucs4
RUN rm -rf /opt/python/cp33-cp33m /opt/_internal/cpython-3.3.6
RUN rm -rf /opt/python/cp34-cp34m /opt/_internal/cpython-3.4.6
COPY --from=openblas     /opt/OpenBLAS/  /opt/OpenBLAS/
ENV LD_LIBRARY_PATH=/opt/OpenBLAS/lib:$LD_LIBRARY_PATH
							
								
								
									
										91
									
								
								.ci/docker/manywheel/Dockerfile_cuda_aarch64
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										91
									
								
								.ci/docker/manywheel/Dockerfile_cuda_aarch64
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,91 @@ | ||||
FROM quay.io/pypa/manylinux_2_28_aarch64 as base

# Cuda ARM build needs gcc 11
ARG DEVTOOLSET_VERSION=11

# Language variables
ENV LC_ALL=en_US.UTF-8
ENV LANG=en_US.UTF-8
ENV LANGUAGE=en_US.UTF-8

# Install the OS packages needed by all the binary builds
# (torch, vision, audio, text, data). Package list kept alphabetical.
RUN yum install -y epel-release
RUN yum update -y
RUN yum install -y \
  autoconf \
  automake \
  bison \
  bzip2 \
  curl \
  diffutils \
  file \
  gcc-toolset-${DEVTOOLSET_VERSION}-toolchain \
  git \
  less \
  libgomp \
  make \
  patch \
  perl \
  sudo \
  unzip \
  util-linux \
  wget \
  which \
  xz \
  yasm \
  zstd

# Make the gcc-toolset compilers and runtime libraries take precedence.
ENV PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/bin:$PATH
ENV LD_LIBRARY_PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib64:/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib:$LD_LIBRARY_PATH

# git 2.36+ refuses to operate on repos owned by another user, which breaks the
# version check when the pytorch repo is bind-mounted into this image.
# Treat every directory as safe to override that behaviour.
# See https://github.com/pytorch/pytorch/issues/78659#issuecomment-1144107327
RUN git config --global --add safe.directory "*"


FROM base as openssl
# Install openssl (this must precede `build python` step)
# (In order to have a proper SSL module, Python is compiled
# against a recent openssl [see env vars above], which is linked
# statically. We delete openssl afterwards.)
ADD ./common/install_openssl.sh install_openssl.sh
RUN bash ./install_openssl.sh && rm install_openssl.sh
ENV SSL_CERT_FILE=/opt/_internal/certs.pem

FROM openssl as final
# Drop Python builds we no longer ship wheels for.
RUN rm -rf /opt/python/cp26-cp26m /opt/_internal/cpython-2.6.9-ucs2
RUN rm -rf /opt/python/cp26-cp26mu /opt/_internal/cpython-2.6.9-ucs4
RUN rm -rf /opt/python/cp33-cp33m /opt/_internal/cpython-3.3.6
RUN rm -rf /opt/python/cp34-cp34m /opt/_internal/cpython-3.4.6

FROM base as cuda
# ARG must be redeclared per stage; the value comes from --build-arg.
ARG BASE_CUDA_VERSION
# Install CUDA
ADD ./common/install_cuda_aarch64.sh install_cuda_aarch64.sh
RUN bash ./install_cuda_aarch64.sh ${BASE_CUDA_VERSION} && rm install_cuda_aarch64.sh

FROM base as magma
ARG BASE_CUDA_VERSION
# Install magma
ADD ./common/install_magma.sh install_magma.sh
RUN bash ./install_magma.sh ${BASE_CUDA_VERSION} && rm install_magma.sh

FROM base as openblas
# Install openblas
ADD ./common/install_openblas.sh install_openblas.sh
RUN bash ./install_openblas.sh && rm install_openblas.sh

FROM final as cuda_final
ARG BASE_CUDA_VERSION
RUN rm -rf /usr/local/cuda-${BASE_CUDA_VERSION}
COPY --from=cuda     /usr/local/cuda-${BASE_CUDA_VERSION}  /usr/local/cuda-${BASE_CUDA_VERSION}
COPY --from=magma    /usr/local/cuda-${BASE_CUDA_VERSION}  /usr/local/cuda-${BASE_CUDA_VERSION}
COPY --from=openblas     /opt/OpenBLAS/  /opt/OpenBLAS/
RUN ln -sf /usr/local/cuda-${BASE_CUDA_VERSION} /usr/local/cuda
ENV PATH=/usr/local/cuda/bin:$PATH
ENV LD_LIBRARY_PATH=/opt/OpenBLAS/lib:$LD_LIBRARY_PATH
							
								
								
									
										71
									
								
								.ci/docker/manywheel/Dockerfile_cxx11-abi
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										71
									
								
								.ci/docker/manywheel/Dockerfile_cxx11-abi
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,71 @@ | ||||
FROM centos:8 as base

ENV LC_ALL en_US.UTF-8
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US.UTF-8
ENV PATH /opt/rh/gcc-toolset-11/root/bin/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin

# CentOS 8 is EOL: point yum at the vault archive so repos still resolve.
RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-*.repo
# PowerTools must be enabled to get ninja-build.
RUN sed -i 's|enabled=0|enabled=1|g' /etc/yum.repos.d/CentOS-Linux-PowerTools.repo

RUN yum -y update
RUN yum install -y bzip2 curl git patch perl sudo util-linux wget which xz zlib-devel
RUN yum install -y autoconf automake cmake gcc-toolset-11-gcc-c++ gdb make


FROM base as openssl
ADD ./common/install_openssl.sh install_openssl.sh
RUN bash ./install_openssl.sh && rm install_openssl.sh

# Install python
FROM base as python
RUN yum install -y openssl-devel zlib-devel bzip2-devel ncurses-devel sqlite-devel readline-devel tk-devel gdbm-devel libpcap-devel xz-devel libffi-devel
ADD common/install_cpython.sh install_cpython.sh
RUN bash ./install_cpython.sh && rm install_cpython.sh

FROM base as conda
ADD ./common/install_conda_docker.sh install_conda.sh
RUN bash ./install_conda.sh && rm install_conda.sh
RUN /opt/conda/bin/conda install -y cmake

FROM base as intel
# Install MKL
COPY --from=python             /opt/python                           /opt/python
COPY --from=python             /opt/_internal                        /opt/_internal
COPY --from=conda              /opt/conda                            /opt/conda
ENV PATH=/opt/conda/bin:$PATH
ADD ./common/install_mkl.sh install_mkl.sh
RUN bash ./install_mkl.sh && rm install_mkl.sh

FROM base as patchelf
ADD ./common/install_patchelf.sh install_patchelf.sh
RUN bash ./install_patchelf.sh && rm install_patchelf.sh
# Stash the binary at a fixed path so later stages can COPY it.
RUN cp $(which patchelf) /patchelf

FROM base as jni
ADD ./common/install_jni.sh install_jni.sh
ADD ./java/jni.h jni.h
RUN bash ./install_jni.sh && rm install_jni.sh

FROM base as libpng
ADD ./common/install_libpng.sh install_libpng.sh
RUN bash ./install_libpng.sh && rm install_libpng.sh

FROM base as final
# Collect the artifacts produced by the helper stages above.
COPY --from=openssl            /opt/openssl                          /opt/openssl
COPY --from=python             /opt/python                           /opt/python
COPY --from=python             /opt/_internal                        /opt/_internal
COPY --from=intel              /opt/intel                            /opt/intel
COPY --from=conda              /opt/conda                            /opt/conda
COPY --from=patchelf           /usr/local/bin/patchelf               /usr/local/bin/patchelf
COPY --from=jni                /usr/local/include/jni.h              /usr/local/include/jni.h
COPY --from=libpng             /usr/local/bin/png*                   /usr/local/bin/
COPY --from=libpng             /usr/local/bin/libpng*                /usr/local/bin/
COPY --from=libpng             /usr/local/include/png*               /usr/local/include/
COPY --from=libpng             /usr/local/include/libpng*            /usr/local/include/
COPY --from=libpng             /usr/local/lib/libpng*                /usr/local/lib/
COPY --from=libpng             /usr/local/lib/pkgconfig              /usr/local/lib/pkgconfig

RUN yum install -y ninja-build
							
								
								
									
										73
									
								
								.ci/docker/manywheel/Dockerfile_s390x
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										73
									
								
								.ci/docker/manywheel/Dockerfile_s390x
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,73 @@ | ||||
FROM --platform=linux/s390x docker.io/ubuntu:24.04 as base

# Language variables
ENV LC_ALL=C.UTF-8
ENV LANG=C.UTF-8
ENV LANGUAGE=C.UTF-8

# Install needed OS packages. This is to support all
# the binary builds (torch, vision, audio, text, data)
# Use `&&` (not `;`) so a failed index refresh aborts the build instead of
# upgrading against stale package lists; apt-get is the stable scripting CLI.
RUN apt-get update && apt-get upgrade -y
# NOTE(review): on Debian/Ubuntu `which` is normally shipped by debianutils,
# not as a standalone package — confirm this resolves on ubuntu:24.04.
RUN apt-get install -y \
  build-essential \
  autoconf \
  automake \
  bzip2 \
  curl \
  diffutils \
  file \
  git \
  make \
  patch \
  perl \
  unzip \
  util-linux \
  wget \
  which \
  xz-utils \
  less \
  zstd \
  cmake \
  python3 \
  python3-dev \
  python3-setuptools \
  python3-yaml \
  python3-typing-extensions \
  libblas-dev \
  libopenblas-dev \
  liblapack-dev \
  libatlas-base-dev

# git236+ would refuse to run git commands in repos owned by other users
# Which causes version check to fail, as pytorch repo is bind-mounted into the image
# Override this behaviour by treating every folder as safe
# For more details see https://github.com/pytorch/pytorch/issues/78659#issuecomment-1144107327
RUN git config --global --add safe.directory "*"

FROM base as openssl
# Install openssl (this must precede `build python` step)
# (In order to have a proper SSL module, Python is compiled
# against a recent openssl [see env vars above], which is linked
# statically. We delete openssl afterwards.)
ADD ./common/install_openssl.sh install_openssl.sh
RUN bash ./install_openssl.sh && rm install_openssl.sh
ENV SSL_CERT_FILE=/opt/_internal/certs.pem

FROM base as patchelf
# Install patchelf (kept at a fixed path so later stages can COPY it)
ADD ./common/install_patchelf.sh install_patchelf.sh
RUN bash ./install_patchelf.sh && rm install_patchelf.sh
RUN cp $(which patchelf) /patchelf

FROM patchelf as python
# build python
COPY manywheel/build_scripts /build_scripts
ADD ./common/install_cpython.sh /build_scripts/install_cpython.sh
RUN bash build_scripts/build.sh && rm -r build_scripts

FROM openssl as final
COPY --from=python             /opt/python                           /opt/python
COPY --from=python             /opt/_internal                        /opt/_internal
COPY --from=python             /opt/python/cp39-cp39/bin/auditwheel /usr/local/bin/auditwheel
COPY --from=patchelf           /usr/local/bin/patchelf               /usr/local/bin/patchelf
							
								
								
									
										154
									
								
								.ci/docker/manywheel/build.sh
									
									
									
									
									
										Executable file
									
								
							
							
						
						
									
										154
									
								
								.ci/docker/manywheel/build.sh
									
									
									
									
									
										Executable file
									
								
							| @ -0,0 +1,154 @@ | ||||
| #!/usr/bin/env bash | ||||
| # Script used only in CD pipeline | ||||
|  | ||||
| set -eou pipefail | ||||
|  | ||||
| TOPDIR=$(git rev-parse --show-toplevel) | ||||
|  | ||||
| image="$1" | ||||
| shift | ||||
|  | ||||
| if [ -z "${image}" ]; then | ||||
|   echo "Usage: $0 IMAGE" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| DOCKER_IMAGE="pytorch/${image}" | ||||
|  | ||||
| DOCKER_REGISTRY="${DOCKER_REGISTRY:-docker.io}" | ||||
|  | ||||
| GPU_ARCH_TYPE=${GPU_ARCH_TYPE:-cpu} | ||||
| GPU_ARCH_VERSION=${GPU_ARCH_VERSION:-} | ||||
| MANY_LINUX_VERSION=${MANY_LINUX_VERSION:-} | ||||
| DOCKERFILE_SUFFIX=${DOCKERFILE_SUFFIX:-} | ||||
# Whether to push the built images to the registry; empty (default) means
# build-only.  CI sets WITH_PUSH=true to publish.
WITH_PUSH=${WITH_PUSH:-}

# Map the requested GPU architecture onto: the multi-stage Dockerfile target,
# the image tag component, the base image, and GPU-specific build args.
case ${GPU_ARCH_TYPE} in
    cpu)
        TARGET=cpu_final
        DOCKER_TAG=cpu
        GPU_IMAGE=centos:7
        DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=9"
        ;;
    cpu-manylinux_2_28)
        TARGET=cpu_final
        DOCKER_TAG=cpu
        GPU_IMAGE=amd64/almalinux:8
        DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=11"
        MANY_LINUX_VERSION="2_28"
        ;;
    cpu-aarch64)
        TARGET=final
        DOCKER_TAG=cpu-aarch64
        GPU_IMAGE=arm64v8/centos:7
        DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=10"
        MANY_LINUX_VERSION="aarch64"
        ;;
    cpu-aarch64-2_28)
        TARGET=final
        DOCKER_TAG=cpu-aarch64
        GPU_IMAGE=arm64v8/almalinux:8
        DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=11"
        MANY_LINUX_VERSION="2_28_aarch64"
        ;;
    cpu-cxx11-abi)
        TARGET=final
        DOCKER_TAG=cpu-cxx11-abi
        GPU_IMAGE=""
        DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=9"
        MANY_LINUX_VERSION="cxx11-abi"
        ;;
    cpu-s390x)
        TARGET=final
        DOCKER_TAG=cpu-s390x
        GPU_IMAGE=redhat/ubi9
        DOCKER_GPU_BUILD_ARG=""
        MANY_LINUX_VERSION="s390x"
        ;;
    cuda)
        TARGET=cuda_final
        DOCKER_TAG=cuda${GPU_ARCH_VERSION}
        # Keep this up to date with the minimum version of CUDA we currently support
        GPU_IMAGE=centos:7
        DOCKER_GPU_BUILD_ARG="--build-arg BASE_CUDA_VERSION=${GPU_ARCH_VERSION} --build-arg DEVTOOLSET_VERSION=9"
        ;;
    cuda-manylinux_2_28)
        TARGET=cuda_final
        DOCKER_TAG=cuda${GPU_ARCH_VERSION}
        GPU_IMAGE=amd64/almalinux:8
        DOCKER_GPU_BUILD_ARG="--build-arg BASE_CUDA_VERSION=${GPU_ARCH_VERSION} --build-arg DEVTOOLSET_VERSION=11"
        MANY_LINUX_VERSION="2_28"
        ;;
    cuda-aarch64)
        TARGET=cuda_final
        DOCKER_TAG=cuda${GPU_ARCH_VERSION}
        GPU_IMAGE=arm64v8/centos:7
        DOCKER_GPU_BUILD_ARG="--build-arg BASE_CUDA_VERSION=${GPU_ARCH_VERSION} --build-arg DEVTOOLSET_VERSION=11"
        MANY_LINUX_VERSION="aarch64"
        DOCKERFILE_SUFFIX="_cuda_aarch64"
        ;;
    rocm)
        TARGET=rocm_final
        DOCKER_TAG=rocm${GPU_ARCH_VERSION}
        GPU_IMAGE=rocm/dev-centos-7:${GPU_ARCH_VERSION}-complete
        PYTORCH_ROCM_ARCH="gfx900;gfx906;gfx908;gfx90a;gfx1030;gfx1100"
        ROCM_REGEX="([0-9]+)\.([0-9]+)[\.]?([0-9]*)"
        # Turn "X.Y[.Z]" into the integer X*10000 + Y*100 + Z for comparison.
        if [[ $GPU_ARCH_VERSION =~ $ROCM_REGEX ]]; then
            ROCM_VERSION_INT=$((${BASH_REMATCH[1]}*10000 + ${BASH_REMATCH[2]}*100 + ${BASH_REMATCH[3]:-0}))
        else
            echo "ERROR: rocm regex failed"
            exit 1
        fi
        # gfx942 is only available from ROCm 6.0 onwards.
        if [[ $ROCM_VERSION_INT -ge 60000 ]]; then
            PYTORCH_ROCM_ARCH+=";gfx942"
        fi
        DOCKER_GPU_BUILD_ARG="--build-arg ROCM_VERSION=${GPU_ARCH_VERSION} --build-arg PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH} --build-arg DEVTOOLSET_VERSION=9"
        ;;
    xpu)
        TARGET=xpu_final
        DOCKER_TAG=xpu
        GPU_IMAGE=amd64/almalinux:8
        DOCKER_GPU_BUILD_ARG=" --build-arg DEVTOOLSET_VERSION=11"
        MANY_LINUX_VERSION="2_28"
        ;;
    *)
        echo "ERROR: Unrecognized GPU_ARCH_TYPE: ${GPU_ARCH_TYPE}"
        exit 1
        ;;
esac

IMAGES=''

# Unless a case arm picked an explicit Dockerfile suffix (e.g. cuda-aarch64),
# derive it from the manylinux flavor: "2_28" -> Dockerfile_2_28.
if [[ -n ${MANY_LINUX_VERSION} && -z ${DOCKERFILE_SUFFIX} ]]; then
    DOCKERFILE_SUFFIX=_${MANY_LINUX_VERSION}
fi
(
    set -x
    # DOCKER_GPU_BUILD_ARG is deliberately left unquoted: it holds zero or
    # more whitespace-separated --build-arg flags that must word-split.
    # "$@" (now quoted — previously `$@`, which split/globbed caller args)
    # forwards any extra docker-build flags given to this script.
    DOCKER_BUILDKIT=1 docker build \
        ${DOCKER_GPU_BUILD_ARG} \
        --build-arg "GPU_IMAGE=${GPU_IMAGE}" \
        --target "${TARGET}" \
        -t "${DOCKER_IMAGE}" \
        "$@" \
        -f "${TOPDIR}/.ci/docker/manywheel/Dockerfile${DOCKERFILE_SUFFIX}" \
        "${TOPDIR}/.ci/docker/"
)

# Resolve the git branch and commit so pushed images also get branch- and
# sha-suffixed tags in addition to the primary tag.
GITHUB_REF=${GITHUB_REF:-$(git symbolic-ref -q HEAD || git describe --tags --exact-match)}
GIT_BRANCH_NAME=${GITHUB_REF##*/}
GIT_COMMIT_SHA=${GITHUB_SHA:-$(git rev-parse HEAD)}
DOCKER_IMAGE_BRANCH_TAG=${DOCKER_IMAGE}-${GIT_BRANCH_NAME}
DOCKER_IMAGE_SHA_TAG=${DOCKER_IMAGE}-${GIT_COMMIT_SHA}

if [[ "${WITH_PUSH}" == true ]]; then
    (
        set -x
        docker push "${DOCKER_IMAGE}"
        if [[ -n ${GITHUB_REF} ]]; then
            docker tag "${DOCKER_IMAGE}" "${DOCKER_IMAGE_BRANCH_TAG}"
            docker tag "${DOCKER_IMAGE}" "${DOCKER_IMAGE_SHA_TAG}"
            docker push "${DOCKER_IMAGE_BRANCH_TAG}"
            docker push "${DOCKER_IMAGE_SHA_TAG}"
        fi
    )
fi
							
								
								
									
										131
									
								
								.ci/docker/manywheel/build_scripts/build.sh
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										131
									
								
								.ci/docker/manywheel/build_scripts/build.sh
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,131 @@ | ||||
#!/bin/bash
# Top-level build script called from Dockerfile
# Script used only in CD pipeline

# Stop at any error, show all commands
set -ex

# openssl version to build, with expected sha256 hash of .tar.gz
# archive
OPENSSL_ROOT=openssl-1.1.1l
OPENSSL_HASH=0b7a3e5e59c34827fe0c3a74b7ec8baef302b98fa80088d7f9153aa16fa76bd1
DEVTOOLS_HASH=a8ebeb4bed624700f727179e6ef771dafe47651131a00a78b342251415646acc
PATCHELF_HASH=d9afdff4baeacfbc64861454f368b7f2c15c44d245293f7587bbf726bfe722fb
CURL_ROOT=curl-7.73.0
CURL_HASH=cf34fe0b07b800f1c01a499a6e8b2af548f6d0e044dca4a29d88a4bee146d131
AUTOCONF_ROOT=autoconf-2.69
AUTOCONF_HASH=954bd69b391edc12d6a4a51a2dd1476543da5c6bbf05a95b59dc0dd6fd4c2969

# Get build utilities (build_openssl, build_curl, build_autoconf, ...)
MY_DIR=$(dirname "${BASH_SOURCE[0]}")
source $MY_DIR/build_utils.sh

# s390x images are Debian-based; everything else is RPM-based, so the
# package names and package manager differ.
if [ "$(uname -m)" != "s390x" ] ; then
    # Dependencies for compiling Python that we want to remove from
    # the final image after compiling Python
    PYTHON_COMPILE_DEPS="zlib-devel bzip2-devel ncurses-devel sqlite-devel readline-devel tk-devel gdbm-devel db4-devel libpcap-devel xz-devel libffi-devel"

    # Libraries that are allowed as part of the manylinux1 profile
    MANYLINUX1_DEPS="glibc-devel libstdc++-devel glib2-devel libX11-devel libXext-devel libXrender-devel  mesa-libGL-devel libICE-devel libSM-devel ncurses-devel"

    # Development tools and libraries
    yum -y install bzip2 make git patch unzip bison yasm diffutils \
        automake which file cmake28 \
        kernel-devel-`uname -r` \
        ${PYTHON_COMPILE_DEPS}
else
    # Dependencies for compiling Python that we want to remove from
    # the final image after compiling Python
    PYTHON_COMPILE_DEPS="zlib1g-dev libbz2-dev libncurses-dev libsqlite3-dev libdb-dev libpcap-dev liblzma-dev libffi-dev"

    # Libraries that are allowed as part of the manylinux1 profile
    MANYLINUX1_DEPS="libglib2.0-dev libX11-dev libncurses-dev"

    # Development tools and libraries
    apt install -y bzip2 make git patch unzip diffutils \
        automake which file cmake \
        linux-headers-virtual \
        ${PYTHON_COMPILE_DEPS}
fi

# Install newest autoconf
build_autoconf $AUTOCONF_ROOT $AUTOCONF_HASH
autoconf --version

# Compile the latest Python releases.
# (In order to have a proper SSL module, Python is compiled
# against a recent openssl [see env vars above], which is linked
# statically. We delete openssl afterwards.)
build_openssl $OPENSSL_ROOT $OPENSSL_HASH
/build_scripts/install_cpython.sh

PY39_BIN=/opt/python/cp39-cp39/bin

# Our openssl doesn't know how to find the system CA trust store
#   (https://github.com/pypa/manylinux/issues/53)
# And it's not clear how up-to-date that is anyway
# So let's just use the same one pip and everyone uses
$PY39_BIN/pip install certifi
ln -s $($PY39_BIN/python -c 'import certifi; print(certifi.where())') \
      /opt/_internal/certs.pem
# If you modify this line you also have to modify the versions in the
# Dockerfiles:
export SSL_CERT_FILE=/opt/_internal/certs.pem

# Install newest curl
build_curl $CURL_ROOT $CURL_HASH
rm -rf /usr/local/include/curl /usr/local/lib/libcurl* /usr/local/lib/pkgconfig/libcurl.pc
hash -r
curl --version
curl-config --features

# Install patchelf (latest with unreleased bug fixes)
curl -sLOk https://nixos.org/releases/patchelf/patchelf-0.10/patchelf-0.10.tar.gz
# check_sha256sum patchelf-0.9njs2.tar.gz $PATCHELF_HASH
tar -xzf patchelf-0.10.tar.gz
(cd patchelf-0.10 && ./configure && make && make install)
rm -rf patchelf-0.10.tar.gz patchelf-0.10

# Install latest pypi release of auditwheel
$PY39_BIN/pip install auditwheel
ln -s $PY39_BIN/auditwheel /usr/local/bin/auditwheel

# Clean up development headers and other unnecessary stuff for
# final image
if [ "$(uname -m)" != "s390x" ] ; then
    # Best-effort removal; silence output.  NOTE: previously written as
    # `... || true > /dev/null 2>&1`, which redirected only `true` — the
    # redirections must come before `|| true` to actually silence yum.
    yum -y erase wireless-tools gtk2 libX11 hicolor-icon-theme \
        avahi freetype bitstream-vera-fonts \
        ${PYTHON_COMPILE_DEPS} > /dev/null 2>&1 || true
    yum -y install ${MANYLINUX1_DEPS}
    yum -y clean all > /dev/null 2>&1
    yum list installed
else
    # Same redirection-ordering fix as the yum branch above.
    apt purge -y ${PYTHON_COMPILE_DEPS} > /dev/null 2>&1 || true
fi
# we don't need libpython*.a, and they're many megabytes
find /opt/_internal -name '*.a' -print0 | xargs -0 rm -f
# Strip what we can -- and ignore errors, because this just attempts to strip
# *everything*, including non-ELF files:
find /opt/_internal -type f -print0 \
    | xargs -0 -n1 strip --strip-unneeded 2>/dev/null || true
# We do not need the Python test suites, or indeed the precompiled .pyc and
# .pyo files. Partially cribbed from:
#    https://github.com/docker-library/python/blob/master/3.4/slim/Dockerfile
find /opt/_internal \
     \( -type d -a -name test -o -name tests \) \
  -o \( -type f -a -name '*.pyc' -o -name '*.pyo' \) \
  -print0 | xargs -0 rm -f

for PYTHON in /opt/python/*/bin/python; do
    # Smoke test to make sure that our Pythons work, and do indeed detect as
    # being manylinux compatible:
    $PYTHON $MY_DIR/manylinux1-check.py
    # Make sure that SSL cert checking works
    $PYTHON $MY_DIR/ssl-check.py
done

# Fix libc headers to remain compatible with C99 compilers.
find /usr/include/ -type f -exec sed -i 's/\bextern _*inline_*\b/extern __inline __attribute__ ((__gnu_inline__))/g' {} +

# Now we can delete our built SSL
rm -rf /usr/local/ssl
							
								
								
									
										91
									
								
								.ci/docker/manywheel/build_scripts/build_utils.sh
									
									
									
									
									
										Executable file
									
								
							
							
						
						
									
										91
									
								
								.ci/docker/manywheel/build_scripts/build_utils.sh
									
									
									
									
									
										Executable file
									
								
							| @ -0,0 +1,91 @@ | ||||
#!/bin/bash
# Helper utilities for build
# Script used only in CD pipeline

# Download locations for the pinned source tarballs fetched by the build_*
# helpers below; expected checksums are supplied by each caller.
OPENSSL_DOWNLOAD_URL=https://www.openssl.org/source/old/1.1.1/
CURL_DOWNLOAD_URL=https://curl.askapache.com/download

AUTOCONF_DOWNLOAD_URL=https://ftp.gnu.org/gnu/autoconf
|  | ||||
# Abort the build immediately when a required value is empty.
# $1 - the value to validate (typically a positional arg of the caller).
function check_var {
    local value=$1
    if [ -z "${value}" ]; then
        echo "required variable not defined"
        exit 1
    fi
}
|  | ||||
|  | ||||
# Configure, build, and install OpenSSL from an already-unpacked source tree
# (must be the cwd): static (no-shared), PIC, SSLv2 disabled, installed under
# /usr/local/ssl.  stdout is discarded to keep CI logs short; errors still
# reach stderr.
function do_openssl_build {
    ./config no-ssl2 no-shared -fPIC --prefix=/usr/local/ssl > /dev/null
    make > /dev/null
    make install > /dev/null
}
|  | ||||
|  | ||||
# Verify a file against an expected SHA-256 digest.
# $1 - file name, $2 - expected hex digest.
# On mismatch `sha256sum -c` exits non-zero, which aborts callers running
# under `set -e` (build.sh sources this file while running with `set -ex`).
function check_sha256sum {
    local fname=$1
    check_var "${fname}"
    local sha256=$2
    check_var "${sha256}"

    # Quote all expansions so file names containing spaces cannot word-split.
    echo "${sha256}  ${fname}" > "${fname}.sha256"
    sha256sum -c "${fname}.sha256"
    rm -f "${fname}.sha256"
}
|  | ||||
|  | ||||
# Download, checksum-verify, build, and install OpenSSL, then delete the
# source tree and tarball.
# $1 - versioned source dir name (e.g. openssl-1.1.1l), $2 - expected sha256.
function build_openssl {
    local openssl_fname=$1
    check_var "${openssl_fname}"
    local openssl_sha256=$2
    check_var "${openssl_sha256}"
    check_var "${OPENSSL_DOWNLOAD_URL}"
    curl -sLO "${OPENSSL_DOWNLOAD_URL}/${openssl_fname}.tar.gz"
    check_sha256sum "${openssl_fname}.tar.gz" "${openssl_sha256}"
    tar -xzf "${openssl_fname}.tar.gz"
    # Build in a subshell so the caller's cwd is preserved.
    (cd "${openssl_fname}" && do_openssl_build)
    rm -rf "${openssl_fname}" "${openssl_fname}.tar.gz"
}
|  | ||||
|  | ||||
# Configure, build, and install curl from an already-unpacked source tree
# (must be the cwd), statically linked against SSL.  LIBS=-ldl is required
# when linking against our static OpenSSL build.  stdout is discarded to
# keep CI logs short.
function do_curl_build {
    LIBS=-ldl ./configure --with-ssl --disable-shared > /dev/null
    make > /dev/null
    make install > /dev/null
}
|  | ||||
|  | ||||
# Download, checksum-verify, build, and install curl, then delete the
# source tree and tarball.
# $1 - versioned source dir name (e.g. curl-7.73.0), $2 - expected sha256.
function build_curl {
    local curl_fname=$1
    check_var "${curl_fname}"
    local curl_sha256=$2
    check_var "${curl_sha256}"
    check_var "${CURL_DOWNLOAD_URL}"
    curl -sLO "${CURL_DOWNLOAD_URL}/${curl_fname}.tar.bz2"
    check_sha256sum "${curl_fname}.tar.bz2" "${curl_sha256}"
    tar -jxf "${curl_fname}.tar.bz2"
    # Build in a subshell so the caller's cwd is preserved.
    (cd "${curl_fname}" && do_curl_build)
    rm -rf "${curl_fname}" "${curl_fname}.tar.bz2"
}
|  | ||||
|  | ||||
# Standard autotools install (./configure && make && make install) of an
# already-unpacked source tree (must be the cwd), with default prefix.
# stdout is discarded to keep CI logs short.
function do_standard_install {
    ./configure > /dev/null
    make > /dev/null
    make install > /dev/null
}
|  | ||||
|  | ||||
# Download, checksum-verify, build, and install autoconf, then delete the
# source tree and tarball.
# $1 - versioned source dir name (e.g. autoconf-2.69), $2 - expected sha256.
function build_autoconf {
    local autoconf_fname=$1
    check_var "${autoconf_fname}"
    local autoconf_sha256=$2
    check_var "${autoconf_sha256}"
    check_var "${AUTOCONF_DOWNLOAD_URL}"
    curl -sLO "${AUTOCONF_DOWNLOAD_URL}/${autoconf_fname}.tar.gz"
    check_sha256sum "${autoconf_fname}.tar.gz" "${autoconf_sha256}"
    tar -zxf "${autoconf_fname}.tar.gz"
    # Build in a subshell so the caller's cwd is preserved.
    (cd "${autoconf_fname}" && do_standard_install)
    rm -rf "${autoconf_fname}" "${autoconf_fname}.tar.gz"
}
							
								
								
									
										60
									
								
								.ci/docker/manywheel/build_scripts/manylinux1-check.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										60
									
								
								.ci/docker/manywheel/build_scripts/manylinux1-check.py
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,60 @@ | ||||
| # Logic copied from PEP 513 | ||||
|  | ||||
|  | ||||
def is_manylinux1_compatible():
    """Return True when this interpreter satisfies the manylinux1 policy.

    Mirrors the reference logic from PEP 513, with linux-s390x added to the
    accepted platforms for this CI image.
    """
    # Only Linux, and only the platforms these images are built for.
    from distutils.util import get_platform

    supported = ("linux-x86_64", "linux-i686", "linux-s390x")
    if get_platform() not in supported:
        return False

    # An installed _manylinux module may explicitly declare compatibility.
    try:
        import _manylinux
    except ImportError:
        pass
    else:
        try:
            return bool(_manylinux.manylinux1_compatible)
        except AttributeError:
            pass

    # Heuristic fallback: CentOS 5 (the manylinux1 baseline) ships glibc 2.5.
    return have_compatible_glibc(2, 5)
|  | ||||
|  | ||||
def have_compatible_glibc(major, minimum_minor):
    """Return True if this process is linked against glibc >= major.minimum_minor.

    Follows the PEP 513 reference approach: query gnu_get_libc_version() via
    ctypes and compare the reported version.  Returns False when not linked
    against glibc at all (e.g. musl), or when the version string cannot be
    parsed.
    """
    import ctypes
    import re

    process_namespace = ctypes.CDLL(None)
    try:
        gnu_get_libc_version = process_namespace.gnu_get_libc_version
    except AttributeError:
        # Symbol doesn't exist -> therefore, we are not linked to glibc.
        return False

    # Call gnu_get_libc_version, which returns a string like "2.5".
    gnu_get_libc_version.restype = ctypes.c_char_p
    version_str = gnu_get_libc_version()
    # py2 / py3 compatibility:
    if not isinstance(version_str, str):
        version_str = version_str.decode("ascii")

    # Parse the leading "major.minor" and ignore any trailing component
    # (some prerelease builds report e.g. "2.33.9000"); the previous
    # `assert len(version) == 2` crashed on those instead of answering.
    match = re.match(r"(\d+)\.(\d+)", version_str)
    if not match:
        return False
    if major != int(match.group(1)):
        return False
    if minimum_minor > int(match.group(2)):
        return False
    return True
|  | ||||
|  | ||||
import sys


# Script entry point: report whether this interpreter is manylinux1
# compatible and exit 0 (compatible) / 1 (not compatible).
_compatible = is_manylinux1_compatible()
_verdict = "is" if _compatible else "is NOT"
print(f"{sys.executable} {_verdict} manylinux1 compatible")
sys.exit(0 if _compatible else 1)
							
								
								
									
										35
									
								
								.ci/docker/manywheel/build_scripts/ssl-check.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										35
									
								
								.ci/docker/manywheel/build_scripts/ssl-check.py
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,35 @@ | ||||
# cf. https://github.com/pypa/manylinux/issues/53
# Smoke test: a good HTTPS endpoint must connect, a self-signed one must be
# rejected, proving that certificate verification is actually enabled.

GOOD_SSL = "https://google.com"
BAD_SSL = "https://self-signed.badssl.com"

import sys


print("Testing SSL certificate checking for Python:", sys.version)

# Certificate verification is only on by default from 2.7.x / 3.4.x onwards
# (PEP 476); older interpreters never reject the bad cert, so skip.
# BUGFIX: the previous condition was
#   sys.version_info[:2] < (2, 7) or sys.version_info[:2] < (3, 4)
# whose second clause is true for EVERY Python 2 tuple (any (2, x) sorts
# before (3, 4)), so the test was wrongly skipped on 2.7 as well.
if sys.version_info[:2] < (2, 7) or (3, 0) <= sys.version_info[:2] < (3, 4):
    print("This version never checks SSL certs; skipping tests")
    sys.exit(0)

if sys.version_info[0] >= 3:
    from urllib.request import urlopen

    EXC = OSError
else:
    from urllib import urlopen

    EXC = IOError

# NOTE(review): the f-strings below make this file Python 3.6+ only, so the
# Python 2 branch above is effectively dead code -- confirm before relying
# on it.
print(f"Connecting to {GOOD_SSL} should work")
urlopen(GOOD_SSL)
print("...it did, yay.")

print(f"Connecting to {BAD_SSL} should fail")
try:
    urlopen(BAD_SSL)
    # If we get here then we failed:
    print("...it DIDN'T!!!!!11!!1one!")
    sys.exit(1)
except EXC:
    print("...it did, yay.")
| @ -1,42 +1 @@ | ||||
| This directory contains scripts for our continuous integration. | ||||
|  | ||||
| One important thing to keep in mind when reading the scripts here is | ||||
| that they are all based off of Docker images, which we build for each of | ||||
| the various system configurations we want to run on Jenkins.  This means | ||||
| it is very easy to run these tests yourself: | ||||
|  | ||||
| 1. Figure out what Docker image you want.  The general template for our | ||||
|    images look like: | ||||
|    ``registry.pytorch.org/pytorch/pytorch-$BUILD_ENVIRONMENT:$DOCKER_VERSION``, | ||||
|    where ``$BUILD_ENVIRONMENT`` is one of the build environments | ||||
|    enumerated in | ||||
|    [pytorch-dockerfiles](https://github.com/pytorch/pytorch/blob/master/.ci/docker/build.sh). The dockerfile used by jenkins can be found under the `.ci` [directory](https://github.com/pytorch/pytorch/blob/master/.ci/docker) | ||||
|  | ||||
| 2. Run ``docker run -it -u jenkins $DOCKER_IMAGE``, clone PyTorch and | ||||
|    run one of the scripts in this directory. | ||||
|  | ||||
| The Docker images are designed so that any "reasonable" build commands | ||||
| will work; if you look in [build.sh](build.sh) you will see that it is a | ||||
| very simple script.  This is intentional.  Idiomatic build instructions | ||||
| should work inside all of our Docker images.  You can tweak the commands | ||||
| however you need (e.g., in case you want to rebuild with DEBUG, or rerun | ||||
| the build with higher verbosity, etc.). | ||||
|  | ||||
| We have to do some work to make this so.  Here is a summary of the | ||||
| mechanisms we use: | ||||
|  | ||||
| - We install binaries to directories like `/usr/local/bin` which | ||||
|   are automatically part of your PATH. | ||||
|  | ||||
| - We add entries to the PATH using Docker ENV variables (so | ||||
|   they apply when you enter Docker) and `/etc/environment` (so they | ||||
|   continue to apply even if you sudo), instead of modifying | ||||
|   `PATH` in our build scripts. | ||||
|  | ||||
| - We use `/etc/ld.so.conf.d` to register directories containing | ||||
|   shared libraries, instead of modifying `LD_LIBRARY_PATH` in our | ||||
|   build scripts. | ||||
|  | ||||
| - We reroute well known paths like `/usr/bin/gcc` to alternate | ||||
|   implementations with `update-alternatives`, instead of setting | ||||
|   `CC` and `CXX` in our build scripts. | ||||
|  | ||||
| @ -44,16 +44,15 @@ time python test/run_test.py --verbose -i distributed/_tensor/test_dtensor_compi | ||||
| time python test/run_test.py --verbose -i distributed/test_device_mesh | ||||
|  | ||||
| # DTensor/TP tests | ||||
| time python test/run_test.py --verbose -i distributed/tensor/parallel/test_ddp_2d_parallel | ||||
| time python test/run_test.py --verbose -i distributed/tensor/parallel/test_fsdp_2d_parallel | ||||
| time python test/run_test.py --verbose -i distributed/tensor/parallel/test_tp_examples | ||||
| time python test/run_test.py --verbose -i distributed/tensor/parallel/test_tp_random_state | ||||
|  | ||||
| # FSDP2 tests | ||||
| time python test/run_test.py --verbose -i distributed/_composable/fsdp/test_fully_shard_training -- -k test_2d_mlp_with_nd_mesh | ||||
|  | ||||
| # Pipelining composability tests | ||||
| time python test/run_test.py --verbose -i distributed/pipelining/test_composability.py | ||||
| # ND composability tests | ||||
| time python test/run_test.py --verbose -i distributed/_composable/test_composability/test_2d_composability | ||||
| time python test/run_test.py --verbose -i distributed/_composable/test_composability/test_pp_composability | ||||
|  | ||||
| # Other tests | ||||
| time python test/run_test.py --verbose -i test_cuda_primary_ctx | ||||
|  | ||||
| @ -316,11 +316,9 @@ test_inductor_distributed() { | ||||
|   python test/run_test.py -i inductor/test_aot_inductor.py -k test_replicate_on_devices --verbose | ||||
|   python test/run_test.py -i distributed/test_c10d_functional_native.py --verbose | ||||
|   python test/run_test.py -i distributed/_tensor/test_dtensor_compile.py --verbose | ||||
|   python test/run_test.py -i distributed/tensor/parallel/test_fsdp_2d_parallel.py --verbose | ||||
|   python test/run_test.py -i distributed/_composable/fsdp/test_fully_shard_comm.py --verbose | ||||
|   python test/run_test.py -i distributed/_composable/fsdp/test_fully_shard_training.py -k test_train_parity_multi_group --verbose | ||||
|   python test/run_test.py -i distributed/_composable/fsdp/test_fully_shard_training.py -k test_train_parity_with_activation_checkpointing --verbose | ||||
|   python test/run_test.py -i distributed/_composable/fsdp/test_fully_shard_training.py -k test_train_parity_2d_mlp --verbose | ||||
|   python test/run_test.py -i distributed/_composable/fsdp/test_fully_shard_training.py -k test_train_parity_hsdp --verbose | ||||
|   python test/run_test.py -i distributed/_composable/fsdp/test_fully_shard_training.py -k test_train_parity_2d_transformer_checkpoint_resume --verbose | ||||
|   python test/run_test.py -i distributed/_composable/fsdp/test_fully_shard_training.py -k test_gradient_accumulation --verbose | ||||
| @ -405,7 +403,7 @@ if [[ "${TEST_CONFIG}" == *dynamic* ]]; then | ||||
|   DYNAMO_BENCHMARK_FLAGS+=(--dynamic-shapes --dynamic-batch-only) | ||||
| fi | ||||
|  | ||||
| if [[ "${TEST_CONFIG}" == *cpu_inductor* || "${TEST_CONFIG}" == *cpu_aot_inductor* ]]; then | ||||
| if [[ "${TEST_CONFIG}" == *cpu* ]]; then | ||||
|   DYNAMO_BENCHMARK_FLAGS+=(--device cpu) | ||||
| else | ||||
|   DYNAMO_BENCHMARK_FLAGS+=(--device cuda) | ||||
| @ -429,6 +427,19 @@ test_perf_for_dashboard() { | ||||
|   # TODO: All the accuracy tests can be skipped once the CI accuracy checking is stable enough | ||||
|   local targets=(accuracy performance) | ||||
|  | ||||
|   local device=cuda | ||||
|   local taskset="" | ||||
|   if [[ "${TEST_CONFIG}" == *cpu* ]]; then | ||||
|     if [[ "${TEST_CONFIG}" == *cpu_x86* ]]; then | ||||
|       device=cpu_x86 | ||||
|     elif [[ "${TEST_CONFIG}" == *cpu_aarch64* ]]; then | ||||
|       device=cpu_aarch64 | ||||
|     fi | ||||
|     test_inductor_set_cpu_affinity | ||||
|     end_core=$(( $(test_inductor_get_core_number)-1 )) | ||||
|     taskset="taskset -c 0-$end_core" | ||||
|   fi | ||||
|  | ||||
|   for mode in "${modes[@]}"; do | ||||
|     if [[ "$mode" == "inference" ]]; then | ||||
|       dtype=bfloat16 | ||||
| @ -444,56 +455,56 @@ test_perf_for_dashboard() { | ||||
|       fi | ||||
|  | ||||
|       if [[ "$DASHBOARD_TAG" == *default-true* ]]; then | ||||
|         python "benchmarks/dynamo/$suite.py" \ | ||||
|         $taskset python "benchmarks/dynamo/$suite.py" \ | ||||
|             "${target_flag[@]}" --"$mode" --"$dtype" --backend "$backend" --disable-cudagraphs "$@" \ | ||||
|             --output "$TEST_REPORTS_DIR/${backend}_no_cudagraphs_${suite}_${dtype}_${mode}_cuda_${target}.csv" | ||||
|             --output "$TEST_REPORTS_DIR/${backend}_no_cudagraphs_${suite}_${dtype}_${mode}_${device}_${target}.csv" | ||||
|       fi | ||||
|       if [[ "$DASHBOARD_TAG" == *cudagraphs-true* ]]; then | ||||
|         python "benchmarks/dynamo/$suite.py" \ | ||||
|         $taskset python "benchmarks/dynamo/$suite.py" \ | ||||
|             "${target_flag[@]}" --"$mode" --"$dtype" --backend "$backend" "$@" \ | ||||
|             --output "$TEST_REPORTS_DIR/${backend}_with_cudagraphs_${suite}_${dtype}_${mode}_cuda_${target}.csv" | ||||
|             --output "$TEST_REPORTS_DIR/${backend}_with_cudagraphs_${suite}_${dtype}_${mode}_${device}_${target}.csv" | ||||
|       fi | ||||
|       if [[ "$DASHBOARD_TAG" == *dynamic-true* ]]; then | ||||
|         python "benchmarks/dynamo/$suite.py" \ | ||||
|         $taskset python "benchmarks/dynamo/$suite.py" \ | ||||
|             "${target_flag[@]}" --"$mode" --"$dtype" --backend "$backend" --dynamic-shapes \ | ||||
|             --dynamic-batch-only "$@" \ | ||||
|             --output "$TEST_REPORTS_DIR/${backend}_dynamic_${suite}_${dtype}_${mode}_cuda_${target}.csv" | ||||
|             --output "$TEST_REPORTS_DIR/${backend}_dynamic_${suite}_${dtype}_${mode}_${device}_${target}.csv" | ||||
|       fi | ||||
|       if [[ "$DASHBOARD_TAG" == *cppwrapper-true* ]] && [[ "$mode" == "inference" ]]; then | ||||
|         TORCHINDUCTOR_CPP_WRAPPER=1 python "benchmarks/dynamo/$suite.py" \ | ||||
|         TORCHINDUCTOR_CPP_WRAPPER=1 $taskset python "benchmarks/dynamo/$suite.py" \ | ||||
|             "${target_flag[@]}" --"$mode" --"$dtype" --backend "$backend" --disable-cudagraphs "$@" \ | ||||
|             --output "$TEST_REPORTS_DIR/${backend}_cpp_wrapper_${suite}_${dtype}_${mode}_cuda_${target}.csv" | ||||
|             --output "$TEST_REPORTS_DIR/${backend}_cpp_wrapper_${suite}_${dtype}_${mode}_${device}_${target}.csv" | ||||
|       fi | ||||
|       if [[ "$DASHBOARD_TAG" == *freezing_cudagraphs-true* ]] && [[ "$mode" == "inference" ]]; then | ||||
|         python "benchmarks/dynamo/$suite.py" \ | ||||
|         $taskset python "benchmarks/dynamo/$suite.py" \ | ||||
|             "${target_flag[@]}" --"$mode" --"$dtype" --backend "$backend" "$@" --freezing \ | ||||
|             --output "$TEST_REPORTS_DIR/${backend}_with_cudagraphs_freezing_${suite}_${dtype}_${mode}_cuda_${target}.csv" | ||||
|             --output "$TEST_REPORTS_DIR/${backend}_with_cudagraphs_freezing_${suite}_${dtype}_${mode}_${device}_${target}.csv" | ||||
|       fi | ||||
|       if [[ "$DASHBOARD_TAG" == *freeze_autotune_cudagraphs-true* ]] && [[ "$mode" == "inference" ]]; then | ||||
|         TORCHINDUCTOR_MAX_AUTOTUNE=1 python "benchmarks/dynamo/$suite.py" \ | ||||
|         TORCHINDUCTOR_MAX_AUTOTUNE=1 $taskset python "benchmarks/dynamo/$suite.py" \ | ||||
|             "${target_flag[@]}" --"$mode" --"$dtype" --backend "$backend" "$@" --freezing \ | ||||
|             --output "$TEST_REPORTS_DIR/${backend}_with_cudagraphs_freezing_autotune_${suite}_${dtype}_${mode}_cuda_${target}.csv" | ||||
|             --output "$TEST_REPORTS_DIR/${backend}_with_cudagraphs_freezing_autotune_${suite}_${dtype}_${mode}_${device}_${target}.csv" | ||||
|       fi | ||||
|       if [[ "$DASHBOARD_TAG" == *aotinductor-true* ]] && [[ "$mode" == "inference" ]]; then | ||||
|         TORCHINDUCTOR_ABI_COMPATIBLE=1 python "benchmarks/dynamo/$suite.py" \ | ||||
|         TORCHINDUCTOR_ABI_COMPATIBLE=1 $taskset python "benchmarks/dynamo/$suite.py" \ | ||||
|             "${target_flag[@]}" --"$mode" --"$dtype" --export-aot-inductor --disable-cudagraphs "$@" \ | ||||
|             --output "$TEST_REPORTS_DIR/${backend}_aot_inductor_${suite}_${dtype}_${mode}_cuda_${target}.csv" | ||||
|             --output "$TEST_REPORTS_DIR/${backend}_aot_inductor_${suite}_${dtype}_${mode}_${device}_${target}.csv" | ||||
|       fi | ||||
|       if [[ "$DASHBOARD_TAG" == *maxautotune-true* ]]; then | ||||
|         TORCHINDUCTOR_MAX_AUTOTUNE=1 python "benchmarks/dynamo/$suite.py" \ | ||||
|         TORCHINDUCTOR_MAX_AUTOTUNE=1 $taskset python "benchmarks/dynamo/$suite.py" \ | ||||
|             "${target_flag[@]}" --"$mode" --"$dtype" --backend "$backend" "$@" \ | ||||
|             --output "$TEST_REPORTS_DIR/${backend}_max_autotune_${suite}_${dtype}_${mode}_cuda_${target}.csv" | ||||
|             --output "$TEST_REPORTS_DIR/${backend}_max_autotune_${suite}_${dtype}_${mode}_${device}_${target}.csv" | ||||
|       fi | ||||
|       if [[ "$DASHBOARD_TAG" == *cudagraphs_low_precision-true* ]] && [[ "$mode" == "inference" ]]; then | ||||
|         # TODO: This has a new dtype called quant and the benchmarks script needs to be updated to support this. | ||||
|         # The tentative command is as follows. It doesn't work now, but it's ok because we only need mock data | ||||
|         # to fill the dashboard. | ||||
|         python "benchmarks/dynamo/$suite.py" \ | ||||
|         $taskset python "benchmarks/dynamo/$suite.py" \ | ||||
|           "${target_flag[@]}" --"$mode" --quant --backend "$backend" "$@" \ | ||||
|           --output "$TEST_REPORTS_DIR/${backend}_cudagraphs_low_precision_${suite}_quant_${mode}_cuda_${target}.csv" || true | ||||
|           --output "$TEST_REPORTS_DIR/${backend}_cudagraphs_low_precision_${suite}_quant_${mode}_${device}_${target}.csv" || true | ||||
|         # Copy cudagraph results as mock data, easiest choice? | ||||
|         cp "$TEST_REPORTS_DIR/${backend}_with_cudagraphs_${suite}_${dtype}_${mode}_cuda_${target}.csv" \ | ||||
|           "$TEST_REPORTS_DIR/${backend}_cudagraphs_low_precision_${suite}_quant_${mode}_cuda_${target}.csv" | ||||
|         cp "$TEST_REPORTS_DIR/${backend}_with_cudagraphs_${suite}_${dtype}_${mode}_${device}_${target}.csv" \ | ||||
|           "$TEST_REPORTS_DIR/${backend}_cudagraphs_low_precision_${suite}_quant_${mode}_${device}_${target}.csv" | ||||
|       fi | ||||
|     done | ||||
|   done | ||||
| @ -574,7 +585,7 @@ test_dynamo_benchmark() { | ||||
|   elif [[ "${TEST_CONFIG}" == *perf* ]]; then | ||||
|     test_single_dynamo_benchmark "dashboard" "$suite" "$shard_id" "$@" | ||||
|   else | ||||
|     if [[ "${TEST_CONFIG}" == *cpu_inductor* || "${TEST_CONFIG}" == *cpu_aot_inductor* ]]; then | ||||
|     if [[ "${TEST_CONFIG}" == *cpu* ]]; then | ||||
|       local dt="float32" | ||||
|       if [[ "${TEST_CONFIG}" == *amp* ]]; then | ||||
|         dt="amp" | ||||
| @ -645,10 +656,11 @@ test_inductor_torchbench_smoketest_perf() { | ||||
|   done | ||||
| } | ||||
|  | ||||
| test_inductor_torchbench_cpu_smoketest_perf(){ | ||||
|   TEST_REPORTS_DIR=$(pwd)/test/test-reports | ||||
|   mkdir -p "$TEST_REPORTS_DIR" | ||||
| test_inductor_get_core_number() { | ||||
|   echo $(($(lscpu | grep 'Socket(s):' | awk '{print $2}') * $(lscpu | grep 'Core(s) per socket:' | awk '{print $4}'))) | ||||
| } | ||||
|  | ||||
| test_inductor_set_cpu_affinity(){ | ||||
|   #set jemalloc | ||||
|   JEMALLOC_LIB="/usr/lib/x86_64-linux-gnu/libjemalloc.so.2" | ||||
|   IOMP_LIB="$(dirname "$(which python)")/../lib/libiomp5.so" | ||||
| @ -656,32 +668,39 @@ test_inductor_torchbench_cpu_smoketest_perf(){ | ||||
|   export MALLOC_CONF="oversize_threshold:1,background_thread:true,metadata_thp:auto,dirty_decay_ms:-1,muzzy_decay_ms:-1" | ||||
|   export KMP_AFFINITY=granularity=fine,compact,1,0 | ||||
|   export KMP_BLOCKTIME=1 | ||||
|   CORES=$(lscpu | grep Core | awk '{print $4}') | ||||
|   export OMP_NUM_THREADS=$CORES | ||||
|   end_core=$(( CORES-1 )) | ||||
|   cores=$(test_inductor_get_core_number) | ||||
|   export OMP_NUM_THREADS=$cores | ||||
| } | ||||
|  | ||||
| test_inductor_torchbench_cpu_smoketest_perf(){ | ||||
|   TEST_REPORTS_DIR=$(pwd)/test/test-reports | ||||
|   mkdir -p "$TEST_REPORTS_DIR" | ||||
|  | ||||
|   test_inductor_set_cpu_affinity | ||||
|   end_core=$(( $(test_inductor_get_core_number)-1 )) | ||||
|   MODELS_SPEEDUP_TARGET=benchmarks/dynamo/expected_ci_speedup_inductor_torchbench_cpu.csv | ||||
|  | ||||
|   grep -v '^ *#' < "$MODELS_SPEEDUP_TARGET" | while IFS=',' read -r -a model_cfg | ||||
|   do | ||||
|     local model_name=${model_cfg[0]} | ||||
|     local data_type=${model_cfg[1]} | ||||
|     local speedup_target=${model_cfg[4]} | ||||
|     if [[ ${model_cfg[3]} == "cpp" ]]; then | ||||
|     local data_type=${model_cfg[2]} | ||||
|     local speedup_target=${model_cfg[5]} | ||||
|     local backend=${model_cfg[1]} | ||||
|     if [[ ${model_cfg[4]} == "cpp" ]]; then | ||||
|       export TORCHINDUCTOR_CPP_WRAPPER=1 | ||||
|     else | ||||
|       unset TORCHINDUCTOR_CPP_WRAPPER | ||||
|     fi | ||||
|     local output_name="$TEST_REPORTS_DIR/inductor_inference_${model_cfg[0]}_${model_cfg[1]}_${model_cfg[2]}_${model_cfg[3]}_cpu_smoketest.csv" | ||||
|  | ||||
|     if [[ ${model_cfg[2]} == "dynamic" ]]; then | ||||
|     if [[ ${model_cfg[3]} == "dynamic" ]]; then | ||||
|       taskset -c 0-"$end_core" python benchmarks/dynamo/torchbench.py \ | ||||
|         --inference --performance --"$data_type" -dcpu -n50 --only "$model_name" --dynamic-shapes \ | ||||
|         --dynamic-batch-only --freezing --timeout 9000 --backend=inductor --output "$output_name" | ||||
|         --dynamic-batch-only --freezing --timeout 9000 --"$backend" --output "$output_name" | ||||
|     else | ||||
|       taskset -c 0-"$end_core" python benchmarks/dynamo/torchbench.py \ | ||||
|         --inference --performance --"$data_type" -dcpu -n50 --only "$model_name" \ | ||||
|         --freezing --timeout 9000 --backend=inductor --output "$output_name" | ||||
|         --freezing --timeout 9000 --"$backend" --output "$output_name" | ||||
|     fi | ||||
|     cat "$output_name" | ||||
|     # The threshold value needs to be actively maintained to make this check useful. | ||||
| @ -1267,7 +1286,7 @@ elif [[ "${TEST_CONFIG}" == *timm* ]]; then | ||||
|   id=$((SHARD_NUMBER-1)) | ||||
|   test_dynamo_benchmark timm_models "$id" | ||||
| elif [[ "${TEST_CONFIG}" == *torchbench* ]]; then | ||||
|   if [[ "${TEST_CONFIG}" == *cpu_inductor* || "${TEST_CONFIG}" == *cpu_aot_inductor* ]]; then | ||||
|   if [[ "${TEST_CONFIG}" == *cpu* ]]; then | ||||
|     install_torchaudio cpu | ||||
|   else | ||||
|     install_torchaudio cuda | ||||
| @ -1284,7 +1303,7 @@ elif [[ "${TEST_CONFIG}" == *torchbench* ]]; then | ||||
|   elif [[ "${TEST_CONFIG}" == *inductor_torchbench_cpu_smoketest_perf* ]]; then | ||||
|     checkout_install_torchbench timm_vision_transformer phlippe_densenet basic_gnn_gcn \ | ||||
|       llama_v2_7b_16h resnet50 timm_efficientnet mobilenet_v3_large timm_resnest \ | ||||
|       shufflenet_v2_x1_0 hf_GPT2 | ||||
|       shufflenet_v2_x1_0 hf_GPT2 yolov3 mobilenet_v2 resnext50_32x4d hf_T5_base | ||||
|     PYTHONPATH=$(pwd)/torchbench test_inductor_torchbench_cpu_smoketest_perf | ||||
|   elif [[ "${TEST_CONFIG}" == *torchbench_gcp_smoketest* ]]; then | ||||
|     checkout_install_torchbench | ||||
| @ -1293,7 +1312,7 @@ elif [[ "${TEST_CONFIG}" == *torchbench* ]]; then | ||||
|     checkout_install_torchbench | ||||
|     # Do this after checkout_install_torchbench to ensure we clobber any | ||||
|     # nightlies that torchbench may pull in | ||||
|     if [[ "${TEST_CONFIG}" != *cpu_inductor* && "${TEST_CONFIG}" != *cpu_aot_inductor* ]]; then | ||||
|     if [[ "${TEST_CONFIG}" != *cpu* ]]; then | ||||
|       install_torchrec_and_fbgemm | ||||
|     fi | ||||
|     PYTHONPATH=$(pwd)/torchbench test_dynamo_benchmark torchbench "$id" | ||||
|  | ||||
| @ -52,13 +52,6 @@ if [[ "\$python_nodot" = *39* ]]; then | ||||
|   NUMPY_PIN=">=1.20" | ||||
| fi | ||||
|  | ||||
| if [[ "\$python_nodot" = *38* ]]; then | ||||
|   # sympy 1.12.1 is the last version that supports Python 3.8 | ||||
|   SYMPY_PIN="==1.12.1" | ||||
| else | ||||
|   SYMPY_PIN=">=1.13.0" | ||||
| fi | ||||
|  | ||||
| # Move debug wheels out of the package dir so they don't get installed | ||||
| mkdir -p /tmp/debug_final_pkgs | ||||
| mv /final_pkgs/debug-*.zip /tmp/debug_final_pkgs || echo "no debug packages to move" | ||||
| @ -88,7 +81,7 @@ if [[ "$PACKAGE_TYPE" == conda ]]; then | ||||
|       "numpy\${NUMPY_PIN}" \ | ||||
|       mkl>=2018 \ | ||||
|       ninja \ | ||||
|       "sympy\${SYMPY_PIN}" \ | ||||
|       sympy>=1.12 \ | ||||
|       typing-extensions \ | ||||
|       ${PROTOBUF_PACKAGE} | ||||
|     if [[ "$DESIRED_CUDA" == 'cpu' ]]; then | ||||
|  | ||||
| @ -5,7 +5,7 @@ git submodule sync | ||||
| git submodule update --init --recursive | ||||
|  | ||||
| # This takes some time | ||||
| make setup_lint | ||||
| make setup-lint | ||||
|  | ||||
| # Add CMAKE_PREFIX_PATH to bashrc | ||||
| echo 'export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"}' >> ~/.bashrc | ||||
|  | ||||
							
								
								
									
										26
									
								
								.github/actionlint.yaml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										26
									
								
								.github/actionlint.yaml
									
									
									
									
										vendored
									
									
								
							| @ -9,14 +9,16 @@ self-hosted-runner: | ||||
|     - linux.large | ||||
|     - linux.2xlarge | ||||
|     - linux.4xlarge | ||||
|     - linux.9xlarge.ephemeral | ||||
|     - linux.12xlarge | ||||
|     - linux.12xlarge.ephemeral | ||||
|     - linux.24xlarge | ||||
|     - linux.arm64.2xlarge | ||||
|     - linux.4xlarge.nvidia.gpu | ||||
|     - linux.8xlarge.nvidia.gpu | ||||
|     - linux.16xlarge.nvidia.gpu | ||||
|     - linux.g5.4xlarge.nvidia.gpu | ||||
|     # Organization-wide AWS Linux Runners on Linux Foundation account | ||||
|     # Pytorch/pytorch AWS Linux Runners on Linux Foundation account | ||||
|     - lf.linux.large | ||||
|     - lf.linux.2xlarge | ||||
|     - lf.linux.4xlarge | ||||
| @ -27,6 +29,28 @@ self-hosted-runner: | ||||
|     - lf.linux.8xlarge.nvidia.gpu | ||||
|     - lf.linux.16xlarge.nvidia.gpu | ||||
|     - lf.linux.g5.4xlarge.nvidia.gpu | ||||
|     # Organization-wide AWS Linux Runners with new Amazon 2023 AMI | ||||
|     - amz2023.linux.large | ||||
|     - amz2023.linux.2xlarge | ||||
|     - amz2023.linux.4xlarge | ||||
|     - amz2023.linux.12xlarge | ||||
|     - amz2023.linux.24xlarge | ||||
|     - amz2023.linux.arm64.2xlarge | ||||
|     - amz2023.linux.4xlarge.nvidia.gpu | ||||
|     - amz2023.linux.8xlarge.nvidia.gpu | ||||
|     - amz2023.linux.16xlarge.nvidia.gpu | ||||
|     - amz2023.linux.g5.4xlarge.nvidia.gpu | ||||
|     # Pytorch/pytorch AWS Linux Runners with the new Amazon 2023 AMI on Linux Foundation account | ||||
|     - amz2023.lf.linux.large | ||||
|     - amz2023.lf.linux.2xlarge | ||||
|     - amz2023.lf.linux.4xlarge | ||||
|     - amz2023.lf.linux.12xlarge | ||||
|     - amz2023.lf.linux.24xlarge | ||||
|     - amz2023.lf.linux.arm64.2xlarge | ||||
|     - amz2023.lf.linux.4xlarge.nvidia.gpu | ||||
|     - amz2023.lf.linux.8xlarge.nvidia.gpu | ||||
|     - amz2023.lf.linux.16xlarge.nvidia.gpu | ||||
|     - amz2023.lf.linux.g5.4xlarge.nvidia.gpu | ||||
|     # Repo-specific IBM hosted S390x runner | ||||
|     - linux.s390x | ||||
|     # Organization wide AWS Windows runners | ||||
|  | ||||
							
								
								
									
										1
									
								
								.github/pytorch-probot.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										1
									
								
								.github/pytorch-probot.yml
									
									
									
									
										vendored
									
									
								
							| @ -6,6 +6,7 @@ ciflow_push_tags: | ||||
| - ciflow/binaries_libtorch | ||||
| - ciflow/binaries_wheel | ||||
| - ciflow/inductor | ||||
| - ciflow/inductor-rocm | ||||
| - ciflow/inductor-perf-compare | ||||
| - ciflow/inductor-micro-benchmark | ||||
| - ciflow/inductor-cu124 | ||||
|  | ||||
| @ -13,7 +13,7 @@ git checkout "$SYNC_BRANCH" | ||||
|  | ||||
| # Using a hardcoded SHA here is a massive speedup as we can skip the entire history of the pytorch GitHub repo. | ||||
| # This specific SHA was chosen as it was before the "branch point" of the stable branch | ||||
| for SHA in $(git log ba3b05fdf37ddbc3c301294d6a560a816335e717..origin/main --pretty="%h" --reverse -- torch/distributed torch/csrc/distributed test/distributed test/cpp/c10d benchmarks/distributed) | ||||
| for SHA in $(git log ba3b05fdf37ddbc3c301294d6a560a816335e717..origin/main --pretty="%h" -- torch/distributed torch/csrc/distributed test/distributed test/cpp/c10d benchmarks/distributed) | ||||
| do | ||||
|     # `git merge-base --is-ancestor` exits with code 0 if the given SHA is an ancestor, and non-0 otherwise | ||||
|     if git merge-base --is-ancestor $SHA HEAD || [[ $(git log --grep="(cherry picked from commit $SHA") ]] | ||||
|  | ||||
							
								
								
									
										2
									
								
								.github/scripts/trymerge.py
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										2
									
								
								.github/scripts/trymerge.py
									
									
									
									
										vendored
									
									
								
							| @ -1459,7 +1459,7 @@ def find_matching_merge_rule( | ||||
|  | ||||
|         if not skip_internal_checks and pr.has_internal_changes(): | ||||
|             raise RuntimeError( | ||||
|                 "This PR has internal changes and must be landed via Phabricator" | ||||
|                 "This PR has internal changes and must be landed via Phabricator! Please try reimporting/rexporting the PR!" | ||||
|             ) | ||||
|  | ||||
|         # Categorize all checks when skip_mandatory_checks (force merge) is set. Do it here | ||||
|  | ||||
							
								
								
									
										64
									
								
								.github/workflows/build-conda-images.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										64
									
								
								.github/workflows/build-conda-images.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @ -0,0 +1,64 @@ | ||||
| name: Build conda docker images | ||||
|  | ||||
| on: | ||||
|   workflow_dispatch: | ||||
|   push: | ||||
|     branches: | ||||
|       - main | ||||
|       - release/* | ||||
|     tags: | ||||
|       # NOTE: Binary build pipelines should only get triggered on release candidate or nightly builds | ||||
|       # Release candidate tags look like: v1.11.0-rc1 | ||||
|       - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+ | ||||
|     paths: | ||||
|       - conda/Dockerfile | ||||
|       - 'common/*' | ||||
|       - .github/workflows/build-conda-images.yml | ||||
|   pull_request: | ||||
|     paths: | ||||
|       - conda/Dockerfile | ||||
|       - 'common/*' | ||||
|       - .github/workflows/build-conda-images.yml | ||||
|  | ||||
| env: | ||||
|   DOCKER_REGISTRY: "docker.io" | ||||
|   DOCKER_BUILDKIT: 1 | ||||
|   DOCKER_ID: ${{ secrets.DOCKER_ID }} | ||||
|   DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }} | ||||
|   WITH_PUSH: ${{ github.event_name == 'push' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release')) }} | ||||
|  | ||||
| concurrency: | ||||
|   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }} | ||||
|   cancel-in-progress: true | ||||
|  | ||||
| jobs: | ||||
|   build-docker: | ||||
|     runs-on: linux.9xlarge.ephemeral | ||||
|     strategy: | ||||
|       matrix: | ||||
|         cuda_version: ["11.8", "12.1", "12.4", "cpu"] | ||||
|     env: | ||||
|       CUDA_VERSION: ${{ matrix.cuda_version }} | ||||
|     steps: | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         with: | ||||
|           submodules: false | ||||
|       - name: Calculate docker image | ||||
|         if: env.WITH_PUSH == 'false' | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|         with: | ||||
|             docker-image-name: conda-builder${{ matrix.cuda_version == 'cpu' && '-' || '-cuda' }}${{matrix.cuda_version}} | ||||
|             docker-build-dir:  .ci/docker/conda | ||||
|             always-rebuild: true | ||||
|             push: true | ||||
|       - name: Authenticate if WITH_PUSH | ||||
|         if: env.WITH_PUSH == 'true' | ||||
|         run: | | ||||
|           if [[ "${WITH_PUSH}" == true ]]; then | ||||
|             echo "${DOCKER_TOKEN}" | docker login -u "${DOCKER_ID}" --password-stdin | ||||
|           fi | ||||
|       - name: Build Docker Image | ||||
|         if: env.WITH_PUSH == 'true' | ||||
|         run: | | ||||
|           .ci/docker/conda/build.sh conda-builder${{ matrix.cuda_version == 'cpu' && ':' || ':cuda' }}${{matrix.cuda_version}} | ||||
							
								
								
									
										120
									
								
								.github/workflows/build-libtorch-images.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										120
									
								
								.github/workflows/build-libtorch-images.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @ -0,0 +1,120 @@ | ||||
| name: Build libtorch docker images | ||||
|  | ||||
| on: | ||||
|   push: | ||||
|     branches: | ||||
|       - main | ||||
|       - release/* | ||||
|     tags: | ||||
|       # NOTE: Binary build pipelines should only get triggered on release candidate or nightly builds | ||||
|       # Release candidate tags look like: v1.11.0-rc1 | ||||
|       - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+ | ||||
|     paths: | ||||
|       - '.ci/docker/libtorch/*' | ||||
|       - '.ci/docker/common/*' | ||||
|       - .github/workflows/build-libtorch-images.yml | ||||
|   pull_request: | ||||
|     paths: | ||||
|       - '.ci/docker/libtorch/*' | ||||
|       - '.ci/docker/common/*' | ||||
|       - .github/workflows/build-libtorch-images.yml | ||||
|  | ||||
| env: | ||||
|   DOCKER_REGISTRY: "docker.io" | ||||
|   DOCKER_BUILDKIT: 1 | ||||
|   DOCKER_ID: ${{ secrets.DOCKER_ID }} | ||||
|   DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }} | ||||
|   WITH_PUSH: ${{ github.event_name == 'push' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release')) }} | ||||
|  | ||||
| concurrency: | ||||
|   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }} | ||||
|   cancel-in-progress: true | ||||
|  | ||||
| jobs: | ||||
|   build-docker-cuda: | ||||
|     runs-on: linux.9xlarge.ephemeral | ||||
|     strategy: | ||||
|       matrix: | ||||
|         cuda_version: ["12.4", "12.1", "11.8"] | ||||
|     env: | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       GPU_ARCH_VERSION: ${{ matrix.cuda_version }} | ||||
|     steps: | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         with: | ||||
|           submodules: false | ||||
|       - name: Calculate docker image | ||||
|         if: env.WITH_PUSH == 'false' | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|         with: | ||||
|             docker-image-name: libtorch-cxx11-builder-cuda${{matrix.cuda_version}} | ||||
|             docker-build-dir:  .ci/docker/libtorch | ||||
|             always-rebuild: true | ||||
|             push: true | ||||
|       - name: Authenticate if WITH_PUSH | ||||
|         if: env.WITH_PUSH == 'true' | ||||
|         run: | | ||||
|           if [[ "${WITH_PUSH}" == true ]]; then | ||||
|             echo "${DOCKER_TOKEN}" | docker login -u "${DOCKER_ID}" --password-stdin | ||||
|           fi | ||||
|       - name: Build Docker Image | ||||
|         if: env.WITH_PUSH == 'true' | ||||
|         run: | | ||||
|           .ci/docker/libtorch/build.sh libtorch-cxx11-builder:cuda${{matrix.cuda_version}} | ||||
|   build-docker-rocm: | ||||
|     runs-on: linux.9xlarge.ephemeral | ||||
|     strategy: | ||||
|       matrix: | ||||
|         rocm_version: ["6.0", "6.1"] | ||||
|     env: | ||||
|       GPU_ARCH_TYPE: rocm | ||||
|       GPU_ARCH_VERSION: ${{ matrix.rocm_version }} | ||||
|     steps: | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         with: | ||||
|           submodules: false | ||||
|       - name: Calculate docker image | ||||
|         if: env.WITH_PUSH == 'false' | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|         with: | ||||
|             docker-image-name: libtorch-cxx11-builder-rocm${{matrix.rocm_version}} | ||||
|             docker-build-dir:  .ci/docker/libtorch | ||||
|             always-rebuild: true | ||||
|             push: true | ||||
|       - name: Authenticate if WITH_PUSH | ||||
|         if: env.WITH_PUSH == 'true' | ||||
|         run: | | ||||
|           if [[ "${WITH_PUSH}" == true ]]; then | ||||
|             echo "${DOCKER_TOKEN}" | docker login -u "${DOCKER_ID}" --password-stdin | ||||
|           fi | ||||
|       - name: Build Docker Image | ||||
|         if: env.WITH_PUSH == 'true' | ||||
|         run: | | ||||
|           .ci/docker/libtorch/build.sh libtorch-cxx11-builder:rocm${{matrix.rocm_version}} | ||||
|   build-docker-cpu: | ||||
|     runs-on: linux.9xlarge.ephemeral | ||||
|     steps: | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         with: | ||||
|           submodules: false | ||||
|       - name: Calculate docker image | ||||
|         if: env.WITH_PUSH == 'false' | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|         with: | ||||
|             docker-image-name: libtorch-cxx11-builder-cpu | ||||
|             docker-build-dir:  .ci/docker/libtorch | ||||
|             always-rebuild: true | ||||
|             push: true | ||||
|       - name: Authenticate if WITH_PUSH | ||||
|         if: env.WITH_PUSH == 'true' | ||||
|         run: | | ||||
|           if [[ "${WITH_PUSH}" == true ]]; then | ||||
|             echo "${DOCKER_TOKEN}" | docker login -u "${DOCKER_ID}" --password-stdin | ||||
|           fi | ||||
|       - name: Build Docker Image | ||||
|         if: env.WITH_PUSH == 'true' | ||||
|         run: | | ||||
|           .ci/docker/libtorch/build.sh libtorch-cxx11-builder:cpu | ||||
							
								
								
									
										322
									
								
								.github/workflows/build-manywheel-images.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										322
									
								
								.github/workflows/build-manywheel-images.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @ -0,0 +1,322 @@ | ||||
| name: Build manywheel docker images | ||||
|  | ||||
| on: | ||||
|   workflow_dispatch: | ||||
|   push: | ||||
|     branches: | ||||
|       - main | ||||
|       - release/* | ||||
|     tags: | ||||
|       # NOTE: Binary build pipelines should only get triggered on release candidate or nightly builds | ||||
|       # Release candidate tags look like: v1.11.0-rc1 | ||||
|       - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+ | ||||
|     paths: | ||||
|       - '.ci/docker/manywheel/*' | ||||
|       - '.ci/docker/common/*' | ||||
|       - .github/workflows/build-manywheel-images.yml | ||||
|   pull_request: | ||||
|     paths: | ||||
|       - '.ci/docker/manywheel/*' | ||||
|       - '.ci/docker/common/*' | ||||
|       - .github/workflows/build-manywheel-images.yml | ||||
|  | ||||
|  | ||||
| env: | ||||
|   DOCKER_REGISTRY: "docker.io" | ||||
|   DOCKER_BUILDKIT: 1 | ||||
|   DOCKER_ID: ${{ secrets.DOCKER_ID }} | ||||
|   DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }} | ||||
|   WITH_PUSH: ${{ github.event_name == 'push' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release')) }} | ||||
|  | ||||
| concurrency: | ||||
|   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }} | ||||
|   cancel-in-progress: true | ||||
|  | ||||
| jobs: | ||||
|   build-docker-cuda: | ||||
|     runs-on: linux.9xlarge.ephemeral | ||||
|     strategy: | ||||
|       matrix: | ||||
|         cuda_version: ["12.4", "12.1", "11.8"] | ||||
|     env: | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       GPU_ARCH_VERSION: ${{ matrix.cuda_version }} | ||||
|     steps: | ||||
|       - name: Purge tools folder (free space for build) | ||||
|         run: rm -rf /opt/hostedtoolcache | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         with: | ||||
|           submodules: false | ||||
|       - name: Calculate docker image | ||||
|         if: env.WITH_PUSH == 'false' | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|         with: | ||||
|             docker-image-name: manylinux-builder-cuda${{matrix.cuda_version}} | ||||
|             docker-build-dir:  .ci/docker/manywheel | ||||
|             always-rebuild: true | ||||
|             push: true | ||||
|       - name: Authenticate if WITH_PUSH | ||||
|         if: env.WITH_PUSH == 'true' | ||||
|         run: | | ||||
|           if [[ "${WITH_PUSH}" == true ]]; then | ||||
|             echo "${DOCKER_TOKEN}" | docker login -u "${DOCKER_ID}" --password-stdin | ||||
|           fi | ||||
|       - name: Build Docker Image | ||||
|         if: env.WITH_PUSH == 'true' | ||||
|         run: | | ||||
|           .ci/docker/manywheel/build.sh manylinux-builder:cuda${{matrix.cuda_version}} | ||||
|   # NOTE: manylinux_2_28 are still experimental, see https://github.com/pytorch/pytorch/issues/123649 | ||||
|   build-docker-cuda-manylinux_2_28: | ||||
|     runs-on: linux.9xlarge.ephemeral | ||||
|     strategy: | ||||
|       matrix: | ||||
|         cuda_version: ["12.4", "12.1", "11.8"] | ||||
|     env: | ||||
|       GPU_ARCH_TYPE: cuda-manylinux_2_28 | ||||
|       GPU_ARCH_VERSION: ${{ matrix.cuda_version }} | ||||
|     steps: | ||||
|       - name: Purge tools folder (free space for build) | ||||
|         run: rm -rf /opt/hostedtoolcache | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         with: | ||||
|           submodules: false | ||||
|       - name: Calculate docker image | ||||
|         if: env.WITH_PUSH == 'false' | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|         with: | ||||
|             docker-image-name: manylinux2_28-builder-cuda${{matrix.cuda_version}} | ||||
|             docker-build-dir:  .ci/docker/manywheel | ||||
|             always-rebuild: true | ||||
|             push: true | ||||
|       - name: Authenticate if WITH_PUSH | ||||
|         if: env.WITH_PUSH == 'true' | ||||
|         run: | | ||||
|           if [[ "${WITH_PUSH}" == true ]]; then | ||||
|             echo "${DOCKER_TOKEN}" | docker login -u "${DOCKER_ID}" --password-stdin | ||||
|           fi | ||||
|       - name: Build Docker Image | ||||
|         if: env.WITH_PUSH == 'true' | ||||
|         run: | | ||||
|           .ci/docker/manywheel/build.sh manylinux2_28-builder:cuda${{matrix.cuda_version}} | ||||
|   build-docker-cuda-aarch64: | ||||
|     runs-on: linux.arm64.2xlarge | ||||
|     strategy: | ||||
|       matrix: | ||||
|         cuda_version: ["12.4"] | ||||
|     env: | ||||
|       GPU_ARCH_TYPE: cuda-aarch64 | ||||
|       GPU_ARCH_VERSION: ${{ matrix.cuda_version }} | ||||
|     steps: | ||||
|       - name: Checkout PyTorch | ||||
|         uses: actions/checkout@v3 | ||||
|       - name: Calculate docker image | ||||
|         if: env.WITH_PUSH == 'false' | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|         with: | ||||
|             docker-image-name: manylinuxaarch64-builder-cuda${{matrix.cuda_version}} | ||||
|             docker-build-dir:  .ci/docker/manywheel | ||||
|             always-rebuild: true | ||||
|             push: true | ||||
|       - name: Authenticate if WITH_PUSH | ||||
|         if: env.WITH_PUSH == 'true' | ||||
|         run: | | ||||
|           if [[ "${WITH_PUSH}" == true ]]; then | ||||
|             echo "${DOCKER_TOKEN}" | docker login -u "${DOCKER_ID}" --password-stdin | ||||
|           fi | ||||
|       - name: Build Docker Image | ||||
|         if: env.WITH_PUSH == 'true' | ||||
|         run: | | ||||
|           .ci/docker/manywheel/build.sh manylinuxaarch64-builder:cuda${{matrix.cuda_version}} | ||||
|   build-docker-rocm: | ||||
|     runs-on: linux.9xlarge.ephemeral | ||||
|     strategy: | ||||
|       matrix: | ||||
|         rocm_version: ["6.0", "6.1"] | ||||
|     env: | ||||
|       GPU_ARCH_TYPE: rocm | ||||
|       GPU_ARCH_VERSION: ${{ matrix.rocm_version }} | ||||
|     steps: | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         with: | ||||
|           submodules: false | ||||
|       - name: Calculate docker image | ||||
|         if: env.WITH_PUSH == 'false' | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|         with: | ||||
|             docker-image-name: manylinux-builder-rocm${{matrix.rocm_version}} | ||||
|             docker-build-dir:  .ci/docker/manywheel | ||||
|             always-rebuild: true | ||||
|             push: true | ||||
|       - name: Authenticate if WITH_PUSH | ||||
|         if: env.WITH_PUSH == 'true' | ||||
|         run: | | ||||
|           if [[ "${WITH_PUSH}" == true ]]; then | ||||
|             echo "${DOCKER_TOKEN}" | docker login -u "${DOCKER_ID}" --password-stdin | ||||
|           fi | ||||
|       - name: Build Docker Image | ||||
|         if: env.WITH_PUSH == 'true' | ||||
|         run: | | ||||
|           .ci/docker/manywheel/build.sh manylinux-builder:rocm${{matrix.rocm_version}} | ||||
|   build-docker-cpu: | ||||
|     runs-on: linux.9xlarge.ephemeral | ||||
|     steps: | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         with: | ||||
|           submodules: false | ||||
|       - name: Calculate docker image | ||||
|         if: env.WITH_PUSH == 'false' | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|         with: | ||||
|             docker-image-name: manylinux-builder-cpu | ||||
|             docker-build-dir:  .ci/docker/manywheel | ||||
|             always-rebuild: true | ||||
|             push: true | ||||
|       - name: Authenticate if WITH_PUSH | ||||
|         if: env.WITH_PUSH == 'true' | ||||
|         run: | | ||||
|           if [[ "${WITH_PUSH}" == true ]]; then | ||||
|             echo "${DOCKER_TOKEN}" | docker login -u "${DOCKER_ID}" --password-stdin | ||||
|           fi | ||||
|       - name: Build Docker Image | ||||
|         if: env.WITH_PUSH == 'true' | ||||
|         run: | | ||||
|           .ci/docker/manywheel/build.sh manylinux-builder:cpu | ||||
|   build-docker-cpu-manylinux_2_28: | ||||
|     runs-on: linux.9xlarge.ephemeral | ||||
|     env: | ||||
|       GPU_ARCH_TYPE: cpu-manylinux_2_28 | ||||
|     steps: | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         with: | ||||
|           submodules: false | ||||
|       - name: Calculate docker image | ||||
|         if: env.WITH_PUSH == 'false' | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|         with: | ||||
|             docker-image-name: manylinux2_28-builder-cpu | ||||
|             docker-build-dir:  .ci/docker/manywheel | ||||
|             always-rebuild: true | ||||
|             push: true | ||||
|       - name: Authenticate if WITH_PUSH | ||||
|         if: env.WITH_PUSH == 'true' | ||||
|         run: | | ||||
|           if [[ "${WITH_PUSH}" == true ]]; then | ||||
|             echo "${DOCKER_TOKEN}" | docker login -u "${DOCKER_ID}" --password-stdin | ||||
|           fi | ||||
|       - name: Build Docker Image | ||||
|         if: env.WITH_PUSH == 'true' | ||||
|         run: | | ||||
|           .ci/docker/manywheel/build.sh manylinux2_28-builder:cpu | ||||
|   build-docker-cpu-aarch64: | ||||
|     runs-on: linux.arm64.2xlarge | ||||
|     env: | ||||
|       GPU_ARCH_TYPE: cpu-aarch64 | ||||
|     steps: | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         with: | ||||
|           submodules: false | ||||
|       - name: Calculate docker image | ||||
|         if: env.WITH_PUSH == 'false' | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|         with: | ||||
|             docker-image-name: manylinuxaarch64-builder-cpu-aarch64 | ||||
|             docker-build-dir:  .ci/docker/manywheel | ||||
|             always-rebuild: true | ||||
|             push: true | ||||
|       - name: Authenticate if WITH_PUSH | ||||
|         if: env.WITH_PUSH == 'true' | ||||
|         run: | | ||||
|           if [[ "${WITH_PUSH}" == true ]]; then | ||||
|             echo "${DOCKER_TOKEN}" | docker login -u "${DOCKER_ID}" --password-stdin | ||||
|           fi | ||||
|       - name: Build Docker Image | ||||
|         if: env.WITH_PUSH == 'true' | ||||
|         run: | | ||||
|           .ci/docker/manywheel/build.sh manylinuxaarch64-builder:cpu-aarch64 | ||||
|   build-docker-cpu-aarch64-2_28: | ||||
|     runs-on: linux.arm64.2xlarge | ||||
|     env: | ||||
|       GPU_ARCH_TYPE: cpu-aarch64-2_28 | ||||
|     steps: | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         with: | ||||
|           submodules: false | ||||
|       - name: Calculate docker image | ||||
|         if: env.WITH_PUSH == 'false' | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|         with: | ||||
|             docker-image-name: manylinux2_28_aarch64-builder-cpu-aarch64 | ||||
|             docker-build-dir:  .ci/docker/manywheel | ||||
|             always-rebuild: true | ||||
|             push: true | ||||
|       - name: Authenticate if WITH_PUSH | ||||
|         if: env.WITH_PUSH == 'true' | ||||
|         run: | | ||||
|           if [[ "${WITH_PUSH}" == true ]]; then | ||||
|             echo "${DOCKER_TOKEN}" | docker login -u "${DOCKER_ID}" --password-stdin | ||||
|           fi | ||||
|       - name: Build Docker Image | ||||
|         if: env.WITH_PUSH == 'true' | ||||
|         run: | | ||||
|           .ci/docker/manywheel/build.sh manylinux2_28_aarch64-builder:cpu-aarch64 | ||||
|   build-docker-cpu-cxx11-abi: | ||||
|     runs-on: linux.9xlarge.ephemeral | ||||
|     env: | ||||
|       GPU_ARCH_TYPE: cpu-cxx11-abi | ||||
|     steps: | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         with: | ||||
|           submodules: false | ||||
|       - name: Calculate docker image | ||||
|         if: env.WITH_PUSH == 'false' | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|         with: | ||||
|             docker-image-name: manylinuxcxx11-abi-builder-cpu-cxx11-abi | ||||
|             docker-build-dir:  .ci/docker/manywheel | ||||
|             always-rebuild: true | ||||
|             push: true | ||||
|       - name: Authenticate if WITH_PUSH | ||||
|         if: env.WITH_PUSH == 'true' | ||||
|         run: | | ||||
|           if [[ "${WITH_PUSH}" == true ]]; then | ||||
|             echo "${DOCKER_TOKEN}" | docker login -u "${DOCKER_ID}" --password-stdin | ||||
|           fi | ||||
|       - name: Build Docker Image | ||||
|         if: env.WITH_PUSH == 'true' | ||||
|         run: | | ||||
|           .ci/docker/manywheel/build.sh manylinuxcxx11-abi-builder:cpu-cxx11-abi | ||||
|   build-docker-xpu: | ||||
|     runs-on: linux.9xlarge.ephemeral | ||||
|     env: | ||||
|       GPU_ARCH_TYPE: xpu | ||||
|     steps: | ||||
|       - name: Checkout PyTorch | ||||
|         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main | ||||
|         with: | ||||
|           submodules: false | ||||
|       - name: Calculate docker image | ||||
|         if: env.WITH_PUSH == 'false' | ||||
|         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main | ||||
|         with: | ||||
|             docker-image-name: manylinux2_28-builder-xpu | ||||
|             docker-build-dir:  .ci/docker/manywheel | ||||
|             always-rebuild: true | ||||
|             push: true | ||||
|       - name: Authenticate if WITH_PUSH | ||||
|         if: env.WITH_PUSH == 'true' | ||||
|         run: | | ||||
|           if [[ "${WITH_PUSH}" == true ]]; then | ||||
|             echo "${DOCKER_TOKEN}" | docker login -u "${DOCKER_ID}" --password-stdin | ||||
|           fi | ||||
|       - name: Build Docker Image | ||||
|         if: env.WITH_PUSH == 'true' | ||||
|         run: | | ||||
|           .ci/docker/manywheel/build.sh manylinux2_28-builder:xpu | ||||
							
								
								
									
										2
									
								
								.github/workflows/docker-builds.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										2
									
								
								.github/workflows/docker-builds.yml
									
									
									
									
										vendored
									
									
								
							| @ -65,6 +65,8 @@ jobs: | ||||
|         include: | ||||
|           - docker-image-name: pytorch-linux-jammy-aarch64-py3.10-gcc11 | ||||
|             runner: linux.arm64.2xlarge | ||||
|           - docker-image-name: pytorch-linux-jammy-aarch64-py3.10-gcc11-inductor-benchmarks | ||||
|             runner: linux.arm64.2xlarge | ||||
|     runs-on: [self-hosted, "${{ matrix.runner }}"] | ||||
|     env: | ||||
|       DOCKER_IMAGE_BASE: 308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/${{ matrix.docker-image-name }} | ||||
|  | ||||
							
								
								
									
										1
									
								
								.github/workflows/inductor-cu124.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										1
									
								
								.github/workflows/inductor-cu124.yml
									
									
									
									
										vendored
									
									
								
							| @ -9,6 +9,7 @@ on: | ||||
|     # Run every 4 hours during the week and every 12 hours on the weekend | ||||
|     - cron: 45 0,4,8,12,16,20 * * 1-5 | ||||
|     - cron: 45 4,12 * * 0,6 | ||||
|     - cron: 29 8 * * *  # about 1:29am PDT, for mem leak check and rerun disabled tests | ||||
|  | ||||
| concurrency: | ||||
|   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }} | ||||
|  | ||||
							
								
								
									
										106
									
								
								.github/workflows/inductor-perf-test-nightly-aarch64.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										106
									
								
								.github/workflows/inductor-perf-test-nightly-aarch64.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @ -0,0 +1,106 @@ | ||||
| name: inductor-perf-nightly-aarch64 | ||||
|  | ||||
| on: | ||||
|   schedule: | ||||
|     # - cron: 0 7 * * 1-6 | ||||
|     # - cron: 0 7 * * 0 | ||||
|     # Does not perform max_autotune on CPU, so skip the weekly run setup | ||||
|     - cron: 0 7 * * * | ||||
|   # NB: GitHub has an upper limit of 10 inputs here | ||||
|   workflow_dispatch: | ||||
|     inputs: | ||||
|       training: | ||||
|         # CPU for training is not typical, but leave the option open here | ||||
|         description: Run training (off by default)? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: false | ||||
|       inference: | ||||
|         description: Run inference (on by default)? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: true | ||||
|       default: | ||||
|         description: Run inductor_default? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: true | ||||
|       dynamic: | ||||
|         description: Run inductor_dynamic_shapes? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: false | ||||
|       aotinductor: | ||||
|         description: Run aot_inductor for inference? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: false | ||||
|       benchmark_configs: | ||||
|         description: The list of configs used the benchmark | ||||
|         required: false | ||||
|         type: string | ||||
|         default: inductor_huggingface_perf_cpu_aarch64,inductor_timm_perf_cpu_aarch64,inductor_torchbench_perf_cpu_aarch64 | ||||
|  | ||||
| concurrency: | ||||
|   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }} | ||||
|   cancel-in-progress: true | ||||
|  | ||||
| permissions: read-all | ||||
|  | ||||
| jobs: | ||||
|   linux-jammy-aarch64-py3_10-inductor-build: | ||||
|     name: linux-jammy-aarch64-py3.10-inductor | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     with: | ||||
|       build-environment: linux-jammy-aarch64-py3.10 | ||||
|       docker-image-name: pytorch-linux-jammy-aarch64-py3.10-gcc11-inductor-benchmarks | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "inductor_huggingface_perf_cpu_aarch64", shard: 1, num_shards: 3, runner: "linux.arm64.m7g.metal" }, | ||||
|           { config: "inductor_huggingface_perf_cpu_aarch64", shard: 2, num_shards: 3, runner: "linux.arm64.m7g.metal" }, | ||||
|           { config: "inductor_huggingface_perf_cpu_aarch64", shard: 3, num_shards: 3, runner: "linux.arm64.m7g.metal" }, | ||||
|           { config: "inductor_timm_perf_cpu_aarch64", shard: 1, num_shards: 5, runner: "linux.arm64.m7g.metal" }, | ||||
|           { config: "inductor_timm_perf_cpu_aarch64", shard: 2, num_shards: 5, runner: "linux.arm64.m7g.metal" }, | ||||
|           { config: "inductor_timm_perf_cpu_aarch64", shard: 3, num_shards: 5, runner: "linux.arm64.m7g.metal" }, | ||||
|           { config: "inductor_timm_perf_cpu_aarch64", shard: 4, num_shards: 5, runner: "linux.arm64.m7g.metal" }, | ||||
|           { config: "inductor_timm_perf_cpu_aarch64", shard: 5, num_shards: 5, runner: "linux.arm64.m7g.metal" }, | ||||
|           { config: "inductor_torchbench_perf_cpu_aarch64", shard: 1, num_shards: 4, runner: "linux.arm64.m7g.metal" }, | ||||
|           { config: "inductor_torchbench_perf_cpu_aarch64", shard: 2, num_shards: 4, runner: "linux.arm64.m7g.metal" }, | ||||
|           { config: "inductor_torchbench_perf_cpu_aarch64", shard: 3, num_shards: 4, runner: "linux.arm64.m7g.metal" }, | ||||
|           { config: "inductor_torchbench_perf_cpu_aarch64", shard: 4, num_shards: 4, runner: "linux.arm64.m7g.metal" }, | ||||
|         ]} | ||||
|       selected-test-configs: ${{ inputs.benchmark_configs }} | ||||
|     secrets: | ||||
|       HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }} | ||||
|  | ||||
|  | ||||
|   linux-jammy-aarch64-py3_10-inductor-test-nightly: | ||||
|     name: linux-jammy-aarch64-py3.10-inductor | ||||
|     uses: ./.github/workflows/_linux-test.yml | ||||
|     needs: linux-jammy-aarch64-py3_10-inductor-build | ||||
|     if: github.event.schedule == '0 7 * * *' | ||||
|     with: | ||||
|       build-environment: linux-jammy-aarch64-py3.10 | ||||
|       dashboard-tag: training-false-inference-true-default-true-dynamic-true-aotinductor-true | ||||
|       docker-image: ${{ needs.linux-jammy-aarch64-py3_10-inductor-build.outputs.docker-image }} | ||||
|       test-matrix: ${{ needs.linux-jammy-aarch64-py3_10-inductor-build.outputs.test-matrix }} | ||||
|       use-gha: anything-non-empty-to-use-gha | ||||
|       timeout-minutes: 720 | ||||
|     secrets: | ||||
|       HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }} | ||||
|  | ||||
|  | ||||
|   linux-jammy-aarch64-py3_10-inductor-test: | ||||
|     name: linux-jammy-aarch64-py3.10-inductor | ||||
|     uses: ./.github/workflows/_linux-test.yml | ||||
|     needs: linux-jammy-aarch64-py3_10-inductor-build | ||||
|     if: github.event_name == 'workflow_dispatch' | ||||
|     with: | ||||
|       build-environment: linux-jammy-aarch64-py3.10 | ||||
|       dashboard-tag: training-${{ inputs.training }}-inference-${{ inputs.inference }}-default-${{ inputs.default }}-dynamic-${{ inputs.dynamic }}-aotinductor-${{ inputs.aotinductor }} | ||||
|       docker-image: ${{ needs.linux-jammy-aarch64-py3_10-inductor-build.outputs.docker-image }} | ||||
|       test-matrix: ${{ needs.linux-jammy-aarch64-py3_10-inductor-build.outputs.test-matrix }} | ||||
|       use-gha: anything-non-empty-to-use-gha | ||||
|       timeout-minutes: 720 | ||||
|     secrets: | ||||
|       HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }} | ||||
							
								
								
									
										106
									
								
								.github/workflows/inductor-perf-test-nightly-x86.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										106
									
								
								.github/workflows/inductor-perf-test-nightly-x86.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @ -0,0 +1,106 @@ | ||||
| name: inductor-perf-nightly-x86 | ||||
|  | ||||
| on: | ||||
|   schedule: | ||||
|     # - cron: 0 7 * * 1-6 | ||||
|     # - cron: 0 7 * * 0 | ||||
|     # Does not perform max_autotune on CPU, so skip the weekly run setup | ||||
|     - cron: 0 7 * * * | ||||
|   # NB: GitHub has an upper limit of 10 inputs here | ||||
|   workflow_dispatch: | ||||
|     inputs: | ||||
|       training: | ||||
|         # CPU for training is not typical, but leave the option open here | ||||
|         description: Run training (off by default)? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: false | ||||
|       inference: | ||||
|         description: Run inference (on by default)? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: true | ||||
|       default: | ||||
|         description: Run inductor_default? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: true | ||||
|       dynamic: | ||||
|         description: Run inductor_dynamic_shapes? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: false | ||||
|       aotinductor: | ||||
|         description: Run aot_inductor for inference? | ||||
|         required: false | ||||
|         type: boolean | ||||
|         default: false | ||||
|       benchmark_configs: | ||||
|         description: The list of configs used the benchmark | ||||
|         required: false | ||||
|         type: string | ||||
|         default: inductor_huggingface_perf_cpu_x86,inductor_timm_perf_cpu_x86,inductor_torchbench_perf_cpu_x86 | ||||
|  | ||||
| concurrency: | ||||
|   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }} | ||||
|   cancel-in-progress: true | ||||
|  | ||||
| permissions: read-all | ||||
|  | ||||
| jobs: | ||||
|   linux-jammy-cpu-py3_8-gcc11-inductor-build: | ||||
|     name: linux-jammy-cpu-py3.8-gcc11-inductor | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     with: | ||||
|       build-environment: linux-jammy-py3.8-gcc11-build | ||||
|       docker-image-name: pytorch-linux-jammy-py3.8-gcc11-inductor-benchmarks | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "inductor_huggingface_perf_cpu_x86", shard: 1, num_shards: 3, runner: "linux.24xl.spr-metal" }, | ||||
|           { config: "inductor_huggingface_perf_cpu_x86", shard: 2, num_shards: 3, runner: "linux.24xl.spr-metal" }, | ||||
|           { config: "inductor_huggingface_perf_cpu_x86", shard: 3, num_shards: 3, runner: "linux.24xl.spr-metal" }, | ||||
|           { config: "inductor_timm_perf_cpu_x86", shard: 1, num_shards: 5, runner: "linux.24xl.spr-metal" }, | ||||
|           { config: "inductor_timm_perf_cpu_x86", shard: 2, num_shards: 5, runner: "linux.24xl.spr-metal" }, | ||||
|           { config: "inductor_timm_perf_cpu_x86", shard: 3, num_shards: 5, runner: "linux.24xl.spr-metal" }, | ||||
|           { config: "inductor_timm_perf_cpu_x86", shard: 4, num_shards: 5, runner: "linux.24xl.spr-metal" }, | ||||
|           { config: "inductor_timm_perf_cpu_x86", shard: 5, num_shards: 5, runner: "linux.24xl.spr-metal" }, | ||||
|           { config: "inductor_torchbench_perf_cpu_x86", shard: 1, num_shards: 4, runner: "linux.24xl.spr-metal" }, | ||||
|           { config: "inductor_torchbench_perf_cpu_x86", shard: 2, num_shards: 4, runner: "linux.24xl.spr-metal" }, | ||||
|           { config: "inductor_torchbench_perf_cpu_x86", shard: 3, num_shards: 4, runner: "linux.24xl.spr-metal" }, | ||||
|           { config: "inductor_torchbench_perf_cpu_x86", shard: 4, num_shards: 4, runner: "linux.24xl.spr-metal" }, | ||||
|         ]} | ||||
|       selected-test-configs: ${{ inputs.benchmark_configs }} | ||||
|     secrets: | ||||
|       HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }} | ||||
|  | ||||
|  | ||||
|   linux-jammy-cpu-py3_8-gcc11-inductor-test-nightly: | ||||
|     name: linux-jammy-cpu-py3.8-gcc11-inductor | ||||
|     uses: ./.github/workflows/_linux-test.yml | ||||
|     needs: linux-jammy-cpu-py3_8-gcc11-inductor-build | ||||
|     if: github.event.schedule == '0 7 * * *' | ||||
|     with: | ||||
|       build-environment: linux-jammy-py3.8-gcc11-build | ||||
|       dashboard-tag: training-false-inference-true-default-true-dynamic-true-aotinductor-true | ||||
|       docker-image: ${{ needs.linux-jammy-cpu-py3_8-gcc11-inductor-build.outputs.docker-image }} | ||||
|       test-matrix: ${{ needs.linux-jammy-cpu-py3_8-gcc11-inductor-build.outputs.test-matrix }} | ||||
|       use-gha: anything-non-empty-to-use-gha | ||||
|       timeout-minutes: 720 | ||||
|     secrets: | ||||
|       HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }} | ||||
|  | ||||
|  | ||||
|   linux-jammy-cpu-py3_8-gcc11-inductor-test: | ||||
|     name: linux-jammy-cpu-py3.8-gcc11-inductor | ||||
|     uses: ./.github/workflows/_linux-test.yml | ||||
|     needs: linux-jammy-cpu-py3_8-gcc11-inductor-build | ||||
|     if: github.event_name == 'workflow_dispatch' | ||||
|     with: | ||||
|       build-environment: linux-jammy-py3.8-gcc11-build | ||||
|       dashboard-tag: training-${{ inputs.training }}-inference-${{ inputs.inference }}-default-${{ inputs.default }}-dynamic-${{ inputs.dynamic }}-aotinductor-${{ inputs.aotinductor }} | ||||
|       docker-image: ${{ needs.linux-jammy-cpu-py3_8-gcc11-inductor-build.outputs.docker-image }} | ||||
|       test-matrix: ${{ needs.linux-jammy-cpu-py3_8-gcc11-inductor-build.outputs.test-matrix }} | ||||
|       use-gha: anything-non-empty-to-use-gha | ||||
|       timeout-minutes: 720 | ||||
|     secrets: | ||||
|       HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }} | ||||
							
								
								
									
										47
									
								
								.github/workflows/inductor-rocm.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										47
									
								
								.github/workflows/inductor-rocm.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @ -0,0 +1,47 @@ | ||||
| name: inductor-rocm | ||||
|  | ||||
| on: | ||||
|   schedule: | ||||
|     # We have several schedules so jobs can check github.event.schedule to activate only for a fraction of the runs. | ||||
|     # Also run less frequently on weekends. | ||||
|     - cron: 45 0,4,8,12,16,20 * * 1-5 | ||||
|     - cron: 45 4,12 * * 0,6 | ||||
|     - cron: 29 8 * * *  # about 1:29am PDT, for mem leak check and rerun disabled tests | ||||
|   push: | ||||
|     branches: | ||||
| #     - main | ||||
|       - release/* | ||||
|     tags: | ||||
|       - ciflow/inductor-rocm/* | ||||
|   workflow_dispatch: | ||||
|  | ||||
| concurrency: | ||||
|   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}-${{ github.event.schedule }} | ||||
|   cancel-in-progress: true | ||||
|  | ||||
| permissions: read-all | ||||
|  | ||||
| jobs: | ||||
|   linux-focal-rocm6_1-py3_8-inductor-build: | ||||
|     name: rocm6.1-py3.8-inductor | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     with: | ||||
|       build-environment: linux-focal-rocm6.1-py3.8 | ||||
|       docker-image-name: pytorch-linux-focal-rocm-n-py3 | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "inductor", shard: 1, num_shards: 2, runner: "linux.rocm.gpu.2" }, | ||||
|           { config: "inductor", shard: 2, num_shards: 2, runner: "linux.rocm.gpu.2" }, | ||||
|         ]} | ||||
|  | ||||
|   linux-focal-rocm6_1-py3_8-inductor-test: | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     name: rocm6.1-py3.8-inductor | ||||
|     uses: ./.github/workflows/_rocm-test.yml | ||||
|     needs: linux-focal-rocm6_1-py3_8-inductor-build | ||||
|     with: | ||||
|       build-environment: linux-focal-rocm6.1-py3.8 | ||||
|       docker-image: ${{ needs.linux-focal-rocm6_1-py3_8-inductor-build.outputs.docker-image }} | ||||
|       test-matrix:  ${{ needs.linux-focal-rocm6_1-py3_8-inductor-build.outputs.test-matrix }} | ||||
							
								
								
									
										24
									
								
								.github/workflows/inductor.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										24
									
								
								.github/workflows/inductor.yml
									
									
									
									
										vendored
									
									
								
							| @ -16,30 +16,6 @@ concurrency: | ||||
| permissions: read-all | ||||
|  | ||||
| jobs: | ||||
|   linux-focal-rocm6_1-py3_8-inductor-build: | ||||
|     name: rocm6.1-py3.8-inductor | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     with: | ||||
|       build-environment: linux-focal-rocm6.1-py3.8 | ||||
|       docker-image-name: pytorch-linux-focal-rocm-n-py3 | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "inductor", shard: 1, num_shards: 2, runner: "linux.rocm.gpu.2" }, | ||||
|           { config: "inductor", shard: 2, num_shards: 2, runner: "linux.rocm.gpu.2" }, | ||||
|         ]} | ||||
|  | ||||
|   linux-focal-rocm6_1-py3_8-inductor-test: | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     name: rocm6.1-py3.8-inductor | ||||
|     uses: ./.github/workflows/_rocm-test.yml | ||||
|     needs: linux-focal-rocm6_1-py3_8-inductor-build | ||||
|     with: | ||||
|       build-environment: linux-focal-rocm6.1-py3.8 | ||||
|       docker-image: ${{ needs.linux-focal-rocm6_1-py3_8-inductor-build.outputs.docker-image }} | ||||
|       test-matrix:  ${{ needs.linux-focal-rocm6_1-py3_8-inductor-build.outputs.test-matrix }} | ||||
|  | ||||
|   linux-focal-cuda12_1-py3_10-gcc9-inductor-build: | ||||
|     name: cuda12.1-py3.10-gcc9-sm86 | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|  | ||||
							
								
								
									
										14
									
								
								.github/workflows/lint.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										14
									
								
								.github/workflows/lint.yml
									
									
									
									
										vendored
									
									
								
							| @ -19,7 +19,7 @@ jobs: | ||||
|     uses: pytorch/test-infra/.github/workflows/linux_job.yml@main | ||||
|     with: | ||||
|       timeout: 120 | ||||
|       runner: linux.2xlarge | ||||
|       runner: amz2023.linux.2xlarge | ||||
|       docker-image: pytorch-linux-jammy-cuda11.8-cudnn9-py3.9-linter | ||||
|       # NB: A shallow checkout won't work here because calculate-docker-image requires a full checkout | ||||
|       # to run git rev-parse HEAD~:.ci/docker when a new image is needed | ||||
| @ -35,7 +35,7 @@ jobs: | ||||
|     uses: pytorch/test-infra/.github/workflows/linux_job.yml@main | ||||
|     with: | ||||
|       timeout: 120 | ||||
|       runner: linux.2xlarge | ||||
|       runner: amz2023.linux.2xlarge | ||||
|       docker-image: pytorch-linux-jammy-cuda11.8-cudnn9-py3.9-linter | ||||
|       # NB: A shallow checkout won't work here because calculate-docker-image requires a full checkout | ||||
|       # to run git rev-parse HEAD~:.ci/docker when a new image is needed | ||||
| @ -49,7 +49,7 @@ jobs: | ||||
|   quick-checks: | ||||
|     uses: pytorch/test-infra/.github/workflows/linux_job.yml@main | ||||
|     with: | ||||
|       runner: linux.2xlarge | ||||
|       runner: amz2023.linux.2xlarge | ||||
|       docker-image: pytorch-linux-focal-linter | ||||
|       fetch-depth: 0 | ||||
|       ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} | ||||
| @ -83,7 +83,7 @@ jobs: | ||||
|  | ||||
|   pr-sanity-checks: | ||||
|     name: pr-sanity-checks | ||||
|     runs-on: [self-hosted, linux.large] | ||||
|     runs-on: [self-hosted, amz2023.linux.large] | ||||
|     # Only run this on pull requests. This check is simple enough to be done without a Docker image | ||||
|     if: github.event_name == 'pull_request' && !contains(github.event.pull_request.labels.*.name, 'skip-pr-sanity-checks') | ||||
|     steps: | ||||
| @ -103,7 +103,7 @@ jobs: | ||||
|   workflow-checks: | ||||
|     uses: pytorch/test-infra/.github/workflows/linux_job.yml@main | ||||
|     with: | ||||
|       runner: linux.2xlarge | ||||
|       runner: amz2023.linux.2xlarge | ||||
|       docker-image: pytorch-linux-focal-linter | ||||
|       fetch-depth: -1 | ||||
|       submodules: true | ||||
| @ -139,7 +139,7 @@ jobs: | ||||
|   toc: | ||||
|     uses: pytorch/test-infra/.github/workflows/linux_job.yml@main | ||||
|     with: | ||||
|       runner: linux.2xlarge | ||||
|       runner: amz2023.linux.2xlarge | ||||
|       docker-image: pytorch-linux-focal-linter | ||||
|       fetch-depth: 0 | ||||
|       ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} | ||||
| @ -177,7 +177,7 @@ jobs: | ||||
|     if: ${{ github.repository == 'pytorch/pytorch' }} | ||||
|     uses: pytorch/test-infra/.github/workflows/linux_job.yml@main | ||||
|     with: | ||||
|       runner: linux.2xlarge | ||||
|       runner: amz2023.linux.2xlarge | ||||
|       docker-image: pytorch-linux-focal-linter | ||||
|       fetch-depth: 0 | ||||
|       ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} | ||||
|  | ||||
							
								
								
									
										2
									
								
								.github/workflows/mac-mps.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										2
									
								
								.github/workflows/mac-mps.yml
									
									
									
									
										vendored
									
									
								
							| @ -28,7 +28,7 @@ jobs: | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "mps", shard: 1, num_shards: 1, runner: "macos-m1-13" }, | ||||
|           { config: "mps", shard: 1, num_shards: 1, runner: "macos-m2-14" }, | ||||
|           { config: "mps", shard: 1, num_shards: 1, runner: "macos-m1-14" }, | ||||
|         ]} | ||||
|  | ||||
|   macos-py3-arm64-mps-test: | ||||
|  | ||||
							
								
								
									
										5
									
								
								.github/workflows/periodic.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										5
									
								
								.github/workflows/periodic.yml
									
									
									
									
										vendored
									
									
								
							| @ -277,8 +277,9 @@ jobs: | ||||
|       docker-image-name: pytorch-linux-focal-rocm-n-py3 | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "distributed", shard: 1, num_shards: 2, runner: "linux.rocm.gpu" }, | ||||
|           { config: "distributed", shard: 2, num_shards: 2, runner: "linux.rocm.gpu" }, | ||||
|           { config: "distributed", shard: 1, num_shards: 3, runner: "linux.rocm.gpu" }, | ||||
|           { config: "distributed", shard: 2, num_shards: 3, runner: "linux.rocm.gpu" }, | ||||
|           { config: "distributed", shard: 3, num_shards: 3, runner: "linux.rocm.gpu" }, | ||||
|         ]} | ||||
|  | ||||
|   linux-focal-rocm6_1-py3_8-test: | ||||
|  | ||||
							
								
								
									
										182
									
								
								.github/workflows/pull.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										182
									
								
								.github/workflows/pull.yml
									
									
									
									
										vendored
									
									
								
							| @ -48,20 +48,20 @@ jobs: | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" | ||||
|       build-environment: linux-jammy-py3.8-gcc11 | ||||
|       docker-image-name: pytorch-linux-jammy-py3.8-gcc11 | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "default", shard: 2, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "default", shard: 3, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "default", shard: 4, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "docs_test", shard: 1, num_shards: 1,  runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "jit_legacy", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "backwards_compat", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "distributed", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "distributed", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "default", shard: 1, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|           { config: "default", shard: 2, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|           { config: "default", shard: 3, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|           { config: "default", shard: 4, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|           { config: "docs_test", shard: 1, num_shards: 1,  runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|           { config: "jit_legacy", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|           { config: "backwards_compat", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|           { config: "distributed", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|           { config: "distributed", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|         ]} | ||||
|  | ||||
|   linux-jammy-py3_8-gcc11-test: | ||||
| @ -88,7 +88,7 @@ jobs: | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" | ||||
|       build-environment: linux-jammy-py3.8-gcc11-no-ops | ||||
|       docker-image-name: pytorch-linux-jammy-py3.8-gcc11 | ||||
|       test-matrix: | | ||||
| @ -101,7 +101,7 @@ jobs: | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" | ||||
|       build-environment: linux-jammy-py3.8-gcc11-pch | ||||
|       docker-image-name: pytorch-linux-jammy-py3.8-gcc11 | ||||
|       test-matrix: | | ||||
| @ -115,17 +115,17 @@ jobs: | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" | ||||
|       build-environment: linux-jammy-py3.10-clang15-asan | ||||
|       docker-image-name: pytorch-linux-jammy-py3-clang15-asan | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 6, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" }, | ||||
|           { config: "default", shard: 2, num_shards: 6, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" }, | ||||
|           { config: "default", shard: 3, num_shards: 6, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" }, | ||||
|           { config: "default", shard: 4, num_shards: 6, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" }, | ||||
|           { config: "default", shard: 5, num_shards: 6, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" }, | ||||
|           { config: "default", shard: 6, num_shards: 6, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" }, | ||||
|           { config: "default", shard: 1, num_shards: 6, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.4xlarge" }, | ||||
|           { config: "default", shard: 2, num_shards: 6, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.4xlarge" }, | ||||
|           { config: "default", shard: 3, num_shards: 6, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.4xlarge" }, | ||||
|           { config: "default", shard: 4, num_shards: 6, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.4xlarge" }, | ||||
|           { config: "default", shard: 5, num_shards: 6, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.4xlarge" }, | ||||
|           { config: "default", shard: 6, num_shards: 6, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.4xlarge" }, | ||||
|         ]} | ||||
|       sync-tag: asan-build | ||||
|  | ||||
| @ -147,13 +147,13 @@ jobs: | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" | ||||
|       build-environment: linux-focal-py3.8-clang10-onnx | ||||
|       docker-image-name: pytorch-linux-focal-py3-clang10-onnx | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "default", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "default", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|           { config: "default", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|         ]} | ||||
|  | ||||
|   linux-focal-py3_8-clang10-onnx-test: | ||||
| @ -172,20 +172,20 @@ jobs: | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" | ||||
|       build-environment: linux-focal-py3.8-clang10 | ||||
|       docker-image-name: pytorch-linux-focal-py3.8-clang10 | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "default", shard: 2, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "default", shard: 3, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "default", shard: 4, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "crossref", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "crossref", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "dynamo", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "dynamo", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "dynamo", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "default", shard: 1, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|           { config: "default", shard: 2, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|           { config: "default", shard: 3, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|           { config: "default", shard: 4, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|           { config: "crossref", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|           { config: "crossref", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|           { config: "dynamo", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|           { config: "dynamo", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|           { config: "dynamo", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|         ]} | ||||
|   linux-focal-py3_8-clang10-test: | ||||
|     name: linux-focal-py3.8-clang10 | ||||
| @ -203,20 +203,20 @@ jobs: | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" | ||||
|       build-environment: linux-focal-py3.11-clang10 | ||||
|       docker-image-name: pytorch-linux-focal-py3.11-clang10 | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "default", shard: 2, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "default", shard: 3, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "default", shard: 4, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "crossref", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "crossref", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "dynamo", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "dynamo", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "dynamo", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "default", shard: 1, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|           { config: "default", shard: 2, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|           { config: "default", shard: 3, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|           { config: "default", shard: 4, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|           { config: "crossref", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|           { config: "crossref", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|           { config: "dynamo", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|           { config: "dynamo", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|           { config: "dynamo", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|         ]} | ||||
|  | ||||
|   linux-focal-py3_11-clang10-test: | ||||
| @ -235,18 +235,18 @@ jobs: | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" | ||||
|       build-environment: linux-focal-py3.12-clang10 | ||||
|       docker-image-name: pytorch-linux-focal-py3.12-clang10 | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "default", shard: 2, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "default", shard: 3, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "default", shard: 4, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "dynamo", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "dynamo", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "dynamo", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "default", shard: 1, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|           { config: "default", shard: 2, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|           { config: "default", shard: 3, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|           { config: "default", shard: 4, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|           { config: "dynamo", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|           { config: "dynamo", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|           { config: "dynamo", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|         ]} | ||||
|  | ||||
|   linux-focal-py3_12-clang10-test: | ||||
| @ -264,7 +264,7 @@ jobs: | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" | ||||
|       build-environment: linux-focal-cuda11.8-py3.10-gcc9 | ||||
|       docker-image-name: pytorch-linux-focal-cuda11.8-cudnn9-py3-gcc9 | ||||
|       test-matrix: | | ||||
| @ -291,16 +291,16 @@ jobs: | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" | ||||
|       build-environment: linux-focal-cuda12.1-py3.10-gcc9 | ||||
|       docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9 | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 2, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 3, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 4, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 5, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 1, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 2, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 3, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 4, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 5, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.4xlarge.nvidia.gpu" }, | ||||
|         ]} | ||||
|  | ||||
|   linux-focal-cuda12_1-py3_10-gcc9-test: | ||||
| @ -320,7 +320,7 @@ jobs: | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" | ||||
|       build-environment: linux-jammy-py3-clang12-mobile-build | ||||
|       docker-image-name: pytorch-linux-jammy-py3-clang15-asan | ||||
|       build-generates-artifacts: false | ||||
| @ -334,7 +334,7 @@ jobs: | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" | ||||
|       build-environment: linux-jammy-cuda11.8-cudnn9-py3.8-clang12 | ||||
|       docker-image-name: pytorch-linux-jammy-cuda11.8-cudnn9-py3.8-clang12 | ||||
|       test-matrix: | | ||||
| @ -347,7 +347,7 @@ jobs: | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" | ||||
|       build-environment: linux-focal-py3-clang9-mobile-custom-build-static | ||||
|       docker-image-name: pytorch-linux-focal-py3-clang9-android-ndk-r21e | ||||
|       build-generates-artifacts: false | ||||
| @ -361,12 +361,12 @@ jobs: | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" | ||||
|       build-environment: linux-focal-py3.8-clang9-xla | ||||
|       docker-image-name: 308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/xla_base:v1.1-lite | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "xla", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.12xlarge" }, | ||||
|           { config: "xla", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.12xlarge" }, | ||||
|         ]} | ||||
|  | ||||
|   linux-focal-py3_8-clang9-xla-test: | ||||
| @ -401,13 +401,13 @@ jobs: | ||||
|     uses: ./.github/workflows/_bazel-build-test.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}linux.large" | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.large" | ||||
|       build-environment: linux-focal-cuda12.1-py3.10-gcc9-bazel-test | ||||
|       docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9 | ||||
|       cuda-version: cpu | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" }, | ||||
|           { config: "default", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.4xlarge" }, | ||||
|         ]} | ||||
|  | ||||
|   linux-focal-cuda12_1-py3_10-gcc9-bazel-test: | ||||
| @ -415,13 +415,13 @@ jobs: | ||||
|     uses: ./.github/workflows/_bazel-build-test.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}linux.large" | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.large" | ||||
|       build-environment: linux-focal-cuda12.1-py3.10-gcc9-bazel-test | ||||
|       docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9 | ||||
|       cuda-version: "12.1" | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.4xlarge.nvidia.gpu" }, | ||||
|         ]} | ||||
|  | ||||
|   linux-focal-cuda12_4-py3_10-gcc9-bazel-test: | ||||
| @ -429,13 +429,13 @@ jobs: | ||||
|     uses: ./.github/workflows/_bazel-build-test.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}linux.large" | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.large" | ||||
|       build-environment: linux-focal-cuda12.4-py3.10-gcc9-bazel-test | ||||
|       docker-image-name: pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9 | ||||
|       cuda-version: "12.4" | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.4xlarge.nvidia.gpu" }, | ||||
|         ]} | ||||
|  | ||||
|   linux-focal-py3-clang9-android-ndk-r21e-gradle-custom-build-single: | ||||
| @ -465,7 +465,7 @@ jobs: | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" | ||||
|       build-environment: linux-jammy-py3.8-gcc111-mobile-lightweight-dispatch-build | ||||
|       docker-image-name: pytorch-linux-jammy-py3.8-gcc11 | ||||
|       build-generates-artifacts: false | ||||
| @ -481,7 +481,7 @@ jobs: | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" | ||||
|       build-environment: linux-focal-rocm6.1-py3.8 | ||||
|       docker-image-name: pytorch-linux-focal-rocm-n-py3 | ||||
|       sync-tag: rocm-build | ||||
| @ -497,17 +497,17 @@ jobs: | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" | ||||
|       build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm86 | ||||
|       docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9 | ||||
|       cuda-arch-list: 8.6 | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 2, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 3, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 4, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 5, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 1, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.g5.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 2, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.g5.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 3, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.g5.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 4, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.g5.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 5, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.g5.4xlarge.nvidia.gpu" }, | ||||
|         ]} | ||||
|  | ||||
|   linux-focal-cuda12_1-py3_10-gcc9-sm86-test: | ||||
| @ -526,12 +526,12 @@ jobs: | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" | ||||
|       build-environment: linux-jammy-py3-clang12-executorch | ||||
|       docker-image-name: pytorch-linux-jammy-py3-clang12-executorch | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "executorch", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }, | ||||
|           { config: "executorch", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|         ]} | ||||
|  | ||||
|   linux-jammy-py3-clang12-executorch-test: | ||||
| @ -548,17 +548,17 @@ jobs: | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" | ||||
|       use_split_build: true | ||||
|       build-environment: linux-focal-cuda12.1-py3.10-gcc9 | ||||
|       docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9 | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 2, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 3, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 4, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 5, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 1, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 2, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 3, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 4, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 5, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.4xlarge.nvidia.gpu" }, | ||||
|         ]} | ||||
|  | ||||
|   linux-focal-cuda12_1-py3_10-gcc9-experimental-split-build-test: | ||||
| @ -576,18 +576,20 @@ jobs: | ||||
|   linux-focal-py3_12-clang10-experimental-split-build: | ||||
|     name: linux-focal-py3.12-clang10-experimental-split-build | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" | ||||
|       use_split_build: True | ||||
|       build-environment: linux-focal-py3.12-clang10 | ||||
|       docker-image-name: pytorch-linux-focal-py3.12-clang10 | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 3, runner: "linux.2xlarge" }, | ||||
|           { config: "default", shard: 2, num_shards: 3, runner: "linux.2xlarge" }, | ||||
|           { config: "default", shard: 3, num_shards: 3, runner: "linux.2xlarge" }, | ||||
|           { config: "dynamo", shard: 1, num_shards: 3, runner: "linux.2xlarge" }, | ||||
|           { config: "dynamo", shard: 2, num_shards: 3, runner: "linux.2xlarge" }, | ||||
|           { config: "dynamo", shard: 3, num_shards: 3, runner: "linux.2xlarge" }, | ||||
|           { config: "default", shard: 1, num_shards: 3, runner: "amz2023.linux.2xlarge" }, | ||||
|           { config: "default", shard: 2, num_shards: 3, runner: "amz2023.linux.2xlarge" }, | ||||
|           { config: "default", shard: 3, num_shards: 3, runner: "amz2023.linux.2xlarge" }, | ||||
|           { config: "dynamo", shard: 1, num_shards: 3, runner: "amz2023.linux.2xlarge" }, | ||||
|           { config: "dynamo", shard: 2, num_shards: 3, runner: "amz2023.linux.2xlarge" }, | ||||
|           { config: "dynamo", shard: 3, num_shards: 3, runner: "amz2023.linux.2xlarge" }, | ||||
|         ]} | ||||
|   linux-focal-py3_12-clang10-experimental-split-build-test: | ||||
|     name: linux-focal-py3.12-clang10-experimental-split-build | ||||
|  | ||||
							
								
								
									
										8
									
								
								.github/workflows/rocm.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										8
									
								
								.github/workflows/rocm.yml
									
									
									
									
										vendored
									
									
								
							| @ -3,12 +3,18 @@ name: rocm | ||||
| on: | ||||
|   push: | ||||
|     branches: | ||||
|       - main | ||||
| #     - main | ||||
|       - release/* | ||||
|     tags: | ||||
|       - ciflow/rocm/* | ||||
|   workflow_dispatch: | ||||
|   schedule: | ||||
|     # We have several schedules so jobs can check github.event.schedule to activate only for a fraction of the runs. | ||||
|     # Also run less frequently on weekends. | ||||
|     - cron: 45 0,8,16 * * 1-5 | ||||
|     - cron: 45 4 * * 0,6 | ||||
|     - cron: 45 4,12,20 * * 1-5 | ||||
|     - cron: 45 12 * * 0,6 | ||||
|     - cron: 29 8 * * *  # about 1:29am PDT | ||||
|  | ||||
| concurrency: | ||||
|  | ||||
							
								
								
									
										8
									
								
								.github/workflows/slow.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										8
									
								
								.github/workflows/slow.yml
									
									
									
									
										vendored
									
									
								
							| @ -152,14 +152,14 @@ jobs: | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" | ||||
|       build-environment: linux-jammy-py3.10-clang15-asan | ||||
|       docker-image-name: pytorch-linux-jammy-py3-clang15-asan | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "slow", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" }, | ||||
|           { config: "slow", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" }, | ||||
|           { config: "slow", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" }, | ||||
|           { config: "slow", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.4xlarge" }, | ||||
|           { config: "slow", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.4xlarge" }, | ||||
|           { config: "slow", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.4xlarge" }, | ||||
|         ]} | ||||
|       sync-tag: asan-build | ||||
|  | ||||
|  | ||||
							
								
								
									
										52
									
								
								.github/workflows/trunk.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										52
									
								
								.github/workflows/trunk.yml
									
									
									
									
										vendored
									
									
								
							| @ -46,17 +46,19 @@ jobs: | ||||
|   linux-focal-cuda12_4-py3_10-gcc9-sm86-build: | ||||
|     name: linux-focal-cuda12.4-py3.10-gcc9-sm86 | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" | ||||
|       build-environment: linux-focal-cuda12.4-py3.10-gcc9-sm86 | ||||
|       docker-image-name: pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9 | ||||
|       cuda-arch-list: 8.6 | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 5, runner: "linux.g5.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 2, num_shards: 5, runner: "linux.g5.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 3, num_shards: 5, runner: "linux.g5.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 4, num_shards: 5, runner: "linux.g5.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 5, num_shards: 5, runner: "linux.g5.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 1, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.g5.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 2, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.g5.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 3, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.g5.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 4, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.g5.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 5, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.g5.4xlarge.nvidia.gpu" }, | ||||
|         ]} | ||||
|  | ||||
|   linux-focal-cuda12_4-py3_10-gcc9-sm86-test: | ||||
| @ -73,11 +75,12 @@ jobs: | ||||
|   libtorch-linux-focal-cuda12_1-py3_7-gcc9-debug-build: | ||||
|     name: libtorch-linux-focal-cuda12.1-py3.7-gcc9-debug | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       build-environment: libtorch-linux-focal-cuda12.1-py3.7-gcc9 | ||||
|       docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9 | ||||
|       build-generates-artifacts: false | ||||
|       runner: linux.4xlarge | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.4xlarge" | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 1 }, | ||||
| @ -87,7 +90,9 @@ jobs: | ||||
|   linux-focal-cuda12_1-py3_10-gcc9-no-ops-build: | ||||
|     name: linux-focal-cuda12.1-py3.10-gcc9-no-ops | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" | ||||
|       build-environment: linux-focal-cuda12.1-py3.10-gcc9-no-ops | ||||
|       docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9 | ||||
|       test-matrix: | | ||||
| @ -98,11 +103,12 @@ jobs: | ||||
|   libtorch-linux-focal-cuda12_4-py3_7-gcc9-debug-build: | ||||
|     name: libtorch-linux-focal-cuda12.4-py3.7-gcc9-debug | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       build-environment: libtorch-linux-focal-cuda12.4-py3.7-gcc9 | ||||
|       docker-image-name: pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9 | ||||
|       build-generates-artifacts: false | ||||
|       runner: linux.4xlarge | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.4xlarge" | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 1 }, | ||||
| @ -112,7 +118,9 @@ jobs: | ||||
|   linux-focal-cuda12_4-py3_10-gcc9-no-ops-build: | ||||
|     name: linux-focal-cuda12.4-py3.10-gcc9-no-ops | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" | ||||
|       build-environment: linux-focal-cuda12.4-py3.10-gcc9-no-ops | ||||
|       docker-image-name: pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9 | ||||
|       test-matrix: | | ||||
| @ -128,7 +136,7 @@ jobs: | ||||
|       docker-image-name: pytorch-linux-focal-py3-clang9-android-ndk-r21e | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 1, runner: "linux.2xlarge" }, | ||||
|           { config: "default", shard: 1, num_shards: 1, runner: "amz2023.linux.2xlarge" }, | ||||
|         ]} | ||||
|  | ||||
|   macos-py3-arm64-build: | ||||
| @ -217,7 +225,7 @@ jobs: | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" | ||||
|       build-environment: linux-focal-rocm6.1-py3.8 | ||||
|       docker-image-name: pytorch-linux-focal-rocm-n-py3 | ||||
|       sync-tag: rocm-build | ||||
| @ -246,20 +254,22 @@ jobs: | ||||
|   linux-focal-cuda12_4-py3_10-gcc9-experimental-split-build: | ||||
|     name: linux-focal-cuda12.4-py3.10-gcc9-experimental-split-build | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" | ||||
|       use_split_build: true | ||||
|       build-environment: linux-focal-cuda12.4-py3.10-gcc9 | ||||
|       docker-image-name: pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9 | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "nogpu_AVX512", shard: 1, num_shards: 1, runner: "linux.2xlarge" }, | ||||
|           { config: "nogpu_NO_AVX2", shard: 1, num_shards: 1, runner: "linux.2xlarge" }, | ||||
|           { config: "jit_legacy", shard: 1, num_shards: 1, runner: "linux.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 1, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 2, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 3, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 4, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 5, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" }, | ||||
|           { config: "nogpu_AVX512", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|           { config: "nogpu_NO_AVX2", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" }, | ||||
|           { config: "jit_legacy", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 1, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 2, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 3, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 4, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.4xlarge.nvidia.gpu" }, | ||||
|           { config: "default", shard: 5, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.4xlarge.nvidia.gpu" }, | ||||
|         ]} | ||||
|  | ||||
|   linux-focal-cuda12_4-py3_10-gcc9-experimental-split-build-test: | ||||
| @ -276,15 +286,17 @@ jobs: | ||||
|   linux-focal-cuda11_8-py3_10-gcc9-experimental-split-build: | ||||
|     name: linux-focal-cuda11.8-py3.10-gcc9-experimental-split-build | ||||
|     uses: ./.github/workflows/_linux-build.yml | ||||
|     needs: get-label-type | ||||
|     with: | ||||
|       runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.2xlarge" | ||||
|       use_split_build: true | ||||
|       build-environment: linux-focal-cuda11.8-py3.10-gcc9 | ||||
|       docker-image-name: pytorch-linux-focal-cuda11.8-cudnn9-py3-gcc9 | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "distributed", shard: 1, num_shards: 3, runner: "linux.8xlarge.nvidia.gpu" }, | ||||
|           { config: "distributed", shard: 2, num_shards: 3, runner: "linux.8xlarge.nvidia.gpu" }, | ||||
|           { config: "distributed", shard: 3, num_shards: 3, runner: "linux.8xlarge.nvidia.gpu" }, | ||||
|           { config: "distributed", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.8xlarge.nvidia.gpu" }, | ||||
|           { config: "distributed", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.8xlarge.nvidia.gpu" }, | ||||
|           { config: "distributed", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}amz2023.linux.8xlarge.nvidia.gpu" }, | ||||
|         ]} | ||||
|  | ||||
|   linux-focal-cuda11_8-py3_10-gcc9-experimental-split-build-test: | ||||
|  | ||||
| @ -2,7 +2,7 @@ name: Upload torch dynamo performance stats | ||||
|  | ||||
| on: | ||||
|   workflow_run: | ||||
|     workflows: [inductor-A100-perf-nightly] | ||||
|     workflows: [inductor-A100-perf-nightly, inductor-perf-nightly-aarch64, inductor-perf-nightly-x86] | ||||
|     types: | ||||
|       - completed | ||||
|  | ||||
|  | ||||
							
								
								
									
										2
									
								
								.github/workflows/xpu.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										2
									
								
								.github/workflows/xpu.yml
									
									
									
									
										vendored
									
									
								
							| @ -17,7 +17,7 @@ jobs: | ||||
|     with: | ||||
|       build-environment: linux-jammy-xpu-py3.8 | ||||
|       docker-image-name: pytorch-linux-jammy-xpu-2024.0-py3 | ||||
|       runner: linux.2xlarge | ||||
|       runner: linux.12xlarge | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 4, runner: "linux.idc.xpu" }, | ||||
|  | ||||
							
								
								
									
										244
									
								
								.lintrunner.toml
									
									
									
									
									
								
							
							
						
						
									
										244
									
								
								.lintrunner.toml
									
									
									
									
									
								
							| @ -417,6 +417,7 @@ exclude_patterns = [ | ||||
|     'aten/src/ATen/native/vulkan/api/vk_mem_alloc.h', | ||||
|     'test/cpp/jit/upgrader_models/*.ptl', | ||||
|     'test/cpp/jit/upgrader_models/*.ptl.ff', | ||||
|     '.ci/docker/common/install_rocm_drm.sh', | ||||
|     '.lintrunner.toml', | ||||
| ] | ||||
| command = [ | ||||
| @ -1191,196 +1192,6 @@ exclude_patterns = [ | ||||
|     'torch/_export/trace.py', | ||||
|     'torch/_export/verifier.py', | ||||
|     'torch/_vendor/**', | ||||
|     'torch/ao/__init__.py', | ||||
|     'torch/ao/nn/__init__.py', | ||||
|     'torch/ao/nn/intrinsic/__init__.py', | ||||
|     'torch/ao/nn/intrinsic/modules/__init__.py', | ||||
|     'torch/ao/nn/intrinsic/modules/fused.py', | ||||
|     'torch/ao/nn/intrinsic/qat/__init__.py', | ||||
|     'torch/ao/nn/intrinsic/qat/modules/__init__.py', | ||||
|     'torch/ao/nn/intrinsic/qat/modules/conv_fused.py', | ||||
|     'torch/ao/nn/intrinsic/qat/modules/linear_fused.py', | ||||
|     'torch/ao/nn/intrinsic/qat/modules/linear_relu.py', | ||||
|     'torch/ao/nn/intrinsic/quantized/__init__.py', | ||||
|     'torch/ao/nn/intrinsic/quantized/dynamic/__init__.py', | ||||
|     'torch/ao/nn/intrinsic/quantized/dynamic/modules/__init__.py', | ||||
|     'torch/ao/nn/intrinsic/quantized/dynamic/modules/linear_relu.py', | ||||
|     'torch/ao/nn/intrinsic/quantized/modules/__init__.py', | ||||
|     'torch/ao/nn/intrinsic/quantized/modules/bn_relu.py', | ||||
|     'torch/ao/nn/intrinsic/quantized/modules/conv_add.py', | ||||
|     'torch/ao/nn/intrinsic/quantized/modules/conv_relu.py', | ||||
|     'torch/ao/nn/intrinsic/quantized/modules/linear_relu.py', | ||||
|     'torch/ao/nn/qat/__init__.py', | ||||
|     'torch/ao/nn/qat/dynamic/__init__.py', | ||||
|     'torch/ao/nn/qat/dynamic/modules/__init__.py', | ||||
|     'torch/ao/nn/qat/dynamic/modules/linear.py', | ||||
|     'torch/ao/nn/qat/modules/__init__.py', | ||||
|     'torch/ao/nn/qat/modules/conv.py', | ||||
|     'torch/ao/nn/qat/modules/embedding_ops.py', | ||||
|     'torch/ao/nn/qat/modules/linear.py', | ||||
|     'torch/ao/nn/quantizable/__init__.py', | ||||
|     'torch/ao/nn/quantizable/modules/__init__.py', | ||||
|     'torch/ao/nn/quantizable/modules/activation.py', | ||||
|     'torch/ao/nn/quantizable/modules/rnn.py', | ||||
|     'torch/ao/nn/quantized/__init__.py', | ||||
|     'torch/ao/nn/quantized/dynamic/__init__.py', | ||||
|     'torch/ao/nn/quantized/dynamic/modules/__init__.py', | ||||
|     'torch/ao/nn/quantized/dynamic/modules/conv.py', | ||||
|     'torch/ao/nn/quantized/dynamic/modules/linear.py', | ||||
|     'torch/ao/nn/quantized/dynamic/modules/rnn.py', | ||||
|     'torch/ao/nn/quantized/functional.py', | ||||
|     'torch/ao/nn/quantized/modules/__init__.py', | ||||
|     'torch/ao/nn/quantized/modules/activation.py', | ||||
|     'torch/ao/nn/quantized/modules/batchnorm.py', | ||||
|     'torch/ao/nn/quantized/modules/conv.py', | ||||
|     'torch/ao/nn/quantized/modules/dropout.py', | ||||
|     'torch/ao/nn/quantized/modules/embedding_ops.py', | ||||
|     'torch/ao/nn/quantized/modules/functional_modules.py', | ||||
|     'torch/ao/nn/quantized/modules/linear.py', | ||||
|     'torch/ao/nn/quantized/modules/normalization.py', | ||||
|     'torch/ao/nn/quantized/modules/rnn.py', | ||||
|     'torch/ao/nn/quantized/modules/utils.py', | ||||
|     'torch/ao/nn/quantized/reference/__init__.py', | ||||
|     'torch/ao/nn/quantized/reference/modules/__init__.py', | ||||
|     'torch/ao/nn/quantized/reference/modules/conv.py', | ||||
|     'torch/ao/nn/quantized/reference/modules/linear.py', | ||||
|     'torch/ao/nn/quantized/reference/modules/rnn.py', | ||||
|     'torch/ao/nn/quantized/reference/modules/sparse.py', | ||||
|     'torch/ao/nn/quantized/reference/modules/utils.py', | ||||
|     'torch/ao/nn/sparse/__init__.py', | ||||
|     'torch/ao/nn/sparse/quantized/__init__.py', | ||||
|     'torch/ao/nn/sparse/quantized/dynamic/__init__.py', | ||||
|     'torch/ao/nn/sparse/quantized/dynamic/linear.py', | ||||
|     'torch/ao/nn/sparse/quantized/linear.py', | ||||
|     'torch/ao/nn/sparse/quantized/utils.py', | ||||
|     'torch/ao/ns/__init__.py', | ||||
|     'torch/ao/ns/_numeric_suite.py', | ||||
|     'torch/ao/ns/_numeric_suite_fx.py', | ||||
|     'torch/ao/ns/fx/__init__.py', | ||||
|     'torch/ao/ns/fx/graph_matcher.py', | ||||
|     'torch/ao/ns/fx/graph_passes.py', | ||||
|     'torch/ao/ns/fx/mappings.py', | ||||
|     'torch/ao/ns/fx/n_shadows_utils.py', | ||||
|     'torch/ao/ns/fx/ns_types.py', | ||||
|     'torch/ao/ns/fx/pattern_utils.py', | ||||
|     'torch/ao/ns/fx/qconfig_multi_mapping.py', | ||||
|     'torch/ao/ns/fx/utils.py', | ||||
|     'torch/ao/ns/fx/weight_utils.py', | ||||
|     'torch/ao/pruning/__init__.py', | ||||
|     'torch/ao/pruning/_experimental/__init__.py', | ||||
|     'torch/ao/pruning/_experimental/activation_sparsifier/__init__.py', | ||||
|     'torch/ao/pruning/_experimental/activation_sparsifier/activation_sparsifier.py', | ||||
|     'torch/ao/pruning/_experimental/data_scheduler/__init__.py', | ||||
|     'torch/ao/pruning/_experimental/data_scheduler/base_data_scheduler.py', | ||||
|     'torch/ao/pruning/_experimental/data_sparsifier/__init__.py', | ||||
|     'torch/ao/pruning/_experimental/data_sparsifier/base_data_sparsifier.py', | ||||
|     'torch/ao/pruning/_experimental/data_sparsifier/benchmarks/dlrm_utils.py', | ||||
|     'torch/ao/pruning/_experimental/data_sparsifier/benchmarks/evaluate_disk_savings.py', | ||||
|     'torch/ao/pruning/_experimental/data_sparsifier/benchmarks/evaluate_forward_time.py', | ||||
|     'torch/ao/pruning/_experimental/data_sparsifier/benchmarks/evaluate_model_metrics.py', | ||||
|     'torch/ao/pruning/_experimental/data_sparsifier/data_norm_sparsifier.py', | ||||
|     'torch/ao/pruning/_experimental/data_sparsifier/lightning/__init__.py', | ||||
|     'torch/ao/pruning/_experimental/data_sparsifier/lightning/callbacks/__init__.py', | ||||
|     'torch/ao/pruning/_experimental/data_sparsifier/lightning/callbacks/_data_sparstity_utils.py', | ||||
|     'torch/ao/pruning/_experimental/data_sparsifier/lightning/callbacks/data_sparsity.py', | ||||
|     'torch/ao/pruning/_experimental/data_sparsifier/lightning/tests/test_callbacks.py', | ||||
|     'torch/ao/pruning/_experimental/data_sparsifier/quantization_utils.py', | ||||
|     'torch/ao/pruning/_experimental/pruner/__init__.py', | ||||
|     'torch/ao/pruning/_experimental/pruner/base_structured_sparsifier.py', | ||||
|     'torch/ao/pruning/_experimental/pruner/lstm_saliency_pruner.py', | ||||
|     'torch/ao/pruning/_experimental/pruner/match_utils.py', | ||||
|     'torch/ao/pruning/_experimental/pruner/parametrization.py', | ||||
|     'torch/ao/pruning/_experimental/pruner/prune_functions.py', | ||||
|     'torch/ao/pruning/_experimental/pruner/saliency_pruner.py', | ||||
|     'torch/ao/pruning/_mappings.py', | ||||
|     'torch/ao/pruning/scheduler/__init__.py', | ||||
|     'torch/ao/pruning/scheduler/base_scheduler.py', | ||||
|     'torch/ao/pruning/scheduler/cubic_scheduler.py', | ||||
|     'torch/ao/pruning/scheduler/lambda_scheduler.py', | ||||
|     'torch/ao/pruning/sparsifier/__init__.py', | ||||
|     'torch/ao/pruning/sparsifier/base_sparsifier.py', | ||||
|     'torch/ao/pruning/sparsifier/nearly_diagonal_sparsifier.py', | ||||
|     'torch/ao/pruning/sparsifier/utils.py', | ||||
|     'torch/ao/pruning/sparsifier/weight_norm_sparsifier.py', | ||||
|     'torch/ao/quantization/__init__.py', | ||||
|     'torch/ao/quantization/_correct_bias.py', | ||||
|     'torch/ao/quantization/_equalize.py', | ||||
|     'torch/ao/quantization/_learnable_fake_quantize.py', | ||||
|     'torch/ao/quantization/backend_config/__init__.py', | ||||
|     'torch/ao/quantization/backend_config/_common_operator_config_utils.py', | ||||
|     'torch/ao/quantization/backend_config/_qnnpack_pt2e.py', | ||||
|     'torch/ao/quantization/backend_config/_x86_inductor_pt2e.py', | ||||
|     'torch/ao/quantization/backend_config/backend_config.py', | ||||
|     'torch/ao/quantization/backend_config/executorch.py', | ||||
|     'torch/ao/quantization/backend_config/fbgemm.py', | ||||
|     'torch/ao/quantization/backend_config/native.py', | ||||
|     'torch/ao/quantization/backend_config/observation_type.py', | ||||
|     'torch/ao/quantization/backend_config/onednn.py', | ||||
|     'torch/ao/quantization/backend_config/qnnpack.py', | ||||
|     'torch/ao/quantization/backend_config/tensorrt.py', | ||||
|     'torch/ao/quantization/backend_config/utils.py', | ||||
|     'torch/ao/quantization/backend_config/x86.py', | ||||
|     'torch/ao/quantization/experimental/APoT_tensor.py', | ||||
|     'torch/ao/quantization/experimental/apot_utils.py', | ||||
|     'torch/ao/quantization/experimental/fake_quantize.py', | ||||
|     'torch/ao/quantization/experimental/fake_quantize_function.py', | ||||
|     'torch/ao/quantization/experimental/linear.py', | ||||
|     'torch/ao/quantization/experimental/observer.py', | ||||
|     'torch/ao/quantization/experimental/qconfig.py', | ||||
|     'torch/ao/quantization/experimental/quantizer.py', | ||||
|     'torch/ao/quantization/fake_quantize.py', | ||||
|     'torch/ao/quantization/fuse_modules.py', | ||||
|     'torch/ao/quantization/fuser_method_mappings.py', | ||||
|     'torch/ao/quantization/fx/__init__.py', | ||||
|     'torch/ao/quantization/fx/_decomposed.py', | ||||
|     'torch/ao/quantization/fx/_equalize.py', | ||||
|     'torch/ao/quantization/fx/_lower_to_native_backend.py', | ||||
|     'torch/ao/quantization/fx/_model_report/__init__.py', | ||||
|     'torch/ao/quantization/fx/_model_report/detector.py', | ||||
|     'torch/ao/quantization/fx/_model_report/model_report.py', | ||||
|     'torch/ao/quantization/fx/_model_report/model_report_observer.py', | ||||
|     'torch/ao/quantization/fx/_model_report/model_report_visualizer.py', | ||||
|     'torch/ao/quantization/fx/convert.py', | ||||
|     'torch/ao/quantization/fx/custom_config.py', | ||||
|     'torch/ao/quantization/fx/fuse.py', | ||||
|     'torch/ao/quantization/fx/fuse_handler.py', | ||||
|     'torch/ao/quantization/fx/graph_module.py', | ||||
|     'torch/ao/quantization/fx/lower_to_fbgemm.py', | ||||
|     'torch/ao/quantization/fx/lower_to_qnnpack.py', | ||||
|     'torch/ao/quantization/fx/lstm_utils.py', | ||||
|     'torch/ao/quantization/fx/match_utils.py', | ||||
|     'torch/ao/quantization/fx/pattern_utils.py', | ||||
|     'torch/ao/quantization/fx/prepare.py', | ||||
|     'torch/ao/quantization/fx/qconfig_mapping_utils.py', | ||||
|     'torch/ao/quantization/fx/quantize_handler.py', | ||||
|     'torch/ao/quantization/fx/tracer.py', | ||||
|     'torch/ao/quantization/fx/utils.py', | ||||
|     'torch/ao/quantization/observer.py', | ||||
|     'torch/ao/quantization/pt2e/__init__.py', | ||||
|     'torch/ao/quantization/pt2e/_propagate_annotation.py', | ||||
|     'torch/ao/quantization/pt2e/graph_utils.py', | ||||
|     'torch/ao/quantization/pt2e/prepare.py', | ||||
|     'torch/ao/quantization/pt2e/qat_utils.py', | ||||
|     'torch/ao/quantization/pt2e/quantizer/__init__.py', | ||||
|     'torch/ao/quantization/pt2e/quantizer/composable_quantizer.py', | ||||
|     'torch/ao/quantization/pt2e/quantizer/embedding_quantizer.py', | ||||
|     'torch/ao/quantization/pt2e/quantizer/qnnpack_quantizer.py', | ||||
|     'torch/ao/quantization/pt2e/quantizer/quantizer.py', | ||||
|     'torch/ao/quantization/pt2e/quantizer/utils.py', | ||||
|     'torch/ao/quantization/pt2e/quantizer/x86_inductor_quantizer.py', | ||||
|     'torch/ao/quantization/pt2e/representation/__init__.py', | ||||
|     'torch/ao/quantization/pt2e/representation/rewrite.py', | ||||
|     'torch/ao/quantization/pt2e/utils.py', | ||||
|     'torch/ao/quantization/qconfig.py', | ||||
|     'torch/ao/quantization/qconfig_mapping.py', | ||||
|     'torch/ao/quantization/quant_type.py', | ||||
|     'torch/ao/quantization/quantization_mappings.py', | ||||
|     'torch/ao/quantization/quantize.py', | ||||
|     'torch/ao/quantization/quantize_fx.py', | ||||
|     'torch/ao/quantization/quantize_jit.py', | ||||
|     'torch/ao/quantization/quantize_pt2e.py', | ||||
|     'torch/ao/quantization/stubs.py', | ||||
|     'torch/ao/quantization/utils.py', | ||||
|     'torch/compiler/__init__.py', | ||||
|     'torch/contrib/__init__.py', | ||||
|     'torch/contrib/_tensorboard_vis.py', | ||||
| @ -1476,59 +1287,6 @@ exclude_patterns = [ | ||||
|     'torch/linalg/__init__.py', | ||||
|     'torch/monitor/__init__.py', | ||||
|     'torch/nested/__init__.py', | ||||
|     'torch/nn/intrinsic/__init__.py', | ||||
|     'torch/nn/intrinsic/modules/__init__.py', | ||||
|     'torch/nn/intrinsic/modules/fused.py', | ||||
|     'torch/nn/intrinsic/qat/__init__.py', | ||||
|     'torch/nn/intrinsic/qat/modules/__init__.py', | ||||
|     'torch/nn/intrinsic/qat/modules/conv_fused.py', | ||||
|     'torch/nn/intrinsic/qat/modules/linear_fused.py', | ||||
|     'torch/nn/intrinsic/qat/modules/linear_relu.py', | ||||
|     'torch/nn/intrinsic/quantized/__init__.py', | ||||
|     'torch/nn/intrinsic/quantized/dynamic/__init__.py', | ||||
|     'torch/nn/intrinsic/quantized/dynamic/modules/__init__.py', | ||||
|     'torch/nn/intrinsic/quantized/dynamic/modules/linear_relu.py', | ||||
|     'torch/nn/intrinsic/quantized/modules/__init__.py', | ||||
|     'torch/nn/intrinsic/quantized/modules/bn_relu.py', | ||||
|     'torch/nn/intrinsic/quantized/modules/conv_relu.py', | ||||
|     'torch/nn/intrinsic/quantized/modules/linear_relu.py', | ||||
|     'torch/nn/qat/__init__.py', | ||||
|     'torch/nn/qat/dynamic/__init__.py', | ||||
|     'torch/nn/qat/dynamic/modules/__init__.py', | ||||
|     'torch/nn/qat/dynamic/modules/linear.py', | ||||
|     'torch/nn/qat/modules/__init__.py', | ||||
|     'torch/nn/qat/modules/conv.py', | ||||
|     'torch/nn/qat/modules/embedding_ops.py', | ||||
|     'torch/nn/qat/modules/linear.py', | ||||
|     'torch/nn/quantizable/__init__.py', | ||||
|     'torch/nn/quantizable/modules/__init__.py', | ||||
|     'torch/nn/quantizable/modules/activation.py', | ||||
|     'torch/nn/quantizable/modules/rnn.py', | ||||
|     'torch/nn/quantized/__init__.py', | ||||
|     'torch/nn/quantized/_reference/__init__.py', | ||||
|     'torch/nn/quantized/_reference/modules/__init__.py', | ||||
|     'torch/nn/quantized/_reference/modules/conv.py', | ||||
|     'torch/nn/quantized/_reference/modules/linear.py', | ||||
|     'torch/nn/quantized/_reference/modules/rnn.py', | ||||
|     'torch/nn/quantized/_reference/modules/sparse.py', | ||||
|     'torch/nn/quantized/_reference/modules/utils.py', | ||||
|     'torch/nn/quantized/dynamic/__init__.py', | ||||
|     'torch/nn/quantized/dynamic/modules/__init__.py', | ||||
|     'torch/nn/quantized/dynamic/modules/conv.py', | ||||
|     'torch/nn/quantized/dynamic/modules/linear.py', | ||||
|     'torch/nn/quantized/dynamic/modules/rnn.py', | ||||
|     'torch/nn/quantized/functional.py', | ||||
|     'torch/nn/quantized/modules/__init__.py', | ||||
|     'torch/nn/quantized/modules/activation.py', | ||||
|     'torch/nn/quantized/modules/batchnorm.py', | ||||
|     'torch/nn/quantized/modules/conv.py', | ||||
|     'torch/nn/quantized/modules/dropout.py', | ||||
|     'torch/nn/quantized/modules/embedding_ops.py', | ||||
|     'torch/nn/quantized/modules/functional_modules.py', | ||||
|     'torch/nn/quantized/modules/linear.py', | ||||
|     'torch/nn/quantized/modules/normalization.py', | ||||
|     'torch/nn/quantized/modules/rnn.py', | ||||
|     'torch/nn/quantized/modules/utils.py', | ||||
|     'torch/signal/__init__.py', | ||||
|     'torch/signal/windows/__init__.py', | ||||
|     'torch/signal/windows/windows.py', | ||||
|  | ||||
| @ -1041,10 +1041,6 @@ if(NOT MSVC) | ||||
|  | ||||
|   append_cxx_flag_if_supported("-Wno-error=pedantic" CMAKE_CXX_FLAGS) | ||||
|   append_cxx_flag_if_supported("-Wno-error=old-style-cast" CMAKE_CXX_FLAGS) | ||||
|   append_cxx_flag_if_supported("-Wno-error=inconsistent-missing-override" | ||||
|                                CMAKE_CXX_FLAGS) | ||||
|   append_cxx_flag_if_supported( | ||||
|     "-Wno-error=inconsistent-missing-destructor-override" CMAKE_CXX_FLAGS) | ||||
|   append_cxx_flag_if_supported("-Wconstant-conversion" CMAKE_CXX_FLAGS) | ||||
|   append_cxx_flag_if_supported("-Wno-invalid-partial-specialization" | ||||
|                                CMAKE_CXX_FLAGS) | ||||
|  | ||||
| @ -11,6 +11,7 @@ aspects of contributing to PyTorch. | ||||
| <!-- toc --> | ||||
|  | ||||
| - [Developing PyTorch](#developing-pytorch) | ||||
|   - [Setup the development environment](#setup-the-development-environment) | ||||
|   - [Tips and Debugging](#tips-and-debugging) | ||||
| - [Nightly Checkout & Pull](#nightly-checkout--pull) | ||||
| - [Codebase structure](#codebase-structure) | ||||
| @ -64,8 +65,24 @@ aspects of contributing to PyTorch. | ||||
| <!-- tocstop --> | ||||
|  | ||||
| ## Developing PyTorch | ||||
|  | ||||
| Follow the instructions for [installing PyTorch from source](https://github.com/pytorch/pytorch#from-source). If you get stuck when developing PyTorch on your machine, check out the [tips and debugging](#tips-and-debugging) section below for common solutions. | ||||
|  | ||||
| ### Setup the development environment | ||||
|  | ||||
| First, you need to [fork the PyTorch project on GitHub](https://github.com/pytorch/pytorch/fork) and follow the instructions at [Connecting to GitHub with SSH](https://docs.github.com/en/authentication/connecting-to-github-with-ssh) to setup your SSH authentication credentials. | ||||
|  | ||||
| Then clone the PyTorch project and setup the development environment: | ||||
|  | ||||
| ```bash | ||||
| git clone git@github.com:<USERNAME>/pytorch.git | ||||
| cd pytorch | ||||
| git remote add upstream git@github.com:pytorch/pytorch.git | ||||
|  | ||||
| make setup-env  # or make setup-env-cuda for pre-built CUDA binaries | ||||
| conda activate pytorch-deps | ||||
| ``` | ||||
|  | ||||
| ### Tips and Debugging | ||||
|  | ||||
| * If you want to have no-op incremental rebuilds (which are fast), see [Make no-op build fast](#make-no-op-build-fast) below. | ||||
| @ -175,6 +192,13 @@ the regular environment parameters (`--name` or `--prefix`): | ||||
| conda activate my-env | ||||
| ``` | ||||
|  | ||||
| To install the nightly binaries built with CUDA, you can pass in the flag `--cuda`: | ||||
|  | ||||
| ```bash | ||||
| ./tools/nightly.py checkout -b my-nightly-branch --cuda | ||||
| conda activate pytorch-deps | ||||
| ``` | ||||
|  | ||||
| You can also use this tool to pull the nightly commits into the current branch: | ||||
|  | ||||
| ```bash | ||||
| @ -325,7 +349,7 @@ command runs tests such as `TestNN.test_BCELoss` and | ||||
| Install all prerequisites by running | ||||
|  | ||||
| ```bash | ||||
| make setup_lint | ||||
| make setup-lint | ||||
| ``` | ||||
|  | ||||
| You can now run the same linting steps that are used in CI locally via `make`: | ||||
|  | ||||
							
								
								
									
										22
									
								
								Makefile
									
									
									
									
									
								
							
							
						
						
									
										22
									
								
								Makefile
									
									
									
									
									
								
							| @ -1,6 +1,7 @@ | ||||
| # This makefile does nothing but delegating the actual building to cmake. | ||||
| PYTHON = python3 | ||||
| PIP = pip3 | ||||
| PIP = $(PYTHON) -m pip | ||||
| NIGHTLY_TOOL_OPTS := pull | ||||
|  | ||||
| all: | ||||
| 	@mkdir -p build && cd build && cmake .. $(shell $(PYTHON) ./scripts/get_python_cmake_flags.py) && $(MAKE) | ||||
| @ -22,10 +23,27 @@ linecount: | ||||
| 		echo "Cloc is not available on the machine. You can install cloc with " && \ | ||||
| 		echo "    sudo apt-get install cloc" | ||||
|  | ||||
| setup_lint: | ||||
| ensure-branch-clean: | ||||
| 	@if [ -n "$(shell git status --porcelain)" ]; then \ | ||||
| 		echo "Please commit or stash all changes before running this script"; \ | ||||
| 		exit 1; \ | ||||
| 	fi | ||||
|  | ||||
| setup-env: ensure-branch-clean | ||||
| 	$(PYTHON) tools/nightly.py $(NIGHTLY_TOOL_OPTS) | ||||
|  | ||||
| setup-env-cuda: | ||||
| 	$(MAKE) setup-env PYTHON="$(PYTHON)" NIGHTLY_TOOL_OPTS="$(NIGHTLY_TOOL_OPTS) --cuda" | ||||
|  | ||||
| setup_env: setup-env | ||||
| setup_env_cuda: setup-env-cuda | ||||
|  | ||||
| setup-lint: | ||||
| 	$(PIP) install lintrunner | ||||
| 	lintrunner init | ||||
|  | ||||
| setup_lint: setup-lint | ||||
|  | ||||
| lint: | ||||
| 	lintrunner | ||||
|  | ||||
|  | ||||
| @ -98,7 +98,7 @@ void CPUGeneratorImpl::set_current_seed(uint64_t seed) { | ||||
|  * Sets the offset of RNG state. | ||||
|  * See Note [Acquire lock when using random generators] | ||||
|  */ | ||||
| void CPUGeneratorImpl::set_offset(uint64_t offset) { | ||||
| void CPUGeneratorImpl::set_offset(uint64_t offset [[maybe_unused]]) { | ||||
|   TORCH_CHECK(false, "CPU Generator does not use offset"); | ||||
| } | ||||
|  | ||||
|  | ||||
| @ -73,6 +73,8 @@ class TORCH_API Context { | ||||
|       return at::detail::getPrivateUse1Hooks(); | ||||
|     } else if (device_type == at::kMTIA) { | ||||
|       return at::detail::getMTIAHooks(); | ||||
|     } else if (device_type == at::kHIP) { | ||||
|       return at::detail::getHIPHooks(); | ||||
|     } else { | ||||
|       AT_ERROR( | ||||
|           c10::DeviceTypeName(device_type), " device type not an accelerator."); | ||||
| @ -94,8 +96,22 @@ class TORCH_API Context { | ||||
|       AT_ERROR(c10::DeviceTypeName(device_type), " device type not enabled."); | ||||
|     } | ||||
|   } | ||||
|   static bool isPinnedPtr(const void* data) { | ||||
|     return detail::getCUDAHooks().isPinnedPtr(data); | ||||
|   bool isPinnedPtr( | ||||
|       const void* data, | ||||
|       std::optional<c10::DeviceType> device_type = std::nullopt) { | ||||
|     auto opt_device_type = | ||||
|         device_type.has_value() ? device_type.value() : at::getAccelerator(); | ||||
|     if (!opt_device_type.has_value() || // there is no accelerator | ||||
|         !at::isAccelerator( | ||||
|             opt_device_type.value())) { // passed device not an accelerator | ||||
|       return false; | ||||
|     } | ||||
|     return getAcceleratorHooksInterface(opt_device_type.value()) | ||||
|         .isPinnedPtr(data); | ||||
|   } | ||||
|   Allocator* getPinnedMemoryAllocator( | ||||
|       std::optional<c10::DeviceType> device_type = std::nullopt) { | ||||
|     return getAcceleratorHooksInterface(device_type).getPinnedMemoryAllocator(); | ||||
|   } | ||||
|   static bool hasOpenMP(); | ||||
|   static bool hasMKL(); | ||||
|  | ||||
| @ -295,7 +295,7 @@ DLManagedTensor* toDLPack(const Tensor& src) { | ||||
| } | ||||
|  | ||||
| Tensor fromDLPack(DLManagedTensor* src) { | ||||
|   auto deleter = [src](void* self) { | ||||
|   auto deleter = [src](void* self [[maybe_unused]]) { | ||||
|     if (src->deleter) { | ||||
|       src->deleter(src); | ||||
|     } | ||||
|  | ||||
| @ -2,7 +2,7 @@ | ||||
| #include <ATen/DeviceAccelerator.h> | ||||
| namespace at { | ||||
|  | ||||
| C10_API std::optional<DeviceType> getAccelerator(bool checked) { | ||||
| std::optional<c10::DeviceType> getAccelerator(bool checked) { | ||||
| #define DETECT_AND_ASSIGN_ACCELERATOR(device_name) \ | ||||
|   if (at::has##device_name()) {                    \ | ||||
|     device_type = k##device_name;                  \ | ||||
| @ -20,11 +20,13 @@ C10_API std::optional<DeviceType> getAccelerator(bool checked) { | ||||
|     // first. | ||||
|     return kPrivateUse1; | ||||
|   } | ||||
|   std::optional<DeviceType> device_type = std::nullopt; | ||||
|   std::optional<c10::DeviceType> device_type = std::nullopt; | ||||
|   bool is_accelerator_detected = false; | ||||
|   DETECT_AND_ASSIGN_ACCELERATOR(CUDA) | ||||
|   DETECT_AND_ASSIGN_ACCELERATOR(MTIA) | ||||
|   DETECT_AND_ASSIGN_ACCELERATOR(XPU) | ||||
|   DETECT_AND_ASSIGN_ACCELERATOR(HIP) | ||||
|   DETECT_AND_ASSIGN_ACCELERATOR(MPS) | ||||
|   if (checked) { | ||||
|     TORCH_CHECK( | ||||
|         device_type, "Cannot access accelerator device when none is available.") | ||||
| @ -34,4 +36,18 @@ C10_API std::optional<DeviceType> getAccelerator(bool checked) { | ||||
| #undef DETECT_AND_ASSIGN_ACCELERATOR | ||||
| } | ||||
|  | ||||
| bool isAccelerator(c10::DeviceType d) { | ||||
|   switch (d) { | ||||
|     case at::kCUDA: | ||||
|     case at::kMTIA: | ||||
|     case at::kXPU: | ||||
|     case at::kHIP: | ||||
|     case at::kMPS: | ||||
|     case at::kPrivateUse1: | ||||
|       return true; | ||||
|     default: | ||||
|       return false; | ||||
|   } | ||||
| } | ||||
|  | ||||
| } // namespace at | ||||
|  | ||||
| @ -13,9 +13,7 @@ | ||||
| // - It provides a set of common APIs as defined by AcceleratorHooksInterface | ||||
| // | ||||
| // As of today, accelerator devices are (in no particular order): | ||||
| // CUDA, MTIA, XPU, PrivateUse1 | ||||
| // We want to add once all the proper APIs are supported and tested: | ||||
| // HIP, MPS | ||||
| // CUDA, MTIA, XPU, HIP, MPS, PrivateUse1 | ||||
|  | ||||
| namespace at { | ||||
|  | ||||
| @ -24,4 +22,6 @@ namespace at { | ||||
| // When checked is true, the returned optional always has a value. | ||||
| TORCH_API std::optional<c10::DeviceType> getAccelerator(bool checked = false); | ||||
|  | ||||
| TORCH_API bool isAccelerator(c10::DeviceType d); | ||||
|  | ||||
| } // namespace at | ||||
|  | ||||
| @ -444,8 +444,7 @@ TensorBase empty_strided_symint_meta( | ||||
|     SymIntArrayRef stride, | ||||
|     std::optional<ScalarType> dtype_opt, | ||||
|     std::optional<Layout> layout_opt, | ||||
|     std::optional<Device> device_opt, | ||||
|     std::optional<bool> pin_memory_opt) { | ||||
|     std::optional<Device> device_opt) { | ||||
|   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device_or_default(device_opt).type() == DeviceType::Meta); | ||||
|   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided); | ||||
|  | ||||
| @ -462,8 +461,7 @@ TensorBase empty_strided_symint_meta( | ||||
|       stride, | ||||
|       optTypeMetaToScalarType(options.dtype_opt()), | ||||
|       options.layout_opt(), | ||||
|       options.device_opt(), | ||||
|       options.pinned_memory_opt()); | ||||
|       options.device_opt()); | ||||
| } | ||||
|  | ||||
| } // namespace at::detail | ||||
|  | ||||
| @ -156,8 +156,7 @@ TORCH_API TensorBase empty_strided_symint_meta( | ||||
|     SymIntArrayRef stride, | ||||
|     std::optional<ScalarType> dtype_opt, | ||||
|     std::optional<Layout> layout_opt, | ||||
|     std::optional<Device> device_opt, | ||||
|     std::optional<bool> pin_memory_opt); | ||||
|     std::optional<Device> device_opt); | ||||
|  | ||||
| TORCH_API TensorBase empty_strided_symint_meta( | ||||
|     SymIntArrayRef size, | ||||
|  | ||||
| @ -19,6 +19,7 @@ namespace at::jit { | ||||
| struct TemplateEnv { | ||||
|   TemplateEnv() = default; | ||||
|   TemplateEnv(TemplateEnv& parent) : parent(&parent) {} | ||||
|   TemplateEnv& operator==(const TemplateEnv& parent) = delete; | ||||
|  | ||||
|   using string_list = std::vector<std::string>; | ||||
|  | ||||
|  | ||||
| @ -70,7 +70,7 @@ static std::string getSchemaInputTypesString(const FunctionSchema& schema) { | ||||
|   return input_types.str(); | ||||
| } | ||||
|  | ||||
| std::string ClassType::getForwardPreHookErrorMessage(int pre_hook_idx) const { | ||||
| std::string ClassType::getForwardPreHookErrorMessage(size_t pre_hook_idx) const { | ||||
|   const std::string& pre_hook_name = forward_pre_hooks_[pre_hook_idx]->name(); | ||||
|   const FunctionSchema& forward_schema = getMethod("forward").getSchema(); | ||||
|   std::string input_types = getSchemaInputTypesString(forward_schema); | ||||
| @ -98,7 +98,7 @@ std::string ClassType::getForwardPreHookErrorMessage(int pre_hook_idx) const { | ||||
|   return return_string; | ||||
| } | ||||
|  | ||||
| std::string ClassType::getForwardHookErrorMessage(int hook_idx) const { | ||||
| std::string ClassType::getForwardHookErrorMessage(size_t hook_idx) const { | ||||
|   const std::string& hook_name = forward_hooks_[hook_idx]->name(); | ||||
|   const FunctionSchema& forward_schema = getMethod("forward").getSchema(); | ||||
|   std::string input_types = getSchemaInputTypesString(forward_schema); | ||||
| @ -190,7 +190,7 @@ static void checkForwardHookInputArguments( | ||||
| } | ||||
|  | ||||
| void ClassType::checkForwardPreHookSchema( | ||||
|     int pre_hook_idx, | ||||
|     size_t pre_hook_idx, | ||||
|     const FunctionSchema& pre_hook_schema) const { | ||||
|   const torch::jit::Function* pre_hook = forward_pre_hooks_[pre_hook_idx]; | ||||
|   std::string hook_id = | ||||
| @ -287,7 +287,7 @@ void ClassType::checkForwardPreHookSchema( | ||||
| } | ||||
|  | ||||
| void ClassType::checkForwardHookSchema( | ||||
|       int hook_idx, | ||||
|       size_t hook_idx, | ||||
|       const FunctionSchema& hook_schema) const { | ||||
|   const torch::jit::Function* hook = forward_hooks_[hook_idx]; | ||||
|   std::string hook_id = | ||||
| @ -451,8 +451,7 @@ bool ClassType::isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const { | ||||
|         return false; | ||||
|       } | ||||
|       if (!self_method->getSchema().isSubtypeOf( | ||||
|               // NOLINTNEXTLINE(bugprone-argument-comment) | ||||
|               schema, /*is_method=*/true, why_not)) { | ||||
|               schema, /*as_method=*/true, why_not)) { | ||||
|         if (why_not) { | ||||
|           *why_not << "Method on class '" << repr_str() | ||||
|                    << "' (1) is not compatible with interface '" | ||||
|  | ||||
| @ -341,10 +341,10 @@ struct TORCH_API ClassType : public NamedType { | ||||
|   const std::vector<torch::jit::Function*>& getForwardPreHooks() const; | ||||
|  | ||||
|   void checkForwardPreHookSchema( | ||||
|       int pre_hook_idx, | ||||
|       size_t pre_hook_idx, | ||||
|       const FunctionSchema& pre_hook_schema) const; | ||||
|   void checkForwardHookSchema( | ||||
|       int hook_idx, | ||||
|       size_t hook_idx, | ||||
|       const FunctionSchema& hook_schema) const; | ||||
|  | ||||
|   void addMethod(torch::jit::Function* method); | ||||
| @ -396,8 +396,8 @@ struct TORCH_API ClassType : public NamedType { | ||||
|   } | ||||
|  | ||||
|   void addAttribute(ClassAttribute classAttribute); | ||||
|   std::string getForwardPreHookErrorMessage(int pre_hook_idx) const; | ||||
|   std::string getForwardHookErrorMessage(int hook_idx) const; | ||||
|   std::string getForwardPreHookErrorMessage(size_t pre_hook_idx) const; | ||||
|   std::string getForwardHookErrorMessage(size_t hook_idx) const; | ||||
|  | ||||
|   // Mapping of attribute names -> their type. | ||||
|   // NOTE: this does not contain methods, which are stored in the module | ||||
|  | ||||
| @ -127,7 +127,7 @@ constexpr bool allowlist_contains(string_view allowlist, string_view item) { | ||||
|  | ||||
| // Returns true iff the given op name is on the allowlist | ||||
| // and should be registered | ||||
| constexpr bool op_allowlist_check(string_view op_name) { | ||||
| constexpr bool op_allowlist_check(string_view op_name [[maybe_unused]]) { | ||||
|   assert(op_name.find("::") != string_view::npos); | ||||
|   // Use assert() instead of throw() due to a gcc bug. See: | ||||
|   // https://stackoverflow.com/questions/34280729/throw-in-constexpr-function | ||||
|  | ||||
| @ -363,16 +363,16 @@ public: | ||||
|     return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, mask, | ||||
|                                                       0xFFFFFFFFFFFFFFFF)); | ||||
|   } | ||||
|   Vectorized<c10::complex<double>> operator<(const Vectorized<c10::complex<double>>& other) const { | ||||
|   Vectorized<c10::complex<double>> operator<(const Vectorized<c10::complex<double>>& other [[maybe_unused]]) const { | ||||
|     TORCH_CHECK(false, "not supported for complex numbers"); | ||||
|   } | ||||
|   Vectorized<c10::complex<double>> operator<=(const Vectorized<c10::complex<double>>& other) const { | ||||
|   Vectorized<c10::complex<double>> operator<=(const Vectorized<c10::complex<double>>& other [[maybe_unused]]) const { | ||||
|     TORCH_CHECK(false, "not supported for complex numbers"); | ||||
|   } | ||||
|   Vectorized<c10::complex<double>> operator>(const Vectorized<c10::complex<double>>& other) const { | ||||
|   Vectorized<c10::complex<double>> operator>(const Vectorized<c10::complex<double>>& other [[maybe_unused]]) const { | ||||
|     TORCH_CHECK(false, "not supported for complex numbers"); | ||||
|   } | ||||
|   Vectorized<c10::complex<double>> operator>=(const Vectorized<c10::complex<double>>& other) const { | ||||
|   Vectorized<c10::complex<double>> operator>=(const Vectorized<c10::complex<double>>& other [[maybe_unused]]) const { | ||||
|     TORCH_CHECK(false, "not supported for complex numbers"); | ||||
|   } | ||||
|  | ||||
|  | ||||
| @ -864,16 +864,16 @@ public: | ||||
|     auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_NEQ_UQ); | ||||
|     return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vector, mask, 0xFFFFFFFF)); | ||||
|   } | ||||
|   Vectorized<c10::complex<float>> operator<(const Vectorized<c10::complex<float>>& other) const { | ||||
|   Vectorized<c10::complex<float>> operator<(const Vectorized<c10::complex<float>>& other [[maybe_unused]]) const { | ||||
|     TORCH_CHECK(false, "not supported for complex numbers"); | ||||
|   } | ||||
|   Vectorized<c10::complex<float>> operator<=(const Vectorized<c10::complex<float>>& other) const { | ||||
|   Vectorized<c10::complex<float>> operator<=(const Vectorized<c10::complex<float>>& other [[maybe_unused]]) const { | ||||
|     TORCH_CHECK(false, "not supported for complex numbers"); | ||||
|   } | ||||
|   Vectorized<c10::complex<float>> operator>(const Vectorized<c10::complex<float>>& other) const { | ||||
|   Vectorized<c10::complex<float>> operator>(const Vectorized<c10::complex<float>>& other [[maybe_unused]]) const { | ||||
|     TORCH_CHECK(false, "not supported for complex numbers"); | ||||
|   } | ||||
|   Vectorized<c10::complex<float>> operator>=(const Vectorized<c10::complex<float>>& other) const { | ||||
|   Vectorized<c10::complex<float>> operator>=(const Vectorized<c10::complex<float>>& other [[maybe_unused]]) const { | ||||
|     TORCH_CHECK(false, "not supported for complex numbers"); | ||||
|   } | ||||
|  | ||||
|  | ||||
| @ -72,12 +72,13 @@ __m512i pack_saturate_and_clamp( | ||||
|  | ||||
| template <> | ||||
| inline __m512i pack_saturate_and_clamp<int32_t>( | ||||
|     __m512i first, | ||||
|     __m512i second, | ||||
|     int32_t min_val, | ||||
|     int32_t max_val) { | ||||
|     __m512i first [[maybe_unused]], | ||||
|     __m512i second [[maybe_unused]], | ||||
|     int32_t min_val [[maybe_unused]], | ||||
|     int32_t max_val [[maybe_unused]]) { | ||||
|   // This function is for linkage only, will not be used | ||||
|   AT_ERROR("pack_saturate_and_clamp<int32_t> is not supported"); | ||||
|   return __m512i{}; | ||||
| } | ||||
|  | ||||
| template <> | ||||
| @ -342,7 +343,7 @@ struct Vectorized<c10::qint32> : public Vectorizedqi { | ||||
|         const float_vec_return_type& rhs, | ||||
|         float scale, | ||||
|         int32_t zero_point, | ||||
|         float inverse_scale) { | ||||
|         float inverse_scale [[maybe_unused]]) { | ||||
|       Vectorized<c10::qint32> retval; | ||||
|       auto rhs_data = (__m512)rhs[0]; | ||||
|       at::native::quantize_vec<c10::qint32, /*precision=*/32>( | ||||
| @ -955,7 +956,7 @@ struct VectorizedQuantizedConverter { | ||||
|   float_vec_return_type dequantize( | ||||
|       Vectorized<float> scale, | ||||
|       Vectorized<float> zero_point, | ||||
|       Vectorized<float> scale_zp_premul) const { | ||||
|       Vectorized<float> scale_zp_premul [[maybe_unused]]) const { | ||||
|     float_vec_return_type rv; | ||||
|     for (const auto i : c10::irange(float_num_vecs())) { | ||||
|       float tmp_vals[16]; | ||||
| @ -1039,7 +1040,7 @@ struct Vectorized<c10::qint32> : public VectorizedQuantizedConverter< | ||||
|       const float_vec_return_type& rhs, | ||||
|       float scale, | ||||
|       int32_t zero_point, | ||||
|       float inverse_scale) { | ||||
|       float inverse_scale [[maybe_unused]]) { | ||||
|     std::array<value_type, size()> qvals; | ||||
|     std::array<float, float_num_vecs() * 16> float_vals; | ||||
|  | ||||
| @ -1183,7 +1184,7 @@ struct Vectorized<c10::qint8> : public VectorizedQuantizedConverter< | ||||
|       const float_vec_return_type& rhs, | ||||
|       float scale, | ||||
|       int32_t zero_point, | ||||
|       float inverse_scale) { | ||||
|       float inverse_scale [[maybe_unused]]) { | ||||
|     std::array<value_type, size()> qvals; | ||||
|     std::array<float, float_num_vecs() * 16> float_vals; | ||||
|  | ||||
| @ -1315,7 +1316,7 @@ struct Vectorized<c10::quint8> : public VectorizedQuantizedConverter< | ||||
|       const float_vec_return_type& rhs, | ||||
|       float scale, | ||||
|       int32_t zero_point, | ||||
|       float inverse_scale) { | ||||
|       float inverse_scale [[maybe_unused]]) { | ||||
|     std::array<value_type, size()> qvals; | ||||
|     std::array<float, float_num_vecs() * 16> float_vals; | ||||
|  | ||||
|  | ||||
| @ -1,32 +0,0 @@ | ||||
| #include <ATen/cuda/PinnedMemoryAllocator.h> | ||||
| #include <ATen/Context.h> | ||||
| #include <ATen/Config.h> | ||||
| #include <ATen/TensorUtils.h> | ||||
| #include <c10/core/Storage.h> | ||||
| #include <ATen/ATen.h> | ||||
| #include <ATen/CPUFunctions.h> | ||||
|  | ||||
| namespace at::native { | ||||
|  | ||||
| bool is_pinned_cuda(const Tensor& self, std::optional<Device> device) { | ||||
|   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!device.has_value() || device->is_cuda()); | ||||
|   // TODO: unhook this | ||||
|   return detail::getCUDAHooks().isPinnedPtr(self.storage().data()); | ||||
| } | ||||
|  | ||||
| Tensor _pin_memory_cuda(const Tensor& self, std::optional<Device> device) { | ||||
|   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!device.has_value() || device->is_cuda()); | ||||
|   auto* allocator = at::cuda::getPinnedMemoryAllocator(); | ||||
|   auto storage = Storage( | ||||
|       Storage::use_byte_size_t(), | ||||
|       detail::computeStorageNbytes( | ||||
|           self.sizes(), self.strides(), self.dtype().itemsize()), | ||||
|       allocator, | ||||
|       /*resizable=*/false); | ||||
|   auto tensor = at::cpu::empty({0}, self.options()).set_(storage, 0, self.sizes(), self.strides()); | ||||
|   tensor.copy_(self); | ||||
|   return tensor; | ||||
| } | ||||
|  | ||||
|  | ||||
| } // namespace at::native | ||||
| @ -45,7 +45,7 @@ struct DeviceThreadHandlePool : public std::enable_shared_from_this<DeviceThread | ||||
|     // unordered_map<int, vector<unique_ptr<Handle>>> created_handles; | ||||
|     Handle(const Handle& rhs) = delete; | ||||
|     // Following https://stackoverflow.com/questions/3279543/what-is-the-copy-and-swap-idiom | ||||
|     Handle(Handle&& rhs) : Handle() { std::swap(handle, rhs.handle); } | ||||
|     Handle(Handle&& rhs) noexcept : Handle() { std::swap(handle, rhs.handle); } | ||||
|     // operator= takes argument by value | ||||
|     Handle& operator=(Handle rhs) { std::swap(handle, rhs.handle); return *this; } | ||||
|     ~Handle() { | ||||
|  | ||||
| @ -166,15 +166,15 @@ struct GemmStridedBatchedParams : OpParams { | ||||
|   } | ||||
|  | ||||
|   size_t GetSizeA() const { | ||||
|     return sizeof(T) * lda * ((transa == 'n' || transa == 'N') ? k : m) * batch; | ||||
|     return sizeof(T) * std::min(lda, stride_a) * ((transa == 'n' || transa == 'N') ? k : m) * batch; | ||||
|   } | ||||
|  | ||||
|   size_t GetSizeB() const { | ||||
|     return sizeof(T) * ldb * ((transb == 'n' || transb == 'N') ? n : k) * batch; | ||||
|     return sizeof(T) * std::min(ldb, stride_b) * ((transb == 'n' || transb == 'N') ? n : k) * batch; | ||||
|   } | ||||
|  | ||||
|   size_t GetSizeC() const { | ||||
|     return sizeof(T) * ldc * n * batch; | ||||
|     return sizeof(T) * std::min(ldc, stride_c) * n * batch; | ||||
|   } | ||||
|  | ||||
|   size_t GetSize(bool duplicate_inputs) const { | ||||
|  | ||||
| @ -18,7 +18,7 @@ namespace at::cuda::tunable { | ||||
| class StreamTimer : public ITimer { | ||||
|   public: | ||||
|     StreamTimer(); | ||||
|     virtual ~StreamTimer(); | ||||
|     virtual ~StreamTimer() override; | ||||
|  | ||||
|     void Start() override; | ||||
|  | ||||
|  | ||||
| @ -194,15 +194,19 @@ static void AddRocblasValidator() { | ||||
| static void AddHipblasltValidator() { | ||||
|   auto validators = getTuningContext()->GetTuningResultsValidator().GetAllValidators(); | ||||
|   if (validators.find("HIPBLASLT_VERSION") == validators.end()) { | ||||
|     std::string hipblaslt_version = c10::str( | ||||
|         XSTRINGIFY(HIPBLASLT_VERSION_MAJOR), ".", | ||||
|         XSTRINGIFY(HIPBLASLT_VERSION_MINOR), ".", | ||||
|         XSTRINGIFY(HIPBLASLT_VERSION_PATCH), "-", | ||||
|         XSTRINGIFY(HIPBLASLT_VERSION_TWEAK)); | ||||
|     int version; | ||||
|     std::string revision(128, '\0'); | ||||
|     auto handle = at::cuda::getCurrentCUDABlasLtHandle(); | ||||
|     hipblasLtGetVersion(handle, &version); | ||||
|     hipblasLtGetGitRevision(handle, revision.data()); | ||||
|     std::string hipblaslt_version = | ||||
|         c10::str(version, "-", revision.c_str()); | ||||
|     getTuningContext()->GetTuningResultsValidator().RegisterValidator( | ||||
|         "HIPBLASLT_VERSION", | ||||
|         [hipblaslt_version]() { return hipblaslt_version; }, | ||||
|         [hipblaslt_version](auto&& k) { return hipblaslt_version == k ? OK : FAIL; }); | ||||
|         [hipblaslt_version](auto&& k) { | ||||
|           return hipblaslt_version == k ? OK : FAIL; | ||||
|         }); | ||||
|   } | ||||
| } | ||||
|  | ||||
| @ -318,8 +322,6 @@ class ScaledGemmTunableOp : public TunableOp<ScaledGemmParams<CT>, StreamTimer> | ||||
|   ScaledGemmTunableOp() { | ||||
|     this->RegisterOp(std::string("Default"), std::make_unique<DefaultScaledGemmOp<CT>>()); | ||||
|  | ||||
|     auto validators = getTuningContext()->GetTuningResultsValidator().GetAllValidators(); | ||||
|  | ||||
| #if defined(USE_ROCM) | ||||
|     for (auto&& [name, op] : GetHipBlasLtScaledGemmTypeStringAndOps<AT, BT, CT, ALayout, BLayout>()) { | ||||
|       this->RegisterOp(std::move(name), std::move(op)); | ||||
|  | ||||
| @ -2,6 +2,7 @@ | ||||
|  | ||||
| #include <c10/core/Device.h> | ||||
| #include <c10/core/Stream.h> | ||||
| #include <c10/core/Allocator.h> | ||||
| C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wunused-parameter") | ||||
| namespace at { | ||||
|  | ||||
| @ -40,6 +41,15 @@ struct TORCH_API AcceleratorHooksInterface { | ||||
|     TORCH_CHECK(false, "Backend doesn't support maybeExchangeDevice()"); | ||||
|     return -1; | ||||
|   } | ||||
|  | ||||
|   virtual bool isPinnedPtr(const void* data) const { | ||||
|     return false; | ||||
|   } | ||||
|  | ||||
|   virtual Allocator* getPinnedMemoryAllocator() const { | ||||
|     TORCH_CHECK(false, "Backend doesn't support getPinnedMemoryAllocator()"); | ||||
|     return nullptr; | ||||
|   } | ||||
| }; | ||||
|  | ||||
| } // namespace at | ||||
|  | ||||
| @ -77,7 +77,7 @@ struct TORCH_API CUDAHooksInterface : AcceleratorHooksInterface { | ||||
|     TORCH_CHECK(false, "Cannot get device of pointer on CUDA without ATen_cuda library. ", CUDA_HELP); | ||||
|   } | ||||
|  | ||||
|   virtual bool isPinnedPtr(const void* /*data*/) const { | ||||
|   virtual bool isPinnedPtr(const void* data) const override { | ||||
|     return false; | ||||
|   } | ||||
|  | ||||
| @ -121,7 +121,7 @@ struct TORCH_API CUDAHooksInterface : AcceleratorHooksInterface { | ||||
|     return -1; | ||||
|   } | ||||
|  | ||||
|   virtual Allocator* getPinnedMemoryAllocator() const { | ||||
|   virtual Allocator* getPinnedMemoryAllocator() const override { | ||||
|     TORCH_CHECK(false, "Pinned memory requires CUDA. ", CUDA_HELP); | ||||
|   } | ||||
|  | ||||
|  | ||||
| @ -6,6 +6,8 @@ | ||||
|  | ||||
| #include <c10/util/Registry.h> | ||||
|  | ||||
| #include <ATen/detail/AcceleratorHooksInterface.h> | ||||
|  | ||||
| #include <memory> | ||||
|  | ||||
| namespace at { | ||||
| @ -19,10 +21,10 @@ namespace at { | ||||
| // which we may want to call into from CPU code (and thus must be dynamically | ||||
| // dispatched, to allow for separate compilation of HIP code).  See | ||||
| // CUDAHooksInterface for more detailed motivation. | ||||
| struct TORCH_API HIPHooksInterface { | ||||
| struct TORCH_API HIPHooksInterface : AcceleratorHooksInterface { | ||||
|   // This should never actually be implemented, but it is used to | ||||
|   // squelch -Werror=non-virtual-dtor | ||||
|   virtual ~HIPHooksInterface() = default; | ||||
|   virtual ~HIPHooksInterface() override = default; | ||||
|  | ||||
|   // Initialize the HIP library state | ||||
|   virtual void initHIP() const { | ||||
| @ -41,7 +43,11 @@ struct TORCH_API HIPHooksInterface { | ||||
|     return -1; | ||||
|   } | ||||
|  | ||||
|   virtual Allocator* getPinnedMemoryAllocator() const { | ||||
|   virtual bool isPinnedPtr(const void* data) const override { | ||||
|     return false; | ||||
|   } | ||||
|  | ||||
|   virtual Allocator* getPinnedMemoryAllocator() const override { | ||||
|     AT_ERROR("Pinned memory requires HIP."); | ||||
|   } | ||||
|  | ||||
| @ -52,6 +58,10 @@ struct TORCH_API HIPHooksInterface { | ||||
|   virtual int getNumGPUs() const { | ||||
|     return 0; | ||||
|   } | ||||
|  | ||||
|   virtual bool hasPrimaryContext(DeviceIndex device_index) const override { | ||||
|     AT_ERROR("Cannot check primary context without ATen_hip library."); | ||||
|   } | ||||
| }; | ||||
|  | ||||
| // NB: dummy argument to suppress "ISO C++11 requires at least one argument | ||||
|  | ||||
| @ -94,6 +94,12 @@ struct TORCH_API MPSHooksInterface : AcceleratorHooksInterface { | ||||
|   bool hasPrimaryContext(DeviceIndex device_index) const override { | ||||
|     FAIL_MPSHOOKS_FUNC(__func__); | ||||
|   } | ||||
|   virtual bool isPinnedPtr(const void* data) const override { | ||||
|     return false; | ||||
|   } | ||||
|   virtual Allocator* getPinnedMemoryAllocator() const override { | ||||
|     FAIL_MPSHOOKS_FUNC(__func__); | ||||
|   } | ||||
|   #undef FAIL_MPSHOOKS_FUNC | ||||
| }; | ||||
|  | ||||
|  | ||||
| @ -6,6 +6,9 @@ | ||||
| #include <c10/core/Stream.h> | ||||
| #include <c10/util/Registry.h> | ||||
|  | ||||
| #include <c10/core/Allocator.h> | ||||
|  | ||||
| #include <c10/util/python_stub.h> | ||||
| #include <ATen/detail/AcceleratorHooksInterface.h> | ||||
|  | ||||
| #include <string> | ||||
| @ -15,7 +18,6 @@ class Context; | ||||
| } | ||||
|  | ||||
| namespace at { | ||||
|  | ||||
| constexpr const char* MTIA_HELP = | ||||
|     "The MTIA backend requires MTIA extension for PyTorch;" | ||||
|     "this error has occurred because you are trying " | ||||
| @ -88,6 +90,20 @@ struct TORCH_API MTIAHooksInterface : AcceleratorHooksInterface { | ||||
|   virtual void setCurrentStream(const c10::Stream& stream) const { | ||||
|     FAIL_MTIAHOOKS_FUNC(__func__); | ||||
|   } | ||||
|  | ||||
|   virtual bool isPinnedPtr(const void* data) const override { | ||||
|     return false; | ||||
|   } | ||||
|  | ||||
|   virtual Allocator* getPinnedMemoryAllocator() const override { | ||||
|     FAIL_MTIAHOOKS_FUNC(__func__); | ||||
|     return nullptr; | ||||
|   } | ||||
|  | ||||
|   virtual PyObject* memoryStats(DeviceIndex device) const { | ||||
|     FAIL_MTIAHOOKS_FUNC(__func__); | ||||
|     return nullptr; | ||||
|   } | ||||
| }; | ||||
|  | ||||
| struct TORCH_API MTIAHooksArgs {}; | ||||
|  | ||||
| @ -24,7 +24,11 @@ struct TORCH_API PrivateUse1HooksInterface : AcceleratorHooksInterface { | ||||
|         "You should register `PrivateUse1HooksInterface` for PrivateUse1 before call `getDeviceFromPtr`."); | ||||
|   } | ||||
|  | ||||
|   virtual Allocator* getPinnedMemoryAllocator() const { | ||||
|   virtual bool isPinnedPtr(const void* data) const override { | ||||
|     return false; | ||||
|   } | ||||
|  | ||||
|   virtual Allocator* getPinnedMemoryAllocator() const override { | ||||
|     TORCH_CHECK( | ||||
|         false, | ||||
|         "You should register `PrivateUse1HooksInterface` for PrivateUse1 before call `getPinnedMemoryAllocator`."); | ||||
|  | ||||
| @ -58,15 +58,15 @@ struct TORCH_API XPUHooksInterface : AcceleratorHooksInterface{ | ||||
|     TORCH_CHECK(false, "Cannot synchronize XPU device without ATen_xpu library."); | ||||
|   } | ||||
|  | ||||
|   virtual Allocator* getPinnedMemoryAllocator() const  { | ||||
|   virtual Allocator* getPinnedMemoryAllocator() const override { | ||||
|     TORCH_CHECK(false, "Cannot get XPU pinned memory allocator without ATen_xpu library."); | ||||
|   } | ||||
|  | ||||
|   virtual bool isPinnedPtr(const void* /*data*/) const { | ||||
|   virtual bool isPinnedPtr(const void* data) const override { | ||||
|     return false; | ||||
|   } | ||||
|  | ||||
|   virtual bool hasPrimaryContext(DeviceIndex /*device_index*/) const override{ | ||||
|   virtual bool hasPrimaryContext(DeviceIndex device_index) const override { | ||||
|     TORCH_CHECK(false, "Cannot query primary context without ATen_xpu library."); | ||||
|   } | ||||
| }; | ||||
|  | ||||
| @ -861,21 +861,10 @@ static std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_no_ | ||||
|     return at::native_batch_norm(self, weight_opt, bias_opt, Tensor(), Tensor(), train, momentum, eps); | ||||
| } | ||||
|  | ||||
| static std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _batch_norm_with_update_batch( | ||||
|   const Tensor& self, const std::optional<Tensor>& weight_opt, const std::optional<Tensor>& bias_opt, | ||||
|   Tensor& running_mean, Tensor& running_var, double momentum, double eps) { | ||||
|     at::Tensor output, save_mean, save_var; | ||||
|     std::tie(output, save_mean, save_var) = | ||||
|       at::native_batch_norm(self, weight_opt, bias_opt, running_mean, running_var, true/*train*/, momentum, eps); | ||||
|     at::Tensor reserve = at::empty({0}, self.options().dtype(kByte)); | ||||
|     return std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor>(output, save_mean, save_var, reserve); | ||||
| } | ||||
|  | ||||
| TORCH_LIBRARY_IMPL(aten, FuncTorchBatched, m) { | ||||
|   VMAP_SUPPORT(native_batch_norm, NATIVE_BATCH_NORM_BATCH_RULE(native_batch_norm)); | ||||
|   VMAP_SUPPORT(cudnn_batch_norm, CUDNN_BATCH_NORM_BATCH_RULE(cudnn_batch_norm)); | ||||
|   VMAP_SUPPORT(miopen_batch_norm, MIOPEN_BATCH_NORM_BATCH_RULE(miopen_batch_norm)); | ||||
|   m.impl("_batch_norm_with_update", _batch_norm_with_update_batch); | ||||
|   m.impl("_native_batch_norm_legit", _native_batch_norm_legit_batch); | ||||
|   m.impl("_native_batch_norm_legit.no_stats", _native_batch_norm_legit_no_stats_batch); | ||||
|   m.impl("native_batch_norm_backward", NATIVE_BATCH_NORM_BACKWARD_BATCH_RULE(native_batch_norm_backward)); | ||||
|  | ||||
| @ -55,3 +55,14 @@ static inline const char* _mklGetErrorString(sparse_status_t status) { | ||||
|         at::mkl::sparse::_mklGetErrorString(__err), \ | ||||
|         " when calling `" #EXPR "`");               \ | ||||
|   } while (0) | ||||
|  | ||||
| #define TORCH_MKLSPARSE_CHECK_SUCCESS_OR_INVALID(status, function_name) \ | ||||
|   do {                                                   \ | ||||
|     sparse_status_t __status = (status);                 \ | ||||
|     TORCH_CHECK(                                         \ | ||||
|         __status == SPARSE_STATUS_SUCCESS ||             \ | ||||
|             __status == SPARSE_STATUS_INVALID_VALUE,     \ | ||||
|         "MKL error: ",                                   \ | ||||
|         at::mkl::sparse::_mklGetErrorString(__status),   \ | ||||
|         " when calling `" function_name "`");            \ | ||||
|   } while (0) | ||||
|  | ||||
| @ -278,48 +278,60 @@ void spmmd<c10::complex<double>>(MKL_SPARSE_SPMMD_ARGTYPES(c10::complex<double>) | ||||
| } | ||||
|  | ||||
| template <> | ||||
| void trsv<float>(MKL_SPARSE_TRSV_ARGTYPES(float)) { | ||||
|   TORCH_MKLSPARSE_CHECK(mkl_sparse_s_trsv(operation, alpha, A, descr, x, y)); | ||||
| sparse_status_t trsv<float>(MKL_SPARSE_TRSV_ARGTYPES(float)) { | ||||
|   sparse_status_t status = mkl_sparse_s_trsv(operation, alpha, A, descr, x, y); | ||||
|   TORCH_MKLSPARSE_CHECK_SUCCESS_OR_INVALID(status, "mkl_sparse_s_trsv"); | ||||
|   return status; | ||||
| } | ||||
| template <> | ||||
| void trsv<double>(MKL_SPARSE_TRSV_ARGTYPES(double)) { | ||||
|   TORCH_MKLSPARSE_CHECK(mkl_sparse_d_trsv(operation, alpha, A, descr, x, y)); | ||||
| sparse_status_t trsv<double>(MKL_SPARSE_TRSV_ARGTYPES(double)) { | ||||
|   sparse_status_t status = mkl_sparse_d_trsv(operation, alpha, A, descr, x, y); | ||||
|   TORCH_MKLSPARSE_CHECK_SUCCESS_OR_INVALID(status, "mkl_sparse_d_trsv"); | ||||
|   return status; | ||||
| } | ||||
| template <> | ||||
| void trsv<c10::complex<float>>(MKL_SPARSE_TRSV_ARGTYPES(c10::complex<float>)) { | ||||
|   TORCH_MKLSPARSE_CHECK(mkl_sparse_c_trsv( | ||||
| sparse_status_t trsv<c10::complex<float>>(MKL_SPARSE_TRSV_ARGTYPES(c10::complex<float>)) { | ||||
|   sparse_status_t status = mkl_sparse_c_trsv( | ||||
|       operation, | ||||
|       to_mkl_complex<float, MKL_Complex8>(alpha), | ||||
|       A, | ||||
|       descr, | ||||
|       reinterpret_cast<const MKL_Complex8*>(x), | ||||
|       reinterpret_cast<MKL_Complex8*>(y))); | ||||
|       reinterpret_cast<MKL_Complex8*>(y)); | ||||
|   TORCH_MKLSPARSE_CHECK_SUCCESS_OR_INVALID(status, "mkl_sparse_c_trsv"); | ||||
|   return status; | ||||
| } | ||||
| template <> | ||||
| void trsv<c10::complex<double>>( | ||||
| sparse_status_t trsv<c10::complex<double>>( | ||||
|     MKL_SPARSE_TRSV_ARGTYPES(c10::complex<double>)) { | ||||
|   TORCH_MKLSPARSE_CHECK(mkl_sparse_z_trsv( | ||||
|   sparse_status_t status = mkl_sparse_z_trsv( | ||||
|       operation, | ||||
|       to_mkl_complex<double, MKL_Complex16>(alpha), | ||||
|       A, | ||||
|       descr, | ||||
|       reinterpret_cast<const MKL_Complex16*>(x), | ||||
|       reinterpret_cast<MKL_Complex16*>(y))); | ||||
|       reinterpret_cast<MKL_Complex16*>(y)); | ||||
|   TORCH_MKLSPARSE_CHECK_SUCCESS_OR_INVALID(status, "mkl_sparse_z_trsv"); | ||||
|   return status; | ||||
| } | ||||
|  | ||||
| template <> | ||||
| void trsm<float>(MKL_SPARSE_TRSM_ARGTYPES(float)) { | ||||
|   TORCH_MKLSPARSE_CHECK(mkl_sparse_s_trsm( | ||||
|       operation, alpha, A, descr, layout, x, columns, ldx, y, ldy)); | ||||
| sparse_status_t trsm<float>(MKL_SPARSE_TRSM_ARGTYPES(float)) { | ||||
|   sparse_status_t status = mkl_sparse_s_trsm( | ||||
|       operation, alpha, A, descr, layout, x, columns, ldx, y, ldy); | ||||
|   TORCH_MKLSPARSE_CHECK_SUCCESS_OR_INVALID(status, "mkl_sparse_s_trsm"); | ||||
|   return status; | ||||
| } | ||||
| template <> | ||||
| void trsm<double>(MKL_SPARSE_TRSM_ARGTYPES(double)) { | ||||
|   TORCH_MKLSPARSE_CHECK(mkl_sparse_d_trsm( | ||||
|       operation, alpha, A, descr, layout, x, columns, ldx, y, ldy)); | ||||
| sparse_status_t trsm<double>(MKL_SPARSE_TRSM_ARGTYPES(double)) { | ||||
|   sparse_status_t status = mkl_sparse_d_trsm( | ||||
|       operation, alpha, A, descr, layout, x, columns, ldx, y, ldy); | ||||
|   TORCH_MKLSPARSE_CHECK_SUCCESS_OR_INVALID(status, "mkl_sparse_d_trsm"); | ||||
|   return status; | ||||
| } | ||||
| template <> | ||||
| void trsm<c10::complex<float>>(MKL_SPARSE_TRSM_ARGTYPES(c10::complex<float>)) { | ||||
|   TORCH_MKLSPARSE_CHECK(mkl_sparse_c_trsm( | ||||
| sparse_status_t trsm<c10::complex<float>>(MKL_SPARSE_TRSM_ARGTYPES(c10::complex<float>)) { | ||||
|   sparse_status_t status = mkl_sparse_c_trsm( | ||||
|       operation, | ||||
|       to_mkl_complex<float, MKL_Complex8>(alpha), | ||||
|       A, | ||||
| @ -329,12 +341,14 @@ void trsm<c10::complex<float>>(MKL_SPARSE_TRSM_ARGTYPES(c10::complex<float>)) { | ||||
|       columns, | ||||
|       ldx, | ||||
|       reinterpret_cast<MKL_Complex8*>(y), | ||||
|       ldy)); | ||||
|       ldy); | ||||
|   TORCH_MKLSPARSE_CHECK_SUCCESS_OR_INVALID(status, "mkl_sparse_c_trsm"); | ||||
|   return status; | ||||
| } | ||||
| template <> | ||||
| void trsm<c10::complex<double>>( | ||||
| sparse_status_t trsm<c10::complex<double>>( | ||||
|     MKL_SPARSE_TRSM_ARGTYPES(c10::complex<double>)) { | ||||
|   TORCH_MKLSPARSE_CHECK(mkl_sparse_z_trsm( | ||||
|   sparse_status_t status = mkl_sparse_z_trsm( | ||||
|       operation, | ||||
|       to_mkl_complex<double, MKL_Complex16>(alpha), | ||||
|       A, | ||||
| @ -344,7 +358,9 @@ void trsm<c10::complex<double>>( | ||||
|       columns, | ||||
|       ldx, | ||||
|       reinterpret_cast<MKL_Complex16*>(y), | ||||
|       ldy)); | ||||
|       ldy); | ||||
|   TORCH_MKLSPARSE_CHECK_SUCCESS_OR_INVALID(status, "mkl_sparse_z_trsm"); | ||||
|   return status; | ||||
| } | ||||
|  | ||||
| } // namespace at::mkl::sparse | ||||
|  | ||||
| @ -184,7 +184,7 @@ void spmmd<c10::complex<double>>( | ||||
|       const scalar_t *x, scalar_t *y | ||||
|  | ||||
| template <typename scalar_t> | ||||
| inline void trsv(MKL_SPARSE_TRSV_ARGTYPES(scalar_t)) { | ||||
| inline sparse_status_t trsv(MKL_SPARSE_TRSV_ARGTYPES(scalar_t)) { | ||||
|   TORCH_INTERNAL_ASSERT( | ||||
|       false, | ||||
|       "at::mkl::sparse::trsv: not implemented for ", | ||||
| @ -192,13 +192,13 @@ inline void trsv(MKL_SPARSE_TRSV_ARGTYPES(scalar_t)) { | ||||
| } | ||||
|  | ||||
| template <> | ||||
| void trsv<float>(MKL_SPARSE_TRSV_ARGTYPES(float)); | ||||
| sparse_status_t trsv<float>(MKL_SPARSE_TRSV_ARGTYPES(float)); | ||||
| template <> | ||||
| void trsv<double>(MKL_SPARSE_TRSV_ARGTYPES(double)); | ||||
| sparse_status_t trsv<double>(MKL_SPARSE_TRSV_ARGTYPES(double)); | ||||
| template <> | ||||
| void trsv<c10::complex<float>>(MKL_SPARSE_TRSV_ARGTYPES(c10::complex<float>)); | ||||
| sparse_status_t trsv<c10::complex<float>>(MKL_SPARSE_TRSV_ARGTYPES(c10::complex<float>)); | ||||
| template <> | ||||
| void trsv<c10::complex<double>>(MKL_SPARSE_TRSV_ARGTYPES(c10::complex<double>)); | ||||
| sparse_status_t trsv<c10::complex<double>>(MKL_SPARSE_TRSV_ARGTYPES(c10::complex<double>)); | ||||
|  | ||||
| #define MKL_SPARSE_TRSM_ARGTYPES(scalar_t)                                    \ | ||||
|   const sparse_operation_t operation, const scalar_t alpha,                   \ | ||||
| @ -207,7 +207,7 @@ void trsv<c10::complex<double>>(MKL_SPARSE_TRSV_ARGTYPES(c10::complex<double>)); | ||||
|       const MKL_INT ldx, scalar_t *y, const MKL_INT ldy | ||||
|  | ||||
| template <typename scalar_t> | ||||
| inline void trsm(MKL_SPARSE_TRSM_ARGTYPES(scalar_t)) { | ||||
| inline sparse_status_t trsm(MKL_SPARSE_TRSM_ARGTYPES(scalar_t)) { | ||||
|   TORCH_INTERNAL_ASSERT( | ||||
|       false, | ||||
|       "at::mkl::sparse::trsm: not implemented for ", | ||||
| @ -215,12 +215,12 @@ inline void trsm(MKL_SPARSE_TRSM_ARGTYPES(scalar_t)) { | ||||
| } | ||||
|  | ||||
| template <> | ||||
| void trsm<float>(MKL_SPARSE_TRSM_ARGTYPES(float)); | ||||
| sparse_status_t trsm<float>(MKL_SPARSE_TRSM_ARGTYPES(float)); | ||||
| template <> | ||||
| void trsm<double>(MKL_SPARSE_TRSM_ARGTYPES(double)); | ||||
| sparse_status_t trsm<double>(MKL_SPARSE_TRSM_ARGTYPES(double)); | ||||
| template <> | ||||
| void trsm<c10::complex<float>>(MKL_SPARSE_TRSM_ARGTYPES(c10::complex<float>)); | ||||
| sparse_status_t trsm<c10::complex<float>>(MKL_SPARSE_TRSM_ARGTYPES(c10::complex<float>)); | ||||
| template <> | ||||
| void trsm<c10::complex<double>>(MKL_SPARSE_TRSM_ARGTYPES(c10::complex<double>)); | ||||
| sparse_status_t trsm<c10::complex<double>>(MKL_SPARSE_TRSM_ARGTYPES(c10::complex<double>)); | ||||
|  | ||||
| } // namespace at::mkl::sparse | ||||
|  | ||||
| @ -3,8 +3,6 @@ | ||||
| #include <ATen/CPUFunctions.h> | ||||
| #include <ATen/EmptyTensor.h> | ||||
| #include <ATen/mps/MPSAllocator.h> | ||||
| #include <ATen/ops/_pin_memory_native.h> | ||||
| #include <ATen/ops/is_pinned_native.h> | ||||
| #include <c10/core/Allocator.h> | ||||
| #include <c10/core/Storage.h> | ||||
|  | ||||
| @ -860,31 +858,12 @@ IMPSAllocator* getIMPSAllocator(bool sharedAllocator) { | ||||
|   return nullptr; | ||||
| } | ||||
|  | ||||
| } // namespace at::mps | ||||
|  | ||||
| namespace at::native { | ||||
|  | ||||
| // torch.is_pinned() implementation | ||||
| // Pinned memory will be helpful on Apple Silicon Macs with Unified memory as we | ||||
| // will be able to use SharedStorageMode for MTLBuffer allocations. This will | ||||
| // avoid extra copies on DataLoading operations. | ||||
| bool is_pinned_mps(const Tensor& self, std::optional<Device> device) { | ||||
|   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!device.has_value() || device->is_mps()); | ||||
|   return at::mps::_getSharedAllocator().isSharedBuffer(self.storage().data()); | ||||
| bool isMPSPinnedPtr(const void* data) { | ||||
|   return at::mps::_getSharedAllocator().isSharedBuffer(data); | ||||
| } | ||||
|  | ||||
| // torch.pin_memory() implementation | ||||
| Tensor _pin_memory_mps(const Tensor& self, std::optional<Device> device) { | ||||
|   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!device.has_value() || device->is_mps()); | ||||
|   auto* shared_allocator = at::mps::getIMPSAllocator(true); | ||||
|   TORCH_CHECK(shared_allocator, "unable to pin memory on a non-unified memory device"); | ||||
|  | ||||
|   const size_t storage_size = at::detail::computeStorageNbytes(self.sizes(), self.strides(), self.dtype().itemsize()); | ||||
|   std::cerr << "Pinning memory of size " << storage_size / 1024UL << " KB\n"; | ||||
|   auto storage = Storage(Storage::use_byte_size_t(), storage_size, shared_allocator, false); | ||||
|   auto tensor = at::cpu::empty({0}, self.options()).set_(storage, 0, self.sizes(), self.strides()); | ||||
|   tensor.copy_(self); | ||||
|   return tensor; | ||||
| } | ||||
|  | ||||
| } // namespace at::native | ||||
| } // namespace at::mps | ||||
|  | ||||
| @ -59,4 +59,6 @@ C10_DECLARE_REGISTRY(MPSAllocatorCallbacksRegistry, IMpsAllocatorCallback); | ||||
|  | ||||
| IMPSAllocator* getIMPSAllocator(bool sharedAllocator = false); | ||||
|  | ||||
| bool isMPSPinnedPtr(const void* data); | ||||
|  | ||||
| } // namespace at::mps | ||||
|  | ||||
| @ -31,6 +31,7 @@ enum class MacOSVersion : uint32_t { | ||||
|   MACOS_VER_13_2_PLUS, | ||||
|   MACOS_VER_13_3_PLUS, | ||||
|   MACOS_VER_14_0_PLUS, | ||||
|   MACOS_VER_14_4_PLUS, | ||||
| }; | ||||
|  | ||||
| //----------------------------------------------------------------- | ||||
|  | ||||
| @ -105,19 +105,19 @@ MPSDevice::MPSDevice() : _mtl_device(nil), _mtl_indexing_library(nil) { | ||||
|  | ||||
| bool MPSDevice::isMacOS13Plus(MacOSVersion version) const { | ||||
|   id mpsCD = NSClassFromString(@"MPSGraph"); | ||||
|   static auto compileOptions = [[[MTLCompileOptions alloc] init] autorelease]; | ||||
|   static bool _macos_13_0_plus = [mpsCD instancesRespondToSelector:@selector(cumulativeSumWithTensor: | ||||
|                                                                                                 axis:name:)] == YES; | ||||
|   static bool _macos_13_1_plus = | ||||
|       [mpsCD instancesRespondToSelector:@selector | ||||
|              (sampleGridWithSourceTensor: | ||||
|                         coordinateTensor:layout:normalizeCoordinates:relativeCoordinates:alignCorners:paddingMode | ||||
|                                         :samplingMode:constantValue:name:)] == YES; | ||||
|   static bool _macos_13_2_plus = | ||||
|       [mpsCD instancesRespondToSelector:@selector(convolution3DWithSourceTensor:weightsTensor:descriptor:name:)] == YES; | ||||
|   static bool _macos_13_3_plus = [compileOptions respondsToSelector:@selector(maxTotalThreadsPerThreadgroup)] == YES; | ||||
|  | ||||
|   static bool _macos_14_0_plus = [mpsCD instancesRespondToSelector:@selector(conjugateWithTensor:name:)] == YES; | ||||
|   auto is_os_version_at_least = [](int major, int minor) { | ||||
|     @autoreleasepool { | ||||
|       NSProcessInfo* processInfo = [[NSProcessInfo alloc] init]; | ||||
|       return [processInfo | ||||
|           isOperatingSystemAtLeastVersion:{.majorVersion = major, .minorVersion = minor, .patchVersion = 0}]; | ||||
|     } | ||||
|   }; | ||||
|   static bool _macos_13_0_plus = is_os_version_at_least(13, 0); | ||||
|   static bool _macos_13_1_plus = is_os_version_at_least(13, 1); | ||||
|   static bool _macos_13_2_plus = is_os_version_at_least(13, 2); | ||||
|   static bool _macos_13_3_plus = is_os_version_at_least(13, 3); | ||||
|   static bool _macos_14_0_plus = is_os_version_at_least(14, 0); | ||||
|   static bool _macos_14_4_plus = is_os_version_at_least(14, 0); | ||||
|  | ||||
|   switch (version) { | ||||
|     case MacOSVersion::MACOS_VER_13_0_PLUS: | ||||
| @ -130,6 +130,8 @@ bool MPSDevice::isMacOS13Plus(MacOSVersion version) const { | ||||
|       return _macos_13_3_plus; | ||||
|     case MacOSVersion::MACOS_VER_14_0_PLUS: | ||||
|       return _macos_14_0_plus; | ||||
|     case MacOSVersion::MACOS_VER_14_4_PLUS: | ||||
|       return _macos_14_4_plus; | ||||
|     default: | ||||
|       return false; | ||||
|   } | ||||
|  | ||||
Some files were not shown because too many files have changed in this diff Show More
		Reference in New Issue
	
	Block a user
	