Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-27 09:04:53 +08:00)

Compare commits: embg/trito...fx_cpp

324 Commits
The 324 commits are listed in this view by bare SHA1 only (the Author and Date columns were empty, and no commit messages were captured); the file diffs follow.
.ci/docker/build.sh
@@ -91,9 +91,9 @@ _UCC_COMMIT=20eae37090a4ce1b32bcce6144ccad0b49943e0b
 # configuration, so we hardcode everything here rather than do it
 # from scratch
 case "$image" in
-  pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9)
+  pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9)
     CUDA_VERSION=12.4.0
-    CUDNN_VERSION=8
+    CUDNN_VERSION=9
     ANACONDA_PYTHON_VERSION=3.10
     GCC_VERSION=9
     PROTOBUF=yes
@@ -105,9 +105,9 @@ case "$image" in
     CONDA_CMAKE=yes
     TRITON=yes
     ;;
-  pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9)
+  pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9)
     CUDA_VERSION=12.1.1
-    CUDNN_VERSION=8
+    CUDNN_VERSION=9
     ANACONDA_PYTHON_VERSION=3.10
     GCC_VERSION=9
     PROTOBUF=yes
@@ -119,9 +119,9 @@ case "$image" in
     CONDA_CMAKE=yes
     TRITON=yes
     ;;
-  pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9-inductor-benchmarks)
+  pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9-inductor-benchmarks)
     CUDA_VERSION=12.4.0
-    CUDNN_VERSION=8
+    CUDNN_VERSION=9
     ANACONDA_PYTHON_VERSION=3.10
     GCC_VERSION=9
     PROTOBUF=yes
@@ -134,9 +134,9 @@ case "$image" in
     TRITON=yes
     INDUCTOR_BENCHMARKS=yes
     ;;
-  pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks)
+  pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9-inductor-benchmarks)
     CUDA_VERSION=12.1.1
-    CUDNN_VERSION=8
+    CUDNN_VERSION=9
     ANACONDA_PYTHON_VERSION=3.10
     GCC_VERSION=9
     PROTOBUF=yes
@@ -149,9 +149,9 @@ case "$image" in
     TRITON=yes
     INDUCTOR_BENCHMARKS=yes
     ;;
-  pytorch-linux-focal-cuda12.1-cudnn8-py3.12-gcc9-inductor-benchmarks)
+  pytorch-linux-focal-cuda12.1-cudnn9-py3.12-gcc9-inductor-benchmarks)
     CUDA_VERSION=12.1.1
-    CUDNN_VERSION=8
+    CUDNN_VERSION=9
     ANACONDA_PYTHON_VERSION=3.12
     GCC_VERSION=9
     PROTOBUF=yes
@@ -164,9 +164,9 @@ case "$image" in
     TRITON=yes
     INDUCTOR_BENCHMARKS=yes
     ;;
-  pytorch-linux-focal-cuda12.4-cudnn8-py3.12-gcc9-inductor-benchmarks)
+  pytorch-linux-focal-cuda12.4-cudnn9-py3.12-gcc9-inductor-benchmarks)
     CUDA_VERSION=12.4.0
-    CUDNN_VERSION=8
+    CUDNN_VERSION=9
     ANACONDA_PYTHON_VERSION=3.12
     GCC_VERSION=9
     PROTOBUF=yes
@@ -179,9 +179,9 @@ case "$image" in
     TRITON=yes
     INDUCTOR_BENCHMARKS=yes
     ;;
-  pytorch-linux-focal-cuda11.8-cudnn8-py3-gcc9)
+  pytorch-linux-focal-cuda11.8-cudnn9-py3-gcc9)
     CUDA_VERSION=11.8.0
-    CUDNN_VERSION=8
+    CUDNN_VERSION=9
     ANACONDA_PYTHON_VERSION=3.10
     GCC_VERSION=9
     PROTOBUF=yes
@@ -193,9 +193,9 @@ case "$image" in
     CONDA_CMAKE=yes
     TRITON=yes
     ;;
-  pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9)
+  pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9)
     CUDA_VERSION=12.4.0
-    CUDNN_VERSION=8
+    CUDNN_VERSION=9
     ANACONDA_PYTHON_VERSION=3.10
     GCC_VERSION=9
     PROTOBUF=yes
@@ -207,9 +207,9 @@ case "$image" in
     CONDA_CMAKE=yes
     TRITON=yes
     ;;
-  pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9)
+  pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9)
     CUDA_VERSION=12.1.1
-    CUDNN_VERSION=8
+    CUDNN_VERSION=9
     ANACONDA_PYTHON_VERSION=3.10
     GCC_VERSION=9
     PROTOBUF=yes
@@ -221,9 +221,9 @@ case "$image" in
     CONDA_CMAKE=yes
     TRITON=yes
     ;;
-  pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9)
+  pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9)
     CUDA_VERSION=12.4.0
-    CUDNN_VERSION=8
+    CUDNN_VERSION=9
     ANACONDA_PYTHON_VERSION=3.10
     GCC_VERSION=9
     PROTOBUF=yes
@@ -330,10 +330,10 @@ case "$image" in
     DOCS=yes
     INDUCTOR_BENCHMARKS=yes
     ;;
-  pytorch-linux-jammy-cuda11.8-cudnn8-py3.8-clang12)
+  pytorch-linux-jammy-cuda11.8-cudnn9-py3.8-clang12)
     ANACONDA_PYTHON_VERSION=3.8
     CUDA_VERSION=11.8
-    CUDNN_VERSION=8
+    CUDNN_VERSION=9
     CLANG_VERSION=12
     PROTOBUF=yes
     DB=yes
@@ -380,7 +380,7 @@ case "$image" in
     ANACONDA_PYTHON_VERSION=3.9
     CONDA_CMAKE=yes
     ;;
-  pytorch-linux-jammy-cuda11.8-cudnn8-py3.9-linter)
+  pytorch-linux-jammy-cuda11.8-cudnn9-py3.9-linter)
     ANACONDA_PYTHON_VERSION=3.9
     CUDA_VERSION=11.8
     CONDA_CMAKE=yes
@@ -447,7 +447,7 @@ tmp_tag=$(basename "$(mktemp -u)" | tr '[:upper:]' '[:lower:]')
 #when using cudnn version 8 install it separately from cuda
 if [[ "$image" == *cuda*  && ${OS} == "ubuntu" ]]; then
   IMAGE_NAME="nvidia/cuda:${CUDA_VERSION}-cudnn${CUDNN_VERSION}-devel-ubuntu${UBUNTU_VERSION}"
-  if [[ ${CUDNN_VERSION} == 8 ]]; then
+  if [[ ${CUDNN_VERSION} == 9 ]]; then
     IMAGE_NAME="nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}"
   fi
 fi
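
The hunk above moves the special case from cuDNN 8 to cuDNN 9: for cuDNN 9 the build now starts from the plain CUDA devel image and installs cuDNN itself. A minimal standalone sketch of the resulting image-name selection, with assumed example values (not part of the diff):

#!/bin/bash
# Sketch only: mirrors the IMAGE_NAME selection in the hunk above.
CUDA_VERSION=12.4.0
CUDNN_VERSION=9
UBUNTU_VERSION=20.04
IMAGE_NAME="nvidia/cuda:${CUDA_VERSION}-cudnn${CUDNN_VERSION}-devel-ubuntu${UBUNTU_VERSION}"
if [[ ${CUDNN_VERSION} == 9 ]]; then
  # cuDNN 9 is installed later by install_cudnn.sh, so use the plain devel image
  IMAGE_NAME="nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}"
fi
echo "${IMAGE_NAME}"  # prints nvidia/cuda:12.4.0-devel-ubuntu20.04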

@@ -499,7 +499,7 @@ docker build \
        "$@" \
        .
 
-# NVIDIA dockers for RC releases use tag names like `11.0-cudnn8-devel-ubuntu18.04-rc`,
+# NVIDIA dockers for RC releases use tag names like `11.0-cudnn9-devel-ubuntu18.04-rc`,
 # for this case we will set UBUNTU_VERSION to `18.04-rc` so that the Dockerfile could
 # find the correct image. As a result, here we have to replace the
 #   "$UBUNTU_VERSION" == "18.04-rc"
@@ -118,6 +118,13 @@ COPY ./common/install_cache.sh install_cache.sh
 ENV PATH /opt/cache/bin:$PATH
 RUN bash ./install_cache.sh && rm install_cache.sh
 
+# Install AOTriton
+COPY ci_commit_pins/aotriton.txt aotriton.txt
+COPY ./common/common_utils.sh common_utils.sh
+COPY ./common/install_aotriton.sh install_aotriton.sh
+RUN bash ./install_aotriton.sh /opt/rocm/aotriton && rm -rf install_aotriton.sh aotriton aotriton.txt common_utils.sh
+ENV AOTRITON_INSTALLED_PREFIX /opt/rocm/aotriton
+
 # Include BUILD_ENVIRONMENT environment variable in image
 ARG BUILD_ENVIRONMENT
 ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT}

.ci/docker/ci_commit_pins/aotriton.txt (new file, 1 line)
@@ -0,0 +1 @@
+24a3fe9cb57e5cda3c923df29743f9767194cc27

@@ -1 +1 @@
-bbe6246e37d8aa791c67daaf9d9d61b26c9ccfdc
+01cbe5045a6898c9a925f01435c8277b2fe6afcc

@@ -1 +1 @@
-aee0630e1208fdd28411e8ec2448981eb37bc83a
+45fff310c891f5a92d55445adf8cc9d29df5841e

.ci/docker/common/install_aotriton.sh (new file, 24 lines)
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+set -ex
+
+source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh"
+
+AOTRITON_DIR="aotriton"
+AOTRITON_PINNED_NAME="aotriton" # No .txt extension
+AOTRITON_PINNED_COMMIT=$(get_pinned_commit ${AOTRITON_PINNED_NAME})
+AOTRITON_INSTALL_PREFIX="$1"
+
+git clone https://github.com/ROCm/aotriton.git "${AOTRITON_DIR}"
+cd "${AOTRITON_DIR}"
+git checkout "${AOTRITON_PINNED_COMMIT}"
+git submodule sync --recursive
+git submodule update --init --recursive --force --depth 1
+mkdir build
+cd build
+cmake .. -G Ninja -DCMAKE_INSTALL_PREFIX=./install_dir -DCMAKE_BUILD_TYPE=Release -DAOTRITON_COMPRESS_KERNEL=OFF -DAOTRITON_NO_PYTHON=ON -DAOTRITON_NO_SHARED=ON
+ninja install
+mkdir -p "${AOTRITON_INSTALL_PREFIX}"
+cp -r install_dir/* "${AOTRITON_INSTALL_PREFIX}"
+find /tmp/ -mindepth 1 -delete
+rm -rf ~/.triton
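
The new script depends on get_pinned_commit from common_utils.sh, which this compare does not show. A minimal stand-in consistent with the "No .txt extension" comment and with the Dockerfile copying ci_commit_pins/aotriton.txt next to the script (an assumption, not the actual helper):

# Hypothetical sketch of the helper; the real implementation lives in common_utils.sh.
get_pinned_commit() {
  cat "${1}.txt"  # e.g. get_pinned_commit aotriton -> contents of aotriton.txt
}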

@@ -3,7 +3,7 @@
 set -ex
 
 install_ubuntu() {
-  # NVIDIA dockers for RC releases use tag names like `11.0-cudnn8-devel-ubuntu18.04-rc`,
+  # NVIDIA dockers for RC releases use tag names like `11.0-cudnn9-devel-ubuntu18.04-rc`,
   # for this case we will set UBUNTU_VERSION to `18.04-rc` so that the Dockerfile could
   # find the correct image. As a result, here we have to check for
   #   "$UBUNTU_VERSION" == "18.04"*

.ci/docker/common/install_cudnn.sh
@@ -1,23 +1,18 @@
 #!/bin/bash
 
-if [[ ${CUDNN_VERSION} == 8 ]]; then
+if [[ -n "${CUDNN_VERSION}" ]]; then
     # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
     mkdir tmp_cudnn
     pushd tmp_cudnn
-    if [[ ${CUDA_VERSION:0:4} == "12.4" ]]; then
-        CUDNN_NAME="cudnn-linux-x86_64-8.9.7.29_cuda12-archive"
-        curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/${CUDNN_NAME}.tar.xz
-    elif [[ ${CUDA_VERSION:0:4} == "12.1" ]]; then
-        CUDNN_NAME="cudnn-linux-x86_64-8.9.2.26_cuda12-archive"
-        curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/${CUDNN_NAME}.tar.xz
-    elif [[ ${CUDA_VERSION:0:4} == "11.8" ]]; then
-        CUDNN_NAME="cudnn-linux-x86_64-8.7.0.84_cuda11-archive"
-        curl --retry 3 -OLs https://developer.download.nvidia.com/compute/redist/cudnn/v8.7.0/local_installers/11.8/${CUDNN_NAME}.tar.xz
+    if [[ ${CUDA_VERSION:0:2} == "12" ]]; then
+        CUDNN_NAME="cudnn-linux-x86_64-9.1.0.70_cuda12-archive"
+    elif [[ ${CUDA_VERSION:0:2} == "11" ]]; then
+        CUDNN_NAME="cudnn-linux-x86_64-9.1.0.70_cuda11-archive"
     else
         print "Unsupported CUDA version ${CUDA_VERSION}"
         exit 1
     fi
-
+    curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/${CUDNN_NAME}.tar.xz
     tar xf ${CUDNN_NAME}.tar.xz
     cp -a ${CUDNN_NAME}/include/* /usr/local/cuda/include/
     cp -a ${CUDNN_NAME}/lib/* /usr/local/cuda/lib64/
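
The rewrite keys the archive name off the CUDA major version alone and hoists the shared curl out of the branches. A standalone illustration with an assumed CUDA_VERSION; note it uses echo where the script above calls print, which is not a bash builtin:

CUDA_VERSION=12.1.1
if [[ ${CUDA_VERSION:0:2} == "12" ]]; then
    CUDNN_NAME="cudnn-linux-x86_64-9.1.0.70_cuda12-archive"
elif [[ ${CUDA_VERSION:0:2} == "11" ]]; then
    CUDNN_NAME="cudnn-linux-x86_64-9.1.0.70_cuda11-archive"
else
    echo "Unsupported CUDA version ${CUDA_VERSION}"
    exit 1
fi
echo "${CUDNN_NAME}"  # cudnn-linux-x86_64-9.1.0.70_cuda12-archive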

.ci/docker/common/install_triton.sh
@@ -19,7 +19,7 @@ elif [ -n "${XPU_VERSION}" ]; then
   TRITON_REPO="https://github.com/intel/intel-xpu-backend-for-triton"
   TRITON_TEXT_FILE="triton-xpu"
 else
-  TRITON_REPO="https://github.com/embg/triton"
+  TRITON_REPO="https://github.com/openai/triton"
   TRITON_TEXT_FILE="triton"
 fi
 
@@ -139,7 +139,7 @@ COPY --from=pytorch/llvm:9.0.1 /opt/llvm /opt/llvm
 ARG CUDNN_VERSION
 ARG CUDA_VERSION
 COPY ./common/install_cudnn.sh install_cudnn.sh
-RUN if [ "${CUDNN_VERSION}" -eq 8 ]; then bash install_cudnn.sh; fi
+RUN if [ -n "${CUDNN_VERSION}" ]; then bash install_cudnn.sh; fi
 RUN rm install_cudnn.sh
 
 # Install CUSPARSELT

@@ -110,6 +110,13 @@ COPY ./common/install_cache.sh install_cache.sh
 ENV PATH /opt/cache/bin:$PATH
 RUN bash ./install_cache.sh && rm install_cache.sh
 
+# Install AOTriton
+COPY ci_commit_pins/aotriton.txt aotriton.txt
+COPY ./common/common_utils.sh common_utils.sh
+COPY ./common/install_aotriton.sh install_aotriton.sh
+RUN bash ./install_aotriton.sh /opt/rocm/aotriton && rm -rf install_aotriton.sh aotriton aotriton.txt common_utils.sh
+ENV AOTRITON_INSTALLED_PREFIX /opt/rocm/aotriton
+
 # Include BUILD_ENVIRONMENT environment variable in image
 ARG BUILD_ENVIRONMENT
 ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT}

.ci/pytorch/test.sh
@@ -368,7 +368,7 @@ test_inductor_cpp_wrapper_abi_compatible() {
 
   echo "Testing Inductor cpp wrapper mode with TORCHINDUCTOR_ABI_COMPATIBLE=1"
   # cpu stack allocation causes segfault and needs more investigation
-  python test/run_test.py --include inductor/test_cpu_cpp_wrapper
+  PYTORCH_TESTING_DEVICE_ONLY_FOR="" python test/run_test.py --include inductor/test_cpu_cpp_wrapper
   python test/run_test.py --include inductor/test_cuda_cpp_wrapper
 
   TORCHINDUCTOR_CPP_WRAPPER=1 python benchmarks/dynamo/timm_models.py --device cuda --accuracy --amp \
@@ -565,7 +565,11 @@ test_dynamo_benchmark() {
     test_single_dynamo_benchmark "dashboard" "$suite" "$shard_id" "$@"
   else
     if [[ "${TEST_CONFIG}" == *cpu_inductor* ]]; then
-      test_single_dynamo_benchmark "inference" "$suite" "$shard_id" --inference --float32 "$@"
+      if [[ "${TEST_CONFIG}" == *freezing* ]]; then
+        test_single_dynamo_benchmark "inference" "$suite" "$shard_id" --inference --float32 --freezing "$@"
+      else
+        test_single_dynamo_benchmark "inference" "$suite" "$shard_id" --inference --float32 "$@"
+      fi
     elif [[ "${TEST_CONFIG}" == *aot_inductor* ]]; then
       test_single_dynamo_benchmark "inference" "$suite" "$shard_id" --inference --bfloat16 "$@"
     else
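
The second hunk duplicates the benchmark invocation to add --freezing for freezing configs. An equivalent alternative shape (a sketch, not what the PR does) that factors the flag into a variable:

EXTRA_FLAGS=""
if [[ "${TEST_CONFIG}" == *freezing* ]]; then
  EXTRA_FLAGS="--freezing"
fi
# EXTRA_FLAGS is intentionally left unquoted so an empty value adds no argument
test_single_dynamo_benchmark "inference" "$suite" "$shard_id" --inference --float32 ${EXTRA_FLAGS} "$@"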

.github/actionlint.yaml (vendored, 25 lines changed)
@@ -1,9 +1,12 @@
 self-hosted-runner:
   labels:
+    # GitHub hosted x86 Linux runners
     - linux.20_04.4x
     - linux.20_04.16x
-    - linux.large
+    # Repo-specific LF hosted ARC runners
     - linux.large.arc
+    # Organization-wide AWS Linux Runners
+    - linux.large
     - linux.2xlarge
     - linux.4xlarge
     - linux.12xlarge
@@ -13,16 +16,34 @@ self-hosted-runner:
     - linux.8xlarge.nvidia.gpu
     - linux.16xlarge.nvidia.gpu
     - linux.g5.4xlarge.nvidia.gpu
+    # Organization-wide AWS Linux Runners on Linux Foundation account
+    - lf.linux.large
+    - lf.linux.2xlarge
+    - lf.linux.4xlarge
+    - lf.linux.12xlarge
+    - lf.linux.24xlarge
+    - lf.linux.arm64.2xlarge
+    - lf.linux.4xlarge.nvidia.gpu
+    - lf.linux.8xlarge.nvidia.gpu
+    - lf.linux.16xlarge.nvidia.gpu
+    - lf.linux.g5.4xlarge.nvidia.gpu
+    # Repo-specific IBM hosted S390x runner
     - linux.s390x
+    # Organization wide AWS Windows runners
     - windows.4xlarge.nonephemeral
     - windows.8xlarge.nvidia.gpu
     - windows.8xlarge.nvidia.gpu.nonephemeral
     - windows.g5.4xlarge.nvidia.gpu
-    - bm-runner
+    # Organization-wide AMD hosted MI300 runners
     - linux.rocm.gpu
+    # Repo-specific Apple hosted  runners
+    - macos-m1-ultra
+    - macos-m2-14
+    # Org wise AWS `mac2.metal` runners (2020 Mac mini hardware powered by Apple silicon M1 processors)
     - macos-m1-stable
     - macos-m1-13
     - macos-m1-14
+    # GitHub-hosted MacOS runners
     - macos-latest-xlarge
     - macos-13-xlarge
     - macos-14-xlarge

.github/pytorch-probot.yml (vendored, 1 line changed)
@@ -8,6 +8,7 @@ ciflow_push_tags:
 - ciflow/inductor
 - ciflow/inductor-perf-compare
 - ciflow/inductor-micro-benchmark
+- ciflow/inductor-cu124
 - ciflow/linux-aarch64
 - ciflow/mps
 - ciflow/nightly

.github/requirements/conda-env-Linux-X64.txt (vendored, 2 lines changed)
@@ -5,4 +5,4 @@ ninja=1.10.2
 numpy=1.23.3
 pyyaml=6.0
 setuptools=68.2.2
-typing-extensions=4.3.0
+typing-extensions=4.9.0

.github/requirements/conda-env-iOS.txt (vendored, 2 lines changed)
@@ -4,4 +4,4 @@ ninja=1.10.2
 numpy=1.23.3
 pyyaml=6.0
 setuptools=68.2.2
-typing-extensions=4.3.0
+typing-extensions=4.9.0

.github/requirements/conda-env-macOS-ARM64 (vendored, 2 lines changed)
@@ -2,7 +2,7 @@ numpy=1.22.3
 pyyaml=6.0
 setuptools=61.2.0
 cmake=3.22.*
-typing-extensions=4.3.0
+typing-extensions=4.9.0
 dataclasses=0.8
 pip=22.2.2
 pillow=10.0.1

.github/requirements/conda-env-macOS-X64 (vendored, 2 lines changed)
@@ -4,7 +4,7 @@ numpy=1.21.2
 pyyaml=5.3
 setuptools=46.0.0
 cmake=3.22.*
-typing-extensions=4.3.0
+typing-extensions=4.9.0
 dataclasses=0.8
 pip=22.2.2
 pillow=10.0.1

.github/scripts/generate_binary_build_matrix.py (vendored, 12 lines changed)
@@ -19,7 +19,7 @@ CUDA_ARCHES = ["11.8", "12.1", "12.4"]
 CUDA_ARCHES_FULL_VERSION = {"11.8": "11.8.0", "12.1": "12.1.1", "12.4": "12.4.0"}
 
 
-CUDA_ARCHES_CUDNN_VERSION = {"11.8": "8", "12.1": "8", "12.4": "8"}
+CUDA_ARCHES_CUDNN_VERSION = {"11.8": "9", "12.1": "9", "12.4": "9"}
 
 
 ROCM_ARCHES = ["6.0", "6.1"]
@@ -42,7 +42,7 @@ PYTORCH_EXTRA_INSTALL_REQUIREMENTS = {
         "nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | "  # noqa: B950
         "nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | "
         "nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' | "
-        "nvidia-cudnn-cu11==8.7.0.84; platform_system == 'Linux' and platform_machine == 'x86_64' | "
+        "nvidia-cudnn-cu11==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | "
         "nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' | "
         "nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' | "
         "nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | "
@@ -55,7 +55,7 @@ PYTORCH_EXTRA_INSTALL_REQUIREMENTS = {
         "nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | "  # noqa: B950
         "nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | "
         "nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | "
-        "nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | "
+        "nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | "
         "nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | "
         "nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | "
         "nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | "
@@ -68,7 +68,7 @@ PYTORCH_EXTRA_INSTALL_REQUIREMENTS = {
         "nvidia-cuda-nvrtc-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | "
         "nvidia-cuda-runtime-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | "
         "nvidia-cuda-cupti-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | "
-        "nvidia-cudnn-cu12==8.9.7.29; platform_system == 'Linux' and platform_machine == 'x86_64' | "
+        "nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cublas-cu12==12.4.2.65; platform_system == 'Linux' and platform_machine == 'x86_64' | "
         "nvidia-cufft-cu12==11.2.0.44; platform_system == 'Linux' and platform_machine == 'x86_64' | "
         "nvidia-curand-cu12==10.3.5.119; platform_system == 'Linux' and platform_machine == 'x86_64' | "
@@ -347,6 +347,10 @@ def generate_wheels_matrix(
     for python_version in python_versions:
         for arch_version in arches:
             gpu_arch_type = arch_type(arch_version)
+            # Disable py3.12 builds for ROCm because of triton dependency
+            # on llnl-hatchet, which doesn't have py3.12 wheels available
+            if gpu_arch_type == "rocm" and python_version == "3.12":
+                continue
             gpu_arch_version = (
                 ""
                 if arch_version == "cpu"
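
After this change every CUDA flavor pins the same cuDNN 9 wheel. For example, the bumped extra requirement for a CUDA 12 build corresponds to (an assumed manual invocation, shown for context only):

pip install "nvidia-cudnn-cu12==9.1.0.70"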

.github/scripts/test_trymerge.py (vendored, 12 lines changed)
@@ -773,13 +773,13 @@ class TestBypassFailures(TestCase):
                 # than the one on the base commit. This should still count as broken trunk
                 "pr_num": 104214,
                 "related_failure_count": 0,
-                "unrelated_failure_count": 1,
+                "flaky_or_broken_trunk": 1,
             },
             {
                 # This PR had one broken trunk failure and it used ghstack
                 "pr_num": 105145,
                 "related_failure_count": 0,
-                "unrelated_failure_count": 1,
+                "flaky_or_broken_trunk": 1,
             },
             {
                 # The failure on the merge base was retried successfully and
@@ -788,20 +788,20 @@ class TestBypassFailures(TestCase):
                 # be used to detect broken trunk
                 "pr_num": 107160,
                 "related_failure_count": 0,
-                "unrelated_failure_count": 4,
+                "flaky_or_broken_trunk": 1,
             },
             {
                 # This PR used Dr.CI broken trunk classification
                 "pr_num": 111253,
                 "related_failure_count": 1,
-                "unrelated_failure_count": 2,
+                "flaky_or_broken_trunk": 1,
             },
         ]
 
         for case in test_cases:
             pr_num = case["pr_num"]
             related_failure_count = case["related_failure_count"]
-            unrelated_failure_count = case["unrelated_failure_count"]
+            flaky_or_broken_trunk = case["flaky_or_broken_trunk"]
 
             pr = GitHubPR("pytorch", "pytorch", pr_num)
             checks = pr.get_checkrun_conclusions()
@@ -823,7 +823,7 @@
             )
             self.assertTrue(len(pending) == 0)
             self.assertTrue(
-                len(failed) == unrelated_failure_count + related_failure_count
+                len(failed) == flaky_or_broken_trunk + related_failure_count
             )
 
     def test_ignore_current(self, *args: Any) -> None:

.github/scripts/trymerge.py (vendored, 30 lines changed)
@@ -2027,10 +2027,8 @@ def categorize_checks(
     pending_checks: List[Tuple[str, Optional[str], Optional[int]]] = []
     failed_checks: List[Tuple[str, Optional[str], Optional[int]]] = []
 
-    # ok_failed_checks is used with ok_failed_checks_threshold while ignorable_failed_checks
-    # is used to keep track of all ignorable failures when saving the merge record on Rockset
-    ok_failed_checks: List[Tuple[str, Optional[str], Optional[int]]] = []
-    ignorable_failed_checks: Dict[str, List[Any]] = defaultdict(list)
+    # failed_checks_categorization is used to keep track of all ignorable failures when saving the merge record on Rockset
+    failed_checks_categorization: Dict[str, List[Any]] = defaultdict(list)
 
     # If required_checks is not set or empty, consider all names are relevant
     relevant_checknames = [
@@ -2058,36 +2056,38 @@ def categorize_checks(
             continue
         elif not is_passing_status(check_runs[checkname].status):
             target = (
-                ignorable_failed_checks[classification]
+                failed_checks_categorization[classification]
                 if classification
                 in ("IGNORE_CURRENT_CHECK", "BROKEN_TRUNK", "FLAKY", "UNSTABLE")
                 else failed_checks
             )
             target.append((checkname, url, job_id))
 
-            if classification in ("BROKEN_TRUNK", "FLAKY", "UNSTABLE"):
-                ok_failed_checks.append((checkname, url, job_id))
+    flaky_or_broken_trunk = (
+        failed_checks_categorization["BROKEN_TRUNK"]
+        + failed_checks_categorization["FLAKY"]
+    )
 
-    if ok_failed_checks:
+    if flaky_or_broken_trunk:
         warn(
-            f"The following {len(ok_failed_checks)} checks failed but were likely due flakiness or broken trunk: "
-            + ", ".join([x[0] for x in ok_failed_checks])
+            f"The following {len(flaky_or_broken_trunk)} checks failed but were likely due flakiness or broken trunk: "
+            + ", ".join([x[0] for x in flaky_or_broken_trunk])
             + (
                 f" but this is greater than the threshold of {ok_failed_checks_threshold} so merge will fail"
                 if ok_failed_checks_threshold is not None
-                and len(ok_failed_checks) > ok_failed_checks_threshold
+                and len(flaky_or_broken_trunk) > ok_failed_checks_threshold
                 else ""
            )
         )
 
     if (
         ok_failed_checks_threshold is not None
-        and len(ok_failed_checks) > ok_failed_checks_threshold
+        and len(flaky_or_broken_trunk) > ok_failed_checks_threshold
     ):
-        failed_checks = failed_checks + ok_failed_checks
+        failed_checks = failed_checks + flaky_or_broken_trunk
 
-    # The list of ignorable_failed_checks is returned so that it can be saved into the Rockset merge record
-    return (pending_checks, failed_checks, ignorable_failed_checks)
+    # The list of failed_checks_categorization is returned so that it can be saved into the Rockset merge record
+    return (pending_checks, failed_checks, failed_checks_categorization)
 
 
 def merge(

.github/workflows/docker-builds.yml (vendored, 18 lines changed)
@@ -38,19 +38,19 @@ jobs:
       matrix:
         runner: [linux.12xlarge]
         docker-image-name: [
-          pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9,
-          pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9-inductor-benchmarks,
-          pytorch-linux-focal-cuda12.4-cudnn8-py3.12-gcc9-inductor-benchmarks,
-          pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9,
-          pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks,
-          pytorch-linux-focal-cuda12.1-cudnn8-py3.12-gcc9-inductor-benchmarks,
-          pytorch-linux-focal-cuda11.8-cudnn8-py3-gcc9,
+          pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9,
+          pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9-inductor-benchmarks,
+          pytorch-linux-focal-cuda12.4-cudnn9-py3.12-gcc9-inductor-benchmarks,
+          pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9,
+          pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9-inductor-benchmarks,
+          pytorch-linux-focal-cuda12.1-cudnn9-py3.12-gcc9-inductor-benchmarks,
+          pytorch-linux-focal-cuda11.8-cudnn9-py3-gcc9,
           pytorch-linux-focal-py3.8-clang10,
           pytorch-linux-focal-py3.11-clang10,
           pytorch-linux-focal-py3.12-clang10,
           pytorch-linux-focal-rocm-n-1-py3,
           pytorch-linux-focal-rocm-n-py3,
-          pytorch-linux-jammy-cuda11.8-cudnn8-py3.8-clang12,
+          pytorch-linux-jammy-cuda11.8-cudnn9-py3.8-clang12,
           pytorch-linux-focal-py3-clang9-android-ndk-r21e,
           pytorch-linux-jammy-py3.8-gcc11,
           pytorch-linux-jammy-py3.8-gcc11-inductor-benchmarks,
@@ -58,7 +58,7 @@ jobs:
           pytorch-linux-jammy-py3-clang15-asan,
           pytorch-linux-focal-py3-clang10-onnx,
           pytorch-linux-focal-linter,
-          pytorch-linux-jammy-cuda11.8-cudnn8-py3.9-linter,
+          pytorch-linux-jammy-cuda11.8-cudnn9-py3.9-linter,
           pytorch-linux-jammy-py3-clang12-executorch
           ]
         include:

.github/workflows/docker-release.yml (vendored, 7 lines changed)
@@ -149,3 +149,10 @@ jobs:
       - name: Teardown Linux
         uses: pytorch/test-infra/.github/actions/teardown-linux@main
         if: always()
+
+  validate:
+    needs: build
+    uses: pytorch/builder/.github/workflows/validate-docker-images.yml@main
+    with:
+      channel: nightly
+      ref: main

.github/workflows/generated-linux-aarch64-binary-manywheel-nightly.yml (generated, vendored, 10 lines changed)
@@ -54,7 +54,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_8-cpu-aarch64
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
   manywheel-py3_8-cpu-aarch64-test:  # Testing
@@ -162,7 +162,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_9-cpu-aarch64
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
   manywheel-py3_9-cpu-aarch64-test:  # Testing
@@ -270,7 +270,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
      build_name: manywheel-py3_10-cpu-aarch64
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
   manywheel-py3_10-cpu-aarch64-test:  # Testing
@@ -378,7 +378,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_11-cpu-aarch64
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
   manywheel-py3_11-cpu-aarch64-test:  # Testing
@@ -486,7 +486,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_12-cpu-aarch64
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
   manywheel-py3_12-cpu-aarch64-test:  # Testing

.github/workflows/generated-linux-binary-manywheel-main.yml (generated, vendored, 6 lines changed)
@@ -48,7 +48,7 @@ jobs:
       DESIRED_PYTHON: "3.8"
       build_name: manywheel-py3_8-cuda11_8
       build_environment: linux-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu11==8.7.0.84; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu11==11.4.1.48; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu11==11.7.5.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu11==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu11==11.8.86; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu11==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu11==11.4.1.48; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu11==11.7.5.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu11==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu11==11.8.86; platform_system == 'Linux' and platform_machine == 'x86_64'
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
   manywheel-py3_8-cuda11_8-test:  # Testing
@@ -88,7 +88,7 @@ jobs:
       DESIRED_PYTHON: "3.8"
       build_name: manywheel-py3_8-cuda12_1
       build_environment: linux-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
   manywheel-py3_8-cuda12_1-test:  # Testing
@@ -128,7 +128,7 @@ jobs:
       DESIRED_PYTHON: "3.8"
       build_name: manywheel-py3_8-cuda12_4
       build_environment: linux-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.7.29; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.4.2.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.2.0.44; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.5.119; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.6.0.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.3.0.142; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.4.2.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.2.0.44; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.5.119; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.6.0.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.3.0.142; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64'
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.4.2.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.2.0.44; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.5.119; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.6.0.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.3.0.142; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_8-cuda12_4-test:  # Testing | ||||
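Every hunk above (and in the nightly file that follows) is the same one-pin bump: nvidia-cudnn-cu11 moves from 8.7.0.84 to 9.1.0.70, and nvidia-cudnn-cu12 from 8.9.2.26 (CUDA 12.1) or 8.9.7.29 (CUDA 12.4) to 9.1.0.70, with every other pin untouched. A sketch, again assuming `packaging` and abbreviated excerpts in place of the full strings, that reports which pin changed between an old and a new value:

from packaging.requirements import Requirement

def pins(extra):
    """Map each requirement name to its version specifier."""
    return {r.name: str(r.specifier)
            for r in (Requirement(s.strip()) for s in extra.split("|"))}

# Abbreviated old/new excerpts standing in for the full workflow values.
old = "nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux'"
new = "nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux'"

before, after = pins(old), pins(new)
for name, spec in before.items():
    if after.get(name) != spec:
        print(name, spec, "->", after.get(name))
# prints: nvidia-cudnn-cu12 ==8.9.2.26 -> ==9.1.0.70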

.github/workflows/generated-linux-binary-manywheel-nightly.yml (236 changes; generated, vendored)
							| @ -174,7 +174,7 @@ jobs: | ||||
|       DESIRED_PYTHON: "3.8" | ||||
|       build_name: manywheel-py3_8-cuda11_8 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu11==8.7.0.84; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu11==11.4.1.48; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu11==11.7.5.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu11==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu11==11.8.86; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu11==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu11==11.4.1.48; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu11==11.7.5.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu11==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu11==11.8.86; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_8-cuda11_8-test:  # Testing | ||||
| @ -237,7 +237,7 @@ jobs: | ||||
|       DESIRED_PYTHON: "3.8" | ||||
|       build_name: manywheel-py3_8-cuda12_1 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_8-cuda12_1-test:  # Testing | ||||
| @ -300,7 +300,7 @@ jobs: | ||||
|       DESIRED_PYTHON: "3.8" | ||||
|       build_name: manywheel-py3_8-cuda12_4 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.7.29; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.4.2.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.2.0.44; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.5.119; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.6.0.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.3.0.142; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.4.2.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.2.0.44; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.5.119; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.6.0.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.3.0.142; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_8-cuda12_4-test:  # Testing | ||||
| @ -690,7 +690,7 @@ jobs: | ||||
|       DESIRED_PYTHON: "3.9" | ||||
|       build_name: manywheel-py3_9-cuda11_8 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu11==8.7.0.84; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu11==11.4.1.48; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu11==11.7.5.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu11==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu11==11.8.86; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu11==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu11==11.4.1.48; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu11==11.7.5.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu11==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu11==11.8.86; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_9-cuda11_8-test:  # Testing | ||||
| @ -753,7 +753,7 @@ jobs: | ||||
|       DESIRED_PYTHON: "3.9" | ||||
|       build_name: manywheel-py3_9-cuda12_1 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_9-cuda12_1-test:  # Testing | ||||
| @ -816,7 +816,7 @@ jobs: | ||||
|       DESIRED_PYTHON: "3.9" | ||||
|       build_name: manywheel-py3_9-cuda12_4 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.7.29; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.4.2.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.2.0.44; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.5.119; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.6.0.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.3.0.142; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.4.2.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.2.0.44; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.5.119; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.6.0.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.3.0.142; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_9-cuda12_4-test:  # Testing | ||||
| @ -1206,7 +1206,7 @@ jobs: | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|       build_name: manywheel-py3_10-cuda11_8 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu11==8.7.0.84; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu11==11.4.1.48; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu11==11.7.5.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu11==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu11==11.8.86; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu11==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu11==11.4.1.48; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu11==11.7.5.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu11==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu11==11.8.86; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_10-cuda11_8-test:  # Testing | ||||
| @ -1269,7 +1269,7 @@ jobs: | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|       build_name: manywheel-py3_10-cuda12_1 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_10-cuda12_1-test:  # Testing | ||||
| @ -1332,7 +1332,7 @@ jobs: | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|       build_name: manywheel-py3_10-cuda12_4 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.7.29; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.4.2.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.2.0.44; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.5.119; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.6.0.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.3.0.142; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.4.2.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.2.0.44; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.5.119; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.6.0.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.3.0.142; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_10-cuda12_4-test:  # Testing | ||||
| @ -1722,7 +1722,7 @@ jobs: | ||||
|       DESIRED_PYTHON: "3.11" | ||||
|       build_name: manywheel-py3_11-cuda11_8 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu11==8.7.0.84; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu11==11.4.1.48; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu11==11.7.5.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu11==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu11==11.8.86; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu11==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu11==11.4.1.48; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu11==11.7.5.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu11==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu11==11.8.86; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_11-cuda11_8-test:  # Testing | ||||
| @ -1785,7 +1785,7 @@ jobs: | ||||
|       DESIRED_PYTHON: "3.11" | ||||
|       build_name: manywheel-py3_11-cuda12_1 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_11-cuda12_1-test:  # Testing | ||||
| @ -1848,7 +1848,7 @@ jobs: | ||||
|       DESIRED_PYTHON: "3.11" | ||||
|       build_name: manywheel-py3_11-cuda12_4 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.7.29; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.4.2.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.2.0.44; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.5.119; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.6.0.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.3.0.142; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.4.2.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.2.0.44; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.5.119; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.6.0.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.3.0.142; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_11-cuda12_4-test:  # Testing | ||||
| @ -2238,7 +2238,7 @@ jobs: | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|       build_name: manywheel-py3_12-cuda11_8 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu11==8.7.0.84; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu11==11.4.1.48; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu11==11.7.5.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu11==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu11==11.8.86; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu11==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu11==11.4.1.48; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu11==11.7.5.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu11==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu11==11.8.86; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_12-cuda11_8-test:  # Testing | ||||
| @ -2301,7 +2301,7 @@ jobs: | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|       build_name: manywheel-py3_12-cuda12_1 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_12-cuda12_1-test:  # Testing | ||||
| @ -2364,7 +2364,7 @@ jobs: | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|       build_name: manywheel-py3_12-cuda12_4 | ||||
|       build_environment: linux-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.7.29; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.4.2.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.2.0.44; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.5.119; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.6.0.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.3.0.142; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.4.2.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.2.0.44; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.5.119; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.6.0.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.3.0.142; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_12-cuda12_4-test:  # Testing | ||||
| @ -2410,209 +2410,3 @@ jobs: | ||||
|       conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }} | ||||
|       conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_12-rocm6_0-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       BUILDER_ROOT: /builder | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: rocm6.0 | ||||
|       GPU_ARCH_VERSION: 6.0 | ||||
|       GPU_ARCH_TYPE: rocm | ||||
|       DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|       build_name: manywheel-py3_12-rocm6_0 | ||||
|       build_environment: linux-binary-manywheel | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_12-rocm6_0-test:  # Testing | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: manywheel-py3_12-rocm6_0-build | ||||
|     runs-on: linux.rocm.gpu | ||||
|     timeout-minutes: 240 | ||||
|     env: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       BUILDER_ROOT: /builder | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: rocm6.0 | ||||
|       GPU_ARCH_VERSION: 6.0 | ||||
|       GPU_ARCH_TYPE: rocm | ||||
|       SKIP_ALL_TESTS: 1 | ||||
|       DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
|       - uses: actions/download-artifact@v3 | ||||
|         name: Download Build Artifacts | ||||
|         with: | ||||
|           name: manywheel-py3_12-rocm6_0 | ||||
|           path: "${{ runner.temp }}/artifacts/" | ||||
|       - name: Checkout PyTorch | ||||
|         uses: malfet/checkout@silent-checkout | ||||
|         with: | ||||
|           ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} | ||||
|           submodules: recursive | ||||
|           path: pytorch | ||||
|           quiet-checkout: true | ||||
|       - name: Clean PyTorch checkout | ||||
|         run: | | ||||
|           # Remove any artifacts from the previous checkouts | ||||
|           git clean -fxd | ||||
|         working-directory: pytorch | ||||
|       - name: Checkout pytorch/builder | ||||
|         uses: malfet/checkout@silent-checkout | ||||
|         with: | ||||
|           ref: main | ||||
|           submodules: recursive | ||||
|           repository: pytorch/builder | ||||
|           path: builder | ||||
|           quiet-checkout: true | ||||
|       - name: Clean pytorch/builder checkout | ||||
|         run: | | ||||
|           # Remove any artifacts from the previous checkouts | ||||
|           git clean -fxd | ||||
|         working-directory: builder | ||||
|       - name: ROCm set GPU_FLAG | ||||
|         run: | | ||||
|           echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri --group-add video --group-add daemon" >> "${GITHUB_ENV}" | ||||
|       - name: Pull Docker image | ||||
|         uses: pytorch/test-infra/.github/actions/pull-docker-image@main | ||||
|         with: | ||||
|           docker-image: pytorch/manylinux-builder:rocm6.0-main | ||||
|       - name: Test Pytorch binary | ||||
|         uses: ./pytorch/.github/actions/test-pytorch-binary | ||||
|       - name: Teardown ROCm | ||||
|         uses: ./.github/actions/teardown-rocm | ||||
|   manywheel-py3_12-rocm6_0-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_12-rocm6_0-test | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       BUILDER_ROOT: /builder | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: rocm6.0 | ||||
|       GPU_ARCH_VERSION: 6.0 | ||||
|       GPU_ARCH_TYPE: rocm | ||||
|       DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|       build_name: manywheel-py3_12-rocm6_0 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|       conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }} | ||||
|       conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
|  | ||||
|   manywheel-py3_12-rocm6_1-build: | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     uses: ./.github/workflows/_binary-build-linux.yml | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       BUILDER_ROOT: /builder | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: rocm6.1 | ||||
|       GPU_ARCH_VERSION: 6.1 | ||||
|       GPU_ARCH_TYPE: rocm | ||||
|       DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|       build_name: manywheel-py3_12-rocm6_1 | ||||
|       build_environment: linux-binary-manywheel | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_12-rocm6_1-test:  # Testing | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     needs: manywheel-py3_12-rocm6_1-build | ||||
|     runs-on: linux.rocm.gpu | ||||
|     timeout-minutes: 240 | ||||
|     env: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       BUILDER_ROOT: /builder | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: rocm6.1 | ||||
|       GPU_ARCH_VERSION: 6.1 | ||||
|       GPU_ARCH_TYPE: rocm | ||||
|       SKIP_ALL_TESTS: 1 | ||||
|       DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|     steps: | ||||
|       - name: Setup ROCm | ||||
|         uses: ./.github/actions/setup-rocm | ||||
|       - uses: actions/download-artifact@v3 | ||||
|         name: Download Build Artifacts | ||||
|         with: | ||||
|           name: manywheel-py3_12-rocm6_1 | ||||
|           path: "${{ runner.temp }}/artifacts/" | ||||
|       - name: Checkout PyTorch | ||||
|         uses: malfet/checkout@silent-checkout | ||||
|         with: | ||||
|           ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} | ||||
|           submodules: recursive | ||||
|           path: pytorch | ||||
|           quiet-checkout: true | ||||
|       - name: Clean PyTorch checkout | ||||
|         run: | | ||||
|           # Remove any artifacts from the previous checkouts | ||||
|           git clean -fxd | ||||
|         working-directory: pytorch | ||||
|       - name: Checkout pytorch/builder | ||||
|         uses: malfet/checkout@silent-checkout | ||||
|         with: | ||||
|           ref: main | ||||
|           submodules: recursive | ||||
|           repository: pytorch/builder | ||||
|           path: builder | ||||
|           quiet-checkout: true | ||||
|       - name: Clean pytorch/builder checkout | ||||
|         run: | | ||||
|           # Remove any artifacts from the previous checkouts | ||||
|           git clean -fxd | ||||
|         working-directory: builder | ||||
|       - name: ROCm set GPU_FLAG | ||||
|         run: | | ||||
|           echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri --group-add video --group-add daemon" >> "${GITHUB_ENV}" | ||||
|       - name: Pull Docker image | ||||
|         uses: pytorch/test-infra/.github/actions/pull-docker-image@main | ||||
|         with: | ||||
|           docker-image: pytorch/manylinux-builder:rocm6.1-main | ||||
|       - name: Test Pytorch binary | ||||
|         uses: ./pytorch/.github/actions/test-pytorch-binary | ||||
|       - name: Teardown ROCm | ||||
|         uses: ./.github/actions/teardown-rocm | ||||
|   manywheel-py3_12-rocm6_1-upload:  # Uploading | ||||
|     if: ${{ github.repository_owner == 'pytorch' }} | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|     needs: manywheel-py3_12-rocm6_1-test | ||||
|     with: | ||||
|       PYTORCH_ROOT: /pytorch | ||||
|       BUILDER_ROOT: /builder | ||||
|       PACKAGE_TYPE: manywheel | ||||
|       # TODO: This is a legacy variable that we eventually want to get rid of in | ||||
|       #       favor of GPU_ARCH_VERSION | ||||
|       DESIRED_CUDA: rocm6.1 | ||||
|       GPU_ARCH_VERSION: 6.1 | ||||
|       GPU_ARCH_TYPE: rocm | ||||
|       DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|       build_name: manywheel-py3_12-rocm6_1 | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|       conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }} | ||||
|       conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }} | ||||
|     uses: ./.github/workflows/_binary-upload.yml | ||||
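The removal above drops the py3_12 ROCm 6.0 and 6.1 build/test/upload job chains from this nightly workflow. The s390x nightly file that follows receives the same cuDNN bump even though the NVIDIA wheels are x86_64-only: the trailing environment marker makes each of those requirements a no-op on any other machine, so pip simply skips them on s390x. A sketch of that marker check with `packaging.markers`, supplying the environment by hand for illustration:

from packaging.markers import Marker

marker = Marker("platform_system == 'Linux' and platform_machine == 'x86_64'")

# An explicit environment dict overrides the running interpreter's values,
# so the s390x case can be checked from any machine.
print(marker.evaluate({"platform_system": "Linux", "platform_machine": "x86_64"}))  # True
print(marker.evaluate({"platform_system": "Linux", "platform_machine": "s390x"}))   # False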

.github/workflows/generated-linux-s390x-binary-manywheel-nightly.yml (10 changes; generated, vendored)
							| @ -54,7 +54,7 @@ jobs: | ||||
|       ALPINE_IMAGE: "docker.io/s390x/alpine" | ||||
|       build_name: manywheel-py3_8-cpu-s390x | ||||
|       build_environment: linux-s390x-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_8-cpu-s390x-test:  # Testing | ||||
| @ -117,7 +117,7 @@ jobs: | ||||
|       ALPINE_IMAGE: "docker.io/s390x/alpine" | ||||
|       build_name: manywheel-py3_9-cpu-s390x | ||||
|       build_environment: linux-s390x-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_9-cpu-s390x-test:  # Testing | ||||
| @@ -180,7 +180,7 @@ jobs: | ||||
|       ALPINE_IMAGE: "docker.io/s390x/alpine" | ||||
|       build_name: manywheel-py3_10-cpu-s390x | ||||
|       build_environment: linux-s390x-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_10-cpu-s390x-test:  # Testing | ||||
| @@ -243,7 +243,7 @@ jobs: | ||||
|       ALPINE_IMAGE: "docker.io/s390x/alpine" | ||||
|       build_name: manywheel-py3_11-cpu-s390x | ||||
|       build_environment: linux-s390x-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_11-cpu-s390x-test:  # Testing | ||||
| @@ -306,7 +306,7 @@ jobs: | ||||
|       ALPINE_IMAGE: "docker.io/s390x/alpine" | ||||
|       build_name: manywheel-py3_12-cpu-s390x | ||||
|       build_environment: linux-s390x-binary-manywheel | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     secrets: | ||||
|       github-token: ${{ secrets.GITHUB_TOKEN }} | ||||
|   manywheel-py3_12-cpu-s390x-test:  # Testing | ||||
|  | ||||
.github/workflows/generated-macos-arm64-binary-wheel-nightly.yml (10 changed lines; generated, vendored)
| @@ -46,7 +46,7 @@ jobs: | ||||
|       GPU_ARCH_TYPE: cpu | ||||
|       SKIP_ALL_TESTS: 1 | ||||
|       DESIRED_PYTHON: "3.8" | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       # For sccache access (only on non-forked PRs) | ||||
|       AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }} | ||||
|       AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }} | ||||
| @@ -165,7 +165,7 @@ jobs: | ||||
|       GPU_ARCH_TYPE: cpu | ||||
|       SKIP_ALL_TESTS: 1 | ||||
|       DESIRED_PYTHON: "3.9" | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       # For sccache access (only on non-forked PRs) | ||||
|       AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }} | ||||
|       AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }} | ||||
| @@ -284,7 +284,7 @@ jobs: | ||||
|       GPU_ARCH_TYPE: cpu | ||||
|       SKIP_ALL_TESTS: 1 | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       # For sccache access (only on non-forked PRs) | ||||
|       AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }} | ||||
|       AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }} | ||||
| @@ -403,7 +403,7 @@ jobs: | ||||
|       GPU_ARCH_TYPE: cpu | ||||
|       SKIP_ALL_TESTS: 1 | ||||
|       DESIRED_PYTHON: "3.11" | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       # For sccache access (only on non-forked PRs) | ||||
|       AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }} | ||||
|       AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }} | ||||
| @@ -522,7 +522,7 @@ jobs: | ||||
|       GPU_ARCH_TYPE: cpu | ||||
|       SKIP_ALL_TESTS: 1 | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       # For sccache access (only on non-forked PRs) | ||||
|       AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }} | ||||
|       AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }} | ||||
|  | ||||
.github/workflows/generated-windows-binary-wheel-nightly.yml (40 changed lines; generated, vendored)
| @@ -46,7 +46,7 @@ jobs: | ||||
|       GPU_ARCH_TYPE: cpu | ||||
|       SKIP_ALL_TESTS: 1 | ||||
|       DESIRED_PYTHON: "3.8" | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     steps: | ||||
|       - name: Display EC2 information | ||||
|         shell: bash | ||||
| @@ -290,7 +290,7 @@ jobs: | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       SKIP_ALL_TESTS: 1 | ||||
|       DESIRED_PYTHON: "3.8" | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     steps: | ||||
|       - name: Display EC2 information | ||||
|         shell: bash | ||||
| @@ -536,7 +536,7 @@ jobs: | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       SKIP_ALL_TESTS: 1 | ||||
|       DESIRED_PYTHON: "3.8" | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     steps: | ||||
|       - name: Display EC2 information | ||||
|         shell: bash | ||||
| @@ -782,7 +782,7 @@ jobs: | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       SKIP_ALL_TESTS: 1 | ||||
|       DESIRED_PYTHON: "3.8" | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     steps: | ||||
|       - name: Display EC2 information | ||||
|         shell: bash | ||||
| @@ -1027,7 +1027,7 @@ jobs: | ||||
|       GPU_ARCH_TYPE: cpu | ||||
|       SKIP_ALL_TESTS: 1 | ||||
|       DESIRED_PYTHON: "3.9" | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     steps: | ||||
|       - name: Display EC2 information | ||||
|         shell: bash | ||||
| @@ -1271,7 +1271,7 @@ jobs: | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       SKIP_ALL_TESTS: 1 | ||||
|       DESIRED_PYTHON: "3.9" | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     steps: | ||||
|       - name: Display EC2 information | ||||
|         shell: bash | ||||
| @@ -1517,7 +1517,7 @@ jobs: | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       SKIP_ALL_TESTS: 1 | ||||
|       DESIRED_PYTHON: "3.9" | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     steps: | ||||
|       - name: Display EC2 information | ||||
|         shell: bash | ||||
| @@ -1763,7 +1763,7 @@ jobs: | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       SKIP_ALL_TESTS: 1 | ||||
|       DESIRED_PYTHON: "3.9" | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     steps: | ||||
|       - name: Display EC2 information | ||||
|         shell: bash | ||||
| @@ -2008,7 +2008,7 @@ jobs: | ||||
|       GPU_ARCH_TYPE: cpu | ||||
|       SKIP_ALL_TESTS: 1 | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     steps: | ||||
|       - name: Display EC2 information | ||||
|         shell: bash | ||||
| @@ -2252,7 +2252,7 @@ jobs: | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       SKIP_ALL_TESTS: 1 | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     steps: | ||||
|       - name: Display EC2 information | ||||
|         shell: bash | ||||
| @@ -2498,7 +2498,7 @@ jobs: | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       SKIP_ALL_TESTS: 1 | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     steps: | ||||
|       - name: Display EC2 information | ||||
|         shell: bash | ||||
| @@ -2744,7 +2744,7 @@ jobs: | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       SKIP_ALL_TESTS: 1 | ||||
|       DESIRED_PYTHON: "3.10" | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     steps: | ||||
|       - name: Display EC2 information | ||||
|         shell: bash | ||||
| @@ -2989,7 +2989,7 @@ jobs: | ||||
|       GPU_ARCH_TYPE: cpu | ||||
|       SKIP_ALL_TESTS: 1 | ||||
|       DESIRED_PYTHON: "3.11" | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     steps: | ||||
|       - name: Display EC2 information | ||||
|         shell: bash | ||||
| @@ -3233,7 +3233,7 @@ jobs: | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       SKIP_ALL_TESTS: 1 | ||||
|       DESIRED_PYTHON: "3.11" | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     steps: | ||||
|       - name: Display EC2 information | ||||
|         shell: bash | ||||
| @@ -3479,7 +3479,7 @@ jobs: | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       SKIP_ALL_TESTS: 1 | ||||
|       DESIRED_PYTHON: "3.11" | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     steps: | ||||
|       - name: Display EC2 information | ||||
|         shell: bash | ||||
| @ -3725,7 +3725,7 @@ jobs: | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       SKIP_ALL_TESTS: 1 | ||||
|       DESIRED_PYTHON: "3.11" | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     steps: | ||||
|       - name: Display EC2 information | ||||
|         shell: bash | ||||
| @ -3970,7 +3970,7 @@ jobs: | ||||
|       GPU_ARCH_TYPE: cpu | ||||
|       SKIP_ALL_TESTS: 1 | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     steps: | ||||
|       - name: Display EC2 information | ||||
|         shell: bash | ||||
| @ -4214,7 +4214,7 @@ jobs: | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       SKIP_ALL_TESTS: 1 | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     steps: | ||||
|       - name: Display EC2 information | ||||
|         shell: bash | ||||
| @ -4460,7 +4460,7 @@ jobs: | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       SKIP_ALL_TESTS: 1 | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     steps: | ||||
|       - name: Display EC2 information | ||||
|         shell: bash | ||||
| @ -4706,7 +4706,7 @@ jobs: | ||||
|       GPU_ARCH_TYPE: cuda | ||||
|       SKIP_ALL_TESTS: 1 | ||||
|       DESIRED_PYTHON: "3.12" | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|       PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | ||||
|     steps: | ||||
|       - name: Display EC2 information | ||||
|         shell: bash | ||||
|  | ||||
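The only change in each hunk above is the pinned cuDNN wheel, nvidia-cudnn-cu12==8.9.2.26 to 9.1.0.70. PYTORCH_EXTRA_INSTALL_REQUIREMENTS is a '|'-separated list of PEP 508 requirement strings whose environment markers (platform_system == 'Linux' and platform_machine == 'x86_64') restrict each pin to Linux x86_64. A minimal sketch of how a variable shaped like this could be consumed, assuming the consumer simply splits on '|' and lets pip evaluate the markers; the job and step names are illustrative, not PyTorch's actual packaging scripts:

    # Hypothetical workflow fragment; not part of the diff above.
    jobs:
      install-extra-reqs:
        runs-on: ubuntu-latest
        env:
          PYTORCH_EXTRA_INSTALL_REQUIREMENTS: >-
            nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' |
            nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64'
        steps:
          - name: Install pinned CUDA wheels
            shell: bash
            run: |
              # Split the '|'-separated list; pip evaluates each PEP 508 marker
              # and skips requirements that do not match the current platform.
              IFS='|' read -r -a reqs <<< "${PYTORCH_EXTRA_INSTALL_REQUIREMENTS}"
              for req in "${reqs[@]}"; do
                pip install "${req}"
              done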
.github/workflows/inductor-cu124.yml (vendored, new file, 108 lines)
@@ -0,0 +1,108 @@
name: inductor-cu124

on:
  push:
    tags:
      - ciflow/inductor-cu124/*
  workflow_dispatch:
  schedule:
    # Run every 4 hours during the week and every 12 hours on the weekend
    - cron: 45 0,4,8,12,16,20 * * 1-5
    - cron: 45 4,12 * * 0,6
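    # Cron fields are minute, hour, day-of-month, month, day-of-week (UTC):
    # "45 0,4,8,12,16,20 * * 1-5" fires at :45 every fourth hour Mon-Fri;
    # "45 4,12 * * 0,6" fires at 04:45 and 12:45 on Sunday and Saturday.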

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
  cancel-in-progress: true
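  # The group key above concatenates the workflow name, the PR number (or ref
  # name for pushes), the commit SHA when the ref is a branch, and whether the
  # run came from workflow_dispatch; in-progress runs sharing the same key are
  # cancelled by cancel-in-progress.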

permissions: read-all

jobs:
  linux-focal-cuda12_4-py3_10-gcc9-inductor-build:
    # Should be synced with the one in inductor.yml, but this doesn't run inductor_timm
    name: cuda12.4-py3.10-gcc9-sm86
    uses: ./.github/workflows/_linux-build.yml
    with:
      sync-tag: linux-focal-cuda12_4-py3_10-gcc9-inductor-build
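      # sync-tag (an assumption, based on the comment above): a repo-side
      # consistency lint is expected to keep jobs that share a tag textually
      # identical across workflow files.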
      build-environment: linux-focal-cuda12.4-py3.10-gcc9-sm86
      docker-image-name: pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9-inductor-benchmarks
      cuda-arch-list: '8.6'
      test-matrix: |
        { include: [
          { config: "inductor", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
          { config: "inductor_distributed", shard: 1, num_shards: 1, runner: "linux.g5.12xlarge.nvidia.gpu" },
          { config: "inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
          { config: "inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
          { config: "inductor_torchbench", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
          { config: "dynamic_inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
          { config: "dynamic_inductor_timm", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
          { config: "dynamic_inductor_timm", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
          { config: "dynamic_inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
          { config: "dynamic_inductor_torchbench", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
          { config: "aot_inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
          { config: "aot_inductor_timm", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
          { config: "aot_inductor_timm", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
          { config: "aot_inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
          { config: "aot_inductor_torchbench", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
          { config: "inductor_cpp_wrapper_abi_compatible", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
        ]}
    secrets:
      HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}

  linux-focal-cuda12_4-py3_10-gcc9-inductor-test:
    name: cuda12.4-py3.10-gcc9-sm86
    uses: ./.github/workflows/_linux-test.yml
    needs: linux-focal-cuda12_4-py3_10-gcc9-inductor-build
    with:
      sync-tag: linux-focal-cuda12_4-py3_10-gcc9-inductor-test
      build-environment: linux-focal-cuda12.4-py3.10-gcc9-sm86
      docker-image: ${{ needs.linux-focal-cuda12_4-py3_10-gcc9-inductor-build.outputs.docker-image }}
      test-matrix: ${{ needs.linux-focal-cuda12_4-py3_10-gcc9-inductor-build.outputs.test-matrix }}
    secrets:
      HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}

  linux-focal-cuda12_4-py3_10-gcc9-inductor-build-gcp:
    name: cuda12.4-py3.10-gcc9-sm80
    uses: ./.github/workflows/_linux-build.yml
    with:
      build-environment: linux-focal-cuda12.4-py3.10-gcc9-sm80
      docker-image-name: pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9-inductor-benchmarks
      cuda-arch-list: '8.0'
      test-matrix: |
        { include: [
          { config: "inductor_torchbench_smoketest_perf", shard: 1, num_shards: 1, runner: "linux.gcp.a100" },
        ]}
    secrets:
      HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}

  linux-focal-cuda12_4-py3_10-gcc9-inductor-test-gcp:
    name: cuda12.4-py3.10-gcc9-sm80
    uses: ./.github/workflows/_linux-test.yml
    needs: linux-focal-cuda12_4-py3_10-gcc9-inductor-build-gcp
    with:
      build-environment: linux-focal-cuda12.4-py3.10-gcc9-sm80
      docker-image: ${{ needs.linux-focal-cuda12_4-py3_10-gcc9-inductor-build-gcp.outputs.docker-image }}
      test-matrix: ${{ needs.linux-focal-cuda12_4-py3_10-gcc9-inductor-build-gcp.outputs.test-matrix }}
      use-gha: anything-non-empty-to-use-gha
    secrets:
      HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}

  linux-focal-cuda12_4-py3_12-gcc9-inductor-build:
    name: cuda12.4-py3.12-gcc9-sm86
    uses: ./.github/workflows/_linux-build.yml
    with:
      build-environment: linux-focal-cuda12.4-py3.12-gcc9-sm86
      docker-image-name: pytorch-linux-focal-cuda12.4-cudnn9-py3.12-gcc9-inductor-benchmarks
      cuda-arch-list: '8.6'
      test-matrix: |
        { include: [
          { config: "inductor", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
        ]}

  linux-focal-cuda12_4-py3_12-gcc9-inductor-test:
    name: cuda12.4-py3.12-gcc9-sm86
    uses: ./.github/workflows/_linux-test.yml
    needs: linux-focal-cuda12_4-py3_12-gcc9-inductor-build
    with:
      build-environment: linux-focal-cuda12.4-py3.12-gcc9-sm86
      docker-image: ${{ needs.linux-focal-cuda12_4-py3_12-gcc9-inductor-build.outputs.docker-image }}
      test-matrix: ${{ needs.linux-focal-cuda12_4-py3_12-gcc9-inductor-build.outputs.test-matrix }}
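Every job in this new file either calls a reusable workflow (uses: ./.github/workflows/_linux-*.yml) or consumes a sibling job's outputs through needs, e.g. needs.<build-job>.outputs.docker-image. A minimal sketch of that wiring under the workflow_call trigger; _example-build.yml and its test-matrix output are invented for illustration and are not the real _linux-build.yml interface:

    # _example-build.yml -- a hypothetical callable workflow exposing an output
    name: _example-build
    on:
      workflow_call:
        inputs:
          test-matrix:
            required: true
            type: string
        outputs:
          test-matrix:
            description: Matrix string forwarded to downstream test jobs
            value: ${{ jobs.build.outputs.test-matrix }}
    jobs:
      build:
        runs-on: ubuntu-latest
        outputs:
          test-matrix: ${{ steps.pass.outputs.test-matrix }}
        steps:
          - id: pass
            env:
              MATRIX: ${{ inputs.test-matrix }}
            run: echo "test-matrix=${MATRIX}" >> "${GITHUB_OUTPUT}"

    # caller.yml -- consumes the callable workflow and its output via needs
    jobs:
      build:
        uses: ./.github/workflows/_example-build.yml
        with:
          test-matrix: '{"include": [{"config": "default", "shard": 1, "num_shards": 1}]}'
      test:
        needs: build
        runs-on: ubuntu-latest
        steps:
          - run: echo 'matrix=${{ needs.build.outputs.test-matrix }}'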
@@ -21,7 +21,7 @@ jobs:
    uses: ./.github/workflows/_linux-build.yml
    with:
      build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
-      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks
+      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9-inductor-benchmarks
      cuda-arch-list: '8.0'
      test-matrix: |
        { include: [

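The test-matrix values throughout these workflows are JSON strings, not native YAML sequences. A callable test workflow can expand such a string into a real job matrix with fromJSON, which is presumably how the config/shard/num_shards/runner fields fan out across machines; a hedged sketch (the inputs.test-matrix plumbing is assumed, as in the example above):

    jobs:
      test:
        strategy:
          fail-fast: false
          # Expand the JSON string into matrix entries; one job per entry.
          matrix: ${{ fromJSON(inputs.test-matrix) }}
        runs-on: ${{ matrix.runner }}
        steps:
          - name: Run one shard of one config
            run: echo "config=${{ matrix.config }} shard=${{ matrix.shard }}/${{ matrix.num_shards }}"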
.github/workflows/inductor-perf-compare.yml (vendored, 2 lines changed)
@@ -18,7 +18,7 @@ jobs:
    uses: ./.github/workflows/_linux-build.yml
    with:
      build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
-      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks
+      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9-inductor-benchmarks
      cuda-arch-list: '8.0'
      test-matrix: |
        { include: [

@@ -71,7 +71,7 @@ jobs:
    uses: ./.github/workflows/_linux-build.yml
    with:
      build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
-      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks
+      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9-inductor-benchmarks
      cuda-arch-list: '8.0'
      test-matrix: |
        { include: [

.github/workflows/inductor-periodic.yml (vendored, 2 lines changed)
@@ -23,7 +23,7 @@ jobs:
    uses: ./.github/workflows/_linux-build.yml
    with:
      build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm86
-      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks
+      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9-inductor-benchmarks
      cuda-arch-list: '8.6'
      test-matrix: |
        { include: [

.github/workflows/inductor.yml (vendored, 79 lines changed)
@@ -44,7 +44,7 @@ jobs:
    uses: ./.github/workflows/_linux-build.yml
    with:
      build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm86
-      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks
+      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9-inductor-benchmarks
      cuda-arch-list: '8.6'
      test-matrix: |
        { include: [
@@ -86,7 +86,7 @@ jobs:
    uses: ./.github/workflows/_linux-build.yml
    with:
      build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
-      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks
+      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9-inductor-benchmarks
      cuda-arch-list: '8.0'
      test-matrix: |
        { include: [
@@ -112,7 +112,7 @@ jobs:
    uses: ./.github/workflows/_linux-build.yml
    with:
      build-environment: linux-focal-cuda12.1-py3.12-gcc9-sm86
-      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3.12-gcc9-inductor-benchmarks
+      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3.12-gcc9-inductor-benchmarks
      cuda-arch-list: '8.6'
      test-matrix: |
        { include: [
@@ -129,32 +129,18 @@ jobs:
      test-matrix: ${{ needs.linux-focal-cuda12_1-py3_12-gcc9-inductor-build.outputs.test-matrix }}

  linux-focal-cuda12_4-py3_10-gcc9-inductor-build:
+    # Should be synced with the one in inductor-periodic.yml but this only runs inductor_timm
    name: cuda12.4-py3.10-gcc9-sm86
    uses: ./.github/workflows/_linux-build.yml
    with:
+      sync-tag: linux-focal-cuda12_4-py3_10-gcc9-inductor-build
      build-environment: linux-focal-cuda12.4-py3.10-gcc9-sm86
-      docker-image-name: pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9-inductor-benchmarks
+      docker-image-name: pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9-inductor-benchmarks
      cuda-arch-list: '8.6'
      test-matrix: |
        { include: [
-          { config: "inductor", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
-          { config: "inductor_distributed", shard: 1, num_shards: 1, runner: "linux.g5.12xlarge.nvidia.gpu" },
-          { config: "inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
          { config: "inductor_timm", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
          { config: "inductor_timm", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
-          { config: "inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
-          { config: "inductor_torchbench", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
-          { config: "dynamic_inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
-          { config: "dynamic_inductor_timm", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
-          { config: "dynamic_inductor_timm", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
-          { config: "dynamic_inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
-          { config: "dynamic_inductor_torchbench", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
-          { config: "aot_inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
-          { config: "aot_inductor_timm", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
-          { config: "aot_inductor_timm", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
-          { config: "aot_inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
-          { config: "aot_inductor_torchbench", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
-          { config: "inductor_cpp_wrapper_abi_compatible", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
        ]}
    secrets:
      HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
@@ -164,59 +150,13 @@ jobs:
    uses: ./.github/workflows/_linux-test.yml
    needs: linux-focal-cuda12_4-py3_10-gcc9-inductor-build
    with:
+      sync-tag: linux-focal-cuda12_4-py3_10-gcc9-inductor-test
      build-environment: linux-focal-cuda12.4-py3.10-gcc9-sm86
      docker-image: ${{ needs.linux-focal-cuda12_4-py3_10-gcc9-inductor-build.outputs.docker-image }}
      test-matrix: ${{ needs.linux-focal-cuda12_4-py3_10-gcc9-inductor-build.outputs.test-matrix }}
    secrets:
      HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}

-  linux-focal-cuda12_4-py3_10-gcc9-inductor-build-gcp:
-    name: cuda12.4-py3.10-gcc9-sm80
-    uses: ./.github/workflows/_linux-build.yml
-    with:
-      build-environment: linux-focal-cuda12.4-py3.10-gcc9-sm80
-      docker-image-name: pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9-inductor-benchmarks
-      cuda-arch-list: '8.0'
-      test-matrix: |
-        { include: [
-          { config: "inductor_torchbench_smoketest_perf", shard: 1, num_shards: 1, runner: "linux.gcp.a100" },
-        ]}
-    secrets:
-      HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
-
-  linux-focal-cuda12_4-py3_12-gcc9-inductor-build:
-    name: cuda12.4-py3.12-gcc9-sm86
-    uses: ./.github/workflows/_linux-build.yml
-    with:
-      build-environment: linux-focal-cuda12.4-py3.12-gcc9-sm86
-      docker-image-name: pytorch-linux-focal-cuda12.4-cudnn8-py3.12-gcc9-inductor-benchmarks
-      cuda-arch-list: '8.6'
-      test-matrix: |
-        { include: [
-          { config: "inductor", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
-        ]}
-
-  linux-focal-cuda12_4-py3_10-gcc9-inductor-test-gcp:
-    name: cuda12.4-py3.10-gcc9-sm80
-    uses: ./.github/workflows/_linux-test.yml
-    needs: linux-focal-cuda12_4-py3_10-gcc9-inductor-build-gcp
-    with:
-      build-environment: linux-focal-cuda12.4-py3.10-gcc9-sm80
-      docker-image: ${{ needs.linux-focal-cuda12_4-py3_10-gcc9-inductor-build-gcp.outputs.docker-image }}
-      test-matrix: ${{ needs.linux-focal-cuda12_4-py3_10-gcc9-inductor-build-gcp.outputs.test-matrix }}
-      use-gha: anything-non-empty-to-use-gha
-    secrets:
-      HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
-
-  linux-focal-cuda12_4-py3_12-gcc9-inductor-test:
-    name: cuda12.4-py3.12-gcc9-sm86
-    uses: ./.github/workflows/_linux-test.yml
-    needs: linux-focal-cuda12_4-py3_12-gcc9-inductor-build
-    with:
-      build-environment: linux-focal-cuda12.4-py3.12-gcc9-sm86
-      docker-image: ${{ needs.linux-focal-cuda12_4-py3_12-gcc9-inductor-build.outputs.docker-image }}
-      test-matrix: ${{ needs.linux-focal-cuda12_4-py3_12-gcc9-inductor-build.outputs.test-matrix }}
-
  linux-jammy-cpu-py3_8-gcc11-inductor-build:
    name: linux-jammy-cpu-py3.8-gcc11-inductor
    uses: ./.github/workflows/_linux-build.yml
@@ -230,6 +170,11 @@ jobs:
          { config: "cpu_inductor_timm", shard: 2, num_shards: 2, runner: "linux.12xlarge" },
          { config: "cpu_inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.12xlarge" },
          { config: "cpu_inductor_torchbench", shard: 2, num_shards: 2, runner: "linux.12xlarge" },
+          { config: "cpu_inductor_huggingface_freezing", shard: 1, num_shards: 1, runner: "linux.12xlarge" },
+          { config: "cpu_inductor_timm_freezing", shard: 1, num_shards: 2, runner: "linux.12xlarge" },
+          { config: "cpu_inductor_timm_freezing", shard: 2, num_shards: 2, runner: "linux.12xlarge" },
+          { config: "cpu_inductor_torchbench_freezing", shard: 1, num_shards: 2, runner: "linux.12xlarge" },
+          { config: "cpu_inductor_torchbench_freezing", shard: 2, num_shards: 2, runner: "linux.12xlarge" },
          { config: "dynamic_cpu_inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.12xlarge" },
          { config: "dynamic_cpu_inductor_timm", shard: 1, num_shards: 2, runner: "linux.12xlarge" },
          { config: "dynamic_cpu_inductor_timm", shard: 2, num_shards: 2, runner: "linux.12xlarge" },

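The five added lines above introduce freezing variants of the CPU inductor benchmark configs. A sketch of what a *_freezing config plausibly maps to at run time, assuming the dynamo benchmark harness's --freezing flag; the step is illustrative, and the actual mapping lives in the CI test scripts, which are not part of this diff:

    # Hypothetical step for a cpu_inductor_timm_freezing shard.
      - name: cpu_inductor_timm_freezing (illustrative)
        shell: bash
        run: |
          # Freezing folds parameters into constants so Inductor can
          # constant-fold and fuse more aggressively during inference.
          python benchmarks/dynamo/timm_models.py \
            --performance --inference --inductor --freezing --device cpu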
.github/workflows/lint.yml (vendored, 4 lines changed)
@@ -20,7 +20,7 @@ jobs:
    with:
      timeout: 120
      runner: linux.2xlarge
-      docker-image: pytorch-linux-jammy-cuda11.8-cudnn8-py3.9-linter
+      docker-image: pytorch-linux-jammy-cuda11.8-cudnn9-py3.9-linter
      # NB: A shallow checkout won't work here because calculate-docker-image requires a full checkout
      # to run git rev-parse HEAD~:.ci/docker when a new image is needed
      fetch-depth: 0
@@ -36,7 +36,7 @@ jobs:
    with:
      timeout: 120
      runner: linux.2xlarge
-      docker-image: pytorch-linux-jammy-cuda11.8-cudnn8-py3.9-linter
+      docker-image: pytorch-linux-jammy-cuda11.8-cudnn9-py3.9-linter
      # NB: A shallow checkout won't work here because calculate-docker-image requires a full checkout
      # to run git rev-parse HEAD~:.ci/docker when a new image is needed
      fetch-depth: 0

.github/workflows/mac-mps.yml (vendored, 5 lines changed)
@@ -23,9 +23,12 @@ jobs:
      build-generates-artifacts: true
      # To match the one pre-installed in the m1 runners
      python-version: 3.9.12
+      # The runner macos-m2-14 is not a typo, it's a custom runner that is different
+      # than our AWS macos-m1-14 runners
      test-matrix: |
        { include: [
-          { config: "mps", shard: 1, num_shards: 1, runner: "macos-m1-14" },
+          { config: "mps", shard: 1, num_shards: 1, runner: "macos-m1-13" },
+          { config: "mps", shard: 1, num_shards: 1, runner: "macos-m2-14" },
        ]}

  macos-py3-arm64-mps-test:

.github/workflows/periodic.yml (vendored, 57 lines changed)
@@ -37,6 +37,59 @@ jobs:
    permissions:
      id-token: write
      contents: read
+  linux-focal-cuda12_1-py3_10-gcc9-build:
+    name: linux-focal-cuda12.1-py3.10-gcc9
+    uses: ./.github/workflows/_linux-build.yml
+    with:
+      build-environment: linux-focal-cuda12.1-py3.10-gcc9
+      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9
+      test-matrix: |
+        { include: [
+          { config: "nogpu_AVX512", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
+          { config: "nogpu_NO_AVX2", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
+          { config: "jit_legacy", shard: 1, num_shards: 1, runner: "linux.4xlarge.nvidia.gpu" },
+        ]}
+  linux-focal-cuda12_1-py3_10-gcc9-test:
+    name: linux-focal-cuda12.1-py3.10-gcc9
+    uses: ./.github/workflows/_linux-test.yml
+    needs:
+      - linux-focal-cuda12_1-py3_10-gcc9-build
+      - target-determination
+    with:
+      build-environment: linux-focal-cuda12.1-py3.10-gcc9
+      docker-image: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-build.outputs.docker-image }}
+      test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-build.outputs.test-matrix }}
+
+  linux-focal-cuda12_4-py3_10-gcc9-build:
+    name: linux-focal-cuda12.4-py3.10-gcc9
+    uses: ./.github/workflows/_linux-build-label.yml
+    with:
+      build-environment: linux-focal-cuda12.4-py3.10-gcc9
+      docker-image-name: pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9
+      test-matrix: |
+        { include: [
+          { config: "default", shard: 1, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
+          { config: "default", shard: 2, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
+          { config: "default", shard: 3, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
+          { config: "default", shard: 4, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
+          { config: "default", shard: 5, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
+          { config: "deploy", shard: 1, num_shards: 1, runner: "linux.4xlarge.nvidia.gpu" },
+          { config: "nogpu_AVX512", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
+          { config: "nogpu_NO_AVX2", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
+          { config: "jit_legacy", shard: 1, num_shards: 1, runner: "linux.4xlarge.nvidia.gpu" },
+        ]}
+
+  linux-focal-cuda12_4-py3_10-gcc9-test:
+    name: linux-focal-cuda12.4-py3.10-gcc9
+    uses: ./.github/workflows/_linux-test.yml
+    needs:
+      - linux-focal-cuda12_4-py3_10-gcc9-build
+      - target-determination
+    with:
+      timeout-minutes: 360
+      build-environment: linux-focal-cuda12.4-py3.10-gcc9
+      docker-image: ${{ needs.linux-focal-cuda12_4-py3_10-gcc9-build.outputs.docker-image }}
+      test-matrix: ${{ needs.linux-focal-cuda12_4-py3_10-gcc9-build.outputs.test-matrix }}

  parallelnative-linux-jammy-py3_8-gcc11-build:
    name: parallelnative-linux-jammy-py3.8-gcc11
@@ -67,7 +120,7 @@ jobs:
    uses: ./.github/workflows/_linux-build.yml
    with:
      build-environment: linux-focal-cuda11.8-py3.9-gcc9
-      docker-image-name: pytorch-linux-focal-cuda11.8-cudnn8-py3-gcc9
+      docker-image-name: pytorch-linux-focal-cuda11.8-cudnn9-py3-gcc9
      cuda-arch-list: 8.6
      test-matrix: |
        { include: [
@@ -89,7 +142,7 @@ jobs:
    uses: ./.github/workflows/_linux-build.yml
    with:
      build-environment: linux-focal-cuda11.8-py3.10-gcc9-debug
-      docker-image-name: pytorch-linux-focal-cuda11.8-cudnn8-py3-gcc9
+      docker-image-name: pytorch-linux-focal-cuda11.8-cudnn9-py3-gcc9
      build-with-debug: true
      test-matrix: |
        { include: [

.github/workflows/pull.yml (vendored, 75 lines changed)
@@ -237,7 +237,7 @@ jobs:
    uses: ./.github/workflows/_linux-build-label.yml
    with:
      build-environment: linux-focal-cuda11.8-py3.10-gcc9
-      docker-image-name: pytorch-linux-focal-cuda11.8-cudnn8-py3-gcc9
+      docker-image-name: pytorch-linux-focal-cuda11.8-cudnn9-py3-gcc9
      test-matrix: |
        { include: [
          { config: "distributed", shard: 1, num_shards: 3, runner: "linux.8xlarge.nvidia.gpu" },
@@ -262,7 +262,7 @@ jobs:
    uses: ./.github/workflows/_linux-build-label.yml
    with:
      build-environment: linux-focal-cuda12.1-py3.10-gcc9
-      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
+      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9
      test-matrix: |
        { include: [
          { config: "default", shard: 1, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
@@ -285,34 +285,6 @@ jobs:
      docker-image: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-build.outputs.docker-image }}
      test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-build.outputs.test-matrix }}

-  linux-focal-cuda12_4-py3_10-gcc9-build:
-    name: linux-focal-cuda12.4-py3.10-gcc9
-    uses: ./.github/workflows/_linux-build-label.yml
-    with:
-      build-environment: linux-focal-cuda12.4-py3.10-gcc9
-      docker-image-name: pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9
-      test-matrix: |
-        { include: [
-          { config: "default", shard: 1, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
-          { config: "default", shard: 2, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
-          { config: "default", shard: 3, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
-          { config: "default", shard: 4, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
-          { config: "default", shard: 5, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
-          { config: "deploy", shard: 1, num_shards: 1, runner: "linux.4xlarge.nvidia.gpu" },
-        ]}
-
-  linux-focal-cuda12_4-py3_10-gcc9-test:
-    name: linux-focal-cuda12.4-py3.10-gcc9
-    uses: ./.github/workflows/_linux-test.yml
-    needs:
-      - linux-focal-cuda12_4-py3_10-gcc9-build
-      - target-determination
-    with:
-      timeout-minutes: 360
-      build-environment: linux-focal-cuda12.4-py3.10-gcc9
-      docker-image: ${{ needs.linux-focal-cuda12_4-py3_10-gcc9-build.outputs.docker-image }}
-      test-matrix: ${{ needs.linux-focal-cuda12_4-py3_10-gcc9-build.outputs.test-matrix }}
-
  linux-jammy-py3-clang12-mobile-build:
    name: linux-jammy-py3-clang12-mobile-build
    uses: ./.github/workflows/_linux-build-label.yml
@@ -325,12 +297,12 @@ jobs:
          { config: "default", shard: 1, num_shards: 1 },
        ]}

-  linux-jammy-cuda-11_8-cudnn8-py3_8-clang12-build:
-    name: linux-jammy-cuda11.8-cudnn8-py3.8-clang12
+  linux-jammy-cuda-11_8-cudnn9-py3_8-clang12-build:
+    name: linux-jammy-cuda11.8-cudnn9-py3.8-clang12
    uses: ./.github/workflows/_linux-build-label.yml
    with:
-      build-environment: linux-jammy-cuda11.8-cudnn8-py3.8-clang12
-      docker-image-name: pytorch-linux-jammy-cuda11.8-cudnn8-py3.8-clang12
+      build-environment: linux-jammy-cuda11.8-cudnn9-py3.8-clang12
+      docker-image-name: pytorch-linux-jammy-cuda11.8-cudnn9-py3.8-clang12
      test-matrix: |
        { include: [
          { config: "default", shard: 1, num_shards: 1 },
@@ -389,7 +361,7 @@ jobs:
    uses: ./.github/workflows/_bazel-build-test.yml
    with:
      build-environment: linux-focal-cuda12.1-py3.10-gcc9-bazel-test
-      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
+      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9
      cuda-version: cpu
      test-matrix: |
        { include: [
@@ -401,7 +373,7 @@ jobs:
    uses: ./.github/workflows/_bazel-build-test.yml
    with:
      build-environment: linux-focal-cuda12.1-py3.10-gcc9-bazel-test
-      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
+      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9
      cuda-version: "12.1"
      test-matrix: |
        { include: [
@@ -413,7 +385,7 @@ jobs:
    uses: ./.github/workflows/_bazel-build-test.yml
    with:
      build-environment: linux-focal-cuda12.4-py3.10-gcc9-bazel-test
-      docker-image-name: pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9
+      docker-image-name: pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9
      cuda-version: "12.4"
      test-matrix: |
        { include: [
@@ -475,7 +447,7 @@ jobs:
    uses: ./.github/workflows/_linux-build-label.yml
    with:
      build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm86
-      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
+      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9
      cuda-arch-list: 8.6
      test-matrix: |
        { include: [
@@ -497,33 +469,6 @@ jobs:
      docker-image: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-sm86-build.outputs.docker-image }}
      test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-sm86-build.outputs.test-matrix }}

-  linux-focal-cuda12_4-py3_10-gcc9-sm86-build:
-    name: linux-focal-cuda12.4-py3.10-gcc9-sm86
-    uses: ./.github/workflows/_linux-build-label.yml
-    with:
-      build-environment: linux-focal-cuda12.4-py3.10-gcc9-sm86
-      docker-image-name: pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9
-      cuda-arch-list: 8.6
-      test-matrix: |
-        { include: [
-          { config: "default", shard: 1, num_shards: 5, runner: "linux.g5.4xlarge.nvidia.gpu" },
-          { config: "default", shard: 2, num_shards: 5, runner: "linux.g5.4xlarge.nvidia.gpu" },
-          { config: "default", shard: 3, num_shards: 5, runner: "linux.g5.4xlarge.nvidia.gpu" },
-          { config: "default", shard: 4, num_shards: 5, runner: "linux.g5.4xlarge.nvidia.gpu" },
-          { config: "default", shard: 5, num_shards: 5, runner: "linux.g5.4xlarge.nvidia.gpu" },
-        ]}
-
-  linux-focal-cuda12_4-py3_10-gcc9-sm86-test:
-    name: linux-focal-cuda12.4-py3.10-gcc9-sm86
-    uses: ./.github/workflows/_linux-test.yml
-    needs:
-      - linux-focal-cuda12_4-py3_10-gcc9-sm86-build
-      - target-determination
-    with:
-      build-environment: linux-focal-cuda12.4-py3.10-gcc9-sm86
-      docker-image: ${{ needs.linux-focal-cuda12_4-py3_10-gcc9-sm86-build.outputs.docker-image }}
-      test-matrix: ${{ needs.linux-focal-cuda12_4-py3_10-gcc9-sm86-build.outputs.test-matrix }}
-
  linux-jammy-py3-clang12-executorch-build:
    name: linux-jammy-py3-clang12-executorch
    uses: ./.github/workflows/_linux-build-label.yml

.github/workflows/slow.yml (vendored, 4 lines changed)
@@ -41,7 +41,7 @@ jobs:
    uses: ./.github/workflows/_linux-build.yml
    with:
      build-environment: linux-focal-cuda12.1-py3-gcc9-slow-gradcheck
-      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
+      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9
      cuda-arch-list: 8.6
      test-matrix: |
        { include: [
@@ -70,7 +70,7 @@ jobs:
    uses: ./.github/workflows/_linux-build.yml
    with:
      build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm86
-      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
+      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9
      cuda-arch-list: 8.6
      test-matrix: |
        { include: [

@@ -26,7 +26,7 @@ jobs:
        id: calculate-docker-image
        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
        with:
-          docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
+          docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9
          working-directory: pytorch

      - name: Use following to pull public copy of the image

.github/workflows/torchbench.yml (vendored, 2 lines changed)
@@ -16,7 +16,7 @@ jobs:
    uses: ./.github/workflows/_linux-build.yml
    with:
      build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
-      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks
+      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9-inductor-benchmarks
      cuda-arch-list: '8.0'
      test-matrix: |
        { include: [

.github/workflows/trunk.yml (vendored, 64 lines changed)
@@ -34,36 +34,39 @@ jobs:
      id-token: write
      contents: read

-  linux-focal-cuda12_1-py3_10-gcc9-build:
-    name: linux-focal-cuda12.1-py3.10-gcc9
-    uses: ./.github/workflows/_linux-build.yml
+  linux-focal-cuda12_4-py3_10-gcc9-sm86-build:
+    name: linux-focal-cuda12.4-py3.10-gcc9-sm86
+    uses: ./.github/workflows/_linux-build-label.yml
    with:
-      build-environment: linux-focal-cuda12.1-py3.10-gcc9
-      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
+      build-environment: linux-focal-cuda12.4-py3.10-gcc9-sm86
+      docker-image-name: pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9
+      cuda-arch-list: 8.6
      test-matrix: |
        { include: [
-          { config: "nogpu_AVX512", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
-          { config: "nogpu_NO_AVX2", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
-          { config: "jit_legacy", shard: 1, num_shards: 1, runner: "linux.4xlarge.nvidia.gpu" },
+          { config: "default", shard: 1, num_shards: 5, runner: "linux.g5.4xlarge.nvidia.gpu" },
+          { config: "default", shard: 2, num_shards: 5, runner: "linux.g5.4xlarge.nvidia.gpu" },
+          { config: "default", shard: 3, num_shards: 5, runner: "linux.g5.4xlarge.nvidia.gpu" },
+          { config: "default", shard: 4, num_shards: 5, runner: "linux.g5.4xlarge.nvidia.gpu" },
+          { config: "default", shard: 5, num_shards: 5, runner: "linux.g5.4xlarge.nvidia.gpu" },
        ]}

-  linux-focal-cuda12_1-py3_10-gcc9-test:
-    name: linux-focal-cuda12.1-py3.10-gcc9
+  linux-focal-cuda12_4-py3_10-gcc9-sm86-test:
+    name: linux-focal-cuda12.4-py3.10-gcc9-sm86
    uses: ./.github/workflows/_linux-test.yml
    needs:
-      - linux-focal-cuda12_1-py3_10-gcc9-build
+      - linux-focal-cuda12_4-py3_10-gcc9-sm86-build
      - target-determination
    with:
-      build-environment: linux-focal-cuda12.1-py3.10-gcc9
-      docker-image: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-build.outputs.docker-image }}
-      test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-build.outputs.test-matrix }}
+      build-environment: linux-focal-cuda12.4-py3.10-gcc9-sm86
+      docker-image: ${{ needs.linux-focal-cuda12_4-py3_10-gcc9-sm86-build.outputs.docker-image }}
+      test-matrix: ${{ needs.linux-focal-cuda12_4-py3_10-gcc9-sm86-build.outputs.test-matrix }}

  libtorch-linux-focal-cuda12_1-py3_7-gcc9-debug-build:
    name: libtorch-linux-focal-cuda12.1-py3.7-gcc9-debug
    uses: ./.github/workflows/_linux-build.yml
    with:
      build-environment: libtorch-linux-focal-cuda12.1-py3.7-gcc9
-      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
+      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9
      build-generates-artifacts: false
      runner: linux.4xlarge
      test-matrix: |
@@ -77,42 +80,18 @@ jobs:
    uses: ./.github/workflows/_linux-build.yml
    with:
      build-environment: linux-focal-cuda12.1-py3.10-gcc9-no-ops
-      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
+      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9
      test-matrix: |
        { include: [
          { config: "default", shard: 1, num_shards: 1 },
        ]}

-  linux-focal-cuda12_4-py3_10-gcc9-build:
-    name: linux-focal-cuda12.4-py3.10-gcc9
-    uses: ./.github/workflows/_linux-build.yml
-    with:
-      build-environment: linux-focal-cuda12.4-py3.10-gcc9
-      docker-image-name: pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9
-      test-matrix: |
-        { include: [
-          { config: "nogpu_AVX512", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
-          { config: "nogpu_NO_AVX2", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
-          { config: "jit_legacy", shard: 1, num_shards: 1, runner: "linux.4xlarge.nvidia.gpu" },
-        ]}
-
-  linux-focal-cuda12_4-py3_10-gcc9-test:
-    name: linux-focal-cuda12.4-py3.10-gcc9
-    uses: ./.github/workflows/_linux-test.yml
-    needs:
-      - linux-focal-cuda12_4-py3_10-gcc9-build
-      - target-determination
-    with:
-      build-environment: linux-focal-cuda12.4-py3.10-gcc9
-      docker-image: ${{ needs.linux-focal-cuda12_4-py3_10-gcc9-build.outputs.docker-image }}
-      test-matrix: ${{ needs.linux-focal-cuda12_4-py3_10-gcc9-build.outputs.test-matrix }}
-
  libtorch-linux-focal-cuda12_4-py3_7-gcc9-debug-build:
    name: libtorch-linux-focal-cuda12.4-py3.7-gcc9-debug
    uses: ./.github/workflows/_linux-build.yml
    with:
      build-environment: libtorch-linux-focal-cuda12.4-py3.7-gcc9
-      docker-image-name: pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9
+      docker-image-name: pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9
      build-generates-artifacts: false
      runner: linux.4xlarge
      test-matrix: |
@@ -126,7 +105,7 @@ jobs:
    uses: ./.github/workflows/_linux-build.yml
    with:
      build-environment: linux-focal-cuda12.4-py3.10-gcc9-no-ops
-      docker-image-name: pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9
+      docker-image-name: pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9
      test-matrix: |
        { include: [
          { config: "default", shard: 1, num_shards: 1 },
@@ -172,6 +151,7 @@ jobs:
      python-version: 3.9.12
      test-matrix: |
        { include: [
+          { config: "mps", shard: 1, num_shards: 1, runner: "macos-m1-13" },
          { config: "mps", shard: 1, num_shards: 1, runner: "macos-m1-14" },
        ]}

.github/workflows/unstable.yml (171 changes, vendored)
| @ -32,174 +32,3 @@ jobs: | ||||
|           echo | ||||
|           echo "Once the jobs are deemed stable enough (% red signal < 5% and TTS < 3h)," | ||||
|           echo " they can graduate and move back to pull or trunk." | ||||
|  | ||||
|   # | ||||
|   # Experimental ARC jobs | ||||
|   # | ||||
|   llm-td: | ||||
|     name: before-test | ||||
|     uses: ./.github/workflows/llm_td_retrieval.yml | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|  | ||||
|   target-determination: | ||||
|     name: before-test | ||||
|     uses: ./.github/workflows/target_determination.yml | ||||
|     needs: llm-td | ||||
|     permissions: | ||||
|       id-token: write | ||||
|       contents: read | ||||
|  | ||||
|   linux-jammy-py3_8-gcc11-build: | ||||
|     name: linux-jammy-py3.8-gcc11 | ||||
|     uses: ./.github/workflows/_linux-build-rg.yml | ||||
|     with: | ||||
|       build-environment: linux-jammy-py3.8-gcc11 | ||||
|       docker-image-name: pytorch-linux-jammy-py3.8-gcc11 | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 3, runner: "arc-lf-linux.2xlarge.avx512" }, | ||||
|           { config: "default", shard: 2, num_shards: 3, runner: "arc-lf-linux.2xlarge.avx512" }, | ||||
|           { config: "default", shard: 3, num_shards: 3, runner: "arc-lf-linux.2xlarge.avx512" }, | ||||
|           { config: "docs_test", shard: 1, num_shards: 1,  runner: "arc-lf-linux.2xlarge.avx512" }, | ||||
|           { config: "jit_legacy", shard: 1, num_shards: 1, runner: "arc-lf-linux.2xlarge.avx512" }, | ||||
|           { config: "backwards_compat", shard: 1, num_shards: 1, runner: "arc-lf-linux.2xlarge.avx512" }, | ||||
|           { config: "distributed", shard: 1, num_shards: 2, runner: "arc-lf-linux.2xlarge.avx512" }, | ||||
|           { config: "distributed", shard: 2, num_shards: 2, runner: "arc-lf-linux.2xlarge.avx512" }, | ||||
|         ]} | ||||
|  | ||||
|   linux-jammy-py3_8-gcc11-test: | ||||
|     name: linux-jammy-py3.8-gcc11 | ||||
|     uses: ./.github/workflows/_linux-test-rg.yml | ||||
|     needs: | ||||
|       - linux-jammy-py3_8-gcc11-build | ||||
|       - target-determination | ||||
|     with: | ||||
|       build-environment: linux-jammy-py3.8-gcc11 | ||||
|       docker-image: ${{ needs.linux-jammy-py3_8-gcc11-build.outputs.docker-image }} | ||||
|       test-matrix: ${{ needs.linux-jammy-py3_8-gcc11-build.outputs.test-matrix }} | ||||
|  | ||||
|   linux-jammy-py3_8-gcc11-no-ops: | ||||
|     name: linux-jammy-py3.8-gcc11-no-ops | ||||
|     uses: ./.github/workflows/_linux-build-rg.yml | ||||
|     with: | ||||
|       build-environment: linux-jammy-py3.8-gcc11-no-ops | ||||
|       docker-image-name: pytorch-linux-jammy-py3.8-gcc11 | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 1 }, | ||||
|         ]} | ||||
|  | ||||
|   linux-jammy-py3_8-gcc11-pch: | ||||
|     name: linux-jammy-py3.8-gcc11-pch | ||||
|     uses: ./.github/workflows/_linux-build-rg.yml | ||||
|     with: | ||||
|       build-environment: linux-jammy-py3.8-gcc11-pch | ||||
|       docker-image-name: pytorch-linux-jammy-py3.8-gcc11 | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 1 }, | ||||
|         ]} | ||||
|  | ||||
|   linux-focal-py3_8-clang10-onnx-build: | ||||
|     name: linux-focal-py3.8-clang10-onnx | ||||
|     uses: ./.github/workflows/_linux-build-rg.yml | ||||
|     with: | ||||
|       build-environment: linux-focal-py3.8-clang10-onnx | ||||
|       docker-image-name: pytorch-linux-focal-py3-clang10-onnx | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 2, runner: "arc-lf-linux.2xlarge.avx512" }, | ||||
|           { config: "default", shard: 2, num_shards: 2, runner: "arc-lf-linux.2xlarge.avx512" }, | ||||
|         ]} | ||||
|  | ||||
|   linux-focal-py3_8-clang10-onnx-test: | ||||
|     name: linux-focal-py3.8-clang10-onnx | ||||
|     uses: ./.github/workflows/_linux-test-rg.yml | ||||
|     needs: | ||||
|       - linux-focal-py3_8-clang10-onnx-build | ||||
|       - target-determination | ||||
|     with: | ||||
|       build-environment: linux-focal-py3.8-clang10-onnx | ||||
|       docker-image: ${{ needs.linux-focal-py3_8-clang10-onnx-build.outputs.docker-image }} | ||||
|       test-matrix: ${{ needs.linux-focal-py3_8-clang10-onnx-build.outputs.test-matrix }} | ||||
|  | ||||
|   linux-jammy-py3_10-clang15-asan-build: | ||||
|     name: linux-jammy-py3.10-clang15-asan | ||||
|     uses: ./.github/workflows/_linux-build-rg.yml | ||||
|     with: | ||||
|       build-environment: linux-jammy-py3.10-clang15-asan | ||||
|       docker-image-name: pytorch-linux-jammy-py3-clang15-asan | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 6, runner: "linux.4xlarge" }, | ||||
|           { config: "default", shard: 2, num_shards: 6, runner: "linux.4xlarge" }, | ||||
|           { config: "default", shard: 3, num_shards: 6, runner: "linux.4xlarge" }, | ||||
|           { config: "default", shard: 4, num_shards: 6, runner: "linux.4xlarge" }, | ||||
|           { config: "default", shard: 5, num_shards: 6, runner: "linux.4xlarge" }, | ||||
|           { config: "default", shard: 6, num_shards: 6, runner: "linux.4xlarge" }, | ||||
|         ]} | ||||
|       sync-tag: asan-build-arc | ||||
|  | ||||
|   linux-focal-py3_8-clang10-build: | ||||
|     name: linux-focal-py3.8-clang10 | ||||
|     uses: ./.github/workflows/_linux-build-rg.yml | ||||
|     with: | ||||
|       build-environment: linux-focal-py3.8-clang10 | ||||
|       docker-image-name: pytorch-linux-focal-py3.8-clang10 | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 3, runner: "arc-lf-linux.2xlarge.avx512" }, | ||||
|           { config: "default", shard: 2, num_shards: 3, runner: "arc-lf-linux.2xlarge.avx512" }, | ||||
|           { config: "default", shard: 3, num_shards: 3, runner: "arc-lf-linux.2xlarge.avx512" }, | ||||
|           { config: "crossref", shard: 1, num_shards: 2, runner: "arc-lf-linux.2xlarge.avx512" }, | ||||
|           { config: "crossref", shard: 2, num_shards: 2, runner: "arc-lf-linux.2xlarge.avx512" }, | ||||
|           { config: "dynamo", shard: 1, num_shards: 3, runner: "arc-lf-linux.2xlarge.avx512" }, | ||||
|           { config: "dynamo", shard: 2, num_shards: 3, runner: "arc-lf-linux.2xlarge.avx512" }, | ||||
|           { config: "dynamo", shard: 3, num_shards: 3, runner: "arc-lf-linux.2xlarge.avx512" }, | ||||
|         ]} | ||||
|  | ||||
|   linux-focal-py3_8-clang10-test: | ||||
|     name: linux-focal-py3.8-clang10 | ||||
|     uses: ./.github/workflows/_linux-test-rg.yml | ||||
|     needs: | ||||
|       - linux-focal-py3_8-clang10-build | ||||
|       - target-determination | ||||
|     with: | ||||
|       build-environment: linux-focal-py3.8-clang10 | ||||
|       docker-image: ${{ needs.linux-focal-py3_8-clang10-build.outputs.docker-image }} | ||||
|       test-matrix: ${{ needs.linux-focal-py3_8-clang10-build.outputs.test-matrix }} | ||||
|  | ||||
|   linux-focal-py3_11-clang10-build: | ||||
|     name: linux-focal-py3.11-clang10 | ||||
|     uses: ./.github/workflows/_linux-build-rg.yml | ||||
|     with: | ||||
|       build-environment: linux-focal-py3.11-clang10 | ||||
|       docker-image-name: pytorch-linux-focal-py3.11-clang10 | ||||
|       test-matrix: | | ||||
|         { include: [ | ||||
|           { config: "default", shard: 1, num_shards: 3, runner: "arc-lf-linux.2xlarge.avx512" }, | ||||
|           { config: "default", shard: 2, num_shards: 3, runner: "arc-lf-linux.2xlarge.avx512" }, | ||||
|           { config: "default", shard: 3, num_shards: 3, runner: "arc-lf-linux.2xlarge.avx512" }, | ||||
|           { config: "crossref", shard: 1, num_shards: 2, runner: "arc-lf-linux.2xlarge.avx512" }, | ||||
|           { config: "crossref", shard: 2, num_shards: 2, runner: "arc-lf-linux.2xlarge.avx512" }, | ||||
|           { config: "dynamo", shard: 1, num_shards: 3, runner: "arc-lf-linux.2xlarge.avx512" }, | ||||
|           { config: "dynamo", shard: 2, num_shards: 3, runner: "arc-lf-linux.2xlarge.avx512" }, | ||||
|           { config: "dynamo", shard: 3, num_shards: 3, runner: "arc-lf-linux.2xlarge.avx512" }, | ||||
|         ]} | ||||
|  | ||||
|   linux-focal-py3_11-clang10-test: | ||||
|     name: linux-focal-py3.11-clang10 | ||||
|     uses: ./.github/workflows/_linux-test-rg.yml | ||||
|     needs: | ||||
|       - linux-focal-py3_11-clang10-build | ||||
|       - target-determination | ||||
|     with: | ||||
|       build-environment: linux-focal-py3.11-clang10 | ||||
|       docker-image: ${{ needs.linux-focal-py3_11-clang10-build.outputs.docker-image }} | ||||
|       test-matrix: ${{ needs.linux-focal-py3_11-clang10-build.outputs.test-matrix }} | ||||
|  | ||||
|   # | ||||
|   # End of Experimental ARC jobs | ||||
|   # | ||||
|  | ||||
.gitmodules (4 changes, vendored)
| @ -18,10 +18,6 @@ | ||||
|     ignore = dirty | ||||
|     path = third_party/protobuf | ||||
|     url = https://github.com/protocolbuffers/protobuf.git | ||||
| [submodule "third_party/ios-cmake"] | ||||
|     ignore = dirty | ||||
|     path = third_party/ios-cmake | ||||
|     url = https://github.com/Yangqing/ios-cmake.git | ||||
| [submodule "third_party/NNPACK"] | ||||
|     ignore = dirty | ||||
|     path = third_party/NNPACK | ||||
|  | ||||
| @ -1072,7 +1072,6 @@ exclude_patterns = [ | ||||
|     'test/test_jit_disabled.py', | ||||
|     'test/test_jit_fuser.py', | ||||
|     'test/test_jit_fuser_legacy.py', | ||||
|     'test/test_jit_fuser_te.py', | ||||
|     'test/test_jit_legacy.py', | ||||
|     'test/test_jit_llga_fuser.py', | ||||
|     'test/test_jit_profiling.py', | ||||
| @ -1115,9 +1114,6 @@ exclude_patterns = [ | ||||
|     'test/test_segment_reductions.py', | ||||
|     'test/test_serialization.py', | ||||
|     'test/test_set_default_mobile_cpu_allocator.py', | ||||
|     'test/test_shape_ops.py', | ||||
|     'test/test_show_pickle.py', | ||||
|     'test/test_sort_and_select.py', | ||||
|     'test/test_sparse.py', | ||||
|     'test/test_sparse_csr.py', | ||||
|     'test/test_sparse_semi_structured.py', | ||||
| @ -1536,28 +1532,6 @@ exclude_patterns = [ | ||||
|     'torch/distributed/optim/post_localSGD_optimizer.py', | ||||
|     'torch/distributed/optim/utils.py', | ||||
|     'torch/distributed/optim/zero_redundancy_optimizer.py', | ||||
|     'torch/distributed/pipeline/__init__.py', | ||||
|     'torch/distributed/pipeline/sync/__init__.py', | ||||
|     'torch/distributed/pipeline/sync/_balance/__init__.py', | ||||
|     'torch/distributed/pipeline/sync/_balance/blockpartition.py', | ||||
|     'torch/distributed/pipeline/sync/_balance/profile.py', | ||||
|     'torch/distributed/pipeline/sync/batchnorm.py', | ||||
|     'torch/distributed/pipeline/sync/checkpoint.py', | ||||
|     'torch/distributed/pipeline/sync/copy.py', | ||||
|     'torch/distributed/pipeline/sync/dependency.py', | ||||
|     'torch/distributed/pipeline/sync/microbatch.py', | ||||
|     'torch/distributed/pipeline/sync/phony.py', | ||||
|     'torch/distributed/pipeline/sync/pipe.py', | ||||
|     'torch/distributed/pipeline/sync/pipeline.py', | ||||
|     'torch/distributed/pipeline/sync/skip/__init__.py', | ||||
|     'torch/distributed/pipeline/sync/skip/layout.py', | ||||
|     'torch/distributed/pipeline/sync/skip/namespace.py', | ||||
|     'torch/distributed/pipeline/sync/skip/portal.py', | ||||
|     'torch/distributed/pipeline/sync/skip/skippable.py', | ||||
|     'torch/distributed/pipeline/sync/skip/tracker.py', | ||||
|     'torch/distributed/pipeline/sync/stream.py', | ||||
|     'torch/distributed/pipeline/sync/utils.py', | ||||
|     'torch/distributed/pipeline/sync/worker.py', | ||||
|     'torch/distributed/remote_device.py', | ||||
|     'torch/distributed/rendezvous.py', | ||||
|     'torch/distributed/rpc/__init__.py', | ||||
| @ -1582,7 +1556,6 @@ exclude_patterns = [ | ||||
|     'torch/distributed/tensor/parallel/input_reshard.py', | ||||
|     'torch/distributed/tensor/parallel/multihead_attention_tp.py', | ||||
|     'torch/distributed/tensor/parallel/style.py', | ||||
|     'torch/distributed/utils.py', | ||||
|     'torch/fft/__init__.py', | ||||
|     'torch/func/__init__.py', | ||||
|     'torch/functional.py', | ||||
| @ -1674,18 +1647,6 @@ exclude_patterns = [ | ||||
|     'torch/hub.py', | ||||
|     'torch/library.py', | ||||
|     'torch/linalg/__init__.py', | ||||
|     # UFMT causes import cycle on masked | ||||
|     'torch/masked/__init__.py', | ||||
|     'torch/masked/_docs.py', | ||||
|     'torch/masked/_ops.py', | ||||
|     'torch/masked/maskedtensor/__init__.py', | ||||
|     'torch/masked/maskedtensor/_ops_refs.py', | ||||
|     'torch/masked/maskedtensor/binary.py', | ||||
|     'torch/masked/maskedtensor/core.py', | ||||
|     'torch/masked/maskedtensor/creation.py', | ||||
|     'torch/masked/maskedtensor/passthrough.py', | ||||
|     'torch/masked/maskedtensor/reductions.py', | ||||
|     'torch/masked/maskedtensor/unary.py', | ||||
|     'torch/monitor/__init__.py', | ||||
|     'torch/nested/__init__.py', | ||||
|     'torch/nn/__init__.py', | ||||
| @ -1864,8 +1825,6 @@ exclude_patterns = [ | ||||
|     'torch/testing/_internal/distributed/nn/__init__.py', | ||||
|     'torch/testing/_internal/distributed/nn/api/__init__.py', | ||||
|     'torch/testing/_internal/distributed/nn/api/remote_module_test.py', | ||||
|     'torch/testing/_internal/distributed/pipe_with_ddp_test.py', | ||||
|     'torch/testing/_internal/distributed/pipeline/__init__.py', | ||||
|     'torch/testing/_internal/distributed/rpc/__init__.py', | ||||
|     'torch/testing/_internal/distributed/rpc/dist_autograd_test.py', | ||||
|     'torch/testing/_internal/distributed/rpc/dist_optimizer_test.py', | ||||
| @ -2120,7 +2079,7 @@ init_command = [ | ||||
|     'python3', | ||||
|     'tools/linter/adapters/pip_init.py', | ||||
|     '--dry-run={{DRYRUN}}', | ||||
|     'ruff==0.4.6', | ||||
|     'ruff==0.4.8', | ||||
| ] | ||||
| is_formatter = true | ||||
|  | ||||
|  | ||||
| @ -455,7 +455,6 @@ filegroup( | ||||
|     name = "caffe2_core_srcs", | ||||
|     srcs = [ | ||||
|         "caffe2/core/common.cc", | ||||
|         "caffe2/core/types.cc", | ||||
|     ], | ||||
| ) | ||||
|  | ||||
| @ -488,7 +487,6 @@ filegroup( | ||||
| filegroup( | ||||
|     name = "caffe2_utils_srcs", | ||||
|     srcs = [ | ||||
|         "caffe2/utils/cpuid.cc", | ||||
|         "caffe2/utils/proto_wrap.cc", | ||||
|         "caffe2/utils/string_utils.cc", | ||||
|         "caffe2/utils/threadpool/ThreadPool.cc", | ||||
| @ -507,12 +505,9 @@ cc_library( | ||||
|     name = "caffe2_for_aten_headers", | ||||
|     hdrs = [ | ||||
|         "caffe2/core/common.h", | ||||
|         "caffe2/core/logging.h", | ||||
|         "caffe2/core/types.h", | ||||
|         "caffe2/perfkernels/common.h", | ||||
|         "caffe2/perfkernels/embedding_lookup.h", | ||||
|         "caffe2/perfkernels/embedding_lookup_idx.h", | ||||
|         "caffe2/utils/cpuid.h", | ||||
|         "caffe2/utils/fixed_divisor.h", | ||||
|     ] + glob([ | ||||
|         "caffe2/utils/threadpool/*.h", | ||||
| @ -522,7 +517,6 @@ cc_library( | ||||
|     deps = [ | ||||
|         ":caffe2_core_macros", | ||||
|         "//c10", | ||||
|         "//caffe2/proto:caffe2_pb", | ||||
|     ], | ||||
| ) | ||||
|  | ||||
| @ -547,7 +541,6 @@ cc_library( | ||||
|     deps = [ | ||||
|         ":caffe2_core_macros", | ||||
|         ":caffe2_for_aten_headers", | ||||
|         "//caffe2/proto:caffe2_pb", | ||||
|     ], | ||||
| ) | ||||
|  | ||||
| @ -568,7 +561,6 @@ cc_library( | ||||
|         ":caffe2_perfkernels_avx", | ||||
|         ":caffe2_perfkernels_avx2", | ||||
|         ":caffe2_perfkernels_avx512", | ||||
|         "//caffe2/proto:caffe2_pb", | ||||
|         "//third_party/miniz-2.1.0:miniz", | ||||
|         "@com_google_protobuf//:protobuf", | ||||
|         "@eigen", | ||||
| @ -777,6 +769,7 @@ cc_library( | ||||
|         ":caffe2", | ||||
|         ":torch_headers", | ||||
|         "@kineto", | ||||
|         "@cpp-httplib", | ||||
|     ] + if_cuda([ | ||||
|         "@cuda//:nvToolsExt", | ||||
|         "@cutlass", | ||||
|  | ||||
| @ -242,8 +242,7 @@ option(USE_COLORIZE_OUTPUT "Colorize output during compilation" ON) | ||||
| option(USE_ASAN "Use Address+Undefined Sanitizers" OFF) | ||||
| option(USE_TSAN "Use Thread Sanitizer" OFF) | ||||
| option(USE_CUDA "Use CUDA" ON) | ||||
| cmake_dependent_option(USE_XPU "Use XPU. Only available on Linux." ON "LINUX" | ||||
|                        OFF) | ||||
| option(USE_XPU "Use XPU" ON) | ||||
| cmake_dependent_option( | ||||
|   BUILD_LAZY_CUDA_LINALG "Build cuda linalg ops as separate library" ON | ||||
|   "USE_CUDA AND LINUX AND BUILD_PYTHON" OFF) | ||||
| @ -540,6 +539,8 @@ option(BUILD_EXECUTORCH "Master flag to build Executorch" ON) | ||||
| if(LINUX) | ||||
|   set(CMAKE_SHARED_LINKER_FLAGS | ||||
|       "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--no-as-needed") | ||||
|   set(CMAKE_SHARED_LINKER_FLAGS | ||||
|       "${CMAKE_SHARED_LINKER_FLAGS} $ENV{LDFLAGS}") | ||||
| endif() | ||||
|  | ||||
| if(MSVC) | ||||
| @ -892,6 +893,14 @@ endif() | ||||
|  | ||||
| if(USE_SLEEF_FOR_ARM_VEC256) | ||||
|   string(APPEND CMAKE_CXX_FLAGS " -DAT_BUILD_ARM_VEC256_WITH_SLEEF") | ||||
|   add_definitions(-DAT_BUILD_ARM_VEC256_WITH_SLEEF) | ||||
| endif() | ||||
|  | ||||
| # Enable sleef on macOS with Apple silicon by default | ||||
| if((${CMAKE_SYSTEM_NAME} STREQUAL "Darwin") AND ("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "arm64")) | ||||
|   message(STATUS "Running on macOS with Apple silicon") | ||||
|   string(APPEND CMAKE_CXX_FLAGS " -DAT_BUILD_ARM_VEC256_WITH_SLEEF") | ||||
|   add_definitions(-DAT_BUILD_ARM_VEC256_WITH_SLEEF) | ||||
| endif() | ||||
|  | ||||
| if(USE_XNNPACK) | ||||
|  | ||||
| @ -1,4 +1,4 @@ | ||||
|  | ||||
|  | ||||
|  | ||||
| -------------------------------------------------------------------------------- | ||||
|  | ||||
| @ -98,7 +98,7 @@ from several research papers on this topic, as well as current and past work suc | ||||
| While this technique is not unique to PyTorch, it's one of the fastest implementations of it to date. | ||||
| You get the best of speed and flexibility for your crazy research. | ||||
|  | ||||
|  | ||||
|  | ||||
|  | ||||
| ### Python First | ||||
|  | ||||
| @ -189,7 +189,7 @@ Other potentially useful environment variables may be found in `setup.py`. | ||||
| ##### Intel GPU Support | ||||
| If you want to compile with Intel GPU support, follow these | ||||
| - [PyTorch Prerequisites for Intel GPUs](https://www.intel.com/content/www/us/en/developer/articles/tool/pytorch-prerequisites-for-intel-gpus.html) instructions. | ||||
| - Intel GPU is currently supported only for Linux systems. | ||||
| - Intel GPU is supported for Linux and Windows. | ||||
|  | ||||
| If you want to disable Intel GPU support, export the environment variable `USE_XPU=0`. | ||||
| Other potentially useful environment variables may be found in `setup.py`. | ||||
| @ -213,6 +213,7 @@ conda install -c pytorch magma-cuda121  # or the magma-cuda* that matches your C | ||||
|  | ||||
| # (optional) If using torch.compile with inductor/triton, install the matching version of triton | ||||
| # Run from the pytorch directory after cloning | ||||
| # For Intel GPU support, please explicitly `export USE_XPU=1` before running the command. | ||||
| make triton | ||||
| ``` | ||||
|  | ||||
|  | ||||
RELEASE.md (10 changes)
| @ -37,6 +37,7 @@ | ||||
|     - [TL;DR](#tldr) | ||||
|   - [Accelerator Software](#accelerator-software) | ||||
|     - [Special support cases](#special-support-cases) | ||||
|   - [Operating Systems](#operating-systems) | ||||
| - [Submitting Tutorials](#submitting-tutorials) | ||||
| - [Special Topics](#special-topics) | ||||
|   - [Updating submodules for a release](#updating-submodules-for-a-release) | ||||
| @ -426,6 +427,15 @@ the size restrictions for publishing on PyPI so the default version that is publ | ||||
| These special support cases will be handled on a case by case basis and support may be continued if current PyTorch maintainers feel as though there may still be a | ||||
| need to support these particular versions of software. | ||||
|  | ||||
| ## Operating Systems | ||||
| Supported OS flavors are summarized in the table below: | ||||
| | Operating System family | Architecture | Notes | | ||||
| | --- | --- | --- | | ||||
| | Linux | aarch64, x86_64 | Wheels are manylinux2014 compatible, i.e. they should be runnable on any Linux system with glibc-2.17 or above. | | ||||
| | macOS | arm64 | Builds should be compatible with macOS 11 (Big Sur) or newer, but are actively tested against macOS 14 (Sonoma). | | ||||
| | macOS | x86_64 | Requires macOS Catalina or above; not supported after release 2.2, see https://github.com/pytorch/pytorch/issues/114602 | | ||||
| | Windows | x86_64 | Builds are compatible with Windows 10 or newer. | | ||||
|  | ||||
| # Submitting Tutorials | ||||
|  | ||||
| Tutorials in support of a release feature must be submitted to the [pytorch/tutorials](https://github.com/pytorch/tutorials) repo at least two weeks before the release date to allow for editorial and technical review. There is no cherry-pick process for tutorials. All tutorials will be merged around the release day and published at [pytorch.org/tutorials](https://pytorch.org/tutorials/). | ||||
|  | ||||
| @ -168,6 +168,12 @@ new_local_repository( | ||||
|     path = "third_party/opentelemetry-cpp", | ||||
| ) | ||||
|  | ||||
| new_local_repository( | ||||
|     name = "cpp-httplib", | ||||
|     build_file = "//third_party:cpp-httplib.BUILD", | ||||
|     path = "third_party/cpp-httplib", | ||||
| ) | ||||
|  | ||||
| new_local_repository( | ||||
|     name = "tensorpipe", | ||||
|     build_file = "//third_party:tensorpipe.BUILD", | ||||
|  | ||||
| @ -386,6 +386,7 @@ if(UNIX AND NOT APPLE) | ||||
| endif(UNIX AND NOT APPLE) | ||||
|  | ||||
| if(UNIX) | ||||
|   include(CheckFunctionExists) | ||||
|   set(CMAKE_EXTRA_INCLUDE_FILES "sys/mman.h") | ||||
|   CHECK_FUNCTION_EXISTS(mmap HAVE_MMAP) | ||||
|   if(HAVE_MMAP) | ||||
| @ -472,7 +473,6 @@ endif() | ||||
|  | ||||
| if(USE_CUDA AND NOT USE_ROCM) | ||||
|   list(APPEND ATen_CUDA_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/../../../third_party/cutlass/include) | ||||
|   list(APPEND ATen_CUDA_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/../../../third_party/cutlass/tools/util/include) | ||||
|   if($ENV{ATEN_STATIC_CUDA}) | ||||
|     list(APPEND ATen_CUDA_DEPENDENCY_LIBS | ||||
|       ${CUDA_LIBRARIES} | ||||
|  | ||||
| @ -218,8 +218,8 @@ static inline Tensor applySlice( | ||||
|         ? (*self_sizes)[dim] | ||||
|         : self.sym_size(dim); | ||||
|     if (!disable_slice_optimization && | ||||
|         TORCH_GUARD_SIZE_OBLIVIOUS(start.sym_eq(0)) && length == stop && | ||||
|         step == 1) { | ||||
|         TORCH_GUARD_SIZE_OBLIVIOUS(start.sym_eq(0)) && | ||||
|         TORCH_GUARD_SIZE_OBLIVIOUS(length.sym_eq(stop)) && step == 1) { | ||||
|       return self; | ||||
|     } | ||||
|   } | ||||
|  | ||||
| @ -68,7 +68,7 @@ thread_local std::array<at::ScalarType, at::COMPILE_TIME_MAX_DEVICE_TYPES> | ||||
|         at::kBFloat16, // XLA / TPU | ||||
|         at::ScalarType::Undefined, // Vulkan | ||||
|         at::ScalarType::Undefined, // Metal | ||||
|         at::kBFloat16, // XPU | ||||
|         at::kHalf, // XPU | ||||
|         at::ScalarType::Undefined, // MPS | ||||
|         at::ScalarType::Undefined, // Meta (tensors with no data) | ||||
|         at::kBFloat16, // HPU / HABANA | ||||
|  | ||||
| @ -275,16 +275,6 @@ void expectOutOfPlaceMultiBoxedCallingWorks(const KernelFunction& func) { | ||||
|   EXPECT_TRUE(stack[1].toTensor().is_same(t2)); | ||||
| } | ||||
|  | ||||
| void expectBoxedCallingFailsWith(const KernelFunction& func, const char* errorMessage) { | ||||
|   called_with_args = c10::nullopt; | ||||
|   vector<IValue> stack {3, 4}; | ||||
|   OperatorHandle dummy = makeDummyOperatorHandle(); | ||||
|  | ||||
|   expectThrows<c10::Error>([&] { | ||||
|     func.callBoxed(dummy, CPU_TEST_SET, &stack); | ||||
|   }, errorMessage); | ||||
| } | ||||
|  | ||||
| // | ||||
| // unboxed calling tests: | ||||
| // | ||||
|  | ||||
| @ -40,10 +40,6 @@ int64_t incrementKernel(const Tensor& tensor, int64_t input) { | ||||
|   return input + 1; | ||||
| } | ||||
|  | ||||
| int64_t decrementKernel(const Tensor& tensor, int64_t input) { | ||||
|   return input - 1; | ||||
| } | ||||
|  | ||||
| void expectCallsIncrement(DispatchKey dispatch_key) { | ||||
|   at::AutoDispatchBelowAutograd mode; | ||||
|  | ||||
| @ -55,17 +51,6 @@ void expectCallsIncrement(DispatchKey dispatch_key) { | ||||
|   EXPECT_EQ(6, result[0].toInt()); | ||||
| } | ||||
|  | ||||
| void expectCallsDecrement(DispatchKey dispatch_key) { | ||||
|   at::AutoDispatchBelowAutograd mode; | ||||
|  | ||||
|   // assert that schema and cpu kernel are present | ||||
|   auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""}); | ||||
|   ASSERT_TRUE(op.has_value()); | ||||
|   auto result = callOp(*op, dummyTensor(dispatch_key), 5); | ||||
|   EXPECT_EQ(1, result.size()); | ||||
|   EXPECT_EQ(4, result[0].toInt()); | ||||
| } | ||||
|  | ||||
| TEST(OperatorRegistrationTestLegacyFunctionBasedKernel, givenKernel_whenRegistered_thenCanBeCalled) { | ||||
|   auto registrar = RegisterOperators().op("_test::my_op(Tensor dummy, int input) -> int", &incrementKernel); | ||||
|   expectCallsIncrement(DispatchKey::CPU); | ||||
|  | ||||
| @ -662,18 +662,6 @@ void expectCallsConcatUnboxed(DispatchKey dispatch_key) { | ||||
|   EXPECT_EQ("123", result); | ||||
| } | ||||
|  | ||||
| void expectCannotCallConcatBoxed(DispatchKey dispatch_key) { | ||||
|   at::AutoDispatchBelowAutograd mode; | ||||
|  | ||||
|   // assert that schema and cpu kernel are present | ||||
|   auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""}); | ||||
|   ASSERT_TRUE(op.has_value()); | ||||
|   expectThrows<c10::Error>( | ||||
|     [&] {callOp(*op, dummyTensor(dispatch_key), "1", "2", 3);}, | ||||
|     "Tried to call KernelFunction::callBoxed() on a KernelFunction that can only be called with KernelFunction::call()." | ||||
|   ); | ||||
| } | ||||
|  | ||||
| TEST(OperatorRegistrationTestFunctionBasedKernel, givenKernel_whenRegistered_thenCanBeCalledUnboxed) { | ||||
|   auto registrar = RegisterOperators().op("_test::my_op(Tensor dummy, str a, str b, int c) -> str", RegisterOperators::options().kernel<decltype(concatKernel), &concatKernel>(DispatchKey::CPU)); | ||||
|   expectCallsConcatUnboxed(DispatchKey::CPU); | ||||
|  | ||||
| @ -51,17 +51,6 @@ void expectCallsIncrement(DispatchKey dispatch_key) { | ||||
|   EXPECT_EQ(6, result[0].toInt()); | ||||
| } | ||||
|  | ||||
| void expectCallsDecrement(DispatchKey dispatch_key) { | ||||
|   at::AutoDispatchBelowAutograd mode; | ||||
|  | ||||
|   // assert that schema and cpu kernel are present | ||||
|   auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""}); | ||||
|   ASSERT_TRUE(op.has_value()); | ||||
|   auto result = callOp(*op, dummyTensor(dispatch_key), 5); | ||||
|   EXPECT_EQ(1, result.size()); | ||||
|   EXPECT_EQ(4, result[0].toInt()); | ||||
| } | ||||
|  | ||||
| TEST(OperatorRegistrationTestFunctorBasedKernel, givenKernel_whenRegistered_thenCanBeCalled) { | ||||
|   auto registrar = RegisterOperators().op("_test::my_op(Tensor dummy, int input) -> int", RegisterOperators::options().kernel<IncrementKernel>(DispatchKey::CPU)); | ||||
|   expectCallsIncrement(DispatchKey::CPU); | ||||
|  | ||||
| @ -4,6 +4,21 @@ | ||||
| #endif | ||||
|  | ||||
| namespace at::cpu { | ||||
| bool is_cpu_support_avx2() { | ||||
| #if !defined(__s390x__) && !defined(__powerpc__) | ||||
|   return cpuinfo_initialize() && cpuinfo_has_x86_avx2(); | ||||
| #else | ||||
|   return false; | ||||
| #endif | ||||
| } | ||||
|  | ||||
| bool is_cpu_support_avx512() { | ||||
| #if !defined(__s390x__) && !defined(__powerpc__) | ||||
|   return cpuinfo_initialize() && cpuinfo_has_x86_avx512f() && cpuinfo_has_x86_avx512vl() && cpuinfo_has_x86_avx512bw() && cpuinfo_has_x86_avx512dq(); | ||||
| #else | ||||
|   return false; | ||||
| #endif | ||||
| } | ||||
|  | ||||
| bool is_cpu_support_vnni() { | ||||
| #if !defined(__s390x__) && !defined(__powerpc__) | ||||
|  | ||||
| @ -4,6 +4,9 @@ | ||||
|  | ||||
| namespace at::cpu { | ||||
|  | ||||
| TORCH_API bool is_cpu_support_avx2(); | ||||
| TORCH_API bool is_cpu_support_avx512(); | ||||
|  | ||||
| // Detect if CPU support Vector Neural Network Instruction. | ||||
| TORCH_API bool is_cpu_support_vnni(); | ||||
|  | ||||
|  | ||||
| @ -1,3 +1,4 @@ | ||||
| #include <ATen/cuda/CUDAContextLight.h> | ||||
| #include <ATen/cuda/Sleep.h> | ||||
|  | ||||
| #include <c10/cuda/CUDAException.h> | ||||
| @ -32,4 +33,37 @@ void sleep(int64_t cycles) { | ||||
|   C10_CUDA_KERNEL_LAUNCH_CHECK(); | ||||
| } | ||||
|  | ||||
| #ifdef USE_ROCM | ||||
| __global__ void flush_icache_kernel() | ||||
| { | ||||
|     asm __volatile__("s_icache_inv \n\t" | ||||
|                      "s_nop 0 \n\t" | ||||
|                      "s_nop 0 \n\t" | ||||
|                      "s_nop 0 \n\t" | ||||
|                      "s_nop 0 \n\t" | ||||
|                      "s_nop 0 \n\t" | ||||
|                      "s_nop 0 \n\t" | ||||
|                      "s_nop 0 \n\t" | ||||
|                      "s_nop 0 \n\t" | ||||
|                      "s_nop 0 \n\t" | ||||
|                      "s_nop 0 \n\t" | ||||
|                      "s_nop 0 \n\t" | ||||
|                      "s_nop 0 \n\t" | ||||
|                      "s_nop 0 \n\t" | ||||
|                      "s_nop 0 \n\t" | ||||
|                      "s_nop 0 \n\t" | ||||
|                      "s_nop 0 \n\t" :: | ||||
|                          :); | ||||
| } | ||||
| #endif | ||||
|  | ||||
| void flush_icache() { | ||||
| #ifdef USE_ROCM | ||||
|   dim3 grid(at::cuda::getCurrentDeviceProperties()->multiProcessorCount * 60); | ||||
|   dim3 block(64); | ||||
|   flush_icache_kernel<<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>(); | ||||
|   C10_CUDA_KERNEL_LAUNCH_CHECK(); | ||||
| #endif | ||||
| } | ||||
|  | ||||
| }  // namespace at::cuda | ||||
|  | ||||
| @ -7,4 +7,7 @@ namespace at::cuda { | ||||
| // enqueues a kernel that spins for the specified number of cycles | ||||
| TORCH_CUDA_CU_API void sleep(int64_t cycles); | ||||
|  | ||||
| // flushes instruction cache for ROCm; no-op for CUDA | ||||
| TORCH_CUDA_CU_API void flush_icache(); | ||||
|  | ||||
| }  // namespace at::cuda | ||||
|  | ||||
| @ -170,43 +170,6 @@ CUDA_STUB3(cuLinkComplete, CUlinkState, void **, size_t *); | ||||
| CUDA_STUB3(cuFuncSetAttribute, CUfunction, CUfunction_attribute, int); | ||||
| CUDA_STUB3(cuFuncGetAttribute, int*, CUfunction_attribute, CUfunction); | ||||
|  | ||||
| #if defined(CUDA_VERSION) && CUDA_VERSION >= 12000 | ||||
| CUresult CUDAAPI | ||||
| cuTensorMapEncodeTiled( | ||||
|     CUtensorMap* tensorMap, | ||||
|     CUtensorMapDataType tensorDataType, | ||||
|     cuuint32_t tensorRank, | ||||
|     void* globalAddress, | ||||
|     const cuuint64_t* globalDim, | ||||
|     const cuuint64_t* globalStrides, | ||||
|     const cuuint32_t* boxDim, | ||||
|     const cuuint32_t* elementStrides, | ||||
|     CUtensorMapInterleave interleave, | ||||
|     CUtensorMapSwizzle swizzle, | ||||
|     CUtensorMapL2promotion l2Promotion, | ||||
|     CUtensorMapFloatOOBfill oobFill) { | ||||
|   auto fn = reinterpret_cast<decltype(&cuTensorMapEncodeTiled)>( | ||||
|       getCUDALibrary().sym(__func__)); | ||||
|   if (!fn) | ||||
|     throw std::runtime_error("Can't get cuTensorMapEncodeTiled"); | ||||
|   lazyNVRTC.cuTensorMapEncodeTiled = fn; | ||||
|   return fn( | ||||
|       tensorMap, | ||||
|       tensorDataType, | ||||
|       tensorRank, | ||||
|       globalAddress, | ||||
|       globalDim, | ||||
|       globalStrides, | ||||
|       boxDim, | ||||
|       elementStrides, | ||||
|       interleave, | ||||
|       swizzle, | ||||
|       l2Promotion, | ||||
|       oobFill); | ||||
| } | ||||
|  | ||||
| #endif | ||||
|  | ||||
| // Irregularly shaped functions | ||||
| CUresult CUDAAPI cuLaunchKernel(CUfunction f, | ||||
|                                 unsigned int gridDimX, | ||||
|  | ||||
| @ -34,8 +34,8 @@ struct PhiloxCudaState { | ||||
|     int64_t* ptr; | ||||
|   }; | ||||
|  | ||||
|   Payload seed_; | ||||
|   Payload offset_; | ||||
|   Payload seed_{}; | ||||
|   Payload offset_{}; | ||||
|   uint32_t offset_intragraph_ = 0; | ||||
|   bool captured_ = false; | ||||
| }; | ||||
|  | ||||
| @ -59,25 +59,16 @@ namespace at { namespace cuda { | ||||
|   _(cuLinkAddData)                               \ | ||||
|   _(cuLinkComplete)                              \ | ||||
|   _(cuFuncSetAttribute)                          \ | ||||
|   _(cuFuncGetAttribute)                          \ | ||||
|  | ||||
| #if defined(CUDA_VERSION) && CUDA_VERSION >= 12000 | ||||
| #define AT_FORALL_NVRTC_EXTENDED(_)              \ | ||||
|   AT_FORALL_NVRTC_BASE(_)                        \ | ||||
|   _(cuTensorMapEncodeTiled) | ||||
| #else | ||||
| #define AT_FORALL_NVRTC_EXTENDED(_)              \ | ||||
|   AT_FORALL_NVRTC_BASE(_) | ||||
| #endif | ||||
|   _(cuFuncGetAttribute) | ||||
|  | ||||
| #if defined(CUDA_VERSION) && CUDA_VERSION >= 11010 | ||||
| #define AT_FORALL_NVRTC(_) \ | ||||
|   AT_FORALL_NVRTC_EXTENDED(_)  \ | ||||
|   AT_FORALL_NVRTC_BASE(_)  \ | ||||
|   _(nvrtcGetCUBINSize)     \ | ||||
|   _(nvrtcGetCUBIN) | ||||
| #else | ||||
| #define AT_FORALL_NVRTC(_) \ | ||||
|   AT_FORALL_NVRTC_EXTENDED(_) | ||||
|   AT_FORALL_NVRTC_BASE(_) | ||||
| #endif | ||||
|  | ||||
| #else | ||||
|  | ||||
| @ -66,7 +66,7 @@ static bool NumericalCheck(ScalarType dtype, void* c, void* other_c, int64_t siz | ||||
|     return false; | ||||
|   } | ||||
|   else { | ||||
|     TUNABLE_LOG("├──verify numerics: atol=", last_succeed_atol, ", rtol=", last_succeed_rtol); | ||||
|     TUNABLE_LOG3("├──verify numerics: atol=", last_succeed_atol, ", rtol=", last_succeed_rtol); | ||||
|   } | ||||
|  | ||||
|   return true; | ||||
| @ -76,30 +76,54 @@ static bool NumericalCheck(ScalarType dtype, void* c, void* other_c, int64_t siz | ||||
|  | ||||
| template <typename T> | ||||
| struct GemmParams : OpParams { | ||||
|   GemmParams() { | ||||
|     duplicate_inputs_ = false; | ||||
|   } | ||||
|  | ||||
|   std::string Signature() const override { | ||||
|     return c10::str(transa, transb, "_", m, "_", n, "_", k); | ||||
|   } | ||||
|  | ||||
|   GemmParams* DeepCopy() const { | ||||
|   size_t GetSize(bool duplicate_inputs) const { | ||||
|     size_t size = sizeof(T) * ldc * n; | ||||
|     if (duplicate_inputs) { | ||||
|       size += sizeof(T) * lda * ((transa == 'n' || transa == 'N') ? k : m); | ||||
|       size += sizeof(T) * ldb * ((transb == 'n' || transb == 'N') ? n : k); | ||||
|     } | ||||
|     return size; | ||||
|   } | ||||
|  | ||||
|   GemmParams* DeepCopy(bool duplicate_inputs) const { | ||||
|     GemmParams* copy = new GemmParams; | ||||
|     *copy = *this; | ||||
|     c10::DeviceIndex device = 0; | ||||
|     AT_CUDA_CHECK(c10::cuda::GetDevice(&device)); | ||||
|     size_t c_size = m * n * sizeof(T); | ||||
|     size_t c_size = ldc * n * sizeof(T); | ||||
|     copy->c = static_cast<T*>(c10::cuda::CUDACachingAllocator::raw_alloc(c_size)); | ||||
|     AT_CUDA_CHECK(c10::cuda::CUDACachingAllocator::memcpyAsync( | ||||
|         copy->c, device, c, device, c_size, getCurrentCUDAStream(device), true)); | ||||
|     if (duplicate_inputs) { | ||||
|       size_t a_size = sizeof(T) * lda * ((transa == 'n' || transa == 'N') ? k : m); | ||||
|       size_t b_size = sizeof(T) * ldb * ((transb == 'n' || transb == 'N') ? n : k); | ||||
|       copy->a = static_cast<const T*>(c10::cuda::CUDACachingAllocator::raw_alloc(a_size)); | ||||
|       copy->b = static_cast<const T*>(c10::cuda::CUDACachingAllocator::raw_alloc(b_size)); | ||||
|       copy->duplicate_inputs_ = true; | ||||
|     } | ||||
|     return copy; | ||||
|   } | ||||
|  | ||||
|   // only call on object returned by DeepCopy | ||||
|   void Delete() { | ||||
|     c10::cuda::CUDACachingAllocator::raw_delete(c); | ||||
|     if (duplicate_inputs_) { | ||||
|       c10::cuda::CUDACachingAllocator::raw_delete(const_cast<T*>(a)); | ||||
|       c10::cuda::CUDACachingAllocator::raw_delete(const_cast<T*>(b)); | ||||
|     } | ||||
|   } | ||||
|  | ||||
|   TuningStatus NumericalCheck(GemmParams<T> *other) { | ||||
|     auto c_dtype = c10::CppTypeToScalarType<T>::value; | ||||
|     return detail::NumericalCheck(c_dtype, c, other->c, m*n) ? OK : FAIL; | ||||
|     return detail::NumericalCheck(c_dtype, c, other->c, ldc*n) ? OK : FAIL; | ||||
|   } | ||||
|  | ||||
|   char transa; | ||||
| @ -115,15 +139,30 @@ struct GemmParams : OpParams { | ||||
|   at::opmath_type<T> beta; | ||||
|   T* c; | ||||
|   int64_t ldc; | ||||
| private: | ||||
|   bool duplicate_inputs_; | ||||
| }; | ||||
|  | ||||
| template <typename T> | ||||
| struct GemmStridedBatchedParams : OpParams { | ||||
|   GemmStridedBatchedParams() { | ||||
|     duplicate_inputs_ = false; | ||||
|   } | ||||
|  | ||||
|   std::string Signature() const override { | ||||
|     return c10::str(transa, transb, "_", m, "_", n, "_", k, "_B_", batch); | ||||
|   } | ||||
|  | ||||
|   GemmStridedBatchedParams* DeepCopy() const { | ||||
|   size_t GetSize(bool duplicate_inputs) const { | ||||
|     size_t size = sizeof(T) * stride_c * batch; | ||||
|     if (duplicate_inputs) { | ||||
|       size += sizeof(T) * stride_a * batch; | ||||
|       size += sizeof(T) * stride_b * batch; | ||||
|     } | ||||
|     return size; | ||||
|   } | ||||
|  | ||||
|   GemmStridedBatchedParams* DeepCopy(bool duplicate_inputs) const { | ||||
|     GemmStridedBatchedParams* copy = new GemmStridedBatchedParams; | ||||
|     *copy = *this; | ||||
|     c10::DeviceIndex device = 0; | ||||
| @ -132,12 +171,23 @@ struct GemmStridedBatchedParams : OpParams { | ||||
|     copy->c = static_cast<T*>(c10::cuda::CUDACachingAllocator::raw_alloc(c_size)); | ||||
|     AT_CUDA_CHECK(c10::cuda::CUDACachingAllocator::memcpyAsync( | ||||
|         copy->c, device, c, device, c_size, getCurrentCUDAStream(device), true)); | ||||
|     if (duplicate_inputs) { | ||||
|       size_t a_size = sizeof(T) * stride_a * batch; | ||||
|       size_t b_size = sizeof(T) * stride_b * batch; | ||||
|       copy->a = static_cast<const T*>(c10::cuda::CUDACachingAllocator::raw_alloc(a_size)); | ||||
|       copy->b = static_cast<const T*>(c10::cuda::CUDACachingAllocator::raw_alloc(b_size)); | ||||
|       copy->duplicate_inputs_ = true; | ||||
|     } | ||||
|     return copy; | ||||
|   } | ||||
|  | ||||
|   // only call on object returned by DeepCopy | ||||
|   void Delete() { | ||||
|     c10::cuda::CUDACachingAllocator::raw_delete(c); | ||||
|     if (duplicate_inputs_) { | ||||
|       c10::cuda::CUDACachingAllocator::raw_delete(const_cast<T*>(a)); | ||||
|       c10::cuda::CUDACachingAllocator::raw_delete(const_cast<T*>(b)); | ||||
|     } | ||||
|   } | ||||
|  | ||||
|   TuningStatus NumericalCheck(GemmStridedBatchedParams<T> *other) { | ||||
| @ -162,33 +212,59 @@ struct GemmStridedBatchedParams : OpParams { | ||||
|   int64_t ldc; | ||||
|   int64_t stride_c; | ||||
|   int64_t batch; | ||||
| private: | ||||
|   bool duplicate_inputs_; | ||||
| }; | ||||
|  | ||||
| template <typename T> | ||||
| struct ScaledGemmParams : OpParams { | ||||
|   ScaledGemmParams() { | ||||
|     duplicate_inputs_ = false; | ||||
|   } | ||||
|  | ||||
|   std::string Signature() const override { | ||||
|     return c10::str(transa, transb, "_", m, "_", n, "_", k); | ||||
|   } | ||||
|  | ||||
|   ScaledGemmParams* DeepCopy() const { | ||||
|   size_t GetSize(bool duplicate_inputs) const { | ||||
|     size_t size = sizeof(T) * ldc * n; | ||||
|     if (duplicate_inputs) { | ||||
|       size += sizeof(T) * lda * ((transa == 'n' || transa == 'N') ? k : m); | ||||
|       size += sizeof(T) * ldb * ((transb == 'n' || transb == 'N') ? n : k); | ||||
|     } | ||||
|     return size; | ||||
|   } | ||||
|  | ||||
|   ScaledGemmParams* DeepCopy(bool duplicate_inputs) const { | ||||
|     ScaledGemmParams* copy = new ScaledGemmParams; | ||||
|     *copy = *this; | ||||
|     c10::DeviceIndex device = 0; | ||||
|     AT_CUDA_CHECK(c10::cuda::GetDevice(&device)); | ||||
|     size_t c_size = m * n * sizeof(T); | ||||
|     size_t c_size = ldc * n * sizeof(T); | ||||
|     copy->c = c10::cuda::CUDACachingAllocator::raw_alloc(c_size); | ||||
|     AT_CUDA_CHECK(c10::cuda::CUDACachingAllocator::memcpyAsync( | ||||
|         copy->c, device, c, device, c_size, getCurrentCUDAStream(device), true)); | ||||
|     if (duplicate_inputs) { | ||||
|       size_t a_size = sizeof(T) * lda * ((transa == 'n' || transa == 'N') ? k : m); | ||||
|       size_t b_size = sizeof(T) * ldb * ((transb == 'n' || transb == 'N') ? n : k); | ||||
|       copy->a = c10::cuda::CUDACachingAllocator::raw_alloc(a_size); | ||||
|       copy->b = c10::cuda::CUDACachingAllocator::raw_alloc(b_size); | ||||
|       copy->duplicate_inputs_ = true; | ||||
|     } | ||||
|     return copy; | ||||
|   } | ||||
|  | ||||
|   // only call on object returned by DeepCopy | ||||
|   void Delete() { | ||||
|     c10::cuda::CUDACachingAllocator::raw_delete(c); | ||||
|     if (duplicate_inputs_) { | ||||
|       c10::cuda::CUDACachingAllocator::raw_delete(const_cast<void*>(a)); | ||||
|       c10::cuda::CUDACachingAllocator::raw_delete(const_cast<void*>(b)); | ||||
|     } | ||||
|   } | ||||
|  | ||||
|   TuningStatus NumericalCheck(ScaledGemmParams<T> *other) { | ||||
|     return detail::NumericalCheck(c_dtype, c, other->c, m*n) ? OK : FAIL; | ||||
|     return detail::NumericalCheck(c_dtype, c, other->c, ldc*n) ? OK : FAIL; | ||||
|   } | ||||
|  | ||||
|   char transa; | ||||
| @ -212,6 +288,8 @@ struct ScaledGemmParams : OpParams { | ||||
|   ScalarType c_dtype; | ||||
|   void* amax_ptr; | ||||
|   bool use_fast_accum; | ||||
| private: | ||||
|   bool duplicate_inputs_; | ||||
| }; | ||||
|  | ||||
| } // namespace at::cuda::tunable | ||||
|  | ||||
| @ -263,19 +263,19 @@ static size_t GetHipblasltWorkspaceSize() { | ||||
|   // 256MB is max workspace size allowed for hipblaslt | ||||
|   // hipblaslt-bench uses 32MB | ||||
|   // recommendation from hipblaslt author was 76MB | ||||
|   size_t workspace_size = 2*128*1024*1024; // default 256MB | ||||
|   size_t workspace_size = 32*1024;  // going with 32MB | ||||
|   if (env) { | ||||
|     try { | ||||
|       workspace_size = std::stoi(env); | ||||
|     } catch(std::invalid_argument const& e) { | ||||
|       TORCH_WARN("invalid HIPBLASLT_WORKSPACE_SIZE,", | ||||
|                  " using default workspace size of ", workspace_size, " bytes."); | ||||
|                  " using default workspace size of ", workspace_size, " KiB."); | ||||
|     } catch(std::out_of_range const& e) { | ||||
|       TORCH_WARN("HIPBLASLT_WORKSPACE_SIZE out of range,", | ||||
|                  " using default workspace size of ", workspace_size, " bytes."); | ||||
|                  " using default workspace size of ", workspace_size, " KiB."); | ||||
|     } | ||||
|   } | ||||
|   return workspace_size; | ||||
|   return workspace_size * 1024; | ||||
| } | ||||
|  | ||||
| template <typename T, cublasStatus_t (*destructor)(T*)> | ||||
| @ -413,12 +413,10 @@ class HipblasltGemmOp : public Callable<ParamsT> { | ||||
|  | ||||
|       if (status == HIPBLAS_STATUS_SUCCESS) { | ||||
|         if (ret_workspace_size >= workspace_size) { | ||||
|           //TUNABLE_LOG("[hipBLASLt] Solution #", algo_index, " workspace too large"); | ||||
|           return FAIL; | ||||
|         } | ||||
|       } | ||||
|       else { | ||||
|         //TUNABLE_LOG("[hipBLASLt] Solution #", algo_index, " not supported"); | ||||
|         return FAIL; | ||||
|       } | ||||
|  | ||||
|  | ||||
| @ -2,67 +2,30 @@ | ||||
|  | ||||
| This directory implements a TunableOp interface. | ||||
|  | ||||
| Some operations, such as GEMMs, could be implemented using more than one library or more than one technique.  For | ||||
| example, a GEMM could be implemented for CUDA or ROCm using either the blas or blasLt libraries.  Further, ROCm's | ||||
| rocblas and hipblaslt libraries allow the user to query for all possible algorithms and then choose one.  How does one | ||||
| know which implementation is the fastest and should be chosen?  That's what TunableOp provides. | ||||
| Some operations, such as GEMMs, could be implemented using more than one library or more than one technique. For | ||||
| example, a GEMM could be implemented for CUDA or ROCm using either the blas or blasLt libraries. Further, ROCm's | ||||
| rocblas and hipblaslt libraries allow the user to query for all possible algorithms and then choose one. How does one | ||||
| know which implementation is the fastest and should be chosen? That's what TunableOp provides. | ||||
|  | ||||
| The behavior of TunableOp is currently easily manipulated through environment variables, though you could use the C++ | ||||
| interface of at::cuda::tunable::getTuningContext().  A Python interface to the TuningContext does not yet exist. | ||||
| ## Enabling TunableOp and Tuning Separately | ||||
| The TunableOp feature is enabled separately from enabling the tuning phase itself. Enabling TunableOp means that PyTorch | ||||
| will replace any standard operators with their Tunable implementations. Any call to a TunableOp first checks whether it | ||||
| has already been tuned for the given operator inputs. If so, it will immediately call the tuned operation; no further | ||||
| tuning will take place even when the tuning setting is enabled. If instead no tuning result is found and tuning is | ||||
| enabled, the TunableOp will benchmark every registered implementation of that operator for the given set of inputs and | ||||
| select the fastest. | ||||
|  | ||||
| Currently only a TunableGemm for ROCm is implemented.  Any call to at::cuda::blas::gemm() can optionally use the | ||||
| TunableGemm.  Calling gemm() for a given set of input arguments (transa, transb, m, n, k) will attempt to use the | ||||
| fastest available implementation. | ||||
| ## File Input and Output | ||||
| The first time any TunableOp is invoked, the internal database of tuned operations will be prepared by attempting to | ||||
| read the results from the given file. The default filename is 'tunableop_results.csv'. To support tuning when multiple | ||||
| GPUs are used across multiple processes, the GPU device ordinal is automatically inserted into the filename to avoid | ||||
| multiple processes overwriting the same file. | ||||
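|  | ||||
| For the multi-process case, a minimal sketch of selecting a per-device filename, assuming the Python API described | ||||
| later in this document (the filename and the resulting file names are illustrative): | ||||
|  | ||||
| ```python | ||||
| import torch.cuda.tunable as tunable | ||||
|  | ||||
| # Ask TunableOp to insert the device ordinal into the filename so that each | ||||
| # process writes its own file, e.g. my_tunings0.csv, my_tunings1.csv, ... | ||||
| tunable.set_filename("my_tunings.csv", insert_device_ordinal=True) | ||||
| ``` | ||||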
|  | ||||
| ## Environment Variables | ||||
|  | ||||
| #### PYTORCH_TUNABLEOP_ENABLED | ||||
| Default is 0. Set to 1 to enable. | ||||
| This is the big on/off switch for all TunableOp implementations. | ||||
|  | ||||
| #### PYTORCH_TUNABLEOP_TUNING | ||||
| Default is 1. Set to 0 to disable. | ||||
| When enabled, if a tuned entry isn't found, run the tuning step and record the entry. | ||||
|  | ||||
| #### PYTORCH_TUNABLEOP_VERBOSE | ||||
| Default is 0. Set to 1 to enable. | ||||
| This will produce a lot of diagnostic messages but may be useful to see if TunableOp is being used at all. | ||||
| Otherwise, TunableOp is completely silent unless there is a warning or error during its use. | ||||
|  | ||||
| #### PYTORCH_TUNABLEOP_FILENAME | ||||
| Default is 'tunableop_results.csv'.  If you provide a filename, the TuningContext will attempt to read it the first time | ||||
| the context is used.  If tuning is enabled and new tunings are discovered, it will also write out to this same filename | ||||
| with all tunings, both the ones it read in at startup as well as the new ones found at runtime.  This can be used, for | ||||
| example, to build up a tunings file across many workloads by reusing the same file.  Unsetting this variable is not | ||||
| recommended but can be done, in which case the tuning results will not be saved. | ||||
|  | ||||
| #### PYTORCH_TUNABLEOP_NUMERICAL_CHECK | ||||
| Default is 1. Set to 0 to disable. Compare the results of each possible solution against the default solution and reject | ||||
| those with low accuracy. | ||||
|  | ||||
| #### PYTORCH_TUNABLEOP_HIPBLASLT_ENABLED | ||||
| Default is 1. Set to 0 to disable hipblaslt being considered during tuning. | ||||
|  | ||||
| ### Tuning Iterations | ||||
| By default, each possible solution for a given operator will be run for either 100 iterations or as many iterations can | ||||
| be run within 30ms, whichever is smaller. Its average execution will be calculated. The fastest solution is chosen. In | ||||
| addition, a set of warm up iterations can optionally be run prior to the timed iterations. The following environment | ||||
| variables can be used to set either the maximum number of iterations to attempt or the maximum amount of time allowed in | ||||
| milliseconds, or both, in which case the smaller of the two values used. | ||||
|  | ||||
| #### PYTORCH_TUNABLEOP_MAX_TUNING_DURATION_MS | ||||
| Default is 30. | ||||
|  | ||||
| #### PYTORCH_TUNABLEOP_MAX_TUNING_ITERATIONS | ||||
| Default is 100. | ||||
|  | ||||
| #### PYTORCH_TUNABLEOP_MAX_WARMUP_DURATION_MS | ||||
| Default is 0, meaning it is not used. | ||||
|  | ||||
| #### PYTORCH_TUNABLEOP_MAX_WARMUP_ITERATIONS | ||||
| Default is 1. | ||||
|  | ||||
| ## File Output | ||||
| If tuning is enabled and new tunings are discovered during the course of your workload, TunableOp will write all | ||||
| tunings out to the same file, both the ones it read in at startup as well as the new ones found at runtime. This can | ||||
| be used, for example, to build up a tunings file across many workloads by reusing the same file. The output file is | ||||
| automatically created when the application terminates. This behavior can be controlled by the C++ and Python APIs but | ||||
| not the environment variables. | ||||
|  | ||||
| Assuming you specified a filename, you'll end up with a CSV file with contents like so: | ||||
|  | ||||
| @ -75,8 +38,8 @@ GemmTunableOp_float_NT,nt_25088_4096_64,1219,1.262 | ||||
| GemmTunableOp_float_NT,nt_4096_4096_64,1216,0.033 | ||||
| ``` | ||||
|  | ||||
| Note the "Validator" lines.  If you change a library verison, or rocm version, or pytorch version, TunableOp will detect | ||||
| this and not load the tunings because they are likely affected by other software changes. | ||||
| Note the "Validator" lines. If you change a library version, or ROCm version, or PyTorch version, TunableOp will detect | ||||
| this and reject the tunings file because the prior tunings are likely affected by other software changes. | ||||
|  | ||||
| The remaining lines are the tuned solutions for each TunableOp encountered during your execution. Each line consists of | ||||
| 4 comma-separated fields: operator name, operator parameters, solution name, and average execution time. The execution | ||||
| @ -86,3 +49,102 @@ hipBLAS or hipBLASLt libraries, if you know the specific solution index you can | ||||
| selected by replacing the value. The operator name and parameters (fields 1 and 2) are internally named and should not | ||||
| be modified. In the case of GemmTunableOp, field 1 indicates the datatype and whether the inputs are transposed (T) or | ||||
| not (N) and field 2 indicates the M, N, K input shapes. | ||||
|  | ||||
| There is an option to enable verbose output, but it is recommended only for debugging purposes. It will produce a lot | ||||
| of diagnostic messages, which may be useful to see whether TunableOp is being used at all. Otherwise, TunableOp is | ||||
| completely silent, besides file output, unless there is a warning or error during its use. | ||||
|  | ||||
| ## A Note on Tuning Behavior, Warmup, and Cache Effects | ||||
| Tuning an operator consists of iterating through the list of registered implementations and profiling each one. The | ||||
| profile is established by running a single implementation in a loop multiple times and taking the average execution | ||||
| time. There is also an optional warmup phase prior to tuning that can help the hardware reach stable power states. | ||||
| During tuning of a workload, the various hardware caches are more likely to produce hits than when not tuning. There | ||||
| are options for flushing the instruction cache and rotating the input tensors, which might help produce a more | ||||
| faithful profile of the tuned operator, as if the operator were run within a larger workload instead of in a tight, | ||||
| repetitive loop. | ||||
|  | ||||
| By default, each possible solution for a given operator will be run for either 100 iterations or as many iterations as | ||||
| can be run within 30 ms, whichever is smaller, and its average execution time will be calculated. The fastest solution | ||||
| among all that were successfully profiled will be chosen. A profile might fail if the given solution doesn't achieve | ||||
| the same accuracy as the default implementation or if the solution returns an error code. | ||||
|  | ||||
| ## Current Tunable Operators | ||||
|  | ||||
| ### TunableGemm for ROCm | ||||
| Currently only a TunableGemm for ROCm is implemented. Note that CUDA builds of PyTorch will function correctly when | ||||
| using TunableOp, but the only solution available to CUDA builds is the 'Default' implementation, i.e. the original | ||||
| cuBLAS default, now called through TunableOp. Any call to at::cuda::blas::gemm() or ::bgemm() will be routed through TunableOp | ||||
| when enabled. Calling gemm() for a given set of input arguments (transa, transb, m, n, k) will attempt to use the | ||||
| fastest available implementation across both rocblas and hipblaslt. | ||||
|  | ||||
| ## Tuning Context | ||||
| The behavior of TunableOp is currently manipulated through environment variables, the C++ interface of | ||||
| at::cuda::tunable::getTuningContext(), or the `torch.cuda.tunable` Python interface. The environment variables take | ||||
| precedence over any setting you manipulate using the C++ or Python APIs. | ||||
|  | ||||
| ### Environment Variable Interface | ||||
| Environment variables are cached the first time they are read. You cannot use the environment variable interface | ||||
| programmatically since the settings become fixed. Use the C++ or Python APIs instead. | ||||
|  | ||||
| Environment Variable | Description |
| -------------------- | ----------- |
| PYTORCH_TUNABLEOP_ENABLED | Default is 0. Set to 1 to enable. |
| PYTORCH_TUNABLEOP_TUNING | Default is 1. Set to 0 to disable. |
| PYTORCH_TUNABLEOP_VERBOSE | Default is 0. Set to 1 to enable basic logging. 2 for basic tuning status. 3 for full trace. |
| PYTORCH_TUNABLEOP_VERBOSE_FILENAME | Default is "err" for stderr. Set to "out" for stdout or a filename for capturing verbose logging. |
| PYTORCH_TUNABLEOP_FILENAME | Default is 'tunableop_results.csv'. |
| PYTORCH_TUNABLEOP_NUMERICAL_CHECK | Default is 0. Set to 1 to enable. |
| PYTORCH_TUNABLEOP_ROCBLAS_ENABLED | Default is 1. Set to 0 to disable rocblas being considered during tuning. |
| PYTORCH_TUNABLEOP_HIPBLASLT_ENABLED | Default is 1. Set to 0 to disable hipblaslt being considered during tuning. |
| PYTORCH_TUNABLEOP_MAX_TUNING_DURATION_MS | Default is 30. Unit is milliseconds. |
| PYTORCH_TUNABLEOP_MAX_TUNING_ITERATIONS | Default is 100. |
| PYTORCH_TUNABLEOP_MAX_WARMUP_DURATION_MS | Default is 0, meaning it is not used. Unit is milliseconds. |
| PYTORCH_TUNABLEOP_MAX_WARMUP_ITERATIONS | Default is 0, meaning it is not used. |
| PYTORCH_TUNABLEOP_ICACHE_FLUSH_ENABLED | Default is 1. Set to 0 to disable. |
| PYTORCH_TUNABLEOP_ROTATING_BUFFER_SIZE | Default is to query L2 cache size. Set to 0 to disable. Otherwise, set to the number of MiB to use for the pool of operator parameters. For example, setting this to the size of your device's memory cache will guarantee that every tuning iteration will use a cold cache. |
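As a sketch, these variables are typically set when launching the process; the script name and values below are
illustrative:

```
PYTORCH_TUNABLEOP_ENABLED=1 \
PYTORCH_TUNABLEOP_VERBOSE=1 \
PYTORCH_TUNABLEOP_MAX_WARMUP_ITERATIONS=10 \
PYTORCH_TUNABLEOP_ROTATING_BUFFER_SIZE=256 \
python train.py
```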
|  | ||||
### Python Interface
All Python APIs live in the `torch.cuda.tunable` module.

| Python API | Description |
| ---------- | ----------- |
| enable(val: bool = True) -> None | Enable or disable TunableOp. |
| is_enabled() -> bool | Returns whether TunableOp is enabled. |
| tuning_enable(val: bool = True) -> None | Enable or disable tuning. Default is True. |
| tuning_is_enabled() -> bool | Returns whether tuning is enabled. |
| set_max_tuning_duration(duration: int) -> None | Maximum tuning time per solution, in milliseconds. |
| get_max_tuning_duration() -> int | Returns the current maximum tuning duration. |
| set_max_tuning_iterations(iterations: int) -> None | Maximum number of tuning iterations per solution. |
| get_max_tuning_iterations() -> int | Returns the current maximum number of tuning iterations. |
| set_filename(filename: str, insert_device_ordinal: bool = False) -> None | Set the results filename, optionally inserting the device ordinal. |
| get_filename() -> str | Returns the current results filename. |
| get_results() -> Tuple[str, str, str, float] | Returns the current tuning results. |
| get_validators() -> Tuple[str, str] | Returns the registered validators. |
| write_file_on_exit(val: bool) -> None | Default is True. |
| write_file(filename: Optional[str] = None) -> None | If filename not given, it will call get_filename(). |
| read_file(filename: Optional[str] = None) -> None | If filename not given, it will call get_filename(). |
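A short sketch combining the calls above; the filename and iteration count are illustrative:

```python
import torch.cuda.tunable as tunable

tunable.enable(True)
tunable.set_max_tuning_iterations(50)
# the device ordinal is inserted before the final '.' (or at a '%d' token)
tunable.set_filename("my_results.csv", insert_device_ordinal=True)

# ... run the workload to be tuned ...

tunable.write_file()  # defaults to get_filename() when no filename is given
```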
|  | ||||
### C++ Interface
Example:
```C++
#include <ATen/cuda/tunable/Tunable.h>

at::cuda::tunable::getTuningContext()->EnableTunableOp(true);
```
|  | ||||
| C++ API | Description |
| ------- | ----------- |
| void EnableTunableOp(bool value); | |
| bool IsTunableOpEnabled() const; | |
| void EnableTuning(bool value); | |
| bool IsTuningEnabled() const; | |
| void SetMaxTuningDurationMs(int max_duration_ms); | |
| int GetMaxTuningDurationMs() const; | |
| void SetMaxTuningIterations(int max_iter); | |
| int GetMaxTuningIterations() const; | |
| TuningResults GetTuningResults(); | |
| void SetFilename(const std::string& filename, bool insert_device_ordinal=false); | |
| std::string GetFilename() const; | |
| void WriteFileOnExit(bool value); | |
| bool ReadFile(const std::string& filename={}); | |
| bool WriteFile(const std::string& filename={}); | |
|  | ||||
| @ -65,14 +65,14 @@ ResultEntry TuningResultsManager::Lookup(const std::string& op_signature, const | ||||
|   std::scoped_lock l{lock_}; | ||||
|   auto kernel_map_it = results_.find(op_signature); | ||||
|   if (kernel_map_it == results_.cend()) { | ||||
|     TUNABLE_LOG("missing op_signature, returning null ResultEntry"); | ||||
|     TUNABLE_LOG3("missing op_signature, returning null ResultEntry"); | ||||
|     return ResultEntry::Null(); | ||||
|   } | ||||
|  | ||||
|   const auto& km = kernel_map_it->second; | ||||
|   auto it = km.find(params_signature); | ||||
|   if (it == km.cend()) { | ||||
|     TUNABLE_LOG("missing params_signature, returning null ResultEntry"); | ||||
|     TUNABLE_LOG3("missing params_signature, returning null ResultEntry"); | ||||
|     return ResultEntry::Null(); | ||||
|   } | ||||
|   return it->second; | ||||
| @ -85,14 +85,14 @@ inline void TuningResultsManager::AddImpl(const std::string& op_signature, | ||||
|   auto it = kernel_map.find(params_signature); | ||||
|   if (it != kernel_map.end()) { | ||||
|     if (it->second != best) { | ||||
|       TUNABLE_LOG(op_signature, "(", params_signature, ") already has a best kernel ", | ||||
|       TUNABLE_LOG1(op_signature, "(", params_signature, ") already has a best kernel ", | ||||
|           "id=", it->second, " selected, want to add a different best kernel ", best, | ||||
|           ", the new kernel id will be ignored."); | ||||
|     } | ||||
|     return; | ||||
|   } | ||||
|  | ||||
|   TUNABLE_LOG(op_signature, "(", params_signature, ") -> ", best); | ||||
|   TUNABLE_LOG2(op_signature, "(", params_signature, ") -> ", best); | ||||
|   kernel_map.emplace(params_signature, best); | ||||
| } | ||||
|  | ||||
| @ -120,7 +120,7 @@ void TuningResultsManager::Delete(const std::string& op_signature, const std::st | ||||
|     return; | ||||
|   } | ||||
|  | ||||
|   TUNABLE_LOG(op_signature, "(", params_signature, ")"); | ||||
|   TUNABLE_LOG2(op_signature, "(", params_signature, ")"); | ||||
|   it->second.erase(it2); | ||||
| } | ||||
|  | ||||
| @ -131,7 +131,7 @@ inline void TuningResultsManager::DisjointMergeImpl( | ||||
|   auto it = results.find(op_signature); | ||||
|   if (it == results.end()) { | ||||
|     for (const auto& [param_sig, kernel_id] : kernel_map) { | ||||
|       TUNABLE_LOG(op_signature, "(", param_sig, ") -> ", kernel_id); | ||||
|       TUNABLE_LOG2(op_signature, "(", param_sig, ") -> ", kernel_id); | ||||
|     } | ||||
|     results[op_signature] = kernel_map; | ||||
|     return; | ||||
| @ -143,7 +143,7 @@ inline void TuningResultsManager::DisjointMergeImpl( | ||||
| } | ||||
|  | ||||
| void TuningResultsManager::Load(const std::unordered_map<std::string, KernelMap>& results_to_load) { | ||||
|   TUNABLE_LOG("Loading results"); | ||||
|   TUNABLE_LOG1("Loading results"); | ||||
|   std::scoped_lock l{lock_}; | ||||
|   for (const auto& [op_signature, kernel_map] : results_to_load) { | ||||
|     DisjointMergeImpl(op_signature, kernel_map, results_); | ||||
| @ -194,12 +194,12 @@ static bool CheckMandatoryKeys( | ||||
|   for (const auto& k : TuningResultsValidator::mandatory_keys) { | ||||
|     if (gv_funcs.find(k) == gv_funcs.end()) { | ||||
|       passed = false; | ||||
|       TUNABLE_LOG("key=\"", k, "\" is not registered for Get and Validate. "); | ||||
|       TUNABLE_LOG1("key=\"", k, "\" is not registered for Get and Validate. "); | ||||
|     } | ||||
|  | ||||
|     if (to_check.find(k) == to_check.end()) { | ||||
|       passed = false; | ||||
|       TUNABLE_LOG("key=\"", k, "\" is not provided for validation. "); | ||||
|       TUNABLE_LOG1("key=\"", k, "\" is not provided for validation. "); | ||||
|     } | ||||
|   } | ||||
|   return passed; | ||||
| @ -294,10 +294,14 @@ TuningContext::TuningContext() : | ||||
|     enable_{false}, | ||||
|     tuning_enable_{true}, | ||||
|     manager_initialized_{false}, | ||||
|     write_file_on_exit_{true}, | ||||
|     numerics_check_enable_{false}, | ||||
|     max_tuning_duration_ms_{30}, | ||||
|     max_tuning_iterations_{100}, | ||||
|     max_warmup_duration_ms_{0}, | ||||
|     max_warmup_iterations_{0}, | ||||
|     icache_flush_{true}, | ||||
|     rotating_buffer_size_{-1}, | ||||
|     filename_{}, | ||||
|     results_count_from_input_file_{0} | ||||
| { | ||||
| @ -311,115 +315,158 @@ TuningContext::~TuningContext() { | ||||
|     return; | ||||
|   } | ||||
|   auto filename = GetFilename(); | ||||
|   if (IsTunableOpEnabled() && IsTuningEnabled() && !filename.empty()) { | ||||
|   if (IsTunableOpEnabled() && IsTuningEnabled() && !filename.empty() && write_file_on_exit_) { | ||||
|     if (results_count_from_input_file_ < GetTuningResultsManager().GetSize()) { | ||||
|       if (results_count_from_input_file_ > 0) { | ||||
|         TUNABLE_LOG("additional tuning results available, rewriting file ", filename); | ||||
|         TUNABLE_LOG1("additional tuning results available, rewriting file ", filename); | ||||
|       } | ||||
|       else { | ||||
|         TUNABLE_LOG("writing file ", filename); | ||||
|         TUNABLE_LOG1("writing file ", filename); | ||||
|       } | ||||
|       if (!WriteFile(filename)) { | ||||
|         TUNABLE_LOG("failed to write file ", filename); | ||||
|         TUNABLE_LOG1("failed to write file ", filename); | ||||
|       } | ||||
|     } | ||||
|   } | ||||
| } | ||||
|  | ||||
| void TuningContext::EnableTunableOp() { | ||||
|   TUNABLE_LOG("Enable TunableOp"); | ||||
|   enable_ = true; | ||||
| } | ||||
|  | ||||
| void TuningContext::DisableTunableOp() { | ||||
|   TUNABLE_LOG("Disable TunableOp"); | ||||
|   enable_ = false; | ||||
| void TuningContext::EnableTunableOp(bool value) { | ||||
|   enable_ = value; | ||||
|   if (value) { | ||||
|     TUNABLE_LOG1("Enable TunableOp"); | ||||
|   } | ||||
|   else { | ||||
|     TUNABLE_LOG1("Disable TunableOp"); | ||||
|   } | ||||
| } | ||||
|  | ||||
| bool TuningContext::IsTunableOpEnabled() const { | ||||
|   static const char *env = std::getenv("PYTORCH_TUNABLEOP_ENABLED"); | ||||
|   if (env != nullptr && strcmp(env, "1") == 0) { | ||||
|     //TUNABLE_LOG("PYTORCH_TUNABLEOP_ENABLED=1"); | ||||
|     return true; | ||||
|   } | ||||
|   return enable_; | ||||
| } | ||||
|  | ||||
| void TuningContext::EnableTuning() { | ||||
|   TUNABLE_LOG("Enable Tuning for TunableOp"); | ||||
|   tuning_enable_ = true; | ||||
| } | ||||
|  | ||||
| void TuningContext::DisableTuning() { | ||||
|   TUNABLE_LOG("Disable Tuning for TunableOp"); | ||||
|   tuning_enable_ = false; | ||||
| void TuningContext::EnableTuning(bool value) { | ||||
|   tuning_enable_ = value; | ||||
|   if (value) { | ||||
|     TUNABLE_LOG1("Enable Tuning for TunableOp"); | ||||
|   } | ||||
|   else { | ||||
|     TUNABLE_LOG1("Disable Tuning for TunableOp"); | ||||
|   } | ||||
| } | ||||
|  | ||||
| bool TuningContext::IsTuningEnabled() const { | ||||
|   static const char *env = std::getenv("PYTORCH_TUNABLEOP_TUNING"); | ||||
|   if (env != nullptr && strcmp(env, "0") == 0) { | ||||
|     //TUNABLE_LOG("PYTORCH_TUNABLEOP_TUNING=1"); | ||||
|     return false; | ||||
|   } | ||||
|   return tuning_enable_; | ||||
| } | ||||
|  | ||||
| void TuningContext::WriteFileOnExit(bool value) { | ||||
|   write_file_on_exit_ = value; | ||||
| } | ||||
|  | ||||
| void TuningContext::EnableNumericsCheck(bool value) { | ||||
|   numerics_check_enable_ = value; | ||||
| } | ||||
|  | ||||
| bool TuningContext::IsNumericsCheckEnabled() const { | ||||
|   static const char *env = getenv("PYTORCH_TUNABLEOP_NUMERICAL_CHECK"); | ||||
|   if (env != nullptr && strcmp(env, "0") == 0) { | ||||
|     return false; | ||||
|   } | ||||
|   return numerics_check_enable_; | ||||
| } | ||||
|  | ||||
| void TuningContext::SetMaxTuningDurationMs(int max_duration_ms) { | ||||
|   max_tuning_duration_ms_ = max_duration_ms; | ||||
|   max_tuning_duration_ms_ = max_duration_ms < 0 ? 0 : max_duration_ms; | ||||
| } | ||||
|  | ||||
| int TuningContext::GetMaxTuningDurationMs() const { | ||||
|   static const char *env = std::getenv("PYTORCH_TUNABLEOP_MAX_TUNING_DURATION_MS"); | ||||
|   if (env != nullptr) { | ||||
|     return atoi(env); | ||||
|     int val = atoi(env); | ||||
|     return val < 0 ? 0 : val; | ||||
|   } | ||||
|   return max_tuning_duration_ms_; | ||||
| } | ||||
|  | ||||
| void TuningContext::SetMaxTuningIterations(int max_iter) { | ||||
|   max_tuning_iterations_ = max_iter; | ||||
|   max_tuning_iterations_ = max_iter < 0 ? 0 : max_iter; | ||||
| } | ||||
|  | ||||
| int TuningContext::GetMaxTuningIterations() const { | ||||
|   static const char *env = std::getenv("PYTORCH_TUNABLEOP_MAX_TUNING_ITERATIONS"); | ||||
|   if (env != nullptr) { | ||||
|     return atoi(env); | ||||
|     int val = atoi(env); | ||||
|     return val < 0 ? 0 : val; | ||||
|   } | ||||
|   return max_tuning_iterations_; | ||||
| } | ||||
|  | ||||
| void TuningContext::SetMaxWarmupDurationMs(int max_duration_ms) { | ||||
|   max_warmup_duration_ms_ = max_duration_ms; | ||||
|   max_warmup_duration_ms_ = max_duration_ms < 0 ? 0 : max_duration_ms; | ||||
| } | ||||
|  | ||||
| int TuningContext::GetMaxWarmupDurationMs() const { | ||||
|   static const char *env = std::getenv("PYTORCH_TUNABLEOP_MAX_WARMUP_DURATION_MS"); | ||||
|   if (env != nullptr) { | ||||
|     return atoi(env); | ||||
|     int val = atoi(env); | ||||
|     return val < 0 ? 0 : val; | ||||
|   } | ||||
|   return max_warmup_duration_ms_; | ||||
| } | ||||
|  | ||||
| void TuningContext::SetMaxWarmupIterations(int max_iter) { | ||||
|   max_warmup_iterations_ = max_iter; | ||||
|   max_warmup_iterations_ = max_iter < 0 ? 0 : max_iter; | ||||
| } | ||||
|  | ||||
| int TuningContext::GetMaxWarmupIterations() const { | ||||
|   static const char *env = std::getenv("PYTORCH_TUNABLEOP_MAX_WARMUP_ITERATIONS"); | ||||
|   if (env != nullptr) { | ||||
|     return atoi(env); | ||||
|     int val = atoi(env); | ||||
|     return val < 0 ? 0 : val; | ||||
|   } | ||||
|   return max_warmup_iterations_; | ||||
| } | ||||
|  | ||||
| void TuningContext::EnableTunableOpAndTuning() { | ||||
|   EnableTunableOp(); | ||||
|   EnableTuning(); | ||||
| void TuningContext::EnableICacheFlush(bool value) { | ||||
|   icache_flush_ = value; | ||||
| } | ||||
|  | ||||
| void TuningContext::DisableTunableOpAndTuning() { | ||||
|   DisableTunableOp(); | ||||
|   DisableTuning(); | ||||
| bool TuningContext::IsICacheFlushEnabled() const { | ||||
|   static const char *env = std::getenv("PYTORCH_TUNABLEOP_ICACHE_FLUSH_ENABLED"); | ||||
|   if (env != nullptr && strcmp(env, "0") == 0) { | ||||
|     return false; | ||||
|   } | ||||
|   return icache_flush_; | ||||
| } | ||||
|  | ||||
| void TuningContext::SetRotatingBufferSize(int size) { | ||||
|   rotating_buffer_size_ = size < 0 ? 0 : size; | ||||
| } | ||||
|  | ||||
| int TuningContext::GetRotatingBufferSize() const { | ||||
|   static const char *env = std::getenv("PYTORCH_TUNABLEOP_ROTATING_BUFFER_SIZE"); | ||||
|   if (env != nullptr) { | ||||
|     constexpr int MB = 1024 * 1024; | ||||
|     int val = atoi(env); | ||||
|     return val < 0 ? 0 : val * MB;  // env var is specified as MB, returned as bytes | ||||
|   } | ||||
|   else { | ||||
|     if (rotating_buffer_size_ < 0) { | ||||
|       // negative buffer size (default) means query for L2 cache size | ||||
|       int l2_cache_size = at::cuda::getCurrentDeviceProperties()->l2CacheSize; | ||||
|       return l2_cache_size; | ||||
|     } | ||||
|     else { | ||||
|       return rotating_buffer_size_; | ||||
|     } | ||||
|   } | ||||
| } | ||||
|  | ||||
| TuningResultsManager& TuningContext::GetTuningResultsManager() { | ||||
| @ -429,7 +476,7 @@ TuningResultsManager& TuningContext::GetTuningResultsManager() { | ||||
|       // if SetFilename() was not already called, call it now with the default or env var | ||||
|       const char *env = std::getenv("PYTORCH_TUNABLEOP_FILENAME"); | ||||
|       std::string filename = (env == nullptr) ? "tunableop_results.csv" : env; | ||||
|       SetFilename(filename); | ||||
|       SetFilename(filename, true); | ||||
|     } | ||||
|     auto filename = GetFilename(); | ||||
|     if (!filename.empty()) { | ||||
| @ -461,32 +508,34 @@ TuningStatus TuningContext::LoadTuningResults(const TuningResults& tr) { | ||||
|   return OK; | ||||
| } | ||||
|  | ||||
| void TuningContext::SetFilename(const std::string& filename) { | ||||
| void TuningContext::SetFilename(const std::string& filename, bool insert_device_ordinal) { | ||||
|   filename_ = filename; | ||||
|  | ||||
|   if (filename_.empty()) { | ||||
|     return; | ||||
|   } | ||||
|  | ||||
|   // differentiate filename based on device ordinal to avoid | ||||
|   // use case of one process per device writing to same file | ||||
|   std::string device = c10::str(int(c10::cuda::current_device())); | ||||
|   if (insert_device_ordinal) { | ||||
|     // differentiate filename based on device ordinal to avoid | ||||
|     // use case of one process per device writing to same file | ||||
|     std::string device = c10::str(int(c10::cuda::current_device())); | ||||
|  | ||||
|   // does filename contain %d to insert device ordinal in specific location? | ||||
|   const std::string TOKEN("%d"); | ||||
|   std::size_t found = filename_.find(TOKEN); | ||||
|   if (found != std::string::npos) { | ||||
|     filename_.replace(found, TOKEN.length(), device); | ||||
|   } | ||||
|   else { | ||||
|     // no %d present, so append device ordinal before final '.' | ||||
|     found = filename_.rfind("."); | ||||
|     // does filename contain %d to insert device ordinal in specific location? | ||||
|     const std::string TOKEN("%d"); | ||||
|     std::size_t found = filename_.find(TOKEN); | ||||
|     if (found != std::string::npos) { | ||||
|       filename_.insert(found, device); | ||||
|       filename_.replace(found, TOKEN.length(), device); | ||||
|     } | ||||
|     else { | ||||
|       // all else fails, just append | ||||
|       filename_.append(device); | ||||
|       // no %d present, so append device ordinal before final '.' | ||||
|       found = filename_.rfind("."); | ||||
|       if (found != std::string::npos) { | ||||
|         filename_.insert(found, device); | ||||
|       } | ||||
|       else { | ||||
|         // all else fails, just append | ||||
|         filename_.append(device); | ||||
|       } | ||||
|     } | ||||
|   } | ||||
| } | ||||
| @ -495,14 +544,15 @@ std::string TuningContext::GetFilename() const { | ||||
|   return filename_; | ||||
| } | ||||
|  | ||||
| bool TuningContext::ReadFile(const std::string& filename) { | ||||
|   TUNABLE_LOG("reading tuning results from ", filename); | ||||
| bool TuningContext::ReadFile(const std::string& filename_) { | ||||
|   std::string filename = filename_.empty() ? GetFilename() : filename_; | ||||
|   TUNABLE_LOG1("reading tuning results from ", filename); | ||||
|   ResultsMap results; | ||||
|   std::unordered_map<std::string, std::string> validators; | ||||
|   std::string line; | ||||
|   std::ifstream file(filename); | ||||
|   if (!file) { | ||||
|     TUNABLE_LOG("could not open ", filename, " for reading tuning results"); | ||||
|     TUNABLE_LOG1("could not open ", filename, " for reading tuning results"); | ||||
|     return false; | ||||
|   } | ||||
|   while (std::getline(file, line)) { | ||||
| @ -517,7 +567,7 @@ bool TuningContext::ReadFile(const std::string& filename) { | ||||
|     } | ||||
|     if (parts[0] == "Validator" && parts.size() >= 3) { | ||||
|       validators[parts[1]] = parts[2]; | ||||
|       TUNABLE_LOG("Validator ", parts[1], "=", parts[2]); | ||||
|       TUNABLE_LOG1("Validator ", parts[1], "=", parts[2]); | ||||
|     } | ||||
|     else if (parts.size() >= 4) { | ||||
|       results[parts[0]].emplace(parts[1], ResultEntry(parts[2], atof(parts[3].c_str()))); | ||||
| @ -527,7 +577,7 @@ bool TuningContext::ReadFile(const std::string& filename) { | ||||
|       results[parts[0]].emplace(parts[1], ResultEntry(parts[2], 0)); | ||||
|     } | ||||
|     else { | ||||
|       TUNABLE_LOG("could not parse line: ", line); | ||||
|       TUNABLE_LOG1("could not parse line: ", line); | ||||
|     } | ||||
|   } | ||||
|   if (GetTuningResultsValidator().ValidateAll(validators) != FAIL) { | ||||
| @ -535,16 +585,17 @@ bool TuningContext::ReadFile(const std::string& filename) { | ||||
|     results_count_from_input_file_ = manager_.GetSize(); | ||||
|   } | ||||
|   else { | ||||
|     TUNABLE_LOG("results validator check failed"); | ||||
|     TUNABLE_LOG1("results validator check failed"); | ||||
|     return false; | ||||
|   } | ||||
|   return true; | ||||
| } | ||||
|  | ||||
| bool TuningContext::WriteFile(const std::string& filename) { | ||||
| bool TuningContext::WriteFile(const std::string& filename_) { | ||||
|   std::string filename = filename_.empty() ? GetFilename() : filename_; | ||||
|   std::ofstream file(filename, std::ios::out | std::ios::trunc); | ||||
|   if (!file.good()) { | ||||
|     TUNABLE_LOG("error opening tuning results file for writing ", filename); | ||||
|     TUNABLE_LOG1("error opening tuning results file for writing ", filename); | ||||
|     return false; | ||||
|   } | ||||
|   auto validators = GetTuningResultsValidator().GetAllValidators(); | ||||
|  | ||||
| @ -11,6 +11,7 @@ | ||||
|  | ||||
| #include <c10/util/CallOnce.h> | ||||
|  | ||||
| #include <fstream> | ||||
| #include <functional> | ||||
| #include <iostream> | ||||
| #include <memory> | ||||
| @ -23,27 +24,58 @@ | ||||
|  | ||||
| namespace at::cuda::tunable { | ||||
|  | ||||
| static void TunableLog(const std::string& msg) { | ||||
|   static const char *env = getenv("PYTORCH_TUNABLEOP_VERBOSE"); | ||||
|   if (env != nullptr && strcmp(env, "1") == 0) { | ||||
|     std::cerr << msg << std::endl; | ||||
| namespace detail { | ||||
|  | ||||
| struct MaybeDelete { | ||||
|   bool owns_pointer; | ||||
|   void operator()(std::ostream* os) const { if (owns_pointer) delete os; } | ||||
| }; | ||||
|  | ||||
| using OstreamPtr = std::unique_ptr<std::ostream, MaybeDelete>; | ||||
|  | ||||
| static OstreamPtr get_stream(std::string filename) { | ||||
|   if (filename.compare("out") == 0) { | ||||
|     return OstreamPtr { &std::cout, MaybeDelete {false} }; | ||||
|   } | ||||
|   else if (filename.compare("err") == 0) { | ||||
|     return OstreamPtr { &std::cerr, MaybeDelete {false} }; | ||||
|   } | ||||
|   else { | ||||
|     return OstreamPtr { new std::ofstream {filename.c_str()}, MaybeDelete {true} }; | ||||
|   } | ||||
| } | ||||
| #define TUNABLE_LOG(...) TunableLog(c10::str(__VA_ARGS__)) | ||||
|  | ||||
| enum TuningStatus { | ||||
| } | ||||
|  | ||||
| static void TunableLog(int level, const std::string& msg) { | ||||
|   static const char *env_file = getenv("PYTORCH_TUNABLEOP_VERBOSE_FILENAME"); | ||||
|   static const char *env_verbose = getenv("PYTORCH_TUNABLEOP_VERBOSE"); | ||||
|   static int level_user = env_verbose ? atoi(env_verbose) : 0; | ||||
|   static auto streamptr = detail::get_stream(env_file ? env_file : "err"); | ||||
|   if (level_user >= level) { | ||||
    (*streamptr) << msg << std::endl;
|   } | ||||
| } | ||||
| #define TUNABLE_LOGV(LEVEL, ...) TunableLog(LEVEL, c10::str(__VA_ARGS__)) | ||||
| #define TUNABLE_LOG1(...) TUNABLE_LOGV(1, __VA_ARGS__) | ||||
| #define TUNABLE_LOG2(...) TUNABLE_LOGV(2, __VA_ARGS__) | ||||
| #define TUNABLE_LOG3(...) TUNABLE_LOGV(3, __VA_ARGS__) | ||||
|  | ||||
| enum TORCH_CUDA_CPP_API TuningStatus { | ||||
|   OK = 0, | ||||
|   FAIL = 1, | ||||
|   UNSUPPORTED = 2, | ||||
| }; | ||||
|  | ||||
| // Mapping from params signature to kernel id | ||||
| class ResultEntry { | ||||
| class TORCH_CUDA_CPP_API ResultEntry { | ||||
|   public: | ||||
|     explicit ResultEntry(const std::string& key, double time) : key_(key), time_(time) {} | ||||
|     bool operator==(const ResultEntry& other) { return key_ == other.key_; } | ||||
|     bool operator!=(const ResultEntry& other) { return key_ != other.key_; } | ||||
|     operator std::string () { return key_; } | ||||
|     std::string GetKey() const { return key_; } | ||||
|     double GetTime() const { return time_; } | ||||
|     friend std::ostream& operator<<(std::ostream& stream, const ResultEntry& entry); | ||||
|     static ResultEntry Null() { return ResultEntry("Null", 0.0); } | ||||
|     static ResultEntry Default() { return ResultEntry("Default", 0.0); } | ||||
| @ -56,7 +88,7 @@ class ResultEntry { | ||||
| typedef std::unordered_map<std::string, ResultEntry> KernelMap; | ||||
| typedef std::unordered_map<std::string, KernelMap> ResultsMap; | ||||
|  | ||||
| struct TuningResults { | ||||
| struct TORCH_CUDA_CPP_API TuningResults { | ||||
|   // Validates if these results are compatible with the libraries | ||||
|   std::unordered_map<std::string, std::string> validators; | ||||
|  | ||||
| @ -64,7 +96,7 @@ struct TuningResults { | ||||
|   ResultsMap results; | ||||
| }; | ||||
|  | ||||
| class TuningResultsManager { | ||||
| class TORCH_CUDA_CPP_API TuningResultsManager { | ||||
|   public: | ||||
|     TuningResultsManager() = default; | ||||
|     ~TuningResultsManager() = default; | ||||
| @ -102,7 +134,7 @@ class TuningResultsManager { | ||||
|     ResultsMap results_; | ||||
| }; | ||||
|  | ||||
| class TuningResultsValidator { | ||||
| class TORCH_CUDA_CPP_API TuningResultsValidator { | ||||
|   public: | ||||
|     using GetFunc = std::function<std::string()>; | ||||
|     using ValidateFunc = std::function<TuningStatus(const std::string&)>; | ||||
| @ -126,7 +158,7 @@ class TuningResultsValidator { | ||||
|     GetValidateFuncs validators_; | ||||
| }; | ||||
|  | ||||
| class TuningContext { | ||||
| class TORCH_CUDA_CPP_API TuningContext { | ||||
|   public: | ||||
|     TuningContext(); | ||||
|     ~TuningContext(); | ||||
| @ -135,14 +167,15 @@ class TuningContext { | ||||
|     TuningContext &operator=(TuningContext &) = delete; | ||||
|     TuningContext &operator=(TuningContext &&) = delete; | ||||
|  | ||||
|     void EnableTunableOp(); | ||||
|     void DisableTunableOp(); | ||||
|     void EnableTunableOp(bool value); | ||||
|     bool IsTunableOpEnabled() const; | ||||
|  | ||||
|     void EnableTuning(); | ||||
|     void DisableTuning(); | ||||
|     void EnableTuning(bool value); | ||||
|     bool IsTuningEnabled() const; | ||||
|  | ||||
|     void EnableNumericsCheck(bool value); | ||||
|     bool IsNumericsCheckEnabled() const; | ||||
|  | ||||
|     void SetMaxTuningDurationMs(int max_duration_ms); | ||||
|     int GetMaxTuningDurationMs() const; | ||||
|  | ||||
| @ -155,8 +188,11 @@ class TuningContext { | ||||
|     void SetMaxWarmupIterations(int max_iter); | ||||
|     int GetMaxWarmupIterations() const; | ||||
|  | ||||
|     void EnableTunableOpAndTuning(); | ||||
|     void DisableTunableOpAndTuning(); | ||||
|     void EnableICacheFlush(bool value); | ||||
|     bool IsICacheFlushEnabled() const; | ||||
|  | ||||
|     void SetRotatingBufferSize(int size); | ||||
|     int GetRotatingBufferSize() const; | ||||
|  | ||||
|     TuningResultsManager& GetTuningResultsManager(); | ||||
|  | ||||
| @ -166,21 +202,26 @@ class TuningContext { | ||||
|  | ||||
|     TuningStatus LoadTuningResults(const TuningResults& tr); | ||||
|  | ||||
|     void SetFilename(const std::string& filename); | ||||
|     void SetFilename(const std::string& filename, bool insert_device_ordinal=false); | ||||
|     std::string GetFilename() const; | ||||
|  | ||||
|   protected: | ||||
|     bool ReadFile(const std::string& filename); | ||||
|     bool WriteFile(const std::string& filename); | ||||
|     void WriteFileOnExit(bool value); | ||||
|  | ||||
|     bool ReadFile(const std::string& filename={}); | ||||
|     bool WriteFile(const std::string& filename={}); | ||||
|  | ||||
|   private: | ||||
|     bool enable_; | ||||
|     bool tuning_enable_; | ||||
|     bool manager_initialized_; | ||||
|     bool write_file_on_exit_; | ||||
|     bool numerics_check_enable_; | ||||
|     int max_tuning_duration_ms_; | ||||
|     int max_tuning_iterations_; | ||||
|     int max_warmup_duration_ms_; | ||||
|     int max_warmup_iterations_; | ||||
|     bool icache_flush_; | ||||
|     int rotating_buffer_size_; | ||||
|     mutable TuningResultsManager manager_; | ||||
|     mutable c10::once_flag manager_init_once_; | ||||
|     TuningResultsValidator validator_; | ||||
| @ -188,7 +229,7 @@ class TuningContext { | ||||
|     size_t results_count_from_input_file_; | ||||
| }; | ||||
|  | ||||
| TuningContext* getTuningContext(); | ||||
| TORCH_CUDA_CPP_API TuningContext* getTuningContext(); | ||||
|  | ||||
| class ITimer { | ||||
|   public: | ||||
|  | ||||
| @ -175,6 +175,56 @@ inline std::string TypeName(c10::complex<float> v) { | ||||
|   return "c10::complex<float>"; | ||||
| } | ||||
|  | ||||
| #ifdef USE_ROCM | ||||
| static void AddRocblasValidator() { | ||||
|   auto validators = getTuningContext()->GetTuningResultsValidator().GetAllValidators(); | ||||
|   if (validators.find("ROCBLAS_VERSION") == validators.end()) { | ||||
|     std::string rocblas_version = c10::str( | ||||
|         XSTRINGIFY(ROCBLAS_VERSION_MAJOR), ".", | ||||
|         XSTRINGIFY(ROCBLAS_VERSION_MINOR), ".", | ||||
|         XSTRINGIFY(ROCBLAS_VERSION_PATCH), "-", | ||||
|         XSTRINGIFY(ROCBLAS_VERSION_TWEAK)); | ||||
|     getTuningContext()->GetTuningResultsValidator().RegisterValidator( | ||||
|         "ROCBLAS_VERSION", | ||||
|         [rocblas_version]() { return rocblas_version; }, | ||||
|         [rocblas_version](auto&& k) { return rocblas_version == k ? OK : FAIL; }); | ||||
|   } | ||||
| } | ||||
|  | ||||
| static void AddHipblasltValidator() { | ||||
|   auto validators = getTuningContext()->GetTuningResultsValidator().GetAllValidators(); | ||||
|   if (validators.find("HIPBLASLT_VERSION") == validators.end()) { | ||||
|     std::string hipblaslt_version = c10::str( | ||||
|         XSTRINGIFY(HIPBLASLT_VERSION_MAJOR), ".", | ||||
|         XSTRINGIFY(HIPBLASLT_VERSION_MINOR), ".", | ||||
|         XSTRINGIFY(HIPBLASLT_VERSION_PATCH), "-", | ||||
|         XSTRINGIFY(HIPBLASLT_VERSION_TWEAK)); | ||||
|     getTuningContext()->GetTuningResultsValidator().RegisterValidator( | ||||
|         "HIPBLASLT_VERSION", | ||||
|         [hipblaslt_version]() { return hipblaslt_version; }, | ||||
|         [hipblaslt_version](auto&& k) { return hipblaslt_version == k ? OK : FAIL; }); | ||||
|   } | ||||
| } | ||||
|  | ||||
| static void AddRocmValidator() { | ||||
|   auto validators = getTuningContext()->GetTuningResultsValidator().GetAllValidators(); | ||||
|   if (validators.find("ROCM_VERSION") == validators.end()) { | ||||
|     std::string rocm_version = ROCM_BUILD_INFO; | ||||
|     getTuningContext()->GetTuningResultsValidator().RegisterValidator( | ||||
|         "ROCM_VERSION", | ||||
|         [rocm_version]() { return rocm_version; }, | ||||
|         [rocm_version](auto&& k) { return rocm_version == k ? OK : FAIL; }); | ||||
|   } | ||||
|  | ||||
|   if (validators.find("GCN_ARCH_NAME") == validators.end()) { | ||||
|     std::string gcn_arch_name = at::cuda::getCurrentDeviceProperties()->gcnArchName; | ||||
|     getTuningContext()->GetTuningResultsValidator().RegisterValidator( | ||||
|         "GCN_ARCH_NAME", | ||||
|         [gcn_arch_name]() { return gcn_arch_name; }, | ||||
|         [gcn_arch_name](auto&& k) { return gcn_arch_name == k ? OK : FAIL; }); | ||||
|   } | ||||
| } | ||||
| #endif | ||||
|  | ||||
| template <typename T, BlasOp ALayout, BlasOp BLayout> | ||||
| class GemmTunableOp : public TunableOp<GemmParams<T>, StreamTimer> { | ||||
| @ -182,45 +232,21 @@ class GemmTunableOp : public TunableOp<GemmParams<T>, StreamTimer> { | ||||
|   GemmTunableOp() { | ||||
|     this->RegisterOp(std::string("Default"), std::make_unique<DefaultGemmOp<T>>()); | ||||
|  | ||||
|     auto validators = getTuningContext()->GetTuningResultsValidator().GetAllValidators(); | ||||
|  | ||||
| #ifdef USE_ROCM | ||||
|     for (auto&& [name, op] : GetRocBlasGemmTypeStringAndOps<T>()) { | ||||
|       this->RegisterOp(std::move(name), std::move(op)); | ||||
|     bool rocm_validators = false; | ||||
|  | ||||
|     static const char *env_rocblas = std::getenv("PYTORCH_TUNABLEOP_ROCBLAS_ENABLED"); | ||||
|     if (env_rocblas == nullptr || strcmp(env_rocblas, "1") == 0) { | ||||
|       rocm_validators = true; | ||||
|       for (auto&& [name, op] : GetRocBlasGemmTypeStringAndOps<T>()) { | ||||
|         this->RegisterOp(std::move(name), std::move(op)); | ||||
|       } | ||||
|       AddRocblasValidator(); | ||||
|     } | ||||
|  | ||||
|     if (validators.find("ROCM_VERSION") == validators.end()) { | ||||
|       std::string rocm_version = ROCM_BUILD_INFO; | ||||
|       getTuningContext()->GetTuningResultsValidator().RegisterValidator( | ||||
|           "ROCM_VERSION", | ||||
|           [rocm_version]() { return rocm_version; }, | ||||
|           [rocm_version](auto&& k) { return rocm_version == k ? OK : FAIL; }); | ||||
|     } | ||||
|  | ||||
|     if (validators.find("GCN_ARCH_NAME") == validators.end()) { | ||||
|       std::string gcn_arch_name = at::cuda::getCurrentDeviceProperties()->gcnArchName; | ||||
|       getTuningContext()->GetTuningResultsValidator().RegisterValidator( | ||||
|           "GCN_ARCH_NAME", | ||||
|           [gcn_arch_name]() { return gcn_arch_name; }, | ||||
|           [gcn_arch_name](auto&& k) { return gcn_arch_name == k ? OK : FAIL; }); | ||||
|     } | ||||
|  | ||||
|     if (validators.find("ROCBLAS_VERSION") == validators.end()) { | ||||
|       std::string rocblas_version = c10::str( | ||||
|           XSTRINGIFY(ROCBLAS_VERSION_MAJOR), ".", | ||||
|           XSTRINGIFY(ROCBLAS_VERSION_MINOR), ".", | ||||
|           XSTRINGIFY(ROCBLAS_VERSION_PATCH), "-", | ||||
|           XSTRINGIFY(ROCBLAS_VERSION_TWEAK)); | ||||
|       getTuningContext()->GetTuningResultsValidator().RegisterValidator( | ||||
|           "ROCBLAS_VERSION", | ||||
|           [rocblas_version]() { return rocblas_version; }, | ||||
|           [rocblas_version](auto&& k) { return rocblas_version == k ? OK : FAIL; }); | ||||
|     } | ||||
| #endif | ||||
|  | ||||
| #if defined(USE_ROCM) | ||||
|     static const char *env = std::getenv("PYTORCH_TUNABLEOP_HIPBLASLT_ENABLED"); | ||||
|     if (env == nullptr || strcmp(env, "1") == 0) { | ||||
|     static const char *env_hipblaslt = std::getenv("PYTORCH_TUNABLEOP_HIPBLASLT_ENABLED"); | ||||
|     if (env_hipblaslt == nullptr || strcmp(env_hipblaslt, "1") == 0) { | ||||
|       rocm_validators = true; | ||||
|       // disallow tuning of hipblaslt with c10::complex | ||||
|       if constexpr ( | ||||
|           !std::is_same_v<T, c10::complex<float>> && | ||||
| @ -229,18 +255,11 @@ class GemmTunableOp : public TunableOp<GemmParams<T>, StreamTimer> { | ||||
|           this->RegisterOp(std::move(name), std::move(op)); | ||||
|         } | ||||
|       } | ||||
|       AddHipblasltValidator(); | ||||
|     } | ||||
|  | ||||
|       if (validators.find("HIPBLASLT_VERSION") == validators.end()) { | ||||
|         std::string hipblaslt_version = c10::str( | ||||
|             XSTRINGIFY(HIPBLASLT_VERSION_MAJOR), ".", | ||||
|             XSTRINGIFY(HIPBLASLT_VERSION_MINOR), ".", | ||||
|             XSTRINGIFY(HIPBLASLT_VERSION_PATCH), "-", | ||||
|             XSTRINGIFY(HIPBLASLT_VERSION_TWEAK)); | ||||
|         getTuningContext()->GetTuningResultsValidator().RegisterValidator( | ||||
|             "HIPBLASLT_VERSION", | ||||
|             [hipblaslt_version]() { return hipblaslt_version; }, | ||||
|             [hipblaslt_version](auto&& k) { return hipblaslt_version == k ? OK : FAIL; }); | ||||
|       } | ||||
|     if (rocm_validators) { | ||||
|       AddRocmValidator(); | ||||
|     } | ||||
| #endif | ||||
|   } | ||||
| @ -256,45 +275,21 @@ class GemmStridedBatchedTunableOp : public TunableOp<GemmStridedBatchedParams<T> | ||||
|   GemmStridedBatchedTunableOp() { | ||||
|     this->RegisterOp(std::string("Default"), std::make_unique<DefaultGemmStridedBatchedOp<T>>()); | ||||
|  | ||||
|     auto validators = getTuningContext()->GetTuningResultsValidator().GetAllValidators(); | ||||
|  | ||||
| #ifdef USE_ROCM | ||||
|     for (auto&& [name, op] : GetRocBlasGemmStridedBatchedTypeStringAndOps<T>()) { | ||||
|       this->RegisterOp(std::move(name), std::move(op)); | ||||
|     bool rocm_validators = false; | ||||
|  | ||||
|     static const char *env_rocblas = std::getenv("PYTORCH_TUNABLEOP_ROCBLAS_ENABLED"); | ||||
|     if (env_rocblas == nullptr || strcmp(env_rocblas, "1") == 0) { | ||||
|       rocm_validators = true; | ||||
|       for (auto&& [name, op] : GetRocBlasGemmStridedBatchedTypeStringAndOps<T>()) { | ||||
|         this->RegisterOp(std::move(name), std::move(op)); | ||||
|       } | ||||
|       AddRocblasValidator(); | ||||
|     } | ||||
|  | ||||
|     if (validators.find("ROCM_VERSION") == validators.end()) { | ||||
|       std::string rocm_version = ROCM_BUILD_INFO; | ||||
|       getTuningContext()->GetTuningResultsValidator().RegisterValidator( | ||||
|           "ROCM_VERSION", | ||||
|           [rocm_version]() { return rocm_version; }, | ||||
|           [rocm_version](auto&& k) { return rocm_version == k ? OK : FAIL; }); | ||||
|     } | ||||
|  | ||||
|     if (validators.find("GCN_ARCH_NAME") == validators.end()) { | ||||
|       std::string gcn_arch_name = at::cuda::getCurrentDeviceProperties()->gcnArchName; | ||||
|       getTuningContext()->GetTuningResultsValidator().RegisterValidator( | ||||
|           "GCN_ARCH_NAME", | ||||
|           [gcn_arch_name]() { return gcn_arch_name; }, | ||||
|           [gcn_arch_name](auto&& k) { return gcn_arch_name == k ? OK : FAIL; }); | ||||
|     } | ||||
|  | ||||
|     if (validators.find("ROCBLAS_VERSION") == validators.end()) { | ||||
|       std::string rocblas_version = c10::str( | ||||
|           XSTRINGIFY(ROCBLAS_VERSION_MAJOR), ".", | ||||
|           XSTRINGIFY(ROCBLAS_VERSION_MINOR), ".", | ||||
|           XSTRINGIFY(ROCBLAS_VERSION_PATCH), "-", | ||||
|           XSTRINGIFY(ROCBLAS_VERSION_TWEAK)); | ||||
|       getTuningContext()->GetTuningResultsValidator().RegisterValidator( | ||||
|           "ROCBLAS_VERSION", | ||||
|           [rocblas_version]() { return rocblas_version; }, | ||||
|           [rocblas_version](auto&& k) { return rocblas_version == k ? OK : FAIL; }); | ||||
|     } | ||||
| #endif | ||||
|  | ||||
| #if defined(USE_ROCM) | ||||
|     static const char *env = std::getenv("PYTORCH_TUNABLEOP_HIPBLASLT_ENABLED"); | ||||
|     if (env == nullptr || strcmp(env, "1") == 0) { | ||||
|     static const char *env_hipblaslt = std::getenv("PYTORCH_TUNABLEOP_HIPBLASLT_ENABLED"); | ||||
|     if (env_hipblaslt == nullptr || strcmp(env_hipblaslt, "1") == 0) { | ||||
|       rocm_validators = true; | ||||
|       // disallow tuning of hipblaslt with c10::complex | ||||
|       if constexpr ( | ||||
|           !std::is_same_v<T, c10::complex<float>> && | ||||
| @ -303,18 +298,11 @@ class GemmStridedBatchedTunableOp : public TunableOp<GemmStridedBatchedParams<T> | ||||
|           this->RegisterOp(std::move(name), std::move(op)); | ||||
|         } | ||||
|       } | ||||
|       AddHipblasltValidator(); | ||||
|     } | ||||
|  | ||||
|       if (validators.find("HIPBLASLT_VERSION") == validators.end()) { | ||||
|         std::string hipblaslt_version = c10::str( | ||||
|             XSTRINGIFY(HIPBLASLT_VERSION_MAJOR), ".", | ||||
|             XSTRINGIFY(HIPBLASLT_VERSION_MINOR), ".", | ||||
|             XSTRINGIFY(HIPBLASLT_VERSION_PATCH), "-", | ||||
|             XSTRINGIFY(HIPBLASLT_VERSION_TWEAK)); | ||||
|         getTuningContext()->GetTuningResultsValidator().RegisterValidator( | ||||
|             "HIPBLASLT_VERSION", | ||||
|             [hipblaslt_version]() { return hipblaslt_version; }, | ||||
|             [hipblaslt_version](auto&& k) { return hipblaslt_version == k ? OK : FAIL; }); | ||||
|       } | ||||
|     if (rocm_validators) { | ||||
|       AddRocmValidator(); | ||||
|     } | ||||
| #endif | ||||
|   } | ||||
| @ -336,18 +324,8 @@ class ScaledGemmTunableOp : public TunableOp<ScaledGemmParams<CT>, StreamTimer> | ||||
|     for (auto&& [name, op] : GetHipBlasLtScaledGemmTypeStringAndOps<AT, BT, CT, ALayout, BLayout>()) { | ||||
|       this->RegisterOp(std::move(name), std::move(op)); | ||||
|     } | ||||
|  | ||||
|     if (validators.find("HIPBLASLT_VERSION") == validators.end()) { | ||||
|       std::string hipblaslt_version = c10::str( | ||||
|           XSTRINGIFY(HIPBLASLT_VERSION_MAJOR), ".", | ||||
|           XSTRINGIFY(HIPBLASLT_VERSION_MINOR), ".", | ||||
|           XSTRINGIFY(HIPBLASLT_VERSION_PATCH), "-", | ||||
|           XSTRINGIFY(HIPBLASLT_VERSION_TWEAK)); | ||||
|       getTuningContext()->GetTuningResultsValidator().RegisterValidator( | ||||
|           "HIPBLASLT_VERSION", | ||||
|           [hipblaslt_version]() { return hipblaslt_version; }, | ||||
|           [hipblaslt_version](auto&& k) { return hipblaslt_version == k ? OK : FAIL; }); | ||||
|     } | ||||
|     AddHipblasltValidator(); | ||||
|     AddRocmValidator(); | ||||
| #endif | ||||
|   } | ||||
|  | ||||
|  | ||||
| @ -10,6 +10,7 @@ | ||||
| #pragma once | ||||
|  | ||||
| #include <ATen/cuda/tunable/Tunable.h> | ||||
| #include <ATen/cuda/Sleep.h> | ||||
| #include <c10/cuda/CUDACachingAllocator.h> | ||||
|  | ||||
| #ifndef _WIN32 | ||||
| @ -62,7 +63,7 @@ class TunableOp { | ||||
|         result = ResultEntry::Default(); | ||||
|       } | ||||
|       if (result == ResultEntry::Null()) { | ||||
|         TUNABLE_LOG("no result, using default"); | ||||
|         TUNABLE_LOG2("no result, using default"); | ||||
|         result = ResultEntry::Default(); | ||||
|       } | ||||
|       auto iter = ops_.find(result); | ||||
| @ -87,88 +88,120 @@ class TunableOp { | ||||
|     } | ||||
|  | ||||
|   private: | ||||
|     static void WarmUp(Callable<ParamsT> *op, ParamsT* param, size_t num_iter) { | ||||
|     static void WarmUp(Callable<ParamsT> *op, const std::vector<ParamsT*> ¶m, size_t num_iter, size_t &offset) { | ||||
|       TuningContext* ctx = getTuningContext(); | ||||
|       bool do_flush = ctx->IsICacheFlushEnabled(); | ||||
|       for (size_t i = 0; i < num_iter; i++) { | ||||
|         TORCH_CHECK(op->Call(param) == OK); | ||||
|         if (do_flush) { | ||||
|           at::cuda::flush_icache(); | ||||
|         } | ||||
|         TORCH_CHECK(op->Call(param[(i+offset++)%param.size()]) == OK); | ||||
|       } | ||||
|     } | ||||
|  | ||||
|     static double Profile(Callable<ParamsT> *op, ParamsT* param, size_t num_iter) { | ||||
|     static double Profile(Callable<ParamsT> *op, const std::vector<ParamsT*> ¶m, size_t num_iter, size_t &offset) { | ||||
|       TuningContext* ctx = getTuningContext(); | ||||
|       bool do_flush = ctx->IsICacheFlushEnabled(); | ||||
|       TimerT timer{}; | ||||
|       timer.Start(); | ||||
|       for (size_t i = 0; i < num_iter; i++) { | ||||
|         TORCH_CHECK(op->Call(param) == OK); | ||||
|         if (do_flush) { | ||||
|           at::cuda::flush_icache(); | ||||
|         } | ||||
|         TORCH_CHECK(op->Call(param[(i+offset++)%param.size()]) == OK); | ||||
|       } | ||||
|       timer.End(); | ||||
|       return timer.Duration() / num_iter; | ||||
|     } | ||||
|  | ||||
|   protected: | ||||
|     bool IsNumericsCheckEnabled() { | ||||
|       static const char *env = getenv("PYTORCH_TUNABLEOP_NUMERICAL_CHECK"); | ||||
|       if (env != nullptr && strcmp(env, "0") == 0) { | ||||
|         return false; | ||||
|       } | ||||
|       return true; | ||||
|     } | ||||
|  | ||||
|     virtual ResultEntry FindFastest(const ParamsT* params) { | ||||
|       TuningContext* ctx = getTuningContext(); | ||||
|       auto op_sig = Signature(); | ||||
|       auto params_sig = params->Signature(); | ||||
|       TUNABLE_LOG("finding fastest for ", op_sig, '(', params_sig, ')', " out of ", op_names_.size(), " candidates"); | ||||
|       TUNABLE_LOG2("finding fastest for ", op_sig, '(', params_sig, ')', " out of ", op_names_.size(), " candidates"); | ||||
|       auto min_duration_ms = std::numeric_limits<double>::infinity(); | ||||
|       std::string id_name = "Default"; | ||||
|       ParamsT* reference_params = nullptr; | ||||
|  | ||||
      // calculate a reference answer for numerical check
|       ParamsT* reference_params = params->DeepCopy(); | ||||
|       TORCH_CHECK(ops_[ResultEntry::Default()]->Call(reference_params) == OK); | ||||
|       if (ctx->IsNumericsCheckEnabled()) { | ||||
|         reference_params = params->DeepCopy(false); | ||||
|         TORCH_CHECK(ops_[ResultEntry::Default()]->Call(reference_params) == OK); | ||||
|       } | ||||
|  | ||||
|       // need a copy of params to reuse | ||||
|       ParamsT* reusable_params = params->DeepCopy(); | ||||
|       // need copies of params to reuse | ||||
|       // make as many copies as will fill the requested rotating buffer size, if requested | ||||
|       // rotating_size guaranteed to be >= 0 even though GetRotatingBufferSize() returns int | ||||
|       size_t rotating_size = ctx->GetRotatingBufferSize(); | ||||
|       bool use_buffer_rotation = (rotating_size > 0); | ||||
|       size_t param_size = params->GetSize(use_buffer_rotation); | ||||
|       size_t param_count = (rotating_size / param_size) + 1; | ||||
|       constexpr size_t MB = 1024*1024; | ||||
|       if (use_buffer_rotation) { | ||||
|         TUNABLE_LOG2("Rotating buffer ", rotating_size/MB, " MiB. ", | ||||
|             "Needed Size: ", param_size/MB, " MiB. ", | ||||
|             "Needed number of param copies: ", param_count); | ||||
|       } | ||||
|       TORCH_CHECK(param_count > 0); | ||||
|  | ||||
|       std::vector<ParamsT*> reusable_params(param_count); | ||||
|       for (size_t i = 0; i < param_count; i++) { | ||||
|         reusable_params[i] = params->DeepCopy(use_buffer_rotation); | ||||
|       } | ||||
|  | ||||
|       // for rotating buffer | ||||
|       size_t offset = 0; | ||||
|  | ||||
|       for (size_t i = 0; i < op_names_.size(); i++) { | ||||
|         auto* candidate = ops_[op_names_[i]].get(); // borrow pointer | ||||
|         auto status = candidate->Call(reusable_params); | ||||
|         if (status != OK) { | ||||
|           TUNABLE_LOG("├──unsupported id=", i, ", ", op_sig, '(', params_sig, ") ", op_names_[i]); | ||||
|           continue; | ||||
|         } | ||||
|  | ||||
|         if (IsNumericsCheckEnabled()) { | ||||
|           ParamsT* numerical_params = params->DeepCopy(); | ||||
|           WarmUp(candidate, numerical_params, 1); | ||||
|         if (ctx->IsNumericsCheckEnabled()) { | ||||
|           ParamsT* numerical_params = params->DeepCopy(false); | ||||
|           auto status = candidate->Call(numerical_params); | ||||
|           if (status != OK) { | ||||
|             TUNABLE_LOG3("├──unsupported id=", i, ", ", op_sig, '(', params_sig, ") ", op_names_[i]); | ||||
|             continue; | ||||
|           } | ||||
|           status = reference_params->NumericalCheck(numerical_params); | ||||
|           numerical_params->Delete(); | ||||
|           if (status != OK) { | ||||
|             TUNABLE_LOG("├──numerics check failed for id=", i, ", ", op_sig, '(', params_sig, ") ", op_names_[i]); | ||||
|             TUNABLE_LOG3("├──numerics check failed for id=", i, ", ", op_sig, '(', params_sig, ") ", op_names_[i]); | ||||
|             continue; | ||||
|           } | ||||
|         } | ||||
|         else { | ||||
|           auto status = candidate->Call(reusable_params[0]); | ||||
|           if (status != OK) { | ||||
|             TUNABLE_LOG3("├──unsupported id=", i, ", ", op_sig, '(', params_sig, ") ", op_names_[i]); | ||||
|             continue; | ||||
|           } | ||||
|         } | ||||
|  | ||||
|         // collect a small profile | ||||
|         constexpr const int approx_num_iter = 3; | ||||
|         auto approx_duration = Profile(candidate, reusable_params, approx_num_iter); | ||||
|         auto approx_duration = Profile(candidate, reusable_params, approx_num_iter, offset); | ||||
|         // bail if too slow | ||||
|         if (approx_duration > 2 * min_duration_ms) { | ||||
|           TUNABLE_LOG("├──skip slow instance id=", i, ", ", op_sig, '(', params_sig, ") ", op_names_[i]); | ||||
|           TUNABLE_LOG3("├──skip slow instance id=", i, ", ", op_sig, '(', params_sig, ") ", op_names_[i]); | ||||
|           continue; | ||||
|         } | ||||
|  | ||||
|         // for warmup does user set max duration, max iters, or both? | ||||
|         // warmup is allowed to be skipped by setting either iterations or duration to 0 | ||||
|         double max_warmup_duration = ctx->GetMaxWarmupDurationMs(); | ||||
|         int max_warmup_iter = ctx->GetMaxWarmupIterations(); | ||||
|         int warmup_iter = 1; // default | ||||
|         if (max_warmup_duration > 0) { | ||||
|         if (max_warmup_duration >= 0) { | ||||
|           int duration_iters = max_warmup_duration / approx_duration; | ||||
|           if (max_warmup_iter > 0) { | ||||
|           if (max_warmup_iter >= 0) { | ||||
|             warmup_iter = std::min(max_warmup_iter, duration_iters); | ||||
|           } | ||||
|           else { | ||||
|             warmup_iter = duration_iters; | ||||
|           } | ||||
|         } | ||||
|         else if (max_warmup_iter > 0) { | ||||
|         else if (max_warmup_iter >= 0) { | ||||
|           warmup_iter = max_warmup_iter; | ||||
|         } | ||||
|  | ||||
| @ -188,27 +221,34 @@ class TunableOp { | ||||
|         else if (max_tuning_iter > 0) { | ||||
|           tuning_iter = max_tuning_iter; | ||||
|         } | ||||
|         // tuning must run at least 1 iteration | ||||
|         tuning_iter = std::max(1, tuning_iter); | ||||
|  | ||||
|         // do the full warmup followed by tuning | ||||
|         double warmup_ms = warmup_iter * approx_duration; | ||||
|         double tuning_ms = tuning_iter * approx_duration; | ||||
|         TUNABLE_LOG("├──tuning using " | ||||
|         TUNABLE_LOG3("├──tuning using " | ||||
|             "warmup iters ", warmup_iter, " [", warmup_ms, " ms] " | ||||
|             "and tuning iters ", tuning_iter, " [", tuning_ms, " ms] ", | ||||
|             "instance id=", i, ", ", op_sig, "(", params_sig, ") ", op_names_[i]); | ||||
|         WarmUp(candidate, reusable_params, warmup_iter); | ||||
|         auto duration_ms = Profile(candidate, reusable_params, tuning_iter); | ||||
|         TUNABLE_LOG3("├──offset at ", offset); | ||||
|         WarmUp(candidate, reusable_params, warmup_iter, offset); | ||||
|         auto duration_ms = Profile(candidate, reusable_params, tuning_iter, offset); | ||||
|         if (duration_ms < min_duration_ms) { | ||||
|           TUNABLE_LOG("├──found better instance id=", i, ". " , duration_ms, "ms. ", op_names_[i]); | ||||
|           TUNABLE_LOG3("├──found better instance id=", i, ". " , duration_ms, "ms. ", op_names_[i]); | ||||
|           min_duration_ms = duration_ms; | ||||
|           id_name = op_names_[i]; | ||||
|         } | ||||
|       } | ||||
|  | ||||
|       reusable_params->Delete(); | ||||
|       reference_params->Delete(); | ||||
|       for (size_t i = 0; i < reusable_params.size(); i++) { | ||||
|         reusable_params[i]->Delete(); | ||||
|       } | ||||
|       if (reference_params) { | ||||
|         reference_params->Delete(); | ||||
|       } | ||||
|  | ||||
|       TUNABLE_LOG("└──found fastest for ", op_sig, '(', params_sig, ") ", id_name); | ||||
|       TUNABLE_LOG2("└──found fastest for ", op_sig, '(', params_sig, ") ", id_name); | ||||
|       return ResultEntry(id_name, min_duration_ms); | ||||
|     } | ||||
|  | ||||
|  | ||||
| @ -31,46 +31,6 @@ Tensor index_select_backward_hack(const Tensor& grad, IntArrayRef self_sizes, in | ||||
|   return at::zeros(self_sizes, grad.options()).index_add(dim, index, grad); | ||||
| } | ||||
|  | ||||
| static optional<std::tuple<Tensor,int64_t>> unwrap(const Tensor& tensor) { | ||||
|   auto* wrapped = maybeGetTensorWrapper(tensor); | ||||
|   if (wrapped) { | ||||
|     if (wrapped->level().has_value()) { | ||||
|       return std::make_tuple(wrapped->value(), *wrapped->level()); | ||||
|     } | ||||
|     return unwrap(wrapped->value()); | ||||
|   } | ||||
|   auto* batched = maybeGetBatchedImpl(tensor); | ||||
|   if (batched) { | ||||
|     return std::make_tuple(batched->value(), batched->level()); | ||||
|   } | ||||
|   return nullopt; | ||||
| } | ||||
|  | ||||
| static bool can_perform_inplace(const Tensor& a, const Tensor& b) { | ||||
|   // TODO: generalize this to more transforms | ||||
|   auto a_ = unwrap(a); | ||||
|   auto b_ = unwrap(b); | ||||
|   if (!a_.has_value() && b_.has_value()) { | ||||
|     return false; | ||||
|   } | ||||
|   if (!a_.has_value() && !b_.has_value()) { | ||||
|     return true; | ||||
|   } | ||||
|   if (a_.has_value() && !b_.has_value()) { | ||||
|     return true; | ||||
|   } | ||||
|   TORCH_INTERNAL_ASSERT(a_.has_value() && b_.has_value()); | ||||
|  | ||||
|   // If b has any wrapper that a does not, then we cannot do a.inplace_(b) | ||||
|   if (std::get<1>(*a_) < std::get<1>(*b_)) { | ||||
|     return false; | ||||
|   } | ||||
|   if (std::get<1>(*a_) > std::get<1>(*b_)) { | ||||
|     return can_perform_inplace(std::get<0>(*a_), b); | ||||
|   } | ||||
|   return can_perform_inplace(std::get<0>(*a_), std::get<0>(*b_)); | ||||
| } | ||||
|  | ||||
| // TODO: linear is pretty important for performance, but I'm not sure how to work | ||||
| // around the in-place. | ||||
| Tensor linear_hack(const Tensor& input, const Tensor& weight, const std::optional<Tensor>& bias_opt) { | ||||
|  | ||||
| @ -1480,23 +1480,14 @@ Tensor& not_equal_(Tensor& self, const Scalar& other) { return self.ne_(other); | ||||
| Tensor& logical_and_out(const Tensor& self, const Tensor& other, Tensor& result) { return comparison_op_out(result, self, other, logical_and_stub); } | ||||
| Tensor logical_and(const Tensor& self, const Tensor& other) { return comparison_op(self, other, static_cast<OutFunc>(at::logical_and_out)); } | ||||
| Tensor& logical_and_(Tensor& self, const Tensor& other) { return comparison_op_(self, other, static_cast<OutFunc>(at::logical_and_out)); } | ||||
| static Tensor& logical_and_out(Tensor& result, const Tensor& self, const Scalar& other) { return comparison_op_out(result, self, other, static_cast<OutFunc>(at::logical_and_out)); } | ||||
| static Tensor logical_and(const Tensor& self, const Scalar& other) { return comparison_op(self, other, static_cast<OutFunc>(at::logical_and_out)); } | ||||
| static Tensor& logical_and_(Tensor& self, const Scalar& other) { return comparison_op_(self, other, static_cast<OutFunc>(at::logical_and_out)); } | ||||
|  | ||||
| Tensor& logical_or_out(const Tensor& self, const Tensor& other, Tensor& result) { return comparison_op_out(result, self, other, logical_or_stub); } | ||||
| Tensor logical_or(const Tensor& self, const Tensor& other) { return comparison_op(self, other, static_cast<OutFunc>(at::logical_or_out)); } | ||||
| Tensor& logical_or_(Tensor& self, const Tensor& other) { return comparison_op_(self, other, static_cast<OutFunc>(at::logical_or_out)); } | ||||
| static Tensor& logical_or_out(Tensor& result, const Tensor& self, const Scalar& other) { return comparison_op_out(result, self, other, static_cast<OutFunc>(at::logical_or_out)); } | ||||
| static Tensor logical_or(const Tensor& self, const Scalar& other) { return comparison_op(self, other, static_cast<OutFunc>(at::logical_or_out)); } | ||||
| static Tensor& logical_or_(Tensor& self, const Scalar& other) { return comparison_op_(self, other, static_cast<OutFunc>(at::logical_or_out)); } | ||||
|  | ||||
| Tensor& logical_xor_out(const Tensor& self, const Tensor& other, Tensor& result) { return comparison_op_out(result, self, other, logical_xor_stub); } | ||||
| Tensor logical_xor(const Tensor& self, const Tensor& other) { return comparison_op(self, other, static_cast<OutFunc>(at::logical_xor_out)); } | ||||
| Tensor& logical_xor_(Tensor& self, const Tensor& other) { return comparison_op_(self, other, static_cast<OutFunc>(at::logical_xor_out)); } | ||||
| static Tensor& logical_xor_out(Tensor& result, const Tensor& self, const Scalar& other) { return comparison_op_out(result, self, other, static_cast<OutFunc>(at::logical_xor_out)); } | ||||
| static Tensor logical_xor(const Tensor& self, const Scalar& other) { return comparison_op(self, other, static_cast<OutFunc>(at::logical_xor_out)); } | ||||
| static Tensor& logical_xor_(Tensor& self, const Scalar& other) { return comparison_op_(self, other, static_cast<OutFunc>(at::logical_xor_out)); } | ||||
|  | ||||
| // binary max, alias for maximum | ||||
| Tensor& max_out(const Tensor& self, const Tensor& other, Tensor& result) { | ||||
|  | ||||
| @ -105,6 +105,28 @@ void fp16_gemv_trans( | ||||
|     const float beta, | ||||
|     float16_t* y, | ||||
|     const int incy); | ||||
|  | ||||
| float fp16_dot_with_fp32_arith( | ||||
|     const float16_t* vec1, | ||||
|     const float16_t* vec2, | ||||
|     int64_t len); | ||||
|  | ||||
| void bf16_gemv_trans( | ||||
|     const int m, | ||||
|     const int n, | ||||
|     const at::BFloat16 alpha, | ||||
|     const at::BFloat16* a, | ||||
|     const int lda, | ||||
|     const at::BFloat16* x, | ||||
|     const int incx, | ||||
|     const at::BFloat16 beta, | ||||
|     at::BFloat16* y, | ||||
|     const int incy); | ||||
|  | ||||
| float bf16_dot_with_fp32_arith( | ||||
|     const at::BFloat16* vec1, | ||||
|     const at::BFloat16* vec2, | ||||
|     int64_t len); | ||||
| #endif | ||||
|  | ||||
| template <typename scalar_t> | ||||
| @ -113,8 +135,11 @@ bool scal_use_fast_path(C10_UNUSED int64_t n, C10_UNUSED int64_t incx) { | ||||
| } | ||||
|  | ||||
| template <typename scalar_t> | ||||
| bool gemv_use_fast_path(C10_UNUSED char trans, C10_UNUSED int64_t m, | ||||
|                         C10_UNUSED int64_t n, C10_UNUSED scalar_t alpha, | ||||
|                         C10_UNUSED int64_t lda, | ||||
|                         C10_UNUSED int64_t incx, C10_UNUSED scalar_t beta, | ||||
|                         C10_UNUSED int64_t incy) { | ||||
|   return false; | ||||
| } | ||||
|  | ||||
| @ -133,7 +158,7 @@ void gemv_fast_path(C10_UNUSED const char *trans, C10_UNUSED const int *m, C10_U | ||||
|  | ||||
| #define INSTANTIATE(scalar_t)                                                                                                                                                     \ | ||||
| template bool scal_use_fast_path<scalar_t>(int64_t n, int64_t incx);                                                                                                              \ | ||||
| template bool gemv_use_fast_path<scalar_t>(char trans, int64_t m, int64_t n, scalar_t alpha, int64_t lda, int64_t incx, scalar_t beta, int64_t incy); \ | ||||
| template void gemv_fast_path<scalar_t>(const char *trans, const int *m, const int *n, const scalar_t *alpha, const scalar_t *a, const int *lda, const scalar_t *x, const int *incx, const scalar_t *beta, scalar_t *y, const int *incy);      \ | ||||
| template void scal_fast_path<scalar_t>(int *n, scalar_t *a, scalar_t *x, int *incx); | ||||
|  | ||||
| @ -160,15 +185,15 @@ void scal_fast_path<float>(int *n, float *a, float *x, int *incx) { | ||||
| } | ||||
|  | ||||
| template <> | ||||
| bool gemv_use_fast_path<float>(C10_UNUSED char trans, int64_t m, int64_t n, C10_UNUSED float alpha, int64_t lda, int64_t incx, C10_UNUSED float beta, int64_t incy) { | ||||
|   auto intmax = std::numeric_limits<int>::max(); | ||||
|   return (m <= intmax) && (n <= intmax) && (lda <= intmax) && | ||||
|          (incx > 0) && (incx <= intmax) && (incy > 0) && (incy <= intmax); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| bool gemv_use_fast_path<double>(C10_UNUSED char trans, int64_t m, int64_t n, C10_UNUSED double alpha, int64_t lda, int64_t incx, C10_UNUSED double beta, int64_t incy) { | ||||
|   return gemv_use_fast_path<float>(trans, m, n, (float)alpha, lda, incx, (float)beta, incy); | ||||
| } | ||||
|  | ||||
| template <> | ||||
| @ -190,7 +215,6 @@ INSTANTIATE(int8_t); | ||||
| INSTANTIATE(int16_t); | ||||
| INSTANTIATE(int); | ||||
| INSTANTIATE(int64_t); | ||||
| #if defined(__aarch64__) && !defined(C10_MOBILE) | ||||
| template <> | ||||
| bool scal_use_fast_path<at::Half>(C10_UNUSED int64_t n, C10_UNUSED int64_t incx) { | ||||
| @ -199,14 +223,32 @@ bool scal_use_fast_path<at::Half>(C10_UNUSED int64_t n, C10_UNUSED int64_t incx) | ||||
|  | ||||
| template <> | ||||
| bool gemv_use_fast_path<at::Half>( | ||||
|     C10_UNUSED char trans, | ||||
|     C10_UNUSED int64_t m, | ||||
|     C10_UNUSED int64_t n, | ||||
|     at::Half alpha, | ||||
|     C10_UNUSED int64_t lda, | ||||
|     C10_UNUSED int64_t incx, | ||||
|     at::Half beta, | ||||
|     C10_UNUSED int64_t incy) { | ||||
|   return incx == 1 && c10::detail::fp16_from_bits(alpha.x) == 1.0f && | ||||
|     c10::detail::fp16_from_bits(beta.x) == 0.0f; | ||||
| } | ||||
|  | ||||
| template <> | ||||
| bool gemv_use_fast_path<at::BFloat16>( | ||||
|     C10_UNUSED char trans, | ||||
|     C10_UNUSED int64_t m, | ||||
|     C10_UNUSED int64_t n, | ||||
|     at::BFloat16 alpha, | ||||
|     C10_UNUSED int64_t lda, | ||||
|     C10_UNUSED int64_t incx, | ||||
|     at::BFloat16 beta, | ||||
|     C10_UNUSED int64_t incy) { | ||||
|   return (trans == 'T' || trans == 't') && incx == 1 && alpha == 1.0 && beta == 0.0; | ||||
| } | ||||
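| // Note: this BF16 fast path only fires for the transposed, unit-stride-x, | ||||
| // alpha == 1 / beta == 0 case, which is exactly the shape bf16_gemv_trans | ||||
| // below handles; every other configuration stays on the generic path. | ||||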
|  | ||||
|  | ||||
| #ifdef __ARM_FEATURE_FP16_SCALAR_ARITHMETIC | ||||
| static inline float16_t reduce(float16x4_t x) { | ||||
|         auto sum = vpadd_f16(x, x); | ||||
| @ -379,7 +421,7 @@ static inline double reduce(float32x4_t x[kF32RegistersPerIteration]) { | ||||
|   return vaddvq_f32(x[0]); | ||||
| } | ||||
|  | ||||
| static C10_ALWAYS_INLINE void dot_with_fp32_arith_main_inner_loop( | ||||
|   const float16_t* vec1, | ||||
|   const float16_t* vec2, | ||||
|   float32x4_t sum[kF32RegistersPerIteration], | ||||
| @ -392,7 +434,7 @@ static C10_ALWAYS_INLINE void fp16_dot_with_fp32_arith_main_inner_loop( | ||||
|   sum[2 * registerPairIndex + 1] = f32_fma_high_f16(sum[2 * registerPairIndex + 1], temp_vec1, temp_vec2); | ||||
| } | ||||
|  | ||||
| static C10_ALWAYS_INLINE void dot_with_fp32_arith_vectorized_tail_inner_loop( | ||||
|   const float16_t* vec1, | ||||
|   const float16_t* vec2, | ||||
|   float32x4_t* tailSum, | ||||
| @ -402,14 +444,48 @@ static C10_ALWAYS_INLINE void fp16_dot_with_fp32_arith_vectorized_tail_inner_loo | ||||
|   *tailSum = f32_fma_f16(*tailSum, temp_vec1, temp_vec2); | ||||
| } | ||||
|  | ||||
| static C10_ALWAYS_INLINE float32x4_t to_bfloat16(uint16x4_t u16) { | ||||
|   int32x4_t shift = vdupq_n_s32(16); | ||||
|   return vreinterpretq_f32_u32(vshlq_u32(vmovl_u16(u16), shift)); | ||||
| } | ||||
|  | ||||
| static C10_ALWAYS_INLINE float32x4_t f32_fma_bf16(float32x4_t a, uint16x4_t b, uint16x4_t c) { | ||||
|   return f32_fma(a, to_bfloat16(b), to_bfloat16(c)); | ||||
| } | ||||
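| // to_bfloat16 above relies on bfloat16 being the high 16 bits of an IEEE-754 | ||||
| // float32: widening each u16 lane to u32 and shifting left by 16 reproduces | ||||
| // the exact float value, so no rounding happens on the load side. A scalar | ||||
| // sketch of the same trick (illustration only, not part of the kernel): | ||||
| // | ||||
| //   float bf16_to_f32(uint16_t bits) { | ||||
| //     uint32_t wide = static_cast<uint32_t>(bits) << 16; | ||||
| //     float out; | ||||
| //     std::memcpy(&out, &wide, sizeof(out));  // needs <cstring> | ||||
| //     return out; | ||||
| //   } | ||||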
|  | ||||
| static C10_ALWAYS_INLINE void dot_with_fp32_arith_main_inner_loop( | ||||
|   const at::BFloat16* vec1, | ||||
|   const at::BFloat16* vec2, | ||||
|   float32x4_t sum[kF32RegistersPerIteration], | ||||
|   int registerPairIndex) { | ||||
|   // TODO: detect intrinsic availability and use the BF16 intrinsics (__ARM_FEATURE_BF16) when present. | ||||
|   // Load a pair of f32 registers at a time. | ||||
|   const uint16x8_t temp_vec1 = vld1q_u16(reinterpret_cast<const uint16_t*>(&vec1[registerPairIndex * 2 * kF32ElementsPerRegister])); | ||||
|   const uint16x8_t temp_vec2 = vld1q_u16(reinterpret_cast<const uint16_t*>(&vec2[registerPairIndex * 2 * kF32ElementsPerRegister])); | ||||
|  | ||||
|   sum[2 * registerPairIndex] = f32_fma_bf16(sum[2 * registerPairIndex], vget_low_u16(temp_vec1), vget_low_u16(temp_vec2)); | ||||
|   sum[2 * registerPairIndex + 1] = f32_fma_bf16(sum[2 * registerPairIndex + 1], vget_high_u16(temp_vec1), vget_high_u16(temp_vec2)); | ||||
| } | ||||
|  | ||||
| static C10_ALWAYS_INLINE void dot_with_fp32_arith_vectorized_tail_inner_loop( | ||||
|   const at::BFloat16* vec1, | ||||
|   const at::BFloat16* vec2, | ||||
|   float32x4_t* tailSum, | ||||
|   int idx) { | ||||
|   const auto temp_vec1 = vld1_u16(reinterpret_cast<const uint16_t*>(&vec1[idx])); | ||||
|   const auto temp_vec2 = vld1_u16(reinterpret_cast<const uint16_t*>(&vec2[idx])); | ||||
|   *tailSum = f32_fma_bf16(*tailSum, temp_vec1, temp_vec2); | ||||
| } | ||||
|  | ||||
| template <typename T> | ||||
| float dot_with_fp32_arith(const T* vec1, const T* vec2, int64_t len) { | ||||
|   float32x4_t sum[kF32RegistersPerIteration] = {vdupq_n_f32(0)}; | ||||
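|   // Note: len & ~(kF32ElementsPerIteration - 1) below rounds len down to a | ||||
|   // multiple of kF32ElementsPerIteration; the mask trick is valid because that | ||||
|   // count is a power of two. | ||||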
|   const auto len_aligned = len & ~(kF32ElementsPerIteration - 1); | ||||
|   for (int j = 0; j < len_aligned ; j += kF32ElementsPerIteration) { | ||||
|     const auto* vec1_ = vec1 + j; | ||||
|     const auto* vec2_ = vec2 + j; | ||||
|     c10::ForcedUnroll<kF32RegisterPairsPerIteration>{}([vec1_, vec2_, &sum](auto k) { | ||||
|       dot_with_fp32_arith_main_inner_loop(vec1_, vec2_, sum, k); | ||||
|     }); | ||||
|   } | ||||
|   auto reducedSum = reduce(sum); | ||||
| @ -420,7 +496,7 @@ float fp16_dot_with_fp32_arith(const float16_t* vec1, const float16_t* vec2, int | ||||
|   float32x4_t tailSum = vdupq_n_f32(0); | ||||
|   const auto len_aligned_4 = len & ~3; | ||||
|   for (int j = len_aligned; j < len_aligned_4; j += 4) { | ||||
|     dot_with_fp32_arith_vectorized_tail_inner_loop(vec1, vec2, &tailSum, j); | ||||
|   } | ||||
|   auto reducedTail = vpaddq_f32(tailSum, tailSum); | ||||
|   reducedSum += vgetq_lane_f32(vpaddq_f32(reducedTail, reducedTail), 0); | ||||
| @ -432,6 +508,14 @@ float fp16_dot_with_fp32_arith(const float16_t* vec1, const float16_t* vec2, int | ||||
|   return reducedSum; | ||||
| } | ||||
|  | ||||
| float fp16_dot_with_fp32_arith(const float16_t* vec1, const float16_t* vec2, int64_t len) { | ||||
|   return dot_with_fp32_arith(vec1, vec2, len); | ||||
| } | ||||
|  | ||||
| float bf16_dot_with_fp32_arith(const at::BFloat16* vec1, const at::BFloat16* vec2, int64_t len) { | ||||
|   return dot_with_fp32_arith(vec1, vec2, len); | ||||
| } | ||||
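|  | ||||
| // Both wrappers above share dot_with_fp32_arith: partial products are | ||||
| // accumulated in fp32 NEON registers and a float is returned, so the only | ||||
| // low-precision rounding is the caller's final store into the fp16/bf16 | ||||
| // output vector. | ||||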
|  | ||||
| // On my Apple M1 MacBook (which is ARMv8.5 and thus has the instructions | ||||
| // that f32_fma_{low,high}_f16 targets), this kernel has performance | ||||
| // equivalent to the fp16-native kernel. | ||||
| @ -443,6 +527,14 @@ static void fp16_gemv_trans_fp32_arith_by_dot_products(const int m, const int n, | ||||
|   }); | ||||
| } | ||||
|  | ||||
| static void bf16_gemv_trans_fp32_arith_by_dot_products(const int m, const int n, const at::BFloat16* a, const int lda, const at::BFloat16 *x, at::BFloat16* y, int incy) { | ||||
|   parallel_for(0, n, 1, [&](int begin, int end) { | ||||
|     for (int i = begin; i < end; ++i) { | ||||
|       y[i * incy] = bf16_dot_with_fp32_arith(x, a + lda * i, m); | ||||
|     } | ||||
|   }); | ||||
| } | ||||
|  | ||||
| void fp16_gemv_trans( | ||||
|     const int m, | ||||
|     const int n, | ||||
| @ -454,26 +546,28 @@ void fp16_gemv_trans( | ||||
|     const float beta, | ||||
|     float16_t* y, | ||||
|     const int incy) { | ||||
|   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(incx == 1 && alpha == 1.0 && beta == 0.0); | ||||
| #ifdef __ARM_FEATURE_FP16_SCALAR_ARITHMETIC | ||||
|   if (at::globalContext().allowFP16ReductionCPU()) { | ||||
|     return fp16_gemv_trans_fp16_arith_by_dot_products(m, n, a, lda, x, y, incy); | ||||
|   } | ||||
| #endif | ||||
|   return fp16_gemv_trans_fp32_arith_by_dot_products(m, n, a, lda, x, y, incy); | ||||
| } | ||||
|  | ||||
| void bf16_gemv_trans( | ||||
|   const int m, | ||||
|   const int n, | ||||
|   const at::BFloat16 alpha, | ||||
|   const at::BFloat16* a, | ||||
|   const int lda, | ||||
|   const at::BFloat16* x, | ||||
|   const int incx, | ||||
|   const at::BFloat16 beta, | ||||
|   at::BFloat16* y, | ||||
|   const int incy) { | ||||
|   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(incx == 1 && alpha == 1.0 && beta == 0.0); | ||||
|   return bf16_gemv_trans_fp32_arith_by_dot_products(m, n, a, lda, x, y, incy); | ||||
| } | ||||
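|  | ||||
| // Callers are expected to have checked gemv_use_fast_path<at::BFloat16> first; | ||||
| // that predicate establishes the incx == 1, alpha == 1 and beta == 0 | ||||
| // invariants the debug assert above relies on. | ||||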
|  | ||||
|  | ||||
| @ -590,9 +684,37 @@ void gemv_fast_path<at::Half>( | ||||
|         *incy); | ||||
|   } | ||||
| } | ||||
|  | ||||
| template <> | ||||
| void gemv_fast_path<at::BFloat16>( | ||||
|     const char* trans, | ||||
|     const int* m, | ||||
|     const int* n, | ||||
|     const at::BFloat16* alpha, | ||||
|     const at::BFloat16* a, | ||||
|     const int* lda, | ||||
|     const at::BFloat16* x, | ||||
|     const int* incx, | ||||
|     const at::BFloat16* beta, | ||||
|     at::BFloat16* y, | ||||
|     const int* incy) { | ||||
|   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(trans[0] == 'T' || trans[0] == 't'); | ||||
|   bf16_gemv_trans( | ||||
|     *m, | ||||
|     *n, | ||||
|     *alpha, | ||||
|     a, | ||||
|     *lda, | ||||
|     x, | ||||
|     *incx, | ||||
|     *beta, | ||||
|     y, | ||||
|     *incy); | ||||
| } | ||||
| #else // defined(__aarch64__) && !defined(C10_MOBILE) | ||||
| INSTANTIATE(c10::Half); | ||||
| INSTANTIATE(c10::BFloat16); | ||||
| #endif // defined(__aarch64__) && !defined(C10_MOBILE) | ||||
| #undef INSTANTIATE | ||||
|  | ||||
| } // namespace blas_impl | ||||
| @ -623,7 +745,7 @@ void gemv(char trans, int64_t m, int64_t n, scalar_t alpha, const scalar_t *a, i | ||||
|   if(n == 1) lda = m; | ||||
|  | ||||
| #if AT_BUILD_WITH_BLAS() | ||||
|   if (blas_impl::gemv_use_fast_path<scalar_t>(trans, m, n, alpha, lda, incx, beta, incy)) { | ||||
|     TORCH_CHECK(lda >= std::max<int64_t>(1L, m), "lda should be at least max(1,", m, "), but have ", lda); | ||||
|     int i_m = (int)m; | ||||
|     int i_n = (int)n; | ||||
|  | ||||
| @ -393,7 +393,7 @@ struct RegisterPRIVATEUSE1Dispatch { | ||||
| // REGISTER_DISPATCH now dispatches an AVX512 kernel to nullptr but registers other dispatches. | ||||
| // ALSO_REGISTER_AVX512_DISPATCH should be used for ensuring AVX512 dispatch, among others. | ||||
| #ifdef CPU_CAPABILITY_AVX512 | ||||
| #define REGISTER_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, CPU_CAPABILITY, ((void*)(fn) ? nullptr : nullptr)) | ||||
| #else | ||||
| #define REGISTER_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, CPU_CAPABILITY, fn) | ||||
| #endif | ||||
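|  | ||||
| // The ((void*)(fn) ? nullptr : nullptr) expression always evaluates to | ||||
| // nullptr; it appears to exist solely to reference fn, so the kernel symbol | ||||
| // still counts as used even though the AVX512 registration is a no-op. | ||||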
|  | ||||
| @ -856,7 +856,7 @@ namespace { | ||||
| /** | ||||
|  * @brief Computes the optimal matrix chain multiplication order | ||||
|  * | ||||
|  * Follows the dynamic programming algorithm from Cormen et al., | ||||
|  * "Introduction to Algorithms, Third Edition", Chapter 15.2, | ||||
|  * p. 370-378. Note that the book uses 1-based indexing. | ||||
|  * | ||||
|  | ||||
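| The doc comment above describes the classic O(n^3) chain-order dynamic program. As a rough standalone sketch of that recurrence (a hypothetical helper, not the ATen implementation): | ||||
|  | ||||
| #include <algorithm> | ||||
| #include <cstdint> | ||||
| #include <limits> | ||||
| #include <vector> | ||||
|  | ||||
| // dims has n + 1 entries; matrix i has shape dims[i] x dims[i + 1]. | ||||
| // Returns the minimal number of scalar multiplications for the chain. | ||||
| int64_t matrix_chain_cost(const std::vector<int64_t>& dims) { | ||||
|   const int n = static_cast<int>(dims.size()) - 1; | ||||
|   if (n <= 1) { | ||||
|     return 0; | ||||
|   } | ||||
|   std::vector<std::vector<int64_t>> m(n, std::vector<int64_t>(n, 0)); | ||||
|   for (int len = 2; len <= n; ++len) {  // chain length being solved | ||||
|     for (int i = 0; i + len - 1 < n; ++i) { | ||||
|       const int j = i + len - 1; | ||||
|       m[i][j] = std::numeric_limits<int64_t>::max(); | ||||
|       for (int k = i; k < j; ++k) {  // split point between i..k and k+1..j | ||||
|         const int64_t q = m[i][k] + m[k + 1][j] + dims[i] * dims[k + 1] * dims[j + 1]; | ||||
|         m[i][j] = std::min(m[i][j], q); | ||||
|       } | ||||
|     } | ||||
|   } | ||||
|   return m[0][n - 1]; | ||||
| } | ||||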
| @ -2,9 +2,9 @@ | ||||
| // Licensed under the BSD-3-Clause license | ||||
| // This is the CPU implementation of the Connectionist Temporal Loss. | ||||
| // We mostly follow Graves. | ||||
| // 1. Graves et al.: http://www.cs.toronto.edu/~graves/icml_2006.pdf | ||||
| // We use the equations from above link, but note that [1] has 1-based indexing and we (of course) use 0-based. | ||||
| // Graves et al. call the probabilities y, we use log_probs (also calling them inputs) | ||||
| #define TORCH_ASSERT_ONLY_METHOD_OPERATORS | ||||
|  | ||||
| #include <ATen/core/Tensor.h> | ||||
|  | ||||
| @ -499,13 +499,4 @@ Tensor nll_loss2d_symint(const Tensor & self, const Tensor & target, const std:: | ||||
|   return std::get<0>(at::nll_loss2d_forward_symint(self, target, weight, reduction, std::move(ignore_index))); | ||||
| } | ||||
|  | ||||
| // Duplicate of above code for non-symbolic ints. Kept for BC purposes and to minimize breakages. | ||||
| static Tensor nll_loss2d(const Tensor & self, const Tensor & target, const std::optional<Tensor>& weight_opt, int64_t reduction, int64_t ignore_index) { | ||||
|   // See [Note: hacky wrapper removal for optional tensor] | ||||
|   c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); | ||||
|   const Tensor& weight = *weight_maybe_owned; | ||||
|  | ||||
|   return std::get<0>(at::nll_loss2d_forward_symint(self, target, weight, reduction, ignore_index)); | ||||
| } | ||||
|  | ||||
| } // namespace at::native | ||||
|  | ||||
| @ -508,7 +508,7 @@ static inline C10_HOST_DEVICE scalar_t calc_polygamma(scalar_t x, int n) { | ||||
|  | ||||
| /* References | ||||
|  * [igam1] "The Digital Library of Mathematical Functions", dlmf.nist.gov | ||||
|  * [igam2] Maddock et al., "Incomplete Gamma Functions", | ||||
|  *     https://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html | ||||
|  */ | ||||
|  | ||||
|  | ||||
| @ -28,18 +28,6 @@ Tensor empty_meta_symint( | ||||
|       size, dtype_opt, layout_opt, device_opt, pin_memory_opt, memory_format_opt); | ||||
| } | ||||
|  | ||||
| // Kept only for BC with XLA | ||||
| static Tensor empty_strided_meta( | ||||
|   IntArrayRef size, | ||||
|   IntArrayRef stride, | ||||
|   std::optional<ScalarType> dtype_opt, | ||||
|   std::optional<Layout> layout_opt, | ||||
|   std::optional<Device> device_opt, | ||||
|   std::optional<bool> pin_memory_opt | ||||
| ) { | ||||
|   return empty_strided_meta_symint(c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), dtype_opt, layout_opt, device_opt, pin_memory_opt); | ||||
| } | ||||
|  | ||||
| Tensor empty_strided_meta_symint( | ||||
|   SymIntArrayRef size, | ||||
|   SymIntArrayRef stride, | ||||
|  | ||||
| @ -802,55 +802,6 @@ TORCH_IMPL_FUNC(slow_conv_transpose2d_structured_cpu) | ||||
|       dilation); | ||||
|  } | ||||
|  | ||||
| static std::tuple<Tensor&, Tensor&, Tensor&> slow_conv_transpose2d_backward_out_cpu(const Tensor& grad_output, | ||||
|     const Tensor& input, | ||||
|     const Tensor& weight, | ||||
|     IntArrayRef kernel_size, | ||||
|     IntArrayRef stride, | ||||
|     IntArrayRef padding, | ||||
|     IntArrayRef output_padding, | ||||
|     IntArrayRef dilation, | ||||
|     Tensor& grad_input, | ||||
|     Tensor& grad_weight, | ||||
|     Tensor& grad_bias) { | ||||
|   if (grad_input.defined()) { | ||||
|     slow_conv_transpose2d_backward_out_cpu_template( | ||||
|         input, | ||||
|         grad_output, | ||||
|         grad_input, | ||||
|         weight, | ||||
|         kernel_size, | ||||
|         stride, | ||||
|         padding, | ||||
|         output_padding, | ||||
|         dilation); | ||||
|   } | ||||
|  | ||||
|   if (grad_bias.defined()) { | ||||
|     at::sum_out(grad_bias, grad_output, IntArrayRef{0, 2, 3}); | ||||
|   } | ||||
|  | ||||
|   if (grad_weight.defined()) { | ||||
|     grad_weight.resize_(weight.sizes(), weight.suggest_memory_format()); | ||||
|     grad_weight.zero_(); | ||||
|     slow_conv_transpose2d_acc_grad_parameters_cpu( | ||||
|         input, | ||||
|         weight, | ||||
|         grad_output, | ||||
|         grad_weight, | ||||
|         grad_bias, | ||||
|         kernel_size, | ||||
|         stride, | ||||
|         padding, | ||||
|         output_padding, | ||||
|         dilation, | ||||
|         1); | ||||
|   } | ||||
|  | ||||
|   return std::tuple<Tensor&, Tensor&, Tensor&>( | ||||
|       grad_input, grad_weight, grad_bias); | ||||
| } | ||||
|  | ||||
| static std::tuple<Tensor, Tensor, Tensor> slow_conv_transpose2d_backward_cpu( | ||||
|     const Tensor& grad_output, | ||||
|     const Tensor& input, | ||||
|  | ||||
| @ -871,58 +871,6 @@ Tensor slow_conv_transpose3d_cpu( | ||||
|   return output; | ||||
| } | ||||
|  | ||||
| static std::tuple<Tensor&, Tensor&, Tensor&> slow_conv_transpose3d_backward_out_cpu(const Tensor& grad_output, | ||||
|     const Tensor& input, | ||||
|     const Tensor& weight, | ||||
|     IntArrayRef kernel_size, | ||||
|     IntArrayRef stride, | ||||
|     IntArrayRef padding, | ||||
|     IntArrayRef output_padding, | ||||
|     IntArrayRef dilation, | ||||
|     Tensor& grad_input, | ||||
|     Tensor& grad_weight, | ||||
|     Tensor& grad_bias) { | ||||
|   if (grad_input.defined()) { | ||||
|     slow_conv_transpose3d_backward_out_cpu_template( | ||||
|         input, | ||||
|         grad_output, | ||||
|         grad_input, | ||||
|         weight, | ||||
|         kernel_size, | ||||
|         stride, | ||||
|         padding, | ||||
|         output_padding, | ||||
|         dilation); | ||||
|   } | ||||
|  | ||||
|   if (grad_weight.defined()) { | ||||
|     grad_weight.resize_(weight.sizes()); | ||||
|     grad_weight.zero_(); | ||||
|   } | ||||
|  | ||||
|   if (grad_bias.defined()) { | ||||
|     grad_bias.resize_({weight.size(1)}); | ||||
|     grad_bias.zero_(); | ||||
|   } | ||||
|  | ||||
|   if (grad_weight.defined() || grad_bias.defined()) { | ||||
|     slow_conv_transpose3d_acc_grad_parameters_cpu( | ||||
|         input, | ||||
|         grad_output, | ||||
|         grad_weight, | ||||
|         grad_bias, | ||||
|         kernel_size, | ||||
|         stride, | ||||
|         padding, | ||||
|         output_padding, | ||||
|         dilation, | ||||
|         1); | ||||
|   } | ||||
|  | ||||
|   return std::tuple<Tensor&, Tensor&, Tensor&>( | ||||
|       grad_input, grad_weight, grad_bias); | ||||
| } | ||||
|  | ||||
| static std::tuple<Tensor, Tensor, Tensor> slow_conv_transpose3d_backward_cpu( | ||||
|     const Tensor& grad_output, | ||||
|     const Tensor& input, | ||||
|  | ||||
| @ -339,12 +339,6 @@ Tensor& gather_out(const Tensor& self, Dimname dim, const Tensor& index, bool sp | ||||
| Tensor index_add(const Tensor& self, Dimname dim, const Tensor& index, const Tensor& source, const Scalar &alpha) { | ||||
|   reportNYIDimnameOverload("index_add"); | ||||
| } | ||||
| static Tensor& index_add_(Tensor& self, Dimname dim, const Tensor& index, const Tensor& source, const Scalar &alpha) { | ||||
|   reportNYIDimnameOverload("index_add"); | ||||
| } | ||||
| static Tensor& index_add_out(const Tensor& self, Dimname dim, const Tensor& index, const Tensor& source, const Scalar& alpha, Tensor& result) { | ||||
|   reportNYIDimnameOverload("index_add"); | ||||
| } | ||||
| Tensor index_fill(const Tensor& self, Dimname dim, const Tensor& index, const Scalar& source) { | ||||
|   return at::index_fill(self, dimname_to_position(self, dim), index, source); | ||||
| } | ||||
| @ -372,21 +366,12 @@ Tensor index_select(const Tensor& self, Dimname dim, const Tensor& index) { | ||||
| Tensor scatter(const Tensor& self, Dimname dim, const Tensor& index, const Tensor& source) { | ||||
|   reportNYIDimnameOverload("scatter"); | ||||
| } | ||||
| static Tensor& scatter_(Tensor& self, Dimname dim, const Tensor& index, const Tensor& source) { | ||||
|   reportNYIDimnameOverload("scatter"); | ||||
| } | ||||
| Tensor scatter(const Tensor& self, Dimname dim, const Tensor& index, const Scalar& source) { | ||||
|   reportNYIDimnameOverload("scatter"); | ||||
| } | ||||
| static Tensor& scatter_(Tensor& self, Dimname dim, const Tensor& index, const Scalar& source) { | ||||
|   reportNYIDimnameOverload("scatter"); | ||||
| } | ||||
| Tensor scatter_add(const Tensor& self, Dimname dim, const Tensor& index, const Tensor& source) { | ||||
|   reportNYIDimnameOverload("scatter_add"); | ||||
| } | ||||
| static Tensor& scatter_add_(Tensor& self, Dimname dim, const Tensor& index, const Tensor& source) { | ||||
|   reportNYIDimnameOverload("scatter_add"); | ||||
| } | ||||
| std::tuple<Tensor&, Tensor&> sort_out(const Tensor& self, std::optional<bool> stable, Dimname dim, bool keepdim, Tensor& values, Tensor& indices) { | ||||
|   reportNYIDimnameOverload("sort"); | ||||
| } | ||||
|  | ||||
| @ -2276,11 +2276,6 @@ bool cpu_equal(const Tensor& self, const Tensor& other) { | ||||
|   return result.load(); | ||||
| } | ||||
|  | ||||
| static Tensor value_selecting_reduction_backward(const Tensor& grad, int64_t dim, const Tensor& indices, at::IntArrayRef sizes, bool keepdim) { | ||||
|     return at::native::value_selecting_reduction_backward_symint(grad, dim, indices, c10::fromIntArrayRefSlow(sizes), keepdim); | ||||
| } | ||||
|  | ||||
|  | ||||
| // max(dim), min(dim), topk(dim), mode(dim), are examples of reduction | ||||
| // functions that select values. value_selecting_reduction_backward is the | ||||
| // backward function for those operators; it propagates the grad to the | ||||
|  | ||||
| @ -301,14 +301,6 @@ void reflection_pad2d_backward_out_template( | ||||
|  | ||||
| } // namespace | ||||
|  | ||||
| // TODO: I think this function should be removed since we implement it with | ||||
| // TORCH_IMPL_FUNC below | ||||
| static Tensor& reflection_pad1d_out_cpu(const Tensor& input, IntArrayRef padding, | ||||
|     Tensor& output) { | ||||
|   reflection_pad1d_kernel(kCPU, output, input, padding); | ||||
|   return output; | ||||
| } | ||||
|  | ||||
| Tensor& reflection_pad1d_out_quantized_cpu(const Tensor& input, IntArrayRef padding, | ||||
|     Tensor& output) { | ||||
|   TORCH_CHECK(input.qscheme() == kPerTensorAffine, "Only per tensor quantization is supported"); | ||||
|  | ||||
| @ -231,14 +231,6 @@ TensorImpl* resize_impl_cpu_( | ||||
|   return _resize_impl_(self, size, stride, resize_storage); | ||||
| } | ||||
|  | ||||
| static TensorImpl* resize_impl_meta_( | ||||
|     TensorImpl* self, | ||||
|     c10::SymIntArrayRef size, | ||||
|     at::OptionalSymIntArrayRef stride, | ||||
|     bool resize_storage = true) { | ||||
|   return _resize_impl_(self, size, stride, resize_storage); | ||||
| } | ||||
|  | ||||
| template <typename T> | ||||
| const Tensor& _resize_( | ||||
|     const Tensor& self, | ||||
|  | ||||
| @ -792,12 +792,6 @@ std::tuple<Tensor, Tensor> max(const Tensor& self, Dimname dim, bool keepdim) { | ||||
| std::tuple<Tensor&, Tensor&> max_out(const Tensor& self, Dimname dim, bool keepdim, Tensor& max, Tensor& max_indices) { | ||||
|   return at::max_out(max, max_indices, self, dimname_to_position(self, dim), keepdim); | ||||
| } | ||||
| static Tensor argmax(const Tensor& /*self*/, Dimname /*dim*/, bool /*keepdim*/) { | ||||
|   reportNYIDimnameOverload("argmax"); | ||||
| } | ||||
| static Tensor argmin(const Tensor& /*self*/, Dimname /*dim*/, bool /*keepdim*/) { | ||||
|   reportNYIDimnameOverload("argmin"); | ||||
| } | ||||
| Tensor argsort(const Tensor& /*self*/, Dimname /*dim*/, bool /*keepdim*/) { | ||||
|   reportNYIDimnameOverload("argsort"); | ||||
| } | ||||
|  | ||||
| @ -24,10 +24,6 @@ | ||||
|  | ||||
| namespace at::native { | ||||
|  | ||||
| static bool is_cuda(const Tensor& self) { | ||||
|   return self.is_cuda(); | ||||
| } | ||||
|  | ||||
| bool is_distributed(const Tensor& self) { | ||||
|   return false; | ||||
| } | ||||
| @ -60,18 +56,6 @@ bool is_neg(const Tensor& self) { | ||||
|   return self.is_neg(); | ||||
| } | ||||
|  | ||||
| static bool is_sparse(const Tensor& self) { | ||||
|   return self.is_sparse(); | ||||
| } | ||||
|  | ||||
| static bool is_sparse_csr(const Tensor& self) { | ||||
|   return self.is_sparse_csr(); | ||||
| } | ||||
|  | ||||
| static bool is_quantized(const Tensor& self) { | ||||
|   return self.is_quantized(); | ||||
| } | ||||
|  | ||||
| // True if `self` and `from` have compatible tensor type so that `from`'s | ||||
| // TensorImpl can be copied to `self`. | ||||
| bool _has_compatible_shallow_copy_type(const Tensor& self, const Tensor& from) { | ||||
|  | ||||
| @ -38,6 +38,11 @@ float fp16_dot_with_fp32_arith( | ||||
|   const float16_t* x, | ||||
|   const float16_t* a, | ||||
|   int64_t len); | ||||
|  | ||||
| float bf16_dot_with_fp32_arith( | ||||
|   const at::BFloat16* x, | ||||
|   const at::BFloat16* a, | ||||
|   int64_t len); | ||||
| } | ||||
| #endif | ||||
|  | ||||
| @ -326,20 +331,8 @@ static float compute_dot(const at::Half* a, const at::Half* b, int64_t len) { | ||||
|     len); | ||||
| } | ||||
|  | ||||
| static float compute_dot(const at::BFloat16* a, const at::BFloat16* b, int64_t len) { | ||||
|   return at::native::blas_impl::bf16_dot_with_fp32_arith(a, b, len); | ||||
| } | ||||
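|  | ||||
| // Both the Half overload above and this BFloat16 overload delegate to the | ||||
| // shared fp32-accumulation dot kernels in at::native::blas_impl. | ||||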
|  | ||||
| template <> | ||||
|  | ||||
| @ -1,7 +1,3 @@ | ||||
| #include <cstdint> | ||||
| #include <c10/util/Exception.h> | ||||
| #include <c10/core/Scalar.h> | ||||
| #include <c10/core/ScalarType.h> | ||||
| #define TORCH_ASSERT_ONLY_METHOD_OPERATORS | ||||
| #include <ATen/core/Tensor.h> | ||||
| #include <ATen/core/NamedTensor.h> | ||||
| @ -14,7 +10,6 @@ | ||||
| #include <ATen/cuda/tunable/TunableGemm.h> | ||||
| #include <ATen/native/Resize.h> | ||||
| #include <c10/util/MaybeOwned.h> | ||||
| #include <ATen/native/cuda/RowwiseScaledMM.h> | ||||
|  | ||||
| #ifndef AT_PER_OPERATOR_HEADERS | ||||
| #include <ATen/Functions.h> | ||||
| @ -824,97 +819,24 @@ static bool _scaled_mm_allowed_device() { | ||||
| #endif | ||||
| } | ||||
|  | ||||
| namespace{ | ||||
|  | ||||
| enum class ScalingType { | ||||
|   TensorWise, | ||||
|   RowWise, | ||||
|   Error | ||||
| }; | ||||
|  | ||||
| // Validates the scale tensors to scaled_mm | ||||
| // And returns the type of scaling/which kernel to use | ||||
| ScalingType get_scaling_type( | ||||
|     const c10::optional<at::Tensor>& scale_a, | ||||
|     const c10::optional<at::Tensor>& scale_b, | ||||
|     int64_t dim_m, | ||||
|     int64_t dim_n) { | ||||
|   TORCH_CHECK( | ||||
|       scale_a.has_value() == scale_b.has_value(), | ||||
|       "Both scale_a and scale_b must be present or absent."); | ||||
|  | ||||
|   if (scale_a.has_value()) { | ||||
|     // Both Per-Tensor and Row-wise scaling expect fp32 tensors | ||||
|     TORCH_CHECK( | ||||
|         scale_a->scalar_type() == kFloat && scale_b->scalar_type() == kFloat, | ||||
|         "Both scale_a and scale_b must be float (fp32) tensors."); | ||||
|  | ||||
|     // Check the singular scale case for per-tensor scaling | ||||
|     if (scale_a->numel() == 1 && scale_b->numel() == 1) { | ||||
|       return ScalingType::TensorWise; | ||||
|     } else if (scale_a->dim() == 1 && scale_a->size(0) == dim_m) { | ||||
| // Check the per-row scaling case | ||||
| #if !defined(USE_ROCM) && !defined(_MSC_VER) || \ | ||||
|     (defined(USE_ROCM) && ROCM_VERSION >= 60000) | ||||
|       TORCH_CHECK( | ||||
|           scale_a->dim() == 1 && scale_b->dim() == 1, | ||||
|           "Both scale_a and scale_b must be 1-dimensional tensors"); | ||||
|       TORCH_CHECK( | ||||
|           scale_b->size(0) == dim_n, | ||||
|           "For row-wise scaling, scale_b must have size ", | ||||
|           dim_n, | ||||
|           " but got ", | ||||
|           scale_b->size(0), | ||||
|           "."); | ||||
|       TORCH_CHECK( | ||||
|           scale_a->is_contiguous() && scale_b->is_contiguous(), | ||||
|           "Both scale_a and scale_b must be contiguous."); | ||||
|        return ScalingType::RowWise; | ||||
| #else | ||||
|       TORCH_CHECK(false, "Per-row scaling is not supported for this platform!"); | ||||
|       return ScalingType::Error; | ||||
| #endif // !defined(USE_ROCM) && !defined(_MSC_VER) || (defined(USE_ROCM) && | ||||
|        // ROCM_VERSION >= 60000) | ||||
|     } else { | ||||
|       TORCH_CHECK( | ||||
|           false, | ||||
|           "For row-wise scaling, scale_a must be size ", | ||||
|           dim_m, | ||||
|           " but got ", | ||||
|           scale_a->numel(), | ||||
|           " and scale_b must be size ", | ||||
|           dim_n, | ||||
|           " but got ", | ||||
|           scale_b->numel(), | ||||
|           "."); | ||||
|       // Unreachable | ||||
|       return ScalingType::RowWise; | ||||
|     } | ||||
|   } | ||||
|   return ScalingType::Error; | ||||
| } | ||||
|  | ||||
| } // namespace | ||||
|  | ||||
| // Computes matrix multiply + bias while applying scaling to input and output matrices and computes amax | ||||
| // Scales are only applicable when matrices are of Float8 type and are assumed to be 1.0 by default. | ||||
| // If the output matrix is a 16- or 32-bit type, scale_result is not applied and amax is not computed. | ||||
| // Known limitations: | ||||
| //  - Only works if mat1 is row-major and mat2 is column-major | ||||
| //  - Only works if matrices sizes are divisible by 32 | ||||
| // | ||||
| //  Arguments: | ||||
| //    - `mat1`: the first operand of the matrix multiply, can be type `torch.float8_e4m3fn` or `torch.float8_e5m2` | ||||
| //    - `mat2`: the second operand of the matrix multiply, can be type `torch.float8_e4m3fn` or `torch.float8_e5m2` | ||||
| //    - `bias`: the bias, can be type `torch.float16` or `torch.bfloat16` | ||||
| //    - `out_dtype`: the output dtype, can either be a float8 or a higher precision floating point type | ||||
| //    - `scale_a`: a scalar tensor with the inverse scale of `mat1`, only needed if `mat1` is a float8 type | ||||
| //    - `scale_b`: a scalar tensor with the inverse scale of `mat2`, only needed if `mat2` is a float8 type | ||||
| //    - `scale_result`: a scalar tensor with the scale of the output, only set if the output is a float8 type | ||||
| //    - `use_fast_accum`: if true, enables fast float8 accumulation | ||||
| //    - `out`: a reference to the output tensor | ||||
| //    - `amax`: a reference to the amax tensor of the output, only needed if the output is a float8 type and will be updated inplace | ||||
|  | ||||
| std::tuple<Tensor&, Tensor&> | ||||
| _scaled_mm_out_cuda(const Tensor& mat1, const Tensor& mat2, | ||||
| @ -933,11 +855,10 @@ _scaled_mm_out_cuda(const Tensor& mat1, const Tensor& mat2, | ||||
|   TORCH_CHECK( | ||||
|       mat1.sizes()[1] == mat2.sizes()[0], "mat1 and mat2 shapes cannot be multiplied (", | ||||
|       mat1.sizes()[0], "x", mat1.sizes()[1], " and ", mat2.sizes()[0], "x", mat2.sizes()[1], ")"); | ||||
|  | ||||
|   TORCH_CHECK(!scale_a || (scale_a->numel() == 1 && scale_a->scalar_type() == kFloat), | ||||
|        "scale_a must be float scalar"); | ||||
|   TORCH_CHECK(!scale_b || (scale_b->numel() == 1 && scale_b->scalar_type() == kFloat), | ||||
|        "scale_b must be a float scalar"); | ||||
|   TORCH_CHECK(!scale_result || (scale_result->numel() == 1 && scale_result->scalar_type() == kFloat), | ||||
|        "scale_result must be a float scalar"); | ||||
|   TORCH_CHECK(!bias || bias->numel() == mat2.sizes()[1], "Bias must be size ", mat2.sizes()[1], | ||||
| @ -980,26 +901,12 @@ _scaled_mm_out_cuda(const Tensor& mat1, const Tensor& mat2, | ||||
|                       {scale_result_, "scale_result", 7}}; | ||||
|     checkAllSameGPU(__func__, targs); | ||||
|   } | ||||
|   // Validation checks have passed, so resize the output to the actual size. | ||||
|  | ||||
|   IntArrayRef mat1_sizes = mat1.sizes(); | ||||
|   IntArrayRef mat2_sizes = mat2.sizes(); | ||||
|   at::native::resize_output(out, {mat1_sizes[0], mat2_sizes[1]}); | ||||
|   at::native::resize_output(amax, {}); | ||||
|  | ||||
|   cublasCommonArgs args(mat1, mat2, out); | ||||
|   const auto out_dtype_ = args.result->scalar_type(); | ||||
|   TORCH_CHECK(args.transa == 't' && args.transb == 'n', "Only multiplication of row-major and column-major matrices is supported by cuBLASLt"); | ||||
|  | ||||
| @ -2,9 +2,9 @@ | ||||
| // Licensed under the BSD-3-Clause license | ||||
| // This is the GPU implementation of the Connectionist Temporal Loss. | ||||
| // We mostly follow Graves. | ||||
| // 1. Graves et al.: http://www.cs.toronto.edu/~graves/icml_2006.pdf | ||||
| // We use the equations from above link, but note that [1] has 1-based indexing and we (of course) use 0-based. | ||||
| // Graves et al. call the probabilities y, we use log_probs (also calling them inputs) | ||||
| // A few optimizations (similar to those here, but also some I didn't take) are described in | ||||
| // 2. Minmin Sun: http://on-demand.gputechconf.com/gtc/2016/presentation/s6383-minmin-sun-speech-recognition.pdf | ||||
| #define TORCH_ASSERT_ONLY_METHOD_OPERATORS | ||||
|  | ||||
| @ -29,18 +29,10 @@ static inline void maybe_resize_storage_cuda(TensorImpl* self, size_t new_size_b | ||||
| inline TensorImpl* resize_impl_cuda_( | ||||
|     TensorImpl* self, | ||||
|     IntArrayRef size, | ||||
|     at::OptionalIntArrayRef stride) { | ||||
|   if (self->sizes() == size && (!stride || self->strides() == stride)) { | ||||
|     return self; | ||||
|   } | ||||
|  | ||||
|   const auto itemsize = self->dtype().itemsize(); | ||||
|   const auto storage_offset = self->storage_offset(); | ||||
|   size_t storage_size = 1; | ||||
|  | ||||
| @ -1,535 +0,0 @@ | ||||
| #define TORCH_ASSERT_ONLY_METHOD_OPERATORS | ||||
| #include <ATen/Dispatch.h> | ||||
| #include <ATen/core/Tensor.h> | ||||
| #include <ATen/cuda/CUDAContext.h> | ||||
| #include <ATen/cuda/nvrtc_stub/ATenNVRTC.h> | ||||
|  | ||||
| // Determine if the architecture supports rowwise scaled mm | ||||
| #if !defined(USE_ROCM) && defined(CUDA_VERSION) && CUDA_VERSION >= 12000 | ||||
|  | ||||
| #define BUILD_ROWWISE_FP8_KERNEL | ||||
| #endif | ||||
|  | ||||
| #if defined(BUILD_ROWWISE_FP8_KERNEL) | ||||
|  | ||||
| // We are going to override the cuTensorMapEncodeTiled driver api with our lazy loader | ||||
| static CUresult CUDAAPI nvrtc_cuTensorMapEncodeTiled( | ||||
|     CUtensorMap* tensorMap, | ||||
|     CUtensorMapDataType tensorDataType, | ||||
|     cuuint32_t tensorRank, | ||||
|     void* globalAddress, | ||||
|     const cuuint64_t* globalDim, | ||||
|     const cuuint64_t* globalStrides, | ||||
|     const cuuint32_t* boxDim, | ||||
|     const cuuint32_t* elementStrides, | ||||
|     CUtensorMapInterleave interleave, | ||||
|     CUtensorMapSwizzle swizzle, | ||||
|     CUtensorMapL2promotion l2Promotion, | ||||
|     CUtensorMapFloatOOBfill oobFill) { | ||||
|   return at::globalContext().getNVRTC().cuTensorMapEncodeTiled( | ||||
|       tensorMap, | ||||
|       tensorDataType, | ||||
|       tensorRank, | ||||
|       globalAddress, | ||||
|       globalDim, | ||||
|       globalStrides, | ||||
|       boxDim, | ||||
|       elementStrides, | ||||
|       interleave, | ||||
|       swizzle, | ||||
|       l2Promotion, | ||||
|       oobFill); | ||||
| } | ||||
|  | ||||
|  | ||||
| #include <cutlass/core_io.h> | ||||
| #include <cutlass/cutlass.h> | ||||
| #include <cutlass/gemm/device/gemm.h> | ||||
| #include <cutlass/half.h> | ||||
| #include <cutlass/numeric_types.h> | ||||
| #include <cutlass/trace.h> | ||||
| #include <cutlass/util/host_tensor.h> | ||||
|  | ||||
| // Rename the global function symbol | ||||
| #define cuTensorMapEncodeTiled nvrtc_cuTensorMapEncodeTiled | ||||
| #include <cute/tensor.hpp> | ||||
| #undef cuTensorMapEncodeTiled | ||||
| // Set everything back to normal | ||||
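| // cute/tensor.hpp refers to cuTensorMapEncodeTiled by name, so the #define | ||||
| // above swaps in the lazy-loading wrapper for just that inclusion and the | ||||
| // #undef restores the real driver symbol for any code that follows. | ||||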
|  | ||||
| #include <cutlass/gemm/collective/collective_builder.hpp> | ||||
| #include <cutlass/gemm/device/gemm_universal_adapter.h> | ||||
| #include <cutlass/epilogue/collective/collective_builder.hpp> | ||||
|  | ||||
| #include <cute/atom/mma_atom.hpp> | ||||
| #include <cutlass/gemm/dispatch_policy.hpp> | ||||
| #include <cutlass/gemm/kernel/gemm_universal.hpp> | ||||
| #include <cutlass/util/packed_stride.hpp> | ||||
|  | ||||
|  | ||||
| namespace { | ||||
| // Cutlass rowwise kernel | ||||
| template < | ||||
|     int TB_M, | ||||
|     int TB_N, | ||||
|     int TB_K, | ||||
|     int TBS_M, | ||||
|     int TBS_N, | ||||
|     int TBS_K, | ||||
|     bool PONG, | ||||
|     bool FAST_ACCUM, | ||||
|     bool USE_BIAS, | ||||
|     typename INPUT_DTYPE, | ||||
|     typename BIAS_DTYPE> | ||||
| void f8f8bf16_rowwise_impl( | ||||
|     at::Tensor XQ, // FP8 | ||||
|     at::Tensor WQ, // FP8 | ||||
|     at::Tensor x_scale, | ||||
|     at::Tensor w_scale, | ||||
|     c10::optional<at::Tensor> bias, | ||||
|     at::Tensor out) { | ||||
|   int M = XQ.size(0); | ||||
|   int N = WQ.size(1); | ||||
|   int K = XQ.size(1); | ||||
|  | ||||
|   TORCH_CHECK(XQ.is_cuda() && XQ.is_contiguous()); | ||||
|   TORCH_CHECK( | ||||
|       WQ.is_cuda() && WQ.ndimension() == 2 && WQ.stride(1) == WQ.size(0) && | ||||
|       WQ.stride(0) == 1); | ||||
|  | ||||
|   // auto Y = at::empty({M, N}, XQ.options().dtype(at::kBFloat16)); | ||||
|  | ||||
|   using ElementInputA = INPUT_DTYPE; | ||||
|   using LayoutInputA = cutlass::layout::RowMajor; | ||||
|   constexpr int AlignmentInputA = 16 / sizeof(ElementInputA); | ||||
|  | ||||
|   using ElementInputB = cutlass::float_e4m3_t; | ||||
|   using LayoutInputB = cutlass::layout::ColumnMajor; | ||||
|   constexpr int AlignmentInputB = 16 / sizeof(ElementInputB); | ||||
|  | ||||
|   using ElementBias = BIAS_DTYPE; | ||||
|  | ||||
|   using ElementOutput = cutlass::bfloat16_t; | ||||
|   using LayoutOutput = cutlass::layout::RowMajor; | ||||
|   constexpr int AlignmentOutput = 16 / sizeof(ElementOutput); | ||||
|  | ||||
|   using ElementAccumulator = float; | ||||
|   using ElementComputeEpilogue = float; | ||||
|   using ArchTag = cutlass::arch::Sm90; // Tag indicating the minimum SM that | ||||
|                                        // supports the intended feature | ||||
|   using OperatorClass = cutlass::arch::OpClassTensorOp; | ||||
|   using TileShape = cute::Shape< | ||||
|       cute::Int<TB_M>, | ||||
|       cute::Int<TB_N>, | ||||
|       cute::Int<TB_K>>; // Threadblock-level | ||||
|                         // tile size | ||||
|   using ClusterShape = cute::Shape< | ||||
|       cute::Int<TBS_M>, | ||||
|       cute::Int<TBS_N>, | ||||
|       cute::Int<TBS_K>>; // Shape of the | ||||
|                          // threadblocks in a | ||||
|                          // cluster | ||||
|   using KernelSchedule = cutlass::gemm::collective:: | ||||
|       KernelScheduleAuto; // Kernel to launch based on the default setting in | ||||
|                           // the Collective Builder | ||||
|  | ||||
|   // Implement rowwise scaling epilogue. | ||||
|   using XScale = cutlass::epilogue::fusion::Sm90ColBroadcast< | ||||
|       0, | ||||
|       TileShape, | ||||
|       ElementComputeEpilogue, | ||||
|       cute::Stride<cute::Int<1>, cute::Int<0>, cute::Int<0>>>; | ||||
|  | ||||
|   using WScale = cutlass::epilogue::fusion::Sm90RowBroadcast< | ||||
|       PONG ? 2 : 1, | ||||
|       TileShape, | ||||
|       ElementComputeEpilogue, | ||||
|       cute::Stride<cute::Int<0>, cute::Int<1>, cute::Int<0>>>; | ||||
|  | ||||
|   using Bias = cutlass::epilogue::fusion::Sm90RowBroadcast< | ||||
|       PONG ? 2 : 1, | ||||
|       TileShape, | ||||
|       ElementBias, | ||||
|       cute::Stride<cute::Int<0>, cute::Int<1>, cute::Int<0>>>; | ||||
|  | ||||
|   using Accum = cutlass::epilogue::fusion::Sm90AccFetch; | ||||
|  | ||||
|   using Compute0 = cutlass::epilogue::fusion::Sm90Compute< | ||||
|       cutlass::multiplies, | ||||
|       ElementComputeEpilogue, // First stage output type. | ||||
|       ElementComputeEpilogue, // First stage input types. | ||||
|       cutlass::FloatRoundStyle::round_to_nearest>; | ||||
|  | ||||
|   using EVTCompute0 = | ||||
|       cutlass::epilogue::fusion::Sm90EVT<Compute0, WScale, Accum>; | ||||
|  | ||||
|   using Compute1 = cutlass::epilogue::fusion::Sm90Compute< | ||||
|       cutlass::multiplies, | ||||
|       cute::conditional_t< // Second stage output type. | ||||
|           USE_BIAS, | ||||
|           ElementBias, | ||||
|           ElementOutput>, | ||||
|       ElementComputeEpilogue, // Second stage input types. | ||||
|       cutlass::FloatRoundStyle::round_to_nearest>; | ||||
|  | ||||
|   using EVTCompute1 = | ||||
|       cutlass::epilogue::fusion::Sm90EVT<Compute1, XScale, EVTCompute0>; | ||||
|  | ||||
|   using ComputeBias = cutlass::epilogue::fusion::Sm90Compute< | ||||
|       cutlass::plus, | ||||
|       ElementOutput, // Final (optional) stage output type. | ||||
|       ElementBias, // Final stage input types. | ||||
|       cutlass::FloatRoundStyle::round_to_nearest>; | ||||
|  | ||||
|   using EVTComputeBias = | ||||
|       cutlass::epilogue::fusion::Sm90EVT<ComputeBias, Bias, EVTCompute1>; | ||||
|  | ||||
|   using EpilogueEVT = | ||||
|       cute::conditional_t<USE_BIAS, EVTComputeBias, EVTCompute1>; | ||||
|  | ||||
|   using CollectiveEpilogue = | ||||
|       typename cutlass::epilogue::collective::CollectiveBuilder< | ||||
|           cutlass::arch::Sm90, | ||||
|           cutlass::arch::OpClassTensorOp, | ||||
|           TileShape, | ||||
|           ClusterShape, | ||||
|           cutlass::epilogue::collective::EpilogueTileAuto, | ||||
|           ElementAccumulator, | ||||
|           ElementComputeEpilogue, | ||||
|           ElementOutput, | ||||
|           LayoutOutput, | ||||
|           AlignmentOutput, | ||||
|           ElementOutput, | ||||
|           LayoutOutput, | ||||
|           AlignmentOutput, | ||||
|           cutlass::epilogue::TmaWarpSpecialized, | ||||
|           EpilogueEVT>::CollectiveOp; | ||||
|  | ||||
|   using DefaultSchedule = cutlass::gemm::KernelTmaWarpSpecialized; | ||||
|   using PongSchedule = cutlass::gemm::KernelTmaWarpSpecializedPingpong; | ||||
|   using FastDefaultSchedule = | ||||
|       cutlass::gemm::KernelTmaWarpSpecializedFP8FastAccum; | ||||
|   using FastPongSchedule = | ||||
|       cutlass::gemm::KernelTmaWarpSpecializedPingpongFP8FastAccum; | ||||
|   using SlowAccum = cute::conditional_t<PONG, PongSchedule, DefaultSchedule>; | ||||
|   using FastAccum = | ||||
|       cute::conditional_t<PONG, FastPongSchedule, FastDefaultSchedule>; | ||||
|   using MainLoopSchedule = | ||||
|       cute::conditional_t<FAST_ACCUM, FastAccum, SlowAccum>; | ||||
|  | ||||
|   using CollectiveMainloop = | ||||
|       typename cutlass::gemm::collective::CollectiveBuilder< | ||||
|           ArchTag, | ||||
|           OperatorClass, | ||||
|           ElementInputA, | ||||
|           LayoutInputA, | ||||
|           AlignmentInputA, | ||||
|           ElementInputB, | ||||
|           LayoutInputB, | ||||
|           AlignmentInputB, | ||||
|           ElementAccumulator, | ||||
|           TileShape, | ||||
|           ClusterShape, | ||||
|           cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>( | ||||
|               sizeof(typename CollectiveEpilogue::SharedStorage))>, | ||||
|           MainLoopSchedule>::CollectiveOp; | ||||
|  | ||||
|   using GemmKernel = cutlass::gemm::kernel::GemmUniversal< | ||||
|       cute::Shape<int, int, int>, | ||||
|       CollectiveMainloop, | ||||
|       CollectiveEpilogue>; | ||||
|  | ||||
|   using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; | ||||
|  | ||||
|   using StrideInputA = typename Gemm::GemmKernel::StrideA; | ||||
|   using StrideInputB = typename Gemm::GemmKernel::StrideB; | ||||
|   using StrideOutput = typename Gemm::GemmKernel::StrideC; | ||||
|  | ||||
|   StrideInputA stride_a = cutlass::make_cute_packed_stride( | ||||
|       StrideInputA{}, cute::make_shape(M, K, 1)); | ||||
|   StrideInputB stride_b = cutlass::make_cute_packed_stride( | ||||
|       StrideInputB{}, cute::make_shape(N, K, 1)); | ||||
|   StrideOutput stride_output = cutlass::make_cute_packed_stride( | ||||
|       StrideOutput{}, cute::make_shape(M, N, 1)); | ||||
|  | ||||
|   typename Gemm::Arguments arguments{ | ||||
|       cutlass::gemm::GemmUniversalMode::kGemm, | ||||
|       {M, N, K}, | ||||
|       {reinterpret_cast<ElementInputA*>(XQ.data_ptr()), | ||||
|        stride_a, | ||||
|        reinterpret_cast<ElementInputB*>(WQ.data_ptr()), | ||||
|        stride_b}, | ||||
|       {{}, // Epilogue thread we populate below. | ||||
|        (ElementOutput*)out.data_ptr<at::BFloat16>(), | ||||
|        stride_output, | ||||
|        (ElementOutput*)out.data_ptr<at::BFloat16>(), | ||||
|        stride_output}}; | ||||
|  | ||||
|   if constexpr (USE_BIAS) { | ||||
|     arguments.epilogue.thread = { | ||||
|         {reinterpret_cast<ElementBias*>(bias.value().data_ptr())}, // bias | ||||
|         // compute_1 | ||||
|         { | ||||
|             {reinterpret_cast<ElementComputeEpilogue*>( | ||||
|                 x_scale.data_ptr())}, // x_scale | ||||
|             // compute_0 | ||||
|             { | ||||
|                 {reinterpret_cast<ElementComputeEpilogue*>( | ||||
|                     w_scale.data_ptr())}, // w_scale | ||||
|                 {}, // Accumulator | ||||
|                 {} // Multiplies | ||||
|             }, | ||||
|             {}, // Multiplies | ||||
|         }, | ||||
|         {}, // Plus | ||||
|     }; | ||||
|   } else { | ||||
|     arguments.epilogue.thread = { | ||||
|         {reinterpret_cast<ElementComputeEpilogue*>( | ||||
|             x_scale.data_ptr())}, // x_scale | ||||
|         // compute_0 | ||||
|         { | ||||
|             {reinterpret_cast<ElementComputeEpilogue*>( | ||||
|                 w_scale.data_ptr())}, // w_scale | ||||
|             {}, // Accumulator | ||||
|             {} // Multiplies | ||||
|         }, | ||||
|         {}, // Multiplies | ||||
|     }; | ||||
|   } | ||||
|  | ||||
|   Gemm gemm; | ||||
|  | ||||
|   // Using the arguments, query for extra workspace required for matrix | ||||
|   // multiplication computation | ||||
|   size_t workspace_size = Gemm::get_workspace_size(arguments); | ||||
|  | ||||
|   // Allocate workspace memory | ||||
|   cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); | ||||
|  | ||||
|   // Check the problem size is supported or not | ||||
|   cutlass::Status status = gemm.can_implement(arguments); | ||||
|   if (status != cutlass::Status::kSuccess) { | ||||
|     throw std::runtime_error("cutlass cannot implement"); | ||||
|   } | ||||
|  | ||||
|   // Initialize CUTLASS kernel with arguments and workspace pointer | ||||
|   status = gemm.initialize(arguments, workspace.get()); | ||||
|   if (status != cutlass::Status::kSuccess) { | ||||
|     throw std::runtime_error("cutlass cannot initialize"); | ||||
|   } | ||||
|  | ||||
|   status = gemm(at::cuda::getCurrentCUDAStream()); | ||||
|   if (status != cutlass::Status::kSuccess) { | ||||
|     throw std::runtime_error( | ||||
|         std::string("cutlass cannot run") + | ||||
|         cutlass::cutlassGetStatusString(status)); | ||||
|   } | ||||
|   C10_CUDA_KERNEL_LAUNCH_CHECK(); | ||||
| } | ||||
|  | ||||
| // FP8 Rowwise Cutlass kernel dispatch. | ||||
| enum class KernelMode { Small, Large, Default }; | ||||
|  | ||||
| KernelMode get_kernel_mode(at::Tensor XQ, at::Tensor WQ) { | ||||
|   auto M = XQ.size(0); | ||||
|   auto K = XQ.size(1); | ||||
|   auto N = WQ.size(0); | ||||
|   // Use a large kernel if at least two shapes are large.... | ||||
|   bool use_large_kernel = | ||||
|       ((M >= 2048 && K >= 2048) || (M >= 2048 && N >= 2048) || | ||||
|        (K >= 2048 && N >= 2048)); | ||||
|   if (M <= 128 || N <= 128) { | ||||
|     return KernelMode::Small; | ||||
|   } else if (use_large_kernel) { | ||||
|     return KernelMode::Large; | ||||
|   } else { | ||||
|     return KernelMode::Default; | ||||
|   } | ||||
| } | ||||
|  | ||||
| template <typename InputDType, bool FastAccum, bool UseBias, typename BiasDType> | ||||
| void dispatch_fp8_rowwise_kernel( | ||||
|     at::Tensor XQ, | ||||
|     at::Tensor WQ, | ||||
|     at::Tensor x_scale, | ||||
|     at::Tensor w_scale, | ||||
|     c10::optional<at::Tensor> bias, | ||||
|     at::Tensor out) { | ||||
|   KernelMode kernel = get_kernel_mode(XQ, WQ); | ||||
|   if (kernel == KernelMode::Small) { | ||||
|     return f8f8bf16_rowwise_impl< | ||||
|         64, | ||||
|         128, | ||||
|         128, | ||||
|         2, | ||||
|         1, | ||||
|         1, | ||||
|         false, | ||||
|         FastAccum, | ||||
|         UseBias, | ||||
|         InputDType, | ||||
|         BiasDType>(XQ, WQ, x_scale, w_scale, bias, out); | ||||
|   } else if (kernel == KernelMode::Large) { | ||||
|     return f8f8bf16_rowwise_impl< | ||||
|         128, | ||||
|         128, | ||||
|         128, | ||||
|         2, | ||||
|         1, | ||||
|         1, | ||||
|         true, | ||||
|         FastAccum, | ||||
|         UseBias, | ||||
|         InputDType, | ||||
|         BiasDType>(XQ, WQ, x_scale, w_scale, bias, out); | ||||
|   } else { | ||||
|     return f8f8bf16_rowwise_impl< | ||||
|         128, | ||||
|         128, | ||||
|         128, | ||||
|         1, | ||||
|         2, | ||||
|         1, | ||||
|         false, | ||||
|         FastAccum, | ||||
|         UseBias, | ||||
|         InputDType, | ||||
|         BiasDType>(XQ, WQ, x_scale, w_scale, bias, out); | ||||
|   } | ||||
| } | ||||
|  | ||||
| } // namespace | ||||
|  | ||||
| #endif // !defined(USE_ROCM) | ||||
|  | ||||
| namespace at::cuda::detail { | ||||
| void f8f8bf16_rowwise( | ||||
|     at::Tensor XQ, // FP8 | ||||
|     at::Tensor WQ, // FP8 | ||||
|     at::Tensor x_scale, // FP32 | ||||
|     at::Tensor w_scale, // FP32 | ||||
|     c10::optional<at::Tensor> bias, // BF16 | ||||
|     bool use_fast_accum, | ||||
|     at::Tensor& out) { | ||||
| #if defined(BUILD_ROWWISE_FP8_KERNEL) | ||||
|   // Check datatypes. | ||||
|   TORCH_CHECK( | ||||
|       x_scale.dtype() == at::kFloat && w_scale.dtype() == at::kFloat, | ||||
|       "Scale tensors must be float32."); | ||||
|   if (bias.has_value()) { | ||||
|     TORCH_CHECK( | ||||
|         bias.value().dtype() == at::kFloat || | ||||
|             bias.value().dtype() == at::kBFloat16, | ||||
|         "Bias type must be bfloat16 or float32 if provided."); | ||||
|   } | ||||
|   // Extract problem size. | ||||
|   int M = XQ.size(0); | ||||
|   int N = WQ.size(1); | ||||
|   int K = XQ.size(1); | ||||
|  | ||||
|   bool use_bias = bias.has_value(); | ||||
|   bool bf16_bias = use_bias && bias.value().dtype() == at::kBFloat16; | ||||
|  | ||||
|   // Templatize based on input dtype. | ||||
|   bool use_e5m2 = XQ.dtype() == at::kFloat8_e5m2; | ||||
|   TORCH_CHECK( | ||||
|       WQ.dtype() == at::kFloat8_e4m3fn, | ||||
|       "For row-wise scaling the second input is required to be a float8_e4m3fn dtype."); | ||||
|  | ||||
|   if (use_bias) { | ||||
|     if (bf16_bias) { | ||||
|       if (use_fast_accum) { | ||||
|         if (use_e5m2) { | ||||
|           return dispatch_fp8_rowwise_kernel< | ||||
|               cutlass::float_e5m2_t, | ||||
|               true, | ||||
|               true, | ||||
|               cutlass::bfloat16_t>(XQ, WQ, x_scale, w_scale, bias, out); | ||||
|         } else { | ||||
|           return dispatch_fp8_rowwise_kernel< | ||||
|               cutlass::float_e4m3_t, | ||||
|               true, | ||||
|               true, | ||||
|               cutlass::bfloat16_t>(XQ, WQ, x_scale, w_scale, bias, out); | ||||
|         } | ||||
|       } else { | ||||
|         if (use_e5m2) { | ||||
|           return dispatch_fp8_rowwise_kernel< | ||||
|               cutlass::float_e5m2_t, | ||||
|               false, | ||||
|               true, | ||||
|               cutlass::bfloat16_t>(XQ, WQ, x_scale, w_scale, bias, out); | ||||
|         } else { | ||||
|           return dispatch_fp8_rowwise_kernel< | ||||
|               cutlass::float_e4m3_t, | ||||
|               false, | ||||
|               true, | ||||
|               cutlass::bfloat16_t>(XQ, WQ, x_scale, w_scale, bias, out); | ||||
|         } | ||||
|       } | ||||
|     } else { | ||||
|       if (use_fast_accum) { | ||||
|         if (use_e5m2) { | ||||
|           return dispatch_fp8_rowwise_kernel< | ||||
|               cutlass::float_e5m2_t, | ||||
|               true, | ||||
|               true, | ||||
|               float>(XQ, WQ, x_scale, w_scale, bias, out); | ||||
|         } else { | ||||
|           return dispatch_fp8_rowwise_kernel< | ||||
|               cutlass::float_e4m3_t, | ||||
|               true, | ||||
|               true, | ||||
|               float>(XQ, WQ, x_scale, w_scale, bias, out); | ||||
|         } | ||||
|       } else { | ||||
|         if (use_e5m2) { | ||||
|           return dispatch_fp8_rowwise_kernel< | ||||
|               cutlass::float_e5m2_t, | ||||
|               false, | ||||
|               true, | ||||
|               float>(XQ, WQ, x_scale, w_scale, bias, out); | ||||
|         } else { | ||||
|           return dispatch_fp8_rowwise_kernel< | ||||
|               cutlass::float_e4m3_t, | ||||
|               false, | ||||
|               true, | ||||
|               float>(XQ, WQ, x_scale, w_scale, bias, out); | ||||
|         } | ||||
|       } | ||||
|     } | ||||
|   } else { | ||||
|     if (use_fast_accum) { | ||||
|       if (use_e5m2) { | ||||
|         return dispatch_fp8_rowwise_kernel< | ||||
|             cutlass::float_e5m2_t, | ||||
|             true, | ||||
|             false, | ||||
|             float>(XQ, WQ, x_scale, w_scale, bias, out); | ||||
|       } else { | ||||
|         return dispatch_fp8_rowwise_kernel< | ||||
|             cutlass::float_e4m3_t, | ||||
|             true, | ||||
|             false, | ||||
|             float>(XQ, WQ, x_scale, w_scale, bias, out); | ||||
|       } | ||||
|     } else { | ||||
|       if (use_e5m2) { | ||||
|         return dispatch_fp8_rowwise_kernel< | ||||
|             cutlass::float_e5m2_t, | ||||
|             false, | ||||
|             false, | ||||
|             float>(XQ, WQ, x_scale, w_scale, bias, out); | ||||
|       } else { | ||||
|         return dispatch_fp8_rowwise_kernel< | ||||
|             cutlass::float_e4m3_t, | ||||
|             false, | ||||
|             false, | ||||
|             float>(XQ, WQ, x_scale, w_scale, bias, out); | ||||
|       } | ||||
|     } | ||||
|   } | ||||
| #else // BUILD_ROWWISE_FP8_KERNEL | ||||
|   TORCH_CHECK(false, "Rowwise scaling is not currenlty supported on your device"); | ||||
| #endif | ||||
| } | ||||
|  | ||||
| } // namespace at::cuda::detail | ||||
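|  | ||||
| // A minimal, hypothetical caller sketch (not part of this diff); the | ||||
| // shapes and per-row scale layout are assumptions based on the checks | ||||
| // and comments above: | ||||
| //   int M = 16, N = 32, K = 64; | ||||
| //   auto XQ = at::randn({M, K}, at::kCUDA).to(at::kFloat8_e4m3fn); | ||||
| //   auto WQ = at::randn({N, K}, at::kCUDA).to(at::kFloat8_e4m3fn); | ||||
| //   auto x_scale = at::ones({M}, at::kCUDA); // FP32, one scale per row | ||||
| //   auto w_scale = at::ones({N}, at::kCUDA); // FP32, one scale per row | ||||
| //   auto out = at::empty({M, N}, at::device(at::kCUDA).dtype(at::kBFloat16)); | ||||
| //   at::cuda::detail::f8f8bf16_rowwise( | ||||
| //       XQ, WQ, x_scale, w_scale, /*bias=*/c10::nullopt, | ||||
| //       /*use_fast_accum=*/true, out); | ||||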
| @ -1,15 +0,0 @@ | ||||
| #pragma once | ||||
| #include <ATen/core/TensorBase.h> | ||||
| #include <c10/util/Optional.h> | ||||
|  | ||||
|  | ||||
| namespace at::cuda::detail { | ||||
| TORCH_API void f8f8bf16_rowwise( | ||||
|     at::Tensor XQ, // FP8 | ||||
|     at::Tensor WQ, // FP8 | ||||
|     at::Tensor x_scale, // FP32 | ||||
|     at::Tensor w_scale, // FP32 | ||||
|     c10::optional<at::Tensor> bias, // BF16 | ||||
|     bool use_fast_accum, | ||||
|     at::Tensor& out); | ||||
| }  // at::cuda::detail | ||||
| @ -40,6 +40,8 @@ | ||||
| #include <magma_v2.h> | ||||
| #include <ATen/cuda/detail/CUDAHooks.h> | ||||
|  | ||||
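| // True when MAGMA is compiled in (the #else branch below defines it as | ||||
| // false), so call sites can test `!use_magma_` at runtime instead of | ||||
| // wrapping each branch in #if AT_MAGMA_ENABLED() guards. | ||||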
| const bool use_magma_ = true; | ||||
|  | ||||
| namespace { | ||||
| struct MagmaInitializer { | ||||
|   MagmaInitializer() { | ||||
| @ -59,6 +61,9 @@ struct MagmaInitializer { | ||||
| #error "MAGMA release minor or micro version >= 10, please correct AT_MAGMA_VERSION" | ||||
| #endif | ||||
|  | ||||
| #else | ||||
| const bool use_magma_ = false; | ||||
|  | ||||
| #endif | ||||
|  | ||||
| namespace at::native { | ||||
| @ -79,9 +84,9 @@ void magmaLdlHermitian( | ||||
|     magma_int_t ldda, | ||||
|     magma_int_t* ipiv, | ||||
|     magma_int_t* info) { | ||||
|   static_assert( | ||||
|       false&&sizeof(scalar_t), | ||||
|       "LDL decomposition is not available." | ||||
|   TORCH_CHECK( | ||||
|       false, | ||||
|       "LDL decomposition is not available.", | ||||
|       "Please rebuild with MAGMA 2.5.4+."); | ||||
| } | ||||
|  | ||||
| @ -1029,13 +1034,18 @@ magma_trans_t to_magma(TransposeType trans) { | ||||
|  | ||||
| namespace { | ||||
|  | ||||
| #if AT_MAGMA_ENABLED() | ||||
| template <typename scalar_t> | ||||
| void apply_ldl_factor_magma( | ||||
|     const Tensor& A, | ||||
|     const Tensor& pivots, | ||||
|     const Tensor& info, | ||||
|     bool upper) { | ||||
| #if !AT_MAGMA_ENABLED() | ||||
|   TORCH_CHECK( | ||||
|       false, | ||||
|       "torch.linalg.ldl_factor: MAGMA library not found in " | ||||
|       "compilation. Please rebuild with MAGMA."); | ||||
| #else | ||||
|   auto batch_size = batchCount(A); | ||||
|   magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)"); | ||||
|   magma_int_t leading_dim = magma_int_cast(A.stride(-1), "A.stride(-1)"); | ||||
| @ -1066,6 +1076,7 @@ void apply_ldl_factor_magma( | ||||
|   } | ||||
|   pivots.copy_(pivots_cpu); | ||||
|   info.copy_(info_cpu); | ||||
| #endif | ||||
| } | ||||
|  | ||||
| void ldl_factor_magma( | ||||
| @ -1087,7 +1098,6 @@ void ldl_factor_magma( | ||||
|         apply_ldl_factor_magma<scalar_t>(LD, pivots, info, upper); | ||||
|       }); | ||||
| } | ||||
| #endif | ||||
|  | ||||
| void ldl_factor_kernel( | ||||
|     const Tensor& LD, | ||||
| @ -1100,10 +1110,8 @@ void ldl_factor_kernel( | ||||
|     case at::LinalgBackend::Cusolver: | ||||
|       return ldl_factor_cusolver( | ||||
|           LD, pivots, info, upper, hermitian); | ||||
| #if AT_MAGMA_ENABLED() | ||||
|     case at::LinalgBackend::Magma: | ||||
|       return ldl_factor_magma(LD, pivots, info, upper, hermitian); | ||||
| #endif | ||||
|     default: | ||||
|     // By default use cusolver if available and magma otherwise. | ||||
|     // If cusolver and magma 2.5.4+ are both available and hermitian=true, | ||||
| @ -1147,9 +1155,12 @@ REGISTER_CUDA_DISPATCH(ldl_solve_stub, &ldl_solve_kernel) | ||||
|  | ||||
| // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||||
|  | ||||
| #if AT_MAGMA_ENABLED() | ||||
| template <typename scalar_t> | ||||
| static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) { | ||||
| #if !AT_MAGMA_ENABLED() | ||||
| AT_ERROR("cholesky_solve: MAGMA library not found in " | ||||
|     "compilation. Please rebuild with MAGMA."); | ||||
| #else | ||||
|   magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower; | ||||
|  | ||||
|   auto A_data = A.data_ptr<scalar_t>(); | ||||
| @ -1168,8 +1179,8 @@ static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info | ||||
|     auto b_mat_stride = matrixStride(b); | ||||
|     magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount"); | ||||
|  | ||||
|     scalar_t** A_array = nullptr; | ||||
|     scalar_t** b_array = nullptr; | ||||
|     scalar_t** A_array; | ||||
|     scalar_t** b_array; | ||||
|  | ||||
|     ALLOCATE_ARRAY(A_array, scalar_t*, batch_size); | ||||
|     ALLOCATE_ARRAY(b_array, scalar_t*, batch_size); | ||||
| @ -1186,7 +1197,7 @@ static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info | ||||
|     // Compute as many batches of 65535 as possible | ||||
|     // The number of "mini"-batches is floor(batch_size / batch_limit) | ||||
|     // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves | ||||
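|     // Worked example (illustrative): batch_size = 150000 with | ||||
|     // batch_limit = 65535 gives floor(150000 / 65535) = 2 full mini-batches | ||||
|     // covering 131070 solves, leaving a tail of 18930 handled afterwards. | ||||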
|     int64_t mini_batches = batch_size / batch_limit, mini_idx = 0; | ||||
|     int64_t mini_batches = batch_size / batch_limit, mini_idx; | ||||
|     for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { | ||||
|       scalar_t** A_array_cur = &A_array[mini_idx]; | ||||
|       scalar_t** b_array_cur = &b_array[mini_idx]; | ||||
| @ -1210,6 +1221,7 @@ static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info | ||||
|  | ||||
|     info = info_tmp; | ||||
|   } | ||||
| #endif | ||||
| } | ||||
|  | ||||
| Tensor _cholesky_solve_helper_cuda_magma(const Tensor& self, const Tensor& A, bool upper) { | ||||
| @ -1222,7 +1234,6 @@ Tensor _cholesky_solve_helper_cuda_magma(const Tensor& self, const Tensor& A, bo | ||||
|   TORCH_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info); | ||||
|   return self_working_copy; | ||||
| } | ||||
| #endif | ||||
|  | ||||
| // Todo: cusolverDn<T>potrsBatched only supports nrhs == 1 and does not have good performance. | ||||
| //     Batched cholesky_solve is dispatched to magma. | ||||
| @ -1232,20 +1243,14 @@ Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upp | ||||
|   switch (preferred_backend) { | ||||
|     case at::LinalgBackend::Cusolver: | ||||
|       return _cholesky_solve_helper_cuda_cusolver(self, A, upper); | ||||
| #if AT_MAGMA_ENABLED() | ||||
|     case at::LinalgBackend::Magma: | ||||
|       return _cholesky_solve_helper_cuda_magma(self, A, upper); | ||||
| #endif | ||||
|     default: | ||||
| #if !AT_MAGMA_ENABLED() | ||||
|       return _cholesky_solve_helper_cuda_cusolver(self, A, upper); | ||||
| #else | ||||
|       if (batchCount(self) == 1) { | ||||
|       if (batchCount(self) == 1 || !use_magma_) { | ||||
|         return _cholesky_solve_helper_cuda_cusolver(self, A, upper); | ||||
|       } else { | ||||
|         return _cholesky_solve_helper_cuda_magma(self, A, upper); | ||||
|       } | ||||
| #endif | ||||
|   } | ||||
| #else | ||||
|   return _cholesky_solve_helper_cuda_magma(self, A, upper); | ||||
| @ -1254,9 +1259,14 @@ Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upp | ||||
|  | ||||
| // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||||
|  | ||||
| #if AT_MAGMA_ENABLED() | ||||
| template <typename scalar_t> | ||||
| static void apply_cholesky(const Tensor& self, bool upper, const Tensor& info) { | ||||
| #if !AT_MAGMA_ENABLED() | ||||
|   TORCH_CHECK( | ||||
|       false, | ||||
|       "Calling torch.linalg.cholesky on a CUDA tensor requires compiling ", | ||||
|       "PyTorch with MAGMA. Please use PyTorch built with MAGMA support."); | ||||
| #else | ||||
|   magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower; | ||||
|  | ||||
|   auto self_data = self.data_ptr<scalar_t>(); | ||||
| @ -1278,7 +1288,7 @@ static void apply_cholesky(const Tensor& self, bool upper, const Tensor& info) { | ||||
|     auto self_mat_stride = matrixStride(self); | ||||
|     magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount"); | ||||
|  | ||||
|     scalar_t** self_array = nullptr; | ||||
|     scalar_t** self_array; | ||||
|  | ||||
|     ALLOCATE_ARRAY(self_array, scalar_t*, batch_size); | ||||
|  | ||||
| @ -1304,6 +1314,7 @@ static void apply_cholesky(const Tensor& self, bool upper, const Tensor& info) { | ||||
|         uplo, n, self_array_cur, lda, info_array_cur, nbatches, magma_queue); | ||||
|     } | ||||
|   } | ||||
| #endif | ||||
| } | ||||
|  | ||||
| void cholesky_helper_magma(const Tensor& input, bool upper, const Tensor& info) { | ||||
| @ -1339,7 +1350,6 @@ void cholesky_helper_magma(const Tensor& input, bool upper, const Tensor& info) | ||||
|     } | ||||
|   } | ||||
| } | ||||
| #endif | ||||
|  | ||||
| static void cholesky_kernel(const Tensor& input, const Tensor& info, bool upper) { | ||||
| #if defined(USE_LINALG_SOLVER) && !defined(USE_ROCM) | ||||
| @ -1348,21 +1358,15 @@ static void cholesky_kernel(const Tensor& input, const Tensor& info, bool upper) | ||||
|     case at::LinalgBackend::Cusolver: | ||||
|       cholesky_helper_cusolver(input, upper, info); | ||||
|       break; | ||||
| #if AT_MAGMA_ENABLED() | ||||
|     case at::LinalgBackend::Magma: | ||||
|       cholesky_helper_magma(input, upper, info); | ||||
|       break; | ||||
| #endif | ||||
|     default: | ||||
| #if !AT_MAGMA_ENABLED() | ||||
|       cholesky_helper_cusolver(input, upper, info); | ||||
| #else | ||||
|       if (batchCount(input) == 1 || use_cusolver_potrf_batched_) { | ||||
|       if (batchCount(input) == 1 || !use_magma_ || use_cusolver_potrf_batched_) { | ||||
|         cholesky_helper_cusolver(input, upper, info); | ||||
|       } else { | ||||
|         cholesky_helper_magma(input, upper, info); | ||||
|       } | ||||
| #endif | ||||
|   } | ||||
| #else | ||||
|   cholesky_helper_magma(input, upper, info); | ||||
| @ -1380,9 +1384,11 @@ This is an in-place routine, content of 'input' is overwritten. | ||||
| MAGMA requires 'infos' to reside in CPU memory. | ||||
| For more information see MAGMA's documentation for POTRS routine. | ||||
| */ | ||||
| #if AT_MAGMA_ENABLED() | ||||
| template <typename scalar_t> | ||||
| static void apply_cholesky_inverse(Tensor& input, Tensor& infos, bool upper) { | ||||
| #if !AT_MAGMA_ENABLED() | ||||
|   TORCH_CHECK(false, "cholesky_inverse: MAGMA library not found in compilation. Please rebuild with MAGMA."); | ||||
| #else | ||||
|   // magmaCholeskyInverse (magma_dpotri_gpu) is slow because internally | ||||
|   // it transfers data several times between GPU and CPU and calls a lapack routine on the CPU; | ||||
|   // using magmaCholeskySolveBatched is a lot faster | ||||
| @ -1412,6 +1418,7 @@ static void apply_cholesky_inverse(Tensor& input, Tensor& infos, bool upper) { | ||||
|   int64_t info_tmp = 0; | ||||
|   apply_cholesky_solve<scalar_t>(result_u, input_u, upper, info_tmp); | ||||
|   infos.fill_(info_tmp); | ||||
| #endif | ||||
| } | ||||
|  | ||||
| // This is a type dispatching helper function for 'apply_cholesky_inverse' | ||||
| @ -1421,7 +1428,6 @@ Tensor& cholesky_inverse_kernel_impl_magma(Tensor &result, Tensor& infos, bool u | ||||
|   }); | ||||
|   return result; | ||||
| } | ||||
| #endif | ||||
|  | ||||
| Tensor& cholesky_inverse_kernel_impl(Tensor &result, Tensor& infos, bool upper) { | ||||
|   // This function calculates the inverse matrix in-place | ||||
| @ -1432,25 +1438,20 @@ Tensor& cholesky_inverse_kernel_impl(Tensor &result, Tensor& infos, bool upper) | ||||
|   switch (preferred_backend) { | ||||
|     case at::LinalgBackend::Cusolver: | ||||
|       return cholesky_inverse_kernel_impl_cusolver(result, infos, upper); | ||||
| #if AT_MAGMA_ENABLED() | ||||
|     case at::LinalgBackend::Magma: | ||||
|       return cholesky_inverse_kernel_impl_magma(result, infos, upper); | ||||
| #endif | ||||
|     default: | ||||
| #if !AT_MAGMA_ENABLED() | ||||
|       return cholesky_inverse_kernel_impl_cusolver(result, infos, upper); | ||||
| #else | ||||
|       if (batchCount(result) == 1) { | ||||
|       if (batchCount(result) == 1 || | ||||
|           !use_magma_) { | ||||
|         return cholesky_inverse_kernel_impl_cusolver(result, infos, upper); | ||||
|       } else { | ||||
|         return cholesky_inverse_kernel_impl_magma(result, infos, upper); | ||||
|       } | ||||
|  | ||||
| #endif | ||||
|   } | ||||
| #else | ||||
|   return cholesky_inverse_kernel_impl_magma(result, infos, upper); | ||||
| #endif | ||||
|  | ||||
| } | ||||
|  | ||||
| REGISTER_CUDA_DISPATCH(cholesky_inverse_stub, &cholesky_inverse_kernel_impl); | ||||
| @ -1525,9 +1526,14 @@ static void apply_lu_factor_looped_magma(const Tensor& input, const Tensor& pivo | ||||
|  | ||||
|   For further details, please see the MAGMA documentation for magma_dgetrf_batched. | ||||
| */ | ||||
| #if AT_MAGMA_ENABLED() | ||||
| template <typename scalar_t> | ||||
| static void apply_lu_factor_batched_magma(const Tensor& input, const Tensor& pivots, const Tensor& infos, bool compute_pivots) { | ||||
| #if !AT_MAGMA_ENABLED() | ||||
|   TORCH_CHECK( | ||||
|       false, | ||||
|       "Calling linalg.lu_factor on a CUDA tensor requires compiling ", | ||||
|       "PyTorch with MAGMA. Please rebuild with MAGMA."); | ||||
| #else | ||||
|   // There is a bug in lu_factor_batched_magma in MAGMA < 2.5.2, see | ||||
|   // https://bitbucket.org/icl/magma/issues/13/getrf_batched-kernel-produces-nans-on | ||||
|   std::tuple<magma_int_t, magma_int_t, magma_int_t> version; | ||||
| @ -1544,7 +1550,7 @@ static void apply_lu_factor_batched_magma(const Tensor& input, const Tensor& piv | ||||
|   magma_int_t n = magma_int_cast(input.size(-1), "n"); | ||||
|   auto leading_dimension = std::max<magma_int_t>(1, m); | ||||
|  | ||||
|   scalar_t** input_array = nullptr; | ||||
|   scalar_t** input_array; | ||||
|   ALLOCATE_ARRAY(input_array, scalar_t*, batch_size); | ||||
|  | ||||
|   // Set up array of pointers to matrices | ||||
| @ -1564,7 +1570,7 @@ static void apply_lu_factor_batched_magma(const Tensor& input, const Tensor& piv | ||||
|     // magmaLuBatched might not set the values for it | ||||
|     // see https://github.com/pytorch/pytorch/pull/53064 | ||||
|     pivots.fill_(1); | ||||
|     magma_int_t** pivots_array = nullptr; | ||||
|     magma_int_t** pivots_array; | ||||
|     ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size); | ||||
|     for (int64_t i = 0; i < batch_size; i++) { | ||||
|       pivots_array[i] = &pivots_data[i * pivots_stride]; | ||||
| @ -1577,6 +1583,7 @@ static void apply_lu_factor_batched_magma(const Tensor& input, const Tensor& piv | ||||
|   // block CPU until all operations on the queue are finished | ||||
|   // this explicit sync prevents garbage results from the subsequent magmaLuSolveBatched call from a different queue | ||||
|   magma_queue_sync(magma_queue.get_queue()); | ||||
| #endif | ||||
| } | ||||
|  | ||||
| static void lu_factor_looped_magma(const Tensor& input, const Tensor& pivots, const Tensor& infos, bool compute_pivots) { | ||||
| @ -1590,7 +1597,6 @@ static void lu_factor_batched_magma(const Tensor& input, const Tensor& pivots, c | ||||
|     apply_lu_factor_batched_magma<scalar_t>(input, pivots, infos, compute_pivots); | ||||
|   }); | ||||
| } | ||||
| #endif | ||||
|  | ||||
| static void lu_factor(const Tensor& input, const Tensor& pivots, const Tensor& infos, bool compute_pivots) { | ||||
|   auto batch_size = batchCount(input); | ||||
| @ -1598,7 +1604,6 @@ static void lu_factor(const Tensor& input, const Tensor& pivots, const Tensor& i | ||||
|   auto m = input.size(-2); | ||||
|   auto n = input.size(-1); | ||||
|  | ||||
| #if AT_MAGMA_ENABLED() | ||||
|   const auto lu_factor_magma = [batch_size](const Tensor& input, const Tensor& pivots, const Tensor& infos, const bool compute_pivots) { | ||||
|     if (batch_size == 1) { | ||||
|       lu_factor_looped_magma(input, pivots, infos, compute_pivots); | ||||
| @ -1606,7 +1611,6 @@ static void lu_factor(const Tensor& input, const Tensor& pivots, const Tensor& i | ||||
|       lu_factor_batched_magma(input, pivots, infos, compute_pivots); | ||||
|     } | ||||
|   }; | ||||
| #endif | ||||
|  | ||||
|   const auto preferred_backend = at::globalContext().linalgPreferredBackend(); | ||||
| #ifdef USE_LINALG_SOLVER | ||||
| @ -1631,12 +1635,9 @@ static void lu_factor(const Tensor& input, const Tensor& pivots, const Tensor& i | ||||
|     lu_factor_cusolver(input, pivots, infos, compute_pivots); | ||||
|   } else | ||||
| #endif // ifdef USE_LINALG_SOLVER | ||||
| #if AT_MAGMA_ENABLED() | ||||
|   if (preferred_backend == at::LinalgBackend::Magma) { | ||||
|     lu_factor_magma(input, pivots, infos, compute_pivots); | ||||
|   } else | ||||
| #endif | ||||
|   {  // preferred backend == default | ||||
|   } else {  // preferred backend == default | ||||
| #ifdef USE_LINALG_SOLVER | ||||
| #if AT_MAGMA_ENABLED() | ||||
|     // If magma batched is buggy, we use cusolver | ||||
| @ -1700,8 +1701,8 @@ AT_ERROR("triangular_solve: MAGMA library not found in " | ||||
|   auto A_mat_stride = matrixStride(A); | ||||
|   auto b_mat_stride = matrixStride(b); | ||||
|  | ||||
|   scalar_t** A_array = nullptr; | ||||
|   scalar_t** b_array = nullptr; | ||||
|   scalar_t** A_array; | ||||
|   scalar_t** b_array; | ||||
|  | ||||
|   ALLOCATE_ARRAY(A_array, scalar_t*, batch_size); | ||||
|   ALLOCATE_ARRAY(b_array, scalar_t*, batch_size); | ||||
| @ -1719,7 +1720,7 @@ AT_ERROR("triangular_solve: MAGMA library not found in " | ||||
|   // The number of "mini"-batches is floor(batch_size / batch_limit) | ||||
|   // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves | ||||
|   int64_t mini_batches = batch_size / batch_limit; | ||||
|   int64_t mini_idx = 0; // this is outside the loop because it is used for the case batch_size % batch_limit != 0 | ||||
|   int64_t mini_idx; // this is outside the loop because it is used for the case batch_size % batch_limit != 0 | ||||
|   for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { | ||||
|     scalar_t** A_array_cur = &A_array[mini_idx]; | ||||
|     scalar_t** b_array_cur = &b_array[mini_idx]; | ||||
| @ -1776,7 +1777,7 @@ Tensor& orgqr_kernel_impl(Tensor& result, const Tensor& tau) { | ||||
| #ifdef USE_LINALG_SOLVER | ||||
|   return orgqr_helper_cusolver(result, tau); // cusolver | ||||
| #else | ||||
|   static_assert(false, "Calling torch.orgqr on a CUDA tensor requires compiling ", | ||||
|   TORCH_CHECK(false, "Calling torch.orgqr on a CUDA tensor requires compiling ", | ||||
|     "PyTorch with cuSOLVER. Please use PyTorch built with cuSOLVER support."); | ||||
| #endif | ||||
| } | ||||
| @ -1787,8 +1788,8 @@ void ormqr_kernel(const Tensor& input, const Tensor& tau, const Tensor& other, b | ||||
| #ifdef USE_LINALG_SOLVER | ||||
|   ormqr_cusolver(input, tau, other, left, transpose); | ||||
| #else | ||||
|   static_assert(false, | ||||
|       "Calling torch.ormqr on a CUDA tensor requires compiling " | ||||
|   TORCH_CHECK(false, | ||||
|       "Calling torch.ormqr on a CUDA tensor requires compiling ", | ||||
|       "PyTorch with cuSOLVER. Please use PyTorch built with cuSOLVER support."); | ||||
| #endif | ||||
| } | ||||
| @ -1797,9 +1798,15 @@ REGISTER_CUDA_DISPATCH(ormqr_stub, &ormqr_kernel); | ||||
|  | ||||
| // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||||
|  | ||||
| #if AT_MAGMA_ENABLED() | ||||
| template <typename scalar_t> | ||||
| static void apply_geqrf(const Tensor& input, const Tensor& tau) { | ||||
| #if !AT_MAGMA_ENABLED() | ||||
|   TORCH_CHECK( | ||||
|     false, | ||||
|     "Calling torch.geqrf on a CUDA tensor requires compiling ", | ||||
|     "PyTorch with MAGMA. Please use PyTorch built with MAGMA support."); | ||||
| #else | ||||
|  | ||||
|   magma_int_t m = magma_int_cast(input.size(-2), "m"); | ||||
|   magma_int_t n = magma_int_cast(input.size(-1), "n"); | ||||
|  | ||||
| @ -1826,6 +1833,7 @@ static void apply_geqrf(const Tensor& input, const Tensor& tau) { | ||||
|     checkMagmaInternalError(info, "geqrf"); | ||||
|   } | ||||
|   tau.copy_(tau_cpu, /*non_blocking=*/true); | ||||
| #endif | ||||
| } | ||||
|  | ||||
| // This is a type dispatching helper function for 'apply_geqrf' | ||||
| @ -1834,7 +1842,6 @@ void geqrf_magma(const Tensor& input, const Tensor& tau) { | ||||
|     apply_geqrf<scalar_t>(input, tau); | ||||
|   }); | ||||
| } | ||||
| #endif | ||||
|  | ||||
| void geqrf_kernel(const Tensor& input, const Tensor& tau) { | ||||
| #ifdef USE_LINALG_SOLVER | ||||
| @ -1860,10 +1867,8 @@ void geqrf_kernel(const Tensor& input, const Tensor& tau) { | ||||
|   // - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu | ||||
|   // Refer to the below link for more details: | ||||
|   // http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800 | ||||
| #if AT_MAGMA_ENABLED() | ||||
|     case at::LinalgBackend::Magma: | ||||
|       return geqrf_magma(input, tau); | ||||
| #endif | ||||
|     case at::LinalgBackend::Cusolver: | ||||
|     default: | ||||
|       return geqrf_cusolver_backend(input, tau); | ||||
| @ -1875,9 +1880,14 @@ void geqrf_kernel(const Tensor& input, const Tensor& tau) { | ||||
|  | ||||
| REGISTER_CUDA_DISPATCH(geqrf_stub, &geqrf_kernel); | ||||
|  | ||||
| #if AT_MAGMA_ENABLED() | ||||
| template <typename scalar_t> | ||||
| static void apply_magma_eigh(const Tensor& values, const Tensor& vectors, const Tensor& infos, bool upper, bool compute_eigenvectors) { | ||||
| #if !AT_MAGMA_ENABLED() | ||||
|   TORCH_CHECK( | ||||
|     false, | ||||
|     "Calling torch.linalg.eigh/eigvalsh on a CUDA tensor requires compiling ", | ||||
|     "PyTorch with MAGMA. Please use PyTorch built with MAGMA support."); | ||||
| #else | ||||
|   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(values.device() == kCPU); | ||||
|   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.device() == kCPU); | ||||
|  | ||||
| @ -1897,7 +1907,7 @@ static void apply_magma_eigh(const Tensor& values, const Tensor& vectors, const | ||||
|   auto values_data = values.data_ptr<value_t>(); | ||||
|   auto infos_data = infos.data_ptr<magma_int_t>(); | ||||
|  | ||||
|   scalar_t* wA = nullptr; | ||||
|   scalar_t* wA; | ||||
|   ALLOCATE_ARRAY(wA, scalar_t, lda * lda); | ||||
|  | ||||
|   // Run once, first to get the optimum work sizes. | ||||
| @ -1907,14 +1917,14 @@ static void apply_magma_eigh(const Tensor& values, const Tensor& vectors, const | ||||
|   magma_int_t lwork = -1; | ||||
|   scalar_t wkopt; | ||||
|   magma_int_t liwork = -1; | ||||
|   magma_int_t iwkopt = -1; | ||||
|   magma_int_t iwkopt; | ||||
|   magma_int_t lrwork = -1; | ||||
|   value_t rwkopt; | ||||
|   magmaSyevd<scalar_t, value_t>(jobz, uplo, n, vectors_data, lda, values_data, | ||||
|     wA, lda, &wkopt, lwork, &rwkopt, lrwork, &iwkopt, liwork, infos_data); | ||||
|  | ||||
|   scalar_t* work = nullptr; | ||||
|   magma_int_t* iwork = nullptr; | ||||
|   scalar_t* work; | ||||
|   magma_int_t* iwork; | ||||
|   lwork = magma_int_cast(std::max<int64_t>(1, real_impl<scalar_t, value_t>(wkopt)), "work_size"); | ||||
|   liwork = magma_int_cast(std::max<int64_t>(1, iwkopt), "iwork_size"); | ||||
|   ALLOCATE_ARRAY(work, scalar_t, lwork); | ||||
| @ -1941,6 +1951,7 @@ static void apply_magma_eigh(const Tensor& values, const Tensor& vectors, const | ||||
|       return; | ||||
|     } | ||||
|   } | ||||
| #endif | ||||
| } | ||||
|  | ||||
| // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ linalg_eigh ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||||
| @ -1979,17 +1990,14 @@ void linalg_eigh_magma(const Tensor& eigenvalues, const Tensor& eigenvectors, co | ||||
|     eigenvalues.copy_(eigenvalues_cpu); | ||||
|   } | ||||
| } | ||||
| #endif | ||||
|  | ||||
| void linalg_eigh_kernel(const Tensor& eigenvalues, const Tensor& eigenvectors, const Tensor& infos, bool upper, bool compute_eigenvectors) { | ||||
| #if defined(USE_LINALG_SOLVER) | ||||
|   auto preferred_backend = at::globalContext().linalgPreferredBackend(); | ||||
|   switch (preferred_backend) { | ||||
| #if AT_MAGMA_ENABLED() | ||||
|     case at::LinalgBackend::Magma: | ||||
|       linalg_eigh_magma(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors); | ||||
|       break; | ||||
| #endif | ||||
|     case at::LinalgBackend::Cusolver: | ||||
|     default: | ||||
|       linalg_eigh_cusolver(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors); | ||||
| @ -2009,9 +2017,12 @@ This is an in-place routine, content of 'input', 'values', 'vectors' is overwrit | ||||
| 'infos' is an int Tensor containing error codes for each matrix in the batched input. | ||||
| For more information see MAGMA's documentation for GEEV routine. | ||||
| */ | ||||
| #if AT_MAGMA_ENABLED() | ||||
| template <typename scalar_t> | ||||
| void apply_linalg_eig(Tensor& values, Tensor& vectors, Tensor& input, Tensor& infos, bool compute_eigenvectors) { | ||||
| #if !AT_MAGMA_ENABLED() | ||||
| TORCH_CHECK(false, "Calling torch.linalg.eig on a CUDA tensor requires compiling PyTorch with MAGMA. " | ||||
|                    "Either transfer the tensor to the CPU before calling torch.linalg.eig or recompile with MAGMA."); | ||||
| #else | ||||
|   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input.device() == at::kCPU); | ||||
|   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(values.device() == at::kCPU); | ||||
|   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.device() == at::kCPU); | ||||
| @ -2061,6 +2072,7 @@ void apply_linalg_eig(Tensor& values, Tensor& vectors, Tensor& input, Tensor& in | ||||
|     magmaEig<scalar_t, value_t>(jobvl, jobvr, n, input_working_ptr, lda, values_working_ptr, | ||||
|       lvectors_data, ldvl, rvectors_working_ptr, ldvr, work_data, lwork, rwork_data, info_working_ptr); | ||||
|   } | ||||
| #endif | ||||
| } | ||||
|  | ||||
| // This is a type dispatching helper function for 'apply_linalg_eig' | ||||
| @ -2093,6 +2105,10 @@ static void apply_svd_magma(const Tensor& A, | ||||
|                             const Tensor& S, | ||||
|                             const Tensor& Vh, | ||||
|                             const Tensor& info) { | ||||
| #if !AT_MAGMA_ENABLED() | ||||
| AT_ERROR("linalg.svd: MAGMA library not found in " | ||||
|     "compilation. Please rebuild with MAGMA."); | ||||
| #else | ||||
|   using value_t = typename c10::scalar_value_type<scalar_t>::type; | ||||
|   const auto A_data = A.data_ptr<scalar_t>(); | ||||
|   const auto U_data = compute_uv ? U.data_ptr<scalar_t>() : nullptr; | ||||
| @ -2120,7 +2136,7 @@ static void apply_svd_magma(const Tensor& A, | ||||
|     rwork = static_cast<value_t*>(storage_rwork.mutable_data()); | ||||
|   } | ||||
|  | ||||
|   magma_int_t* iwork = nullptr; | ||||
|   magma_int_t* iwork; | ||||
|   ALLOCATE_ARRAY(iwork, magma_int_t, 8 * std::min(m, n)); | ||||
|  | ||||
|   // Query svd for the optimal lwork size | ||||
| @ -2135,7 +2151,7 @@ static void apply_svd_magma(const Tensor& A, | ||||
|                                 &wkopt, lwork, rwork, iwork, info_data); | ||||
|     lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size"); | ||||
|   } | ||||
|   scalar_t* work = nullptr; | ||||
|   scalar_t* work; | ||||
|   ALLOCATE_ARRAY(work, scalar_t, lwork); | ||||
|  | ||||
|   for (int64_t i = 0; i < batchsize; i++) { | ||||
| @ -2148,6 +2164,7 @@ static void apply_svd_magma(const Tensor& A, | ||||
|                                 work, lwork, rwork, iwork, | ||||
|                                 info_data + i); | ||||
|   } | ||||
| #endif | ||||
| } | ||||
|  | ||||
| void svd_magma(const Tensor& A, | ||||
| @ -2189,7 +2206,6 @@ void svd_magma(const Tensor& A, | ||||
|   S.copy_(S_, /*non_blocking*/true); | ||||
|   info.copy_(info_, /*non_blocking*/true); | ||||
| } | ||||
| #endif | ||||
|  | ||||
| void svd_kernel(const Tensor& A, | ||||
|                 const bool full_matrices, | ||||
| @ -2201,13 +2217,10 @@ void svd_kernel(const Tensor& A, | ||||
|                 const Tensor& info) { | ||||
| #ifdef USE_LINALG_SOLVER | ||||
|   // We always use cuSOLVER unless the user has specified they want to use MAGMA | ||||
| #if AT_MAGMA_ENABLED() | ||||
|   bool use_magma = at::globalContext().linalgPreferredBackend() == at::LinalgBackend::Magma; | ||||
|   if (use_magma) { | ||||
|     svd_magma(A, full_matrices, compute_uv, U, S, Vh, info); | ||||
|   } else | ||||
| #endif | ||||
|   { | ||||
|   } else { | ||||
|     // svd_cusolver computes V rather than Vh, so we pass a view of Vh.mT | ||||
|     // and then conjugate Vh in-place | ||||
|     svd_cusolver(A, full_matrices, compute_uv, driver, U, S, compute_uv ? Vh.mT() : Vh, info); | ||||
| @ -2238,9 +2251,14 @@ REGISTER_CUDA_DISPATCH(svd_stub, &svd_kernel) | ||||
|  | ||||
|   For further details, please see the MAGMA documentation for magma_dgetrs_gpu. | ||||
| */ | ||||
| #if AT_MAGMA_ENABLED() | ||||
| template <typename scalar_t> | ||||
| static void apply_lu_solve_looped_magma(const Tensor& LU, const Tensor& pivots, const Tensor& B, TransposeType transpose) { | ||||
| #if !AT_MAGMA_ENABLED() | ||||
|   TORCH_CHECK( | ||||
|       false, | ||||
|       "Calling linalg.lu_solve on a CUDA tensor requires compiling ", | ||||
|       "PyTorch with MAGMA. Please rebuild with MAGMA."); | ||||
| #else | ||||
|   auto trans = to_magma(transpose); | ||||
|   auto b_data = B.data_ptr<scalar_t>(); | ||||
|   auto lu_data = LU.data_ptr<scalar_t>(); | ||||
| @ -2278,6 +2296,7 @@ static void apply_lu_solve_looped_magma(const Tensor& LU, const Tensor& pivots, | ||||
|     // so we don't need to check it all the time | ||||
|     TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info == 0); | ||||
|   } | ||||
| #endif | ||||
| } | ||||
|  | ||||
| /* | ||||
| @ -2296,6 +2315,12 @@ static void apply_lu_solve_looped_magma(const Tensor& LU, const Tensor& pivots, | ||||
| */ | ||||
| template <typename scalar_t> | ||||
| static void apply_lu_solve_batched_magma(const Tensor& LU, const Tensor& pivots, const Tensor& B, TransposeType transpose) { | ||||
| #if !AT_MAGMA_ENABLED() | ||||
|   TORCH_CHECK( | ||||
|       false, | ||||
|       "Calling linalg.lu_solve on a CUDA tensor requires compiling ", | ||||
|       "PyTorch with MAGMA. Please rebuild with MAGMA."); | ||||
| #else | ||||
|   TORCH_INTERNAL_ASSERT(batchCount(B) == batchCount(LU), "batch_size of LU and B must be the same"); | ||||
|   TORCH_INTERNAL_ASSERT(batchCount(LU) == batchCount(pivots.unsqueeze(-1)), "batch_size of LU and pivots must be the same"); | ||||
|   auto trans = to_magma(transpose); | ||||
| @ -2313,9 +2338,9 @@ static void apply_lu_solve_batched_magma(const Tensor& LU, const Tensor& pivots, | ||||
|   auto pivots_stride = pivots.size(-1); | ||||
|   magma_int_t batch_size = magma_int_cast(batchCount(B), "batchCount"); | ||||
|  | ||||
|   magma_int_t** pivots_array = nullptr; | ||||
|   scalar_t** lu_array = nullptr; | ||||
|   scalar_t** b_array = nullptr; | ||||
|   magma_int_t** pivots_array; | ||||
|   scalar_t** lu_array; | ||||
|   scalar_t** b_array; | ||||
|  | ||||
|   ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size); | ||||
|   ALLOCATE_ARRAY(lu_array, scalar_t*, batch_size); | ||||
| @ -2339,7 +2364,7 @@ static void apply_lu_solve_batched_magma(const Tensor& LU, const Tensor& pivots, | ||||
|     scalar_t** b_array_cur = &b_array[mini_idx]; | ||||
|     magma_int_t** pivots_array_cur = &pivots_array[mini_idx]; | ||||
|  | ||||
|     int info = -1; | ||||
|     int info; | ||||
|     magmaLuSolveBatched<scalar_t>( | ||||
|         n, nrhs, lu_array_cur, leading_dimension, | ||||
|         pivots_array_cur, b_array_cur, leading_dimension, | ||||
| @ -2349,6 +2374,7 @@ static void apply_lu_solve_batched_magma(const Tensor& LU, const Tensor& pivots, | ||||
|     // so we don't need to check it all the time | ||||
|     TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info == 0); | ||||
|   } | ||||
| #endif | ||||
| } | ||||
|  | ||||
| static void lu_solve_batched_magma(const Tensor& LU, const Tensor& pivots, const Tensor& B, TransposeType trans) { | ||||
| @ -2364,7 +2390,6 @@ static void lu_solve_looped_magma(const Tensor& LU, const Tensor& pivots, const | ||||
|     apply_lu_solve_looped_magma<scalar_t>(LU, pivots, B, trans); | ||||
|   }); | ||||
| } | ||||
| #endif | ||||
|  | ||||
| c10::MaybeOwned<Tensor> maybe_expand_lu(const Tensor& B, const Tensor& LU) { | ||||
|   // B and LU have the same number of dimensions | ||||
| @ -2399,11 +2424,9 @@ static void lu_solve_kernel(const Tensor& LU, const Tensor& pivots, const Tensor | ||||
|   auto b = batchCount(B); | ||||
|   auto n = LU.size(-2); | ||||
|   auto k = B.size(-1); | ||||
| #if AT_MAGMA_ENABLED() | ||||
|   // magma implementation of LU solve cannot handle a b tensor with last dim > 1024 | ||||
|   // See https://bitbucket.org/icl/magma/issues/19/dgesv_batched-dgetrs_batched-fails-for | ||||
|   bool over_batched_magma_dim_limit = k > 1024; | ||||
| #endif | ||||
|   // heuristics determined from tests discussed in https://github.com/pytorch/pytorch/pull/72935 | ||||
|  | ||||
|   // Computes X = U^{-1}L^{-1}P^T B via triangular solves | ||||
| @ -2418,7 +2441,7 @@ static void lu_solve_kernel(const Tensor& LU, const Tensor& pivots, const Tensor | ||||
|       .set_check_mem_overlap(false) | ||||
|       .check_all_same_dtype(false) | ||||
|       .resize_outputs(false) | ||||
|       .declare_static_shape(pivots_->sizes(), /*squash_dims=*/pivots_->dim() - 1) | ||||
|       .declare_static_shape(pivots_->sizes(), /*squash_dim=*/pivots_->dim() - 1) | ||||
|       .add_output(perm) | ||||
|       .add_const_input(*pivots_) | ||||
|       .build(); | ||||
| @ -2434,7 +2457,7 @@ static void lu_solve_kernel(const Tensor& LU, const Tensor& pivots, const Tensor | ||||
|       // B1 = P^T @ B  (must be done out-of-place as B is both source and target) | ||||
|       auto B1 = B.scatter(-2, inv_perm.unsqueeze(-1).expand_as(B), B); | ||||
|       // B = L^{-1} @ B1 | ||||
|       at::linalg_solve_triangular_out(const_cast<Tensor&>(B), *LU_, B1, /*upper=*/false, /*left=*/true, /*unitriangular=*/true); | ||||
|       at::linalg_solve_triangular_out(const_cast<Tensor&>(B), *LU_, std::move(B1), /*upper=*/false, /*left=*/true, /*unitriangular=*/true); | ||||
|       // B = U^{-1} @ B | ||||
|       at::linalg_solve_triangular_out(const_cast<Tensor&>(B), *LU_, B, /*upper=*/true); | ||||
|     } else { | ||||
| @ -2456,13 +2479,11 @@ static void lu_solve_kernel(const Tensor& LU, const Tensor& pivots, const Tensor | ||||
|   }; | ||||
| #endif | ||||
|  | ||||
| #if AT_MAGMA_ENABLED() | ||||
|   auto lu_solve_batched_magma_fn = [](const Tensor& LU, const Tensor& pivots, const Tensor& B, TransposeType trans) { | ||||
|     auto LU_ = maybe_expand_lu(B, LU); | ||||
|     auto pivots_ = maybe_expand_pivots(B, pivots); | ||||
|     lu_solve_batched_magma(*LU_, *pivots_, B, trans); | ||||
|   }; | ||||
| #endif | ||||
|  | ||||
|  | ||||
|   // Preferred Backend | ||||
| @ -2477,7 +2498,6 @@ static void lu_solve_kernel(const Tensor& LU, const Tensor& pivots, const Tensor | ||||
|     return; | ||||
|   } else | ||||
| #endif // ifdef USE_LINALG_SOLVER | ||||
| #if AT_MAGMA_ENABLED() | ||||
|   if (preferred_backend == at::LinalgBackend::Magma) { | ||||
|     // Looped magma is very slow, but batched magma is buggy in these two cases | ||||
|     if (!over_batched_magma_dim_limit && trans == TransposeType::NoTranspose) { | ||||
| @ -2488,7 +2508,6 @@ static void lu_solve_kernel(const Tensor& LU, const Tensor& pivots, const Tensor | ||||
|     } | ||||
|     return; | ||||
|   } | ||||
| #endif | ||||
|  | ||||
|   // Heuristic | ||||
|   //if (n == k) { | ||||
| @ -2529,12 +2548,9 @@ static void lu_solve_kernel(const Tensor& LU, const Tensor& pivots, const Tensor | ||||
|   } | ||||
|  | ||||
| if (n <= 8) { | ||||
| #if AT_MAGMA_ENABLED() | ||||
|   if (!over_batched_magma_dim_limit && trans == TransposeType::NoTranspose && k >= 256) { | ||||
|   if (use_magma_ && !over_batched_magma_dim_limit && trans == TransposeType::NoTranspose && k >= 256) { | ||||
|     lu_solve_batched_magma_fn(LU, pivots, B, trans); | ||||
|   } else | ||||
| #endif | ||||
|   { | ||||
|   } else { | ||||
|     lu_solve_batched_cublas_fn(LU, pivots, B, trans); | ||||
|   } | ||||
| } else if (n <= 64) { | ||||
| @ -2567,9 +2583,12 @@ REGISTER_CUDA_DISPATCH(lu_solve_stub, &lu_solve_kernel); | ||||
|  | ||||
| // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lstsq ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||||
|  | ||||
| #if AT_MAGMA_ENABLED() | ||||
| template <typename scalar_t> | ||||
| static void apply_gels(const Tensor& a, Tensor& b, Tensor& infos) { | ||||
| #if !AT_MAGMA_ENABLED() | ||||
|   TORCH_CHECK(false, "torch.linalg.lstsq: MAGMA library not found in " | ||||
|     "compilation. Please rebuild with MAGMA."); | ||||
| #else | ||||
|   auto trans = MagmaNoTrans; | ||||
|   auto m = magma_int_cast(a.size(-2), "m"); | ||||
|   auto n = magma_int_cast(a.size(-1), "n"); | ||||
| @ -2599,6 +2618,7 @@ static void apply_gels(const Tensor& a, Tensor& b, Tensor& infos) { | ||||
|         hwork_ptr, lwork, infos_working_ptr); | ||||
|     } | ||||
|   ); | ||||
| #endif | ||||
| } | ||||
|  | ||||
| void gels_magma(const Tensor& a, Tensor& b, Tensor& infos) { | ||||
| @ -2606,7 +2626,6 @@ void gels_magma(const Tensor& a, Tensor& b, Tensor& infos) { | ||||
|     apply_gels<scalar_t>(a, b, infos); | ||||
|   }); | ||||
| } | ||||
| #endif | ||||
|  | ||||
| void linalg_lstsq_gels(const Tensor& A, const Tensor& B, const Tensor& /*infos*/) { | ||||
|   // The steps for using the QR decomposition for solving least squares problems | ||||
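|   // are, in outline: factor A = QR with geqrf, apply Q^T to B with ormqr, | ||||
|   // then solve the triangular system R X = Q^T B. | ||||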
| @ -2695,10 +2714,8 @@ void gels_looped(const Tensor& a, Tensor& b, Tensor& infos) { | ||||
| #if defined(USE_LINALG_SOLVER) && !defined(USE_ROCM) | ||||
|   auto preferred_backend = at::globalContext().linalgPreferredBackend(); | ||||
|   switch (preferred_backend) { | ||||
| #if AT_MAGMA_ENABLED() | ||||
|     case at::LinalgBackend::Magma: | ||||
|       return gels_magma(a, b, infos); | ||||
| #endif | ||||
|     case at::LinalgBackend::Cusolver: | ||||
|     default: | ||||
|       // linalg_lstsq_gels is a generic function that is implemented using | ||||
|  | ||||
| @ -6,10 +6,9 @@ | ||||
| #include <torch/script.h> | ||||
|  | ||||
| namespace at { | ||||
| namespace native { | ||||
| namespace metal { | ||||
| namespace native::metal { | ||||
|  | ||||
| at::Tensor& copy_from_metal_(at::Tensor& dst, const at::Tensor& src) { | ||||
| static Tensor& copy_from_metal_(Tensor& dst, const Tensor& src) { | ||||
|   TORCH_INTERNAL_ASSERT( | ||||
|       src.device().type() == DeviceType::Metal, | ||||
|       "copy_from_metal input tensor's device is not metal"); | ||||
| @ -34,7 +33,7 @@ at::Tensor& copy_from_metal_(at::Tensor& dst, const at::Tensor& src) { | ||||
|   return dst; | ||||
| } | ||||
|  | ||||
| at::Tensor& copy_to_metal_(at::Tensor& dst, const at::Tensor& src) { | ||||
| static Tensor& copy_to_metal_(Tensor& dst, const Tensor& src) { | ||||
|   TORCH_INTERNAL_ASSERT( | ||||
|       dst.device().type() == DeviceType::Metal, | ||||
|       "copy_to_metal_ output tensor's device is not metal"); | ||||
| @ -54,7 +53,7 @@ at::Tensor& copy_to_metal_(at::Tensor& dst, const at::Tensor& src) { | ||||
|   return dst; | ||||
| } | ||||
|  | ||||
| at::Tensor& metal_copy_impl_(at::Tensor& dst, const at::Tensor& src) { | ||||
| static Tensor& metal_copy_impl_(Tensor& dst, const Tensor& src) { | ||||
|   if (src.device().type() == at::kMetal && dst.device().type() == at::kCPU) { | ||||
|     return copy_from_metal_(dst, src); | ||||
|   } | ||||
| @ -69,7 +68,7 @@ at::Tensor& metal_copy_impl_(at::Tensor& dst, const at::Tensor& src) { | ||||
|  | ||||
| #pragma mark - ATen Ops | ||||
|  | ||||
| Tensor empty( | ||||
| static Tensor empty( | ||||
|     c10::SymIntArrayRef sym_size, | ||||
|     optional<ScalarType> dtype, | ||||
|     optional<Layout> layout, | ||||
| @ -88,7 +87,7 @@ Tensor empty( | ||||
|       std::move(mt), at::device(at::kMetal).dtype(dtype)); | ||||
| }; | ||||
|  | ||||
| at::Tensor empty_strided( | ||||
| static Tensor empty_strided( | ||||
|     IntArrayRef size, | ||||
|     IntArrayRef stride, | ||||
|     optional<ScalarType> dtype, | ||||
| @ -109,8 +108,7 @@ TORCH_LIBRARY_IMPL(aten, Metal, m) { | ||||
|   m.impl(TORCH_SELECTIVE_NAME("aten::empty_strided"), TORCH_FN(empty_strided)); | ||||
| } | ||||
|  | ||||
| } // namespace metal | ||||
| } // namespace native | ||||
| } // namespace native::metal | ||||
|  | ||||
| struct MetalImpl : public at::metal::MetalInterface { | ||||
|   bool is_metal_available() const override { | ||||
|  | ||||
| @ -3,9 +3,7 @@ | ||||
|  | ||||
| #include <c10/util/ArrayRef.h> | ||||
|  | ||||
| namespace at { | ||||
| namespace native { | ||||
| namespace metal { | ||||
| namespace at::native::metal { | ||||
|  | ||||
| struct Conv2DParams final { | ||||
|   Conv2DParams() {} | ||||
| @ -46,8 +44,6 @@ struct Conv2DParams final { | ||||
|   int64_t OH; // output height | ||||
| }; | ||||
|  | ||||
| } // namespace metal | ||||
| } // namespace native | ||||
| } // namespace at | ||||
| } // namespace at::native::metal | ||||
|  | ||||
| #endif /* MetalConvParams_h */ | ||||
|  | ||||
| @ -5,9 +5,7 @@ | ||||
|  | ||||
| #include <string> | ||||
|  | ||||
| namespace at { | ||||
| namespace native { | ||||
| namespace metal { | ||||
| namespace at::native::metal { | ||||
|  | ||||
| struct MetalDeviceInfo { | ||||
|   std::string name; | ||||
| @ -42,8 +40,6 @@ static inline MetalDeviceInfo createDeviceInfo(id<MTLDevice> device) { | ||||
|   return device_info; | ||||
| } | ||||
|  | ||||
| } | ||||
| } | ||||
| } | ||||
| } // namespace at::native::metal | ||||
|  | ||||
| #endif | ||||
|  | ||||
| @ -6,9 +6,7 @@ | ||||
|  | ||||
| #include <ATen/ATen.h> | ||||
|  | ||||
| namespace at { | ||||
| namespace native { | ||||
| namespace metal { | ||||
| namespace at::native::metal { | ||||
|  | ||||
| enum class NeuronType { | ||||
|   None, | ||||
| @ -66,8 +64,6 @@ static inline MPSNNNeuronDescriptor* neuronDescriptor(NeuronType type) { | ||||
|   } | ||||
| } | ||||
|  | ||||
| } | ||||
| } | ||||
| } | ||||
| } // namespace at::native::metal | ||||
|  | ||||
| #endif /* MetalNeuronType_h */ | ||||
|  | ||||
Some files were not shown because too many files have changed in this diff.