mirror of https://github.com/pytorch/pytorch.git
synced 2025-10-24 15:44:58 +08:00

Compare commits: google-mai...fx_cpp
146 Commits
SHA1
73c49ee963
0e6c204642
44371bd432
6e13c7e874
94165dba7b
8a0bc8c9ee
cbb7e3053f
310f80995b
ffc202a1b9
c446851829
613c7d270d
2e42671619
0e3fe694d1
8a45cf4c64
1d84c7e100
6e5c2a1a3b
6220602943
0ef5229569
f9508b4c1f
fe74bbd6f0
921aa194c7
ad96f991a5
5ef081031e
647815049e
ef2b5ed500
39dd4740e6
bef586111a
09cccbc1c7
11f2d8e823
5b3624117a
ba81c3c290
3a620a0f65
8892ddaacc
fdf1666b20
e647ea55a3
dcb63fcedb
cafbcb6376
8ca4cefc7d
0a6df4fca6
82d7a36a27
0c7f4353e5
662a78f957
19b31d899a
b741819b05
3aa623d407
754e6d4ad0
85758fa5ae
6c824cd9fb
b9b89ed638
d9696ea624
fc6e3ff96d
128952625b
c219fa5eb9
8d16a73f0f
852b7b4c99
ac51f782fe
23c156cd2d
a1b664adeb
3090667cf9
224b4339e5
6e75024ff0
771be55bb0
3f9798a4fd
543a870943
5f81265572
7efaeb1494
0c16800b4a
01601ebd41
70724bdbfe
00c6ca4459
190f06d468
a448b3ae95
2ff312359c
d943357a21
68cc63ae27
7e48d6a497
dc8e3c2e90
6a2bf48cfa
e8e0bdf541
96806b1777
3df53c2a8f
747fc35ff5
5e5bbdb35e
4d0ece8196
476bfe6cce
50155e825b
65aa16f968
f99409903c
740cd0559f
117ab34891
901226ae83
7ede78f9f5
e5b3387166
6dfdce92ba
56a3d276fe
80fa2778ed
0a761f0627
54fe2d0e89
04272a0e12
baaa914bf7
9554300436
2184cdd291
7e059b3c95
e8670f6aea
de4f8b9946
fba21edf5b
936225d7b2
32fb68960e
78a6b0c479
304956e1fb
1d0c1087dd
e9c5144cbc
2ffdf556ea
2d47385f0f
4f87f47ea1
f0dd11df55
0de6d2427f
a5b86a1ec0
a5ba9b2858
c58d3af3b4
9d849d4312
48a54146e7
f08fd8e9e3
c97e3ebb96
457df212e1
f5328542b5
5a3bea1e88
70ba6f0ab6
c8ff1cd387
638f543ac2
cd42b95047
68eb771265
2f7cfecd86
c1a43a69e4
9795c4224b
b4a0161449
6adcf21b2b
bf2c05352e
80d34217c6
d3ad84c38f
8bcebc8dae
2fd75667b4
e98662bed9
ffaea656b5
a7c596870d
4123323eff
@@ -91,9 +91,9 @@ _UCC_COMMIT=20eae37090a4ce1b32bcce6144ccad0b49943e0b
 # configuration, so we hardcode everything here rather than do it
 # from scratch
 case "$image" in
-  pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9)
+  pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9)
     CUDA_VERSION=12.4.0
-    CUDNN_VERSION=8
+    CUDNN_VERSION=9
     ANACONDA_PYTHON_VERSION=3.10
     GCC_VERSION=9
     PROTOBUF=yes
@@ -105,9 +105,9 @@ case "$image" in
     CONDA_CMAKE=yes
     TRITON=yes
     ;;
-  pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9)
+  pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9)
     CUDA_VERSION=12.1.1
-    CUDNN_VERSION=8
+    CUDNN_VERSION=9
     ANACONDA_PYTHON_VERSION=3.10
     GCC_VERSION=9
     PROTOBUF=yes
@@ -119,9 +119,9 @@ case "$image" in
     CONDA_CMAKE=yes
     TRITON=yes
     ;;
-  pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9-inductor-benchmarks)
+  pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9-inductor-benchmarks)
     CUDA_VERSION=12.4.0
-    CUDNN_VERSION=8
+    CUDNN_VERSION=9
     ANACONDA_PYTHON_VERSION=3.10
     GCC_VERSION=9
     PROTOBUF=yes
@@ -134,9 +134,9 @@ case "$image" in
     TRITON=yes
     INDUCTOR_BENCHMARKS=yes
     ;;
-  pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks)
+  pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9-inductor-benchmarks)
     CUDA_VERSION=12.1.1
-    CUDNN_VERSION=8
+    CUDNN_VERSION=9
     ANACONDA_PYTHON_VERSION=3.10
     GCC_VERSION=9
     PROTOBUF=yes
@@ -149,9 +149,9 @@ case "$image" in
     TRITON=yes
     INDUCTOR_BENCHMARKS=yes
     ;;
-  pytorch-linux-focal-cuda12.1-cudnn8-py3.12-gcc9-inductor-benchmarks)
+  pytorch-linux-focal-cuda12.1-cudnn9-py3.12-gcc9-inductor-benchmarks)
     CUDA_VERSION=12.1.1
-    CUDNN_VERSION=8
+    CUDNN_VERSION=9
     ANACONDA_PYTHON_VERSION=3.12
     GCC_VERSION=9
     PROTOBUF=yes
@@ -164,9 +164,9 @@ case "$image" in
     TRITON=yes
     INDUCTOR_BENCHMARKS=yes
     ;;
-  pytorch-linux-focal-cuda12.4-cudnn8-py3.12-gcc9-inductor-benchmarks)
+  pytorch-linux-focal-cuda12.4-cudnn9-py3.12-gcc9-inductor-benchmarks)
     CUDA_VERSION=12.4.0
-    CUDNN_VERSION=8
+    CUDNN_VERSION=9
     ANACONDA_PYTHON_VERSION=3.12
     GCC_VERSION=9
     PROTOBUF=yes
@@ -179,9 +179,9 @@ case "$image" in
     TRITON=yes
     INDUCTOR_BENCHMARKS=yes
     ;;
-  pytorch-linux-focal-cuda11.8-cudnn8-py3-gcc9)
+  pytorch-linux-focal-cuda11.8-cudnn9-py3-gcc9)
     CUDA_VERSION=11.8.0
-    CUDNN_VERSION=8
+    CUDNN_VERSION=9
     ANACONDA_PYTHON_VERSION=3.10
     GCC_VERSION=9
     PROTOBUF=yes
@@ -193,9 +193,9 @@ case "$image" in
     CONDA_CMAKE=yes
     TRITON=yes
     ;;
-  pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9)
+  pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9)
     CUDA_VERSION=12.4.0
-    CUDNN_VERSION=8
+    CUDNN_VERSION=9
     ANACONDA_PYTHON_VERSION=3.10
     GCC_VERSION=9
     PROTOBUF=yes
@@ -207,9 +207,9 @@ case "$image" in
     CONDA_CMAKE=yes
     TRITON=yes
     ;;
-  pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9)
+  pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9)
     CUDA_VERSION=12.1.1
-    CUDNN_VERSION=8
+    CUDNN_VERSION=9
     ANACONDA_PYTHON_VERSION=3.10
     GCC_VERSION=9
     PROTOBUF=yes
@@ -221,9 +221,9 @@ case "$image" in
     CONDA_CMAKE=yes
     TRITON=yes
     ;;
-  pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9)
+  pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9)
     CUDA_VERSION=12.4.0
-    CUDNN_VERSION=8
+    CUDNN_VERSION=9
     ANACONDA_PYTHON_VERSION=3.10
     GCC_VERSION=9
     PROTOBUF=yes
@@ -330,10 +330,10 @@ case "$image" in
     DOCS=yes
     INDUCTOR_BENCHMARKS=yes
     ;;
-  pytorch-linux-jammy-cuda11.8-cudnn8-py3.8-clang12)
+  pytorch-linux-jammy-cuda11.8-cudnn9-py3.8-clang12)
     ANACONDA_PYTHON_VERSION=3.8
     CUDA_VERSION=11.8
-    CUDNN_VERSION=8
+    CUDNN_VERSION=9
     CLANG_VERSION=12
     PROTOBUF=yes
     DB=yes
@@ -380,7 +380,7 @@ case "$image" in
     ANACONDA_PYTHON_VERSION=3.9
     CONDA_CMAKE=yes
     ;;
-  pytorch-linux-jammy-cuda11.8-cudnn8-py3.9-linter)
+  pytorch-linux-jammy-cuda11.8-cudnn9-py3.9-linter)
     ANACONDA_PYTHON_VERSION=3.9
     CUDA_VERSION=11.8
     CONDA_CMAKE=yes
@@ -447,7 +447,7 @@ tmp_tag=$(basename "$(mktemp -u)" | tr '[:upper:]' '[:lower:]')
 #when using cudnn version 8 install it separately from cuda
 if [[ "$image" == *cuda* && ${OS} == "ubuntu" ]]; then
   IMAGE_NAME="nvidia/cuda:${CUDA_VERSION}-cudnn${CUDNN_VERSION}-devel-ubuntu${UBUNTU_VERSION}"
-  if [[ ${CUDNN_VERSION} == 8 ]]; then
+  if [[ ${CUDNN_VERSION} == 9 ]]; then
     IMAGE_NAME="nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}"
   fi
 fi
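Note: the hunk above flips the base-image rule. Since cuDNN 9 is now installed separately by the CI scripts, builds with CUDNN_VERSION=9 start from the plain CUDA devel image. A minimal sketch of that rule (illustrative Python, not the repository's code):

```python
# Sketch of the image-selection rule above; the helper name is illustrative.
def base_image(cuda_version: str, cudnn_version: str, ubuntu_version: str) -> str:
    if cudnn_version == "9":
        # cuDNN 9 is installed later from NVIDIA's redist archive,
        # so start from the plain CUDA devel image.
        return f"nvidia/cuda:{cuda_version}-devel-ubuntu{ubuntu_version}"
    # Otherwise use the image that bundles cuDNN.
    return f"nvidia/cuda:{cuda_version}-cudnn{cudnn_version}-devel-ubuntu{ubuntu_version}"

assert base_image("12.4.0", "9", "20.04") == "nvidia/cuda:12.4.0-devel-ubuntu20.04"
```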
@@ -499,7 +499,7 @@ docker build \
   "$@" \
   .

-# NVIDIA dockers for RC releases use tag names like `11.0-cudnn8-devel-ubuntu18.04-rc`,
+# NVIDIA dockers for RC releases use tag names like `11.0-cudnn9-devel-ubuntu18.04-rc`,
 # for this case we will set UBUNTU_VERSION to `18.04-rc` so that the Dockerfile could
 # find the correct image. As a result, here we have to replace the
 # "$UBUNTU_VERSION" == "18.04-rc"
@@ -1 +1 @@
-bbe6246e37d8aa791c67daaf9d9d61b26c9ccfdc
+01cbe5045a6898c9a925f01435c8277b2fe6afcc
@@ -3,7 +3,7 @@
 set -ex

 install_ubuntu() {
-  # NVIDIA dockers for RC releases use tag names like `11.0-cudnn8-devel-ubuntu18.04-rc`,
+  # NVIDIA dockers for RC releases use tag names like `11.0-cudnn9-devel-ubuntu18.04-rc`,
   # for this case we will set UBUNTU_VERSION to `18.04-rc` so that the Dockerfile could
   # find the correct image. As a result, here we have to check for
   # "$UBUNTU_VERSION" == "18.04"*
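The comment above is why the check uses the glob "18.04"* rather than an exact string comparison: RC images set UBUNTU_VERSION to values like 18.04-rc. A hypothetical sketch of the distinction:

```python
# Hypothetical illustration: the bash glob "18.04"* accepts the RC suffix,
# while an exact comparison against "18.04" would not.
def matches_bionic(ubuntu_version: str) -> bool:
    return ubuntu_version.startswith("18.04")  # mirrors [[ $v == "18.04"* ]]

assert matches_bionic("18.04") and matches_bionic("18.04-rc")
assert not matches_bionic("20.04")
```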
@@ -1,23 +1,18 @@
 #!/bin/bash

-if [[ ${CUDNN_VERSION} == 8 ]]; then
+if [[ -n "${CUDNN_VERSION}" ]]; then
     # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
     mkdir tmp_cudnn
     pushd tmp_cudnn
-    if [[ ${CUDA_VERSION:0:4} == "12.4" ]]; then
-        CUDNN_NAME="cudnn-linux-x86_64-8.9.7.29_cuda12-archive"
-        curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/${CUDNN_NAME}.tar.xz
-    elif [[ ${CUDA_VERSION:0:4} == "12.1" ]]; then
-        CUDNN_NAME="cudnn-linux-x86_64-8.9.2.26_cuda12-archive"
-        curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/${CUDNN_NAME}.tar.xz
-    elif [[ ${CUDA_VERSION:0:4} == "11.8" ]]; then
-        CUDNN_NAME="cudnn-linux-x86_64-8.7.0.84_cuda11-archive"
-        curl --retry 3 -OLs https://developer.download.nvidia.com/compute/redist/cudnn/v8.7.0/local_installers/11.8/${CUDNN_NAME}.tar.xz
+    if [[ ${CUDA_VERSION:0:2} == "12" ]]; then
+        CUDNN_NAME="cudnn-linux-x86_64-9.1.0.70_cuda12-archive"
+    elif [[ ${CUDA_VERSION:0:2} == "11" ]]; then
+        CUDNN_NAME="cudnn-linux-x86_64-9.1.0.70_cuda11-archive"
     else
         print "Unsupported CUDA version ${CUDA_VERSION}"
         exit 1
     fi

+    curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/${CUDNN_NAME}.tar.xz
     tar xf ${CUDNN_NAME}.tar.xz
     cp -a ${CUDNN_NAME}/include/* /usr/local/cuda/include/
     cp -a ${CUDNN_NAME}/lib/* /usr/local/cuda/lib64/
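The rewrite collapses the per-CUDA-minor branches into one cuDNN 9.1.0.70 archive per CUDA major version, downloaded from a single redist URL. A sketch of the new selection logic (Python for clarity; the archive names come from the hunk above):

```python
# Sketch of the selection above: one cuDNN 9.1.0.70 redist archive per CUDA major.
def cudnn_archive(cuda_version: str) -> str:
    major = cuda_version[:2]  # mirrors ${CUDA_VERSION:0:2}
    if major not in ("11", "12"):
        raise SystemExit(f"Unsupported CUDA version {cuda_version}")
    return f"cudnn-linux-x86_64-9.1.0.70_cuda{major}-archive"

assert cudnn_archive("12.4.0") == "cudnn-linux-x86_64-9.1.0.70_cuda12-archive"
assert cudnn_archive("11.8.0") == "cudnn-linux-x86_64-9.1.0.70_cuda11-archive"
```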
@@ -139,7 +139,7 @@ COPY --from=pytorch/llvm:9.0.1 /opt/llvm /opt/llvm
 ARG CUDNN_VERSION
 ARG CUDA_VERSION
 COPY ./common/install_cudnn.sh install_cudnn.sh
-RUN if [ "${CUDNN_VERSION}" -eq 8 ]; then bash install_cudnn.sh; fi
+RUN if [ -n "${CUDNN_VERSION}" ]; then bash install_cudnn.sh; fi
 RUN rm install_cudnn.sh

 # Install CUSPARSELT
@@ -368,7 +368,7 @@ test_inductor_cpp_wrapper_abi_compatible() {

   echo "Testing Inductor cpp wrapper mode with TORCHINDUCTOR_ABI_COMPATIBLE=1"
   # cpu stack allocation causes segfault and needs more investigation
-  python test/run_test.py --include inductor/test_cpu_cpp_wrapper
+  PYTORCH_TESTING_DEVICE_ONLY_FOR="" python test/run_test.py --include inductor/test_cpu_cpp_wrapper
   python test/run_test.py --include inductor/test_cuda_cpp_wrapper

   TORCHINDUCTOR_CPP_WRAPPER=1 python benchmarks/dynamo/timm_models.py --device cuda --accuracy --amp \
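Prefixing the CPU run with PYTORCH_TESTING_DEVICE_ONLY_FOR="" clears the device filter, so the CPU wrapper tests still run on a CUDA CI machine. A simplified model of that gating (assumed behavior, not run_test.py's actual implementation):

```python
import os

# Simplified model of the device filter: a non-empty value restricts the
# devices tests are instantiated for; an empty value means no restriction.
def enabled_devices(available: list) -> list:
    only_for = os.environ.get("PYTORCH_TESTING_DEVICE_ONLY_FOR", "")
    if only_for:
        return [d for d in available if d in only_for.split(",")]
    return available

os.environ["PYTORCH_TESTING_DEVICE_ONLY_FOR"] = ""
assert enabled_devices(["cpu", "cuda"]) == ["cpu", "cuda"]
```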
.github/actionlint.yaml (vendored): 11 lines changed
@@ -16,6 +16,17 @@ self-hosted-runner:
   - linux.8xlarge.nvidia.gpu
   - linux.16xlarge.nvidia.gpu
   - linux.g5.4xlarge.nvidia.gpu
+  # Organization-wide AWS Linux Runners on Linux Foundation account
+  - lf.linux.large
+  - lf.linux.2xlarge
+  - lf.linux.4xlarge
+  - lf.linux.12xlarge
+  - lf.linux.24xlarge
+  - lf.linux.arm64.2xlarge
+  - lf.linux.4xlarge.nvidia.gpu
+  - lf.linux.8xlarge.nvidia.gpu
+  - lf.linux.16xlarge.nvidia.gpu
+  - lf.linux.g5.4xlarge.nvidia.gpu
   # Repo-specific IBM hosted S390x runner
   - linux.s390x
   # Organization wide AWS Windows runners
.github/pytorch-probot.yml (vendored): 1 line changed
@@ -8,6 +8,7 @@ ciflow_push_tags:
 - ciflow/inductor
 - ciflow/inductor-perf-compare
 - ciflow/inductor-micro-benchmark
+- ciflow/inductor-cu124
 - ciflow/linux-aarch64
 - ciflow/mps
 - ciflow/nightly
.github/scripts/generate_binary_build_matrix.py (vendored): 12 lines changed
@@ -19,7 +19,7 @@ CUDA_ARCHES = ["11.8", "12.1", "12.4"]
 CUDA_ARCHES_FULL_VERSION = {"11.8": "11.8.0", "12.1": "12.1.1", "12.4": "12.4.0"}


-CUDA_ARCHES_CUDNN_VERSION = {"11.8": "8", "12.1": "8", "12.4": "8"}
+CUDA_ARCHES_CUDNN_VERSION = {"11.8": "9", "12.1": "9", "12.4": "9"}


 ROCM_ARCHES = ["6.0", "6.1"]
@@ -42,7 +42,7 @@ PYTORCH_EXTRA_INSTALL_REQUIREMENTS = {
         "nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | "  # noqa: B950
         "nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | "
         "nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' | "
-        "nvidia-cudnn-cu11==8.7.0.84; platform_system == 'Linux' and platform_machine == 'x86_64' | "
+        "nvidia-cudnn-cu11==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | "
         "nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' | "
         "nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' | "
         "nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | "
@@ -55,7 +55,7 @@ PYTORCH_EXTRA_INSTALL_REQUIREMENTS = {
         "nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | "  # noqa: B950
         "nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | "
         "nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | "
-        "nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | "
+        "nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | "
         "nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | "
         "nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | "
         "nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | "
@@ -68,7 +68,7 @@ PYTORCH_EXTRA_INSTALL_REQUIREMENTS = {
         "nvidia-cuda-nvrtc-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | "
         "nvidia-cuda-runtime-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | "
         "nvidia-cuda-cupti-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | "
-        "nvidia-cudnn-cu12==8.9.7.29; platform_system == 'Linux' and platform_machine == 'x86_64' | "
+        "nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | "
         "nvidia-cublas-cu12==12.4.2.65; platform_system == 'Linux' and platform_machine == 'x86_64' | "
         "nvidia-cufft-cu12==11.2.0.44; platform_system == 'Linux' and platform_machine == 'x86_64' | "
         "nvidia-curand-cu12==10.3.5.119; platform_system == 'Linux' and platform_machine == 'x86_64' | "
@@ -347,6 +347,10 @@ def generate_wheels_matrix(
     for python_version in python_versions:
         for arch_version in arches:
             gpu_arch_type = arch_type(arch_version)
+            # Disable py3.12 builds for ROCm because of triton dependency
+            # on llnl-hatchet, which doesn't have py3.12 wheels available
+            if gpu_arch_type == "rocm" and python_version == "3.12":
+                continue
             gpu_arch_version = (
                 ""
                 if arch_version == "cpu"
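The CUDA_ARCHES_CUDNN_VERSION bump and the per-arch nvidia-cudnn pins above move every CUDA build to cuDNN 9.1.0.70. A hedged sketch of how such a mapping can be turned into a wheel requirement (the helper is illustrative, not the script's actual API):

```python
CUDA_ARCHES_CUDNN_VERSION = {"11.8": "9", "12.1": "9", "12.4": "9"}

# Illustrative helper: derive the nvidia-cudnn wheel pin for a CUDA arch.
def cudnn_requirement(cuda_arch: str, pin: str = "9.1.0.70") -> str:
    cu_major = cuda_arch.split(".")[0]
    # The pin's major version must agree with the mapping above.
    assert pin.startswith(CUDA_ARCHES_CUDNN_VERSION[cuda_arch] + ".")
    return f"nvidia-cudnn-cu{cu_major}=={pin}"

assert cudnn_requirement("12.4") == "nvidia-cudnn-cu12==9.1.0.70"
assert cudnn_requirement("11.8") == "nvidia-cudnn-cu11==9.1.0.70"
```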
.github/scripts/test_trymerge.py (vendored): 12 lines changed
@@ -773,13 +773,13 @@ class TestBypassFailures(TestCase):
                 # than the one on the base commit. This should still count as broken trunk
                 "pr_num": 104214,
                 "related_failure_count": 0,
-                "unrelated_failure_count": 1,
+                "flaky_or_broken_trunk": 1,
             },
             {
                 # This PR had one broken trunk failure and it used ghstack
                 "pr_num": 105145,
                 "related_failure_count": 0,
-                "unrelated_failure_count": 1,
+                "flaky_or_broken_trunk": 1,
             },
             {
                 # The failure on the merge base was retried successfully and
@@ -788,20 +788,20 @@ class TestBypassFailures(TestCase):
                 # be used to detect broken trunk
                 "pr_num": 107160,
                 "related_failure_count": 0,
-                "unrelated_failure_count": 4,
+                "flaky_or_broken_trunk": 1,
             },
             {
                 # This PR used Dr.CI broken trunk classification
                 "pr_num": 111253,
                 "related_failure_count": 1,
-                "unrelated_failure_count": 2,
+                "flaky_or_broken_trunk": 1,
             },
         ]

         for case in test_cases:
             pr_num = case["pr_num"]
             related_failure_count = case["related_failure_count"]
-            unrelated_failure_count = case["unrelated_failure_count"]
+            flaky_or_broken_trunk = case["flaky_or_broken_trunk"]

             pr = GitHubPR("pytorch", "pytorch", pr_num)
             checks = pr.get_checkrun_conclusions()
@@ -823,7 +823,7 @@ class TestBypassFailures(TestCase):
             )
             self.assertTrue(len(pending) == 0)
             self.assertTrue(
-                len(failed) == unrelated_failure_count + related_failure_count
+                len(failed) == flaky_or_broken_trunk + related_failure_count
             )

     def test_ignore_current(self, *args: Any) -> None:
.github/scripts/trymerge.py (vendored): 30 lines changed
@@ -2027,10 +2027,8 @@ def categorize_checks(
     pending_checks: List[Tuple[str, Optional[str], Optional[int]]] = []
     failed_checks: List[Tuple[str, Optional[str], Optional[int]]] = []

-    # ok_failed_checks is used with ok_failed_checks_threshold while ignorable_failed_checks
-    # is used to keep track of all ignorable failures when saving the merge record on Rockset
-    ok_failed_checks: List[Tuple[str, Optional[str], Optional[int]]] = []
-    ignorable_failed_checks: Dict[str, List[Any]] = defaultdict(list)
+    # failed_checks_categorization is used to keep track of all ignorable failures when saving the merge record on Rockset
+    failed_checks_categorization: Dict[str, List[Any]] = defaultdict(list)

     # If required_checks is not set or empty, consider all names are relevant
     relevant_checknames = [
@@ -2058,36 +2056,38 @@ def categorize_checks(
             continue
         elif not is_passing_status(check_runs[checkname].status):
             target = (
-                ignorable_failed_checks[classification]
+                failed_checks_categorization[classification]
                 if classification
                 in ("IGNORE_CURRENT_CHECK", "BROKEN_TRUNK", "FLAKY", "UNSTABLE")
                 else failed_checks
             )
             target.append((checkname, url, job_id))

-            if classification in ("BROKEN_TRUNK", "FLAKY", "UNSTABLE"):
-                ok_failed_checks.append((checkname, url, job_id))
+    flaky_or_broken_trunk = (
+        failed_checks_categorization["BROKEN_TRUNK"]
+        + failed_checks_categorization["FLAKY"]
+    )

-    if ok_failed_checks:
+    if flaky_or_broken_trunk:
         warn(
-            f"The following {len(ok_failed_checks)} checks failed but were likely due flakiness or broken trunk: "
-            + ", ".join([x[0] for x in ok_failed_checks])
+            f"The following {len(flaky_or_broken_trunk)} checks failed but were likely due flakiness or broken trunk: "
+            + ", ".join([x[0] for x in flaky_or_broken_trunk])
             + (
                 f" but this is greater than the threshold of {ok_failed_checks_threshold} so merge will fail"
                 if ok_failed_checks_threshold is not None
-                and len(ok_failed_checks) > ok_failed_checks_threshold
+                and len(flaky_or_broken_trunk) > ok_failed_checks_threshold
                 else ""
             )
         )

     if (
         ok_failed_checks_threshold is not None
-        and len(ok_failed_checks) > ok_failed_checks_threshold
+        and len(flaky_or_broken_trunk) > ok_failed_checks_threshold
     ):
-        failed_checks = failed_checks + ok_failed_checks
+        failed_checks = failed_checks + flaky_or_broken_trunk

-    # The list of ignorable_failed_checks is returned so that it can be saved into the Rockset merge record
-    return (pending_checks, failed_checks, ignorable_failed_checks)
+    # The list of failed_checks_categorization is returned so that it can be saved into the Rockset merge record
+    return (pending_checks, failed_checks, failed_checks_categorization)


 def merge(
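Condensed, the new categorize_checks flow pools BROKEN_TRUNK and FLAKY failures and only tolerates them while their count stays at or below the threshold; past it they are folded back into failed_checks. A sketch under those assumptions (names mirror the hunk above; the function boundary is invented):

```python
from collections import defaultdict
from typing import Any, Dict, List, Optional, Tuple

# Sketch of the pooling-plus-threshold step from the hunk above.
def pool_ok_failures(
    failed_checks_categorization: Dict[str, List[Any]],
    ok_failed_checks_threshold: Optional[int],
) -> Tuple[List[Any], bool]:
    flaky_or_broken_trunk = (
        failed_checks_categorization["BROKEN_TRUNK"]
        + failed_checks_categorization["FLAKY"]
    )
    over_threshold = (
        ok_failed_checks_threshold is not None
        and len(flaky_or_broken_trunk) > ok_failed_checks_threshold
    )
    return flaky_or_broken_trunk, over_threshold

cat = defaultdict(list, {"BROKEN_TRUNK": ["job-a"], "FLAKY": ["job-b"]})
pooled, over = pool_ok_failures(cat, 1)
assert pooled == ["job-a", "job-b"] and over
```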
.github/workflows/docker-builds.yml (vendored): 18 lines changed
@@ -38,19 +38,19 @@ jobs:
       matrix:
         runner: [linux.12xlarge]
         docker-image-name: [
-          pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9,
-          pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9-inductor-benchmarks,
-          pytorch-linux-focal-cuda12.4-cudnn8-py3.12-gcc9-inductor-benchmarks,
-          pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9,
-          pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks,
-          pytorch-linux-focal-cuda12.1-cudnn8-py3.12-gcc9-inductor-benchmarks,
-          pytorch-linux-focal-cuda11.8-cudnn8-py3-gcc9,
+          pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9,
+          pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9-inductor-benchmarks,
+          pytorch-linux-focal-cuda12.4-cudnn9-py3.12-gcc9-inductor-benchmarks,
+          pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9,
+          pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9-inductor-benchmarks,
+          pytorch-linux-focal-cuda12.1-cudnn9-py3.12-gcc9-inductor-benchmarks,
+          pytorch-linux-focal-cuda11.8-cudnn9-py3-gcc9,
           pytorch-linux-focal-py3.8-clang10,
           pytorch-linux-focal-py3.11-clang10,
           pytorch-linux-focal-py3.12-clang10,
           pytorch-linux-focal-rocm-n-1-py3,
           pytorch-linux-focal-rocm-n-py3,
-          pytorch-linux-jammy-cuda11.8-cudnn8-py3.8-clang12,
+          pytorch-linux-jammy-cuda11.8-cudnn9-py3.8-clang12,
           pytorch-linux-focal-py3-clang9-android-ndk-r21e,
           pytorch-linux-jammy-py3.8-gcc11,
           pytorch-linux-jammy-py3.8-gcc11-inductor-benchmarks,
@@ -58,7 +58,7 @@ jobs:
           pytorch-linux-jammy-py3-clang15-asan,
           pytorch-linux-focal-py3-clang10-onnx,
           pytorch-linux-focal-linter,
-          pytorch-linux-jammy-cuda11.8-cudnn8-py3.9-linter,
+          pytorch-linux-jammy-cuda11.8-cudnn9-py3.9-linter,
           pytorch-linux-jammy-py3-clang12-executorch
         ]
         include:
.github/workflows/docker-release.yml (vendored): 7 lines changed
@@ -149,3 +149,10 @@ jobs:
     - name: Teardown Linux
       uses: pytorch/test-infra/.github/actions/teardown-linux@main
       if: always()
+
+  validate:
+    needs: build
+    uses: pytorch/builder/.github/workflows/validate-docker-images.yml@main
+    with:
+      channel: nightly
+      ref: main
.github/workflows/generated-linux-aarch64-binary-manywheel-nightly.yml (generated, vendored): 10 lines changed
@@ -54,7 +54,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_8-cpu-aarch64
      build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
   manywheel-py3_8-cpu-aarch64-test:  # Testing
@@ -162,7 +162,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_9-cpu-aarch64
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
   manywheel-py3_9-cpu-aarch64-test:  # Testing
@@ -270,7 +270,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_10-cpu-aarch64
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
   manywheel-py3_10-cpu-aarch64-test:  # Testing
@@ -378,7 +378,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_11-cpu-aarch64
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
   manywheel-py3_11-cpu-aarch64-test:  # Testing
@@ -486,7 +486,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_12-cpu-aarch64
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
   manywheel-py3_12-cpu-aarch64-test:  # Testing
.github/workflows/generated-linux-binary-manywheel-main.yml (generated, vendored): 6 lines changed
@@ -48,7 +48,7 @@ jobs:
       DESIRED_PYTHON: "3.8"
       build_name: manywheel-py3_8-cuda11_8
       build_environment: linux-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu11==8.7.0.84; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu11==11.4.1.48; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu11==11.7.5.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu11==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu11==11.8.86; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu11==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu11==11.4.1.48; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu11==11.7.5.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu11==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu11==11.8.86; platform_system == 'Linux' and platform_machine == 'x86_64'
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
   manywheel-py3_8-cuda11_8-test:  # Testing
@@ -88,7 +88,7 @@ jobs:
       DESIRED_PYTHON: "3.8"
       build_name: manywheel-py3_8-cuda12_1
       build_environment: linux-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
   manywheel-py3_8-cuda12_1-test:  # Testing
@@ -128,7 +128,7 @@ jobs:
       DESIRED_PYTHON: "3.8"
       build_name: manywheel-py3_8-cuda12_4
       build_environment: linux-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.7.29; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.4.2.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.2.0.44; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.5.119; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.6.0.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.3.0.142; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.4.2.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.2.0.44; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.5.119; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.6.0.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.3.0.142; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64'
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
   manywheel-py3_8-cuda12_4-test:  # Testing
.github/workflows/generated-linux-binary-manywheel-nightly.yml (generated, vendored): 236 lines changed
@@ -174,7 +174,7 @@ jobs:
       DESIRED_PYTHON: "3.8"
       build_name: manywheel-py3_8-cuda11_8
       build_environment: linux-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu11==8.7.0.84; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu11==11.4.1.48; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu11==11.7.5.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu11==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu11==11.8.86; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu11==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu11==11.4.1.48; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu11==11.7.5.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu11==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu11==11.8.86; platform_system == 'Linux' and platform_machine == 'x86_64'
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
   manywheel-py3_8-cuda11_8-test:  # Testing
@@ -237,7 +237,7 @@ jobs:
       DESIRED_PYTHON: "3.8"
       build_name: manywheel-py3_8-cuda12_1
       build_environment: linux-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
   manywheel-py3_8-cuda12_1-test:  # Testing
@@ -300,7 +300,7 @@ jobs:
       DESIRED_PYTHON: "3.8"
       build_name: manywheel-py3_8-cuda12_4
       build_environment: linux-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.7.29; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.4.2.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.2.0.44; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.5.119; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.6.0.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.3.0.142; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.4.2.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.2.0.44; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.5.119; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.6.0.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.3.0.142; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64'
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
   manywheel-py3_8-cuda12_4-test:  # Testing
@@ -690,7 +690,7 @@ jobs:
       DESIRED_PYTHON: "3.9"
       build_name: manywheel-py3_9-cuda11_8
       build_environment: linux-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu11==8.7.0.84; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu11==11.4.1.48; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu11==11.7.5.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu11==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu11==11.8.86; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu11==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu11==11.4.1.48; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu11==11.7.5.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu11==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu11==11.8.86; platform_system == 'Linux' and platform_machine == 'x86_64'
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
   manywheel-py3_9-cuda11_8-test:  # Testing
@@ -753,7 +753,7 @@ jobs:
       DESIRED_PYTHON: "3.9"
       build_name: manywheel-py3_9-cuda12_1
       build_environment: linux-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
   manywheel-py3_9-cuda12_1-test:  # Testing
@@ -816,7 +816,7 @@ jobs:
       DESIRED_PYTHON: "3.9"
       build_name: manywheel-py3_9-cuda12_4
       build_environment: linux-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.7.29; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.4.2.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.2.0.44; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.5.119; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.6.0.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.3.0.142; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.4.2.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.2.0.44; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.5.119; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.6.0.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.3.0.142; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64'
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
   manywheel-py3_9-cuda12_4-test:  # Testing
@@ -1206,7 +1206,7 @@ jobs:
       DESIRED_PYTHON: "3.10"
       build_name: manywheel-py3_10-cuda11_8
       build_environment: linux-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu11==8.7.0.84; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu11==11.4.1.48; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu11==11.7.5.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu11==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu11==11.8.86; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu11==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu11==11.4.1.48; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu11==11.7.5.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu11==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu11==11.8.86; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
secrets:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
manywheel-py3_10-cuda11_8-test: # Testing
|
||||
@ -1269,7 +1269,7 @@ jobs:
|
||||
DESIRED_PYTHON: "3.10"
|
||||
build_name: manywheel-py3_10-cuda12_1
|
||||
build_environment: linux-binary-manywheel
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
secrets:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
manywheel-py3_10-cuda12_1-test: # Testing
|
||||
@ -1332,7 +1332,7 @@ jobs:
|
||||
DESIRED_PYTHON: "3.10"
|
||||
build_name: manywheel-py3_10-cuda12_4
|
||||
build_environment: linux-binary-manywheel
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.7.29; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.4.2.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.2.0.44; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.5.119; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.6.0.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.3.0.142; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.4.2.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.2.0.44; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.5.119; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.6.0.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.3.0.142; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
secrets:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
manywheel-py3_10-cuda12_4-test: # Testing
|
||||
@ -1722,7 +1722,7 @@ jobs:
|
||||
DESIRED_PYTHON: "3.11"
|
||||
build_name: manywheel-py3_11-cuda11_8
|
||||
build_environment: linux-binary-manywheel
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu11==8.7.0.84; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu11==11.4.1.48; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu11==11.7.5.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu11==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu11==11.8.86; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu11==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu11==11.4.1.48; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu11==11.7.5.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu11==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu11==11.8.86; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
secrets:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
manywheel-py3_11-cuda11_8-test: # Testing
|
||||
@ -1785,7 +1785,7 @@ jobs:
|
||||
DESIRED_PYTHON: "3.11"
|
||||
build_name: manywheel-py3_11-cuda12_1
|
||||
build_environment: linux-binary-manywheel
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
secrets:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
manywheel-py3_11-cuda12_1-test: # Testing
|
||||
@ -1848,7 +1848,7 @@ jobs:
|
||||
DESIRED_PYTHON: "3.11"
|
||||
build_name: manywheel-py3_11-cuda12_4
|
||||
build_environment: linux-binary-manywheel
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.7.29; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.4.2.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.2.0.44; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.5.119; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.6.0.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.3.0.142; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.4.2.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.2.0.44; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.5.119; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.6.0.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.3.0.142; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
secrets:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
manywheel-py3_11-cuda12_4-test: # Testing
|
||||
@ -2238,7 +2238,7 @@ jobs:
|
||||
DESIRED_PYTHON: "3.12"
|
||||
build_name: manywheel-py3_12-cuda11_8
|
||||
build_environment: linux-binary-manywheel
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu11==8.7.0.84; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu11==11.4.1.48; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu11==11.7.5.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu11==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu11==11.8.86; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu11==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu11==11.4.1.48; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu11==11.7.5.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu11==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu11==11.8.86; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
secrets:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
manywheel-py3_12-cuda11_8-test: # Testing
|
||||
@ -2301,7 +2301,7 @@ jobs:
|
||||
DESIRED_PYTHON: "3.12"
|
||||
build_name: manywheel-py3_12-cuda12_1
|
||||
build_environment: linux-binary-manywheel
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
secrets:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
manywheel-py3_12-cuda12_1-test: # Testing
|
||||
@ -2364,7 +2364,7 @@ jobs:
|
||||
DESIRED_PYTHON: "3.12"
|
||||
build_name: manywheel-py3_12-cuda12_4
|
||||
build_environment: linux-binary-manywheel
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.7.29; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.4.2.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.2.0.44; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.5.119; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.6.0.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.3.0.142; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.4.2.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.2.0.44; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.5.119; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.6.0.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.3.0.142; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
secrets:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
manywheel-py3_12-cuda12_4-test: # Testing
|
||||
@@ -2410,209 +2410,3 @@ jobs:
conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
uses: ./.github/workflows/_binary-upload.yml
-
-manywheel-py3_12-rocm6_0-build:
-if: ${{ github.repository_owner == 'pytorch' }}
-uses: ./.github/workflows/_binary-build-linux.yml
-with:
-PYTORCH_ROOT: /pytorch
-BUILDER_ROOT: /builder
-PACKAGE_TYPE: manywheel
-# TODO: This is a legacy variable that we eventually want to get rid of in
-# favor of GPU_ARCH_VERSION
-DESIRED_CUDA: rocm6.0
-GPU_ARCH_VERSION: 6.0
-GPU_ARCH_TYPE: rocm
-DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
-DESIRED_PYTHON: "3.12"
-build_name: manywheel-py3_12-rocm6_0
-build_environment: linux-binary-manywheel
-secrets:
-github-token: ${{ secrets.GITHUB_TOKEN }}
-manywheel-py3_12-rocm6_0-test: # Testing
-if: ${{ github.repository_owner == 'pytorch' }}
-needs: manywheel-py3_12-rocm6_0-build
-runs-on: linux.rocm.gpu
-timeout-minutes: 240
-env:
-PYTORCH_ROOT: /pytorch
-BUILDER_ROOT: /builder
-PACKAGE_TYPE: manywheel
-# TODO: This is a legacy variable that we eventually want to get rid of in
-# favor of GPU_ARCH_VERSION
-DESIRED_CUDA: rocm6.0
-GPU_ARCH_VERSION: 6.0
-GPU_ARCH_TYPE: rocm
-SKIP_ALL_TESTS: 1
-DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
-DESIRED_PYTHON: "3.12"
-steps:
-- name: Setup ROCm
-uses: ./.github/actions/setup-rocm
-- uses: actions/download-artifact@v3
-name: Download Build Artifacts
-with:
-name: manywheel-py3_12-rocm6_0
-path: "${{ runner.temp }}/artifacts/"
-- name: Checkout PyTorch
-uses: malfet/checkout@silent-checkout
-with:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
-submodules: recursive
-path: pytorch
-quiet-checkout: true
-- name: Clean PyTorch checkout
-run: |
-# Remove any artifacts from the previous checkouts
-git clean -fxd
-working-directory: pytorch
-- name: Checkout pytorch/builder
-uses: malfet/checkout@silent-checkout
-with:
-ref: main
-submodules: recursive
-repository: pytorch/builder
-path: builder
-quiet-checkout: true
-- name: Clean pytorch/builder checkout
-run: |
-# Remove any artifacts from the previous checkouts
-git clean -fxd
-working-directory: builder
-- name: ROCm set GPU_FLAG
-run: |
-echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri --group-add video --group-add daemon" >> "${GITHUB_ENV}"
-- name: Pull Docker image
-uses: pytorch/test-infra/.github/actions/pull-docker-image@main
-with:
-docker-image: pytorch/manylinux-builder:rocm6.0-main
-- name: Test Pytorch binary
-uses: ./pytorch/.github/actions/test-pytorch-binary
-- name: Teardown ROCm
-uses: ./.github/actions/teardown-rocm
-manywheel-py3_12-rocm6_0-upload: # Uploading
-if: ${{ github.repository_owner == 'pytorch' }}
-permissions:
-id-token: write
-contents: read
-needs: manywheel-py3_12-rocm6_0-test
-with:
-PYTORCH_ROOT: /pytorch
-BUILDER_ROOT: /builder
-PACKAGE_TYPE: manywheel
-# TODO: This is a legacy variable that we eventually want to get rid of in
-# favor of GPU_ARCH_VERSION
-DESIRED_CUDA: rocm6.0
-GPU_ARCH_VERSION: 6.0
-GPU_ARCH_TYPE: rocm
-DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
-DESIRED_PYTHON: "3.12"
-build_name: manywheel-py3_12-rocm6_0
-secrets:
-github-token: ${{ secrets.GITHUB_TOKEN }}
-conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
-conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
-uses: ./.github/workflows/_binary-upload.yml
-
-manywheel-py3_12-rocm6_1-build:
-if: ${{ github.repository_owner == 'pytorch' }}
-uses: ./.github/workflows/_binary-build-linux.yml
-with:
-PYTORCH_ROOT: /pytorch
-BUILDER_ROOT: /builder
-PACKAGE_TYPE: manywheel
-# TODO: This is a legacy variable that we eventually want to get rid of in
-# favor of GPU_ARCH_VERSION
-DESIRED_CUDA: rocm6.1
-GPU_ARCH_VERSION: 6.1
-GPU_ARCH_TYPE: rocm
-DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
-DESIRED_PYTHON: "3.12"
-build_name: manywheel-py3_12-rocm6_1
-build_environment: linux-binary-manywheel
-secrets:
-github-token: ${{ secrets.GITHUB_TOKEN }}
-manywheel-py3_12-rocm6_1-test: # Testing
-if: ${{ github.repository_owner == 'pytorch' }}
-needs: manywheel-py3_12-rocm6_1-build
-runs-on: linux.rocm.gpu
-timeout-minutes: 240
-env:
-PYTORCH_ROOT: /pytorch
-BUILDER_ROOT: /builder
-PACKAGE_TYPE: manywheel
-# TODO: This is a legacy variable that we eventually want to get rid of in
-# favor of GPU_ARCH_VERSION
-DESIRED_CUDA: rocm6.1
-GPU_ARCH_VERSION: 6.1
-GPU_ARCH_TYPE: rocm
-SKIP_ALL_TESTS: 1
-DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
-DESIRED_PYTHON: "3.12"
-steps:
-- name: Setup ROCm
-uses: ./.github/actions/setup-rocm
-- uses: actions/download-artifact@v3
-name: Download Build Artifacts
-with:
-name: manywheel-py3_12-rocm6_1
-path: "${{ runner.temp }}/artifacts/"
-- name: Checkout PyTorch
-uses: malfet/checkout@silent-checkout
-with:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
-submodules: recursive
-path: pytorch
-quiet-checkout: true
-- name: Clean PyTorch checkout
-run: |
-# Remove any artifacts from the previous checkouts
-git clean -fxd
-working-directory: pytorch
-- name: Checkout pytorch/builder
-uses: malfet/checkout@silent-checkout
-with:
-ref: main
-submodules: recursive
-repository: pytorch/builder
-path: builder
-quiet-checkout: true
-- name: Clean pytorch/builder checkout
-run: |
-# Remove any artifacts from the previous checkouts
-git clean -fxd
-working-directory: builder
-- name: ROCm set GPU_FLAG
-run: |
-echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri --group-add video --group-add daemon" >> "${GITHUB_ENV}"
-- name: Pull Docker image
-uses: pytorch/test-infra/.github/actions/pull-docker-image@main
-with:
-docker-image: pytorch/manylinux-builder:rocm6.1-main
-- name: Test Pytorch binary
-uses: ./pytorch/.github/actions/test-pytorch-binary
-- name: Teardown ROCm
-uses: ./.github/actions/teardown-rocm
-manywheel-py3_12-rocm6_1-upload: # Uploading
-if: ${{ github.repository_owner == 'pytorch' }}
-permissions:
-id-token: write
-contents: read
-needs: manywheel-py3_12-rocm6_1-test
-with:
-PYTORCH_ROOT: /pytorch
-BUILDER_ROOT: /builder
-PACKAGE_TYPE: manywheel
-# TODO: This is a legacy variable that we eventually want to get rid of in
-# favor of GPU_ARCH_VERSION
-DESIRED_CUDA: rocm6.1
-GPU_ARCH_VERSION: 6.1
-GPU_ARCH_TYPE: rocm
-DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
-DESIRED_PYTHON: "3.12"
-build_name: manywheel-py3_12-rocm6_1
-secrets:
-github-token: ${{ secrets.GITHUB_TOKEN }}
-conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
-conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
-uses: ./.github/workflows/_binary-upload.yml
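Each PYTORCH_EXTRA_INSTALL_REQUIREMENTS entry above is an ordinary pip requirement guarded by a PEP 508 environment marker, which is why the same string can be baked into every nightly wheel: pip only resolves the NVIDIA packages when the marker is true, i.e. on Linux/x86_64. A minimal sketch of that evaluation, assuming the third-party packaging library is available (pip install packaging):

```python
# Minimal sketch: evaluate the PEP 508 marker used on the cuDNN pin above.
from packaging.requirements import Requirement

req = Requirement(
    "nvidia-cudnn-cu12==9.1.0.70; "
    "platform_system == 'Linux' and platform_machine == 'x86_64'"
)

# Prints True on a Linux x86_64 interpreter; False on macOS/Windows or
# non-x86_64 machines, where pip would skip the dependency entirely.
print(req.name, str(req.specifier), req.marker.evaluate())
```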
10 .github/workflows/generated-linux-s390x-binary-manywheel-nightly.yml (generated, vendored)
@@ -54,7 +54,7 @@ jobs:
ALPINE_IMAGE: "docker.io/s390x/alpine"
build_name: manywheel-py3_8-cpu-s390x
build_environment: linux-s390x-binary-manywheel
-PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
manywheel-py3_8-cpu-s390x-test: # Testing
@@ -117,7 +117,7 @@ jobs:
ALPINE_IMAGE: "docker.io/s390x/alpine"
build_name: manywheel-py3_9-cpu-s390x
build_environment: linux-s390x-binary-manywheel
-PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
manywheel-py3_9-cpu-s390x-test: # Testing
@@ -180,7 +180,7 @@ jobs:
ALPINE_IMAGE: "docker.io/s390x/alpine"
build_name: manywheel-py3_10-cpu-s390x
build_environment: linux-s390x-binary-manywheel
-PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
manywheel-py3_10-cpu-s390x-test: # Testing
@@ -243,7 +243,7 @@ jobs:
ALPINE_IMAGE: "docker.io/s390x/alpine"
build_name: manywheel-py3_11-cpu-s390x
build_environment: linux-s390x-binary-manywheel
-PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
manywheel-py3_11-cpu-s390x-test: # Testing
@@ -306,7 +306,7 @@ jobs:
ALPINE_IMAGE: "docker.io/s390x/alpine"
build_name: manywheel-py3_12-cpu-s390x
build_environment: linux-s390x-binary-manywheel
-PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
manywheel-py3_12-cpu-s390x-test: # Testing
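The s390x jobs inherit the same x86_64-gated requirement string, so the markers keep it inert on that platform. The whole list travels as one |-separated environment value; how it is split into individual requirements is up to the builder scripts, but a hypothetical sketch of such a split (the snippet below is illustrative, not the actual builder code):

```python
# Hypothetical sketch: turn the single "|"-separated
# PYTORCH_EXTRA_INSTALL_REQUIREMENTS value into one Requires-Dist line
# per dependency, as a wheel's METADATA would carry them.
import os

raw = os.environ.get("PYTORCH_EXTRA_INSTALL_REQUIREMENTS", "")
requirements = [part.strip() for part in raw.split("|") if part.strip()]
for requirement in requirements:
    print(f"Requires-Dist: {requirement}")
```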
10 .github/workflows/generated-macos-arm64-binary-wheel-nightly.yml (generated, vendored)
@@ -46,7 +46,7 @@ jobs:
GPU_ARCH_TYPE: cpu
SKIP_ALL_TESTS: 1
DESIRED_PYTHON: "3.8"
-PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
# For sccache access (only on non-forked PRs)
AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
@@ -165,7 +165,7 @@ jobs:
GPU_ARCH_TYPE: cpu
SKIP_ALL_TESTS: 1
DESIRED_PYTHON: "3.9"
-PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
# For sccache access (only on non-forked PRs)
AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
@@ -284,7 +284,7 @@ jobs:
GPU_ARCH_TYPE: cpu
SKIP_ALL_TESTS: 1
DESIRED_PYTHON: "3.10"
-PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
# For sccache access (only on non-forked PRs)
AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
@@ -403,7 +403,7 @@ jobs:
GPU_ARCH_TYPE: cpu
SKIP_ALL_TESTS: 1
DESIRED_PYTHON: "3.11"
-PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
# For sccache access (only on non-forked PRs)
AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
@@ -522,7 +522,7 @@ jobs:
GPU_ARCH_TYPE: cpu
SKIP_ALL_TESTS: 1
DESIRED_PYTHON: "3.12"
-PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
# For sccache access (only on non-forked PRs)
AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
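After the bump, an installed nightly wheel should report a cuDNN 9.x runtime. A quick check, assuming a CUDA-enabled build (cuDNN 9 encodes its version as an integer such as 90100 for 9.1.0, versus four-digit values like 8902 for the 8.9.x series):

```python
# Quick sanity check that a freshly installed nightly wheel picked up
# the cuDNN 9 pin; skips silently on CPU-only machines.
import torch

if torch.cuda.is_available():
    print("cuDNN version:", torch.backends.cudnn.version())
```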
40 .github/workflows/generated-windows-binary-wheel-nightly.yml (generated, vendored)
@ -46,7 +46,7 @@ jobs:
|
||||
GPU_ARCH_TYPE: cpu
|
||||
SKIP_ALL_TESTS: 1
|
||||
DESIRED_PYTHON: "3.8"
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
steps:
- name: Display EC2 information
shell: bash

@@ -290,7 +290,7 @@ jobs:
GPU_ARCH_TYPE: cuda
SKIP_ALL_TESTS: 1
DESIRED_PYTHON: "3.8"
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
steps:
- name: Display EC2 information
shell: bash

@@ -536,7 +536,7 @@ jobs:
GPU_ARCH_TYPE: cuda
SKIP_ALL_TESTS: 1
DESIRED_PYTHON: "3.8"
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
steps:
- name: Display EC2 information
shell: bash

@@ -782,7 +782,7 @@ jobs:
GPU_ARCH_TYPE: cuda
SKIP_ALL_TESTS: 1
DESIRED_PYTHON: "3.8"
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
steps:
- name: Display EC2 information
shell: bash

@@ -1027,7 +1027,7 @@ jobs:
GPU_ARCH_TYPE: cpu
SKIP_ALL_TESTS: 1
DESIRED_PYTHON: "3.9"
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
steps:
- name: Display EC2 information
shell: bash

@@ -1271,7 +1271,7 @@ jobs:
GPU_ARCH_TYPE: cuda
SKIP_ALL_TESTS: 1
DESIRED_PYTHON: "3.9"
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
steps:
- name: Display EC2 information
shell: bash

@@ -1517,7 +1517,7 @@ jobs:
GPU_ARCH_TYPE: cuda
SKIP_ALL_TESTS: 1
DESIRED_PYTHON: "3.9"
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
steps:
- name: Display EC2 information
shell: bash

@@ -1763,7 +1763,7 @@ jobs:
GPU_ARCH_TYPE: cuda
SKIP_ALL_TESTS: 1
DESIRED_PYTHON: "3.9"
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
steps:
- name: Display EC2 information
shell: bash

@@ -2008,7 +2008,7 @@ jobs:
GPU_ARCH_TYPE: cpu
SKIP_ALL_TESTS: 1
DESIRED_PYTHON: "3.10"
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
steps:
- name: Display EC2 information
shell: bash

@@ -2252,7 +2252,7 @@ jobs:
GPU_ARCH_TYPE: cuda
SKIP_ALL_TESTS: 1
DESIRED_PYTHON: "3.10"
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
steps:
- name: Display EC2 information
shell: bash

@@ -2498,7 +2498,7 @@ jobs:
GPU_ARCH_TYPE: cuda
SKIP_ALL_TESTS: 1
DESIRED_PYTHON: "3.10"
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
steps:
- name: Display EC2 information
shell: bash

@@ -2744,7 +2744,7 @@ jobs:
GPU_ARCH_TYPE: cuda
SKIP_ALL_TESTS: 1
DESIRED_PYTHON: "3.10"
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
steps:
- name: Display EC2 information
shell: bash

@@ -2989,7 +2989,7 @@ jobs:
GPU_ARCH_TYPE: cpu
SKIP_ALL_TESTS: 1
DESIRED_PYTHON: "3.11"
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
steps:
- name: Display EC2 information
shell: bash

@@ -3233,7 +3233,7 @@ jobs:
GPU_ARCH_TYPE: cuda
SKIP_ALL_TESTS: 1
DESIRED_PYTHON: "3.11"
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
steps:
- name: Display EC2 information
shell: bash

@@ -3479,7 +3479,7 @@ jobs:
GPU_ARCH_TYPE: cuda
SKIP_ALL_TESTS: 1
DESIRED_PYTHON: "3.11"
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
steps:
- name: Display EC2 information
shell: bash

@@ -3725,7 +3725,7 @@ jobs:
GPU_ARCH_TYPE: cuda
SKIP_ALL_TESTS: 1
DESIRED_PYTHON: "3.11"
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
steps:
- name: Display EC2 information
shell: bash

@@ -3970,7 +3970,7 @@ jobs:
GPU_ARCH_TYPE: cpu
SKIP_ALL_TESTS: 1
DESIRED_PYTHON: "3.12"
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
steps:
- name: Display EC2 information
shell: bash

@@ -4214,7 +4214,7 @@ jobs:
GPU_ARCH_TYPE: cuda
SKIP_ALL_TESTS: 1
DESIRED_PYTHON: "3.12"
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
steps:
- name: Display EC2 information
shell: bash

@@ -4460,7 +4460,7 @@ jobs:
GPU_ARCH_TYPE: cuda
SKIP_ALL_TESTS: 1
DESIRED_PYTHON: "3.12"
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
steps:
- name: Display EC2 information
shell: bash

@@ -4706,7 +4706,7 @@ jobs:
GPU_ARCH_TYPE: cuda
SKIP_ALL_TESTS: 1
DESIRED_PYTHON: "3.12"
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
steps:
- name: Display EC2 information
shell: bash

108  .github/workflows/inductor-cu124.yml  vendored  Normal file
@@ -0,0 +1,108 @@
name: inductor-cu124

on:
  push:
    tags:
      - ciflow/inductor-cu124/*
  workflow_dispatch:
  schedule:
    # Run every 4 hours during the week and every 12 hours on the weekend
    - cron: 45 0,4,8,12,16,20 * * 1-5
    - cron: 45 4,12 * * 0,6
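The two cron entries implement the comment above: minute 45 of hours 0, 4, 8, 12, 16, 20 on Monday through Friday (day-of-week 1-5), and minute 45 of hours 4 and 12 on Saturday and Sunday (0 and 6). A quick pure-Python expansion of the fields, purely illustrative:

```python
# Expand the two crontabs above (fields: minute hour day-of-month month day-of-week).
crons = ["45 0,4,8,12,16,20 * * 1-5", "45 4,12 * * 0,6"]
for cron in crons:
    minute, hours, _dom, _month, dow = cron.split()
    print(f"minute {minute} of hours {{{hours}}} on days-of-week {dow}")
# -> every 4 hours Mon-Fri (6 runs/day), every 12 hours Sat/Sun (2 runs/day)
```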

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
  cancel-in-progress: true
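The concurrency key concatenates the workflow name, the PR number (falling back to the ref name), the commit SHA for branch refs, and whether the run was manually dispatched; runs that collide on the same key cancel each other. A rough Python rendering of the expression with hypothetical context values (not GitHub's exact stringification rules):

```python
# Rough sketch of the concurrency-group expression above; context values are
# hypothetical stand-ins for what GitHub supplies at run time.
def group_key(workflow, pr_number, ref_name, ref_type, sha, event_name):
    ident = pr_number or ref_name                      # '||' picks the first truthy value
    sha_part = sha if ref_type == "branch" else False  # '&&' yields the sha only for branches
    dispatch = event_name == "workflow_dispatch"
    return f"{workflow}-{ident}-{sha_part}-{dispatch}"

# A scheduled run and a manual dispatch on the same branch land in different
# groups, so cancel-in-progress only cancels runs of the same kind.
print(group_key("inductor-cu124", None, "main", "branch", "abc123", "schedule"))
print(group_key("inductor-cu124", None, "main", "branch", "abc123", "workflow_dispatch"))
```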

permissions: read-all

jobs:
  linux-focal-cuda12_4-py3_10-gcc9-inductor-build:
    # Should be synced with the one in inductor.yml, but this doesn't run inductor_timm
    name: cuda12.4-py3.10-gcc9-sm86
    uses: ./.github/workflows/_linux-build.yml
    with:
      sync-tag: linux-focal-cuda12_4-py3_10-gcc9-inductor-build
      build-environment: linux-focal-cuda12.4-py3.10-gcc9-sm86
      docker-image-name: pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9-inductor-benchmarks
      cuda-arch-list: '8.6'
      test-matrix: |
        { include: [
          { config: "inductor", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
          { config: "inductor_distributed", shard: 1, num_shards: 1, runner: "linux.g5.12xlarge.nvidia.gpu" },
          { config: "inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
          { config: "inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
          { config: "inductor_torchbench", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
          { config: "dynamic_inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
          { config: "dynamic_inductor_timm", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
          { config: "dynamic_inductor_timm", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
          { config: "dynamic_inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
          { config: "dynamic_inductor_torchbench", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
          { config: "aot_inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
          { config: "aot_inductor_timm", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
          { config: "aot_inductor_timm", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
          { config: "aot_inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
          { config: "aot_inductor_torchbench", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
          { config: "inductor_cpp_wrapper_abi_compatible", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
        ]}
    secrets:
      HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
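The test-matrix value is a YAML flow-style document embedded as a block scalar; the build workflow republishes it through `outputs.test-matrix`, and `_linux-test.yml` (in the job below) expands it into one job per entry. A sketch of reading such a matrix, assuming PyYAML; the tally is illustrative, not the actual CI tooling:

```python
import yaml  # PyYAML, assumed installed

matrix_text = '''
{ include: [
  { config: "inductor", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
  { config: "inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
  { config: "inductor_torchbench", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" }
]}
'''

matrix = yaml.safe_load(matrix_text)  # YAML tolerates the unquoted keys above
jobs = matrix["include"]
print(len(jobs), "matrix jobs")
for job in jobs:
    print(f'{job["config"]} shard {job["shard"]}/{job["num_shards"]} on {job["runner"]}')
```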
  linux-focal-cuda12_4-py3_10-gcc9-inductor-test:
    name: cuda12.4-py3.10-gcc9-sm86
    uses: ./.github/workflows/_linux-test.yml
    needs: linux-focal-cuda12_4-py3_10-gcc9-inductor-build
    with:
      sync-tag: linux-focal-cuda12_4-py3_10-gcc9-inductor-test
      build-environment: linux-focal-cuda12.4-py3.10-gcc9-sm86
      docker-image: ${{ needs.linux-focal-cuda12_4-py3_10-gcc9-inductor-build.outputs.docker-image }}
      test-matrix: ${{ needs.linux-focal-cuda12_4-py3_10-gcc9-inductor-build.outputs.test-matrix }}
    secrets:
      HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}

  linux-focal-cuda12_4-py3_10-gcc9-inductor-build-gcp:
    name: cuda12.4-py3.10-gcc9-sm80
    uses: ./.github/workflows/_linux-build.yml
    with:
      build-environment: linux-focal-cuda12.4-py3.10-gcc9-sm80
      docker-image-name: pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9-inductor-benchmarks
      cuda-arch-list: '8.0'
      test-matrix: |
        { include: [
          { config: "inductor_torchbench_smoketest_perf", shard: 1, num_shards: 1, runner: "linux.gcp.a100" },
        ]}
    secrets:
      HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}

  linux-focal-cuda12_4-py3_10-gcc9-inductor-test-gcp:
    name: cuda12.4-py3.10-gcc9-sm80
    uses: ./.github/workflows/_linux-test.yml
    needs: linux-focal-cuda12_4-py3_10-gcc9-inductor-build-gcp
    with:
      build-environment: linux-focal-cuda12.4-py3.10-gcc9-sm80
      docker-image: ${{ needs.linux-focal-cuda12_4-py3_10-gcc9-inductor-build-gcp.outputs.docker-image }}
      test-matrix: ${{ needs.linux-focal-cuda12_4-py3_10-gcc9-inductor-build-gcp.outputs.test-matrix }}
      use-gha: anything-non-empty-to-use-gha
    secrets:
      HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}

  linux-focal-cuda12_4-py3_12-gcc9-inductor-build:
    name: cuda12.4-py3.12-gcc9-sm86
    uses: ./.github/workflows/_linux-build.yml
    with:
      build-environment: linux-focal-cuda12.4-py3.12-gcc9-sm86
      docker-image-name: pytorch-linux-focal-cuda12.4-cudnn9-py3.12-gcc9-inductor-benchmarks
      cuda-arch-list: '8.6'
      test-matrix: |
        { include: [
          { config: "inductor", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
        ]}

  linux-focal-cuda12_4-py3_12-gcc9-inductor-test:
    name: cuda12.4-py3.12-gcc9-sm86
    uses: ./.github/workflows/_linux-test.yml
    needs: linux-focal-cuda12_4-py3_12-gcc9-inductor-build
    with:
      build-environment: linux-focal-cuda12.4-py3.12-gcc9-sm86
      docker-image: ${{ needs.linux-focal-cuda12_4-py3_12-gcc9-inductor-build.outputs.docker-image }}
      test-matrix: ${{ needs.linux-focal-cuda12_4-py3_12-gcc9-inductor-build.outputs.test-matrix }}

@@ -21,7 +21,7 @@ jobs:
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks
docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9-inductor-benchmarks
cuda-arch-list: '8.0'
test-matrix: |
{ include: [

2  .github/workflows/inductor-perf-compare.yml  vendored
@@ -18,7 +18,7 @@ jobs:
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks
docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9-inductor-benchmarks
cuda-arch-list: '8.0'
test-matrix: |
{ include: [

@@ -71,7 +71,7 @@ jobs:
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks
docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9-inductor-benchmarks
cuda-arch-list: '8.0'
test-matrix: |
{ include: [

92  .github/workflows/inductor-periodic.yml  vendored
@@ -23,7 +23,7 @@ jobs:
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm86
docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks
docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9-inductor-benchmarks
cuda-arch-list: '8.6'
test-matrix: |
{ include: [

@@ -56,93 +56,3 @@
test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-periodic-dynamo-benchmarks-build.outputs.test-matrix }}
secrets:
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
|
||||
linux-focal-cuda12_4-py3_10-gcc9-inductor-build:
|
||||
# Should be synced with the one in inductor.yml, but this doesn't run inductor_timm
|
||||
name: cuda12.4-py3.10-gcc9-sm86
|
||||
uses: ./.github/workflows/_linux-build.yml
|
||||
with:
|
||||
sync-tag: linux-focal-cuda12_4-py3_10-gcc9-inductor-build
|
||||
build-environment: linux-focal-cuda12.4-py3.10-gcc9-sm86
|
||||
docker-image-name: pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9-inductor-benchmarks
|
||||
cuda-arch-list: '8.6'
|
||||
test-matrix: |
|
||||
{ include: [
|
||||
{ config: "inductor", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
|
||||
{ config: "inductor_distributed", shard: 1, num_shards: 1, runner: "linux.g5.12xlarge.nvidia.gpu" },
|
||||
{ config: "inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
|
||||
{ config: "inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
|
||||
{ config: "inductor_torchbench", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
|
||||
{ config: "dynamic_inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
|
||||
{ config: "dynamic_inductor_timm", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
|
||||
{ config: "dynamic_inductor_timm", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
|
||||
{ config: "dynamic_inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
|
||||
{ config: "dynamic_inductor_torchbench", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
|
||||
{ config: "aot_inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
|
||||
{ config: "aot_inductor_timm", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
|
||||
{ config: "aot_inductor_timm", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
|
||||
{ config: "aot_inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
|
||||
{ config: "aot_inductor_torchbench", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
|
||||
{ config: "inductor_cpp_wrapper_abi_compatible", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
|
||||
]}
|
||||
secrets:
|
||||
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
|
||||
|
||||
linux-focal-cuda12_4-py3_10-gcc9-inductor-test:
|
||||
name: cuda12.4-py3.10-gcc9-sm86
|
||||
uses: ./.github/workflows/_linux-test.yml
|
||||
needs: linux-focal-cuda12_4-py3_10-gcc9-inductor-build
|
||||
with:
|
||||
sync-tag: linux-focal-cuda12_4-py3_10-gcc9-inductor-test
|
||||
build-environment: linux-focal-cuda12.4-py3.10-gcc9-sm86
|
||||
docker-image: ${{ needs.linux-focal-cuda12_4-py3_10-gcc9-inductor-build.outputs.docker-image }}
|
||||
test-matrix: ${{ needs.linux-focal-cuda12_4-py3_10-gcc9-inductor-build.outputs.test-matrix }}
|
||||
secrets:
|
||||
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
|
||||
|
||||
linux-focal-cuda12_4-py3_10-gcc9-inductor-build-gcp:
|
||||
name: cuda12.4-py3.10-gcc9-sm80
|
||||
uses: ./.github/workflows/_linux-build.yml
|
||||
with:
|
||||
build-environment: linux-focal-cuda12.4-py3.10-gcc9-sm80
|
||||
docker-image-name: pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9-inductor-benchmarks
|
||||
cuda-arch-list: '8.0'
|
||||
test-matrix: |
|
||||
{ include: [
|
||||
{ config: "inductor_torchbench_smoketest_perf", shard: 1, num_shards: 1, runner: "linux.gcp.a100" },
|
||||
]}
|
||||
secrets:
|
||||
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
|
||||
|
||||
linux-focal-cuda12_4-py3_10-gcc9-inductor-test-gcp:
|
||||
name: cuda12.4-py3.10-gcc9-sm80
|
||||
uses: ./.github/workflows/_linux-test.yml
|
||||
needs: linux-focal-cuda12_4-py3_10-gcc9-inductor-build-gcp
|
||||
with:
|
||||
build-environment: linux-focal-cuda12.4-py3.10-gcc9-sm80
|
||||
docker-image: ${{ needs.linux-focal-cuda12_4-py3_10-gcc9-inductor-build-gcp.outputs.docker-image }}
|
||||
test-matrix: ${{ needs.linux-focal-cuda12_4-py3_10-gcc9-inductor-build-gcp.outputs.test-matrix }}
|
||||
use-gha: anything-non-empty-to-use-gha
|
||||
secrets:
|
||||
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
|
||||
|
||||
linux-focal-cuda12_4-py3_12-gcc9-inductor-build:
|
||||
name: cuda12.4-py3.12-gcc9-sm86
|
||||
uses: ./.github/workflows/_linux-build.yml
|
||||
with:
|
||||
build-environment: linux-focal-cuda12.4-py3.12-gcc9-sm86
|
||||
docker-image-name: pytorch-linux-focal-cuda12.4-cudnn8-py3.12-gcc9-inductor-benchmarks
|
||||
cuda-arch-list: '8.6'
|
||||
test-matrix: |
|
||||
{ include: [
|
||||
{ config: "inductor", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
|
||||
]}
|
||||
|
||||
linux-focal-cuda12_4-py3_12-gcc9-inductor-test:
|
||||
name: cuda12.4-py3.12-gcc9-sm86
|
||||
uses: ./.github/workflows/_linux-test.yml
|
||||
needs: linux-focal-cuda12_4-py3_12-gcc9-inductor-build
|
||||
with:
|
||||
build-environment: linux-focal-cuda12.4-py3.12-gcc9-sm86
|
||||
docker-image: ${{ needs.linux-focal-cuda12_4-py3_12-gcc9-inductor-build.outputs.docker-image }}
|
||||
test-matrix: ${{ needs.linux-focal-cuda12_4-py3_12-gcc9-inductor-build.outputs.test-matrix }}
|
||||
|
.github/workflows/inductor.yml (8 changes, vendored)

@@ -44,7 +44,7 @@ jobs:
     uses: ./.github/workflows/_linux-build.yml
     with:
       build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm86
-      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks
+      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9-inductor-benchmarks
       cuda-arch-list: '8.6'
       test-matrix: |
         { include: [
@@ -86,7 +86,7 @@ jobs:
     uses: ./.github/workflows/_linux-build.yml
     with:
       build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
-      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks
+      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9-inductor-benchmarks
       cuda-arch-list: '8.0'
       test-matrix: |
         { include: [
@@ -112,7 +112,7 @@ jobs:
     uses: ./.github/workflows/_linux-build.yml
     with:
       build-environment: linux-focal-cuda12.1-py3.12-gcc9-sm86
-      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3.12-gcc9-inductor-benchmarks
+      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3.12-gcc9-inductor-benchmarks
       cuda-arch-list: '8.6'
       test-matrix: |
         { include: [
@@ -135,7 +135,7 @@ jobs:
     with:
       sync-tag: linux-focal-cuda12_4-py3_10-gcc9-inductor-build
      build-environment: linux-focal-cuda12.4-py3.10-gcc9-sm86
-      docker-image-name: pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9-inductor-benchmarks
+      docker-image-name: pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9-inductor-benchmarks
       cuda-arch-list: '8.6'
       test-matrix: |
         { include: [
.github/workflows/lint.yml (4 changes, vendored)

@@ -20,7 +20,7 @@ jobs:
     with:
       timeout: 120
       runner: linux.2xlarge
-      docker-image: pytorch-linux-jammy-cuda11.8-cudnn8-py3.9-linter
+      docker-image: pytorch-linux-jammy-cuda11.8-cudnn9-py3.9-linter
       # NB: A shallow checkout won't work here because calculate-docker-image requires a full checkout
       # to run git rev-parse HEAD~:.ci/docker when a new image is needed
       fetch-depth: 0
@@ -36,7 +36,7 @@ jobs:
     with:
       timeout: 120
       runner: linux.2xlarge
-      docker-image: pytorch-linux-jammy-cuda11.8-cudnn8-py3.9-linter
+      docker-image: pytorch-linux-jammy-cuda11.8-cudnn9-py3.9-linter
       # NB: A shallow checkout won't work here because calculate-docker-image requires a full checkout
       # to run git rev-parse HEAD~:.ci/docker when a new image is needed
       fetch-depth: 0
.github/workflows/periodic.yml (8 changes, vendored)

@@ -42,7 +42,7 @@ jobs:
     uses: ./.github/workflows/_linux-build.yml
     with:
       build-environment: linux-focal-cuda12.1-py3.10-gcc9
-      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
+      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9
       test-matrix: |
         { include: [
           { config: "nogpu_AVX512", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
@@ -65,7 +65,7 @@ jobs:
     uses: ./.github/workflows/_linux-build-label.yml
     with:
       build-environment: linux-focal-cuda12.4-py3.10-gcc9
-      docker-image-name: pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9
+      docker-image-name: pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9
       test-matrix: |
         { include: [
           { config: "default", shard: 1, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
@@ -120,7 +120,7 @@ jobs:
     uses: ./.github/workflows/_linux-build.yml
     with:
       build-environment: linux-focal-cuda11.8-py3.9-gcc9
-      docker-image-name: pytorch-linux-focal-cuda11.8-cudnn8-py3-gcc9
+      docker-image-name: pytorch-linux-focal-cuda11.8-cudnn9-py3-gcc9
       cuda-arch-list: 8.6
       test-matrix: |
         { include: [
@@ -142,7 +142,7 @@ jobs:
     uses: ./.github/workflows/_linux-build.yml
     with:
       build-environment: linux-focal-cuda11.8-py3.10-gcc9-debug
-      docker-image-name: pytorch-linux-focal-cuda11.8-cudnn8-py3-gcc9
+      docker-image-name: pytorch-linux-focal-cuda11.8-cudnn9-py3-gcc9
       build-with-debug: true
       test-matrix: |
         { include: [
.github/workflows/pull.yml (20 changes, vendored)

@@ -237,7 +237,7 @@ jobs:
     uses: ./.github/workflows/_linux-build-label.yml
     with:
       build-environment: linux-focal-cuda11.8-py3.10-gcc9
-      docker-image-name: pytorch-linux-focal-cuda11.8-cudnn8-py3-gcc9
+      docker-image-name: pytorch-linux-focal-cuda11.8-cudnn9-py3-gcc9
       test-matrix: |
         { include: [
           { config: "distributed", shard: 1, num_shards: 3, runner: "linux.8xlarge.nvidia.gpu" },
@@ -262,7 +262,7 @@ jobs:
     uses: ./.github/workflows/_linux-build-label.yml
     with:
       build-environment: linux-focal-cuda12.1-py3.10-gcc9
-      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
+      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9
       test-matrix: |
         { include: [
           { config: "default", shard: 1, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
@@ -297,12 +297,12 @@ jobs:
           { config: "default", shard: 1, num_shards: 1 },
         ]}

-  linux-jammy-cuda-11_8-cudnn8-py3_8-clang12-build:
-    name: linux-jammy-cuda11.8-cudnn8-py3.8-clang12
+  linux-jammy-cuda-11_8-cudnn9-py3_8-clang12-build:
+    name: linux-jammy-cuda11.8-cudnn9-py3.8-clang12
     uses: ./.github/workflows/_linux-build-label.yml
     with:
-      build-environment: linux-jammy-cuda11.8-cudnn8-py3.8-clang12
-      docker-image-name: pytorch-linux-jammy-cuda11.8-cudnn8-py3.8-clang12
+      build-environment: linux-jammy-cuda11.8-cudnn9-py3.8-clang12
+      docker-image-name: pytorch-linux-jammy-cuda11.8-cudnn9-py3.8-clang12
       test-matrix: |
         { include: [
           { config: "default", shard: 1, num_shards: 1 },
@@ -361,7 +361,7 @@ jobs:
     uses: ./.github/workflows/_bazel-build-test.yml
     with:
       build-environment: linux-focal-cuda12.1-py3.10-gcc9-bazel-test
-      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
+      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9
       cuda-version: cpu
       test-matrix: |
         { include: [
@@ -373,7 +373,7 @@ jobs:
     uses: ./.github/workflows/_bazel-build-test.yml
     with:
       build-environment: linux-focal-cuda12.1-py3.10-gcc9-bazel-test
-      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
+      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9
       cuda-version: "12.1"
       test-matrix: |
         { include: [
@@ -385,7 +385,7 @@ jobs:
     uses: ./.github/workflows/_bazel-build-test.yml
     with:
       build-environment: linux-focal-cuda12.4-py3.10-gcc9-bazel-test
-      docker-image-name: pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9
+      docker-image-name: pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9
       cuda-version: "12.4"
       test-matrix: |
         { include: [
@@ -447,7 +447,7 @@ jobs:
     uses: ./.github/workflows/_linux-build-label.yml
     with:
       build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm86
-      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
+      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9
       cuda-arch-list: 8.6
       test-matrix: |
         { include: [
.github/workflows/slow.yml (4 changes, vendored)

@@ -41,7 +41,7 @@ jobs:
     uses: ./.github/workflows/_linux-build.yml
     with:
       build-environment: linux-focal-cuda12.1-py3-gcc9-slow-gradcheck
-      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
+      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9
       cuda-arch-list: 8.6
       test-matrix: |
         { include: [
@@ -70,7 +70,7 @@ jobs:
     uses: ./.github/workflows/_linux-build.yml
     with:
       build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm86
-      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
+      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9
       cuda-arch-list: 8.6
       test-matrix: |
         { include: [
@@ -26,7 +26,7 @@ jobs:
         id: calculate-docker-image
         uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
         with:
-          docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
+          docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9
           working-directory: pytorch

       - name: Use following to pull public copy of the image
.github/workflows/torchbench.yml (2 changes, vendored)

@@ -16,7 +16,7 @@ jobs:
     uses: ./.github/workflows/_linux-build.yml
     with:
       build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
-      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks
+      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9-inductor-benchmarks
       cuda-arch-list: '8.0'
       test-matrix: |
         { include: [
.github/workflows/trunk.yml (10 changes, vendored)

@@ -39,7 +39,7 @@ jobs:
     uses: ./.github/workflows/_linux-build-label.yml
     with:
       build-environment: linux-focal-cuda12.4-py3.10-gcc9-sm86
-      docker-image-name: pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9
+      docker-image-name: pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9
       cuda-arch-list: 8.6
       test-matrix: |
         { include: [
@@ -66,7 +66,7 @@ jobs:
     uses: ./.github/workflows/_linux-build.yml
     with:
       build-environment: libtorch-linux-focal-cuda12.1-py3.7-gcc9
-      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
+      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9
       build-generates-artifacts: false
       runner: linux.4xlarge
       test-matrix: |
@@ -80,7 +80,7 @@ jobs:
     uses: ./.github/workflows/_linux-build.yml
     with:
       build-environment: linux-focal-cuda12.1-py3.10-gcc9-no-ops
-      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
+      docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9
       test-matrix: |
         { include: [
           { config: "default", shard: 1, num_shards: 1 },
@@ -91,7 +91,7 @@ jobs:
     uses: ./.github/workflows/_linux-build.yml
     with:
       build-environment: libtorch-linux-focal-cuda12.4-py3.7-gcc9
-      docker-image-name: pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9
+      docker-image-name: pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9
       build-generates-artifacts: false
       runner: linux.4xlarge
       test-matrix: |
@@ -105,7 +105,7 @@ jobs:
     uses: ./.github/workflows/_linux-build.yml
     with:
       build-environment: linux-focal-cuda12.4-py3.10-gcc9-no-ops
-      docker-image-name: pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9
+      docker-image-name: pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9
       test-matrix: |
         { include: [
           { config: "default", shard: 1, num_shards: 1 },
@@ -1532,28 +1532,6 @@ exclude_patterns = [
     'torch/distributed/optim/post_localSGD_optimizer.py',
     'torch/distributed/optim/utils.py',
     'torch/distributed/optim/zero_redundancy_optimizer.py',
-    'torch/distributed/pipeline/__init__.py',
-    'torch/distributed/pipeline/sync/__init__.py',
-    'torch/distributed/pipeline/sync/_balance/__init__.py',
-    'torch/distributed/pipeline/sync/_balance/blockpartition.py',
-    'torch/distributed/pipeline/sync/_balance/profile.py',
-    'torch/distributed/pipeline/sync/batchnorm.py',
-    'torch/distributed/pipeline/sync/checkpoint.py',
-    'torch/distributed/pipeline/sync/copy.py',
-    'torch/distributed/pipeline/sync/dependency.py',
-    'torch/distributed/pipeline/sync/microbatch.py',
-    'torch/distributed/pipeline/sync/phony.py',
-    'torch/distributed/pipeline/sync/pipe.py',
-    'torch/distributed/pipeline/sync/pipeline.py',
-    'torch/distributed/pipeline/sync/skip/__init__.py',
-    'torch/distributed/pipeline/sync/skip/layout.py',
-    'torch/distributed/pipeline/sync/skip/namespace.py',
-    'torch/distributed/pipeline/sync/skip/portal.py',
-    'torch/distributed/pipeline/sync/skip/skippable.py',
-    'torch/distributed/pipeline/sync/skip/tracker.py',
-    'torch/distributed/pipeline/sync/stream.py',
-    'torch/distributed/pipeline/sync/utils.py',
-    'torch/distributed/pipeline/sync/worker.py',
     'torch/distributed/remote_device.py',
     'torch/distributed/rendezvous.py',
     'torch/distributed/rpc/__init__.py',
@@ -1847,8 +1825,6 @@ exclude_patterns = [
     'torch/testing/_internal/distributed/nn/__init__.py',
     'torch/testing/_internal/distributed/nn/api/__init__.py',
     'torch/testing/_internal/distributed/nn/api/remote_module_test.py',
-    'torch/testing/_internal/distributed/pipe_with_ddp_test.py',
-    'torch/testing/_internal/distributed/pipeline/__init__.py',
     'torch/testing/_internal/distributed/rpc/__init__.py',
     'torch/testing/_internal/distributed/rpc/dist_autograd_test.py',
     'torch/testing/_internal/distributed/rpc/dist_optimizer_test.py',
@@ -2103,7 +2079,7 @@ init_command = [
     'python3',
     'tools/linter/adapters/pip_init.py',
     '--dry-run={{DRYRUN}}',
-    'ruff==0.4.6',
+    'ruff==0.4.8',
 ]
 is_formatter = true
@@ -242,8 +242,7 @@ option(USE_COLORIZE_OUTPUT "Colorize output during compilation" ON)
 option(USE_ASAN "Use Address+Undefined Sanitizers" OFF)
 option(USE_TSAN "Use Thread Sanitizer" OFF)
 option(USE_CUDA "Use CUDA" ON)
-cmake_dependent_option(USE_XPU "Use XPU. Only available on Linux." ON "LINUX"
-                       OFF)
+option(USE_XPU "Use XPU" ON)
 cmake_dependent_option(
     BUILD_LAZY_CUDA_LINALG "Build cuda linalg ops as separate library" ON
     "USE_CUDA AND LINUX AND BUILD_PYTHON" OFF)
@@ -189,7 +189,7 @@ Other potentially useful environment variables may be found in `setup.py`.
 ##### Intel GPU Support
 If you want to compile with Intel GPU support, follow these
 - [PyTorch Prerequisites for Intel GPUs](https://www.intel.com/content/www/us/en/developer/articles/tool/pytorch-prerequisites-for-intel-gpus.html) instructions.
-- Intel GPU is currently supported only for Linux systems.
+- Intel GPU is supported for Linux and Windows.

 If you want to disable Intel GPU support, export the environment variable `USE_XPU=0`.
 Other potentially useful environment variables may be found in `setup.py`.
@@ -213,6 +213,7 @@ conda install -c pytorch magma-cuda121  # or the magma-cuda* that matches your C

 # (optional) If using torch.compile with inductor/triton, install the matching version of triton
 # Run from the pytorch directory after cloning
+# For Intel GPU support, please explicitly `export USE_XPU=1` before running command.
 make triton
 ```
@@ -473,7 +473,6 @@ endif()

 if(USE_CUDA AND NOT USE_ROCM)
   list(APPEND ATen_CUDA_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/../../../third_party/cutlass/include)
-  list(APPEND ATen_CUDA_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/../../../third_party/cutlass/tools/util/include)
   if($ENV{ATEN_STATIC_CUDA})
     list(APPEND ATen_CUDA_DEPENDENCY_LIBS
       ${CUDA_LIBRARIES}
@@ -218,8 +218,8 @@ static inline Tensor applySlice(
       ? (*self_sizes)[dim]
       : self.sym_size(dim);
   if (!disable_slice_optimization &&
-      TORCH_GUARD_SIZE_OBLIVIOUS(start.sym_eq(0)) && length == stop &&
-      step == 1) {
+      TORCH_GUARD_SIZE_OBLIVIOUS(start.sym_eq(0)) &&
+      TORCH_GUARD_SIZE_OBLIVIOUS(length.sym_eq(stop)) && step == 1) {
     return self;
   }
 }
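The change above makes the `stop` comparison guard-free under symbolic shapes as well: the slice is a no-op exactly when it covers the whole dimension with unit step. A standalone sketch of that fast path (plain C++, not the ATen code):

```cpp
// Sketch of the slice no-op fast path the hunk above guards:
// start == 0, stop == length, step == 1 means "return the input unchanged".
#include <cassert>
#include <vector>

std::vector<int> slice(const std::vector<int>& v, long start, long stop, long step) {
  const long length = static_cast<long>(v.size());
  if (start == 0 && stop == length && step == 1) {
    return v;  // fast path: the slice covers everything
  }
  std::vector<int> out;
  for (long i = start; i < stop && i < length; i += step) out.push_back(v[i]);
  return out;
}

int main() {
  std::vector<int> v{1, 2, 3, 4};
  assert(slice(v, 0, 4, 1) == v);                       // no-op slice
  assert((slice(v, 1, 3, 1) == std::vector<int>{2, 3}));  // regular slice
  return 0;
}
```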
@@ -68,7 +68,7 @@ thread_local std::array<at::ScalarType, at::COMPILE_TIME_MAX_DEVICE_TYPES>
         at::kBFloat16, // XLA / TPU
         at::ScalarType::Undefined, // Vulkan
         at::ScalarType::Undefined, // Metal
-        at::kBFloat16, // XPU
+        at::kHalf, // XPU
         at::ScalarType::Undefined, // MPS
         at::ScalarType::Undefined, // Meta (tensors with no data)
         at::kBFloat16, // HPU / HABANA
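This hunk flips the XPU entry in the per-device autocast default-dtype table from bfloat16 to half. A minimal standalone sketch of that table pattern (all type and enum names below are illustrative, not the ATen ones):

```cpp
// Sketch: one thread_local default-dtype slot per device type, indexed by
// the device enum, with XPU defaulting to half after the change above.
#include <array>
#include <cstdio>

enum class DeviceType { CPU, CUDA, XPU, kCount };
enum class ScalarType { Undefined, Half, BFloat16 };

thread_local std::array<ScalarType, static_cast<size_t>(DeviceType::kCount)>
    autocast_dtype = {
        ScalarType::BFloat16,  // CPU
        ScalarType::Half,      // CUDA
        ScalarType::Half,      // XPU (was BFloat16 before this diff)
};

ScalarType get_autocast_dtype(DeviceType d) {
  return autocast_dtype[static_cast<size_t>(d)];
}

int main() {
  std::printf("XPU autocast dtype is half: %d\n",
              get_autocast_dtype(DeviceType::XPU) == ScalarType::Half);
  return 0;
}
```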
@@ -275,16 +275,6 @@ void expectOutOfPlaceMultiBoxedCallingWorks(const KernelFunction& func) {
   EXPECT_TRUE(stack[1].toTensor().is_same(t2));
 }

-void expectBoxedCallingFailsWith(const KernelFunction& func, const char* errorMessage) {
-  called_with_args = c10::nullopt;
-  vector<IValue> stack {3, 4};
-  OperatorHandle dummy = makeDummyOperatorHandle();
-
-  expectThrows<c10::Error>([&] {
-    func.callBoxed(dummy, CPU_TEST_SET, &stack);
-  }, errorMessage);
-}
-
 //
 // unboxed calling tests:
 //
@@ -51,17 +51,6 @@ void expectCallsIncrement(DispatchKey dispatch_key) {
   EXPECT_EQ(6, result[0].toInt());
 }

-void expectCallsDecrement(DispatchKey dispatch_key) {
-  at::AutoDispatchBelowAutograd mode;
-
-  // assert that schema and cpu kernel are present
-  auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""});
-  ASSERT_TRUE(op.has_value());
-  auto result = callOp(*op, dummyTensor(dispatch_key), 5);
-  EXPECT_EQ(1, result.size());
-  EXPECT_EQ(4, result[0].toInt());
-}
-
 TEST(OperatorRegistrationTestLegacyFunctionBasedKernel, givenKernel_whenRegistered_thenCanBeCalled) {
   auto registrar = RegisterOperators().op("_test::my_op(Tensor dummy, int input) -> int", &incrementKernel);
   expectCallsIncrement(DispatchKey::CPU);
@@ -662,18 +662,6 @@ void expectCallsConcatUnboxed(DispatchKey dispatch_key) {
   EXPECT_EQ("123", result);
 }

-void expectCannotCallConcatBoxed(DispatchKey dispatch_key) {
-  at::AutoDispatchBelowAutograd mode;
-
-  // assert that schema and cpu kernel are present
-  auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""});
-  ASSERT_TRUE(op.has_value());
-  expectThrows<c10::Error>(
-    [&] {callOp(*op, dummyTensor(dispatch_key), "1", "2", 3);},
-    "Tried to call KernelFunction::callBoxed() on a KernelFunction that can only be called with KernelFunction::call()."
-  );
-}
-
 TEST(OperatorRegistrationTestFunctionBasedKernel, givenKernel_whenRegistered_thenCanBeCalledUnboxed) {
   auto registrar = RegisterOperators().op("_test::my_op(Tensor dummy, str a, str b, int c) -> str", RegisterOperators::options().kernel<decltype(concatKernel), &concatKernel>(DispatchKey::CPU));
   expectCallsConcatUnboxed(DispatchKey::CPU);
@@ -51,17 +51,6 @@ void expectCallsIncrement(DispatchKey dispatch_key) {
   EXPECT_EQ(6, result[0].toInt());
 }

-void expectCallsDecrement(DispatchKey dispatch_key) {
-  at::AutoDispatchBelowAutograd mode;
-
-  // assert that schema and cpu kernel are present
-  auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""});
-  ASSERT_TRUE(op.has_value());
-  auto result = callOp(*op, dummyTensor(dispatch_key), 5);
-  EXPECT_EQ(1, result.size());
-  EXPECT_EQ(4, result[0].toInt());
-}
-
 TEST(OperatorRegistrationTestFunctorBasedKernel, givenKernel_whenRegistered_thenCanBeCalled) {
   auto registrar = RegisterOperators().op("_test::my_op(Tensor dummy, int input) -> int", RegisterOperators::options().kernel<IncrementKernel>(DispatchKey::CPU));
   expectCallsIncrement(DispatchKey::CPU);
@@ -4,6 +4,21 @@
 #endif

 namespace at::cpu {
+bool is_cpu_support_avx2() {
+#if !defined(__s390x__) && !defined(__powerpc__)
+  return cpuinfo_initialize() && cpuinfo_has_x86_avx2();
+#else
+  return false;
+#endif
+}
+
+bool is_cpu_support_avx512() {
+#if !defined(__s390x__) && !defined(__powerpc__)
+  return cpuinfo_initialize() && cpuinfo_has_x86_avx512f() && cpuinfo_has_x86_avx512vl() && cpuinfo_has_x86_avx512bw() && cpuinfo_has_x86_avx512dq();
+#else
+  return false;
+#endif
+}
+
 bool is_cpu_support_vnni() {
 #if !defined(__s390x__) && !defined(__powerpc__)
@@ -4,6 +4,9 @@

 namespace at::cpu {

+TORCH_API bool is_cpu_support_avx2();
+TORCH_API bool is_cpu_support_avx512();
+
 // Detect if CPU support Vector Neural Network Instruction.
 TORCH_API bool is_cpu_support_vnni();
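The declarations above expose the cpuinfo-backed checks added in the previous hunk. A minimal sketch of how such capability checks are typically consumed (standalone; the kernel names are hypothetical, only the cpuinfo calls come from the hunk):

```cpp
// Sketch: pick a code path using the same cpuinfo queries the hunk adds.
// Assumes the cpuinfo library (github.com/pytorch/cpuinfo) is linked.
#include <cstdio>
#include <cpuinfo.h>

static void kernel_avx2()   { std::puts("using AVX2 path"); }
static void kernel_scalar() { std::puts("using scalar path"); }

int main() {
  // cpuinfo_initialize() must succeed before any feature query.
  const bool has_avx2 = cpuinfo_initialize() && cpuinfo_has_x86_avx2();
  (has_avx2 ? kernel_avx2 : kernel_scalar)();
  return 0;
}
```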
@@ -170,43 +170,6 @@ CUDA_STUB3(cuLinkComplete, CUlinkState, void **, size_t *);
 CUDA_STUB3(cuFuncSetAttribute, CUfunction, CUfunction_attribute, int);
 CUDA_STUB3(cuFuncGetAttribute, int*, CUfunction_attribute, CUfunction);

-#if defined(CUDA_VERSION) && CUDA_VERSION >= 12000
-CUresult CUDAAPI
-cuTensorMapEncodeTiled(
-    CUtensorMap* tensorMap,
-    CUtensorMapDataType tensorDataType,
-    cuuint32_t tensorRank,
-    void* globalAddress,
-    const cuuint64_t* globalDim,
-    const cuuint64_t* globalStrides,
-    const cuuint32_t* boxDim,
-    const cuuint32_t* elementStrides,
-    CUtensorMapInterleave interleave,
-    CUtensorMapSwizzle swizzle,
-    CUtensorMapL2promotion l2Promotion,
-    CUtensorMapFloatOOBfill oobFill) {
-  auto fn = reinterpret_cast<decltype(&cuTensorMapEncodeTiled)>(
-      getCUDALibrary().sym(__func__));
-  if (!fn)
-    throw std::runtime_error("Can't get cuTensorMapEncodeTiled");
-  lazyNVRTC.cuTensorMapEncodeTiled = fn;
-  return fn(
-      tensorMap,
-      tensorDataType,
-      tensorRank,
-      globalAddress,
-      globalDim,
-      globalStrides,
-      boxDim,
-      elementStrides,
-      interleave,
-      swizzle,
-      l2Promotion,
-      oobFill);
-}
-
-#endif
-
 // Irregularly shaped functions
 CUresult CUDAAPI cuLaunchKernel(CUfunction f,
                                 unsigned int gridDimX,
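The stub removed above used the common lazy-binding pattern: resolve the driver symbol on first call, cache the pointer in the dispatch table, then forward. A standalone sketch of that pattern (illustrative names, plain dlsym in place of ATen's library loader):

```cpp
// Sketch: first call resolves the real symbol, caches it, and forwards;
// later callers go through the cached pointer and skip the lookup.
#include <dlfcn.h>
#include <stdexcept>

using add_fn = int (*)(int, int);
struct Table { add_fn add = nullptr; };  // starts pointing at the stub in the real pattern
static Table table;

static int add_stub(int a, int b) {
  // Resolve "add" from an already-loaded library ("add" is illustrative).
  auto fn = reinterpret_cast<add_fn>(dlsym(RTLD_DEFAULT, "add"));
  if (!fn) throw std::runtime_error("Can't resolve symbol: add");
  table.add = fn;   // cache: subsequent calls bypass this stub
  return fn(a, b);  // forward the first call
}
```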
@@ -34,8 +34,8 @@ struct PhiloxCudaState {
     int64_t* ptr;
   };

-  Payload seed_;
-  Payload offset_;
+  Payload seed_{};
+  Payload offset_{};
   uint32_t offset_intragraph_ = 0;
   bool captured_ = false;
 };
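The `{}` initializers added here value-initialize the payloads instead of leaving them indeterminate. A standalone sketch of the difference (not PyTorch code):

```cpp
// Sketch: without braces, a default-constructed struct leaves the member
// uninitialized; with `{}`, it is value-initialized to zero.
#include <cstdint>
#include <iostream>

struct WithBraces    { uint64_t val{}; };  // val is zero-initialized
struct WithoutBraces { uint64_t val; };    // val is indeterminate

int main() {
  WithBraces a;
  std::cout << a.val << "\n";  // always 0
  WithoutBraces b;
  // Reading b.val here would be undefined behavior; that is the bug class
  // the {} initializers in PhiloxCudaState guard against.
  (void)b;
  return 0;
}
```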
@@ -59,25 +59,16 @@ namespace at { namespace cuda {
   _(cuLinkAddData)                             \
   _(cuLinkComplete)                            \
   _(cuFuncSetAttribute)                        \
-  _(cuFuncGetAttribute)                        \
-
-#if defined(CUDA_VERSION) && CUDA_VERSION >= 12000
-#define AT_FORALL_NVRTC_EXTENDED(_)            \
-  AT_FORALL_NVRTC_BASE(_)                      \
-  _(cuTensorMapEncodeTiled)
-#else
-#define AT_FORALL_NVRTC_EXTENDED(_)            \
-  AT_FORALL_NVRTC_BASE(_)
-#endif
+  _(cuFuncGetAttribute)

 #if defined(CUDA_VERSION) && CUDA_VERSION >= 11010
 #define AT_FORALL_NVRTC(_) \
-  AT_FORALL_NVRTC_EXTENDED(_) \
+  AT_FORALL_NVRTC_BASE(_)  \
   _(nvrtcGetCUBINSize)     \
   _(nvrtcGetCUBIN)
 #else
 #define AT_FORALL_NVRTC(_) \
-  AT_FORALL_NVRTC_EXTENDED(_)
+  AT_FORALL_NVRTC_BASE(_)
 #endif

 #else
@@ -31,46 +31,6 @@ Tensor index_select_backward_hack(const Tensor& grad, IntArrayRef self_sizes, in
   return at::zeros(self_sizes, grad.options()).index_add(dim, index, grad);
 }

-static optional<std::tuple<Tensor,int64_t>> unwrap(const Tensor& tensor) {
-  auto* wrapped = maybeGetTensorWrapper(tensor);
-  if (wrapped) {
-    if (wrapped->level().has_value()) {
-      return std::make_tuple(wrapped->value(), *wrapped->level());
-    }
-    return unwrap(wrapped->value());
-  }
-  auto* batched = maybeGetBatchedImpl(tensor);
-  if (batched) {
-    return std::make_tuple(batched->value(), batched->level());
-  }
-  return nullopt;
-}
-
-static bool can_perform_inplace(const Tensor& a, const Tensor& b) {
-  // TODO: generalize this to more transforms
-  auto a_ = unwrap(a);
-  auto b_ = unwrap(b);
-  if (!a_.has_value() && b_.has_value()) {
-    return false;
-  }
-  if (!a_.has_value() && !b_.has_value()) {
-    return true;
-  }
-  if (a_.has_value() && !b_.has_value()) {
-    return true;
-  }
-  TORCH_INTERNAL_ASSERT(a_.has_value() && b_.has_value());
-
-  // If b has any wrapper that a does not, then we cannot do a.inplace_(b)
-  if (std::get<1>(*a_) < std::get<1>(*b_)) {
-    return false;
-  }
-  if (std::get<1>(*a_) > std::get<1>(*b_)) {
-    return can_perform_inplace(std::get<0>(*a_), b);
-  }
-  return can_perform_inplace(std::get<0>(*a_), std::get<0>(*b_));
-}
-
 // TODO: linear is pretty important for performance, but I'm not sure how to work
 // around the in-place.
 Tensor linear_hack(const Tensor& input, const Tensor& weight, const std::optional<Tensor>& bias_opt) {
@@ -1480,23 +1480,14 @@ Tensor& not_equal_(Tensor& self, const Scalar& other) { return self.ne_(other);
 Tensor& logical_and_out(const Tensor& self, const Tensor& other, Tensor& result) { return comparison_op_out(result, self, other, logical_and_stub); }
 Tensor logical_and(const Tensor& self, const Tensor& other) { return comparison_op(self, other, static_cast<OutFunc>(at::logical_and_out)); }
 Tensor& logical_and_(Tensor& self, const Tensor& other) { return comparison_op_(self, other, static_cast<OutFunc>(at::logical_and_out)); }
-static Tensor& logical_and_out(Tensor& result, const Tensor& self, const Scalar& other) { return comparison_op_out(result, self, other, static_cast<OutFunc>(at::logical_and_out)); }
-static Tensor logical_and(const Tensor& self, const Scalar& other) { return comparison_op(self, other, static_cast<OutFunc>(at::logical_and_out)); }
-static Tensor& logical_and_(Tensor& self, const Scalar& other) { return comparison_op_(self, other, static_cast<OutFunc>(at::logical_and_out)); }

 Tensor& logical_or_out(const Tensor& self, const Tensor& other, Tensor& result) { return comparison_op_out(result, self, other, logical_or_stub); }
 Tensor logical_or(const Tensor& self, const Tensor& other) { return comparison_op(self, other, static_cast<OutFunc>(at::logical_or_out)); }
 Tensor& logical_or_(Tensor& self, const Tensor& other) { return comparison_op_(self, other, static_cast<OutFunc>(at::logical_or_out)); }
-static Tensor& logical_or_out(Tensor& result, const Tensor& self, const Scalar& other) { return comparison_op_out(result, self, other, static_cast<OutFunc>(at::logical_or_out)); }
-static Tensor logical_or(const Tensor& self, const Scalar& other) { return comparison_op(self, other, static_cast<OutFunc>(at::logical_or_out)); }
-static Tensor& logical_or_(Tensor& self, const Scalar& other) { return comparison_op_(self, other, static_cast<OutFunc>(at::logical_or_out)); }

 Tensor& logical_xor_out(const Tensor& self, const Tensor& other, Tensor& result) { return comparison_op_out(result, self, other, logical_xor_stub); }
 Tensor logical_xor(const Tensor& self, const Tensor& other) { return comparison_op(self, other, static_cast<OutFunc>(at::logical_xor_out)); }
 Tensor& logical_xor_(Tensor& self, const Tensor& other) { return comparison_op_(self, other, static_cast<OutFunc>(at::logical_xor_out)); }
-static Tensor& logical_xor_out(Tensor& result, const Tensor& self, const Scalar& other) { return comparison_op_out(result, self, other, static_cast<OutFunc>(at::logical_xor_out)); }
-static Tensor logical_xor(const Tensor& self, const Scalar& other) { return comparison_op(self, other, static_cast<OutFunc>(at::logical_xor_out)); }
-static Tensor& logical_xor_(Tensor& self, const Scalar& other) { return comparison_op_(self, other, static_cast<OutFunc>(at::logical_xor_out)); }

 // binary max, alias for maximum
 Tensor& max_out(const Tensor& self, const Tensor& other, Tensor& result) {
@@ -393,7 +393,7 @@ struct RegisterPRIVATEUSE1Dispatch {
 // REGISTER_DISPATCH now dispatches an AVX512 kernel to nullptr but registers other dispatches.
 // ALSO_REGISTER_AVX512_DISPATCH should be used for ensuring AVX512 dispatch, among others.
 #ifdef CPU_CAPABILITY_AVX512
-#define REGISTER_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, CPU_CAPABILITY, nullptr)
+#define REGISTER_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, CPU_CAPABILITY, ((void*)(fn) ? nullptr : nullptr))
 #else
 #define REGISTER_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, CPU_CAPABILITY, fn)
 #endif
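The new expansion still registers `nullptr` for AVX512, but the `((void*)(fn) ? nullptr : nullptr)` form mentions `fn`, which keeps compilers from flagging the kernel as defined-but-unused in AVX512 translation units. A standalone sketch of the idiom (illustrative macro, not the ATen one):

```cpp
// Sketch: the expression always yields nullptr, but it references `fn`,
// so the compiler counts the function as used and emits no warning.
#include <cstdio>

// Would trigger -Wunused-function if it were never referenced anywhere.
static void my_kernel() { std::puts("kernel"); }

#define REGISTER_STUB(fn) ((void*)(fn) ? nullptr : nullptr)

int main() {
  void* p = REGISTER_STUB(my_kernel);   // registers nullptr, "uses" my_kernel
  std::printf("registered: %p\n", p);   // always null
  return 0;
}
```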
@@ -856,7 +856,7 @@ namespace {
 /**
  * @brief Computes the optimal matrix chain multiplication order
  *
- * Follows the dynamic programming algorithm from Cormen et al,
+ * Follows the dynamic programming algorithm from Cormen et al.,
  * "Introduction to Algorithms, Third Edition", Chapter 15.2,
  * p. 370-378. Note that the book uses 1-based indexing.
  *
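For reference, the algorithm the comment cites: a sketch of the Cormen et al. Chapter 15.2 dynamic program (standalone, not the ATen implementation), where matrix A_k has dimensions p[k] x p[k+1] in 0-based indexing:

```cpp
// Sketch: m[i][j] holds the minimal scalar-multiplication cost of
// multiplying the chain A_i..A_j; the recurrence tries every split point k.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

int64_t matrix_chain_order(const std::vector<int64_t>& p) {
  const size_t n = p.size() - 1;  // number of matrices
  std::vector<std::vector<int64_t>> m(n, std::vector<int64_t>(n, 0));
  for (size_t len = 2; len <= n; ++len) {         // chain length
    for (size_t i = 0; i + len <= n; ++i) {
      const size_t j = i + len - 1;
      m[i][j] = INT64_MAX;
      for (size_t k = i; k < j; ++k) {            // split point
        const int64_t cost = m[i][k] + m[k + 1][j] + p[i] * p[k + 1] * p[j + 1];
        m[i][j] = std::min(m[i][j], cost);
      }
    }
  }
  return m[0][n - 1];
}

int main() {
  // Three matrices: 10x30, 30x5, 5x60 -> optimal cost 4500 via (A1*A2)*A3.
  std::cout << matrix_chain_order({10, 30, 5, 60}) << "\n";
  return 0;
}
```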
@@ -2,9 +2,9 @@
 // Licensed under the BSD-3-Clause license
 // This is the CPU implementation of the Connectionist Temporal Loss.
 // We mostly follow Graves.
-// 1. Graves et al: http://www.cs.toronto.edu/~graves/icml_2006.pdf
+// 1. Graves et al.: http://www.cs.toronto.edu/~graves/icml_2006.pdf
 // We use the equations from above link, but note that [1] has 1-based indexing and we (of course) use 0-based.
-// Graves et al call the probabilities y, we use log_probs (also calling them inputs)
+// Graves et al. call the probabilities y, we use log_probs (also calling them inputs)
 #define TORCH_ASSERT_ONLY_METHOD_OPERATORS

 #include <ATen/core/Tensor.h>
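Since the CTC recursions operate on log_probs, path probabilities are combined in log space. A standalone sketch of the numerically stable log-add such kernels rely on (not the ATen kernel):

```cpp
// Sketch: log(exp(a) + exp(b)) computed with a max shift so that the
// exponential never overflows; -inf represents probability zero.
#include <algorithm>
#include <cmath>
#include <iostream>
#include <limits>

double log_add(double a, double b) {
  const double neg_inf = -std::numeric_limits<double>::infinity();
  if (a == neg_inf) return b;
  if (b == neg_inf) return a;
  const double m = std::max(a, b);
  return m + std::log1p(std::exp(std::min(a, b) - m));
}

int main() {
  // Combining two path log-probabilities of 0.25 each yields log(0.5).
  std::cout << log_add(std::log(0.25), std::log(0.25)) << "\n";  // ~ -0.6931
  return 0;
}
```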
@@ -499,13 +499,4 @@ Tensor nll_loss2d_symint(const Tensor & self, const Tensor & target, const std::
   return std::get<0>(at::nll_loss2d_forward_symint(self, target, weight, reduction, std::move(ignore_index)));
 }

-// Duplicate of above code for non-symbolic ints. Kept for BC purposes and to minimize breakages.
-static Tensor nll_loss2d(const Tensor & self, const Tensor & target, const std::optional<Tensor>& weight_opt, int64_t reduction, int64_t ignore_index) {
-  // See [Note: hacky wrapper removal for optional tensor]
-  c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
-  const Tensor& weight = *weight_maybe_owned;
-
-  return std::get<0>(at::nll_loss2d_forward_symint(self, target, weight, reduction, ignore_index));
-}
-
 } // namespace at::native
@@ -508,7 +508,7 @@ static inline C10_HOST_DEVICE scalar_t calc_polygamma(scalar_t x, int n) {

 /* References
  * [igam1] "The Digital Library of Mathematical Functions", dlmf.nist.gov
- * [igam2] Maddock et. al., "Incomplete Gamma Functions",
+ * [igam2] Maddock et al., "Incomplete Gamma Functions",
  *         https://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html
  */
@@ -28,18 +28,6 @@ Tensor empty_meta_symint(
     size, dtype_opt, layout_opt, device_opt, pin_memory_opt, memory_format_opt);
 }

-// Kept only for BC with XLA
-static Tensor empty_strided_meta(
-    IntArrayRef size,
-    IntArrayRef stride,
-    std::optional<ScalarType> dtype_opt,
-    std::optional<Layout> layout_opt,
-    std::optional<Device> device_opt,
-    std::optional<bool> pin_memory_opt
-) {
-  return empty_strided_meta_symint(c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), dtype_opt, layout_opt, device_opt, pin_memory_opt);
-}
-
 Tensor empty_strided_meta_symint(
     SymIntArrayRef size,
     SymIntArrayRef stride,
@@ -802,55 +802,6 @@ TORCH_IMPL_FUNC(slow_conv_transpose2d_structured_cpu)
       dilation);
 }

-static std::tuple<Tensor&, Tensor&, Tensor&> slow_conv_transpose2d_backward_out_cpu(const Tensor& grad_output,
-    const Tensor& input,
-    const Tensor& weight,
-    IntArrayRef kernel_size,
-    IntArrayRef stride,
-    IntArrayRef padding,
-    IntArrayRef output_padding,
-    IntArrayRef dilation,
-    Tensor& grad_input,
-    Tensor& grad_weight,
-    Tensor& grad_bias) {
-  if (grad_input.defined()) {
-    slow_conv_transpose2d_backward_out_cpu_template(
-        input,
-        grad_output,
-        grad_input,
-        weight,
-        kernel_size,
-        stride,
-        padding,
-        output_padding,
-        dilation);
-  }
-
-  if (grad_bias.defined()) {
-    at::sum_out(grad_bias, grad_output, IntArrayRef{0, 2, 3});
-  }
-
-  if (grad_weight.defined()) {
-    grad_weight.resize_(weight.sizes(), weight.suggest_memory_format());
-    grad_weight.zero_();
-    slow_conv_transpose2d_acc_grad_parameters_cpu(
-        input,
-        weight,
-        grad_output,
-        grad_weight,
-        grad_bias,
-        kernel_size,
-        stride,
-        padding,
-        output_padding,
-        dilation,
-        1);
-  }
-
-  return std::tuple<Tensor&, Tensor&, Tensor&>(
-      grad_input, grad_weight, grad_bias);
-}
-
 static std::tuple<Tensor, Tensor, Tensor> slow_conv_transpose2d_backward_cpu(
     const Tensor& grad_output,
     const Tensor& input,
@@ -871,58 +871,6 @@ Tensor slow_conv_transpose3d_cpu(
   return output;
 }

-static std::tuple<Tensor&, Tensor&, Tensor&> slow_conv_transpose3d_backward_out_cpu(const Tensor& grad_output,
-    const Tensor& input,
-    const Tensor& weight,
-    IntArrayRef kernel_size,
-    IntArrayRef stride,
-    IntArrayRef padding,
-    IntArrayRef output_padding,
-    IntArrayRef dilation,
-    Tensor& grad_input,
-    Tensor& grad_weight,
-    Tensor& grad_bias) {
-  if (grad_input.defined()) {
-    slow_conv_transpose3d_backward_out_cpu_template(
-        input,
-        grad_output,
-        grad_input,
-        weight,
-        kernel_size,
-        stride,
-        padding,
-        output_padding,
-        dilation);
-  }
-
-  if (grad_weight.defined()) {
-    grad_weight.resize_(weight.sizes());
-    grad_weight.zero_();
-  }
-
-  if (grad_bias.defined()) {
-    grad_bias.resize_({weight.size(1)});
-    grad_bias.zero_();
-  }
-
-  if (grad_weight.defined() || grad_bias.defined()) {
-    slow_conv_transpose3d_acc_grad_parameters_cpu(
-        input,
-        grad_output,
-        grad_weight,
-        grad_bias,
-        kernel_size,
-        stride,
-        padding,
-        output_padding,
-        dilation,
-        1);
-  }
-
-  return std::tuple<Tensor&, Tensor&, Tensor&>(
-      grad_input, grad_weight, grad_bias);
-}
-
 static std::tuple<Tensor, Tensor, Tensor> slow_conv_transpose3d_backward_cpu(
     const Tensor& grad_output,
     const Tensor& input,
@@ -339,12 +339,6 @@ Tensor& gather_out(const Tensor& self, Dimname dim, const Tensor& index, bool sp
 Tensor index_add(const Tensor& self, Dimname dim, const Tensor& index, const Tensor& source, const Scalar &alpha) {
   reportNYIDimnameOverload("index_add");
 }
-static Tensor& index_add_(Tensor& self, Dimname dim, const Tensor& index, const Tensor& source, const Scalar &alpha) {
-  reportNYIDimnameOverload("index_add");
-}
-static Tensor& index_add_out(const Tensor& self, Dimname dim, const Tensor& index, const Tensor& source, const Scalar& alpha, Tensor& result) {
-  reportNYIDimnameOverload("index_add");
-}
 Tensor index_fill(const Tensor& self, Dimname dim, const Tensor& index, const Scalar& source) {
   return at::index_fill(self, dimname_to_position(self, dim), index, source);
 }
@@ -372,21 +366,12 @@ Tensor index_select(const Tensor& self, Dimname dim, const Tensor& index) {
 Tensor scatter(const Tensor& self, Dimname dim, const Tensor& index, const Tensor& source) {
   reportNYIDimnameOverload("scatter");
 }
-static Tensor& scatter_(Tensor& self, Dimname dim, const Tensor& index, const Tensor& source) {
-  reportNYIDimnameOverload("scatter");
-}
 Tensor scatter(const Tensor& self, Dimname dim, const Tensor& index, const Scalar& source) {
   reportNYIDimnameOverload("scatter");
 }
-static Tensor& scatter_(Tensor& self, Dimname dim, const Tensor& index, const Scalar& source) {
-  reportNYIDimnameOverload("scatter");
-}
 Tensor scatter_add(const Tensor& self, Dimname dim, const Tensor& index, const Tensor& source) {
   reportNYIDimnameOverload("scatter_add");
 }
-static Tensor& scatter_add_(Tensor& self, Dimname dim, const Tensor& index, const Tensor& source) {
-  reportNYIDimnameOverload("scatter_add");
-}
 std::tuple<Tensor&, Tensor&> sort_out(const Tensor& self, std::optional<bool> stable, Dimname dim, bool keepdim, Tensor& values, Tensor& indices) {
   reportNYIDimnameOverload("sort");
 }
@@ -2276,11 +2276,6 @@ bool cpu_equal(const Tensor& self, const Tensor& other) {
   return result.load();
 }

-static Tensor value_selecting_reduction_backward(const Tensor& grad, int64_t dim, const Tensor& indices, at::IntArrayRef sizes, bool keepdim) {
-  return at::native::value_selecting_reduction_backward_symint(grad, dim, indices, c10::fromIntArrayRefSlow(sizes), keepdim);
-}
-
-
 // max(dim), min(dim), topk(dim), mode(dim), are examples of reduction
 // functions that select values. value_selecting_reduction_backward is the
 // backward function for those operators; it propagates the grad to the
@@ -301,14 +301,6 @@ void reflection_pad2d_backward_out_template(

 } // namespace

-// TODO: I tihnk this function should be removed since we implement it with
-// TORCH_IMPL_FUNC below
-static Tensor& reflection_pad1d_out_cpu(const Tensor& input, IntArrayRef padding,
-    Tensor& output) {
-  reflection_pad1d_kernel(kCPU, output, input, padding);
-  return output;
-}
-
 Tensor& reflection_pad1d_out_quantized_cpu(const Tensor& input, IntArrayRef padding,
     Tensor& output) {
   TORCH_CHECK(input.qscheme() == kPerTensorAffine, "Only per tensor quantization is supported");
@@ -231,14 +231,6 @@ TensorImpl* resize_impl_cpu_(
   return _resize_impl_(self, size, stride, resize_storage);
 }

-static TensorImpl* resize_impl_meta_(
-    TensorImpl* self,
-    c10::SymIntArrayRef size,
-    at::OptionalSymIntArrayRef stride,
-    bool resize_storage = true) {
-  return _resize_impl_(self, size, stride, resize_storage);
-}
-
 template <typename T>
 const Tensor& _resize_(
     const Tensor& self,
@@ -792,12 +792,6 @@ std::tuple<Tensor, Tensor> max(const Tensor& self, Dimname dim, bool keepdim) {
 std::tuple<Tensor&, Tensor&> max_out(const Tensor& self, Dimname dim, bool keepdim, Tensor& max, Tensor& max_indices) {
   return at::max_out(max, max_indices, self, dimname_to_position(self, dim), keepdim);
 }
-static Tensor argmax(const Tensor& /*self*/, Dimname /*dim*/, bool /*keepdim*/) {
-  reportNYIDimnameOverload("argmax");
-}
-static Tensor argmin(const Tensor& /*self*/, Dimname /*dim*/, bool /*keepdim*/) {
-  reportNYIDimnameOverload("argmin");
-}
 Tensor argsort(const Tensor& /*self*/, Dimname /*dim*/, bool /*keepdim*/) {
   reportNYIDimnameOverload("argsort");
 }
@@ -24,10 +24,6 @@

 namespace at::native {

-static bool is_cuda(const Tensor& self) {
-  return self.is_cuda();
-}
-
 bool is_distributed(const Tensor& self) {
   return false;
 }
@@ -60,18 +56,6 @@ bool is_neg(const Tensor& self) {
   return self.is_neg();
 }

-static bool is_sparse(const Tensor& self) {
-  return self.is_sparse();
-}
-
-static bool is_sparse_csr(const Tensor& self) {
-  return self.is_sparse_csr();
-}
-
-static bool is_quantized(const Tensor& self) {
-  return self.is_quantized();
-}
-
 // True if `self` and `from` have compatible tensor type so that `from`'s
 // TensorImpl can be copied to `self`.
 bool _has_compatible_shallow_copy_type(const Tensor& self, const Tensor& from) {
@@ -1,7 +1,3 @@
-#include <cstdint>
-#include <c10/util/Exception.h>
-#include <c10/core/Scalar.h>
-#include <c10/core/ScalarType.h>
 #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
 #include <ATen/core/Tensor.h>
 #include <ATen/core/NamedTensor.h>
@@ -14,7 +10,6 @@
 #include <ATen/cuda/tunable/TunableGemm.h>
 #include <ATen/native/Resize.h>
 #include <c10/util/MaybeOwned.h>
-#include <ATen/native/cuda/RowwiseScaledMM.h>

 #ifndef AT_PER_OPERATOR_HEADERS
 #include <ATen/Functions.h>
@@ -824,97 +819,24 @@ static bool _scaled_mm_allowed_device() {
 #endif
 }

-namespace{
-
-enum class ScalingType {
-  TensorWise,
-  RowWise,
-  Error
-};
-
-// Validates the scale tensors to scaled_mm
-// And returns the type of scaling/which kernel to use
-ScalingType get_scaling_type(
-    const c10::optional<at::Tensor>& scale_a,
-    const c10::optional<at::Tensor>& scale_b,
-    int64_t dim_m,
-    int64_t dim_n) {
-  TORCH_CHECK(
-      scale_a.has_value() == scale_b.has_value(),
-      "Both scale_a and scale_b must be present or absent.");
-
-  if (scale_a.has_value()) {
-    // Both Per-Tensor and Row-wise scaling expect fp32 tensors
-    TORCH_CHECK(
-        scale_a->scalar_type() == kFloat && scale_b->scalar_type() == kFloat,
-        "Both scale_a and scale_b must be float (fp32) tensors.");
-
-    // Check the singluar scale case for per-tensor scaling
-    if (scale_a->numel() == 1 && scale_b->numel() == 1) {
-      return ScalingType::TensorWise;
-    } else if (scale_a->dim() == 1 && scale_a->size(0) == dim_m) {
-      // Check the per-row scaling case
-#if !defined(USE_ROCM) && !defined(_MSC_VER) || \
-    (defined(USE_ROCM) && ROCM_VERSION >= 60000)
-      TORCH_CHECK(
-          scale_a->dim() == 1 && scale_b->dim() == 1,
-          "Both scale_a and scale_b must be 1-dimensional tensors");
-      TORCH_CHECK(
-          scale_b->size(0) == dim_n,
-          "For row-wise scaling, scale_b must have size ",
-          dim_n,
-          " but got ",
-          scale_b->size(0),
-          ".");
-      TORCH_CHECK(
-          scale_a->is_contiguous() && scale_b->is_contiguous(),
-          "Both scale_a and scale_b must be contiguous.");
-      return ScalingType::RowWise;
-#else
-      TORCH_CHECK(false, "Per-row scaling is not supported for this platform!");
-      return ScalingType::Error;
-#endif // !defined(USE_ROCM) && !defined(_MSC_VER) || (defined(USE_ROCM) &&
-       // ROCM_VERSION >= 60000)
-    } else {
-      TORCH_CHECK(
-          false,
-          "For row-wise scaling, scale_a must be size ",
-          dim_m,
-          " but got ",
-          scale_a->numel(),
-          " and scale_b must be size ",
-          dim_n,
-          " but got ",
-          scale_b->numel(),
-          ".");
-      // Unreachable
-      return ScalingType::RowWise;
-    }
-  }
-  return ScalingType::Error;
-}
-
-} // namespace
-
 // Computes matrix multiply + bias while applying scaling to input and output matrices and computes amax
 // Scales are only applicable when matrices are of Float8 type and assumbed to be equal to 1.0 by default.
 // If output matrix type is 16 or 32-bit type, neither scale_result is applied nor amax is computed.
 // Known limitations:
 //  - Only works if mat1 is row-major and mat2 is column-major
 //  - Only works if matrices sizes are divisible by 32
-//  - If 1-dimensional tensors are used then scale_a should be size = mat1.size(0)
-//    and scale_b should have size = to mat2.size(1)
 //
 //  Arguments:
 //    - `mat1`: the first operand of the matrix multiply, can be type `torch.float8_e4m3fn` or `torch.float8_e5m2`
 //    - `mat2`: the second operand of the matrix multiply, can be type `torch.float8_e4m3fn` or `torch.float8_e5m2`
 //    - `bias`: the bias, can be type `torch.float16` or `torch.bfloat16`
 //    - `out_dtype`: the output dtype, can either be a float8 or a higher precision floating point type
-//    - `scale_a`: a scalar or 1-dimensional tensor with the inverse scale of `mat1`, only needed if `mat1` is a float8 type
-//    - `scale_b`: a scalar or 1-dimensional tensor with the inverse scale of `mat2`, only needed if `mat2` is a float8 type
-//    - `scale_result`: a scalar tensor with the scale of the output, only utilized if the output is a float8 type
+//    - `scale_a`: a scalar tensor with the inverse scale of `mat1`, only needed if `mat1` is a float8 type
+//    - `scale_b`: a scalar tensor with the inverse scale of `mat2`, only needed if `mat2` is a float8 type
+//    - `scale_result`: a scalar tensor with the scale of the output, only set if the output is a float8 type
 //    - `use_fast_accum`: if true, enables fast float8 accumulation
 //    - `out`: a reference to the output tensor
-//    - `amax`: a reference to the amax tensor of the output, only mutated if the output is a float8 type and will be updated inplace
+//    - `amax`: a reference to the amax tensor of the output, only needed if the output is a float8 type and will be updated inplace

 std::tuple<Tensor&, Tensor&>
 _scaled_mm_out_cuda(const Tensor& mat1, const Tensor& mat2,
@@ -933,11 +855,10 @@ _scaled_mm_out_cuda(const Tensor& mat1, const Tensor& mat2,
   TORCH_CHECK(
       mat1.sizes()[1] == mat2.sizes()[0], "mat1 and mat2 shapes cannot be multiplied (",
       mat1.sizes()[0], "x", mat1.sizes()[1], " and ", mat2.sizes()[0], "x", mat2.sizes()[1], ")");
-
-  // Check what type of scaling we are doing based on inputs
-  ScalingType scaling_choice = get_scaling_type(scale_a, scale_b, mat1.size(0), mat2.size(1));
-  TORCH_INTERNAL_ASSERT(scaling_choice != ScalingType::Error, "Scaling type not supported");
-
+  TORCH_CHECK(!scale_a || (scale_a->numel() == 1 && scale_a->scalar_type() == kFloat),
+       "scale_a must be float scalar");
+  TORCH_CHECK(!scale_b || (scale_b->numel() == 1 && scale_b->scalar_type() == kFloat),
+       "scale_b must be a float scalar");
   TORCH_CHECK(!scale_result || (scale_result->numel() == 1 && scale_result->scalar_type() == kFloat),
        "scale_result must be a float scalar");
   TORCH_CHECK(!bias || bias->numel() == mat2.sizes()[1], "Bias must be size ", mat2.sizes()[1],
@@ -980,26 +901,12 @@ _scaled_mm_out_cuda(const Tensor& mat1, const Tensor& mat2,
         {scale_result_, "scale_result", 7}};
     checkAllSameGPU(__func__, targs);
   }
-  // Validation checks have passed lets resize the output to actual size
+
   IntArrayRef mat1_sizes = mat1.sizes();
   IntArrayRef mat2_sizes = mat2.sizes();
   at::native::resize_output(out, {mat1_sizes[0], mat2_sizes[1]});
   at::native::resize_output(amax, {});

-  // We are doing row-wise scaling
-  if (scaling_choice == ScalingType::RowWise) {
-    TORCH_CHECK(out.dtype() == kBFloat16, "Only bf16 high precsion output types are supported for row-wise scaling.");
-    at::cuda::detail::f8f8bf16_rowwise(
-        mat1,
-        mat2,
-        scale_a.value(),
-        scale_b.value(),
-        bias,
-        use_fast_accum,
-        out);
-    return {out, amax};
-  }
-
   cublasCommonArgs args(mat1, mat2, out);
   const auto out_dtype_ = args.result->scalar_type();
   TORCH_CHECK(args.transa == 't' && args.transb == 'n', "Only multiplication of row-major and column-major matrices is supported by cuBLASLt");
|
||||
// Licensed under the BSD-3-Clause license
|
||||
// This is the GPU implementation of the Connectionist Temporal Loss.
|
||||
// We mostly follow Graves.
|
||||
// 1. Graves et al: http://www.cs.toronto.edu/~graves/icml_2006.pdf
|
||||
// 1. Graves et al.: http://www.cs.toronto.edu/~graves/icml_2006.pdf
|
||||
// We use the equations from above link, but note that [1] has 1-based indexing and we (of course) use 0-based.
|
||||
// Graves et al call the probabilities y, we use log_probs (also calling them inputs)
|
||||
// Graves et al. call the probabilities y, we use log_probs (also calling them inputs)
|
||||
// A few optimizations (similar to those here, but also some I didn't take) are described in
|
||||
// 2. Minmin Sun: http://on-demand.gputechconf.com/gtc/2016/presentation/s6383-minmin-sun-speech-recognition.pdf
|
||||
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
|
||||
|
@@ -29,18 +29,10 @@ static inline void maybe_resize_storage_cuda(TensorImpl* self, size_t new_size_b
 inline TensorImpl* resize_impl_cuda_(
     TensorImpl* self,
     IntArrayRef size,
-    at::OptionalIntArrayRef stride,
-    bool device_guard = true) {
+    at::OptionalIntArrayRef stride) {
   if (self->sizes() == size && (!stride || self->strides() == stride)) {
     return self;
   }

-  // NB: We don't need to hold the device guard when calling from TH
-  cuda::OptionalCUDAGuard guard;
-  if (device_guard) {
-    guard.set_index(self->storage().device().index());
-  }
-
   const auto itemsize = self->dtype().itemsize();
   const auto storage_offset = self->storage_offset();
   size_t storage_size = 1;
@ -1,536 +0,0 @@
|
||||
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
|
||||
#include <ATen/Dispatch.h>
|
||||
#include <ATen/core/Tensor.h>
|
||||
#include <ATen/cuda/CUDAContext.h>
|
||||
#include <ATen/cuda/nvrtc_stub/ATenNVRTC.h>
|
||||
|
||||
// Determine if the architecture supports rowwise scaled mm
|
||||
// Currenlty failing on windows with: https://github.com/NVIDIA/cutlass/issues/1571
|
||||
#if !defined(USE_ROCM) && !defined(_WIN32) && defined(CUDA_VERSION) && CUDA_VERSION >= 12000
|
||||
|
||||
#define BUILD_ROWWISE_FP8_KERNEL
|
||||
#endif
|
||||
|
||||
#if defined(BUILD_ROWWISE_FP8_KERNEL)
|
||||
|
||||
// We are going to override the cuTensorMapEncodeTiled driver api with our lazy loader
|
||||
static CUresult CUDAAPI nvrtc_cuTensorMapEncodeTiled(
|
||||
CUtensorMap* tensorMap,
|
||||
CUtensorMapDataType tensorDataType,
|
||||
cuuint32_t tensorRank,
|
||||
void* globalAddress,
|
||||
const cuuint64_t* globalDim,
|
||||
const cuuint64_t* globalStrides,
|
||||
const cuuint32_t* boxDim,
|
||||
const cuuint32_t* elementStrides,
|
||||
CUtensorMapInterleave interleave,
|
||||
CUtensorMapSwizzle swizzle,
|
||||
CUtensorMapL2promotion l2Promotion,
|
||||
CUtensorMapFloatOOBfill oobFill) {
|
||||
return at::globalContext().getNVRTC().cuTensorMapEncodeTiled(
|
||||
tensorMap,
|
||||
tensorDataType,
|
||||
tensorRank,
|
||||
globalAddress,
|
||||
globalDim,
|
||||
globalStrides,
|
||||
boxDim,
|
||||
elementStrides,
|
||||
interleave,
|
||||
swizzle,
|
||||
l2Promotion,
|
||||
oobFill);
|
||||
}
|
||||
|
||||
|
||||
#include <cutlass/core_io.h>
|
||||
#include <cutlass/cutlass.h>
|
||||
#include <cutlass/gemm/device/gemm.h>
|
||||
#include <cutlass/half.h>
|
||||
#include <cutlass/numeric_types.h>
|
||||
#include <cutlass/trace.h>
|
||||
#include <cutlass/util/host_tensor.h>
|
||||
|
||||
// Rename the global function symbol
|
||||
#define cuTensorMapEncodeTiled nvrtc_cuTensorMapEncodeTiled
|
||||
#include <cute/tensor.hpp>
|
||||
#undef cuTensorMapEncodeTiled
|
||||
// Set everything back to normal
|
||||
|
||||
#include <cutlass/gemm/collective/collective_builder.hpp>
|
||||
#include <cutlass/gemm/device/gemm_universal_adapter.h>
|
||||
#include <cutlass/epilogue/collective/collective_builder.hpp>
|
||||
|
||||
#include <cute/atom/mma_atom.hpp>
|
||||
#include <cutlass/gemm/dispatch_policy.hpp>
|
||||
#include <cutlass/gemm/kernel/gemm_universal.hpp>
|
||||
#include <cutlass/util/packed_stride.hpp>
|
||||
|
||||
|
||||
namespace {
|
||||
// Cutlass rowwise kernel
|
||||
template <
|
||||
int TB_M,
|
||||
int TB_N,
|
||||
int TB_K,
|
||||
int TBS_M,
|
||||
int TBS_N,
|
||||
int TBS_K,
|
||||
bool PONG,
|
||||
bool FAST_ACCUM,
|
||||
bool USE_BIAS,
|
||||
typename INPUT_DTYPE,
|
||||
typename BIAS_DTYPE>
|
||||
void f8f8bf16_rowwise_impl(
|
||||
at::Tensor XQ, // FP8
|
||||
at::Tensor WQ, // FP8
|
||||
at::Tensor x_scale,
|
||||
at::Tensor w_scale,
|
||||
c10::optional<at::Tensor> bias,
|
||||
at::Tensor out) {
|
||||
int M = XQ.size(0);
|
||||
int N = WQ.size(1);
|
||||
int K = XQ.size(1);
|
||||
|
||||
TORCH_CHECK(XQ.is_cuda() && XQ.is_contiguous());
|
||||
TORCH_CHECK(
|
||||
WQ.is_cuda() && WQ.ndimension() == 2 && WQ.stride(1) == WQ.size(0) &&
|
||||
WQ.stride(0) == 1);
|
||||

  // auto Y = at::empty({M, N}, XQ.options().dtype(at::kBFloat16));

  using ElementInputA = INPUT_DTYPE;
  using LayoutInputA = cutlass::layout::RowMajor;
  constexpr int AlignmentInputA = 16 / sizeof(ElementInputA);

  using ElementInputB = cutlass::float_e4m3_t;
  using LayoutInputB = cutlass::layout::ColumnMajor;
  constexpr int AlignmentInputB = 16 / sizeof(ElementInputB);

  using ElementBias = BIAS_DTYPE;

  using ElementOutput = cutlass::bfloat16_t;
  using LayoutOutput = cutlass::layout::RowMajor;
  constexpr int AlignmentOutput = 16 / sizeof(ElementOutput);

  using ElementAccumulator = float;
  using ElementComputeEpilogue = float;
  using ArchTag = cutlass::arch::Sm90; // Tag indicating the minimum SM that
                                       // supports the intended feature
  using OperatorClass = cutlass::arch::OpClassTensorOp;
  using TileShape = cute::Shape<
      cute::Int<TB_M>,
      cute::Int<TB_N>,
      cute::Int<TB_K>>; // Threadblock-level tile size
  using ClusterShape = cute::Shape<
      cute::Int<TBS_M>,
      cute::Int<TBS_N>,
      cute::Int<TBS_K>>; // Shape of the threadblocks in a cluster
  using KernelSchedule = cutlass::gemm::collective::
      KernelScheduleAuto; // Kernel to launch based on the default setting in
                          // the Collective Builder

  // Implement rowwise scaling epilogue.
  using XScale = cutlass::epilogue::fusion::Sm90ColBroadcast<
      0,
      TileShape,
      ElementComputeEpilogue,
      cute::Stride<cute::Int<1>, cute::Int<0>, cute::Int<0>>>;

  using WScale = cutlass::epilogue::fusion::Sm90RowBroadcast<
      PONG ? 2 : 1,
      TileShape,
      ElementComputeEpilogue,
      cute::Stride<cute::Int<0>, cute::Int<1>, cute::Int<0>>>;

  using Bias = cutlass::epilogue::fusion::Sm90RowBroadcast<
      PONG ? 2 : 1,
      TileShape,
      ElementBias,
      cute::Stride<cute::Int<0>, cute::Int<1>, cute::Int<0>>>;

  using Accum = cutlass::epilogue::fusion::Sm90AccFetch;

  using Compute0 = cutlass::epilogue::fusion::Sm90Compute<
      cutlass::multiplies,
      ElementComputeEpilogue, // First stage output type.
      ElementComputeEpilogue, // First stage input types.
      cutlass::FloatRoundStyle::round_to_nearest>;

  using EVTCompute0 =
      cutlass::epilogue::fusion::Sm90EVT<Compute0, WScale, Accum>;

  using Compute1 = cutlass::epilogue::fusion::Sm90Compute<
      cutlass::multiplies,
      cute::conditional_t< // Second stage output type.
          USE_BIAS,
          ElementBias,
          ElementOutput>,
      ElementComputeEpilogue, // Second stage input types.
      cutlass::FloatRoundStyle::round_to_nearest>;

  using EVTCompute1 =
      cutlass::epilogue::fusion::Sm90EVT<Compute1, XScale, EVTCompute0>;

  using ComputeBias = cutlass::epilogue::fusion::Sm90Compute<
      cutlass::plus,
      ElementOutput, // Final (optional) stage output type.
      ElementBias, // Final stage input types.
      cutlass::FloatRoundStyle::round_to_nearest>;

  using EVTComputeBias =
      cutlass::epilogue::fusion::Sm90EVT<ComputeBias, Bias, EVTCompute1>;

  using EpilogueEVT =
      cute::conditional_t<USE_BIAS, EVTComputeBias, EVTCompute1>;

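  // Taken together, the visitor tree above computes
  //   out = [bias +] x_scale * (w_scale * accum),
  // where x_scale supplies one value per output row (per row of XQ) and
  // w_scale (and bias) one value per output column.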
  using CollectiveEpilogue =
      typename cutlass::epilogue::collective::CollectiveBuilder<
          cutlass::arch::Sm90,
          cutlass::arch::OpClassTensorOp,
          TileShape,
          ClusterShape,
          cutlass::epilogue::collective::EpilogueTileAuto,
          ElementAccumulator,
          ElementComputeEpilogue,
          ElementOutput,
          LayoutOutput,
          AlignmentOutput,
          ElementOutput,
          LayoutOutput,
          AlignmentOutput,
          cutlass::epilogue::TmaWarpSpecialized,
          EpilogueEVT>::CollectiveOp;

  using DefaultSchedule = cutlass::gemm::KernelTmaWarpSpecialized;
  using PongSchedule = cutlass::gemm::KernelTmaWarpSpecializedPingpong;
  using FastDefaultSchedule =
      cutlass::gemm::KernelTmaWarpSpecializedFP8FastAccum;
  using FastPongSchedule =
      cutlass::gemm::KernelTmaWarpSpecializedPingpongFP8FastAccum;
  using SlowAccum = cute::conditional_t<PONG, PongSchedule, DefaultSchedule>;
  using FastAccum =
      cute::conditional_t<PONG, FastPongSchedule, FastDefaultSchedule>;
  using MainLoopSchedule =
      cute::conditional_t<FAST_ACCUM, FastAccum, SlowAccum>;

  using CollectiveMainloop =
      typename cutlass::gemm::collective::CollectiveBuilder<
          ArchTag,
          OperatorClass,
          ElementInputA,
          LayoutInputA,
          AlignmentInputA,
          ElementInputB,
          LayoutInputB,
          AlignmentInputB,
          ElementAccumulator,
          TileShape,
          ClusterShape,
          cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(
              sizeof(typename CollectiveEpilogue::SharedStorage))>,
          MainLoopSchedule>::CollectiveOp;

  using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
      cute::Shape<int, int, int>,
      CollectiveMainloop,
      CollectiveEpilogue>;

  using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;

  using StrideInputA = typename Gemm::GemmKernel::StrideA;
  using StrideInputB = typename Gemm::GemmKernel::StrideB;
  using StrideOutput = typename Gemm::GemmKernel::StrideC;

  StrideInputA stride_a = cutlass::make_cute_packed_stride(
      StrideInputA{}, cute::make_shape(M, K, 1));
  StrideInputB stride_b = cutlass::make_cute_packed_stride(
      StrideInputB{}, cute::make_shape(N, K, 1));
  StrideOutput stride_output = cutlass::make_cute_packed_stride(
      StrideOutput{}, cute::make_shape(M, N, 1));

  typename Gemm::Arguments arguments{
      cutlass::gemm::GemmUniversalMode::kGemm,
      {M, N, K},
      {reinterpret_cast<ElementInputA*>(XQ.data_ptr()),
       stride_a,
       reinterpret_cast<ElementInputB*>(WQ.data_ptr()),
       stride_b},
      {{}, // Epilogue thread we populate below.
       (ElementOutput*)out.data_ptr<at::BFloat16>(),
       stride_output,
       (ElementOutput*)out.data_ptr<at::BFloat16>(),
       stride_output}};

  if constexpr (USE_BIAS) {
    arguments.epilogue.thread = {
        {reinterpret_cast<ElementBias*>(bias.value().data_ptr())}, // bias
        // compute_1
        {
            {reinterpret_cast<ElementComputeEpilogue*>(
                x_scale.data_ptr())}, // x_scale
            // compute_0
            {
                {reinterpret_cast<ElementComputeEpilogue*>(
                    w_scale.data_ptr())}, // w_scale
                {}, // Accumulator
                {} // Multiplies
            },
            {}, // Multiplies
        },
        {}, // Plus
    };
  } else {
    arguments.epilogue.thread = {
        {reinterpret_cast<ElementComputeEpilogue*>(
            x_scale.data_ptr())}, // x_scale
        // compute_0
        {
            {reinterpret_cast<ElementComputeEpilogue*>(
                w_scale.data_ptr())}, // w_scale
            {}, // Accumulator
            {} // Multiplies
        },
        {}, // Multiplies
    };
  }

  Gemm gemm;

  // Using the arguments, query for extra workspace required for matrix
  // multiplication computation
  size_t workspace_size = Gemm::get_workspace_size(arguments);

  // Allocate workspace memory
  cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);

  // Check whether the problem size is supported or not
  cutlass::Status status = gemm.can_implement(arguments);
  if (status != cutlass::Status::kSuccess) {
    throw std::runtime_error("cutlass cannot implement");
  }

  // Initialize CUTLASS kernel with arguments and workspace pointer
  status = gemm.initialize(arguments, workspace.get());
  if (status != cutlass::Status::kSuccess) {
    throw std::runtime_error("cutlass cannot initialize");
  }

  status = gemm(at::cuda::getCurrentCUDAStream());
  if (status != cutlass::Status::kSuccess) {
    throw std::runtime_error(
        std::string("cutlass cannot run") +
        cutlass::cutlassGetStatusString(status));
  }
  C10_CUDA_KERNEL_LAUNCH_CHECK();
}

// FP8 Rowwise Cutlass kernel dispatch.
enum class KernelMode { Small, Large, Default };

KernelMode get_kernel_mode(at::Tensor XQ, at::Tensor WQ) {
  auto M = XQ.size(0);
  auto K = XQ.size(1);
  auto N = WQ.size(0);
  // Use a large kernel if at least two shapes are large.
  bool use_large_kernel =
      ((M >= 2048 && K >= 2048) || (M >= 2048 && N >= 2048) ||
       (K >= 2048 && N >= 2048));
  if (M <= 128 || N <= 128) {
    return KernelMode::Small;
  } else if (use_large_kernel) {
    return KernelMode::Large;
  } else {
    return KernelMode::Default;
  }
}
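// dispatch_fp8_rowwise_kernel below instantiates f8f8bf16_rowwise_impl with
// <TB_M, TB_N, TB_K, TBS_M, TBS_N, TBS_K, PONG, ...>: Small problems use a
// 64x128x128 threadblock tile in a 2x1x1 cluster, Large problems a
// 128x128x128 tile in a 2x1x1 cluster with the pingpong schedule, and the
// Default case a 128x128x128 tile in a 1x2x1 cluster.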

template <typename InputDType, bool FastAccum, bool UseBias, typename BiasDType>
void dispatch_fp8_rowwise_kernel(
    at::Tensor XQ,
    at::Tensor WQ,
    at::Tensor x_scale,
    at::Tensor w_scale,
    c10::optional<at::Tensor> bias,
    at::Tensor out) {
  KernelMode kernel = get_kernel_mode(XQ, WQ);
  if (kernel == KernelMode::Small) {
    return f8f8bf16_rowwise_impl<
        64,
        128,
        128,
        2,
        1,
        1,
        false,
        FastAccum,
        UseBias,
        InputDType,
        BiasDType>(XQ, WQ, x_scale, w_scale, bias, out);
  } else if (kernel == KernelMode::Large) {
    return f8f8bf16_rowwise_impl<
        128,
        128,
        128,
        2,
        1,
        1,
        true,
        FastAccum,
        UseBias,
        InputDType,
        BiasDType>(XQ, WQ, x_scale, w_scale, bias, out);
  } else {
    return f8f8bf16_rowwise_impl<
        128,
        128,
        128,
        1,
        2,
        1,
        false,
        FastAccum,
        UseBias,
        InputDType,
        BiasDType>(XQ, WQ, x_scale, w_scale, bias, out);
  }
}

} // namespace

#endif // !defined(USE_ROCM)

namespace at::cuda::detail {
void f8f8bf16_rowwise(
    at::Tensor XQ, // FP8
    at::Tensor WQ, // FP8
    at::Tensor x_scale, // FP32
    at::Tensor w_scale, // FP32
    c10::optional<at::Tensor> bias, // BF16
    bool use_fast_accum,
    at::Tensor& out) {
#if defined(BUILD_ROWWISE_FP8_KERNEL)
  // Check datatypes.
  TORCH_CHECK(
      x_scale.dtype() == at::kFloat && w_scale.dtype() == at::kFloat,
      "Scale tensors must be float32.");
  if (bias.has_value()) {
    TORCH_CHECK(
        bias.value().dtype() == at::kFloat ||
            bias.value().dtype() == at::kBFloat16,
        "Bias type must be bfloat16 or float32 if provided.");
  }
  // Extract problem size.
  int M = XQ.size(0);
  int N = WQ.size(1);
  int K = XQ.size(1);

  bool use_bias = bias.has_value();
  bool bf16_bias = use_bias && bias.value().dtype() == at::kBFloat16;

  // Templatize based on input dtype.
  bool use_e5m2 = XQ.dtype() == at::kFloat8_e5m2;
  TORCH_CHECK(
      WQ.dtype() == at::kFloat8_e4m3fn,
      "For row-wise scaling the second input is required to be a float8_e4m3fn dtype.");

  if (use_bias) {
    if (bf16_bias) {
      if (use_fast_accum) {
        if (use_e5m2) {
          return dispatch_fp8_rowwise_kernel<
              cutlass::float_e5m2_t,
              true,
              true,
              cutlass::bfloat16_t>(XQ, WQ, x_scale, w_scale, bias, out);
        } else {
          return dispatch_fp8_rowwise_kernel<
              cutlass::float_e4m3_t,
              true,
              true,
              cutlass::bfloat16_t>(XQ, WQ, x_scale, w_scale, bias, out);
        }
      } else {
        if (use_e5m2) {
          return dispatch_fp8_rowwise_kernel<
              cutlass::float_e5m2_t,
              false,
              true,
              cutlass::bfloat16_t>(XQ, WQ, x_scale, w_scale, bias, out);
        } else {
          return dispatch_fp8_rowwise_kernel<
              cutlass::float_e4m3_t,
              false,
              true,
              cutlass::bfloat16_t>(XQ, WQ, x_scale, w_scale, bias, out);
        }
      }
    } else {
      if (use_fast_accum) {
        if (use_e5m2) {
          return dispatch_fp8_rowwise_kernel<
              cutlass::float_e5m2_t,
              true,
              true,
              float>(XQ, WQ, x_scale, w_scale, bias, out);
        } else {
          return dispatch_fp8_rowwise_kernel<
              cutlass::float_e4m3_t,
              true,
              true,
              float>(XQ, WQ, x_scale, w_scale, bias, out);
        }
      } else {
        if (use_e5m2) {
          return dispatch_fp8_rowwise_kernel<
              cutlass::float_e5m2_t,
              false,
              true,
              float>(XQ, WQ, x_scale, w_scale, bias, out);
        } else {
          return dispatch_fp8_rowwise_kernel<
              cutlass::float_e4m3_t,
              false,
              true,
              float>(XQ, WQ, x_scale, w_scale, bias, out);
        }
      }
    }
  } else {
    if (use_fast_accum) {
      if (use_e5m2) {
        return dispatch_fp8_rowwise_kernel<
            cutlass::float_e5m2_t,
            true,
            false,
            float>(XQ, WQ, x_scale, w_scale, bias, out);
      } else {
        return dispatch_fp8_rowwise_kernel<
            cutlass::float_e4m3_t,
            true,
            false,
            float>(XQ, WQ, x_scale, w_scale, bias, out);
      }
    } else {
      if (use_e5m2) {
        return dispatch_fp8_rowwise_kernel<
            cutlass::float_e5m2_t,
            false,
            false,
            float>(XQ, WQ, x_scale, w_scale, bias, out);
      } else {
        return dispatch_fp8_rowwise_kernel<
            cutlass::float_e4m3_t,
            false,
            false,
            float>(XQ, WQ, x_scale, w_scale, bias, out);
      }
    }
  }
#else // BUILD_ROWWISE_FP8_KERNEL
  TORCH_CHECK(false, "Rowwise scaling is not currently supported on your device");
#endif
}

} // namespace at::cuda::detail
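For orientation, here is a minimal sketch of how a rowwise-scaled FP8 GEMM of this shape is typically driven from Python. This is an illustration only: `torch._scaled_mm` is a private API whose exact signature (and whether it returns an extra amax tensor) has varied across PyTorch versions, so treat the call and keyword names below as assumptions, not a stable interface.

```python
import torch

# Hypothetical sketch; names and shapes chosen to mirror the kernel above.
M, K, N = 256, 512, 128
x = torch.randn(M, K, device="cuda")
w = torch.randn(N, K, device="cuda")

# One scale per row of x and per row of w, matching the rowwise epilogue.
fp8_max = torch.finfo(torch.float8_e4m3fn).max
x_scale = x.abs().amax(dim=1, keepdim=True) / fp8_max
w_scale = w.abs().amax(dim=1, keepdim=True) / fp8_max
xq = (x / x_scale).to(torch.float8_e4m3fn)
wq = (w / w_scale).to(torch.float8_e4m3fn)

out = torch._scaled_mm(
    xq,
    wq.t(),  # the kernel expects the second operand column-major
    scale_a=x_scale,      # [M, 1], one scale per output row
    scale_b=w_scale.t(),  # [1, N], one scale per output column
    out_dtype=torch.bfloat16,
    use_fast_accum=True,
)
```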
@@ -1,15 +0,0 @@
#pragma once
#include <ATen/core/TensorBase.h>
#include <c10/util/Optional.h>


namespace at::cuda::detail {
TORCH_API void f8f8bf16_rowwise(
    at::Tensor XQ, // FP8
    at::Tensor WQ, // FP8
    at::Tensor x_scale, // FP32
    at::Tensor w_scale, // FP32
    c10::optional<at::Tensor> bias, // BF16
    bool use_fast_accum,
    at::Tensor& out);
} // at::cuda::detail
@@ -12,7 +12,7 @@

namespace at::native::onednn {

-TORCH_API dnnl::memory make_onednn_memory(
+TORCH_XPU_API dnnl::memory make_onednn_memory(
    dnnl::memory::desc md,
    dnnl::engine& engine,
    void* ptr);
@@ -21,7 +21,7 @@ TORCH_API dnnl::memory make_onednn_memory(
bool set_onednn_verbose(int level);

// GpuEngineManager singleton
-struct TORCH_API GpuEngineManager {
+struct TORCH_XPU_API GpuEngineManager {
  static GpuEngineManager& Instance(); // Singleton

  dnnl::engine& get_engine(const Device& device) {
@@ -51,7 +51,7 @@ struct TORCH_API GpuEngineManager {
};

// GpuStreamManager singleton
-struct TORCH_API GpuStreamManager {
+struct TORCH_XPU_API GpuStreamManager {
  static GpuStreamManager& Instance(); // Singleton

  dnnl::stream get_stream() {
@@ -1,5 +1,6 @@
#include <ATen/native/SpectralOpsUtils.h>
+#include <ATen/native/mps/MPSGraphSonomaOps.h>
#include <ATen/native/mps/MPSGraphVenturaOps.h>
#include <ATen/native/mps/OperationUtils.h>

#ifndef AT_PER_OPERATOR_HEADERS
@@ -38,6 +38,19 @@ static void addc_mul_div_out_mps(const Tensor& self,
  };

  @autoreleasepool {
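    // A gather pass is only needed when a tensor is not contiguous in any
    // supported memory format; the kernel then writes into a contiguous
    // scratch tensor (output_) and copies back into the real output at the end.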
+    bool executeGatherOpOnSelf =
+        !(self.is_contiguous(MemoryFormat::Contiguous) || self.is_contiguous(MemoryFormat::ChannelsLast) ||
+          self.is_contiguous(MemoryFormat::ChannelsLast3d));
+    Tensor output_ = at::empty_like(self, executeGatherOpOnSelf ? MemoryFormat::Contiguous : MemoryFormat::Preserve);
+
+    bool executeGatherOpOnFirstTensor =
+        !(tensor1.is_contiguous(MemoryFormat::Contiguous) || tensor1.is_contiguous(MemoryFormat::ChannelsLast) ||
+          tensor1.is_contiguous(MemoryFormat::ChannelsLast3d));
+
+    bool executeGatherOpOnSecondTensor =
+        !(tensor2.is_contiguous(MemoryFormat::Contiguous) || tensor2.is_contiguous(MemoryFormat::ChannelsLast) ||
+          tensor2.is_contiguous(MemoryFormat::ChannelsLast3d));
+
    string key = op_name + getTensorsStringKey({self, tensor1, tensor2});

    auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
@@ -72,10 +85,12 @@ static void addc_mul_div_out_mps(const Tensor& self,
    });

    // Inputs as placeholders
-    Placeholder selfPlaceholder = Placeholder(cachedGraph->inputTensor, self);
-    Placeholder tensor1Placeholder = Placeholder(cachedGraph->firstTensor, tensor1);
-    Placeholder tensor2Placeholder = Placeholder(cachedGraph->secondTensor, tensor2);
-    Placeholder outputPlaceholder = Placeholder(cachedGraph->outputTensor, output);
+    Placeholder selfPlaceholder = Placeholder(cachedGraph->inputTensor, self, nil, executeGatherOpOnSelf);
+    Placeholder tensor1Placeholder = Placeholder(cachedGraph->firstTensor, tensor1, nil, executeGatherOpOnFirstTensor);
+    Placeholder tensor2Placeholder =
+        Placeholder(cachedGraph->secondTensor, tensor2, nil, executeGatherOpOnSecondTensor);
+    Placeholder outputPlaceholder =
+        Placeholder(cachedGraph->outputTensor, executeGatherOpOnSelf ? output_ : output, nil, false);
    MPSScalar value_scalar = getMPSScalar(value_opt, self.scalar_type());

    // Create dictionary of inputs and outputs
@@ -87,6 +102,10 @@ static void addc_mul_div_out_mps(const Tensor& self,
    };

    runMPSGraph(mpsStream, cachedGraph->graph(), feeds, outputPlaceholder);

+    if (executeGatherOpOnSelf) {
+      output.copy_(output_);
+    }
  }
}

@@ -5426,7 +5426,7 @@
  autogen: slice_backward.out

# NB: This op exists to back the implementation of reverse view_funcs for various views (chunk,
-# slice.Tensor, split_with_sizes, et. al.). Currently, these are only used during fake-ification
+# slice.Tensor, split_with_sizes, et al.). Currently, these are only used during fake-ification
# of PT2 graph input subclass instances that are views. This means:
# * This op shouldn't really show up in eager mode (so e.g. XLA shouldn't have to implement it)
# * This op shouldn't show up in a PT2 graph (so a PT2 backend shouldn't have to implement it)
@@ -237,24 +237,5 @@ Tensor std_quantized_cpu(
  return result;
}

-static Tensor std_quantized_cpu(
-    const Tensor& self,
-    DimnameList dim,
-    const std::optional<Scalar>& correction,
-    bool keepdim) {
-  return std_quantized_cpu(
-      self, dimnames_to_positions(self, dim), correction, keepdim);
-}
-
-static Tensor& std_out_quantized_cpu(
-    Tensor& result,
-    const Tensor& self,
-    DimnameList dim,
-    const std::optional<Scalar>& correction,
-    bool keepdim) {
-  return std_out_quantized_cpu(
-      self, dimnames_to_positions(self, dim), correction, keepdim, result);
-}
-
} // namespace native
} // namespace at
@@ -606,15 +606,6 @@ Tensor log_softmax_backward_sparse_cpu(
  return grad_input;
}

-static Tensor _sparse_softmax(const Tensor& input_, const int64_t dim_) {
-  auto result = [&]() {
-    NoNamesGuard guard;
-    return at::_sparse_softmax(input_, dim_, false);
-  }();
-  namedinference::propagate_names(result, input_);
-  return result;
-}
-
Tensor _sparse_softmax(const Tensor& input_, const int64_t dim_, std::optional<ScalarType> dtype) {
  auto result = [&]() {
    NoNamesGuard guard;
@@ -190,8 +190,8 @@ struct AttentionKernel {
    unsigned long long dropout_batch_head_rng_offset = 0;
    float dropout_prob = 0.0f;
    at::PhiloxCudaState rng_engine_inputs = at::PhiloxCudaState(0, 0);
-    int64_t* extragraph_offset;
-    int64_t* seed;
+    int64_t* extragraph_offset = nullptr;
+    int64_t* seed = nullptr;

    // Moves pointers to what we should process
    // Returns "false" if there is no work to do
@@ -4,7 +4,7 @@

namespace at {

-struct TORCH_API XPUGeneratorImpl : public GeneratorImpl {
+struct TORCH_XPU_API XPUGeneratorImpl : public GeneratorImpl {
  // Constructors
  XPUGeneratorImpl(DeviceIndex device_index = -1);
  ~XPUGeneratorImpl() override = default;
@@ -25,7 +25,13 @@ std::string XPUHooks::showConfig() const {

int32_t XPUHooks::getGlobalIdxFromDevice(const at::Device& device) const {
  TORCH_CHECK(device.is_xpu(), "Only the XPU device type is expected.");
+#ifdef _WIN32
+  TORCH_CHECK(
+      false,
+      "Default context is not supported on XPU on Windows, so the global index of the ATen device cannot be determined.");
+#else
  return at::xpu::getGlobalIdxFromDevice(device.index());
+#endif
}

Generator XPUHooks::getXPUGenerator(DeviceIndex device_index) const {
@@ -38,7 +44,13 @@ const Generator& XPUHooks::getDefaultXPUGenerator(
}

Device XPUHooks::getDeviceFromPtr(void* data) const {
+#ifdef _WIN32
+  TORCH_CHECK(
+      false,
+      "Default context is not supported on XPU on Windows, so the ATen device of a pointer cannot be determined.");
+#else
  return at::xpu::getDeviceFromPtr(data);
+#endif
}

c10::DeviceIndex XPUHooks::getNumGPUs() const {
@@ -1,58 +0,0 @@
import torch
from torch.utils.data import Dataset


def collate_sentences_lm(samples):
    if len(samples) == 0:
        return {}

    id = torch.LongTensor([s["id"] for s in samples])
    src_tokens = torch.stack([s["source"] for s in samples], 0)
    tgt_tokens = torch.stack([s["target"] for s in samples], 0)
    ntokens = len(samples) * len(samples[0]["target"])
    src_lengths = torch.LongTensor([len(samples[0]["source"])] * len(samples))

    batch = {
        "id": id,
        "nsentences": len(samples),
        "ntokens": ntokens,
        "input": src_tokens,
        "target": tgt_tokens,
    }
    return batch


class BenchmarkLMDataset(Dataset):
    """
    Dataset to benchmark a translation like seq2seq task.
    Args:
        vocab_size (int, optional): size of the vocabulary (default 10000).
        max_source_positions (int, optional): max number of tokens in the
            source sentence (default: 1024).
        total_samples (int, optional): the total number of rows in the
            dataset (default: 10000).
    """

    def __init__(
        self,
        vocab_size=10000,
        max_source_positions=1024,
        total_samples=10000,
    ):
        self.vocab_size = vocab_size
        self.max_source_positions = max_source_positions
        self.total_samples = total_samples
        self.sizes = [self.max_source_positions] * self.total_samples

    def __getitem__(self, index):
        length = self.sizes[index]
        source = torch.randint(1, self.vocab_size, (length,))
        target = source.clone()
        return {
            "id": index,
            "source": source,
            "target": target,
        }

    def __len__(self):
        return self.total_samples
@@ -1,296 +0,0 @@
import argparse
import math
import os
import time

from benchmark_dataset import BenchmarkLMDataset, collate_sentences_lm

import torch
import torch.nn as nn
from torch.distributed import rpc

from torch.distributed.pipeline.sync import Pipe
from torch.distributed.pipeline.sync.utils import partition_model
from torch.optim import Adam
from torch.utils.data import DataLoader


def sizeof_fmt(num, suffix="B"):
    for unit in ["", "Ki", "Mi", "Gi", "Ti"]:
        if abs(num) < 1024.0:
            return f"{num:3.2f}{unit}B"
        num /= 1024.0


def init_random_seed(seed: int):
    import numpy

    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    numpy.random.seed(seed)


iteration_count = 0


class EmbeddingLayer(nn.Embedding):
    def __init__(self, ntoken, ninp, initrange):
        super().__init__(ntoken, ninp)
        self.ninp = ninp
        nn.init.uniform_(self.weight, -initrange, initrange)

    def forward(self, src):
        return super().forward(src) * math.sqrt(self.ninp)


class PositionalEncodingLayer(nn.Module):
    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)

        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(
            torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model)
        )
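        # Standard sinusoidal encoding:
        #   PE(pos, 2i)   = sin(pos / 10000^(2i / d_model))
        #   PE(pos, 2i+1) = cos(pos / 10000^(2i / d_model))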
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0).transpose(0, 1)
        self.register_buffer("pe", pe)

    def forward(self, x):
        x = x + self.pe[: x.size(0), :]
        return self.dropout(x)


class TransformerDecoderLayer(nn.TransformerEncoderLayer):
    """Though this class inherits from torch.nn.TransformerEncoderLayer,
    it functions as a decoder in this model"""

    def __init__(self, ninp, nhead, nhid, droupout):
        super().__init__(ninp, nhead, nhid, droupout)
        self.src_mask = None

    def forward(self, src):
        global iteration_count
        iteration_count += 1

        if self.src_mask is None or self.src_mask.size(0) != len(src):
            device = src.device
            mask = nn.Transformer.generate_square_subsequent_mask(len(src)).to(device)
            self.src_mask = mask

        return super().forward(src, self.src_mask)


class LinearLayer(nn.Linear):
    def __init__(self, ninp, ntoken, initrange):
        super().__init__(ninp, ntoken)
        nn.init.zeros_(self.bias)
        nn.init.uniform_(self.weight, -initrange, initrange)


class TransformerLMSequential(nn.Sequential):
    """A small language model based on the design of GPT-2 using nn.Sequential
    for compatibility with Pipe"""

    def __init__(self, ntokens, ninp, nhead, nhid, dropout, initrange, ndecoder):
        layers = [
            EmbeddingLayer(ntokens, ninp, initrange),
            PositionalEncodingLayer(ninp, dropout),
        ]
        for _ in range(ndecoder):
            layers.append(TransformerDecoderLayer(ninp, nhead, nhid, dropout))

        layers.append(LinearLayer(ninp, ntokens, initrange))
        super().__init__(*layers)


def make_model(args, device, ntokens):
    ninp = 2048  # embedding dimension
    nhid = (
        2048  # the dimension of the feedforward network model in nn.TransformerEncoder
    )
    nhead = 32  # the number of heads in the multiheadattention models
    dropout = 0
    initrange = 0.1
    ndecoder = args.num_decoder_layers

    model = TransformerLMSequential(
        ntokens, ninp, nhead, nhid, dropout, initrange, ndecoder
    ).to(device)

    criterion = nn.CrossEntropyLoss()
    lr = 0.01  # learning rate

    def make_adam(model):
        return Adam(model.parameters(), lr=lr)

    optimizer = make_adam

    return model, criterion, optimizer


def train(lm_dataloader, model, criterion, optimizer, vocab_size, args):
    model.train()

    vocab_size = 10000
    total_loss = 0.0
    start_time = time.time()
    word_counter = 0

    optimizer = optimizer(model)

    def get_first_device(model):
        if model.devices:
            return model.devices[0]
        else:
            return torch.cuda.current_device()

    def get_last_device(model):
        if model.devices:
            return model.devices[-1]
        else:
            return torch.cuda.current_device()

    print(
        f"Number of parameters for model: {sum(p.numel() for p in model.parameters())}"
    )
    for i, batch in enumerate(lm_dataloader):
        bi = batch["input"]
        if args.max_batch and i > args.max_batch:
            break
        optimizer.zero_grad()
        try:
            tmp = batch["input"].to(get_first_device(model))
            output = model(tmp).local_value()
        except Exception as e:
            raise RuntimeError(
                f"training failed on {torch.distributed.get_rank()}"
            ) from e

        target = batch["target"].to(get_last_device(model))
        output = output.to(target.device)

        loss = criterion(output.view(-1, vocab_size), target.view(-1))
        loss.backward()
        del target
        del output

        torch.nn.utils.clip_grad_value_(model.parameters(), 0.05)
        optimizer.step()

        total_loss += loss.item()
        log_interval = 1
        word_counter += batch["ntokens"]
        if i % log_interval == 0 and i > 0:
            cur_loss = total_loss / log_interval
            elapsed = time.time() - start_time
            print(
                f"| batch {i:5d} | wps {word_counter / elapsed:5.2f} | loss {cur_loss:5.2f} | ppl {math.exp(cur_loss):8.2f}"
            )
            word_counter = 0
            total_loss = 0
            start_time = time.time()

    print("Peak memory usage for GPUs: ", end="")
    for i in range(len(model.devices)):
        print(
            f"cuda:{i}: {sizeof_fmt(torch.cuda.memory_stats(i)['allocated_bytes.all.peak'])}, ",
            end="",
        )
    print()


def generate_balance(num_devices, num_layers):
    balance = []
    layers_assigned = 0
    for i in range(num_devices):
        x = (num_layers - layers_assigned) / (num_devices - i)
        if x.is_integer():
            balance.append(int(x))
            layers_assigned += x
        else:
            balance.append(math.ceil(x))
            layers_assigned += math.ceil(x)
    return balance
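
# Worked example: generate_balance(4, 10) takes the ceiling (or exact value)
# of remaining_layers / remaining_devices at each step -> [3, 3, 2, 2],
# which sums to the requested 10 layers.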

def make_model_and_data(args, device):
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    vocab_size = 10000
    model, criterion, optimizer = make_model(args, device, vocab_size)
    lm_dataset = BenchmarkLMDataset()
    lm_dataloader = DataLoader(
        lm_dataset,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=0,
        collate_fn=collate_sentences_lm,
    )
    return {
        "model": model,
        "criterion": criterion,
        "optimizer": optimizer,
        "data": lm_dataloader,
        "vocab_size": vocab_size,
    }


def bench_single_process(args):
    os.environ.update({"MASTER_ADDR": args.host})
    os.environ.update({"MASTER_PORT": "10638"})

    rpc.init_rpc(
        "worker",
        rank=0,
        world_size=1,
    )

    num_devices = torch.cuda.device_count() if torch.cuda.is_available() else 1
    num_devices = min(args.num_devices, num_devices)
    assert num_devices > 0
    init_random_seed(0)
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

    blob = make_model_and_data(args, None)
    model = blob["model"]

    balance = generate_balance(num_devices, len(model))
    model = partition_model(model, balance)
    p = Pipe(model, chunks=args.chunks, checkpoint=args.checkpoint)
    del model
    del blob["model"]

    train(
        blob["data"], p, blob["criterion"], blob["optimizer"], blob["vocab_size"], args
    )


parser = argparse.ArgumentParser(description="benchmark")
parser.add_argument("--host", "-o", type=str, default="localhost", help="hostname")
parser.add_argument(
    "--chunks", type=int, default=4, help="number of microbatches per batch"
)
parser.add_argument("--batch-size", type=int, default=8, help="size of a batch")
parser.add_argument("--max-batch", type=int, default=10, help="Max number of batches")
parser.add_argument(
    "--num-decoder-layers",
    type=int,
    default=10,
    help="Number of decoder layers in the model",
)
parser.add_argument(
    "--checkpoint",
    default="except_last",
    choices=["always", "except_last", "never"],
    help="Checkpointing strategy for pipe",
)
parser.add_argument(
    "--num-devices", type=int, default=4, help="Number of GPU devices to use"
)

if __name__ == "__main__":
    args = parser.parse_args()
    print(f"Running benchmark with args: {args}")
    bench_single_process(args)
@@ -218,7 +218,7 @@ tf_mixnet_l,pass,6



-tinynet_a,pass,6
+tinynet_a,fail_accuracy,6



@@ -178,7 +178,7 @@ hf_T5_base,eager_fail_to_run,0



-hf_T5_generate,fail_to_run,5
+hf_T5_generate,pass,5



@@ -178,7 +178,7 @@ hf_T5_base,eager_fail_to_run,0



-hf_T5_generate,fail_to_run,5
+hf_T5_generate,pass,5



@@ -182,7 +182,7 @@ phlippe_densenet,pass,6



-phlippe_resnet,fail_accuracy,6
+phlippe_resnet,pass,6



@@ -182,7 +182,7 @@ phlippe_densenet,pass,6



-phlippe_resnet,fail_accuracy,6
+phlippe_resnet,pass,6



@@ -218,7 +218,7 @@ tf_mixnet_l,pass,6



-tinynet_a,pass,6
+tinynet_a,fail_accuracy,6



@@ -178,7 +178,7 @@ hf_T5_base,eager_fail_to_run,0



-hf_T5_generate,fail_to_run,5
+hf_T5_generate,pass,5



@@ -6,7 +6,7 @@ adv_inception_v3,pass,6



-beit_base_patch16_224,pass,7
+beit_base_patch16_224,fail_accuracy,7



@@ -178,7 +178,7 @@ hf_T5_base,eager_fail_to_run,0



-hf_T5_generate,fail_to_run,5
+hf_T5_generate,pass,5



@@ -6,7 +6,7 @@ adv_inception_v3,pass,6



-beit_base_patch16_224,pass,7
+beit_base_patch16_224,fail_accuracy,7


@@ -143,6 +143,7 @@ CI_SKIP_DYNAMIC_BATCH_ONLY = {
    "pyhpc_equation_of_state",
    "pyhpc_turbulent_kinetic_energy",
    "detectron2_fcos_r_50_fpn",
+   "hf_T5_generate",
}

# These models currently fail accuracy with eager Adam optimizer
@@ -3974,9 +3975,12 @@ def run(runner, args, original_dir=None):
        assert "cuda" in args.devices, "Quantization requires CUDA device."
        assert args.bfloat16, "Quantization requires dtype bfloat16."
        try:
            from .torchao_backend import setup_baseline, torchao_optimize_ctx
-       except ImportError:
-           from torchao_backend import setup_baseline, torchao_optimize_ctx
+       except ImportError:
+           from userbenchmark.dynamo.dynamobench.torchao_backend import (
+               setup_baseline,
+               torchao_optimize_ctx,
+           )

        setup_baseline()
        baseline_ctx = functools.partial(
@@ -1,45 +0,0 @@
import numpy as np

from utils import NUM_LOOP_ITERS

from caffe2.python import core, workspace

workspace.GlobalInit(["caffe2"])


def add_blob(ws, blob_name, tensor_size):
    blob_tensor = np.random.randn(*tensor_size).astype(np.float32)
    ws.FeedBlob(blob_name, blob_tensor)


class C2SimpleNet:
    """
    This module constructs a net with the 'op_name' operator. The net consists
    of a series of such operators.
    It initializes the workspace with input blobs equal to the number of
    parameters needed for the op.
    Provides a forward method to run the net niter times.
    """

    def __init__(self, op_name, num_inputs=1, debug=False):
        self.input_names = []
        self.net = core.Net("framework_benchmark_net")
        self.input_names = [f"in_{i}" for i in range(num_inputs)]
        for i in range(num_inputs):
            add_blob(workspace, self.input_names[i], [1])
        self.net.AddExternalInputs(self.input_names)
        op_constructor = getattr(self.net, op_name)
        op_constructor(self.input_names)
        self.output_name = self.net._net.op[-1].output
        print(f"Benchmarking op {op_name}:")
        for _ in range(NUM_LOOP_ITERS):
            output_name = self.net._net.op[-1].output
            self.input_names[-1] = output_name[0]
            assert len(self.input_names) == num_inputs
            op_constructor(self.input_names)
        workspace.CreateNet(self.net)
        if debug:
            print(self.net._net)

    def forward(self, niters):
        workspace.RunNet(self.net, niters, False)
@@ -1,6 +1,5 @@
import argparse

-from C2Module import C2SimpleNet
from pt_wrapper_module import WrapperModule

from SimpleAddModule import add_tensors_loop, SimpleAddModule
@@ -19,9 +18,6 @@ buck run @mode/opt <path-to-framework_overhead_benchmark>:framework_overhead_ben
    --add-op --graph-mode --eager-mode (Runs both graph mode and eager mode)
buck run @mode/opt <path-to-framework_overhead_benchmark>:framework_overhead_benchmark --
    --add-op --graph-mode (Runs only graph mode)
-To run C2 benchmark:
-buck run @mode/opt <path-to-framework_overhead_benchmark>:framework_overhead_benchmark --
-    --add-op --benchmark-c2-net
"""

SUPPORTED_OPS = {"add_op"}
@@ -49,39 +45,22 @@ def benchmark_simple_fn(args, config, module_config, module_type, result):
        module_type: Type of the module to be wrapped. e.g. SimpleAddModule for add op.
        result: dictionary instance to be populated with the benchmark result (latency per iter).
    """
-   benchmark_c2_net = args.benchmark_c2_net
    print(f"Benchmarking {module_type.__name__}")
-   if benchmark_c2_net:
-       op_name = module_config.c2_op
-       num_inputs = module_config.num_params
-       module = C2SimpleNet(op_name, num_inputs=num_inputs, debug=args.debug)
-       latency_per_iter_ms = benchmark_module(config, module)
-       result[op_name] = latency_per_iter_ms
-   else:
-       f_name = (
-           module_config.pt_fn.__name__
-           + ":Num Operands="
-           + str(module_config.num_params)
-       )
-       graph_mode_str = "Graph mode" + ":" + str(module_config.graph_mode)
-       result_key = ",".join((f_name, graph_mode_str))
-       module = WrapperModule(module_type, module_config, args.debug, args.save)
-       latency_per_iter_ms = benchmark_module(
-           config, module, args.use_throughput_benchmark
-       )
-       result[result_key] = latency_per_iter_ms
+   f_name = (
+       module_config.pt_fn.__name__ + ":Num Operands=" + str(module_config.num_params)
+   )
+   graph_mode_str = "Graph mode" + ":" + str(module_config.graph_mode)
+   result_key = ",".join((f_name, graph_mode_str))
+   module = WrapperModule(module_type, module_config, args.debug, args.save)
+   latency_per_iter_ms = benchmark_module(
+       config, module, args.use_throughput_benchmark
+   )
+   result[result_key] = latency_per_iter_ms


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--op", default="add_op", dest="op", type=str)
-   parser.add_argument(
-       "--benchmark-c2-net",
-       "--benchmark_c2_net",
-       default=False,
-       dest="benchmark_c2_net",
-       action="store_true",
-   )
    parser.add_argument(
        "--use-throughput-benchmark",
        "--use_throughput_benchmark",
@@ -107,10 +86,6 @@ def main():
    if args.op not in SUPPORTED_OPS:
        print(f"Op {args.op} is not supported: Supported ops are:{SUPPORTED_OPS}")
        return
-   assert not (
-       args.benchmark_c2_net and args.use_throughput_benchmark
-   ), "Benchmarking of C2 net via throughput benchmarking is not yet supported"
-
    num_warmup_iters = args.num_warmup_iters
    num_iters = args.num_iters
    config = BenchmarkConfig(num_warmup_iters, num_iters)
@@ -120,10 +95,7 @@ def main():
    result = {}
    if args.op == "add_op":
        num_params = 2
-       if args.benchmark_c2_net:
-           module_config = ModuleConfig(None, "Sum", num_params, None)
-       else:
-           module_config = ModuleConfig(add_tensors_loop, None, num_params, graph_mode)
+       module_config = ModuleConfig(add_tensors_loop, None, num_params, graph_mode)
        benchmark_simple_fn(args, config, module_config, SimpleAddModule, result)
    print_results(result)
@@ -219,7 +219,7 @@ class BatchRNN(nn.Module):


class Lookahead(nn.Module):
-   # Wang et al 2016 - Lookahead Convolution Layer for Unidirectional Recurrent Neural Networks
+   # Wang et al., 2016 - Lookahead Convolution Layer for Unidirectional Recurrent Neural Networks
    # input shape - sequence, batch, feature - TxNxH
    # output shape - same as input
    def __init__(self, n_features, context):
@@ -1,6 +1,6 @@
-# PyTorch/Caffe2 Operator Micro-benchmarks
+# PyTorch Operator Micro-benchmarks

-This benchmark suite provides a systemic way to measure the performance of operators for a wide range of inputs. The generated benchmark data fully characterizes the performance of an operator in terms of execution time and the efficiency of the PyTorch/Caffe2 frameworks used.
+This benchmark suite provides a systemic way to measure the performance of operators for a wide range of inputs. The generated benchmark data fully characterizes the performance of an operator in terms of execution time and the efficiency of the PyTorch framework used.

## Features

@@ -8,7 +8,7 @@ Key Features:

1\. Language used: Python

-2\. Supported Frameworks: PyTorch and Caffe2
+2\. Supported Frameworks: PyTorch

3\. Supported PyTorch mode: eager and JIT

@@ -49,7 +49,7 @@ python -m benchmark_all_test
```

## Code to support `torch.add` in the benchmark
-The following example shows the code to support `torch.add` with 27 different tests. In the subpages of this wiki, we'll step through the complete flow of adding PyTorch and Caffe2 operators to the benchmark suite. Existing benchmarks for operators are in `pt` and `c2` directories and we highly recommend putting your new operators in those locations.
+The following example shows the code to support `torch.add` with 27 different tests. In the subpages of this wiki, we'll step through the complete flow of adding PyTorch operators to the benchmark suite. Existing benchmarks for operators are in the `pt` directory and we highly recommend putting your new operators in those locations.

```python
add_short_configs = op_bench.cross_product_configs(
@@ -77,7 +77,7 @@ op_bench.generate_pt_test(add_short_configs, AddBenchmark)
The output is intended to be a human readable format. Here is an example output for `torch.add`:
```
# ----------------------------------------
-# PyTorch/Caffe2 Operator Micro-benchmarks
+# PyTorch Operator Micro-benchmarks
# ----------------------------------------
# Tag : short

@@ -146,7 +146,7 @@ python -m pt.add_test --tag-filter long
```

## Adding New Operators to the Benchmark Suite
-In the previous sections, we gave several examples to show how to run the already available operators in the benchmark suite. In the following sections, we'll step through the complete flow of adding PyTorch and Caffe2 operators to the benchmark suite. Existing benchmarks for operators are in `pt` and `c2` directories and we highly recommend putting your new operators in those directories as well.
+In the previous sections, we gave several examples to show how to run the already available operators in the benchmark suite. In the following sections, we'll step through the complete flow of adding PyTorch operators to the benchmark suite. Existing benchmarks for operators are in the `pt` directory and we highly recommend putting your new operators in those directories as well.

### Add a New PyTorch Operator
Let's say you want to measure the execution time of the following operator:
@@ -260,55 +260,6 @@ if __name__ == "__main__":
```
That's it. You just added a new operator to the benchmark suite!


-### Add a New Caffe2 Operator
-The steps to add a new Caffe2 operator are the same as those for a PyTorch operator. The code below shows how to add the Caffe2 `Add` operator:
-```python
-import operator_benchmark as op_bench
-from caffe2.python import core
-
-add_long_configs = op_bench.cross_product_configs(
-    M=[8, 64, 128],
-    N=range(2, 10, 3),
-    K=[2 ** x for x in range(0, 3)],
-    tags=["long"]
-)
-
-add_short_configs = op_bench.config_list(
-    attrs=[
-        [8, 16, 32],
-        [16, 16, 64],
-        [64, 64, 128],
-    ],
-    attr_names=["M", "N", "K"],
-    tags=["short"],
-)
-
-class AddBenchmark(op_bench.Caffe2BenchmarkBase):
-
-    def init(self, M, N, K):
-        self.input_one = self.tensor(M, N, K)
-        self.input_two = self.tensor(M, N, K)
-        self.output = self.tensor(M, N, K)
-        self.set_module_name("add")
-
-    def forward(self):
-        op = core.CreateOperator(
-            "Add", [self.input_one, self.input_two], self.output, **self.args
-        )
-
-        return op
-
-op_bench.generate_c2_test(add_long_configs + add_short_configs, AddBenchmark)
-
-if __name__ == "__main__":
-    op_bench.benchmark_runner.main()
-```
-There are two things worth mentioning in this code:
-* `self.tensor` is a helper function which takes shapes and returns a Caffe2 blob. It is designed to make the tensor creation step easier compared to the standard Caffe2 way.
-* `generate_c2_test` is used to register Caffe2 tests with the benchmark.


### Add a List of Operators
In the previous sections, we introduced the steps required to add a single operator to the benchmark suite. There are scenarios where you want to extend the benchmark suite with a list of operators which can share the same inputs. For example, to benchmark `abs` and `acos` operators, you can use the same set of inputs for both; a sketch of what that looks like follows.
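As a rough sketch of what such a shared-input registration looks like (based on the `op_bench.op_list` and `generate_pt_tests_from_op_list` helpers used elsewhere in this suite; treat the exact names as illustrative):

```python
import torch

import operator_benchmark as op_bench

# One config set shared by every op in the list.
unary_ops_configs = op_bench.config_list(
    attrs=[[128, 128]],
    attr_names=["M", "N"],
    tags=["short"],
)

# Each entry pairs a test name with the callable it benchmarks.
unary_ops_list = op_bench.op_list(
    attr_names=["op_name", "op_func"],
    attrs=[
        ["abs", torch.abs],
        ["acos", torch.acos],
    ],
)


class UnaryOpBenchmark(op_bench.TorchBenchmarkBase):
    def init(self, M, N, op_func):
        self.inputs = {"input": torch.rand(M, N)}
        self.op_func = op_func

    def forward(self, input):
        return self.op_func(input)


op_bench.generate_pt_tests_from_op_list(
    unary_ops_list, unary_ops_configs, UnaryOpBenchmark
)

if __name__ == "__main__":
    op_bench.benchmark_runner.main()
```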
@@ -416,37 +367,3 @@ The example below shows the relevant code for that:
    self.input_one = torch.rand(M, N, K, requires_grad=True)
generate_pt_gradient_test(long_configs + short_configs, TorchAddBenchmark)
```
-#### For Caffe2 Gradient Ops
-To add Caffe2 gradient ops, we need to implement a new backward method in the benchmark class:
-```python
-class AddBenchmark(op_bench.Caffe2BenchmarkBase):
-
-    def init(self, M, N, K):
-        self.input_one = self.tensor(M, N, K)
-        self.input_two = self.tensor(M, N, K)
-        self.input_one_grad = self.tensor(M, N, K)
-        self.input_two_grad = self.tensor(M, N, K)
-        self.output = self.tensor(M, N, K)
-        self.set_module_name("add")
-
-    def forward(self):
-        op = core.CreateOperator(
-            "Add", [self.input_one, self.input_two], self.output, **self.args
-        )
-
-        return op
-
-    def backward(self):
-        grad_op = core.CreateOperator(
-            "AddGradient",
-            [self.output, self.input_one, self.input_two],
-            [self.input_one_grad, self.input_two_grad], **self.args
-        )
-
-        return grad_op
-
-op_bench.generate_c2_gradient_test(long_configs + short_configs,AddBenchmark)
-```
-After the class is implemented, we need to register the tests with the `generate_c2_gradient_test` function.

This concludes the overview of the operator benchmark suite.
@@ -1,202 +0,0 @@
from collections import namedtuple

import benchmark_utils
from benchmark_test_generator import _register_test

from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace

from .benchmark_core import TestConfig

"""Caffe2 performance microbenchmarks.

This module contains Caffe2-specific functionalities for performance
microbenchmarks.
"""


class Caffe2BenchmarkBase:
    """This is a base class used to create Caffe2 operator benchmarks"""

    tensor_index = 0
    test_index = 0

    def __init__(self):
        self.args = {}
        self.user_provided_name = None
        self._num_inputs_require_grads = 0
        self._pass_count = 0

    def _set_backward_test(self, is_backward):
        pass

    def _device_option(self, device):
        """This method is used to set the device option."""
        if device not in ["cuda", "cpu"]:
            raise ValueError("Missing attrs in configs")

        if "cuda" in device:
            self.dev = core.DeviceOption(caffe2_pb2.CUDA, 0)
        else:
            self.dev = core.DeviceOption(caffe2_pb2.CPU)
        return self.dev

    def tensor(self, shapes, dtype="float32", device="cpu"):
        """A wrapper function to create a C2 tensor filled with random data.
        The name/label of the tensor is returned and it is available
        throughout the benchmark execution phase.
        Args:
            shapes: int or a sequence of ints defining the shapes of the tensor
            dtype: use the dtypes from numpy
                (https://docs.scipy.org/doc/numpy/user/basics.types.html)
        Return:
            C2 tensor of dtype
        """
        return self.feed_tensor(benchmark_utils.numpy_random(dtype, *shapes), device)

    def feed_tensor(self, tensor, device="cpu"):
        """Similar to tensor, but can supply any data compatible with FeedBlob"""
        blob_name = "blob_" + str(Caffe2BenchmarkBase.tensor_index)
        dev = self._device_option(device)
        with core.DeviceScope(dev):
            workspace.FeedBlob(blob_name, tensor)
        Caffe2BenchmarkBase.tensor_index += 1
        return blob_name

    def module_name(self):
        """this is used to label the operator being benchmarked"""
        if self.user_provided_name:
            return self.user_provided_name
        return self.__class__.__name__

    def set_module_name(self, name):
        self.user_provided_name = name

    def _value_to_str(self, value):
        """if value is bool, we will convert it to 0 and 1"""
        ret = value
        if type(value) == bool:
            ret = int(value)
        return str(ret)

    def test_name(self, name_type="long", **kargs):
        """this is a globally unique name which can be used to
        label a specific test
        """
        if name_type == "long":
            test_name_str = []
            for key in kargs:
                value = kargs[key]
                test_name_str.append(key + self._value_to_str(value))
            name = (self.module_name() + "_" + "_".join(test_name_str)).replace(" ", "")
        elif name_type == "short":
            # this is used to generate a test name based on a unique index
            name = "_".join(
                [self.module_name(), "test", str(Caffe2BenchmarkBase.test_index)]
            )
            Caffe2BenchmarkBase.test_index += 1
        return name

    def extract_inputs_tuple(self):
        # add a dummy function here to match the interface of TorchBenchmarkBase
        pass


class Caffe2OperatorTestCase:
    """This class includes all the information needed to benchmark an operator.
    op_bench: a user-defined class (child of Caffe2BenchmarkBase)
        which includes inputs and the operator, etc.
    test_config: a namedtuple including test_name, input_shape, tag, run_backward.
        When run_backward is false, the run_forward method will be executed; otherwise
        the run_backward method will be executed.
    """

    def __init__(self, op_bench, test_config):
        self.op_bench = op_bench
        self.test_config = test_config
        self.framework = "Caffe2"

    def run_forward(self, num_runs, print_per_iter=False, cuda_sync=False):
        """Run the forward path of an operator in a loop"""
        with core.DeviceScope(self.op_bench.dev):
            op = self.op_bench.forward()
            if not workspace.RunOperatorMultiple(op, num_runs):
                raise ValueError(f"Unable to run operator test case: {self.test_name}")

    def run_backward(self, num_runs, print_per_iter=False):
        """Run the backward path of an operator in a loop"""
        with core.DeviceScope(self.op_bench.dev):
            op = self.op_bench.backward()
            if not workspace.RunOperatorMultiple(op, num_runs):
                raise ValueError(
                    f"Unable to run operator gradient test case: {self.test_name}"
                )

    def _print_per_iter(self):
        pass


def create_caffe2_op_test_case(op_bench, test_config):
    test_case = Caffe2OperatorTestCase(op_bench, test_config)
    test_config = test_case.test_config
    op = test_case.op_bench
    func_name = f"{op.module_name()}{test_case.framework}{str(test_config)}"
    return (func_name, test_case)


OpMeta = namedtuple(
    "OpMeta",
    "op_type num_inputs input_dims input_types \
        output_dims num_outputs args device",
)


def generate_c2_test_from_ops(ops_metadata, bench_op, tags):
    """
    This function is used to generate Caffe2 tests based on the metadata
    of operators. The metadata includes seven fields which are 1) op_type:
    the name of the operator. 2) num_inputs: the number of input blobs.
    3) input_dims: a dictionary which includes the shapes of the input blobs.
    4) input_types: a list which includes the types of input blobs. 5)
    output_dims: a dictionary which includes the shapes of output blobs.
    6) num_outputs: the number of output blobs. 7) args: a dictionary which
    includes the args for the operator.
    Here is an example to show the metadata for the WeightedSum operator
    op_type : WeightedSum
    num_inputs: 4
    input_dims: {'0': [256], '1': [1], '2': [256], '3': [1]}
    input_types: ['float', 'float', 'float', 'float']
    output_dims: {'0': [256]}
    num_outputs: 4
    args: {}
    TODO(mingzhe0908): introduce device and add it to the benchmark name
    """
    for op_metadata in ops_metadata:
        tmp_attrs = OpMeta(
            op_metadata.op_type,
            op_metadata.num_inputs,
            op_metadata.input_dims,
            op_metadata.input_types,
            op_metadata.output_dims,
            op_metadata.num_outputs,
            op_metadata.args,
            op_metadata.device,
        )
        test_attrs = tmp_attrs._asdict()
        op = bench_op()
        op.init(**test_attrs)
        test_name = op.test_name("short")
        input_config = f"Shapes: {op_metadata.input_dims}, Type: {op_metadata.input_types}, Args: {str(op_metadata.args)}"
        test_config = TestConfig(test_name, input_config, tags, run_backward=False)
        if op is not None:
            create_caffe2_op_test_case(op, test_config)


def generate_c2_test(configs, c2_bench_op):
    """This function creates a Caffe2 op test based on the given operator"""
    return _register_test(configs, c2_bench_op, create_caffe2_op_test_case, False)


def generate_c2_gradient_test(configs, c2_bench_op):
    """This function creates a Caffe2 op gradient test based on the given operator"""
    return _register_test(configs, c2_bench_op, create_caffe2_op_test_case, True)
@ -13,6 +13,7 @@ import torch
|
||||
# needs to be imported after torch
|
||||
import torch.utils.cpp_extension as cpp_extension # noqa: F401
|
||||
|
||||
|
||||
"""Performance microbenchmarks.
|
||||
|
||||
This module contains core functionalities for performance microbenchmark tests.
|
||||
@ -50,7 +51,7 @@ def _create_test(
|
||||
"""Create tests with the benchmark backend.
|
||||
Args:
|
||||
bench_op_obj: an object which instantiated from a subclass of
|
||||
Caffe2BenchmarkBase/TorchBenchmarkBase which includes tensor
|
||||
TorchBenchmarkBase which includes tensor
|
||||
creation and operator execution.
|
||||
orig_test_attrs: a dictionary includes test configs.
|
||||
tags: a attribute in test config to filter inputs
|
||||
@ -75,7 +76,7 @@ def _build_test(
|
||||
"""Generate PyTorch/Caffe2 tests of operators with different inputs.
|
||||
Args:
|
||||
configs: a dictionary that has the input shapes
|
||||
bench_op: a subclass of Caffe2BenchmarkBase/TorchBenchmarkBase which includes tensor
|
||||
bench_op: a subclass of TorchBenchmarkBase which includes tensor
|
||||
creation and operator execution
|
||||
OperatorTestCase: a named tuple to save the metadata of an test
|
||||
run_backward: a bool parameter indicating backward path
|
||||
@ -233,9 +234,7 @@ class BenchmarkRunner:
|
||||
)
|
||||
)
|
||||
else:
|
||||
if test_case.framework == "PyTorch":
|
||||
print(f"# Mode: {'JIT' if self.use_jit else 'Eager'}")
|
||||
|
||||
print(f"# Mode: {'JIT' if self.use_jit else 'Eager'}")
|
||||
print(
|
||||
f"# Name: {test_case.test_config.test_name}\n# Input: {test_case.test_config.input_config}"
|
||||
)
|
||||
@ -283,8 +282,7 @@ class BenchmarkRunner:
        and the execution time is reported
        """
        test_case.run_forward(num_runs=1, print_per_iter=False, cuda_sync=False)
-       if test_case.framework == "PyTorch":
-           test_case._output_mean()
+       test_case._output_mean()
        backward_time = timeit.timeit(
            functools.partial(test_case.run_backward, iters, print_per_iter), number=1
        )
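The backward timing above uses a standard-library idiom: functools.partial freezes the call's arguments so timeit.timeit can invoke it as a zero-argument callable, and number=1 runs it exactly once and returns the elapsed seconds. A self-contained illustration (the work function is invented for this example):

import functools
import timeit

def work(n, verbose=False):
    # Stand-in for test_case.run_backward: burn some CPU time.
    total = sum(i * i for i in range(n))
    if verbose:
        print(f"sum of squares below {n}: {total}")
    return total

# number=1 invokes the callable once and returns elapsed seconds,
# matching how backward_time is measured in the hunk above.
elapsed = timeit.timeit(functools.partial(work, 1_000_000), number=1)
print(f"one run took {elapsed:.4f}s")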
@ -357,9 +355,6 @@ class BenchmarkRunner:
        # Currently, this is a sub-string matching.
        op_test_config = test_case.test_config

-       if self.args.framework:
-           frameworks = benchmark_utils.process_arg_list(self.args.framework)
-
        operators = (
            benchmark_utils.process_arg_list(self.args.operators)
            if self.args.operators
@ -370,7 +365,6 @@ class BenchmarkRunner:
        if (
            self._check_keep(op_test_config.test_name, self.args.test_name)
            and self._check_keep_list(test_case.op_bench.module_name(), operators)
-           and self._check_keep_list(test_case.framework, frameworks)
            and self._check_operator_first_char(
                test_case.op_bench.module_name(), self.operator_range
            )
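The filter chain above is plain predicate composition, and "sub-string matching" means a test is kept when the requested name occurs inside the candidate name. A minimal sketch of that behavior; these helpers are re-implemented here as assumptions about what the _check_keep* methods do, not copies of them:

def check_keep(test_flag, cmd_flag):
    # Keep everything when no filter was given; otherwise substring-match.
    return cmd_flag is None or cmd_flag in test_flag

def check_keep_list(value, wanted):
    # Keep when no list was given or the value appears in it.
    return wanted is None or value in wanted

print(check_keep("add_M8_N16_K32", "add"))     # True: "add" is a substring
print(check_keep_list("add", ["add", "mul"]))  # True: explicit allow-list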
@ -92,7 +92,7 @@ def parse_args():
    parser.add_argument(
        "--omp-num-threads",
        "--omp_num_threads",
-       help="Number of OpenMP threads used in PyTorch/Caffe2 runtime",
+       help="Number of OpenMP threads used in PyTorch runtime",
        default=None,
        type=int,
    )
@ -100,7 +100,7 @@ def parse_args():
    parser.add_argument(
        "--mkl-num-threads",
        "--mkl_num_threads",
-       help="Number of MKL threads used in PyTorch/Caffe2 runtime",
+       help="Number of MKL threads used in PyTorch runtime",
        default=None,
        type=int,
    )
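Note the two option strings per argument above: argparse lets one destination carry several aliases, so both the dashed and underscored spellings set args.omp_num_threads. A tiny standalone demonstration:

import argparse

parser = argparse.ArgumentParser()
# Both spellings are registered for the same option; argparse derives the
# destination name omp_num_threads from the first long option string.
parser.add_argument(
    "--omp-num-threads", "--omp_num_threads", default=None, type=int
)

args = parser.parse_args(["--omp_num_threads", "8"])
print(args.omp_num_threads)  # 8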
@ -135,12 +135,6 @@ def parse_args():
        help="Only run the forward path of operators",
    )

-   parser.add_argument(
-       "--framework",
-       help="Comma-delimited list of frameworks to test (Caffe2, PyTorch)",
-       default="Caffe2,PyTorch",
-   )
-
    parser.add_argument(
        "--device",
        help="Run tests on the provided architecture (cpu, cuda)",
@ -160,8 +154,7 @@ def parse_args():
        # "Modifications to the environment variables after the program has started,
        # even if modified by the program itself, are ignored by the OpenMP implementation"
        benchmark_utils.set_omp_threads(args.omp_num_threads)
-       if benchmark_utils.is_pytorch_enabled(args.framework):
-           torch.set_num_threads(args.omp_num_threads)
+       torch.set_num_threads(args.omp_num_threads)
    if args.mkl_num_threads:
        benchmark_utils.set_mkl_threads(args.mkl_num_threads)

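The quoted OpenMP caveat is why the harness also calls torch.set_num_threads: once the process is running, changes to OMP_NUM_THREADS are ignored by the OpenMP runtime, while the torch API takes effect immediately. A hedged sketch of the same pattern (the env-var write mirrors what a set_omp_threads helper would plausibly do; torch.set_num_threads/get_num_threads are standard torch APIs):

import os

import torch

def set_benchmark_threads(num_threads: int) -> None:
    # Writing the env var this late mainly helps child processes; the
    # current OpenMP runtime has already read it, per the caveat above.
    os.environ["OMP_NUM_THREADS"] = str(num_threads)
    # Takes effect immediately for intra-op parallelism in this process.
    torch.set_num_threads(num_threads)

set_benchmark_threads(4)
print(torch.get_num_threads())  # expected: 4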
@ -319,14 +319,6 @@ def op_list(**configs):
    return generated_configs


-def is_caffe2_enabled(framework_arg):
-    return "Caffe2" in framework_arg
-
-
-def is_pytorch_enabled(framework_arg):
-    return "PyTorch" in framework_arg
-
-
def get_operator_range(chars_range):
    """Generates the characters from chars_range inclusive."""
    if chars_range == "None" or chars_range is None:
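For intuition, here is a minimal sketch consistent with that docstring: a range such as "a-c" expands to the characters a, b, c, and None disables the filter. This is an assumption about the behavior; the real implementation in the harness may handle more forms (e.g. comma-separated ranges):

def get_operator_range(chars_range):
    # None (or the literal string "None") means no first-character filter.
    if chars_range == "None" or chars_range is None:
        return None
    start, _, end = chars_range.partition("-")
    # Inclusive expansion: "a-c" -> {'a', 'b', 'c'}.
    return {chr(c) for c in range(ord(start), ord(end) + 1)}

print(get_operator_range("a-c"))  # {'a', 'b', 'c'}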
@ -1,49 +0,0 @@
import benchmark_caffe2 as op_bench_c2
from benchmark_caffe2 import Caffe2BenchmarkBase  # noqa: F401

import operator_benchmark as op_bench
from caffe2.python import core


"""Microbenchmarks for the element-wise Add operator. Supports both Caffe2/PyTorch."""

# Configs for the C2 add operator
add_long_configs = op_bench.cross_product_configs(
    M=[8, 64, 128],
    N=range(2, 10, 3),
    K=[2**x for x in range(0, 3)],
    dtype=["int", "float"],
    tags=["long"],
)


add_short_configs = op_bench.config_list(
    attrs=[
        [8, 16, 32, "int"],
        [16, 16, 64, "float"],
        [64, 64, 128, "int"],
    ],
    attr_names=["M", "N", "K", "dtype"],
    tags=["short"],
)


class AddBenchmark(op_bench_c2.Caffe2BenchmarkBase):
    def init(self, M, N, K, dtype):
        self.input_one = self.tensor([M, N, K], dtype)
        self.input_two = self.tensor([M, N, K], dtype)
        self.output = self.tensor([M, N, K], dtype)
        self.set_module_name("add")

    def forward(self):
        op = core.CreateOperator(
            "Add", [self.input_one, self.input_two], self.output, **self.args
        )
        return op


op_bench_c2.generate_c2_test(add_long_configs + add_short_configs, AddBenchmark)


if __name__ == "__main__":
    op_bench.benchmark_runner.main()
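With the Caffe2 variant deleted, the same benchmark survives only through the PyTorch path. For reference, a hedged sketch of an equivalent test under the op_bench API (the TorchBenchmarkBase inputs/forward pattern and generate_pt_test follow the framework's published usage; the exact shapes here are illustrative, not the removed file's literal replacement):

import operator_benchmark as op_bench
import torch

"""Microbenchmark for the element-wise Add operator, PyTorch only."""

add_short_configs = op_bench.config_list(
    attrs=[
        [8, 16, 32],
        [16, 16, 64],
        [64, 64, 128],
    ],
    attr_names=["M", "N", "K"],
    tags=["short"],
)


class AddBenchmark(op_bench.TorchBenchmarkBase):
    def init(self, M, N, K):
        # Random float inputs; the benchmark harness reads self.inputs.
        self.inputs = {
            "input_one": torch.rand(M, N, K),
            "input_two": torch.rand(M, N, K),
        }
        self.set_module_name("add")

    def forward(self, input_one, input_two):
        return torch.add(input_one, input_two)


op_bench.generate_pt_test(add_short_configs, AddBenchmark)


if __name__ == "__main__":
    op_bench.benchmark_runner.main()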
Some files were not shown because too many files have changed in this diff.