CI upgradeapalooza bionic->focal, gcc7->gcc9, clang7->clang10 (#105260)

Support for Ubuntu 18.04 (Bionic) ended in April 2023, see https://ubuntu.com/blog/ubuntu-18-04-eol-for-devices

Neither gcc-7 nor clang-7 is fully compatible with C++17, so update the minimum tested gcc to gcc-9 and clang to clang-10.
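For illustration only, a minimal C++17 snippet (hypothetical, not taken from the PyTorch sources) of the kind that older toolchains cannot build: for instance, gcc-7 ships a libstdc++ without the C++17 `<charconv>` header, so `std::from_chars` is unavailable even with `-std=c++17`:
```
#include <charconv>     // C++17 header; absent from gcc-7's libstdc++
#include <optional>
#include <string_view>
#include <system_error>

// if-init statements, structured bindings, std::optional and std::from_chars
// are all C++17 features exercised here.
std::optional<int> to_int(std::string_view s) {
  int value = 0;
  if (auto [ptr, ec] = std::from_chars(s.data(), s.data() + s.size(), value);
      ec == std::errc() && ptr == s.data() + s.size()) {
    return value;  // whole string parsed successfully
  }
  return std::nullopt;
}
```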

Note: OpenMP support is broken in Focal's `clang-9`, so move up to `clang-10`.
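For context, the kind of minimal OpenMP smoke test (hypothetical, not part of this change) that a CI compiler is expected to build and run correctly, e.g. with `clang++-10 -fopenmp`:
```
#include <cstdio>
#include <vector>
#include <omp.h>

int main() {
  std::vector<int> data(1000, 1);
  long sum = 0;
  // Trivial parallel reduction; this fails to build or misbehaves at runtime
  // if the toolchain's OpenMP support is broken.
  #pragma omp parallel for reduction(+ : sum)
  for (size_t i = 0; i < data.size(); ++i) {
    sum += data[i];
  }
  std::printf("threads=%d sum=%ld\n", omp_get_max_threads(), sum);
  return sum == 1000 ? 0 : 1;
}
```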

- Suppress `-Wuninitialized` in complex_test, as gcc-11 emits a seemingly false-positive warning (a self-contained sketch of the suppression appears after this list):
```
In file included from /home/malfet/git/pytorch/pytorch/c10/test/util/complex_test.cpp:1:
/home/malfet/git/pytorch/pytorch/c10/test/util/complex_test_common.h: In member function ‘virtual void memory::TestMemory_ReinterpretCast_Test::TestBody()’:
/home/malfet/git/pytorch/pytorch/c10/test/util/complex_test_common.h:38:25: warning: ‘z’ is used uninitialized [-Wuninitialized]
   38 |     c10::complex<float> zz = *reinterpret_cast<c10::complex<float>*>(&z);
      |                         ^~
/home/malfet/git/pytorch/pytorch/c10/test/util/complex_test_common.h:37:25: note: ‘z’ declared here
   37 |     std::complex<float> z(1, 2);
      |                         ^
```
- Downgrade `libnccl` to 2.15 for UCC testing, as 2.16 is incompatible and causes a crash during initialization
- Install `pango` from the conda environment so that the `doctr` torchbench tests pass, as the one available in the system is too new for conda
- Skip some functorch tests when run with Python 3.8 + dynamo, see https://github.com/pytorch/pytorch/issues/107173
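Regarding the `-Wuninitialized` suppression above: the actual fix (visible in the `complex_test.cpp` hunk further down) wraps the test header include in a GCC diagnostic pragma. A self-contained sketch of the same pattern, using a hypothetical `MyComplex` as a stand-in for `c10::complex<float>`:
```
#include <complex>
#include <cstdio>

// The real test reinterpret_casts a std::complex<float> to c10::complex<float>;
// gcc-11 flags the cast source as "used uninitialized" even though z was just
// constructed, so the warning is silenced for this translation unit.
#ifdef __GNUC__
#pragma GCC diagnostic ignored "-Wuninitialized"
#endif

struct MyComplex {  // stand-in for c10::complex<float>
  float re;
  float im;
};

float real_part_via_cast() {
  std::complex<float> z(1, 2);
  MyComplex zz = *reinterpret_cast<MyComplex*>(&z);
  return zz.re;
}

int main() {
  std::printf("%f\n", real_part_via_cast());
  return 0;
}
```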
Pull Request resolved: https://github.com/pytorch/pytorch/pull/105260
Approved by: https://github.com/huydhn, https://github.com/Skylion007, https://github.com/ZainRizvi, https://github.com/seemethere
Author: Nikita Shulga
Date: 2023-08-15 03:07:01 +00:00
Committed by: PyTorch MergeBot
Parent: 9440a8cbec
Commit: 574442ba01
16 changed files with 292 additions and 267 deletions

View File

@ -46,9 +46,7 @@ if [[ "$image" == *xla* ]]; then
exit 0
fi
if [[ "$image" == *-bionic* ]]; then
UBUNTU_VERSION=18.04
elif [[ "$image" == *-focal* ]]; then
if [[ "$image" == *-focal* ]]; then
UBUNTU_VERSION=20.04
elif [[ "$image" == *-jammy* ]]; then
UBUNTU_VERSION=22.04
@ -88,7 +86,7 @@ _UCC_COMMIT=7cb07a76ccedad7e56ceb136b865eb9319c258ea
# configuration, so we hardcode everything here rather than do it
# from scratch
case "$image" in
pytorch-linux-bionic-cuda12.1-cudnn8-py3-gcc9)
pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9)
CUDA_VERSION=12.1.0
CUDNN_VERSION=8
ANACONDA_PYTHON_VERSION=3.10
@ -102,7 +100,7 @@ case "$image" in
CONDA_CMAKE=yes
TRITON=yes
;;
pytorch-linux-bionic-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks)
pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks)
CUDA_VERSION=12.1.0
CUDNN_VERSION=8
ANACONDA_PYTHON_VERSION=3.10
@ -117,7 +115,7 @@ case "$image" in
TRITON=yes
INDUCTOR_BENCHMARKS=yes
;;
pytorch-linux-bionic-cuda11.8-cudnn8-py3-gcc9)
pytorch-linux-focal-cuda11.8-cudnn8-py3-gcc9)
CUDA_VERSION=11.8.0
CUDNN_VERSION=8
ANACONDA_PYTHON_VERSION=3.10
@ -131,7 +129,7 @@ case "$image" in
CONDA_CMAKE=yes
TRITON=yes
;;
pytorch-linux-bionic-cuda11.8-cudnn8-py3-gcc7)
pytorch-linux-focal-cuda11.8-cudnn8-py3-gcc7)
CUDA_VERSION=11.8.0
CUDNN_VERSION=8
ANACONDA_PYTHON_VERSION=3.10
@ -145,7 +143,7 @@ case "$image" in
CONDA_CMAKE=yes
TRITON=yes
;;
pytorch-linux-bionic-cuda11.8-cudnn8-py3-gcc7-inductor-benchmarks)
pytorch-linux-focal-cuda11.8-cudnn8-py3-gcc7-inductor-benchmarks)
CUDA_VERSION=11.8.0
CUDNN_VERSION=8
ANACONDA_PYTHON_VERSION=3.10
@ -193,9 +191,9 @@ case "$image" in
GRADLE_VERSION=6.8.3
NINJA_VERSION=1.9.0
;;
pytorch-linux-bionic-py3.8-clang9)
pytorch-linux-focal-py3.8-clang10)
ANACONDA_PYTHON_VERSION=3.8
CLANG_VERSION=9
CLANG_VERSION=10
PROTOBUF=yes
DB=yes
VISION=yes
@ -204,9 +202,9 @@ case "$image" in
CONDA_CMAKE=yes
TRITON=yes
;;
pytorch-linux-bionic-py3.11-clang9)
pytorch-linux-focal-py3.11-clang10)
ANACONDA_PYTHON_VERSION=3.11
CLANG_VERSION=9
CLANG_VERSION=10
PROTOBUF=yes
DB=yes
VISION=yes
@ -215,7 +213,7 @@ case "$image" in
CONDA_CMAKE=yes
TRITON=yes
;;
pytorch-linux-bionic-py3.8-gcc9)
pytorch-linux-focal-py3.8-gcc9)
ANACONDA_PYTHON_VERSION=3.8
GCC_VERSION=9
PROTOBUF=yes
@ -257,9 +255,9 @@ case "$image" in
TRITON=yes
DOCS=yes
;;
pytorch-linux-focal-py3.8-gcc7-inductor-benchmarks)
pytorch-linux-jammy-py3.8-gcc11-inductor-benchmarks)
ANACONDA_PYTHON_VERSION=3.8
GCC_VERSION=7
GCC_VERSION=11
PROTOBUF=yes
DB=yes
VISION=yes
@ -288,6 +286,17 @@ case "$image" in
CONDA_CMAKE=yes
TRITON=yes
;;
pytorch-linux-jammy-py3.8-gcc11)
ANACONDA_PYTHON_VERSION=3.8
GCC_VERSION=11
PROTOBUF=yes
DB=yes
VISION=yes
KATEX=yes
CONDA_CMAKE=yes
TRITON=yes
DOCS=yes
;;
pytorch-linux-focal-linter)
# TODO: Use 3.9 here because of this issue https://github.com/python/mypy/issues/13627.
# We will need to update mypy version eventually, but that's for another day. The task

View File

@ -31,10 +31,13 @@ install_ubuntu() {
maybe_libomp_dev=""
fi
# TODO: Remove this once nvidia package repos are back online
# Comment out nvidia repositories to prevent them from getting apt-get updated, see https://github.com/pytorch/pytorch/issues/74968
# shellcheck disable=SC2046
sed -i 's/.*nvidia.*/# &/' $(find /etc/apt/ -type f -name "*.list")
# HACK: UCC testing relies on libnccl library from NVIDIA repo, and version 2.16 crashes
# See https://github.com/pytorch/pytorch/pull/105260#issuecomment-1673399729
if [[ "$UBUNTU_VERSION" == "20.04"* && "$CUDA_VERSION" == "11.8"* ]]; then
maybe_libnccl_dev="libnccl2=2.15.5-1+cuda11.8 libnccl-dev=2.15.5-1+cuda11.8 --allow-downgrades --allow-change-held-packages"
else
maybe_libnccl_dev=""
fi
# Install common dependencies
apt-get update
@ -63,6 +66,7 @@ install_ubuntu() {
libasound2-dev \
libsndfile-dev \
${maybe_libomp_dev} \
${maybe_libnccl_dev} \
software-properties-common \
wget \
sudo \

View File

@ -22,5 +22,7 @@ function install_timm() {
pip_install "git+https://github.com/rwightman/pytorch-image-models@${commit}"
}
# Pango is needed for weasyprint which is needed for doctr
conda_install pango
install_huggingface
# install_timm

View File

@ -33,20 +33,17 @@ jobs:
fail-fast: false
matrix:
include:
- docker-image-name: pytorch-linux-bionic-cuda12.1-cudnn8-py3-gcc9
- docker-image-name: pytorch-linux-bionic-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks
- docker-image-name: pytorch-linux-bionic-cuda11.8-cudnn8-py3-gcc9
- docker-image-name: pytorch-linux-bionic-cuda11.8-cudnn8-py3-gcc7
- docker-image-name: pytorch-linux-bionic-cuda11.8-cudnn8-py3-gcc7-inductor-benchmarks
- docker-image-name: pytorch-linux-bionic-py3.8-clang9
- docker-image-name: pytorch-linux-bionic-py3.11-clang9
- docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
- docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks
- docker-image-name: pytorch-linux-focal-cuda11.8-cudnn8-py3-gcc9
- docker-image-name: pytorch-linux-focal-py3.8-clang10
- docker-image-name: pytorch-linux-focal-py3.11-clang10
- docker-image-name: pytorch-linux-focal-rocm-n-1-py3
- docker-image-name: pytorch-linux-focal-rocm-n-py3
- docker-image-name: pytorch-linux-jammy-cuda11.8-cudnn8-py3.8-clang12
- docker-image-name: pytorch-linux-focal-py3-clang7-android-ndk-r19c
- docker-image-name: pytorch-linux-focal-py3.8-gcc7
- docker-image-name: pytorch-linux-focal-py3.8-gcc7-inductor-benchmarks
- docker-image-name: pytorch-linux-jammy-py3.8-gcc11
- docker-image-name: pytorch-linux-jammy-py3.8-gcc11-inductor-benchmarks
- docker-image-name: pytorch-linux-jammy-py3-clang12-asan
- docker-image-name: pytorch-linux-focal-py3-clang10-onnx
- docker-image-name: pytorch-linux-focal-linter

View File

@ -11,12 +11,12 @@ concurrency:
cancel-in-progress: true
jobs:
linux-bionic-cuda12_1-py3_10-gcc9-inductor-build:
linux-focal-cuda12_1-py3_10-gcc9-inductor-build:
name: cuda12.1-py3.10-gcc9-sm80
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-bionic-cuda12.1-py3.10-gcc9-sm80
docker-image-name: pytorch-linux-bionic-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks
build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks
cuda-arch-list: '8.0'
test-matrix: |
{ include: [
@ -28,14 +28,14 @@ jobs:
secrets:
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
linux-bionic-cuda12_1-py3_10-gcc9-inductor-test:
linux-focal-cuda12_1-py3_10-gcc9-inductor-test:
name: cuda12.1-py3.10-gcc9-sm80
uses: ./.github/workflows/_linux-test.yml
needs: linux-bionic-cuda12_1-py3_10-gcc9-inductor-build
needs: linux-focal-cuda12_1-py3_10-gcc9-inductor-build
with:
build-environment: linux-bionic-cuda12.1-py3.10-gcc9-sm80
docker-image: ${{ needs.linux-bionic-cuda12_1-py3_10-gcc9-inductor-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-bionic-cuda12_1-py3_10-gcc9-inductor-build.outputs.test-matrix }}
build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
docker-image: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-inductor-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-inductor-build.outputs.test-matrix }}
use-gha: anything-non-empty-to-use-gha
secrets:
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}

View File

@ -56,12 +56,12 @@ concurrency:
cancel-in-progress: true
jobs:
linux-bionic-cuda12_1-py3_10-gcc9-inductor-build:
linux-focal-cuda12_1-py3_10-gcc9-inductor-build:
name: cuda12.1-py3.10-gcc9-sm80
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-bionic-cuda12.1-py3.10-gcc9-sm80
docker-image-name: pytorch-linux-bionic-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks
build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks
cuda-arch-list: '8.0'
test-matrix: |
{ include: [
@ -81,31 +81,31 @@ jobs:
secrets:
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
linux-bionic-cuda12_1-py3_10-gcc9-inductor-test-nightly:
linux-focal-cuda12_1-py3_10-gcc9-inductor-test-nightly:
name: cuda12.1-py3.10-gcc9-sm80
uses: ./.github/workflows/_linux-test.yml
needs: linux-bionic-cuda12_1-py3_10-gcc9-inductor-build
needs: linux-focal-cuda12_1-py3_10-gcc9-inductor-build
if: github.event.schedule == '0 7 * * *'
with:
build-environment: linux-bionic-cuda12.1-py3.10-gcc9-sm80
build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
dashboard-tag: training-true-inference-true-default-true-dynamic-true-cudagraphs-true-aotinductor-true-freezing_cudagraphs-true
docker-image: ${{ needs.linux-bionic-cuda12_1-py3_10-gcc9-inductor-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-bionic-cuda12_1-py3_10-gcc9-inductor-build.outputs.test-matrix }}
docker-image: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-inductor-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-inductor-build.outputs.test-matrix }}
use-gha: anything-non-empty-to-use-gha
timeout-minutes: 720
secrets:
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
linux-bionic-cuda12_1-py3_10-gcc9-inductor-test:
linux-focal-cuda12_1-py3_10-gcc9-inductor-test:
name: cuda12.1-py3.10-gcc9-sm80
uses: ./.github/workflows/_linux-test.yml
needs: linux-bionic-cuda12_1-py3_10-gcc9-inductor-build
needs: linux-focal-cuda12_1-py3_10-gcc9-inductor-build
if: github.event_name == 'workflow_dispatch'
with:
build-environment: linux-bionic-cuda12.1-py3.10-gcc9-sm80
build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
dashboard-tag: training-${{ inputs.training }}-inference-${{ inputs.inference }}-default-${{ inputs.default }}-dynamic-${{ inputs.dynamic }}-cudagraphs-${{ inputs.cudagraphs }}-cppwrapper-${{ inputs.cppwrapper }}-aotinductor-${{ inputs.aotinductor }}-maxautotune-${{ inputs.maxautotune }}-freezing_cudagraphs-${{ inputs.freezing_cudagraphs }}
docker-image: ${{ needs.linux-bionic-cuda12_1-py3_10-gcc9-inductor-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-bionic-cuda12_1-py3_10-gcc9-inductor-build.outputs.test-matrix }}
docker-image: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-inductor-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-inductor-build.outputs.test-matrix }}
use-gha: anything-non-empty-to-use-gha
timeout-minutes: 720
secrets:

View File

@ -15,12 +15,12 @@ concurrency:
cancel-in-progress: true
jobs:
linux-bionic-cuda12_1-py3_10-gcc9-periodic-dynamo-benchmarks-build:
linux-focal-cuda12_1-py3_10-gcc9-periodic-dynamo-benchmarks-build:
name: cuda12.1-py3.10-gcc9-sm86-periodic-dynamo-benchmarks
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-bionic-cuda12.1-py3.10-gcc9-sm86
docker-image-name: pytorch-linux-bionic-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks
build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm86
docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks
cuda-arch-list: '8.6'
test-matrix: |
{ include: [
@ -40,13 +40,13 @@ jobs:
secrets:
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
linux-bionic-cuda12_1-py3_10-gcc9-periodic-dynamo-benchmarks-test:
linux-focal-cuda12_1-py3_10-gcc9-periodic-dynamo-benchmarks-test:
name: cuda12.1-py3.10-gcc9-sm86-periodic-dynamo-benchmarks
uses: ./.github/workflows/_linux-test.yml
needs: linux-bionic-cuda12_1-py3_10-gcc9-periodic-dynamo-benchmarks-build
needs: linux-focal-cuda12_1-py3_10-gcc9-periodic-dynamo-benchmarks-build
with:
build-environment: linux-bionic-cuda12.1-py3.10-gcc9-sm86
docker-image: ${{ needs.linux-bionic-cuda12_1-py3_10-gcc9-periodic-dynamo-benchmarks-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-bionic-cuda12_1-py3_10-gcc9-periodic-dynamo-benchmarks-build.outputs.test-matrix }}
build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm86
docker-image: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-periodic-dynamo-benchmarks-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-periodic-dynamo-benchmarks-build.outputs.test-matrix }}
secrets:
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}

View File

@ -13,12 +13,12 @@ concurrency:
cancel-in-progress: true
jobs:
linux-bionic-cuda12_1-py3_10-gcc9-inductor-build:
linux-focal-cuda12_1-py3_10-gcc9-inductor-build:
name: cuda12.1-py3.10-gcc9-sm86
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-bionic-cuda12.1-py3.10-gcc9-sm86
docker-image-name: pytorch-linux-bionic-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks
build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm86
docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks
cuda-arch-list: '8.6'
test-matrix: |
{ include: [
@ -36,23 +36,23 @@ jobs:
secrets:
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
linux-bionic-cuda12_1-py3_10-gcc9-inductor-test:
linux-focal-cuda12_1-py3_10-gcc9-inductor-test:
name: cuda12.1-py3.10-gcc9-sm86
uses: ./.github/workflows/_linux-test.yml
needs: linux-bionic-cuda12_1-py3_10-gcc9-inductor-build
needs: linux-focal-cuda12_1-py3_10-gcc9-inductor-build
with:
build-environment: linux-bionic-cuda12.1-py3.10-gcc9-sm86
docker-image: ${{ needs.linux-bionic-cuda12_1-py3_10-gcc9-inductor-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-bionic-cuda12_1-py3_10-gcc9-inductor-build.outputs.test-matrix }}
build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm86
docker-image: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-inductor-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-inductor-build.outputs.test-matrix }}
secrets:
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
linux-bionic-cuda12_1-py3_10-gcc9-inductor-build-gcp:
linux-focal-cuda12_1-py3_10-gcc9-inductor-build-gcp:
name: cuda12.1-py3.10-gcc9-sm80
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-bionic-cuda12.1-py3.10-gcc9-sm80
docker-image-name: pytorch-linux-bionic-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks
build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks
cuda-arch-list: '8.0'
test-matrix: |
{ include: [
@ -61,24 +61,24 @@ jobs:
secrets:
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
linux-bionic-cuda12_1-py3_10-gcc9-inductor-test-gcp:
linux-focal-cuda12_1-py3_10-gcc9-inductor-test-gcp:
name: cuda12.1-py3.10-gcc9-sm80
uses: ./.github/workflows/_linux-test.yml
needs: linux-bionic-cuda12_1-py3_10-gcc9-inductor-build-gcp
needs: linux-focal-cuda12_1-py3_10-gcc9-inductor-build-gcp
with:
build-environment: linux-bionic-cuda12.1-py3.10-gcc9-sm80
docker-image: ${{ needs.linux-bionic-cuda12_1-py3_10-gcc9-inductor-build-gcp.outputs.docker-image }}
test-matrix: ${{ needs.linux-bionic-cuda12_1-py3_10-gcc9-inductor-build-gcp.outputs.test-matrix }}
build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
docker-image: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-inductor-build-gcp.outputs.docker-image }}
test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-inductor-build-gcp.outputs.test-matrix }}
use-gha: anything-non-empty-to-use-gha
secrets:
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
linux-focal-cpu-py3_8-gcc7-inductor-build:
name: linux-focal-cpu-py3.8-gcc7-inductor
linux-jammy-cpu-py3_8-gcc11-inductor-build:
name: linux-jammy-cpu-py3.8-gcc11-inductor
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-focal-py3_8-gcc7-build
docker-image-name: pytorch-linux-focal-py3.8-gcc7-inductor-benchmarks
build-environment: linux-jammy-py3_8-gcc11-build
docker-image-name: pytorch-linux-jammy-py3.8-gcc11-inductor-benchmarks
test-matrix: |
{ include: [
{ config: "inductor_huggingface_cpu_accuracy", shard: 1, num_shards: 1, runner: "linux.12xlarge" },
@ -93,13 +93,13 @@ jobs:
secrets:
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
linux-focal-cpu-py3_8-gcc7-inductor-test:
name: linux-focal-cpu-py3.8-gcc7-inductor
linux-jammy-cpu-py3_8-gcc11-inductor-test:
name: linux-jammy-cpu-py3.8-gcc11-inductor
uses: ./.github/workflows/_linux-test.yml
needs: linux-focal-cpu-py3_8-gcc7-inductor-build
needs: linux-jammy-cpu-py3_8-gcc11-inductor-build
with:
build-environment: linux-focal-py3_8-gcc7-build
docker-image: ${{ needs.linux-focal-cpu-py3_8-gcc7-inductor-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-focal-cpu-py3_8-gcc7-inductor-build.outputs.test-matrix }}
build-environment: linux-jammy-py3_8-gcc11-build
docker-image: ${{ needs.linux-jammy-cpu-py3_8-gcc11-inductor-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-jammy-cpu-py3_8-gcc11-inductor-build.outputs.test-matrix }}
secrets:
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}

View File

@ -21,15 +21,15 @@ jobs:
name: docs build
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-focal-py3.8-gcc7
docker-image-name: pytorch-linux-focal-py3.8-gcc7
build-environment: linux-jammy-py3.8-gcc11
docker-image-name: pytorch-linux-jammy-py3.8-gcc11
docs-push:
name: docs push
uses: ./.github/workflows/_docs.yml
needs: docs-build
with:
build-environment: linux-focal-py3.8-gcc7
build-environment: linux-jammy-py3.8-gcc11
docker-image: ${{ needs.docs-build.outputs.docker-image }}
push: ${{ github.event_name == 'schedule' || startsWith(github.event.ref, 'refs/tags/v') }}
run-doxygen: true

View File

@ -19,12 +19,12 @@ concurrency:
cancel-in-progress: true
jobs:
parallelnative-linux-focal-py3_8-gcc7-build:
name: parallelnative-linux-focal-py3.8-gcc7
parallelnative-linux-jammy-py3_8-gcc11-build:
name: parallelnative-linux-jammy-py3.8-gcc11
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: parallelnative-linux-focal-py3.8-gcc7
docker-image-name: pytorch-linux-focal-py3.8-gcc7
build-environment: parallelnative-linux-jammy-py3.8-gcc11
docker-image-name: pytorch-linux-jammy-py3.8-gcc11
test-matrix: |
{ include: [
{ config: "default", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
@ -32,21 +32,21 @@ jobs:
{ config: "default", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
]}
parallelnative-linux-focal-py3_8-gcc7-test:
name: parallelnative-linux-focal-py3.8-gcc7
parallelnative-linux-jammy-py3_8-gcc11-test:
name: parallelnative-linux-jammy-py3.8-gcc11
uses: ./.github/workflows/_linux-test.yml
needs: parallelnative-linux-focal-py3_8-gcc7-build
needs: parallelnative-linux-jammy-py3_8-gcc11-build
with:
build-environment: parallelnative-linux-focal-py3.8-gcc7
docker-image: ${{ needs.parallelnative-linux-focal-py3_8-gcc7-build.outputs.docker-image }}
test-matrix: ${{ needs.parallelnative-linux-focal-py3_8-gcc7-build.outputs.test-matrix }}
build-environment: parallelnative-linux-jammy-py3.8-gcc11
docker-image: ${{ needs.parallelnative-linux-jammy-py3_8-gcc11-build.outputs.docker-image }}
test-matrix: ${{ needs.parallelnative-linux-jammy-py3_8-gcc11-build.outputs.test-matrix }}
linux-bionic-cuda11_8-py3_9-gcc7-build:
name: linux-bionic-cuda11.8-py3.9-gcc7
linux-focal-cuda11_8-py3_9-gcc9-build:
name: linux-focal-cuda11.8-py3.9-gcc9
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-bionic-cuda11.8-py3.9-gcc7
docker-image-name: pytorch-linux-bionic-cuda11.8-cudnn8-py3-gcc7
build-environment: linux-focal-cuda11.8-py3.9-gcc9
docker-image-name: pytorch-linux-focal-cuda11.8-cudnn8-py3-gcc9
cuda-arch-list: 8.6
test-matrix: |
{ include: [
@ -54,21 +54,21 @@ jobs:
]}
build-with-debug: false
linux-bionic-cuda11_8-py3_9-gcc7-test:
name: linux-bionic-cuda11.8-py3.9-gcc7
linux-focal-cuda11_8-py3_9-gcc9-test:
name: linux-focal-cuda11.8-py3.9-gcc9
uses: ./.github/workflows/_linux-test.yml
needs: linux-bionic-cuda11_8-py3_9-gcc7-build
needs: linux-focal-cuda11_8-py3_9-gcc9-build
with:
build-environment: linux-bionic-cuda11.8-py3.9-gcc7
docker-image: ${{ needs.linux-bionic-cuda11_8-py3_9-gcc7-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-bionic-cuda11_8-py3_9-gcc7-build.outputs.test-matrix }}
build-environment: linux-focal-cuda11.8-py3.9-gcc9
docker-image: ${{ needs.linux-focal-cuda11_8-py3_9-gcc9-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-focal-cuda11_8-py3_9-gcc9-build.outputs.test-matrix }}
linux-bionic-cuda11_8-py3_10-gcc7-debug-build:
name: linux-bionic-cuda11.8-py3.10-gcc7-debug
linux-focal-cuda11_8-py3_10-gcc9-debug-build:
name: linux-focal-cuda11.8-py3.10-gcc9-debug
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-bionic-cuda11.8-py3.10-gcc7-debug
docker-image-name: pytorch-linux-bionic-cuda11.8-cudnn8-py3-gcc7
build-environment: linux-focal-cuda11.8-py3.10-gcc9-debug
docker-image-name: pytorch-linux-focal-cuda11.8-cudnn8-py3-gcc9
build-with-debug: true
test-matrix: |
{ include: [
@ -79,14 +79,14 @@ jobs:
{ config: "default", shard: 5, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
]}
linux-bionic-cuda11_8-py3_10-gcc7-debug-test:
name: linux-bionic-cuda11.8-py3.10-gcc7-debug
linux-focal-cuda11_8-py3_10-gcc9-debug-test:
name: linux-focal-cuda11.8-py3.10-gcc9-debug
uses: ./.github/workflows/_linux-test.yml
needs: linux-bionic-cuda11_8-py3_10-gcc7-debug-build
needs: linux-focal-cuda11_8-py3_10-gcc9-debug-build
with:
build-environment: linux-bionic-cuda11.8-py3.10-gcc7-debug
docker-image: ${{ needs.linux-bionic-cuda11_8-py3_10-gcc7-debug-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-bionic-cuda11_8-py3_10-gcc7-debug-build.outputs.test-matrix }}
build-environment: linux-focal-cuda11.8-py3.10-gcc9-debug
docker-image: ${{ needs.linux-focal-cuda11_8-py3_10-gcc9-debug-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-focal-cuda11_8-py3_10-gcc9-debug-build.outputs.test-matrix }}
win-vs2019-cuda11_8-py3-build:
name: win-vs2019-cuda11.8-py3
@ -183,25 +183,25 @@ jobs:
{ config: "default", shard: 1, num_shards: 1, runner: "ubuntu-20.04-16x" },
]}
linux-vulkan-bionic-py3_11-clang9-build:
name: linux-vulkan-bionic-py3.11-clang9
linux-vulkan-focal-py3_11-clang10-build:
name: linux-vulkan-focal-py3.11-clang10
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-vulkan-bionic-py3.11-clang9
docker-image-name: pytorch-linux-bionic-py3.11-clang9
build-environment: linux-vulkan-focal-py3.11-clang10
docker-image-name: pytorch-linux-focal-py3.11-clang10
test-matrix: |
{ include: [
{ config: "default", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
]}
linux-vulkan-bionic-py3_11-clang9-test:
name: linux-vulkan-bionic-py3.11-clang9
linux-vulkan-focal-py3_11-clang10-test:
name: linux-vulkan-focal-py3.11-clang10
uses: ./.github/workflows/_linux-test.yml
needs: linux-vulkan-bionic-py3_11-clang9-build
needs: linux-vulkan-focal-py3_11-clang10-build
with:
build-environment: linux-vulkan-bionic-py3.11-clang9
docker-image: ${{ needs.linux-vulkan-bionic-py3_11-clang9-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-vulkan-bionic-py3_11-clang9-build.outputs.test-matrix }}
build-environment: linux-vulkan-focal-py3.11-clang10
docker-image: ${{ needs.linux-vulkan-focal-py3_11-clang10-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-vulkan-focal-py3_11-clang10-build.outputs.test-matrix }}
linux-focal-rocm5_6-py3_8-build:
name: linux-focal-rocm5.6-py3.8

View File

@ -18,12 +18,12 @@ concurrency:
cancel-in-progress: true
jobs:
linux-focal-py3_8-gcc7-build:
name: linux-focal-py3.8-gcc7
linux-jammy-py3_8-gcc11-build:
name: linux-jammy-py3.8-gcc11
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-focal-py3.8-gcc7
docker-image-name: pytorch-linux-focal-py3.8-gcc7
build-environment: linux-jammy-py3.8-gcc11
docker-image-name: pytorch-linux-jammy-py3.8-gcc11
test-matrix: |
{ include: [
{ config: "default", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
@ -36,40 +36,40 @@ jobs:
{ config: "distributed", shard: 2, num_shards: 2, runner: "linux.2xlarge" },
]}
linux-focal-py3_8-gcc7-test:
name: linux-focal-py3.8-gcc7
linux-jammy-py3_8-gcc11-test:
name: linux-jammy-py3.8-gcc11
uses: ./.github/workflows/_linux-test.yml
needs: linux-focal-py3_8-gcc7-build
needs: linux-jammy-py3_8-gcc11-build
with:
build-environment: linux-focal-py3.8-gcc7
docker-image: ${{ needs.linux-focal-py3_8-gcc7-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-focal-py3_8-gcc7-build.outputs.test-matrix }}
build-environment: linux-jammy-py3.8-gcc11
docker-image: ${{ needs.linux-jammy-py3_8-gcc11-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-jammy-py3_8-gcc11-build.outputs.test-matrix }}
linux-docs:
name: linux-docs
uses: ./.github/workflows/_docs.yml
needs: linux-focal-py3_8-gcc7-build
needs: linux-jammy-py3_8-gcc11-build
with:
build-environment: linux-focal-py3.8-gcc7
docker-image: ${{ needs.linux-focal-py3_8-gcc7-build.outputs.docker-image }}
build-environment: linux-jammy-py3.8-gcc11
docker-image: ${{ needs.linux-jammy-py3_8-gcc11-build.outputs.docker-image }}
linux-focal-py3_8-gcc7-no-ops:
name: linux-focal-py3.8-gcc7-no-ops
linux-jammy-py3_8-gcc11-no-ops:
name: linux-jammy-py3.8-gcc11-no-ops
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-focal-py3.8-gcc7-no-ops
docker-image-name: pytorch-linux-focal-py3.8-gcc7
build-environment: linux-jammy-py3.8-gcc11-no-ops
docker-image-name: pytorch-linux-jammy-py3.8-gcc11
test-matrix: |
{ include: [
{ config: "default", shard: 1, num_shards: 1 },
]}
linux-focal-py3_8-gcc7-pch:
name: linux-focal-py3.8-gcc7-pch
linux-jammy-py3_8-gcc11-pch:
name: linux-jammy-py3.8-gcc11-pch
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-focal-py3.8-gcc7-pch
docker-image-name: pytorch-linux-focal-py3.8-gcc7
build-environment: linux-jammy-py3.8-gcc11-pch
docker-image-name: pytorch-linux-jammy-py3.8-gcc11
test-matrix: |
{ include: [
{ config: "default", shard: 1, num_shards: 1 },
@ -123,12 +123,12 @@ jobs:
docker-image: ${{ needs.linux-focal-py3_8-clang10-onnx-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-focal-py3_8-clang10-onnx-build.outputs.test-matrix }}
linux-bionic-py3_8-clang9-build:
name: linux-bionic-py3.8-clang9
linux-focal-py3_8-clang10-build:
name: linux-focal-py3.8-clang10
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-bionic-py3.8-clang9
docker-image-name: pytorch-linux-bionic-py3.8-clang9
build-environment: linux-focal-py3.8-clang10
docker-image-name: pytorch-linux-focal-py3.8-clang10
test-matrix: |
{ include: [
{ config: "default", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
@ -140,21 +140,21 @@ jobs:
{ config: "dynamo", shard: 2, num_shards: 2, runner: "linux.2xlarge" },
]}
linux-bionic-py3_8-clang9-test:
name: linux-bionic-py3.8-clang9
linux-focal-py3_8-clang10-test:
name: linux-focal-py3.8-clang10
uses: ./.github/workflows/_linux-test.yml
needs: linux-bionic-py3_8-clang9-build
needs: linux-focal-py3_8-clang10-build
with:
build-environment: linux-bionic-py3.8-clang9
docker-image: ${{ needs.linux-bionic-py3_8-clang9-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-bionic-py3_8-clang9-build.outputs.test-matrix }}
build-environment: linux-focal-py3.8-clang10
docker-image: ${{ needs.linux-focal-py3_8-clang10-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-focal-py3_8-clang10-build.outputs.test-matrix }}
linux-bionic-py3_11-clang9-build:
name: linux-bionic-py3.11-clang9
linux-focal-py3_11-clang10-build:
name: linux-focal-py3.11-clang10
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-bionic-py3.11-clang9
docker-image-name: pytorch-linux-bionic-py3.11-clang9
build-environment: linux-focal-py3.11-clang10
docker-image-name: pytorch-linux-focal-py3.11-clang10
test-matrix: |
{ include: [
{ config: "default", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
@ -166,21 +166,21 @@ jobs:
{ config: "dynamo", shard: 2, num_shards: 2, runner: "linux.2xlarge" },
]}
linux-bionic-py3_11-clang9-test:
name: linux-bionic-py3.11-clang9
linux-focal-py3_11-clang10-test:
name: linux-focal-py3.11-clang10
uses: ./.github/workflows/_linux-test.yml
needs: linux-bionic-py3_11-clang9-build
needs: linux-focal-py3_11-clang10-build
with:
build-environment: linux-bionic-py3.11-clang9
docker-image: ${{ needs.linux-bionic-py3_11-clang9-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-bionic-py3_11-clang9-build.outputs.test-matrix }}
build-environment: linux-focal-py3.11-clang10
docker-image: ${{ needs.linux-focal-py3_11-clang10-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-focal-py3_11-clang10-build.outputs.test-matrix }}
linux-bionic-cuda11_8-py3_10-gcc9-build:
name: linux-bionic-cuda11.8-py3.10-gcc9
linux-focal-cuda11_8-py3_10-gcc9-build:
name: linux-focal-cuda11.8-py3.10-gcc9
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-bionic-cuda11.8-py3.10-gcc9
docker-image-name: pytorch-linux-bionic-cuda11.8-cudnn8-py3-gcc9
build-environment: linux-focal-cuda11.8-py3.10-gcc9
docker-image-name: pytorch-linux-focal-cuda11.8-cudnn8-py3-gcc9
test-matrix: |
{ include: [
{ config: "distributed", shard: 1, num_shards: 3, runner: "linux.8xlarge.nvidia.gpu" },
@ -188,22 +188,22 @@ jobs:
{ config: "distributed", shard: 3, num_shards: 3, runner: "linux.8xlarge.nvidia.gpu" },
]}
linux-bionic-cuda11_8-py3_10-gcc9-test:
name: linux-bionic-cuda11.8-py3.10-gcc9
linux-focal-cuda11_8-py3_10-gcc9-test:
name: linux-focal-cuda11.8-py3.10-gcc9
uses: ./.github/workflows/_linux-test.yml
needs: linux-bionic-cuda11_8-py3_10-gcc9-build
needs: linux-focal-cuda11_8-py3_10-gcc9-build
with:
timeout-minutes: 360
build-environment: linux-bionic-cuda11.8-py3.10-gcc9
docker-image: ${{ needs.linux-bionic-cuda11_8-py3_10-gcc9-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-bionic-cuda11_8-py3_10-gcc9-build.outputs.test-matrix }}
build-environment: linux-focal-cuda11.8-py3.10-gcc9
docker-image: ${{ needs.linux-focal-cuda11_8-py3_10-gcc9-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-focal-cuda11_8-py3_10-gcc9-build.outputs.test-matrix }}
linux-bionic-cuda12_1-py3_10-gcc9-build:
name: linux-bionic-cuda12.1-py3.10-gcc9
linux-focal-cuda12_1-py3_10-gcc9-build:
name: linux-focal-cuda12.1-py3.10-gcc9
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-bionic-cuda12.1-py3.10-gcc9
docker-image-name: pytorch-linux-bionic-cuda12.1-cudnn8-py3-gcc9
build-environment: linux-focal-cuda12.1-py3.10-gcc9
docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
test-matrix: |
{ include: [
{ config: "default", shard: 1, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
@ -214,15 +214,15 @@ jobs:
{ config: "deploy", shard: 1, num_shards: 1, runner: "linux.4xlarge.nvidia.gpu" },
]}
linux-bionic-cuda12_1-py3_10-gcc9-test:
name: linux-bionic-cuda12.1-py3.10-gcc9
linux-focal-cuda12_1-py3_10-gcc9-test:
name: linux-focal-cuda12.1-py3.10-gcc9
uses: ./.github/workflows/_linux-test.yml
needs: linux-bionic-cuda12_1-py3_10-gcc9-build
needs: linux-focal-cuda12_1-py3_10-gcc9-build
with:
timeout-minutes: 360
build-environment: linux-bionic-cuda12.1-py3.10-gcc9
docker-image: ${{ needs.linux-bionic-cuda12_1-py3_10-gcc9-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-bionic-cuda12_1-py3_10-gcc9-build.outputs.test-matrix }}
build-environment: linux-focal-cuda12.1-py3.10-gcc9
docker-image: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-build.outputs.test-matrix }}
linux-jammy-py3-clang12-mobile-build:
name: linux-jammy-py3-clang12-mobile-build
@ -259,25 +259,25 @@ jobs:
{ config: "default", shard: 1, num_shards: 1 },
]}
linux-bionic-py3_8-clang8-xla-build:
name: linux-bionic-py3_8-clang8-xla
linux-focal-py3_8-clang8-xla-build:
name: linux-focal-py3_8-clang8-xla
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-bionic-py3_8-clang8-xla
build-environment: linux-focal-py3_8-clang8-xla
docker-image-name: 308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/xla_base:v1.0
test-matrix: |
{ include: [
{ config: "xla", shard: 1, num_shards: 1, runner: "linux.12xlarge" },
]}
linux-bionic-py3_8-clang8-xla-test:
name: linux-bionic-py3_8-clang8-xla
linux-focal-py3_8-clang8-xla-test:
name: linux-focal-py3_8-clang8-xla
uses: ./.github/workflows/_linux-test.yml
needs: linux-bionic-py3_8-clang8-xla-build
needs: linux-focal-py3_8-clang8-xla-build
with:
build-environment: linux-bionic-py3_8-clang8-xla
docker-image: ${{ needs.linux-bionic-py3_8-clang8-xla-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-bionic-py3_8-clang8-xla-build.outputs.test-matrix }}
build-environment: linux-focal-py3_8-clang8-xla
docker-image: ${{ needs.linux-focal-py3_8-clang8-xla-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-focal-py3_8-clang8-xla-build.outputs.test-matrix }}
win-vs2019-cpu-py3-build:
name: win-vs2019-cpu-py3
@ -293,12 +293,12 @@ jobs:
{ config: "default", shard: 3, num_shards: 3, runner: "windows.4xlarge.nonephemeral" },
]}
linux-bionic-cpu-py3_10-gcc9-bazel-test:
name: linux-bionic-cpu-py3.10-gcc9-bazel-test
linux-focal-cpu-py3_10-gcc9-bazel-test:
name: linux-focal-cpu-py3.10-gcc9-bazel-test
uses: ./.github/workflows/_bazel-build-test.yml
with:
build-environment: linux-bionic-cuda12.1-py3.10-gcc9-bazel-test
docker-image-name: pytorch-linux-bionic-cuda12.1-cudnn8-py3-gcc9
build-environment: linux-focal-cuda12.1-py3.10-gcc9-bazel-test
docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
cuda-version: cpu
test-matrix: |
{ include: [
@ -339,12 +339,12 @@ jobs:
{ config: "default", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
]}
linux-focal-py3_8-gcc7-mobile-lightweight-dispatch-build:
name: linux-focal-py3.8-gcc7-mobile-lightweight-dispatch-build
linux-jammy-py3_8-gcc11-mobile-lightweight-dispatch-build:
name: linux-jammy-py3.8-gcc11-mobile-lightweight-dispatch-build
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-focal-py3.8-gcc7-mobile-lightweight-dispatch-build
docker-image-name: pytorch-linux-focal-py3.8-gcc7
build-environment: linux-jammy-py3.8-gcc11-mobile-lightweight-dispatch-build
docker-image-name: pytorch-linux-jammy-py3.8-gcc11
build-generates-artifacts: false
test-matrix: |
{ include: [
@ -367,12 +367,12 @@ jobs:
{ config: "default", shard: 3, num_shards: 3, runner: "linux.rocm.gpu" },
]}
linux-bionic-cuda12_1-py3_10-gcc9-sm86-build:
name: linux-bionic-cuda12.1-py3.10-gcc9-sm86
linux-focal-cuda12_1-py3_10-gcc9-sm86-build:
name: linux-focal-cuda12.1-py3.10-gcc9-sm86
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-bionic-cuda12.1-py3.10-gcc9-sm86
docker-image-name: pytorch-linux-bionic-cuda12.1-cudnn8-py3-gcc9
build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm86
docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
cuda-arch-list: 8.6
test-matrix: |
{ include: [
@ -383,11 +383,11 @@ jobs:
{ config: "default", shard: 5, num_shards: 5, runner: "linux.g5.4xlarge.nvidia.gpu" },
]}
linux-bionic-cuda12_1-py3_10-gcc9-sm86-test:
name: linux-bionic-cuda12.1-py3.10-gcc9-sm86
linux-focal-cuda12_1-py3_10-gcc9-sm86-test:
name: linux-focal-cuda12.1-py3.10-gcc9-sm86
uses: ./.github/workflows/_linux-test.yml
needs: linux-bionic-cuda12_1-py3_10-gcc9-sm86-build
needs: linux-focal-cuda12_1-py3_10-gcc9-sm86-build
with:
build-environment: linux-bionic-cuda12.1-py3.10-gcc9-sm86
docker-image: ${{ needs.linux-bionic-cuda12_1-py3_10-gcc9-sm86-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-bionic-cuda12_1-py3_10-gcc9-sm86-build.outputs.test-matrix }}
build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm86
docker-image: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-sm86-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-sm86-build.outputs.test-matrix }}

View File

@ -17,12 +17,12 @@ concurrency:
cancel-in-progress: true
jobs:
linux-bionic-cuda12_1-py3-gcc9-slow-gradcheck-build:
name: linux-bionic-cuda12.1-py3-gcc9-slow-gradcheck
linux-focal-cuda12_1-py3-gcc9-slow-gradcheck-build:
name: linux-focal-cuda12.1-py3-gcc9-slow-gradcheck
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-bionic-cuda12.1-py3-gcc9-slow-gradcheck
docker-image-name: pytorch-linux-bionic-cuda12.1-cudnn8-py3-gcc9
build-environment: linux-focal-cuda12.1-py3-gcc9-slow-gradcheck
docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
cuda-arch-list: 8.6
test-matrix: |
{ include: [
@ -32,22 +32,22 @@ jobs:
{ config: "default", shard: 4, num_shards: 4, runner: "linux.g5.4xlarge.nvidia.gpu" },
]}
linux-bionic-cuda12_1-py3-gcc9-slow-gradcheck-test:
name: linux-bionic-cuda12.1-py3-gcc9-slow-gradcheck
linux-focal-cuda12_1-py3-gcc9-slow-gradcheck-test:
name: linux-focal-cuda12.1-py3-gcc9-slow-gradcheck
uses: ./.github/workflows/_linux-test.yml
needs: linux-bionic-cuda12_1-py3-gcc9-slow-gradcheck-build
needs: linux-focal-cuda12_1-py3-gcc9-slow-gradcheck-build
with:
build-environment: linux-bionic-cuda12.1-py3-gcc9-slow-gradcheck
docker-image: ${{ needs.linux-bionic-cuda12_1-py3-gcc9-slow-gradcheck-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-bionic-cuda12_1-py3-gcc9-slow-gradcheck-build.outputs.test-matrix }}
build-environment: linux-focal-cuda12.1-py3-gcc9-slow-gradcheck
docker-image: ${{ needs.linux-focal-cuda12_1-py3-gcc9-slow-gradcheck-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-focal-cuda12_1-py3-gcc9-slow-gradcheck-build.outputs.test-matrix }}
timeout-minutes: 300
linux-bionic-cuda12_1-py3_10-gcc9-sm86-build:
name: linux-bionic-cuda12.1-py3.10-gcc9-sm86
linux-focal-cuda12_1-py3_10-gcc9-sm86-build:
name: linux-focal-cuda12.1-py3.10-gcc9-sm86
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-bionic-cuda12.1-py3.10-gcc9-sm86
docker-image-name: pytorch-linux-bionic-cuda12.1-cudnn8-py3-gcc9
build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm86
docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
cuda-arch-list: 8.6
test-matrix: |
{ include: [
@ -55,34 +55,34 @@ jobs:
{ config: "slow", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
]}
linux-bionic-cuda12_1-py3_10-gcc9-sm86-test:
name: linux-bionic-cuda12.1-py3.10-gcc9-sm86
linux-focal-cuda12_1-py3_10-gcc9-sm86-test:
name: linux-focal-cuda12.1-py3.10-gcc9-sm86
uses: ./.github/workflows/_linux-test.yml
needs: linux-bionic-cuda12_1-py3_10-gcc9-sm86-build
needs: linux-focal-cuda12_1-py3_10-gcc9-sm86-build
with:
build-environment: linux-bionic-cuda12.1-py3.10-gcc9-sm86
docker-image: ${{ needs.linux-bionic-cuda12_1-py3_10-gcc9-sm86-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-bionic-cuda12_1-py3_10-gcc9-sm86-build.outputs.test-matrix }}
build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm86
docker-image: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-sm86-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-sm86-build.outputs.test-matrix }}
linux-bionic-py3_8-clang9-build:
name: linux-bionic-py3.8-clang9
linux-focal-py3_8-clang10-build:
name: linux-focal-py3.8-clang10
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-bionic-py3.8-clang9
docker-image-name: pytorch-linux-bionic-py3.8-clang9
build-environment: linux-focal-py3.8-clang10
docker-image-name: pytorch-linux-focal-py3.8-clang10
test-matrix: |
{ include: [
{ config: "slow", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
]}
linux-bionic-py3_8-clang9-test:
name: linux-bionic-py3.8-clang9
linux-focal-py3_8-clang10-test:
name: linux-focal-py3.8-clang10
uses: ./.github/workflows/_linux-test.yml
needs: linux-bionic-py3_8-clang9-build
needs: linux-focal-py3_8-clang10-build
with:
build-environment: linux-bionic-py3.8-clang9
docker-image: ${{ needs.linux-bionic-py3_8-clang9-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-bionic-py3_8-clang9-build.outputs.test-matrix }}
build-environment: linux-focal-py3.8-clang10
docker-image: ${{ needs.linux-focal-py3_8-clang10-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-focal-py3_8-clang10-build.outputs.test-matrix }}
linux-focal-rocm5_6-py3_8-build:
name: linux-focal-rocm5.6-py3.8

View File

@ -18,23 +18,23 @@ concurrency:
jobs:
# Build PyTorch with BUILD_CAFFE2=ON
caffe2-linux-focal-py3_8-gcc7-build:
name: caffe2-linux-focal-py3.8-gcc7
caffe2-linux-jammy-py3_8-gcc11-build:
name: caffe2-linux-jammy-py3.8-gcc11
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: caffe2-linux-focal-py3.8-gcc7
docker-image-name: pytorch-linux-focal-py3.8-gcc7
build-environment: caffe2-linux-jammy-py3.8-gcc11
docker-image-name: pytorch-linux-jammy-py3.8-gcc11
test-matrix: |
{ include: [
{ config: "default", shard: 1, num_shards: 1 },
]}
linux-bionic-cuda12_1-py3_10-gcc9-build:
name: linux-bionic-cuda12.1-py3.10-gcc9
linux-focal-cuda12_1-py3_10-gcc9-build:
name: linux-focal-cuda12.1-py3.10-gcc9
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-bionic-cuda12.1-py3.10-gcc9
docker-image-name: pytorch-linux-bionic-cuda12.1-cudnn8-py3-gcc9
build-environment: linux-focal-cuda12.1-py3.10-gcc9
docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
test-matrix: |
{ include: [
{ config: "nogpu_AVX512", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
@ -42,21 +42,21 @@ jobs:
{ config: "jit_legacy", shard: 1, num_shards: 1, runner: "linux.4xlarge.nvidia.gpu" },
]}
linux-bionic-cuda12_1-py3_10-gcc9-test:
name: linux-bionic-cuda12.1-py3.10-gcc9
linux-focal-cuda12_1-py3_10-gcc9-test:
name: linux-focal-cuda12.1-py3.10-gcc9
uses: ./.github/workflows/_linux-test.yml
needs: linux-bionic-cuda12_1-py3_10-gcc9-build
needs: linux-focal-cuda12_1-py3_10-gcc9-build
with:
build-environment: linux-bionic-cuda12.1-py3.10-gcc9
docker-image: ${{ needs.linux-bionic-cuda12_1-py3_10-gcc9-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-bionic-cuda12_1-py3_10-gcc9-build.outputs.test-matrix }}
build-environment: linux-focal-cuda12.1-py3.10-gcc9
docker-image: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-build.outputs.test-matrix }}
libtorch-linux-bionic-cuda12_1-py3_7-gcc9-debug-build:
name: libtorch-linux-bionic-cuda12.1-py3.7-gcc9-debug
libtorch-linux-focal-cuda12_1-py3_7-gcc9-debug-build:
name: libtorch-linux-focal-cuda12.1-py3.7-gcc9-debug
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: libtorch-linux-bionic-cuda12.1-py3.7-gcc9
docker-image-name: pytorch-linux-bionic-cuda12.1-cudnn8-py3-gcc9
build-environment: libtorch-linux-focal-cuda12.1-py3.7-gcc9
docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
build-generates-artifacts: false
runner: linux.4xlarge
test-matrix: |
@ -65,12 +65,12 @@ jobs:
]}
# no-ops builds test USE_PER_OPERATOR_HEADERS=0 where ATen/ops is not generated
linux-bionic-cuda12_1-py3_10-gcc9-no-ops-build:
name: linux-bionic-cuda12.1-py3.10-gcc9-no-ops
linux-focal-cuda12_1-py3_10-gcc9-no-ops-build:
name: linux-focal-cuda12.1-py3.10-gcc9-no-ops
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-bionic-cuda12.1-py3.10-gcc9-no-ops
docker-image-name: pytorch-linux-bionic-cuda12.1-cudnn8-py3-gcc9
build-environment: linux-focal-cuda12.1-py3.10-gcc9-no-ops
docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
test-matrix: |
{ include: [
{ config: "default", shard: 1, num_shards: 1 },

View File

@ -1 +1,5 @@
#ifdef __GNUC__
#pragma GCC diagnostic ignored "-Wuninitialized"
#endif
#include <c10/test/util/complex_test_common.h>

View File

@ -16,6 +16,7 @@ using c10::weak_intrusive_ptr;
#pragma GCC diagnostic ignored "-Wpragmas"
#pragma GCC diagnostic ignored "-Wunknown-warning-option"
#pragma GCC diagnostic ignored "-Wself-move"
#pragma GCC diagnostic ignored "-Wfree-nonheap-object"
#endif
#ifdef __clang__

View File

@ -48,6 +48,7 @@ from common_utils import (
DisableVmapFallback,
)
import types
import os
from collections import namedtuple
import contextlib
@ -1545,6 +1546,8 @@ class TestVmapOperators(Namespace.TestVmapBase):
test(op, (getter([], device), getter([B0], device)), in_dims=(None, 0))
test(op, (getter([2, B0], device), getter([2], device)), in_dims=(1, None))
@skipIf(TEST_WITH_TORCHDYNAMO and os.getenv('BUILD_ENVIRONMENT', '') == 'linux-focal-py3.8-clang10',
"Segfauls with dynamo on focal, see https://github.com/pytorch/pytorch/issues/107173")
@parametrize('case', [
subtest(_make_case(torch.add), name='add'),
subtest(_make_case(lambda x, y: x + y), name='add_dunder'),
@ -5034,6 +5037,11 @@ class TestRandomness(TestCase):
class TestTransformFailure(TestCase):
@parametrize('transform', ['vmap', 'grad', 'grad_and_value', 'vjp', 'jvp', 'jacrev', 'jacfwd'])
def test_fails_with_autograd_function(self, device, transform):
if (device == 'cpu' and transform in ['grad', 'vmap'] and
TEST_WITH_TORCHDYNAMO and os.getenv('BUILD_ENVIRONMENT', '') == 'linux-focal-py3.8-clang10'):
raise unittest.SkipTest("Unexpected successes on focal with dynamo," +
" see https://github.com/pytorch/pytorch/issues/107173")
class Test(torch.autograd.Function):
@staticmethod
def forward(_, input):