[BE] shorten the name part 1 (#62402)

Summary:
This should address part of https://github.com/pytorch/pytorch/issues/62357.

1. rename all generated files to 'generated-*' to make it clear that the filename will not be part of the CI workflow name
2. remove all 'pytorch-' in names
3. make sure the build/test shell scripts are adapted to the new names

A follow-up change should further reduce device-related naming.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/62402

Reviewed By: malfet

Differential Revision: D30021959

Pulled By: walterddr

fbshipit-source-id: 64b21a2020e25a507101c09c010cb593d8ac4146
This commit is contained in:
Rong Rong (AI Infra)
2021-08-02 07:55:29 -07:00
committed by Facebook GitHub Bot
parent 7565039ee9
commit c9d5325c52
25 changed files with 132 additions and 131 deletions

3
.gitattributes vendored
View File

@ -1,4 +1,3 @@
*.bat text eol=crlf
.circleci/config.yml linguist-generated=true
.github/workflows/periodic-pytorch-*.yml linguist-generated=true
.github/workflows/pytorch-*.yml linguist-generated=true
.github/workflows/generated-*.yml linguist-generated=true

View File

@ -13,6 +13,8 @@ WORKFLOWS = REPO_ROOT / ".github" / "workflows"
def concurrency_key(filename: Path) -> str:
    """Derive the GitHub Actions concurrency-group key for a workflow file.

    The key is the workflow file's stem with underscores normalized to
    hyphens and any leading ``generated-`` prefix removed, followed by a
    literal GitHub expression that scopes the group to the PR number (or
    the commit SHA on non-PR events).
    """
    stem = filename.with_suffix("").name
    key = stem.replace("_", "-")
    prefix = "generated-"
    if key.startswith(prefix):
        # Generated workflows drop the prefix so the group name matches
        # the human-facing workflow name.
        key = key[len(prefix):]
    scope = "${{ github.event.pull_request.number || github.sha }}"
    return f"{key}-{scope}"

View File

@ -156,7 +156,7 @@ class CIWorkflow:
assert self.test_runner_type in WINDOWS_RUNNERS, err_message
def generate_workflow_file(self, workflow_template: jinja2.Template) -> None:
output_file_path = GITHUB_DIR / f"workflows/{workflow.build_environment}.yml"
output_file_path = GITHUB_DIR / f"workflows/generated-{workflow.build_environment}.yml"
with open(output_file_path, "w") as output_file:
GENERATED = "generated"
output_file.writelines([f"# @{GENERATED} DO NOT EDIT MANUALLY\n"])
@ -168,7 +168,7 @@ class CIWorkflow:
WINDOWS_WORKFLOWS = [
CIWorkflow(
arch="windows",
build_environment="pytorch-win-vs2019-cpu-py3",
build_environment="win-vs2019-cpu-py3",
cuda_version="cpu",
test_runner_type=WINDOWS_CPU_TEST_RUNNER,
on_pull_request=True,
@ -176,7 +176,7 @@ WINDOWS_WORKFLOWS = [
),
CIWorkflow(
arch="windows",
build_environment="pytorch-win-vs2019-cuda10-cudnn7-py3",
build_environment="win-vs2019-cuda10-cudnn7-py3",
cuda_version="10.1",
test_runner_type=WINDOWS_CUDA_TEST_RUNNER,
on_pull_request=True,
@ -185,14 +185,14 @@ WINDOWS_WORKFLOWS = [
),
CIWorkflow(
arch="windows",
build_environment="pytorch-win-vs2019-cuda11-cudnn8-py3",
build_environment="win-vs2019-cuda11-cudnn8-py3",
cuda_version="11.1",
test_runner_type=WINDOWS_CUDA_TEST_RUNNER,
num_test_shards=2,
),
CIWorkflow(
arch="windows",
build_environment="periodic-pytorch-win-vs2019-cuda11-cudnn8-py3",
build_environment="periodic-win-vs2019-cuda11-cudnn8-py3",
cuda_version="11.3",
test_runner_type=WINDOWS_CUDA_TEST_RUNNER,
num_test_shards=2,
@ -203,7 +203,7 @@ WINDOWS_WORKFLOWS = [
LINUX_WORKFLOWS = [
CIWorkflow(
arch="linux",
build_environment="pytorch-linux-xenial-py3.6-gcc5.4",
build_environment="linux-xenial-py3.6-gcc5.4",
docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3.6-gcc5.4",
test_runner_type=LINUX_CPU_TEST_RUNNER,
on_pull_request=True,
@ -212,50 +212,50 @@ LINUX_WORKFLOWS = [
),
# CIWorkflow(
# arch="linux",
# build_environment="pytorch-paralleltbb-linux-xenial-py3.6-gcc5.4",
# build_environment="paralleltbb-linux-xenial-py3.6-gcc5.4",
# docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3.6-gcc5.4",
# test_runner_type=LINUX_CPU_TEST_RUNNER,
# ),
# CIWorkflow(
# arch="linux",
# build_environment="pytorch-parallelnative-linux-xenial-py3.6-gcc5.4",
# build_environment="parallelnative-linux-xenial-py3.6-gcc5.4",
# docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3.6-gcc5.4",
# test_runner_type=LINUX_CPU_TEST_RUNNER,
# ),
# CIWorkflow(
# arch="linux",
# build_environment="pytorch-pure_torch-linux-xenial-py3.6-gcc5.4",
# build_environment="pure_torch-linux-xenial-py3.6-gcc5.4",
# docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3.6-gcc5.4",
# test_runner_type=LINUX_CPU_TEST_RUNNER,
# ),
# CIWorkflow(
# arch="linux",
# build_environment="pytorch-linux-xenial-py3.6-gcc7",
# build_environment="linux-xenial-py3.6-gcc7",
# docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3.6-gcc7",
# test_runner_type=LINUX_CPU_TEST_RUNNER,
# ),
# CIWorkflow(
# arch="linux",
# build_environment="pytorch-linux-xenial-py3.6-clang5-asan",
# build_environment="linux-xenial-py3.6-clang5-asan",
# docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3-clang5-asan",
# test_runner_type=LINUX_CPU_TEST_RUNNER,
# ),
# CIWorkflow(
# arch="linux",
# build_environment="pytorch-linux-xenial-py3.6-clang7-onnx",
# build_environment="linux-xenial-py3.6-clang7-onnx",
# docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3-clang7-onnx",
# test_runner_type=LINUX_CPU_TEST_RUNNER,
# ),
CIWorkflow(
arch="linux",
build_environment="pytorch-linux-bionic-cuda10.2-cudnn7-py3.9-gcc7",
build_environment="linux-bionic-cuda10.2-cudnn7-py3.9-gcc7",
docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-bionic-cuda10.2-cudnn7-py3.9-gcc7",
test_runner_type=LINUX_CUDA_TEST_RUNNER,
num_test_shards=2,
),
CIWorkflow(
arch="linux",
build_environment="pytorch-linux-xenial-cuda10.2-cudnn7-py3.6-gcc7",
build_environment="linux-xenial-cuda10.2-cudnn7-py3.6-gcc7",
docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7",
test_runner_type=LINUX_CUDA_TEST_RUNNER,
enable_jit_legacy_test=1,
@ -273,28 +273,28 @@ LINUX_WORKFLOWS = [
),
CIWorkflow(
arch="linux",
build_environment="pytorch-libtorch-linux-xenial-cuda10.2-cudnn7-py3.6-gcc7",
build_environment="libtorch-linux-xenial-cuda10.2-cudnn7-py3.6-gcc7",
docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7",
test_runner_type=LINUX_CUDA_TEST_RUNNER,
is_libtorch=True,
),
CIWorkflow(
arch="linux",
build_environment="pytorch-linux-xenial-cuda11.1-cudnn8-py3.6-gcc7",
build_environment="linux-xenial-cuda11.1-cudnn8-py3.6-gcc7",
docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-cuda11.1-cudnn8-py3-gcc7",
test_runner_type=LINUX_CUDA_TEST_RUNNER,
num_test_shards=2,
),
CIWorkflow(
arch="linux",
build_environment="pytorch-libtorch-linux-xenial-cuda11.1-cudnn8-py3.6-gcc7",
build_environment="libtorch-linux-xenial-cuda11.1-cudnn8-py3.6-gcc7",
docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-cuda11.1-cudnn8-py3-gcc7",
test_runner_type=LINUX_CUDA_TEST_RUNNER,
is_libtorch=True,
),
CIWorkflow(
arch="linux",
build_environment="periodic-pytorch-linux-xenial-cuda11.3-cudnn8-py3.6-gcc7",
build_environment="periodic-linux-xenial-cuda11.3-cudnn8-py3.6-gcc7",
docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7",
test_runner_type=LINUX_CUDA_TEST_RUNNER,
num_test_shards=2,
@ -302,7 +302,7 @@ LINUX_WORKFLOWS = [
),
CIWorkflow(
arch="linux",
build_environment="periodic-pytorch-libtorch-linux-xenial-cuda11.3-cudnn8-py3.6-gcc7",
build_environment="periodic-libtorch-linux-xenial-cuda11.3-cudnn8-py3.6-gcc7",
docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7",
test_runner_type=LINUX_CUDA_TEST_RUNNER,
is_libtorch=True,
@ -310,25 +310,25 @@ LINUX_WORKFLOWS = [
),
# CIWorkflow(
# arch="linux",
# build_environment="pytorch-linux-bionic-py3.6-clang9-noarch",
# build_environment="linux-bionic-py3.6-clang9-noarch",
# docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-bionic-py3.6-clang9",
# test_runner_type=LINUX_CPU_TEST_RUNNER,
# ),
# CIWorkflow(
# arch="linux",
# build_environment="pytorch-xla-linux-bionic-py3.6-clang9",
# build_environment="xla-linux-bionic-py3.6-clang9",
# docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-bionic-py3.6-clang9",
# test_runner_type=LINUX_CPU_TEST_RUNNER,
# ),
# CIWorkflow(
# arch="linux",
# build_environment="pytorch-vulkan-linux-bionic-py3.6-clang9",
# build_environment="vulkan-linux-bionic-py3.6-clang9",
# docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-bionic-py3.6-clang9",
# test_runner_type=LINUX_CPU_TEST_RUNNER,
# ),
CIWorkflow(
arch="linux",
build_environment="pytorch-linux-bionic-py3.8-gcc9-coverage",
build_environment="linux-bionic-py3.8-gcc9-coverage",
docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-bionic-py3.8-gcc9",
test_runner_type=LINUX_CPU_TEST_RUNNER,
on_pull_request=True,
@ -340,55 +340,55 @@ LINUX_WORKFLOWS = [
),
# CIWorkflow(
# arch="linux",
# build_environment="pytorch-linux-bionic-rocm3.9-py3.6",
# build_environment="linux-bionic-rocm3.9-py3.6",
# docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-bionic-rocm3.9-py3.6",
# test_runner_type=LINUX_CPU_TEST_RUNNER,
# ),
# CIWorkflow(
# arch="linux",
# build_environment="pytorch-linux-xenial-py3.6-clang5-android-ndk-r19c-x86_32",
# build_environment="linux-xenial-py3.6-clang5-android-ndk-r19c-x86_32",
# docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c",
# test_runner_type=LINUX_CPU_TEST_RUNNER,
# ),
# CIWorkflow(
# arch="linux",
# build_environment="pytorch-linux-xenial-py3.6-clang5-android-ndk-r19c-x86_64",
# build_environment="linux-xenial-py3.6-clang5-android-ndk-r19c-x86_64",
# docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c",
# test_runner_type=LINUX_CPU_TEST_RUNNER,
# ),
# CIWorkflow(
# arch="linux",
# build_environment="pytorch-linux-xenial-py3.6-clang5-android-ndk-r19c-arm-v7a",
# build_environment="linux-xenial-py3.6-clang5-android-ndk-r19c-arm-v7a",
# docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c",
# test_runner_type=LINUX_CPU_TEST_RUNNER,
# ),
# CIWorkflow(
# arch="linux",
# build_environment="pytorch-linux-xenial-py3.6-clang5-android-ndk-r19c-arm-v8a",
# build_environment="linux-xenial-py3.6-clang5-android-ndk-r19c-arm-v8a",
# docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c",
# test_runner_type=LINUX_CPU_TEST_RUNNER,
# ),
# CIWorkflow(
# arch="linux",
# build_environment="pytorch-linux-xenial-py3.6-clang5-mobile",
# build_environment="linux-xenial-py3.6-clang5-mobile",
# docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3-clang5-asan",
# test_runner_type=LINUX_CPU_TEST_RUNNER,
# ),
# CIWorkflow(
# arch="linux",
# build_environment="pytorch-linux-xenial-py3.6-clang5-mobile-custom-dynamic",
# build_environment="linux-xenial-py3.6-clang5-mobile-custom-dynamic",
# docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c",
# test_runner_type=LINUX_CPU_TEST_RUNNER,
# ),
# CIWorkflow(
# arch="linux",
# build_environment="pytorch-linux-xenial-py3.6-clang5-mobile-custom-static",
# build_environment="linux-xenial-py3.6-clang5-mobile-custom-static",
# docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c",
# test_runner_type=LINUX_CPU_TEST_RUNNER,
# ),
# CIWorkflow(
# arch="linux",
# build_environment="pytorch-linux-xenial-py3.6-clang5-mobile-code-analysis",
# build_environment="linux-xenial-py3.6-clang5-mobile-code-analysis",
# docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c",
# test_runner_type=LINUX_CPU_TEST_RUNNER,
# ),
@ -398,7 +398,7 @@ LINUX_WORKFLOWS = [
BAZEL_WORKFLOWS = [
CIWorkflow(
arch="linux",
build_environment="pytorch-linux-xenial-py3.6-gcc7-bazel-test",
build_environment="linux-xenial-py3.6-gcc7-bazel-test",
docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3.6-gcc7",
test_runner_type=LINUX_CPU_TEST_RUNNER,
),

View File

@ -3,7 +3,7 @@
{% block name -%}
# Template is at: .github/templates/bazel_ci_workflow.yml.j2
# Generation script: .github/scripts/generate_ci_workflows.py
name: Bazel Linux CI (!{{ build_environment }})
name: !{{ build_environment }}
{%- endblock %}
{% block build +%}
# building and testing in a single job since bazel runs only small subset of tests

View File

@ -6,7 +6,7 @@
{%- block name -%}
# Template is at: .github/templates/linux_ci_workflow.yml.j2
# Generation script: .github/scripts/generate_ci_workflows.py
name: Linux CI (!{{ build_environment }})
name: !{{ build_environment }}
{%- endblock %}
on:

View File

@ -5,7 +5,7 @@
# Template is at: .github/templates/windows_ci_workflow.yml.j2
# Generation script: .github/scripts/generate_ci_workflows.py
name: Windows CI (!{{ build_environment }})
name: !{{ build_environment }}
on:
{%- if on_pull_request %}

View File

@ -1,7 +1,7 @@
# @generated DO NOT EDIT MANUALLY
# Template is at: .github/templates/linux_ci_workflow.yml.j2
# Generation script: .github/scripts/generate_ci_workflows.py
name: Linux CI (pytorch-libtorch-linux-xenial-cuda10.2-cudnn7-py3.6-gcc7)
name: libtorch-linux-xenial-cuda10.2-cudnn7-py3.6-gcc7
on:
# TODO: Enable pull_request builds when we can verify capacity can be met by auto-scalers
@ -12,7 +12,7 @@ on:
workflow_dispatch:
env:
BUILD_ENVIRONMENT: pytorch-libtorch-linux-xenial-cuda10.2-cudnn7-py3.6-gcc7
BUILD_ENVIRONMENT: libtorch-linux-xenial-cuda10.2-cudnn7-py3.6-gcc7
DOCKER_IMAGE_BASE: 308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7
SCCACHE_BUCKET: ossci-compiler-cache-circleci-v2
TORCH_CUDA_ARCH_LIST: 5.2
@ -24,7 +24,7 @@ env:
ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
concurrency:
group: pytorch-libtorch-linux-xenial-cuda10.2-cudnn7-py3.6-gcc7-${{ github.event.pull_request.number || github.sha }}
group: libtorch-linux-xenial-cuda10.2-cudnn7-py3.6-gcc7-${{ github.event.pull_request.number || github.sha }}
cancel-in-progress: true
jobs:
@ -106,7 +106,7 @@ jobs:
needs: [calculate-docker-image, ]
env:
DOCKER_IMAGE: ${{ needs.calculate-docker-image.outputs.docker_image }}
JOB_BASE_NAME: pytorch-libtorch-linux-xenial-cuda10.2-cudnn7-py3.6-gcc7-build
JOB_BASE_NAME: libtorch-linux-xenial-cuda10.2-cudnn7-py3.6-gcc7-build
steps:
- name: Log in to ECR
run: |

View File

@ -1,7 +1,7 @@
# @generated DO NOT EDIT MANUALLY
# Template is at: .github/templates/linux_ci_workflow.yml.j2
# Generation script: .github/scripts/generate_ci_workflows.py
name: Linux CI (pytorch-libtorch-linux-xenial-cuda11.1-cudnn8-py3.6-gcc7)
name: libtorch-linux-xenial-cuda11.1-cudnn8-py3.6-gcc7
on:
# TODO: Enable pull_request builds when we can verify capacity can be met by auto-scalers
@ -12,7 +12,7 @@ on:
workflow_dispatch:
env:
BUILD_ENVIRONMENT: pytorch-libtorch-linux-xenial-cuda11.1-cudnn8-py3.6-gcc7
BUILD_ENVIRONMENT: libtorch-linux-xenial-cuda11.1-cudnn8-py3.6-gcc7
DOCKER_IMAGE_BASE: 308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda11.1-cudnn8-py3-gcc7
SCCACHE_BUCKET: ossci-compiler-cache-circleci-v2
TORCH_CUDA_ARCH_LIST: 5.2
@ -24,7 +24,7 @@ env:
ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
concurrency:
group: pytorch-libtorch-linux-xenial-cuda11.1-cudnn8-py3.6-gcc7-${{ github.event.pull_request.number || github.sha }}
group: libtorch-linux-xenial-cuda11.1-cudnn8-py3.6-gcc7-${{ github.event.pull_request.number || github.sha }}
cancel-in-progress: true
jobs:
@ -106,7 +106,7 @@ jobs:
needs: [calculate-docker-image, ]
env:
DOCKER_IMAGE: ${{ needs.calculate-docker-image.outputs.docker_image }}
JOB_BASE_NAME: pytorch-libtorch-linux-xenial-cuda11.1-cudnn8-py3.6-gcc7-build
JOB_BASE_NAME: libtorch-linux-xenial-cuda11.1-cudnn8-py3.6-gcc7-build
steps:
- name: Log in to ECR
run: |

View File

@ -1,7 +1,7 @@
# @generated DO NOT EDIT MANUALLY
# Template is at: .github/templates/linux_ci_workflow.yml.j2
# Generation script: .github/scripts/generate_ci_workflows.py
name: Linux CI (pytorch-linux-bionic-cuda10.2-cudnn7-py3.9-gcc7)
name: linux-bionic-cuda10.2-cudnn7-py3.9-gcc7
on:
# TODO: Enable pull_request builds when we can verify capacity can be met by auto-scalers
@ -12,7 +12,7 @@ on:
workflow_dispatch:
env:
BUILD_ENVIRONMENT: pytorch-linux-bionic-cuda10.2-cudnn7-py3.9-gcc7
BUILD_ENVIRONMENT: linux-bionic-cuda10.2-cudnn7-py3.9-gcc7
DOCKER_IMAGE_BASE: 308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-bionic-cuda10.2-cudnn7-py3.9-gcc7
SCCACHE_BUCKET: ossci-compiler-cache-circleci-v2
TORCH_CUDA_ARCH_LIST: 5.2
@ -24,7 +24,7 @@ env:
ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
concurrency:
group: pytorch-linux-bionic-cuda10.2-cudnn7-py3.9-gcc7-${{ github.event.pull_request.number || github.sha }}
group: linux-bionic-cuda10.2-cudnn7-py3.9-gcc7-${{ github.event.pull_request.number || github.sha }}
cancel-in-progress: true
jobs:
@ -106,7 +106,7 @@ jobs:
needs: [calculate-docker-image, ]
env:
DOCKER_IMAGE: ${{ needs.calculate-docker-image.outputs.docker_image }}
JOB_BASE_NAME: pytorch-linux-bionic-cuda10.2-cudnn7-py3.9-gcc7-build
JOB_BASE_NAME: linux-bionic-cuda10.2-cudnn7-py3.9-gcc7-build
steps:
- name: Log in to ECR
run: |
@ -237,7 +237,7 @@ jobs:
runs-on: ${{ matrix.runner }}
env:
DOCKER_IMAGE: ${{ needs.calculate-docker-image.outputs.docker_image }}
JOB_BASE_NAME: pytorch-linux-bionic-cuda10.2-cudnn7-py3.9-gcc7-test
JOB_BASE_NAME: linux-bionic-cuda10.2-cudnn7-py3.9-gcc7-test
TEST_CONFIG: ${{ matrix.config }}
SHARD_NUMBER: ${{ matrix.shard }}
NUM_TEST_SHARDS: ${{ matrix.num_shards }}
@ -425,7 +425,7 @@ jobs:
env:
AWS_DEFAULT_REGION: us-east-1
CIRCLE_BRANCH: ${{ steps.parse-ref.outputs.branch }}
JOB_BASE_NAME: pytorch-linux-bionic-cuda10.2-cudnn7-py3.9-gcc7-test
JOB_BASE_NAME: linux-bionic-cuda10.2-cudnn7-py3.9-gcc7-test
CIRCLE_PR_NUMBER: ${{ github.event.pull_request.number }}
CIRCLE_SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
CIRCLE_TAG: ${{ steps.parse-ref.outputs.tag }}

View File

@ -1,7 +1,7 @@
# @generated DO NOT EDIT MANUALLY
# Template is at: .github/templates/linux_ci_workflow.yml.j2
# Generation script: .github/scripts/generate_ci_workflows.py
name: Linux CI (pytorch-linux-bionic-py3.8-gcc9-coverage)
name: linux-bionic-py3.8-gcc9-coverage
on:
# TODO: Enable pull_request builds when we can verify capacity can be met by auto-scalers
@ -14,7 +14,7 @@ on:
workflow_dispatch:
env:
BUILD_ENVIRONMENT: pytorch-linux-bionic-py3.8-gcc9-coverage
BUILD_ENVIRONMENT: linux-bionic-py3.8-gcc9-coverage
DOCKER_IMAGE_BASE: 308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-bionic-py3.8-gcc9
SCCACHE_BUCKET: ossci-compiler-cache-circleci-v2
TORCH_CUDA_ARCH_LIST: 5.2
@ -26,7 +26,7 @@ env:
ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
concurrency:
group: pytorch-linux-bionic-py3.8-gcc9-coverage-${{ github.event.pull_request.number || github.sha }}
group: linux-bionic-py3.8-gcc9-coverage-${{ github.event.pull_request.number || github.sha }}
cancel-in-progress: true
jobs:
@ -115,7 +115,7 @@ jobs:
needs: [calculate-docker-image, ciflow_should_run]
env:
DOCKER_IMAGE: ${{ needs.calculate-docker-image.outputs.docker_image }}
JOB_BASE_NAME: pytorch-linux-bionic-py3.8-gcc9-coverage-build
JOB_BASE_NAME: linux-bionic-py3.8-gcc9-coverage-build
steps:
- name: Log in to ECR
run: |
@ -247,7 +247,7 @@ jobs:
runs-on: ${{ matrix.runner }}
env:
DOCKER_IMAGE: ${{ needs.calculate-docker-image.outputs.docker_image }}
JOB_BASE_NAME: pytorch-linux-bionic-py3.8-gcc9-coverage-test
JOB_BASE_NAME: linux-bionic-py3.8-gcc9-coverage-test
TEST_CONFIG: ${{ matrix.config }}
SHARD_NUMBER: ${{ matrix.shard }}
NUM_TEST_SHARDS: ${{ matrix.num_shards }}
@ -435,7 +435,7 @@ jobs:
env:
AWS_DEFAULT_REGION: us-east-1
CIRCLE_BRANCH: ${{ steps.parse-ref.outputs.branch }}
JOB_BASE_NAME: pytorch-linux-bionic-py3.8-gcc9-coverage-test
JOB_BASE_NAME: linux-bionic-py3.8-gcc9-coverage-test
CIRCLE_PR_NUMBER: ${{ github.event.pull_request.number }}
CIRCLE_SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
CIRCLE_TAG: ${{ steps.parse-ref.outputs.tag }}

View File

@ -1,7 +1,7 @@
# @generated DO NOT EDIT MANUALLY
# Template is at: .github/templates/linux_ci_workflow.yml.j2
# Generation script: .github/scripts/generate_ci_workflows.py
name: Linux CI (pytorch-linux-xenial-cuda10.2-cudnn7-py3.6-gcc7)
name: linux-xenial-cuda10.2-cudnn7-py3.6-gcc7
on:
# TODO: Enable pull_request builds when we can verify capacity can be met by auto-scalers
@ -14,7 +14,7 @@ on:
workflow_dispatch:
env:
BUILD_ENVIRONMENT: pytorch-linux-xenial-cuda10.2-cudnn7-py3.6-gcc7
BUILD_ENVIRONMENT: linux-xenial-cuda10.2-cudnn7-py3.6-gcc7
DOCKER_IMAGE_BASE: 308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7
SCCACHE_BUCKET: ossci-compiler-cache-circleci-v2
TORCH_CUDA_ARCH_LIST: 5.2
@ -26,7 +26,7 @@ env:
ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
concurrency:
group: pytorch-linux-xenial-cuda10.2-cudnn7-py3.6-gcc7-${{ github.event.pull_request.number || github.sha }}
group: linux-xenial-cuda10.2-cudnn7-py3.6-gcc7-${{ github.event.pull_request.number || github.sha }}
cancel-in-progress: true
jobs:
@ -115,7 +115,7 @@ jobs:
needs: [calculate-docker-image, ciflow_should_run]
env:
DOCKER_IMAGE: ${{ needs.calculate-docker-image.outputs.docker_image }}
JOB_BASE_NAME: pytorch-linux-xenial-cuda10.2-cudnn7-py3.6-gcc7-build
JOB_BASE_NAME: linux-xenial-cuda10.2-cudnn7-py3.6-gcc7-build
steps:
- name: Log in to ECR
run: |
@ -247,7 +247,7 @@ jobs:
runs-on: ${{ matrix.runner }}
env:
DOCKER_IMAGE: ${{ needs.calculate-docker-image.outputs.docker_image }}
JOB_BASE_NAME: pytorch-linux-xenial-cuda10.2-cudnn7-py3.6-gcc7-test
JOB_BASE_NAME: linux-xenial-cuda10.2-cudnn7-py3.6-gcc7-test
TEST_CONFIG: ${{ matrix.config }}
SHARD_NUMBER: ${{ matrix.shard }}
NUM_TEST_SHARDS: ${{ matrix.num_shards }}
@ -435,7 +435,7 @@ jobs:
env:
AWS_DEFAULT_REGION: us-east-1
CIRCLE_BRANCH: ${{ steps.parse-ref.outputs.branch }}
JOB_BASE_NAME: pytorch-linux-xenial-cuda10.2-cudnn7-py3.6-gcc7-test
JOB_BASE_NAME: linux-xenial-cuda10.2-cudnn7-py3.6-gcc7-test
CIRCLE_PR_NUMBER: ${{ github.event.pull_request.number }}
CIRCLE_SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
CIRCLE_TAG: ${{ steps.parse-ref.outputs.tag }}

View File

@ -1,7 +1,7 @@
# @generated DO NOT EDIT MANUALLY
# Template is at: .github/templates/linux_ci_workflow.yml.j2
# Generation script: .github/scripts/generate_ci_workflows.py
name: Linux CI (pytorch-linux-xenial-cuda11.1-cudnn8-py3.6-gcc7)
name: linux-xenial-cuda11.1-cudnn8-py3.6-gcc7
on:
# TODO: Enable pull_request builds when we can verify capacity can be met by auto-scalers
@ -12,7 +12,7 @@ on:
workflow_dispatch:
env:
BUILD_ENVIRONMENT: pytorch-linux-xenial-cuda11.1-cudnn8-py3.6-gcc7
BUILD_ENVIRONMENT: linux-xenial-cuda11.1-cudnn8-py3.6-gcc7
DOCKER_IMAGE_BASE: 308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda11.1-cudnn8-py3-gcc7
SCCACHE_BUCKET: ossci-compiler-cache-circleci-v2
TORCH_CUDA_ARCH_LIST: 5.2
@ -24,7 +24,7 @@ env:
ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
concurrency:
group: pytorch-linux-xenial-cuda11.1-cudnn8-py3.6-gcc7-${{ github.event.pull_request.number || github.sha }}
group: linux-xenial-cuda11.1-cudnn8-py3.6-gcc7-${{ github.event.pull_request.number || github.sha }}
cancel-in-progress: true
jobs:
@ -106,7 +106,7 @@ jobs:
needs: [calculate-docker-image, ]
env:
DOCKER_IMAGE: ${{ needs.calculate-docker-image.outputs.docker_image }}
JOB_BASE_NAME: pytorch-linux-xenial-cuda11.1-cudnn8-py3.6-gcc7-build
JOB_BASE_NAME: linux-xenial-cuda11.1-cudnn8-py3.6-gcc7-build
steps:
- name: Log in to ECR
run: |
@ -237,7 +237,7 @@ jobs:
runs-on: ${{ matrix.runner }}
env:
DOCKER_IMAGE: ${{ needs.calculate-docker-image.outputs.docker_image }}
JOB_BASE_NAME: pytorch-linux-xenial-cuda11.1-cudnn8-py3.6-gcc7-test
JOB_BASE_NAME: linux-xenial-cuda11.1-cudnn8-py3.6-gcc7-test
TEST_CONFIG: ${{ matrix.config }}
SHARD_NUMBER: ${{ matrix.shard }}
NUM_TEST_SHARDS: ${{ matrix.num_shards }}
@ -425,7 +425,7 @@ jobs:
env:
AWS_DEFAULT_REGION: us-east-1
CIRCLE_BRANCH: ${{ steps.parse-ref.outputs.branch }}
JOB_BASE_NAME: pytorch-linux-xenial-cuda11.1-cudnn8-py3.6-gcc7-test
JOB_BASE_NAME: linux-xenial-cuda11.1-cudnn8-py3.6-gcc7-test
CIRCLE_PR_NUMBER: ${{ github.event.pull_request.number }}
CIRCLE_SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
CIRCLE_TAG: ${{ steps.parse-ref.outputs.tag }}

View File

@ -1,7 +1,7 @@
# @generated DO NOT EDIT MANUALLY
# Template is at: .github/templates/linux_ci_workflow.yml.j2
# Generation script: .github/scripts/generate_ci_workflows.py
name: Linux CI (pytorch-linux-xenial-py3.6-gcc5.4)
name: linux-xenial-py3.6-gcc5.4
on:
# TODO: Enable pull_request builds when we can verify capacity can be met by auto-scalers
@ -13,7 +13,7 @@ on:
workflow_dispatch:
env:
BUILD_ENVIRONMENT: pytorch-linux-xenial-py3.6-gcc5.4
BUILD_ENVIRONMENT: linux-xenial-py3.6-gcc5.4
DOCKER_IMAGE_BASE: 308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.6-gcc5.4
SCCACHE_BUCKET: ossci-compiler-cache-circleci-v2
TORCH_CUDA_ARCH_LIST: 5.2
@ -25,7 +25,7 @@ env:
ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
concurrency:
group: pytorch-linux-xenial-py3.6-gcc5.4-${{ github.event.pull_request.number || github.sha }}
group: linux-xenial-py3.6-gcc5.4-${{ github.event.pull_request.number || github.sha }}
cancel-in-progress: true
jobs:
@ -107,7 +107,7 @@ jobs:
needs: [calculate-docker-image, ]
env:
DOCKER_IMAGE: ${{ needs.calculate-docker-image.outputs.docker_image }}
JOB_BASE_NAME: pytorch-linux-xenial-py3.6-gcc5.4-build
JOB_BASE_NAME: linux-xenial-py3.6-gcc5.4-build
steps:
- name: Log in to ECR
run: |
@ -238,7 +238,7 @@ jobs:
runs-on: ${{ matrix.runner }}
env:
DOCKER_IMAGE: ${{ needs.calculate-docker-image.outputs.docker_image }}
JOB_BASE_NAME: pytorch-linux-xenial-py3.6-gcc5.4-test
JOB_BASE_NAME: linux-xenial-py3.6-gcc5.4-test
TEST_CONFIG: ${{ matrix.config }}
SHARD_NUMBER: ${{ matrix.shard }}
NUM_TEST_SHARDS: ${{ matrix.num_shards }}
@ -426,7 +426,7 @@ jobs:
env:
AWS_DEFAULT_REGION: us-east-1
CIRCLE_BRANCH: ${{ steps.parse-ref.outputs.branch }}
JOB_BASE_NAME: pytorch-linux-xenial-py3.6-gcc5.4-test
JOB_BASE_NAME: linux-xenial-py3.6-gcc5.4-test
CIRCLE_PR_NUMBER: ${{ github.event.pull_request.number }}
CIRCLE_SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
CIRCLE_TAG: ${{ steps.parse-ref.outputs.tag }}

View File

@ -1,7 +1,7 @@
# @generated DO NOT EDIT MANUALLY
# Template is at: .github/templates/bazel_ci_workflow.yml.j2
# Generation script: .github/scripts/generate_ci_workflows.py
name: Bazel Linux CI (pytorch-linux-xenial-py3.6-gcc7-bazel-test)
name: linux-xenial-py3.6-gcc7-bazel-test
on:
# TODO: Enable pull_request builds when we can verify capacity can be met by auto-scalers
@ -12,7 +12,7 @@ on:
workflow_dispatch:
env:
BUILD_ENVIRONMENT: pytorch-linux-xenial-py3.6-gcc7-bazel-test
BUILD_ENVIRONMENT: linux-xenial-py3.6-gcc7-bazel-test
DOCKER_IMAGE_BASE: 308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.6-gcc7
SCCACHE_BUCKET: ossci-compiler-cache-circleci-v2
TORCH_CUDA_ARCH_LIST: 5.2
@ -24,7 +24,7 @@ env:
ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
concurrency:
group: pytorch-linux-xenial-py3.6-gcc7-bazel-test-${{ github.event.pull_request.number || github.sha }}
group: linux-xenial-py3.6-gcc7-bazel-test-${{ github.event.pull_request.number || github.sha }}
cancel-in-progress: true
jobs:
@ -107,7 +107,7 @@ jobs:
needs: [calculate-docker-image, ]
env:
DOCKER_IMAGE: ${{ needs.calculate-docker-image.outputs.docker_image }}
JOB_BASE_NAME: pytorch-linux-xenial-py3.6-gcc7-bazel-test-build-and-test
JOB_BASE_NAME: linux-xenial-py3.6-gcc7-bazel-test-build-and-test
NUM_TEST_SHARDS: 1
steps:
- name: Log in to ECR

View File

@ -1,7 +1,7 @@
# @generated DO NOT EDIT MANUALLY
# Template is at: .github/templates/linux_ci_workflow.yml.j2
# Generation script: .github/scripts/generate_ci_workflows.py
name: Linux CI (periodic-pytorch-libtorch-linux-xenial-cuda11.3-cudnn8-py3.6-gcc7)
name: periodic-libtorch-linux-xenial-cuda11.3-cudnn8-py3.6-gcc7
on:
# TODO: Enable pull_request builds when we can verify capacity can be met by auto-scalers
@ -12,7 +12,7 @@ on:
workflow_dispatch:
env:
BUILD_ENVIRONMENT: periodic-pytorch-libtorch-linux-xenial-cuda11.3-cudnn8-py3.6-gcc7
BUILD_ENVIRONMENT: periodic-libtorch-linux-xenial-cuda11.3-cudnn8-py3.6-gcc7
DOCKER_IMAGE_BASE: 308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7
SCCACHE_BUCKET: ossci-compiler-cache-circleci-v2
TORCH_CUDA_ARCH_LIST: 5.2
@ -24,7 +24,7 @@ env:
ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
concurrency:
group: periodic-pytorch-libtorch-linux-xenial-cuda11.3-cudnn8-py3.6-gcc7-${{ github.event.pull_request.number || github.sha }}
group: periodic-libtorch-linux-xenial-cuda11.3-cudnn8-py3.6-gcc7-${{ github.event.pull_request.number || github.sha }}
cancel-in-progress: true
jobs:
@ -113,7 +113,7 @@ jobs:
needs: [calculate-docker-image, ciflow_should_run]
env:
DOCKER_IMAGE: ${{ needs.calculate-docker-image.outputs.docker_image }}
JOB_BASE_NAME: periodic-pytorch-libtorch-linux-xenial-cuda11.3-cudnn8-py3.6-gcc7-build
JOB_BASE_NAME: periodic-libtorch-linux-xenial-cuda11.3-cudnn8-py3.6-gcc7-build
steps:
- name: Log in to ECR
run: |

View File

@ -1,7 +1,7 @@
# @generated DO NOT EDIT MANUALLY
# Template is at: .github/templates/linux_ci_workflow.yml.j2
# Generation script: .github/scripts/generate_ci_workflows.py
name: Linux CI (periodic-pytorch-linux-xenial-cuda11.3-cudnn8-py3.6-gcc7)
name: periodic-linux-xenial-cuda11.3-cudnn8-py3.6-gcc7
on:
# TODO: Enable pull_request builds when we can verify capacity can be met by auto-scalers
@ -12,7 +12,7 @@ on:
workflow_dispatch:
env:
BUILD_ENVIRONMENT: periodic-pytorch-linux-xenial-cuda11.3-cudnn8-py3.6-gcc7
BUILD_ENVIRONMENT: periodic-linux-xenial-cuda11.3-cudnn8-py3.6-gcc7
DOCKER_IMAGE_BASE: 308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7
SCCACHE_BUCKET: ossci-compiler-cache-circleci-v2
TORCH_CUDA_ARCH_LIST: 5.2
@ -24,7 +24,7 @@ env:
ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
concurrency:
group: periodic-pytorch-linux-xenial-cuda11.3-cudnn8-py3.6-gcc7-${{ github.event.pull_request.number || github.sha }}
group: periodic-linux-xenial-cuda11.3-cudnn8-py3.6-gcc7-${{ github.event.pull_request.number || github.sha }}
cancel-in-progress: true
jobs:
@ -113,7 +113,7 @@ jobs:
needs: [calculate-docker-image, ciflow_should_run]
env:
DOCKER_IMAGE: ${{ needs.calculate-docker-image.outputs.docker_image }}
JOB_BASE_NAME: periodic-pytorch-linux-xenial-cuda11.3-cudnn8-py3.6-gcc7-build
JOB_BASE_NAME: periodic-linux-xenial-cuda11.3-cudnn8-py3.6-gcc7-build
steps:
- name: Log in to ECR
run: |
@ -245,7 +245,7 @@ jobs:
runs-on: ${{ matrix.runner }}
env:
DOCKER_IMAGE: ${{ needs.calculate-docker-image.outputs.docker_image }}
JOB_BASE_NAME: periodic-pytorch-linux-xenial-cuda11.3-cudnn8-py3.6-gcc7-test
JOB_BASE_NAME: periodic-linux-xenial-cuda11.3-cudnn8-py3.6-gcc7-test
TEST_CONFIG: ${{ matrix.config }}
SHARD_NUMBER: ${{ matrix.shard }}
NUM_TEST_SHARDS: ${{ matrix.num_shards }}
@ -433,7 +433,7 @@ jobs:
env:
AWS_DEFAULT_REGION: us-east-1
CIRCLE_BRANCH: ${{ steps.parse-ref.outputs.branch }}
JOB_BASE_NAME: periodic-pytorch-linux-xenial-cuda11.3-cudnn8-py3.6-gcc7-test
JOB_BASE_NAME: periodic-linux-xenial-cuda11.3-cudnn8-py3.6-gcc7-test
CIRCLE_PR_NUMBER: ${{ github.event.pull_request.number }}
CIRCLE_SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
CIRCLE_TAG: ${{ steps.parse-ref.outputs.tag }}

View File

@ -1,7 +1,7 @@
# @generated DO NOT EDIT MANUALLY
# Template is at: .github/templates/windows_ci_workflow.yml.j2
# Generation script: .github/scripts/generate_ci_workflows.py
name: Windows CI (periodic-pytorch-win-vs2019-cuda11-cudnn8-py3)
name: periodic-win-vs2019-cuda11-cudnn8-py3
on:
pull_request:
@ -11,7 +11,7 @@ on:
workflow_dispatch:
env:
BUILD_ENVIRONMENT: periodic-pytorch-win-vs2019-cuda11-cudnn8-py3
BUILD_ENVIRONMENT: periodic-win-vs2019-cuda11-cudnn8-py3
BUILD_WHEEL: 1
CUDA_VERSION: "11.3"
IN_CI: 1
@ -28,7 +28,7 @@ env:
USE_CUDA: 1
concurrency:
group: periodic-pytorch-win-vs2019-cuda11-cudnn8-py3-${{ github.event.pull_request.number || github.sha }}
group: periodic-win-vs2019-cuda11-cudnn8-py3-${{ github.event.pull_request.number || github.sha }}
cancel-in-progress: true
jobs:
@ -46,7 +46,7 @@ jobs:
working-directory: pytorch-${{ github.run_id }}
needs: [ciflow_should_run]
env:
JOB_BASE_NAME: periodic-pytorch-win-vs2019-cuda11-cudnn8-py3-build
JOB_BASE_NAME: periodic-win-vs2019-cuda11-cudnn8-py3-build
http_proxy: "http://internal-tf-lb-20210727220640487900000002-835786077.us-east-1.elb.amazonaws.com:3128"
https_proxy: "http://internal-tf-lb-20210727220640487900000002-835786077.us-east-1.elb.amazonaws.com:3128"
steps:
@ -126,7 +126,7 @@ jobs:
test:
env:
JOB_BASE_NAME: periodic-pytorch-win-vs2019-cuda11-cudnn8-py3-test
JOB_BASE_NAME: periodic-win-vs2019-cuda11-cudnn8-py3-test
SHARD_NUMBER: ${{ matrix.shard }}
NUM_TEST_SHARDS: ${{ matrix.num_shards }}
TEST_CONFIG: ${{ matrix.config }}
@ -266,7 +266,7 @@ jobs:
env:
AWS_DEFAULT_REGION: us-east-1
CIRCLE_BRANCH: ${{ steps.parse-ref.outputs.branch }}
JOB_BASE_NAME: periodic-pytorch-win-vs2019-cuda11-cudnn8-py3-test
JOB_BASE_NAME: periodic-win-vs2019-cuda11-cudnn8-py3-test
CIRCLE_PR_NUMBER: ${{ github.event.pull_request.number }}
CIRCLE_SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
CIRCLE_TAG: ${{ steps.parse-ref.outputs.tag }}

View File

@ -1,7 +1,7 @@
# @generated DO NOT EDIT MANUALLY
# Template is at: .github/templates/windows_ci_workflow.yml.j2
# Generation script: .github/scripts/generate_ci_workflows.py
name: Windows CI (pytorch-win-vs2019-cpu-py3)
name: win-vs2019-cpu-py3
on:
pull_request:
@ -12,7 +12,7 @@ on:
workflow_dispatch:
env:
BUILD_ENVIRONMENT: pytorch-win-vs2019-cpu-py3
BUILD_ENVIRONMENT: win-vs2019-cpu-py3
BUILD_WHEEL: 1
CUDA_VERSION: "cpu"
IN_CI: 1
@ -27,7 +27,7 @@ env:
no_proxy: localhost,127.0.0.1,amazonaws.com,s3.amazonaws.com,169.254.169.254,169.254.170.2,/var/run/docker.sock
concurrency:
group: pytorch-win-vs2019-cpu-py3-${{ github.event.pull_request.number || github.sha }}
group: win-vs2019-cpu-py3-${{ github.event.pull_request.number || github.sha }}
cancel-in-progress: true
jobs:
@ -38,7 +38,7 @@ jobs:
run:
working-directory: pytorch-${{ github.run_id }}
env:
JOB_BASE_NAME: pytorch-win-vs2019-cpu-py3-build
JOB_BASE_NAME: win-vs2019-cpu-py3-build
http_proxy: "http://internal-tf-lb-20210727220640487900000002-835786077.us-east-1.elb.amazonaws.com:3128"
https_proxy: "http://internal-tf-lb-20210727220640487900000002-835786077.us-east-1.elb.amazonaws.com:3128"
steps:
@ -109,7 +109,7 @@ jobs:
test:
env:
JOB_BASE_NAME: pytorch-win-vs2019-cpu-py3-test
JOB_BASE_NAME: win-vs2019-cpu-py3-test
SHARD_NUMBER: ${{ matrix.shard }}
NUM_TEST_SHARDS: ${{ matrix.num_shards }}
TEST_CONFIG: ${{ matrix.config }}
@ -241,7 +241,7 @@ jobs:
env:
AWS_DEFAULT_REGION: us-east-1
CIRCLE_BRANCH: ${{ steps.parse-ref.outputs.branch }}
JOB_BASE_NAME: pytorch-win-vs2019-cpu-py3-test
JOB_BASE_NAME: win-vs2019-cpu-py3-test
CIRCLE_PR_NUMBER: ${{ github.event.pull_request.number }}
CIRCLE_SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
CIRCLE_TAG: ${{ steps.parse-ref.outputs.tag }}

View File

@ -1,7 +1,7 @@
# @generated DO NOT EDIT MANUALLY
# Template is at: .github/templates/windows_ci_workflow.yml.j2
# Generation script: .github/scripts/generate_ci_workflows.py
name: Windows CI (pytorch-win-vs2019-cuda10-cudnn7-py3)
name: win-vs2019-cuda10-cudnn7-py3
on:
pull_request:
@ -12,7 +12,7 @@ on:
workflow_dispatch:
env:
BUILD_ENVIRONMENT: pytorch-win-vs2019-cuda10-cudnn7-py3
BUILD_ENVIRONMENT: win-vs2019-cuda10-cudnn7-py3
BUILD_WHEEL: 1
CUDA_VERSION: "10.1"
IN_CI: 1
@ -29,7 +29,7 @@ env:
USE_CUDA: 1
concurrency:
group: pytorch-win-vs2019-cuda10-cudnn7-py3-${{ github.event.pull_request.number || github.sha }}
group: win-vs2019-cuda10-cudnn7-py3-${{ github.event.pull_request.number || github.sha }}
cancel-in-progress: true
jobs:
@ -40,7 +40,7 @@ jobs:
run:
working-directory: pytorch-${{ github.run_id }}
env:
JOB_BASE_NAME: pytorch-win-vs2019-cuda10-cudnn7-py3-build
JOB_BASE_NAME: win-vs2019-cuda10-cudnn7-py3-build
http_proxy: "http://internal-tf-lb-20210727220640487900000002-835786077.us-east-1.elb.amazonaws.com:3128"
https_proxy: "http://internal-tf-lb-20210727220640487900000002-835786077.us-east-1.elb.amazonaws.com:3128"
steps:
@ -119,7 +119,7 @@ jobs:
test:
env:
JOB_BASE_NAME: pytorch-win-vs2019-cuda10-cudnn7-py3-test
JOB_BASE_NAME: win-vs2019-cuda10-cudnn7-py3-test
SHARD_NUMBER: ${{ matrix.shard }}
NUM_TEST_SHARDS: ${{ matrix.num_shards }}
TEST_CONFIG: ${{ matrix.config }}
@ -259,7 +259,7 @@ jobs:
env:
AWS_DEFAULT_REGION: us-east-1
CIRCLE_BRANCH: ${{ steps.parse-ref.outputs.branch }}
JOB_BASE_NAME: pytorch-win-vs2019-cuda10-cudnn7-py3-test
JOB_BASE_NAME: win-vs2019-cuda10-cudnn7-py3-test
CIRCLE_PR_NUMBER: ${{ github.event.pull_request.number }}
CIRCLE_SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
CIRCLE_TAG: ${{ steps.parse-ref.outputs.tag }}

View File

@ -1,7 +1,7 @@
# @generated DO NOT EDIT MANUALLY
# Template is at: .github/templates/windows_ci_workflow.yml.j2
# Generation script: .github/scripts/generate_ci_workflows.py
name: Windows CI (pytorch-win-vs2019-cuda11-cudnn8-py3)
name: win-vs2019-cuda11-cudnn8-py3
on:
push:
@ -11,7 +11,7 @@ on:
workflow_dispatch:
env:
BUILD_ENVIRONMENT: pytorch-win-vs2019-cuda11-cudnn8-py3
BUILD_ENVIRONMENT: win-vs2019-cuda11-cudnn8-py3
BUILD_WHEEL: 1
CUDA_VERSION: "11.1"
IN_CI: 1
@ -28,7 +28,7 @@ env:
USE_CUDA: 1
concurrency:
group: pytorch-win-vs2019-cuda11-cudnn8-py3-${{ github.event.pull_request.number || github.sha }}
group: win-vs2019-cuda11-cudnn8-py3-${{ github.event.pull_request.number || github.sha }}
cancel-in-progress: true
jobs:
@ -39,7 +39,7 @@ jobs:
run:
working-directory: pytorch-${{ github.run_id }}
env:
JOB_BASE_NAME: pytorch-win-vs2019-cuda11-cudnn8-py3-build
JOB_BASE_NAME: win-vs2019-cuda11-cudnn8-py3-build
http_proxy: "http://internal-tf-lb-20210727220640487900000002-835786077.us-east-1.elb.amazonaws.com:3128"
https_proxy: "http://internal-tf-lb-20210727220640487900000002-835786077.us-east-1.elb.amazonaws.com:3128"
steps:
@ -118,7 +118,7 @@ jobs:
test:
env:
JOB_BASE_NAME: pytorch-win-vs2019-cuda11-cudnn8-py3-test
JOB_BASE_NAME: win-vs2019-cuda11-cudnn8-py3-test
SHARD_NUMBER: ${{ matrix.shard }}
NUM_TEST_SHARDS: ${{ matrix.num_shards }}
TEST_CONFIG: ${{ matrix.config }}
@ -258,7 +258,7 @@ jobs:
env:
AWS_DEFAULT_REGION: us-east-1
CIRCLE_BRANCH: ${{ steps.parse-ref.outputs.branch }}
JOB_BASE_NAME: pytorch-win-vs2019-cuda11-cudnn8-py3-test
JOB_BASE_NAME: win-vs2019-cuda11-cudnn8-py3-test
CIRCLE_PR_NUMBER: ${{ github.event.pull_request.number }}
CIRCLE_SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
CIRCLE_TAG: ${{ steps.parse-ref.outputs.tag }}

View File

@ -24,7 +24,7 @@ if [[ "$BUILD_ENVIRONMENT" == *-mobile-code-analysis* ]]; then
exec "$(dirname "${BASH_SOURCE[0]}")/build-mobile-code-analysis.sh" "$@"
fi
if [[ "$BUILD_ENVIRONMENT" == pytorch-linux-xenial-cuda11.1-cudnn8-py3-gcc7* ]]; then
if [[ "$BUILD_ENVIRONMENT" == *linux-xenial-cuda11.1-cudnn8-py3-gcc7* ]]; then
# Enabling DEPLOY build (embedded torch python interpreter, experimental)
# only on one config for now, can expand later
export USE_DEPLOY=ON
@ -206,7 +206,7 @@ if [[ "${BUILD_ENVIRONMENT}" == *xla* ]]; then
apply_patches
fi
if [[ "${BUILD_ENVIRONMENT}" == pytorch-linux-xenial-py3.6-gcc7-build || "${BUILD_ENVIRONMENT}" == pytorch-linux-xenial-py3.6-gcc5.4-build ]]; then
if [[ "${BUILD_ENVIRONMENT}" == *linux-xenial-py3.6-gcc7-build* || "${BUILD_ENVIRONMENT}" == *linux-xenial-py3.6-gcc5.4-build* ]]; then
export USE_GLOO_WITH_OPENSSL=ON
fi

View File

@ -70,7 +70,7 @@ declare -f -t trap_add
trap_add cleanup EXIT
if [[ "$BUILD_ENVIRONMENT" != *pytorch-win-* ]]; then
if [[ "$BUILD_ENVIRONMENT" != *win-* ]]; then
if which sccache > /dev/null; then
# Save sccache logs to file
sccache --stop-server > /dev/null 2>&1 || true
@ -124,9 +124,9 @@ if [ -z "$COMPACT_JOB_NAME" ]; then
exit 1
fi
if [[ "$BUILD_ENVIRONMENT" == *pytorch-linux-xenial-cuda10.1-cudnn7-py3* ]] || \
[[ "$BUILD_ENVIRONMENT" == *pytorch-linux-trusty-py3.6-gcc7* ]] || \
[[ "$BUILD_ENVIRONMENT" == *pytorch_macos* ]]; then
if [[ "$BUILD_ENVIRONMENT" == *linux-xenial-cuda10.1-cudnn7-py3* ]] || \
[[ "$BUILD_ENVIRONMENT" == *linux-trusty-py3.6-gcc7* ]] || \
[[ "$BUILD_ENVIRONMENT" == *macos* ]]; then
BUILD_TEST_LIBTORCH=1
else
# shellcheck disable=SC2034
@ -139,18 +139,18 @@ fi
# Linux bionic cannot find conda mkl with cmake 3.10, so we need a cmake from conda.
# Alternatively we could point cmake to the right place
# export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"}
if [[ "$BUILD_ENVIRONMENT" == *pytorch-xla-linux-bionic* ]] || \
[[ "$BUILD_ENVIRONMENT" == *pytorch-linux-xenial-cuda9-cudnn7-py2* ]] || \
[[ "$BUILD_ENVIRONMENT" == *pytorch-linux-xenial-cuda10.1-cudnn7-py3* ]] || \
[[ "$BUILD_ENVIRONMENT" == *pytorch-*centos* ]] || \
[[ "$BUILD_ENVIRONMENT" == *pytorch-linux-bionic* ]]; then
if [[ "$BUILD_ENVIRONMENT" == *xla-linux-bionic* ]] || \
[[ "$BUILD_ENVIRONMENT" == *linux-xenial-cuda9-cudnn7-py2* ]] || \
[[ "$BUILD_ENVIRONMENT" == *linux-xenial-cuda10.1-cudnn7-py3* ]] || \
[[ "$BUILD_ENVIRONMENT" == *centos* ]] || \
[[ "$BUILD_ENVIRONMENT" == *linux-bionic* ]]; then
if ! which conda; then
echo "Expected ${BUILD_ENVIRONMENT} to use conda, but 'which conda' returns empty"
exit 1
else
conda install -q -y cmake
fi
if [[ "$BUILD_ENVIRONMENT" == *pytorch-*centos* ]]; then
if [[ "$BUILD_ENVIRONMENT" == *centos* ]]; then
# cmake3 package will conflict with conda cmake
sudo yum -y remove cmake3 || true
fi

View File

@ -476,7 +476,7 @@ elif [[ "${BUILD_ENVIRONMENT}" == *libtorch* ]]; then
# TODO: run some C++ tests
echo "no-op at the moment"
elif [[ "${BUILD_ENVIRONMENT}" == *-test1 || "${JOB_BASE_NAME}" == *-test1 || "${SHARD_NUMBER}" == 1 ]]; then
if [[ "${BUILD_ENVIRONMENT}" == pytorch-linux-xenial-cuda11.1-cudnn8-py3-gcc7-test1 ]]; then
if [[ "${BUILD_ENVIRONMENT}" == *linux-xenial-cuda11.1-cudnn8-py3-gcc7-test1* ]]; then
test_torch_deploy
fi
test_without_numpy
@ -507,7 +507,7 @@ else
test_distributed
test_benchmarks
test_rpc
if [[ "${BUILD_ENVIRONMENT}" == pytorch-linux-xenial-py3.6-gcc7-test || "${BUILD_ENVIRONMENT}" == pytorch-linux-xenial-py3.6-gcc5.4-test ]]; then
if [[ "${BUILD_ENVIRONMENT}" == *linux-xenial-py3.6-gcc7-test* || "${BUILD_ENVIRONMENT}" == *linux-xenial-py3.6-gcc5.4-test* ]]; then
test_python_gloo_with_tls
fi
fi

View File

@ -40,7 +40,7 @@ popd
pip install "ninja==1.10.0.post1" future "hypothesis==4.53.2" "expecttest==0.1.3" "librosa>=0.6.2" psutil pillow unittest-xml-reporting pytest
:: Only the CPU tests run coverage, which I know is not super clear: https://github.com/pytorch/pytorch/issues/56264
if "%BUILD_ENVIRONMENT%" == "pytorch-win-vs2019-cpu-py3" (
if "%BUILD_ENVIRONMENT%" == "win-vs2019-cpu-py3" (
:: coverage config file needed for plug-ins and settings to work
set PYTORCH_COLLECT_COVERAGE=1
python -mpip install coverage==5.5

View File

@ -73,7 +73,7 @@ run_tests() {
"$SCRIPT_HELPERS_DIR"/test_libtorch.bat
fi
else
if [[ "${BUILD_ENVIRONMENT}" == "pytorch-win-vs2019-cpu-py3" ]]; then
if [[ "${BUILD_ENVIRONMENT}" == *win-vs2019-cpu-py3* ]]; then
export PYTORCH_COLLECT_COVERAGE=1
export COVERAGE_RCFILE=$PWD/.coveragerc # coverage config file needed for plug-ins and settings to work
fi
@ -102,7 +102,7 @@ run_tests
assert_git_not_dirty
echo "TEST PASSED"
if [[ "${BUILD_ENVIRONMENT}" == "pytorch-win-vs2019-cpu-py3" ]]; then
if [[ "${BUILD_ENVIRONMENT}" == *win-vs2019-cpu-py3* ]]; then
pushd "$TEST_DIR"
python -mpip install coverage==5.5
python -mpip install -e "$PROJECT_DIR/tools/coverage_plugins_package"