Mirror of https://github.com/pytorch/pytorch.git
Synced 2025-10-21 21:49:24 +08:00

Compare commits: async_tp ... v1.12.0-rc
82 commits
SHA1
8186aa7d6c
01d9324fe1
5009086150
bfb6b24575
681a6e381c
92437c6b4e
566286f9db
ac3086120d
7964022214
1d5ecdb3b9
7eef782636
fa01ea406a
21e1282098
2d3d6f9d05
da93b1cbeb
d67c72cb53
ef26f13df9
4a9779aa4d
9a94ddc081
dee3dc6070
30fce6836f
0f93212516
585417e935
bd93fe635e
cc6e2d3035
127922d451
4c3742be4b
f12a1ff7f9
f913b4d9fb
9229e451b2
d064733915
9d67727edf
ec86ed25e9
2deba51e72
e9a12ec87f
2a8e3ee91e
47d558e862
bc0a9abad2
fa7d872ce3
d1d2be89fd
0e58e3374e
e3e753161c
dc2b2f09d7
19ebdd7eab
f8160b113e
3e8119bf9a
6660df9f22
8b7e19a87b
9828013233
53fc6dc3db
52435c6b1f
9a66061326
eef0ec541e
0ffefea581
7e12cfb29d
24b9bd4398
5342e76039
08d70ab718
207bde1ee8
51428a8f43
c40f18454d
8a5156a050
04d75d2008
2652da29ab
aa8911885b
528710ec89
de53f70e1d
39ebb3e06e
fd3cc823ce
5bb7c617f6
8a627381c9
f56e16a70f
c93a7f8bea
919b53c5e7
2ad18abc49
9596b999f8
baabb4cb96
906a6e1df9
974f7f8080
8abf37d74e
8ff2bc0c01
a119b7f6d4
@@ -62,7 +62,7 @@ git --no-pager log --max-count 1
 popd

 # Clone the Builder master repo
-retry git clone -q https://github.com/pytorch/builder.git "$BUILDER_ROOT"
+retry git clone -q https://github.com/pytorch/builder.git -b release/1.12 "$BUILDER_ROOT"
 pushd "$BUILDER_ROOT"
 echo "Using builder from "
 git --no-pager log --max-count 1
1  .github/actionlint.yaml  vendored
@@ -13,3 +13,4 @@ self-hosted-runner:
 - bm-runner
 - linux.rocm.gpu
 - macos-12
+- macos12.3-m1
2  .github/templates/common.yml.j2  vendored
@@ -8,7 +8,7 @@

 # NOTE: If testing pytorch/builder changes you can change this variable to change what pytorch/builder reference
 # the binary builds will check out
-{%- set builder_branch = "main" -%}
+{%- set builder_branch = "release/1.12" -%}

 {%- macro concurrency(build_environment) -%}
 concurrency:
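The NOTE in the hunk above is the intended knob for testing pytorch/builder changes: the generated binary-build workflows check out whatever builder_branch resolves to. A minimal sketch of such a temporary override (the branch name here is a hypothetical example, not part of this change):

    {# hypothetical override while testing a pytorch/builder change #}
    {%- set builder_branch = "my-builder-test-branch" -%}

After editing the template, the workflows under .github/workflows/ are regenerated from it, which is what produces the repeated ref changes in the generated-*.yml diffs further down this compare.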
@@ -42,7 +42,6 @@ env:
-PR_LABELS: ${{ toJson(github.event.pull_request.labels.*.name) }}
 PR_NUMBER: ${{ github.event.pull_request.number }}
 PYTORCH_FINAL_PACKAGE_DIR: /artifacts
 PYTORCH_RETRY_TEST_CASES: 1
 PYTORCH_ROOT: /pytorch
 SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
 SKIP_ALL_TESTS: 1
@@ -57,8 +56,8 @@ jobs:
 !{{ upload.binary_env(config) }}
 steps:
 !{{ common.setup_ec2_linux() }}
-!{{ common.checkout(deep_clone=False, directory="pytorch") }}
-!{{ common.checkout(deep_clone=False, directory="builder", repository="pytorch/builder", branch=common.builder_branch) }}
+!{{ common.checkout(deep_clone=False, directory="pytorch", checkout_pr_head=False) }}
+!{{ common.checkout(deep_clone=False, directory="builder", repository="pytorch/builder", branch=common.builder_branch, checkout_pr_head=False) }}
 {%- if config["gpu_arch_type"] == 'cuda' and config["gpu_arch_version"].startswith('11') %}
 - name: Set BUILD_SPLIT_CUDA
 run: |
@@ -130,8 +129,8 @@ jobs:
 with:
 name: !{{ config["build_name"] }}
 path: "${{ runner.temp }}/artifacts/"
-!{{ common.checkout(deep_clone=False, directory="pytorch") }}
-!{{ common.checkout(deep_clone=False, directory="builder", repository="pytorch/builder", branch=common.builder_branch) }}
+!{{ common.checkout(deep_clone=False, directory="pytorch", checkout_pr_head=False) }}
+!{{ common.checkout(deep_clone=False, directory="builder", repository="pytorch/builder", branch=common.builder_branch, checkout_pr_head=False) }}
 {%- if config["gpu_arch_type"] == "rocm" %}
 - name: ROCm set GPU_FLAG
 run: |
@@ -85,8 +85,8 @@ jobs:
 chmod +x "${RUNNER_TEMP}/conda.sh"
 /bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
 echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
-!{{ common.checkout(deep_clone=False, directory="pytorch") }}
-!{{ common.checkout(deep_clone=False, directory="builder", repository="pytorch/builder", branch=common.builder_branch) }}
+!{{ common.checkout(deep_clone=False, directory="pytorch", checkout_pr_head=False) }}
+!{{ common.checkout(deep_clone=False, directory="builder", repository="pytorch/builder", branch=common.builder_branch, checkout_pr_head=False) }}
 - name: Install sccache (only for non-forked PRs, and pushes to trunk)
 if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
 run: |
@@ -51,7 +51,6 @@ env:
 IS_GHA: 1
-PR_LABELS: ${{ toJson(github.event.pull_request.labels.*.name) }}
 PR_NUMBER: ${{ github.event.pull_request.number }}
 PYTORCH_RETRY_TEST_CASES: 1
 SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
 SKIP_ALL_TESTS: 1
 !{{ common.concurrency(build_environment) }}
@@ -66,8 +65,8 @@ jobs:
 steps:
 !{{ common.setup_ec2_windows() }}
 !{{ set_runner_specific_vars() }}
-!{{ common.checkout(deep_clone=False, directory="pytorch") }}
-!{{ common.checkout(deep_clone=False, directory="builder", repository="pytorch/builder", branch=common.builder_branch) }}
+!{{ common.checkout(deep_clone=False, directory="pytorch", checkout_pr_head=False) }}
+!{{ common.checkout(deep_clone=False, directory="builder", repository="pytorch/builder", branch=common.builder_branch, checkout_pr_head=False) }}
 - name: Populate binary env
 shell: bash
 run: |
@@ -102,8 +101,8 @@ jobs:
 with:
 name: !{{ config["build_name"] }}
 path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
-!{{ common.checkout(deep_clone=False, directory="pytorch") }}
-!{{ common.checkout(deep_clone=False, directory="builder", repository="pytorch/builder", branch=common.builder_branch) }}
+!{{ common.checkout(deep_clone=False, directory="pytorch", checkout_pr_head=False) }}
+!{{ common.checkout(deep_clone=False, directory="builder", repository="pytorch/builder", branch=common.builder_branch, checkout_pr_head=False) }}
 - name: Populate binary env
 shell: bash
 run: |
5  .github/workflows/_bazel-build-test.yml  vendored
@@ -106,6 +106,7 @@ jobs:
 CUSTOM_TEST_ARTIFACT_BUILD_DIR: build/custom_test_artifacts
 SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
 PYTORCH_RETRY_TEST_CASES: 1
+PYTORCH_OVERRIDE_FLAKY_SIGNAL: 1
 PR_BODY: ${{ github.event.pull_request.body }}
 SCCACHE_BUCKET: ossci-compiler-cache-circleci-v2
 DOCKER_IMAGE: ${{ steps.calculate-docker-image.outputs.docker-image }}
@@ -129,6 +130,8 @@
 -e MAX_JOBS="$(nproc --ignore=2)" \
 -e SCCACHE_BUCKET \
 -e PR_LABELS \
+-e PYTORCH_RETRY_TEST_CASES \
+-e PYTORCH_OVERRIDE_FLAKY_SIGNAL \
 --env-file="/tmp/github_env_${GITHUB_RUN_ID}" \
 --security-opt seccomp=unconfined \
 --cap-add=SYS_PTRACE \
@@ -168,6 +171,8 @@
 BUILD_ENVIRONMENT: ${{ inputs.build-environment }}
 JOB_BASE_NAME: ${{ inputs.build-environment }}-test
 PR_NUMBER: ${{ github.event.pull_request.number }}
+PYTORCH_RETRY_TEST_CASES: 1
+PYTORCH_OVERRIDE_FLAKY_SIGNAL: 1
 SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
 TAG: ${{ steps.parse-ref.outputs.tag }}
 WORKFLOW_ID: ${{ github.run_id }}
2  .github/workflows/_ios-build-test.yml  vendored
@@ -159,7 +159,7 @@ jobs:
 cd "${GITHUB_WORKSPACE}/ios/TestApp/benchmark"
 mkdir -p ../models
 if [ "${USE_COREML_DELEGATE}" == 1 ]; then
-pip install coremltools==5.0b5
+pip install coremltools==5.0b5 protobuf==3.20.1
 pip install six==1.16.0
 python coreml_backend.py
 else
4  .github/workflows/_linux-test.yml  vendored
@@ -75,6 +75,7 @@ jobs:
 CUSTOM_TEST_ARTIFACT_BUILD_DIR: build/custom_test_artifacts
 SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
 PYTORCH_RETRY_TEST_CASES: 1
+PYTORCH_OVERRIDE_FLAKY_SIGNAL: 1
 JOB_BASE_NAME: ${{ inputs.build-environment }}-test
 TEST_CONFIG: ${{ matrix.config }}
 SHARD_NUMBER: ${{ matrix.shard }}
@@ -123,6 +124,7 @@
 -e PR_BODY \
 -e COMMIT_MESSAGES \
 -e PYTORCH_RETRY_TEST_CASES \
+-e PYTORCH_OVERRIDE_FLAKY_SIGNAL \
 -e PR_LABELS \
 -e MAX_JOBS="$(nproc --ignore=2)" \
 -e SCCACHE_BUCKET \
@@ -177,6 +179,8 @@
 SHARD_NUMBER: ${{ matrix.shard }}
 BUILD_ENVIRONMENT: ${{ inputs.build-environment }}
 PR_NUMBER: ${{ github.event.pull_request.number }}
+PYTORCH_RETRY_TEST_CASES: 1
+PYTORCH_OVERRIDE_FLAKY_SIGNAL: 1
 SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
 TAG: ${{ steps.parse-ref.outputs.tag }}
 WORKFLOW_ID: ${{ github.run_id }}
57  .github/workflows/_mac-test-arm64.yml  vendored  Normal file
@@ -0,0 +1,57 @@
+name: mac-test-arm64
+
+on:
+  workflow_call:
+    inputs:
+      build-environment:
+        required: true
+        type: string
+        description: Top-level label for what's being built/tested.
+
+
+jobs:
+  run_mps_test:
+    name: "Run MPS tests"
+    runs-on: macos12.3-m1
+    steps:
+      - name: Checkout PyTorch
+        uses: malfet/checkout@silent-checkout
+        with:
+          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+          quiet-checkout: true
+
+      - name: Clean checkout
+        shell: arch -arch arm64 bash {0}
+        run: |
+          git clean -fxd
+
+      - name: Download build artifacts
+        uses: ./.github/actions/download-build-artifacts
+        with:
+          name: ${{ inputs.build-environment }}
+          use-gha: true
+
+      - name: Install PyTorch
+        env:
+          ENV_NAME: conda-test-env-${{ github.run_id }}
+          PY_VERS: 3.8
+        shell: arch -arch arm64 bash {0}
+        run: |
+          # shellcheck disable=SC1090
+          . ~/miniconda3/etc/profile.d/conda.sh
+          set -ex
+          conda create -yp "${ENV_NAME}" "python=${PY_VERS}" numpy expecttest
+          # As wheels are cross-compiled they are reported as x86_64 ones
+          ORIG_WHLNAME=$(ls -1 dist/*.whl); ARM_WHLNAME=${ORIG_WHLNAME/x86_64/arm64}; mv ${ORIG_WHLNAME} ${ARM_WHLNAME}
+          conda run -p "${ENV_NAME}" python3 -mpip install dist/*.whl
+
+      - name: Run MPS tests
+        env:
+          ENV_NAME: conda-test-env-${{ github.run_id }}
+        shell: arch -arch arm64 bash {0}
+        run: |
+          # shellcheck disable=SC1090
+          . ~/miniconda3/etc/profile.d/conda.sh
+          set -ex
+          conda run --cwd test -p "${ENV_NAME}" python3 test_mps.py -v
+          conda env remove -p "${ENV_NAME}"
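The new _mac-test-arm64.yml above is a reusable workflow_call workflow, so a top-level workflow invokes it as a job and passes its single required input. A minimal caller sketch (the job name, the needs: target, and the build-environment value are illustrative assumptions, not taken from this diff):

    macos-arm64-mps-test:
      needs: macos-arm64-build
      uses: ./.github/workflows/_mac-test-arm64.yml
      with:
        build-environment: macos-12-py3-arm64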
3  .github/workflows/_mac-test.yml  vendored
@@ -48,6 +48,7 @@ jobs:
 NUM_TEST_SHARDS: ${{ matrix.num_shards }}
 PR_BODY: ${{ github.event.pull_request.body }}
 PYTORCH_RETRY_TEST_CASES: 1
+PYTORCH_OVERRIDE_FLAKY_SIGNAL: 1
 steps:
 # [see note: pytorch repo ref]
 - name: Checkout PyTorch
@@ -108,6 +109,8 @@
 SHARD_NUMBER: ${{ matrix.shard }}
 BUILD_ENVIRONMENT: ${{ inputs.build-environment }}
 PR_NUMBER: ${{ github.event.pull_request.number }}
+PYTORCH_RETRY_TEST_CASES: 1
+PYTORCH_OVERRIDE_FLAKY_SIGNAL: 1
 SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
 TAG: ${{ steps.parse-ref.outputs.tag }}
 WORKFLOW_ID: ${{ github.run_id }}
4  .github/workflows/_rocm-test.yml  vendored
@@ -74,6 +74,7 @@ jobs:
 CUSTOM_TEST_ARTIFACT_BUILD_DIR: build/custom_test_artifacts
 SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
 PYTORCH_RETRY_TEST_CASES: 1
+PYTORCH_OVERRIDE_FLAKY_SIGNAL: 1
 JOB_BASE_NAME: ${{ inputs.build-environment }}-test
 TEST_CONFIG: ${{ matrix.config }}
 SHARD_NUMBER: ${{ matrix.shard }}
@@ -120,6 +121,7 @@
 -e PR_BODY \
 -e COMMIT_MESSAGES \
 -e PYTORCH_RETRY_TEST_CASES \
+-e PYTORCH_OVERRIDE_FLAKY_SIGNAL \
 -e PR_LABELS \
 -e MAX_JOBS="$(nproc --ignore=2)" \
 -e SCCACHE_BUCKET \
@@ -173,6 +175,8 @@
 SHARD_NUMBER: ${{ matrix.shard }}
 BUILD_ENVIRONMENT: ${{ inputs.build-environment }}
 PR_NUMBER: ${{ github.event.pull_request.number }}
+PYTORCH_RETRY_TEST_CASES: 1
+PYTORCH_OVERRIDE_FLAKY_SIGNAL: 1
 SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
 TAG: ${{ steps.parse-ref.outputs.tag }}
 WORKFLOW_ID: ${{ github.run_id }}
1  .github/workflows/_win-build.yml  vendored
@@ -61,7 +61,6 @@ jobs:
 MAX_JOBS: 8
 CUDA_VERSION: ${{ inputs.cuda-version }}
 PYTHON_VERSION: "3.8"
 PYTORCH_RETRY_TEST_CASES: 1
-PR_LABELS: ${{ toJson(github.event.pull_request.labels.*.name) }}
 SCCACHE_BUCKET: "ossci-compiler-cache"
 VC_PRODUCT: "BuildTools"
3  .github/workflows/_win-test.yml  vendored
@@ -65,6 +65,7 @@ jobs:
 INSTALL_WINDOWS_SDK: 1
 PYTHON_VERSION: 3.8
 PYTORCH_RETRY_TEST_CASES: 1
+PYTORCH_OVERRIDE_FLAKY_SIGNAL: 1
 PR_LABELS: ${{ toJson(github.event.pull_request.labels.*.name) }}
 VC_PRODUCT: "BuildTools"
 VC_VERSION: ""
@@ -116,6 +117,8 @@
 SHARD_NUMBER: ${{ matrix.shard }}
 BUILD_ENVIRONMENT: ${{ inputs.build-environment }}
 PR_NUMBER: ${{ github.event.pull_request.number }}
+PYTORCH_RETRY_TEST_CASES: 1
+PYTORCH_OVERRIDE_FLAKY_SIGNAL: 1
 SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
 TAG: ${{ steps.parse-ref.outputs.tag }}
 WORKFLOW_ID: ${{ github.run_id }}
97  .github/workflows/generated-linux-binary-conda-nightly.yml  generated  vendored
@@ -31,7 +31,6 @@ env:
-PR_LABELS: ${{ toJson(github.event.pull_request.labels.*.name) }}
 PR_NUMBER: ${{ github.event.pull_request.number }}
 PYTORCH_FINAL_PACKAGE_DIR: /artifacts
 PYTORCH_RETRY_TEST_CASES: 1
 PYTORCH_ROOT: /pytorch
 SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
 SKIP_ALL_TESTS: 1
@@ -77,7 +76,6 @@ jobs:
 - name: Checkout PyTorch
 uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
 with:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
 submodules: recursive
 path: pytorch
 - name: Clean PyTorch checkout
@@ -88,7 +86,7 @@ jobs:
 - name: Checkout pytorch/builder
 uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
 with:
-ref: main
+ref: release/1.12
 submodules: recursive
 repository: pytorch/builder
 path: builder
@@ -206,7 +204,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -217,7 +214,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -413,7 +410,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -424,7 +420,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -543,7 +539,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -554,7 +549,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -762,7 +757,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -773,7 +767,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -895,7 +889,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -906,7 +899,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -1114,7 +1107,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -1125,7 +1117,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -1247,7 +1239,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -1258,7 +1249,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -1465,7 +1456,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -1476,7 +1466,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -1594,7 +1584,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -1605,7 +1594,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -1801,7 +1790,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -1812,7 +1800,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -1931,7 +1919,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -1942,7 +1929,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -2150,7 +2137,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -2161,7 +2147,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -2283,7 +2269,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -2294,7 +2279,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -2502,7 +2487,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -2513,7 +2497,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -2635,7 +2619,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -2646,7 +2629,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -2853,7 +2836,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -2864,7 +2846,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -2982,7 +2964,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -2993,7 +2974,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -3189,7 +3170,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -3200,7 +3180,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -3319,7 +3299,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -3330,7 +3309,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -3538,7 +3517,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -3549,7 +3527,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -3671,7 +3649,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -3682,7 +3659,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -3890,7 +3867,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -3901,7 +3877,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -4023,7 +3999,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -4034,7 +4009,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -4241,7 +4216,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -4252,7 +4226,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -4370,7 +4344,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -4381,7 +4354,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -4577,7 +4550,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -4588,7 +4560,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -4707,7 +4679,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -4718,7 +4689,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -4926,7 +4897,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -4937,7 +4907,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -5059,7 +5029,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -5070,7 +5039,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -5278,7 +5247,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -5289,7 +5257,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -5411,7 +5379,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -5422,7 +5389,7 @@ jobs:
-ref: main
+ref: release/1.12
7  .github/workflows/generated-linux-binary-libtorch-cxx11-abi-master.yml  generated  vendored
@@ -27,7 +27,6 @@ env:
-PR_LABELS: ${{ toJson(github.event.pull_request.labels.*.name) }}
 PR_NUMBER: ${{ github.event.pull_request.number }}
 PYTORCH_FINAL_PACKAGE_DIR: /artifacts
 PYTORCH_RETRY_TEST_CASES: 1
 PYTORCH_ROOT: /pytorch
 SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
 SKIP_ALL_TESTS: 1
@@ -74,7 +73,6 @@ jobs:
 - name: Checkout PyTorch
 uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
 with:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
 submodules: recursive
 path: pytorch
 - name: Clean PyTorch checkout
@@ -85,7 +83,7 @@ jobs:
 - name: Checkout pytorch/builder
 uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
 with:
-ref: main
+ref: release/1.12
 submodules: recursive
 repository: pytorch/builder
 path: builder
@@ -204,7 +202,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -215,7 +212,7 @@ jobs:
-ref: main
+ref: release/1.12
121  .github/workflows/generated-linux-binary-libtorch-cxx11-abi-nightly.yml  generated  vendored
@@ -31,7 +31,6 @@ env:
-PR_LABELS: ${{ toJson(github.event.pull_request.labels.*.name) }}
 PR_NUMBER: ${{ github.event.pull_request.number }}
 PYTORCH_FINAL_PACKAGE_DIR: /artifacts
 PYTORCH_RETRY_TEST_CASES: 1
 PYTORCH_ROOT: /pytorch
 SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
 SKIP_ALL_TESTS: 1
@@ -78,7 +77,6 @@ jobs:
 - name: Checkout PyTorch
 uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
 with:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
 submodules: recursive
 path: pytorch
 - name: Clean PyTorch checkout
@@ -89,7 +87,7 @@ jobs:
 - name: Checkout pytorch/builder
 uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
 with:
-ref: main
+ref: release/1.12
 submodules: recursive
 repository: pytorch/builder
 path: builder
@@ -208,7 +206,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -219,7 +216,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -416,7 +413,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -427,7 +423,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -546,7 +542,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -557,7 +552,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -754,7 +749,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -765,7 +759,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -884,7 +878,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -895,7 +888,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -1092,7 +1085,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -1103,7 +1095,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -1222,7 +1214,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -1233,7 +1224,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -1431,7 +1422,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -1442,7 +1432,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -1562,7 +1552,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -1573,7 +1562,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -1783,7 +1772,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -1794,7 +1782,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -1914,7 +1902,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -1925,7 +1912,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -2135,7 +2122,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -2146,7 +2132,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -2266,7 +2252,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -2277,7 +2262,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -2487,7 +2472,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -2498,7 +2482,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -2618,7 +2602,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -2629,7 +2612,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -2839,7 +2822,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -2850,7 +2832,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -2973,7 +2955,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -2984,7 +2965,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -3194,7 +3175,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -3205,7 +3185,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -3328,7 +3308,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -3339,7 +3318,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -3549,7 +3528,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -3560,7 +3538,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -3683,7 +3661,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -3694,7 +3671,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -3904,7 +3881,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -3915,7 +3891,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -4038,7 +4014,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -4049,7 +4024,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -4259,7 +4234,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -4270,7 +4244,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -4393,7 +4367,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -4404,7 +4377,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -4614,7 +4587,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -4625,7 +4597,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -4748,7 +4720,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -4759,7 +4730,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -4969,7 +4940,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -4980,7 +4950,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -5103,7 +5073,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -5114,7 +5083,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -5324,7 +5293,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -5335,7 +5303,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -5458,7 +5426,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -5469,7 +5436,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -5679,7 +5646,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -5690,7 +5656,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -5826,7 +5792,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -5837,7 +5802,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -6029,7 +5994,6 @@ jobs:
-ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
@@ -6040,7 +6004,7 @@ jobs:
-ref: main
+ref: release/1.12
@@ -6176,7 +6140,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -6187,7 +6150,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -6379,7 +6342,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -6390,7 +6352,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -6526,7 +6488,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -6537,7 +6498,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -6729,7 +6690,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -6740,7 +6700,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -6876,7 +6836,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -6887,7 +6846,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
|
7
.github/workflows/generated-linux-binary-libtorch-pre-cxx11-master.yml
generated
vendored
@@ -27,7 +27,6 @@ env:
   PR_LABELS: ${{ toJson(github.event.pull_request.labels.*.name) }}
   PR_NUMBER: ${{ github.event.pull_request.number }}
   PYTORCH_FINAL_PACKAGE_DIR: /artifacts
-  PYTORCH_RETRY_TEST_CASES: 1
   PYTORCH_ROOT: /pytorch
   SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
   SKIP_ALL_TESTS: 1
@@ -74,7 +73,6 @@ jobs:
       - name: Checkout PyTorch
         uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
         with:
-          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
           submodules: recursive
           path: pytorch
       - name: Clean PyTorch checkout
@@ -85,7 +83,7 @@ jobs:
       - name: Checkout pytorch/builder
         uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
         with:
-          ref: main
+          ref: release/1.12
           submodules: recursive
           repository: pytorch/builder
           path: builder
@@ -204,7 +202,6 @@ jobs:
       - name: Checkout PyTorch
         uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
         with:
-          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
           submodules: recursive
           path: pytorch
       - name: Clean PyTorch checkout
@@ -215,7 +212,7 @@ jobs:
       - name: Checkout pytorch/builder
         uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
         with:
-          ref: main
+          ref: release/1.12
           submodules: recursive
           repository: pytorch/builder
           path: builder
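For reference, a minimal sketch of how the builder checkout step reads in these generated workflows once the release branch is pinned; the step name, action pin, repository, and paths come straight from the hunks above, while the exact indentation inside the job is illustrative only.

# Sketch only: the "Checkout pytorch/builder" step after this change.
# Values are taken from the diff above; the surrounding job/steps structure is assumed.
- name: Checkout pytorch/builder
  uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
  with:
    ref: release/1.12   # previously: main
    submodules: recursive
    repository: pytorch/builder
    path: builder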
121
.github/workflows/generated-linux-binary-libtorch-pre-cxx11-nightly.yml
generated
vendored
@@ -31,7 +31,6 @@ env:
   PR_LABELS: ${{ toJson(github.event.pull_request.labels.*.name) }}
   PR_NUMBER: ${{ github.event.pull_request.number }}
   PYTORCH_FINAL_PACKAGE_DIR: /artifacts
-  PYTORCH_RETRY_TEST_CASES: 1
   PYTORCH_ROOT: /pytorch
   SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
   SKIP_ALL_TESTS: 1
@@ -78,7 +77,6 @@ jobs:
       - name: Checkout PyTorch
         uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
         with:
-          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
           submodules: recursive
           path: pytorch
       - name: Clean PyTorch checkout
@@ -89,7 +87,7 @@ jobs:
       - name: Checkout pytorch/builder
         uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
         with:
-          ref: main
+          ref: release/1.12
           submodules: recursive
           repository: pytorch/builder
           path: builder
The same two hunks — the "Checkout PyTorch" step dropping its pull-request-head `ref:` line, and the "Checkout pytorch/builder" step switching from `ref: main` to `ref: release/1.12` — repeat for every remaining job in this workflow:
@@ -208,7 +206,6 @@ / @@ -219,7 +216,7 @@
@@ -416,7 +413,6 @@ / @@ -427,7 +423,7 @@
@@ -546,7 +542,6 @@ / @@ -557,7 +552,7 @@
@@ -754,7 +749,6 @@ / @@ -765,7 +759,7 @@
@@ -884,7 +878,6 @@ / @@ -895,7 +888,7 @@
@@ -1092,7 +1085,6 @@ / @@ -1103,7 +1095,7 @@
@@ -1222,7 +1214,6 @@ / @@ -1233,7 +1224,7 @@
@@ -1431,7 +1422,6 @@ / @@ -1442,7 +1432,7 @@
@@ -1562,7 +1552,6 @@ / @@ -1573,7 +1562,7 @@
@@ -1783,7 +1772,6 @@ / @@ -1794,7 +1782,7 @@
@@ -1914,7 +1902,6 @@ / @@ -1925,7 +1912,7 @@
@@ -2135,7 +2122,6 @@ / @@ -2146,7 +2132,7 @@
@@ -2266,7 +2252,6 @@ / @@ -2277,7 +2262,7 @@
@@ -2487,7 +2472,6 @@ / @@ -2498,7 +2482,7 @@
@@ -2618,7 +2602,6 @@ / @@ -2629,7 +2612,7 @@
@@ -2839,7 +2822,6 @@ / @@ -2850,7 +2832,7 @@
@@ -2973,7 +2955,6 @@ / @@ -2984,7 +2965,7 @@
@@ -3194,7 +3175,6 @@ / @@ -3205,7 +3185,7 @@
@@ -3328,7 +3308,6 @@ / @@ -3339,7 +3318,7 @@
@@ -3549,7 +3528,6 @@ / @@ -3560,7 +3538,7 @@
@@ -3683,7 +3661,6 @@ / @@ -3694,7 +3671,7 @@
@@ -3904,7 +3881,6 @@ / @@ -3915,7 +3891,7 @@
@@ -4038,7 +4014,6 @@ / @@ -4049,7 +4024,7 @@
@@ -4259,7 +4234,6 @@ / @@ -4270,7 +4244,7 @@
@@ -4393,7 +4367,6 @@ / @@ -4404,7 +4377,7 @@
@@ -4614,7 +4587,6 @@ / @@ -4625,7 +4597,7 @@
@@ -4748,7 +4720,6 @@ / @@ -4759,7 +4730,7 @@
@@ -4969,7 +4940,6 @@ / @@ -4980,7 +4950,7 @@
@@ -5103,7 +5073,6 @@ / @@ -5114,7 +5083,7 @@
@@ -5324,7 +5293,6 @@ / @@ -5335,7 +5303,7 @@
@@ -5458,7 +5426,6 @@ / @@ -5469,7 +5436,7 @@
@@ -5679,7 +5646,6 @@ / @@ -5690,7 +5656,7 @@
@@ -5826,7 +5792,6 @@ / @@ -5837,7 +5802,7 @@
@@ -6029,7 +5994,6 @@ / @@ -6040,7 +6004,7 @@
@@ -6176,7 +6140,6 @@ / @@ -6187,7 +6150,7 @@
@@ -6379,7 +6342,6 @@ / @@ -6390,7 +6352,7 @@
@@ -6526,7 +6488,6 @@ / @@ -6537,7 +6498,7 @@
@@ -6729,7 +6690,6 @@ / @@ -6740,7 +6700,7 @@
@@ -6876,7 +6836,6 @@ / @@ -6887,7 +6846,7 @@
7
.github/workflows/generated-linux-binary-manywheel-master.yml
generated
vendored
@@ -27,7 +27,6 @@ env:
   PR_LABELS: ${{ toJson(github.event.pull_request.labels.*.name) }}
   PR_NUMBER: ${{ github.event.pull_request.number }}
   PYTORCH_FINAL_PACKAGE_DIR: /artifacts
-  PYTORCH_RETRY_TEST_CASES: 1
   PYTORCH_ROOT: /pytorch
   SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
   SKIP_ALL_TESTS: 1
@@ -74,7 +73,6 @@ jobs:
       - name: Checkout PyTorch
         uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
         with:
-          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
           submodules: recursive
           path: pytorch
       - name: Clean PyTorch checkout
@@ -85,7 +83,7 @@ jobs:
       - name: Checkout pytorch/builder
         uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
         with:
-          ref: main
+          ref: release/1.12
           submodules: recursive
           repository: pytorch/builder
           path: builder
@@ -204,7 +202,6 @@ jobs:
       - name: Checkout PyTorch
         uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
         with:
-          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
           submodules: recursive
           path: pytorch
       - name: Clean PyTorch checkout
@@ -215,7 +212,7 @@ jobs:
       - name: Checkout pytorch/builder
         uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
         with:
-          ref: main
+          ref: release/1.12
           submodules: recursive
           repository: pytorch/builder
           path: builder
145
.github/workflows/generated-linux-binary-manywheel-nightly.yml
generated
vendored
@ -31,7 +31,6 @@ env:
|
||||
PR_LABELS: ${{ toJson(github.event.pull_request.labels.*.name) }}
|
||||
PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||
PYTORCH_FINAL_PACKAGE_DIR: /artifacts
|
||||
PYTORCH_RETRY_TEST_CASES: 1
|
||||
PYTORCH_ROOT: /pytorch
|
||||
SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
|
||||
SKIP_ALL_TESTS: 1
|
||||
@ -77,7 +76,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -88,7 +86,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -206,7 +204,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -217,7 +214,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -413,7 +410,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -424,7 +420,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -543,7 +539,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -554,7 +549,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -762,7 +757,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -773,7 +767,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -895,7 +889,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -906,7 +899,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -1114,7 +1107,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -1125,7 +1117,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -1247,7 +1239,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -1258,7 +1249,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -1466,7 +1457,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -1477,7 +1467,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -1612,7 +1602,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -1623,7 +1612,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -1813,7 +1802,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -1824,7 +1812,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -1959,7 +1947,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -1970,7 +1957,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -2159,7 +2146,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -2170,7 +2156,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -2288,7 +2274,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -2299,7 +2284,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -2495,7 +2480,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -2506,7 +2490,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -2625,7 +2609,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -2636,7 +2619,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -2844,7 +2827,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -2855,7 +2837,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -2977,7 +2959,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -2988,7 +2969,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -3196,7 +3177,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -3207,7 +3187,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -3329,7 +3309,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -3340,7 +3319,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -3548,7 +3527,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -3559,7 +3537,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -3694,7 +3672,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -3705,7 +3682,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -3895,7 +3872,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -3906,7 +3882,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -4041,7 +4017,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -4052,7 +4027,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -4241,7 +4216,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -4252,7 +4226,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -4370,7 +4344,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -4381,7 +4354,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -4577,7 +4550,6 @@ jobs:
- name: Checkout PyTorch
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
- name: Clean PyTorch checkout
@ -4588,7 +4560,7 @@ jobs:
- name: Checkout pytorch/builder
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
with:
ref: main
ref: release/1.12
submodules: recursive
repository: pytorch/builder
path: builder
[The two hunks above repeat, identical apart from line offsets, at:]
@ -4707,7 +4679,6 @@ jobs:
@ -4718,7 +4689,7 @@ jobs:
@ -4926,7 +4897,6 @@ jobs:
@ -4937,7 +4907,7 @@ jobs:
@ -5059,7 +5029,6 @@ jobs:
@ -5070,7 +5039,7 @@ jobs:
@ -5278,7 +5247,6 @@ jobs:
@ -5289,7 +5257,7 @@ jobs:
@ -5411,7 +5379,6 @@ jobs:
@ -5422,7 +5389,7 @@ jobs:
@ -5630,7 +5597,6 @@ jobs:
@ -5641,7 +5607,7 @@ jobs:
@ -5776,7 +5742,6 @@ jobs:
@ -5787,7 +5752,7 @@ jobs:
@ -5977,7 +5942,6 @@ jobs:
@ -5988,7 +5952,7 @@ jobs:
@ -6123,7 +6087,6 @@ jobs:
@ -6134,7 +6097,7 @@ jobs:
@ -6323,7 +6286,6 @@ jobs:
@ -6334,7 +6296,7 @@ jobs:
@ -6452,7 +6414,6 @@ jobs:
@ -6463,7 +6424,7 @@ jobs:
@ -6659,7 +6620,6 @@ jobs:
@ -6670,7 +6630,7 @@ jobs:
@ -6789,7 +6749,6 @@ jobs:
@ -6800,7 +6759,7 @@ jobs:
@ -7008,7 +6967,6 @@ jobs:
@ -7019,7 +6977,7 @@ jobs:
@ -7141,7 +7099,6 @@ jobs:
@ -7152,7 +7109,7 @@ jobs:
@ -7360,7 +7317,6 @@ jobs:
@ -7371,7 +7327,7 @@ jobs:
@ -7493,7 +7449,6 @@ jobs:
@ -7504,7 +7459,7 @@ jobs:
@ -7712,7 +7667,6 @@ jobs:
@ -7723,7 +7677,7 @@ jobs:
@ -7858,7 +7812,6 @@ jobs:
@ -7869,7 +7822,7 @@ jobs:
@ -8059,7 +8012,6 @@ jobs:
@ -8070,7 +8022,7 @@ jobs:
@ -8205,7 +8157,6 @@ jobs:
@ -8216,7 +8167,7 @@ jobs:
9
.github/workflows/generated-macos-arm64-binary-conda-nightly.yml
generated
vendored
@ -77,7 +77,6 @@ jobs:
- name: Checkout PyTorch
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
- name: Clean PyTorch checkout
@ -88,7 +87,7 @@ jobs:
- name: Checkout pytorch/builder
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
with:
ref: main
ref: release/1.12
submodules: recursive
repository: pytorch/builder
path: builder
[The two hunks above repeat, identical apart from line offsets, at:]
@ -252,7 +251,6 @@ jobs:
@ -263,7 +261,7 @@ jobs:
@ -427,7 +425,6 @@ jobs:
@ -438,7 +435,7 @@ jobs:
12
.github/workflows/generated-macos-arm64-binary-wheel-nightly.yml
generated
vendored
@ -77,7 +77,6 @@ jobs:
- name: Checkout PyTorch
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
- name: Clean PyTorch checkout
@ -88,7 +87,7 @@ jobs:
- name: Checkout pytorch/builder
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
with:
ref: main
ref: release/1.12
submodules: recursive
repository: pytorch/builder
path: builder
[The two hunks above repeat, identical apart from line offsets, at:]
@ -252,7 +251,6 @@ jobs:
@ -263,7 +261,7 @@ jobs:
@ -427,7 +425,6 @@ jobs:
@ -438,7 +435,7 @@ jobs:
@ -602,7 +599,6 @@ jobs:
@ -613,7 +609,7 @@ jobs:
12
.github/workflows/generated-macos-binary-conda-nightly.yml
generated
vendored
@ -75,7 +75,6 @@ jobs:
- name: Checkout PyTorch
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
- name: Clean PyTorch checkout
@ -86,7 +85,7 @@ jobs:
- name: Checkout pytorch/builder
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
with:
ref: main
ref: release/1.12
submodules: recursive
repository: pytorch/builder
path: builder
[The two hunks above repeat, identical apart from line offsets, at:]
@ -250,7 +249,6 @@ jobs:
@ -261,7 +259,7 @@ jobs:
@ -425,7 +423,6 @@ jobs:
@ -436,7 +433,7 @@ jobs:
@ -600,7 +597,6 @@ jobs:
@ -611,7 +607,7 @@ jobs:
12
.github/workflows/generated-macos-binary-libtorch-cxx11-abi-nightly.yml
generated
vendored
@ -80,7 +80,6 @@ jobs:
- name: Checkout PyTorch
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
- name: Clean PyTorch checkout
@ -91,7 +90,7 @@ jobs:
- name: Checkout pytorch/builder
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
with:
ref: main
ref: release/1.12
submodules: recursive
repository: pytorch/builder
path: builder
[The two hunks above repeat, identical apart from line offsets, at:]
@ -261,7 +260,6 @@ jobs:
@ -272,7 +270,7 @@ jobs:
@ -442,7 +440,6 @@ jobs:
@ -453,7 +450,7 @@ jobs:
@ -623,7 +620,6 @@ jobs:
@ -634,7 +630,7 @@ jobs:
12
.github/workflows/generated-macos-binary-libtorch-pre-cxx11-nightly.yml
generated
vendored
@ -80,7 +80,6 @@ jobs:
- name: Checkout PyTorch
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
- name: Clean PyTorch checkout
@ -91,7 +90,7 @@ jobs:
- name: Checkout pytorch/builder
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
with:
ref: main
ref: release/1.12
submodules: recursive
repository: pytorch/builder
path: builder
[The two hunks above repeat, identical apart from line offsets, at:]
@ -261,7 +260,6 @@ jobs:
@ -272,7 +270,7 @@ jobs:
@ -442,7 +440,6 @@ jobs:
@ -453,7 +450,7 @@ jobs:
@ -623,7 +620,6 @@ jobs:
@ -634,7 +630,7 @@ jobs:
12
.github/workflows/generated-macos-binary-wheel-nightly.yml
generated
vendored
@ -75,7 +75,6 @@ jobs:
- name: Checkout PyTorch
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
- name: Clean PyTorch checkout
@ -86,7 +85,7 @@ jobs:
- name: Checkout pytorch/builder
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
with:
ref: main
ref: release/1.12
submodules: recursive
repository: pytorch/builder
path: builder
[The two hunks above repeat, identical apart from line offsets, at:]
@ -250,7 +249,6 @@ jobs:
@ -261,7 +259,7 @@ jobs:
@ -425,7 +423,6 @@ jobs:
@ -436,7 +433,7 @@ jobs:
@ -600,7 +597,6 @@ jobs:
@ -611,7 +607,7 @@ jobs:
73
.github/workflows/generated-windows-binary-conda-nightly.yml
generated
vendored
@ -28,7 +28,6 @@ env:
IS_GHA: 1
PR_LABELS: ${{ toJson(github.event.pull_request.labels.*.name) }}
PR_NUMBER: ${{ github.event.pull_request.number }}
PYTORCH_RETRY_TEST_CASES: 1
SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
SKIP_ALL_TESTS: 1
concurrency:
@ -91,7 +90,6 @@ jobs:
- name: Checkout PyTorch
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
- name: Clean PyTorch checkout
@ -102,7 +100,7 @@ jobs:
- name: Checkout pytorch/builder
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
with:
ref: main
ref: release/1.12
submodules: recursive
repository: pytorch/builder
path: builder
[The two jobs hunks above repeat, identical apart from line offsets, at:]
@ -200,7 +198,6 @@ jobs:
@ -211,7 +208,7 @@ jobs:
@ -390,7 +387,6 @@ jobs:
@ -401,7 +397,7 @@ jobs:
@ -500,7 +496,6 @@ jobs:
@ -511,7 +506,7 @@ jobs:
@ -691,7 +686,6 @@ jobs:
@ -702,7 +696,7 @@ jobs:
@ -801,7 +795,6 @@ jobs:
@ -812,7 +805,7 @@ jobs:
@ -991,7 +984,6 @@ jobs:
@ -1002,7 +994,7 @@ jobs:
@ -1100,7 +1092,6 @@ jobs:
@ -1111,7 +1102,7 @@ jobs:
@ -1290,7 +1281,6 @@ jobs:
@ -1301,7 +1291,7 @@ jobs:
@ -1400,7 +1390,6 @@ jobs:
@ -1411,7 +1400,7 @@ jobs:
@ -1591,7 +1580,6 @@ jobs:
@ -1602,7 +1590,7 @@ jobs:
@ -1701,7 +1689,6 @@ jobs:
@ -1712,7 +1699,7 @@ jobs:
@ -1891,7 +1878,6 @@ jobs:
@ -1902,7 +1888,7 @@ jobs:
@ -2000,7 +1986,6 @@ jobs:
@ -2011,7 +1996,7 @@ jobs:
@ -2190,7 +2175,6 @@ jobs:
@ -2201,7 +2185,7 @@ jobs:
@ -2300,7 +2284,6 @@ jobs:
@ -2311,7 +2294,7 @@ jobs:
@ -2491,7 +2474,6 @@ jobs:
@ -2502,7 +2484,7 @@ jobs:
@ -2601,7 +2583,6 @@ jobs:
@ -2612,7 +2593,7 @@ jobs:
@ -2791,7 +2772,6 @@ jobs:
@ -2802,7 +2782,7 @@ jobs:
@ -2900,7 +2880,6 @@ jobs:
@ -2911,7 +2890,7 @@ jobs:
@ -3090,7 +3069,6 @@ jobs:
@ -3101,7 +3079,7 @@ jobs:
@ -3200,7 +3178,6 @@ jobs:
@ -3211,7 +3188,7 @@ jobs:
@ -3391,7 +3368,6 @@ jobs:
@ -3402,7 +3378,7 @@ jobs:
@ -3501,7 +3477,6 @@ jobs:
@ -3512,7 +3487,7 @@ jobs:
7
.github/workflows/generated-windows-binary-libtorch-debug-master.yml
generated
vendored
@ -24,7 +24,6 @@ env:
IS_GHA: 1
PR_LABELS: ${{ toJson(github.event.pull_request.labels.*.name) }}
PR_NUMBER: ${{ github.event.pull_request.number }}
PYTORCH_RETRY_TEST_CASES: 1
SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
SKIP_ALL_TESTS: 1
concurrency:
@ -91,7 +90,6 @@ jobs:
- name: Checkout PyTorch
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
- name: Clean PyTorch checkout
@ -102,7 +100,7 @@ jobs:
- name: Checkout pytorch/builder
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
with:
ref: main
ref: release/1.12
submodules: recursive
repository: pytorch/builder
path: builder
[The two jobs hunks above repeat, identical apart from line offsets, at:]
@ -204,7 +202,6 @@ jobs:
@ -215,7 +212,7 @@ jobs:
73
.github/workflows/generated-windows-binary-libtorch-debug-nightly.yml
generated
vendored
@ -28,7 +28,6 @@ env:
IS_GHA: 1
PR_LABELS: ${{ toJson(github.event.pull_request.labels.*.name) }}
PR_NUMBER: ${{ github.event.pull_request.number }}
PYTORCH_RETRY_TEST_CASES: 1
SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
SKIP_ALL_TESTS: 1
concurrency:
@ -95,7 +94,6 @@ jobs:
- name: Checkout PyTorch
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
- name: Clean PyTorch checkout
@ -106,7 +104,7 @@ jobs:
- name: Checkout pytorch/builder
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
with:
ref: main
ref: release/1.12
submodules: recursive
repository: pytorch/builder
path: builder
[The two jobs hunks above repeat, identical apart from line offsets, at:]
@ -208,7 +206,6 @@ jobs:
@ -219,7 +216,7 @@ jobs:
@ -405,7 +402,6 @@ jobs:
@ -416,7 +412,7 @@ jobs:
@ -518,7 +514,6 @@ jobs:
@ -529,7 +524,7 @@ jobs:
@ -715,7 +710,6 @@ jobs:
@ -726,7 +720,7 @@ jobs:
@ -828,7 +822,6 @@ jobs:
@ -839,7 +832,7 @@ jobs:
@ -1025,7 +1018,6 @@ jobs:
@ -1036,7 +1028,7 @@ jobs:
@ -1138,7 +1130,6 @@ jobs:
@ -1149,7 +1140,7 @@ jobs:
@ -1336,7 +1327,6 @@ jobs:
@ -1347,7 +1337,7 @@ jobs:
@ -1450,7 +1440,6 @@ jobs:
@ -1461,7 +1450,7 @@ jobs:
@ -1649,7 +1638,6 @@ jobs:
@ -1660,7 +1648,7 @@ jobs:
@ -1763,7 +1751,6 @@ jobs:
@ -1774,7 +1761,7 @@ jobs:
@ -1962,7 +1949,6 @@ jobs:
@ -1973,7 +1959,7 @@ jobs:
@ -2076,7 +2062,6 @@ jobs:
@ -2087,7 +2072,7 @@ jobs:
@ -2275,7 +2260,6 @@ jobs:
@ -2286,7 +2270,7 @@ jobs:
@ -2389,7 +2373,6 @@ jobs:
@ -2400,7 +2383,7 @@ jobs:
@ -2588,7 +2571,6 @@ jobs:
@ -2599,7 +2581,7 @@ jobs:
@ -2702,7 +2684,6 @@ jobs:
@ -2713,7 +2694,7 @@ jobs:
@ -2901,7 +2882,6 @@ jobs:
@ -2912,7 +2892,7 @@ jobs:
@ -3015,7 +2995,6 @@ jobs:
@ -3026,7 +3005,7 @@ jobs:
@ -3214,7 +3193,6 @@ jobs:
@ -3225,7 +3203,7 @@ jobs:
@ -3328,7 +3306,6 @@ jobs:
@ -3339,7 +3316,7 @@ jobs:
@ -3527,7 +3504,6 @@ jobs:
@ -3538,7 +3514,7 @@ jobs:
@ -3641,7 +3617,6 @@ jobs:
@ -3652,7 +3627,7 @@ jobs:
7
.github/workflows/generated-windows-binary-libtorch-release-master.yml
generated
vendored
7
.github/workflows/generated-windows-binary-libtorch-release-master.yml
generated
vendored
@ -24,7 +24,6 @@ env:
|
||||
IS_GHA: 1
|
||||
PR_LABELS: ${{ toJson(github.event.pull_request.labels.*.name) }}
|
||||
PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||
PYTORCH_RETRY_TEST_CASES: 1
|
||||
SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
|
||||
SKIP_ALL_TESTS: 1
|
||||
concurrency:
|
||||
@ -91,7 +90,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -102,7 +100,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -204,7 +202,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -215,7 +212,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
|
73
.github/workflows/generated-windows-binary-libtorch-release-nightly.yml
generated
vendored
73
.github/workflows/generated-windows-binary-libtorch-release-nightly.yml
generated
vendored
@ -28,7 +28,6 @@ env:
|
||||
IS_GHA: 1
|
||||
PR_LABELS: ${{ toJson(github.event.pull_request.labels.*.name) }}
|
||||
PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||
PYTORCH_RETRY_TEST_CASES: 1
|
||||
SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
|
||||
SKIP_ALL_TESTS: 1
|
||||
concurrency:
|
||||
@ -95,7 +94,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -106,7 +104,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -208,7 +206,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -219,7 +216,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -405,7 +402,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -416,7 +412,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -518,7 +514,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -529,7 +524,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -715,7 +710,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -726,7 +720,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -828,7 +822,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -839,7 +832,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -1025,7 +1018,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -1036,7 +1028,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -1138,7 +1130,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -1149,7 +1140,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -1336,7 +1327,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -1347,7 +1337,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -1450,7 +1440,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -1461,7 +1450,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -1649,7 +1638,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -1660,7 +1648,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -1763,7 +1751,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -1774,7 +1761,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -1962,7 +1949,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -1973,7 +1959,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -2076,7 +2062,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -2087,7 +2072,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -2275,7 +2260,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -2286,7 +2270,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -2389,7 +2373,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -2400,7 +2383,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -2588,7 +2571,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -2599,7 +2581,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -2702,7 +2684,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -2713,7 +2694,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -2901,7 +2882,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -2912,7 +2892,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -3015,7 +2995,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -3026,7 +3005,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -3214,7 +3193,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -3225,7 +3203,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -3328,7 +3306,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -3339,7 +3316,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -3527,7 +3504,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -3538,7 +3514,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -3641,7 +3617,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -3652,7 +3627,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
|
7
.github/workflows/generated-windows-binary-wheel-master.yml
generated
vendored
7
.github/workflows/generated-windows-binary-wheel-master.yml
generated
vendored
@ -24,7 +24,6 @@ env:
|
||||
IS_GHA: 1
|
||||
PR_LABELS: ${{ toJson(github.event.pull_request.labels.*.name) }}
|
||||
PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||
PYTORCH_RETRY_TEST_CASES: 1
|
||||
SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
|
||||
SKIP_ALL_TESTS: 1
|
||||
concurrency:
|
||||
@ -88,7 +87,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -99,7 +97,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -198,7 +196,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -209,7 +206,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
|
73
.github/workflows/generated-windows-binary-wheel-nightly.yml
generated
vendored
73
.github/workflows/generated-windows-binary-wheel-nightly.yml
generated
vendored
@ -28,7 +28,6 @@ env:
|
||||
IS_GHA: 1
|
||||
PR_LABELS: ${{ toJson(github.event.pull_request.labels.*.name) }}
|
||||
PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||
PYTORCH_RETRY_TEST_CASES: 1
|
||||
SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
|
||||
SKIP_ALL_TESTS: 1
|
||||
concurrency:
|
||||
@ -91,7 +90,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -102,7 +100,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -200,7 +198,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -211,7 +208,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -390,7 +387,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -401,7 +397,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -500,7 +496,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -511,7 +506,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -691,7 +686,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -702,7 +696,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -801,7 +795,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -812,7 +805,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -991,7 +984,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -1002,7 +994,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -1100,7 +1092,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -1111,7 +1102,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -1290,7 +1281,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -1301,7 +1291,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -1400,7 +1390,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -1411,7 +1400,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -1591,7 +1580,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -1602,7 +1590,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -1701,7 +1689,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -1712,7 +1699,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -1891,7 +1878,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -1902,7 +1888,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -2000,7 +1986,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -2011,7 +1996,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -2190,7 +2175,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -2201,7 +2185,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -2300,7 +2284,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -2311,7 +2294,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -2491,7 +2474,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -2502,7 +2484,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -2601,7 +2583,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -2612,7 +2593,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -2791,7 +2772,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -2802,7 +2782,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -2900,7 +2880,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -2911,7 +2890,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -3090,7 +3069,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -3101,7 +3079,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -3200,7 +3178,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -3211,7 +3188,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -3391,7 +3368,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -3402,7 +3378,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -3501,7 +3477,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
- name: Clean PyTorch checkout
|
||||
@ -3512,7 +3487,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: zhouzhuojie/checkout@05b13c9a0d21f08f6d5e64a1d5042246d13619d9
|
||||
with:
|
||||
ref: main
|
||||
ref: release/1.12
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
|
3 .github/workflows/nightly.yml (vendored)
@ -5,6 +5,9 @@ on:
- cron: 0 0 * * *
push:
tags:
# NOTE: Doc build pipelines should only get triggered on release candidate builds
# Release candidate tags look like: v1.11.0-rc1
- v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
- ciflow/nightly/*
workflow_dispatch:

9 .github/workflows/run_android_tests.yml (vendored)
@ -56,7 +56,16 @@ jobs:

- name: Build PyTorch Android
run: |
# Install NDK 21 after GitHub update
# https://github.com/actions/virtual-environments/issues/5595
ANDROID_ROOT="/usr/local/lib/android"
ANDROID_SDK_ROOT="${ANDROID_ROOT}/sdk"
SDKMANAGER="${ANDROID_SDK_ROOT}/cmdline-tools/latest/bin/sdkmanager"
echo "y" | $SDKMANAGER "ndk;21.4.7075529"

export ANDROID_NDK="${ANDROID_SDK_ROOT}/ndk-bundle"
ln -sfn $ANDROID_SDK_ROOT/ndk/21.4.7075529 $ANDROID_NDK

echo "CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname "$(which conda)")/../"}" >> "${GITHUB_ENV}"
./scripts/build_pytorch_android.sh x86

13 .github/workflows/trunk.yml (vendored)
@ -173,8 +173,8 @@ jobs:
build-environment: macos-11-py3-x86-64
test-matrix: |
{ include: [
{ config: "default", shard: 1, num_shards: 2, runner: "macos-12", xcode-version: "13.3.1" },
{ config: "default", shard: 2, num_shards: 2, runner: "macos-12", xcode-version: "13.3.1" },
{ config: "default", shard: 1, num_shards: 2, runner: "macos-12" },
{ config: "default", shard: 2, num_shards: 2, runner: "macos-12" },
]}
secrets:
AWS_OSSCI_METRICS_V2_ACCESS_KEY_ID: ${{ secrets.AWS_OSSCI_METRICS_V2_ACCESS_KEY_ID }}
@ -199,11 +199,18 @@ jobs:
build-environment: macos-10-15-py3-arm64
xcode-version: "13.3.1"
runner-type: macos-12
build-generates-artifacts: false
build-generates-artifacts: true
secrets:
MACOS_SCCACHE_S3_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
MACOS_SCCACHE_S3_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}

macos-12-3-py38-arm64-test:
name: macos-12.3-py3.8-arm64-test
uses: ./.github/workflows/_mac-test-arm64.yml
needs: macos-10-15-py3-arm64
with:
build-environment: macos-10-15-py3-arm64

# please ensure that this and its corresponding job in pull.yml are in sync
win-vs2019-cuda11_3-py3-build:
name: win-vs2019-cuda11.3-py3

4 .gitignore (vendored)
@ -122,6 +122,10 @@ env
.circleci/scripts/COMMIT_MSG
scripts/release_notes/*.json

# These files get copied over on invoking setup.py
torchgen/packaged/*
!torchgen/packaged/README.md

# IPython notebook checkpoints
.ipynb_checkpoints

2 .gitmodules (vendored)
@ -65,7 +65,7 @@
[submodule "third_party/python-peachpy"]
ignore = dirty
path = third_party/python-peachpy
url = https://github.com/Maratyszcza/PeachPy.git
url = https://github.com/malfet/PeachPy.git
[submodule "third_party/python-six"]
ignore = dirty
path = third_party/python-six
@ -27,6 +27,7 @@ rm -rf "$OUT"

# aten codegen
python -m torchgen.gen \
-s aten/src/ATen \
-d "$OUT"/torch/share/ATen

# torch codegen
@ -99,6 +99,6 @@ function checkout_install_torchvision() {

function clone_pytorch_xla() {
if [[ ! -d ./xla ]]; then
git clone --recursive --quiet https://github.com/pytorch/xla.git
git clone --recursive -b r1.12 https://github.com/pytorch/xla.git
fi
}
@ -33,11 +33,11 @@ fi

cross_compile_arm64() {
# Cross compilation for arm64
USE_DISTRIBUTED=1 CMAKE_OSX_ARCHITECTURES=arm64 MACOSX_DEPLOYMENT_TARGET=11.0 USE_MKLDNN=OFF USE_NNPACK=OFF USE_QNNPACK=OFF BUILD_TEST=OFF python setup.py bdist_wheel
USE_DISTRIBUTED=1 CMAKE_OSX_ARCHITECTURES=arm64 MACOSX_DEPLOYMENT_TARGET=11.0 USE_MKLDNN=OFF USE_QNNPACK=OFF BUILD_TEST=OFF python setup.py bdist_wheel
}

compile_x86_64() {
USE_DISTRIBUTED=1 USE_NNPACK=OFF python setup.py bdist_wheel
USE_DISTRIBUTED=1 python setup.py bdist_wheel
}

build_lite_interpreter() {
@ -28,4 +28,24 @@ time python test/run_test.py --verbose -i distributed/test_c10d_spawn_nccl
time python test/run_test.py --verbose -i distributed/test_store
time python test/run_test.py --verbose -i distributed/test_pg_wrapper
time python test/run_test.py --verbose -i distributed/rpc/cuda/test_tensorpipe_agent
time python test/run_test.py --verbose -i distributed/_shard/checkpoint/test_checkpoint
time python test/run_test.py --verbose -i distributed/_shard/checkpoint/test_file_system_checkpoint
time python test/run_test.py --verbose -i distributed/_shard/sharding_spec/test_sharding_spec
time python test/run_test.py --verbose -i distributed/_shard/sharding_plan/test_sharding_plan
time python test/run_test.py --verbose -i distributed/_shard/sharded_tensor/test_megatron_prototype
time python test/run_test.py --verbose -i distributed/_shard/sharded_tensor/test_sharded_tensor
time python test/run_test.py --verbose -i distributed/_shard/sharded_tensor/test_sharded_tensor_reshard
time python test/run_test.py --verbose -i distributed/_shard/sharded_tensor/ops/test_chunk
time python test/run_test.py --verbose -i distributed/_shard/sharded_tensor/ops/test_elementwise_ops
time python test/run_test.py --verbose -i distributed/_shard/sharded_tensor/ops/test_embedding
time python test/run_test.py --verbose -i distributed/_shard/sharded_tensor/ops/test_embedding_bag
time python test/run_test.py --verbose -i distributed/_shard/sharded_tensor/ops/test_binary_cmp
time python test/run_test.py --verbose -i distributed/_shard/sharded_tensor/ops/test_init
time python test/run_test.py --verbose -i distributed/_shard/sharded_tensor/ops/test_linear
time python test/run_test.py --verbose -i distributed/_shard/sharded_tensor/ops/test_math_ops
time python test/run_test.py --verbose -i distributed/_shard/sharded_tensor/ops/test_matrix_ops
time python test/run_test.py --verbose -i distributed/_shard/sharded_tensor/ops/test_softmax
time python test/run_test.py --verbose -i distributed/_shard/sharded_optim/test_sharded_optim
time python test/run_test.py --verbose -i distributed/_shard/test_partial_tensor
time python test/run_test.py --verbose -i distributed/_shard/test_replicated_tensor
assert_git_not_dirty
@ -460,7 +460,7 @@ test_forward_backward_compatibility() {
python -m venv venv
# shellcheck disable=SC1091
. venv/bin/activate
pip_install --pre torch -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html
pip_install --pre torch -f https://download.pytorch.org/whl/test/cpu/torch_test.html
pip show torch
python dump_all_function_schemas.py --filename nightly_schemas.txt
# FC: verify newmodel can be load with old code.
@ -947,7 +947,8 @@ endif()

if(APPLE)
if(USE_MPS)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DUSE_MPS -fno-objc-arc -weak_framework Foundation -weak_framework MetalPerformanceShaders -weak_framework MetalPerformanceShadersGraph -weak_framework Metal")
string(APPEND CMAKE_CXX_FLAGS " -DUSE_MPS -fno-objc-arc")
string(APPEND CMAKE_SHARED_LINKER_FLAGS " -weak_framework Foundation -weak_framework MetalPerformanceShaders -weak_framework MetalPerformanceShadersGraph -weak_framework Metal")
endif()
string(APPEND CMAKE_CXX_FLAGS " -Wno-unused-private-field")
string(APPEND CMAKE_CXX_FLAGS " -Wno-missing-braces")
@ -73,3 +73,8 @@ test/test_unary_ufuncs.py @mruberry @ngimel
test/test_binary_ufuncs.py @mruberry @ngimel
test/test_reductions.py @mruberry @ngimel
test/test_type_promotion.py @mruberry @ngimel

# torch MPS
test/test_mps.py @kulinseth
aten/src/ATen/mps/ @kulinseth
aten/src/ATen/native/mps/ @kulinseth
@ -420,7 +420,6 @@ if(USE_CUDA AND NOT USE_ROCM)
${CUDA_LIBRARIES}
${CUDA_TOOLKIT_ROOT_DIR}/lib64/libcusparse_static.a
${CUDA_TOOLKIT_ROOT_DIR}/lib64/libcurand_static.a
${CUDA_TOOLKIT_ROOT_DIR}/lib64/libcublas_static.a
${CUDA_TOOLKIT_ROOT_DIR}/lib64/libcufft_static_nocallback.a
)
if(NOT BUILD_LAZY_CUDA_LINALG)
@ -21,6 +21,10 @@
#include <fbgemm/Fbgemm.h>
#endif // USE_FBGEMM

#ifdef USE_MPS
#include <ATen/mps/MPSDevice.h>
#endif

namespace at {

Context::Context() = default;
@ -225,16 +229,8 @@ bool Context::hasMKLDNN() {
}

bool Context::hasMPS() {
#if defined(__APPLE__)
#if __is_target_os(macOS)
if (__builtin_available(macOS 12.3, *) || __builtin_available(macOSApplicationExtension 12.3, *)) {
return c10::impl::hasDeviceGuardImpl(at::DeviceType::MPS);
} else {
return false;
}
#else
return false;
#endif
#if USE_MPS
return at::mps::is_available();
#else
return false;
#endif
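A minimal, hypothetical C++ sketch of the gating pattern used in the hasMPS() rewrite above (a build-time flag compiled in via #ifdef, with the runtime probe stubbed out); it assumes nothing beyond the standard library and is not code from this diff:

// gating_sketch.cpp - hypothetical illustration, standard library only.
#include <iostream>

// Stand-in for a probe like at::mps::is_available(): the build flag decides
// whether the backend is compiled in at all, and only then would a real
// implementation go on to query the device at runtime.
bool backend_available() {
#ifdef USE_MPS
    return true;   // a real probe would also check the device here
#else
    return false;  // backend compiled out entirely
#endif
}

int main() {
    std::cout << std::boolalpha << "backend available: "
              << backend_available() << "\n";
    return 0;
}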
@ -26,6 +26,13 @@ constexpr uint64_t storage_max() {
return std::min(int64_max, size_max);
}

inline void raise_warning_for_complex_half(ScalarType dtype) {
if (dtype == kComplexHalf) {
TORCH_WARN_ONCE(
"ComplexHalf support is experimental and many operators don't support it yet.");
}
}

} // namespace (anonymous)

size_t computeStorageNbytesContiguous(
@ -98,7 +105,7 @@ TensorBase empty_generic(
ScalarType scalar_type,
c10::optional<c10::MemoryFormat> memory_format_opt) {
at::detail::check_size_nonnegative(size);

at::detail::raise_warning_for_complex_half(scalar_type);
caffe2::TypeMeta dtype = scalarTypeToTypeMeta(scalar_type);
size_t size_bytes = computeStorageNbytesContiguous(size, dtype.itemsize());
auto storage_impl = c10::make_intrusive<StorageImpl>(
@ -132,7 +139,7 @@ TensorBase empty_strided_generic(
c10::DispatchKeySet ks,
ScalarType scalar_type) {
at::detail::check_size_nonnegative(size);

at::detail::raise_warning_for_complex_half(scalar_type);
caffe2::TypeMeta dtype = scalarTypeToTypeMeta(scalar_type);
size_t size_bytes = computeStorageNbytes(size, stride, dtype.itemsize());
auto storage_impl = c10::make_intrusive<StorageImpl>(
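A small, self-contained sketch (standard C++ only; the TORCH_WARN_ONCE macro used above is assumed, not reproduced) of the warn-once behaviour that raise_warning_for_complex_half relies on:

// warn_once_sketch.cpp - hypothetical illustration of a warn-once helper.
#include <iostream>
#include <mutex>

// Print the warning the first time the helper is hit, then stay silent,
// which is the behaviour a TORCH_WARN_ONCE-style macro provides per call site.
void warn_complex_half_once() {
    static std::once_flag flag;
    std::call_once(flag, [] {
        std::cerr << "ComplexHalf support is experimental and many operators "
                     "don't support it yet.\n";
    });
}

int main() {
    for (int i = 0; i < 3; ++i) {
        warn_complex_half_once();  // warning is emitted only on the first pass
    }
    return 0;
}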
@ -353,8 +353,8 @@ static inline void copy_to(const Tensor& dst, const Tensor& src) {
// appear. Users can workaround that case by dst[index..] = src.reshape(..)
dst.copy_(src);
return;
} else if (src.sizes().size() == 0 && src.device().type() == at::kCPU) {
dst.fill_(src.item());
} else if (src.dim() == 0 && src.device().type() == at::kCPU) {
dst.fill_(src);
return;
}
auto src_view = src.view(slicePrefix1sSize(src.sizes()));
@ -253,7 +253,7 @@ TensorTypePtr TensorType::create(const at::Tensor& t) {
VaryingShape<size_t> stride_indices;
VaryingShape<int64_t> strides;
VaryingShape<int64_t> sizes;
if (t.layout() == at::kStrided) {
if (t.layout() == at::kStrided && !t.is_nested()) {
sizes = VaryingShape<int64_t>{t.sizes().vec()};
strides = VaryingShape<int64_t>{t.strides().vec()};
return TensorType::create(
@ -12,6 +12,8 @@
#define MPS_ERROR_RUNTIME_TOO_LOW \
"The MPS backend is supported on MacOS 12.3+.", \
"Current OS version can be queried using `sw_vers`"
#define MPS_ERROR_DOUBLE_NOT_SUPPORTED "Cannot convert a MPS Tensor to float64 dtype " \
"as the MPS framework doesn't support float64. Please use float32 instead."

namespace at { namespace detail {
TensorBase empty_mps(
@ -23,7 +25,7 @@ TensorBase empty_mps(
c10::optional<c10::MemoryFormat> memory_format_opt) {
#if defined(__APPLE__)
#if __is_target_os(macOS)
if (__builtin_available(macOS 12.3, *) || __builtin_available(macOSApplicationExtension 12.3, *)) {
if (at::hasMPS()) {
auto device = device_or_default(device_opt);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device.type() == DeviceType::MPS);

@ -35,6 +37,8 @@ TensorBase empty_mps(
auto* allocator = at::mps::GetMPSAllocator();
int64_t nelements = c10::multiply_integers(size);
auto dtype = dtype_or_default(dtype_opt);
TORCH_CHECK_TYPE(dtype != ScalarType::Double, MPS_ERROR_DOUBLE_NOT_SUPPORTED);

auto dtype_meta = scalarTypeToTypeMeta(dtype);
int64_t size_bytes = nelements * dtype_meta.itemsize();
auto storage_impl = c10::make_intrusive<StorageImpl>(
@ -83,9 +87,10 @@ TensorBase empty_strided_mps(
c10::optional<Device> device_opt) {
#if defined(__APPLE__)
#if __is_target_os(macOS)
if (__builtin_available(macOS 12.3, *) || __builtin_available(macOSApplicationExtension 12.3, *)) {
if (at::hasMPS()) {
auto device = device_or_default(device_opt);
TORCH_INTERNAL_ASSERT(device.is_mps());
TORCH_CHECK_TYPE(dtype != ScalarType::Double, MPS_ERROR_DOUBLE_NOT_SUPPORTED);
const DeviceGuard device_guard(device);
auto* allocator = at::mps::GetMPSAllocator();
constexpr c10::DispatchKeySet mps_dks(c10::DispatchKey::MPS);
@ -98,7 +98,7 @@ struct HeapBlock
d.type = MTLHeapTypeAutomatic;
heap = [device newHeapWithDescriptor: d];
if (heap) {
[heap setPurgeableState:MTLPurgeableStateEmpty];
[heap setPurgeableState:MTLPurgeableStateNonVolatile];
}
[d release];
}
@ -101,7 +101,6 @@ id<MTLBuffer> MPSHeapAllocatorImpl::Malloc(size_t size, bool sharedStorage)
|
||||
TORCH_CHECK(size < m_max_buffer_size, "Invalid buffer size: ", format_size(size));
|
||||
|
||||
std::lock_guard<std::mutex> lock(m_mutex);
|
||||
__block id<MTLBuffer> buf = nil;
|
||||
|
||||
size_t alloc_size = get_allocation_size(size, sharedStorage);
|
||||
auto& pool = get_pool(alloc_size, sharedStorage);
|
||||
@ -252,39 +251,44 @@ bool MPSHeapAllocatorImpl::release_cached_buffers()
|
||||
} // namespace HeapAllocator
|
||||
|
||||
// Use "at::mps::GetMPSAllocator()" to acquire a handle to MPS Allocator
|
||||
static HeapAllocator::MPSHeapAllocatorImpl s_allocatorImpl;
|
||||
namespace {
|
||||
HeapAllocator::MPSHeapAllocatorImpl& _getAllocImpl() {
|
||||
static HeapAllocator::MPSHeapAllocatorImpl s_allocatorImpl;
|
||||
return s_allocatorImpl;
|
||||
}
|
||||
}
|
||||
|
||||
// MPS allocator struct to be registered with Pytorch
|
||||
struct TORCH_API MPSAllocator final : public at::Allocator {
|
||||
public:
|
||||
explicit MPSAllocator(bool useSharedStorage) :
|
||||
m_has_unified_memory(s_allocatorImpl.Device().hasUnifiedMemory), m_use_shared_storage(useSharedStorage)
|
||||
m_has_unified_memory(_getAllocImpl().Device().hasUnifiedMemory), m_use_shared_storage(useSharedStorage)
{
const bool enable_debug_info = isEnvVarEnabled("PYTORCH_DEBUG_MPS_ALLOCATOR");
if (enable_debug_info) {
s_allocatorImpl.enable_debug_info();
_getAllocImpl().enable_debug_info();
if (!m_use_shared_storage || m_has_unified_memory) {
std::cerr << "Initializing "
<< (useSharedStorage ? "shared" : "private")
<< " heap allocator on "
<< (m_has_unified_memory ? "unified" : "discrete")
<< " device memory of size "
<< s_allocatorImpl.Device().recommendedMaxWorkingSetSize / 1048576UL << " MB\n";
<< _getAllocImpl().Device().recommendedMaxWorkingSetSize / 1048576UL << " MB\n";
}
}
}

~MPSAllocator() override {
s_allocatorImpl.EmptyCache();
_getAllocImpl().EmptyCache();
}

DataPtr allocate(const size_t nbytes) const override {
__block id<MTLBuffer> buf = nbytes > 0 ? s_allocatorImpl.Malloc(nbytes, m_use_shared_storage) : nullptr;
__block id<MTLBuffer> buf = nbytes > 0 ? _getAllocImpl().Malloc(nbytes, m_use_shared_storage) : nullptr;
return { buf, buf, &Delete, at::Device(at::DeviceType::MPS, 0)};
}

DeleterFnPtr raw_deleter() const override { return &Delete; }
bool is_shared(void* ptr) const { return s_allocatorImpl.isSharedBuffer(ptr); }
bool is_shared(void* ptr) const { return _getAllocImpl().isSharedBuffer(ptr); }
bool is_shared_storge_supported() const { return m_has_unified_memory; }

private:
@ -292,7 +296,11 @@ private:
// use shared buffers on unified memory
bool m_use_shared_storage;

static void Delete(void* ptr) { if (ptr) s_allocatorImpl.Free(ptr); }
static void Delete(void* ptr) {
if (ptr) {
_getAllocImpl().Free(ptr);
}
}

static bool isEnvVarEnabled(const char *envvar) {
const char *e = getenv(envvar);
@ -305,15 +313,31 @@ private:
}
};

static MPSAllocator s_mps_shared_alloc(true);
namespace {
MPSAllocator& _getSharedAllocator() {
static MPSAllocator s_mps_shared_alloc(true);
return s_mps_shared_alloc;
}
MPSAllocator& _getPrivateAllocator() {
static mps::MPSAllocator s_mps_private_alloc(false);
return s_mps_private_alloc;
}
} // anonymous namespace

at::Allocator* getMPSSharedAllocator()
{
if (s_mps_shared_alloc.is_shared_storge_supported())
return &s_mps_shared_alloc;
auto& sa = _getSharedAllocator();
if (sa.is_shared_storge_supported()) {
return &sa;
}

return nullptr;
}

at::Allocator* getMPSStaticAllocator() {
return &_getPrivateAllocator();
}

} // namespace mps

namespace native {
@ -325,7 +349,7 @@ namespace native {
bool is_pinned_mps(const Tensor& self, c10::optional<Device> device)
{
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!device.has_value() || device->is_mps());
return at::mps::s_mps_shared_alloc.is_shared(self.storage().data());
return at::mps::_getSharedAllocator().is_shared(self.storage().data());
}

// torch.pin_memory() implementation
@ -345,7 +369,4 @@ Tensor _pin_memory_mps(const Tensor& self, c10::optional<Device> device)

} // namespace native

static mps::MPSAllocator s_mps_private_alloc(false);
REGISTER_ALLOCATOR(DeviceType::MPS, &s_mps_private_alloc);

} // namespace at
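The MPSAllocator.mm hunks above replace the file-scope static allocator objects (s_mps_shared_alloc, s_mps_private_alloc) with accessor functions that own function-local statics. A minimal standalone C++ sketch of that pattern follows; DemoAllocator and the two getter names are illustrative stand-ins, not the actual PyTorch symbols.

#include <iostream>

// Stand-in for the real allocator; only the construction side effect matters here.
struct DemoAllocator {
  explicit DemoAllocator(bool shared) : shared_(shared) {
    std::cout << "constructed " << (shared_ ? "shared" : "private") << " allocator\n";
  }
  bool shared_;
};

// Function-local statics are constructed lazily on first use and, since C++11,
// in a thread-safe way, which avoids the initialization-order problems that
// file-scope globals can hit when an allocator is registered at load time.
DemoAllocator& getSharedAllocator() {
  static DemoAllocator alloc(/*shared=*/true);
  return alloc;
}

DemoAllocator& getPrivateAllocator() {
  static DemoAllocator alloc(/*shared=*/false);
  return alloc;
}

int main() {
  // Nothing is constructed until the first call.
  DemoAllocator& a = getPrivateAllocator();
  DemoAllocator& b = getPrivateAllocator();  // same instance as `a`
  std::cout << (&a == &b) << "\n";           // prints 1
}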
@ -56,6 +56,8 @@ class TORCH_API MPSDevice {
MPSDevice();
};

TORCH_API bool is_available();

at::Allocator* GetMPSAllocator(bool useSharedAllocator = false);

} // namespace mps

@ -20,12 +20,26 @@ MPSDevice::~MPSDevice() {
_mtl_device = nil;
}

MPSDevice::MPSDevice() {
NSArray* devices = MTLCopyAllDevices();
MPSDevice::MPSDevice(): _mtl_device(nil) {
// Check that MacOS 12.3+ version of MPS framework is available
// Create the MPSGraph and check method introduced in 12.3+
// which is used by MPS backend.
id mpsCD = NSClassFromString(@"MPSGraph");
if ([mpsCD instancesRespondToSelector:@selector(LSTMWithSourceTensor:
recurrentWeight:
inputWeight:
bias:
initState:
initCell:
descriptor:
name:)] == NO) {
return;
}
NSArray* devices = [MTLCopyAllDevices() autorelease];
for (unsigned long i = 0 ; i < [devices count] ; i++) {
id<MTLDevice> device = devices[i];
if(![device isLowPower]) { // exclude Intel GPUs
_mtl_device = device;
_mtl_device = [device retain];
break;
}
}
@ -33,8 +47,13 @@ MPSDevice::MPSDevice() {
}

at::Allocator* getMPSSharedAllocator();
at::Allocator* getMPSStaticAllocator();
at::Allocator* GetMPSAllocator(bool useSharedAllocator) {
return useSharedAllocator ? getMPSSharedAllocator() : GetAllocator(DeviceType::MPS);
return useSharedAllocator ? getMPSSharedAllocator() : getMPSStaticAllocator();
}

bool is_available() {
return MPSDevice::getInstance()->device() != nil;
}

} // namespace mps
@ -47,7 +47,7 @@ MPSDataType getMPSDataType(ScalarType scalar_type);
MPSDataType getMPSScalarType(ScalarType scalar_type);
std::string getMPSTypeString(ScalarType scalar_type);
std::string getMPSShapeString(MPSShape* shape);
std::string getTensorsStringKey(const TensorList& tensors);
std::string getTensorsStringKey(const TensorList& tensors, bool use_scalar_value = true);
double getMPSScalarValue(const Tensor& t);
std::string getArrayRefString(const IntArrayRef s);
std::string getStridedKey(const Tensor& self, const IntArrayRef sz,
@ -62,8 +62,7 @@ class Placeholder {
public:
Placeholder() : _placeholder(nullptr), _value(nullptr) {}
Placeholder(MPSGraphTensor* mpsGraphTensor) : _placeholder(mpsGraphTensor), _value(nullptr) {}
Placeholder(MPSGraphTensor* mpsGraphTensor, const Tensor& self, MPSShape *mpsShape = nullptr,
bool check_view = true);
Placeholder(MPSGraphTensor* mpsGraphTensor, const Tensor& self, MPSShape *mpsShape = nullptr);
MPSGraphTensor* getMPSGraphTensor() {
return _placeholder;
}
@ -74,16 +73,28 @@ class Placeholder {
return _value == nullptr;
}

void allocateViewTensor(const at::Tensor& src)
{
assert (!_viewOutput.numel());
_viewOutput = at::native::empty_mps(
src.sizes(),
src.scalar_type(),
c10::nullopt,
kMPS,
c10::nullopt,
c10::nullopt);
}

private:
MPSGraphTensor* _placeholder;
MPSGraphTensorData* _value;
Tensor _viewOutput;
};

void resize_tensor(Tensor* output);
MPSGraphTensor* trunc_tensor(MPSGraph* mpsGraph, MPSGraphTensor* inputTensor);
MPSGraphTensorData *getMPSGraphTensorData(MPSGraph* mpsGraph,
MPSStream* mpsStream,
const Tensor& tensor);
MPSGraphTensorData *getMPSGraphTensorData(MPSGraph* mpsGraph, MPSStream* mpsStream, const Tensor& tensor);
MPSGraphTensorData* getMPSGraphTensorFromScalar(MPSStream* mpsStream, const Scalar& scalar, MPSDataType dataType);

MPSGraph* make_mps_graph();
void printTensorNDArray(const Tensor& t);
@ -91,7 +102,6 @@ void printTensorNDArray(const Tensor& t);
MPSGraphTensor* mpsGraphUnrankedPlaceHolder(MPSGraph *mpsGraph, MPSDataType dataType);
MPSGraphTensor* mpsGraphRankedPlaceHolder(MPSGraph *mpsGraph, MPSDataType dataType, MPSShape* mpsShape);
MPSGraphTensor* mpsGraphRankedPlaceHolder(MPSGraph *mpsGraph, const Tensor& tensor);
MPSGraphTensor* mpsGraphConstantFloatPlaceHolder(MPSGraph *mpsGraph, const double value, MPSShape* mpsShape);
MPSGraphTensor* mpsGraphConstantPlaceHolder(MPSGraph *mpsGraph, const double value, MPSShape* mpsShape, MPSDataType dataType);

string get_mem_format_string(c10::MemoryFormat memory_format);

@ -117,8 +117,11 @@ MPSDataType getMPSDataType(ScalarType scalar_type) {
return MPSDataTypeInt8;
case ScalarType::Bool:
return MPSDataTypeBool;
case ScalarType::Double:
TORCH_CHECK_TYPE(false, "Cannot convert a float64 Tensor to MPS as the MPS framework doesn't support float64. "
"Please use float32 instead.")
default:
TORCH_CHECK_TYPE(false, "Trying to convert ", scalar_type, " to the MPS backend but there is no mapping for it.")
TORCH_CHECK_TYPE(false, "Trying to convert ", scalar_type, " to the MPS backend but it does not have support for that dtype.")
}
}

@ -142,7 +145,7 @@ MPSDataType getMPSScalarType(ScalarType scalar_type) {
case ScalarType::Bool:
return MPSDataTypeBool;
default:
TORCH_INTERNAL_ASSERT(false, "Trying to convert ", scalar_type, " to the MPS backend but there is no mapping for it.")
TORCH_CHECK_TYPE(false, "Trying to convert ", scalar_type, " to the MPS backend but it does not have support for that dtype.")
}
}

@ -182,7 +185,7 @@ std::string getArrayRefString(const IntArrayRef s) {
return ss.str();
}

std::string getTensorsStringKey(const TensorList& tensors) {
std::string getTensorsStringKey(const TensorList& tensors, bool use_scalar_value) {
std::string str;
// The key format per tensor would look like ":MPSDataTypeFloat32[1,1,1,10]:"
for (const Tensor& tensor: tensors) {
@ -191,7 +194,7 @@ std::string getTensorsStringKey(const TensorList& tensors) {
str += getMPSTypeString(tensor.scalar_type()) + "[";
// if tensor is a scalar
if (tensor.dim() == 0) {
str += std::to_string(getMPSScalarValue(tensor));
str += (use_scalar_value ? std::to_string(getMPSScalarValue(tensor)) : "Scalar");
} else {
const NSString* ns_shape_key = [[getMPSShape(tensor) valueForKey:@"description"] componentsJoinedByString:@","];
str += std::string(ns_shape_key.UTF8String);
@ -249,107 +252,123 @@ MPSShape* getMPSShape(IntArrayRef sizes) {
|
||||
|
||||
void printTensorNDArray(const Tensor& t) {
|
||||
if (!t.is_mps()) return;
|
||||
if(t.numel() == 0)
|
||||
{
|
||||
std::cout << "Empty tensor" << std::endl;
|
||||
return;
|
||||
}
|
||||
if(t.numel() == 0) return;
|
||||
// Get shape and data type
|
||||
auto selfShape = getMPSShape(t);
|
||||
auto selfDType = getMPSDataType(t.scalar_type());
|
||||
|
||||
// Initialize data
|
||||
id<MTLBuffer> selfBuf = __builtin_bit_cast(id<MTLBuffer>, t.storage().data());
|
||||
MPSGraphTensorData* tdata = [[MPSGraphTensorData alloc] initWithMTLBuffer:selfBuf
|
||||
MPSGraphTensorData* tdata = [[[MPSGraphTensorData alloc] initWithMTLBuffer:selfBuf
|
||||
shape:selfShape
|
||||
dataType:selfDType];
|
||||
dataType:selfDType] autorelease];
|
||||
[tdata printNDArray];
|
||||
}
|
||||
|
||||
id<MTLBuffer> gatherViewTensor(const at::Tensor& src, id<MTLBuffer> sourceBuffer) {
|
||||
assert (!src.is_contiguous());
|
||||
MPSCachedGraph* _getCachedGraph(const at::Tensor& src) {
|
||||
MPSGraphCache* cache_ = MPSGraphCache::getInstance();
|
||||
string key = getStridedKey(src, src.sizes(), src.strides(), src.storage_offset());
|
||||
MPSCachedGraph* cachedGraph = cache_->LookUp(key);
|
||||
|
||||
return cachedGraph;
|
||||
}
|
||||
|
||||
id<MTLBuffer> _gatherViewTensor(const at::Tensor& src, id<MTLBuffer> sourceBuffer, MPSCachedGraph* mpsCachedGraph, Tensor& output) {
|
||||
TORCH_CHECK(mpsCachedGraph != nil);
|
||||
|
||||
id<MTLDevice> device = MPSDevice::getInstance()->device();
|
||||
MPSStream* stream = getCurrentMPSStream();
|
||||
|
||||
struct CachedGraph : public MPSCachedGraph
|
||||
{
|
||||
CachedGraph(MPSGraph *graph) : MPSCachedGraph(graph) {}
|
||||
MPSGraphTensor* inputTensor_ = nil;
|
||||
MPSGraphTensor* outputTensor_ = nil;
|
||||
IntArrayRef size_;
|
||||
IntArrayRef stride_;
|
||||
int64_t storage_offset_;
|
||||
};
|
||||
|
||||
CachedGraph* cachedGraph = static_cast<CachedGraph *>(mpsCachedGraph);
|
||||
|
||||
@autoreleasepool {
|
||||
struct CachedGraph : public MPSCachedGraph
|
||||
{
|
||||
CachedGraph(MPSGraph *graph) : MPSCachedGraph(graph) {}
|
||||
MPSGraphTensor* inputTensor_ = nil;
|
||||
MPSGraphTensor* outputTensor_ = nil;
|
||||
IntArrayRef size_;
|
||||
IntArrayRef stride_;
|
||||
int64_t storage_offset_;
|
||||
MPSGraphTensor* inputTensor = cachedGraph->inputTensor_;
|
||||
MPSGraphTensorData* inputTensorData = [[[MPSGraphTensorData alloc] initWithMTLBuffer: sourceBuffer
|
||||
shape: [inputTensor shape]
|
||||
dataType: [inputTensor dataType]] autorelease];
|
||||
id<MTLBuffer> resultBuffer = __builtin_bit_cast(id<MTLBuffer>, output.storage().data());
|
||||
MPSGraphTensorData* outputTensorData = [[[MPSGraphTensorData alloc] initWithMTLBuffer: resultBuffer
|
||||
shape: getMPSShape(src.sizes())
|
||||
dataType: getMPSDataType(src.scalar_type())] autorelease];
|
||||
NSDictionary<MPSGraphTensor*, MPSGraphTensorData*>* feeds = @{
|
||||
inputTensor : inputTensorData
|
||||
};
|
||||
|
||||
MPSGraphCache* cache_ = MPSGraphCache::getInstance();
|
||||
string key = getStridedKey(src, src.sizes(), src.strides(), src.storage_offset());
|
||||
CachedGraph* cachedGraph = static_cast<CachedGraph *>(cache_->LookUp(key));
|
||||
if (cachedGraph) {
|
||||
@autoreleasepool {
|
||||
MPSGraphTensor* inputTensor = cachedGraph->inputTensor_;
|
||||
auto output = at::native::empty_mps(
|
||||
src.sizes(),
|
||||
src.scalar_type(),
|
||||
c10::nullopt,
|
||||
kMPS,
|
||||
c10::nullopt,
|
||||
c10::nullopt);
|
||||
MPSGraphTensorData* inputTensorData = [[MPSGraphTensorData alloc] initWithMTLBuffer: sourceBuffer
|
||||
shape: [inputTensor shape]
|
||||
dataType: [inputTensor dataType]];
|
||||
id<MTLBuffer> resultBuffer = __builtin_bit_cast(id<MTLBuffer>, output.storage().data());
|
||||
MPSGraphTensorData* outputTensorData = [[MPSGraphTensorData alloc] initWithMTLBuffer: resultBuffer
|
||||
shape: getMPSShape(src.sizes())
|
||||
dataType: getMPSDataType(src.scalar_type())];
|
||||
NSDictionary<MPSGraphTensor*, MPSGraphTensorData*>* feeds = @{
|
||||
inputTensor : inputTensorData
|
||||
};
|
||||
NSDictionary<MPSGraphTensor*, MPSGraphTensorData*>* results = @{
|
||||
cachedGraph->outputTensor_ : outputTensorData
|
||||
};
|
||||
|
||||
NSDictionary<MPSGraphTensor*, MPSGraphTensorData*>* results = @{
|
||||
cachedGraph->outputTensor_ : outputTensorData
|
||||
};
|
||||
|
||||
runMPSGraph(stream, cachedGraph->graph(), feeds, results);
|
||||
return resultBuffer;
|
||||
}
|
||||
}
|
||||
runMPSGraph(stream, cachedGraph->graph(), feeds, results);
|
||||
return resultBuffer;
|
||||
}
|
||||
}
|
||||
|
||||
id<MTLBuffer> gatherViewTensor(const at::Tensor& src, id<MTLBuffer> sourceBuffer) {
|
||||
MPSCachedGraph* mpsCachedGraph = _getCachedGraph(src);
|
||||
if (mpsCachedGraph) {
|
||||
Tensor output = at::native::empty_mps(
|
||||
src.sizes(),
|
||||
src.scalar_type(),
|
||||
c10::nullopt,
|
||||
kMPS,
|
||||
c10::nullopt,
|
||||
c10::nullopt);
|
||||
|
||||
_gatherViewTensor(src, sourceBuffer, mpsCachedGraph, output);
|
||||
return __builtin_bit_cast(id<MTLBuffer>, output.storage().data());
|
||||
}
|
||||
|
||||
return nil;
|
||||
}
|
||||
|
||||
Placeholder::Placeholder(MPSGraphTensor* mpsGraphTensor, const Tensor& src,
|
||||
MPSShape *mpsShape, bool check_view)
|
||||
id<MTLBuffer> gatherViewTensorWithAllocatedMem(const at::Tensor& src, id<MTLBuffer> sourceBuffer, Tensor& output, MPSCachedGraph* mpsCachedGraph) {
|
||||
TORCH_CHECK(mpsCachedGraph != nil);
|
||||
|
||||
_gatherViewTensor(src, sourceBuffer, mpsCachedGraph, output);
|
||||
return __builtin_bit_cast(id<MTLBuffer>, output.storage().data());
|
||||
}
|
||||
|
||||
Placeholder::Placeholder(MPSGraphTensor* mpsGraphTensor, const Tensor& src, MPSShape *mpsShape)
|
||||
{
|
||||
Tensor src_ = src;
|
||||
TORCH_CHECK(src_.is_mps(), "Placeholder storage has not been allocated on MPS device!");
|
||||
// extract the pointer to MTLBuffer from the Tensor's storage
|
||||
id<MTLBuffer> srcBuf = __builtin_bit_cast(id<MTLBuffer>, src.storage().data());
|
||||
if (check_view && !src.is_contiguous()) {
|
||||
id<MTLBuffer> gatherTensor = gatherViewTensor(src, srcBuf);
|
||||
if (gatherTensor) {
|
||||
srcBuf = gatherTensor;
|
||||
if (src.is_view()) {
|
||||
MPSCachedGraph* cachedGraph = _getCachedGraph(src);
|
||||
if (cachedGraph) {
|
||||
allocateViewTensor(src);
|
||||
id<MTLBuffer> gatherTensor = gatherViewTensorWithAllocatedMem(src, srcBuf, _viewOutput, cachedGraph);
|
||||
if (gatherTensor) {
|
||||
srcBuf = gatherTensor;
|
||||
}
|
||||
} else {
|
||||
src_ = src.contiguous();
|
||||
srcBuf = __builtin_bit_cast(id<MTLBuffer>, src_.storage().data());
|
||||
}
|
||||
}
|
||||
const size_t buf_size = [srcBuf length];
|
||||
|
||||
// tensor.numel() could be zero, but tensor is valid as long as the buffer size is non-zero.
|
||||
// if buf_size is zero in here, it's not a user error. It could be a missing check for
|
||||
// if buffer size is zero in here, it's not a user error. It could be a missing check for
|
||||
// tensor.numel() == 0 in our internal implementations of ops.
|
||||
TORCH_INTERNAL_ASSERT(buf_size > 0, "Placeholder tensor is empty!");
|
||||
TORCH_INTERNAL_ASSERT([srcBuf length] > 0, "Placeholder tensor is empty!");
|
||||
|
||||
TORCH_CHECK(src_.storage().nbytes() <= buf_size, "Placeholder buffer size (", buf_size,
|
||||
") is not large enough to contain the Tensor storage of size ", src_.storage().nbytes());
|
||||
|
||||
const MPSDataType mpsDataType = getMPSDataType(src_.scalar_type());
|
||||
const MPSDataType mpsDataType = src_.dim() == 0 ? getMPSScalarType(src_.scalar_type()) : getMPSDataType(src_.scalar_type());
|
||||
if (!mpsShape)
|
||||
mpsShape = getMPSShape(src_);
|
||||
|
||||
_value = [[MPSGraphTensorData alloc] initWithMTLBuffer:srcBuf
|
||||
shape:mpsShape
|
||||
dataType:mpsDataType];
|
||||
_value = [[[MPSGraphTensorData alloc] initWithMTLBuffer:srcBuf
|
||||
shape:mpsShape
|
||||
dataType:mpsDataType] autorelease];
|
||||
TORCH_INTERNAL_ASSERT(_value);
|
||||
_placeholder = mpsGraphTensor;
|
||||
}
|
||||
@ -379,6 +398,43 @@ MPSGraphTensorData *getMPSGraphTensorData(MPSGraph* mpsGraph,
|
||||
return result;
|
||||
}
|
||||
|
||||
MPSGraphTensorData* getMPSGraphTensorFromScalar(MPSStream* mpsStream, const Scalar& scalar, MPSDataType dataType) {
|
||||
union v_t {
|
||||
float f; // MPS doesn't support 'double'
|
||||
int64_t i;
|
||||
bool b;
|
||||
} v;
|
||||
switch (dataType) {
|
||||
case MPSDataTypeFloat32:
|
||||
case MPSDataTypeFloat16:
|
||||
v.f = scalar.to<float>();
|
||||
break;
|
||||
case MPSDataTypeInt64:
|
||||
v.i = scalar.to<int64_t>();
|
||||
break;
|
||||
case MPSDataTypeInt32:
|
||||
v.i = scalar.to<int32_t>();
|
||||
break;
|
||||
case MPSDataTypeInt16:
|
||||
v.i = scalar.to<int16_t>();
|
||||
break;
|
||||
case MPSDataTypeInt8:
|
||||
v.i = scalar.to<int8_t>();
|
||||
break;
|
||||
case MPSDataTypeBool:
|
||||
v.b = scalar.to<bool>();
|
||||
break;
|
||||
default:
|
||||
TORCH_INTERNAL_ASSERT(false, "Unsupported scalar type on MPS backend.")
|
||||
}
|
||||
|
||||
MPSNDArrayDescriptor *tensorDesc = [MPSNDArrayDescriptor descriptorWithDataType:dataType shape:@[@1]];
|
||||
MPSNDArray *tensorNDArray = [[[MPSNDArray alloc] initWithDevice:mpsStream->device() descriptor:tensorDesc] autorelease];
|
||||
[tensorNDArray writeBytes:&v strideBytes:nil];
|
||||
MPSGraphTensorData* result = [[[MPSGraphTensorData alloc] initWithMPSNDArray:tensorNDArray] autorelease];
|
||||
return result;
|
||||
}
|
||||
|
||||
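The getMPSGraphTensorFromScalar hunk above packs the host scalar into a small union (float, int64_t, or bool, since MPS has no double) and writes those bytes into a one-element NDArray. A short C++ sketch of just the packing step, with ScalarSlot and Dtype as illustrative names; the MPSNDArray upload itself is omitted.

#include <cstdint>
#include <iostream>

// Mirrors the value union in the diff above: one slot that can hold a float,
// a 64-bit integer, or a bool, selected by the requested device dtype.
union ScalarSlot {
  float f;     // doubles are narrowed to float, as in the MPS path
  int64_t i;
  bool b;
};

enum class Dtype { Float32, Int64, Bool };

ScalarSlot pack_scalar(double value, Dtype dtype) {
  ScalarSlot v{};
  switch (dtype) {
    case Dtype::Float32: v.f = static_cast<float>(value); break;
    case Dtype::Int64:   v.i = static_cast<int64_t>(value); break;
    case Dtype::Bool:    v.b = (value != 0.0); break;
  }
  return v;  // the real code hands the union's address to writeBytes:
}

int main() {
  ScalarSlot s = pack_scalar(3.5, Dtype::Float32);
  std::cout << s.f << "\n";  // 3.5
}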
void resize_tensor(Tensor* output) {
|
||||
output->resize_(output->sizes());
|
||||
}
|
||||
@ -389,13 +445,6 @@ MPSGraph* make_mps_graph() {
|
||||
return mpsGraph;
|
||||
}
|
||||
|
||||
MPSGraphTensor* mpsGraphConstantFloatPlaceHolder(MPSGraph *mpsGraph, const double value, MPSShape* mpsShape) {
|
||||
// "value" is always double, so is the Placeholder's type (we only support Float32).
|
||||
return [mpsGraph constantWithScalar:value
|
||||
shape:mpsShape
|
||||
dataType:MPSDataTypeFloat32];
|
||||
}
|
||||
|
||||
MPSGraphTensor* mpsGraphConstantPlaceHolder(MPSGraph *mpsGraph, const double value, MPSShape* mpsShape, MPSDataType dataType) {
|
||||
// Bool is not handled by constantWithScalar
|
||||
MPSGraphTensor* constPlaceHolder = [mpsGraph constantWithScalar:value
|
||||
|
@ -26,6 +26,8 @@ void set_kernel_params
|
||||
kernel_sizeW = isizeW - (osizeW-1) * strideW;
|
||||
}
|
||||
|
||||
// Adaptive average pooling
|
||||
|
||||
Tensor& adaptive_avg_pool2d_out_mps
|
||||
(const Tensor& input,
|
||||
IntArrayRef output_size,
|
||||
@ -150,5 +152,93 @@ Tensor adaptive_avg_pool2d_backward_mps
|
||||
|
||||
}
|
||||
|
||||
// Adaptive max pooling
|
||||
|
||||
TORCH_IMPL_FUNC(adaptive_max_pool2d_out_mps)
|
||||
(const Tensor& input,
|
||||
IntArrayRef output_size,
|
||||
const Tensor& output,
|
||||
const Tensor& indices) {
|
||||
|
||||
for (int64_t i = 1; i < input.ndimension(); i++) {
|
||||
TORCH_CHECK(input.size(i) > 0,
|
||||
"adaptive_max_pool2d(): Expected input to have non-zero size for non-batch dimensions, "
|
||||
"but input has sizes ", input.sizes(), " with dimension ", i, " being "
|
||||
"empty");
|
||||
}
|
||||
|
||||
int64_t isizeH = input.size(-2);
|
||||
int64_t isizeW = input.size(-1);
|
||||
|
||||
int64_t osizeH = output_size[0];
|
||||
int64_t osizeW = output_size[1];
|
||||
|
||||
if(input.suggest_memory_format() == at::MemoryFormat::ChannelsLast)
|
||||
TORCH_CHECK(input.ndimension() == 4,
|
||||
"adaptive_avg_pool2d(): Expected 4D tensor, but got ",
|
||||
input.sizes())
|
||||
|
||||
switch (input.suggest_memory_format()) {
|
||||
case at::MemoryFormat::Contiguous:
|
||||
case at::MemoryFormat::ChannelsLast:
|
||||
break;
|
||||
default:
|
||||
TORCH_CHECK(
|
||||
false,
|
||||
"Unsupported memory format. Supports only ChannelsLast, Contiguous")
|
||||
}
|
||||
|
||||
int64_t strideH;
|
||||
int64_t strideW;
|
||||
int64_t kernel_sizeH;
|
||||
int64_t kernel_sizeW;
|
||||
|
||||
set_kernel_params(isizeH, isizeW,
|
||||
osizeH, osizeW,
|
||||
strideH, strideW,
|
||||
kernel_sizeH, kernel_sizeW);
|
||||
|
||||
auto outputs = at::max_pool2d_with_indices(input,
|
||||
IntArrayRef({kernel_sizeH, kernel_sizeW}),
|
||||
IntArrayRef({strideH, strideW}),
|
||||
IntArrayRef({0, 0}),
|
||||
IntArrayRef({1, 1}),
|
||||
false);
|
||||
|
||||
output.copy_(std::get<0>(outputs));
|
||||
indices.copy_(std::get<1>(outputs));
|
||||
}
|
||||
|
||||
TORCH_IMPL_FUNC(adaptive_max_pool2d_backward_out_mps)
|
||||
(const Tensor& gradOutput,
|
||||
const Tensor& input,
|
||||
const Tensor& indices,
|
||||
const Tensor& gradInput) {
|
||||
|
||||
int64_t isizeH = input.size(-2);
|
||||
int64_t isizeW = input.size(-1);
|
||||
int64_t osizeH = gradOutput.size(-2);
|
||||
int64_t osizeW = gradOutput.size(-1);
|
||||
|
||||
int64_t strideH, strideW, kernel_sizeH, kernel_sizeW;
|
||||
|
||||
set_kernel_params(isizeH, isizeW,
|
||||
osizeH, osizeW,
|
||||
strideH, strideW,
|
||||
kernel_sizeH, kernel_sizeW);
|
||||
|
||||
auto returnGradInput = at::max_pool2d_with_indices_backward(gradOutput,
|
||||
input,
|
||||
IntArrayRef({kernel_sizeH, kernel_sizeW}),
|
||||
IntArrayRef({strideH, strideW}),
|
||||
IntArrayRef({0, 0}),
|
||||
IntArrayRef({1, 1}),
|
||||
false,
|
||||
indices);
|
||||
|
||||
gradInput.copy_(returnGradInput);
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
}
|
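The adaptive pooling hunks above lower adaptive_max_pool2d to a regular max pool by deriving a stride and kernel size from the input and output extents; the kernel formula comes from the set_kernel_params line shown earlier (kernel = isize - (osize - 1) * stride), while the stride = isize / osize step is an assumption for the evenly divisible case this path targets. A small self-contained C++ sketch of that arithmetic:

#include <cassert>
#include <cstdint>
#include <iostream>

// Derive pooling parameters so that a fixed-size max pool reproduces adaptive
// pooling when the input size divides evenly by the output size.
void derive_pool_params(int64_t isize, int64_t osize,
                        int64_t& stride, int64_t& kernel) {
  assert(osize > 0 && isize % osize == 0);
  stride = isize / osize;                 // assumed divisible case
  kernel = isize - (osize - 1) * stride;  // formula from the diff above
}

int main() {
  int64_t strideH = 0, kernelH = 0;
  derive_pool_params(/*isize=*/8, /*osize=*/4, strideH, kernelH);
  std::cout << "stride=" << strideH << " kernel=" << kernelH << "\n";  // stride=2 kernel=2
}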
@ -21,21 +21,23 @@ struct BinaryOpCachedGraph : public MPSCachedGraph
|
||||
typedef MPSGraphTensor* (^BinaryOpBlock)(MPSGraph*, MPSGraphTensor*, MPSGraphTensor*);
|
||||
#define BinaryOpFn() MPSGraphTensor* (MPSGraph* mpsGraph, MPSGraphTensor* primary, MPSGraphTensor* secondary)
|
||||
|
||||
void binaryOpTensor(const Tensor& self_t, const Tensor& other_t, const Tensor& output, std::string op_name, BinaryOpBlock binaryBlock)
|
||||
void binaryOpTensor(const Tensor& self, const Tensor& other, const Tensor& output, std::string op_name, BinaryOpBlock binaryBlock)
|
||||
{
|
||||
// it's possible to receive empty tensors here
|
||||
if (self_t.numel() == 0 || other_t.numel() == 0) {
|
||||
if (self.numel() == 0 || other.numel() == 0) {
|
||||
return;
|
||||
}
|
||||
MPSStream* mpsStream = getCurrentMPSStream();
|
||||
|
||||
const bool is_self_scalar = self_t.dim() == 0;
|
||||
const bool is_other_scalar = other_t.dim() == 0;
|
||||
Tensor self = is_self_scalar ? self_t : self_t.contiguous(at::MemoryFormat::Contiguous);
|
||||
Tensor other = is_other_scalar ? other_t : other_t.contiguous(at::MemoryFormat::Contiguous);
|
||||
const bool is_self_scalar = self.dim() == 0;
|
||||
const bool is_other_scalar = other.dim() == 0;
|
||||
|
||||
const MPSDataType self_dtype = getMPSScalarType((is_self_scalar && !is_other_scalar ? other : self).scalar_type());
|
||||
const MPSDataType other_dtype = getMPSScalarType((!is_other_scalar ? other : self).scalar_type());
|
||||
|
||||
MPSGraphCache* cache_ = MPSGraphCache::getInstance();
|
||||
@autoreleasepool {
|
||||
string key = op_name + getTensorsStringKey({self, other});
|
||||
string key = op_name + getTensorsStringKey({self, other}, /*use_scalar_value*/ false);
|
||||
BinaryOpCachedGraph* cachedGraph = static_cast<BinaryOpCachedGraph *>(cache_->LookUp(key));
|
||||
|
||||
if(!cachedGraph) {
|
||||
@ -44,16 +46,8 @@ void binaryOpTensor(const Tensor& self_t, const Tensor& other_t, const Tensor& o
|
||||
@autoreleasepool {
|
||||
MPSGraph* mpsGraph = make_mps_graph();
|
||||
newCachedGraph = new BinaryOpCachedGraph(mpsGraph);
|
||||
newCachedGraph->primaryTensor = !is_self_scalar ? mpsGraphRankedPlaceHolder(mpsGraph, self) :
|
||||
mpsGraphConstantPlaceHolder(mpsGraph, getMPSScalarValue(self), getMPSShape(other),
|
||||
// if other is scalar too, then use self's data type here and let the other
|
||||
// have the same data type as self in the secondaryTensor
|
||||
getMPSDataType((!is_other_scalar ? other : self).scalar_type()));
|
||||
|
||||
newCachedGraph->secondaryTensor = !is_other_scalar ? mpsGraphRankedPlaceHolder(mpsGraph, other) :
|
||||
mpsGraphConstantPlaceHolder(mpsGraph, getMPSScalarValue(other), getMPSShape(self),
|
||||
// regardless of self's data type, the scondaryTensor's type must match it.
|
||||
getMPSDataType(self.scalar_type()));
|
||||
newCachedGraph->primaryTensor = mpsGraphRankedPlaceHolder(mpsGraph, self_dtype , getMPSShape(self));
|
||||
newCachedGraph->secondaryTensor = mpsGraphRankedPlaceHolder(mpsGraph, other_dtype, getMPSShape(other));
|
||||
newCachedGraph->outputTensor = binaryBlock(mpsGraph, newCachedGraph->primaryTensor, newCachedGraph->secondaryTensor);
|
||||
}
|
||||
return newCachedGraph;
|
||||
@ -61,21 +55,27 @@ void binaryOpTensor(const Tensor& self_t, const Tensor& other_t, const Tensor& o
|
||||
cachedGraph = static_cast<BinaryOpCachedGraph *>(tmpCachedGraph);
|
||||
}
|
||||
|
||||
NSMutableDictionary *feeds = [[NSMutableDictionary new] autorelease];
|
||||
if (!is_self_scalar) {
|
||||
Placeholder selfPlaceholder = Placeholder(cachedGraph->primaryTensor, self);
|
||||
NSMutableDictionary *feeds = [[NSMutableDictionary new] autorelease];
|
||||
Placeholder selfPlaceholder;
|
||||
Placeholder otherPlaceholder;
|
||||
|
||||
if (is_self_scalar) {
|
||||
feeds[cachedGraph->primaryTensor] = getMPSGraphTensorFromScalar(mpsStream, self.item(), self_dtype);
|
||||
} else {
|
||||
selfPlaceholder = Placeholder(cachedGraph->primaryTensor, self);
|
||||
feeds[selfPlaceholder.getMPSGraphTensor()] = selfPlaceholder.getMPSGraphTensorData();
|
||||
}
|
||||
if (!is_other_scalar) {
|
||||
Placeholder otherPlaceholder = Placeholder(cachedGraph->secondaryTensor, other);
|
||||
if (is_other_scalar) {
|
||||
feeds[cachedGraph->secondaryTensor] = getMPSGraphTensorFromScalar(mpsStream, other.item(), other_dtype);
|
||||
} else {
|
||||
otherPlaceholder = Placeholder(cachedGraph->secondaryTensor, other);
|
||||
feeds[otherPlaceholder.getMPSGraphTensor()] = otherPlaceholder.getMPSGraphTensorData();
|
||||
}
|
||||
|
||||
Placeholder outputPlaceholder = Placeholder(cachedGraph->outputTensor, output);
|
||||
NSDictionary<MPSGraphTensor*, MPSGraphTensorData*>* results = @{
|
||||
outputPlaceholder.getMPSGraphTensor() : outputPlaceholder.getMPSGraphTensorData()
|
||||
};
|
||||
runMPSGraph(getCurrentMPSStream(), cachedGraph->graph(), feeds, results);
|
||||
runMPSGraph(mpsStream, cachedGraph->graph(), feeds, results);
|
||||
}
|
||||
}
|
||||
|
||||
|
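The binaryOpTensor hunks above stop baking scalar operands into the compiled graph: the cache key now carries a literal "Scalar" tag (use_scalar_value = false) and the actual value is fed at run time through getMPSGraphTensorFromScalar, so one cached graph serves every scalar value. A small C++ sketch of the effect on cache reuse; the cache type and key strings here are illustrative, not the real MPSGraphCache.

#include <iostream>
#include <string>
#include <unordered_map>

// Illustrative stand-in for a compiled-graph cache keyed by a string.
std::unordered_map<std::string, int> graph_cache;
int compile_count = 0;

int& lookup_or_compile(const std::string& key) {
  auto it = graph_cache.find(key);
  if (it == graph_cache.end()) {
    ++compile_count;  // pretend we compiled a new graph
    it = graph_cache.emplace(key, compile_count).first;
  }
  return it->second;
}

std::string key_by_value(double scalar) { return "add:" + std::to_string(scalar); }
std::string key_by_placeholder(double)  { return "add:Scalar"; }

int main() {
  for (double s : {1.0, 2.0, 3.0}) lookup_or_compile(key_by_value(s));
  std::cout << "per-value keys compiled: " << compile_count << "\n";   // 3

  compile_count = 0;
  graph_cache.clear();
  for (double s : {1.0, 2.0, 3.0}) lookup_or_compile(key_by_placeholder(s));
  std::cout << "placeholder key compiled: " << compile_count << "\n";  // 1
}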
@ -132,7 +132,7 @@ Tensor _mps_convolution(
|
||||
MPSGraph* mpsGraph = native_mps::make_mps_graph();
|
||||
newCachedGraph = new CachedGraph(mpsGraph);
|
||||
|
||||
MPSGraphConvolution2DOpDescriptor *descriptor_ = [MPSGraphConvolution2DOpDescriptor new];
|
||||
MPSGraphConvolution2DOpDescriptor *descriptor_ = [[MPSGraphConvolution2DOpDescriptor new] autorelease];
|
||||
fill_conv_desc(descriptor_, stride[0], stride[1],
|
||||
dilation[0], dilation[1],
|
||||
padding[1], padding[0],
|
||||
@ -173,7 +173,7 @@ Tensor _mps_convolution(
|
||||
biasPlaceholder = native_mps::Placeholder(cachedGraph->biasTensor_, (bias_opt.value()).view({1, bias_shape[0], 1, 1}));
|
||||
auto outputPlaceholder = native_mps::Placeholder(cachedGraph->outputTensor_, *output);
|
||||
|
||||
NSMutableDictionary<MPSGraphTensor*, MPSGraphTensorData*>* feeds = [[NSMutableDictionary alloc] initWithCapacity: 3];
|
||||
NSMutableDictionary<MPSGraphTensor*, MPSGraphTensorData*>* feeds = [[[NSMutableDictionary alloc] initWithCapacity: 3] autorelease];
|
||||
feeds[inputPlaceholder.getMPSGraphTensor()] = inputPlaceholder.getMPSGraphTensorData();
|
||||
feeds[weightsPlaceholder.getMPSGraphTensor()] = weightsPlaceholder.getMPSGraphTensorData();
|
||||
if(bias_defined) {
|
||||
@ -262,7 +262,7 @@ Tensor mps_convolution_backward_input(
|
||||
MPSGraph* mpsGraph = native_mps::make_mps_graph();
|
||||
newCachedGraph = new CachedGraph(mpsGraph);
|
||||
|
||||
MPSGraphConvolution2DOpDescriptor *descriptor_ = [MPSGraphConvolution2DOpDescriptor new];
|
||||
MPSGraphConvolution2DOpDescriptor *descriptor_ = [[MPSGraphConvolution2DOpDescriptor new] autorelease];
|
||||
fill_conv_desc(descriptor_, stride[0], stride[1],
|
||||
dilation[0], dilation[1],
|
||||
padding[1], padding[0],
|
||||
@ -373,7 +373,7 @@ Tensor mps_convolution_backward_weights(
|
||||
MPSGraph* mpsGraph = native_mps::make_mps_graph();
|
||||
newCachedGraph = new CachedGraph(mpsGraph);
|
||||
|
||||
MPSGraphConvolution2DOpDescriptor *descriptor_ = [MPSGraphConvolution2DOpDescriptor new];
|
||||
MPSGraphConvolution2DOpDescriptor *descriptor_ = [[MPSGraphConvolution2DOpDescriptor new] autorelease];
|
||||
fill_conv_desc(descriptor_, stride[0], stride[1],
|
||||
dilation[0], dilation[1],
|
||||
padding[1], padding[0],
|
||||
|
@ -114,7 +114,7 @@ Tensor as_strided_tensorimpl_mps(const Tensor& self, IntArrayRef size,
|
||||
// 0 sizes won't result in any change in the shape of the Tensor so we can
|
||||
// skip it. Also if the memory is contiguous we don't need to do
|
||||
// gather-scatter operations using graph.
|
||||
if (size.size() > 0 && (!result.is_contiguous())) {
|
||||
if (size.size() > 0) {
|
||||
|
||||
// If self itself was a view tensor, that means we need to chain the graphs
|
||||
// else we will create a new entry in the cache
|
||||
@ -131,8 +131,7 @@ Tensor as_strided_tensorimpl_mps(const Tensor& self, IntArrayRef size,
|
||||
MPSGraphCache* cache_ = MPSGraphCache::getInstance();
|
||||
|
||||
@autoreleasepool {
|
||||
string lookup_key = mps::getStridedKey(self, self.sizes(), self.strides(),
|
||||
self.storage_offset());
|
||||
string lookup_key = mps::getStridedKey(self, size, stride, storage_offset);
|
||||
CachedGraph* cachedGraph = static_cast<CachedGraph *>(cache_->LookUp(lookup_key));
|
||||
|
||||
if(!cachedGraph) {
|
||||
@ -163,28 +162,6 @@ Tensor as_strided_tensorimpl_mps(const Tensor& self, IntArrayRef size,
|
||||
});
|
||||
cachedGraph = static_cast<CachedGraph *>(tmpCachedGraph);
|
||||
}
|
||||
} else {
|
||||
// Else part takes care of the chaining where multiple view operations
|
||||
// were implemented on the same underlying data storage ptr
|
||||
string insert_key = mps::getStridedKey(self, size, stride, storage_offset);
|
||||
MPSCachedGraph *tmpCachedGraph = cache_->CreateCachedGraph(insert_key, ^ MPSCachedGraph * () {
|
||||
CachedGraph *newCachedGraph = nil;
|
||||
@autoreleasepool {
|
||||
MPSGraph* mpsGraph = cachedGraph->graph();
|
||||
newCachedGraph = new CachedGraph(mpsGraph);
|
||||
newCachedGraph->inputTensor_ = cachedGraph->inputTensor_;
|
||||
newCachedGraph->outputTensor_ = chainViewOperation(mpsGraph, size,
|
||||
stride,
|
||||
storage_offset,
|
||||
cachedGraph->outputTensor_,
|
||||
self);
|
||||
newCachedGraph->size_ = size;
|
||||
newCachedGraph->stride_ = stride;
|
||||
newCachedGraph->storage_offset_ = storage_offset;
|
||||
}
|
||||
return newCachedGraph;
|
||||
});
|
||||
cachedGraph = static_cast<CachedGraph *>(tmpCachedGraph);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -218,6 +195,60 @@ static bool copy_requires_temporaries(const Tensor& dst, const Tensor& src) {
|
||||
}
|
||||
}
|
||||
|
||||
// Copy sourceBuffer into destBuffer, casting sourceBuffer to src.scalar_type().
|
||||
// The shapes and dtypes are taken from dst and src, but their storage pointers are not used.
|
||||
void copy_cast_mps(at::Tensor& dst, const at::Tensor& src,
|
||||
id<MTLBuffer> destBuffer, id<MTLBuffer> sourceBuffer) {
|
||||
using namespace mps;
|
||||
|
||||
struct CachedGraph : public MPSCachedGraph
|
||||
{
|
||||
CachedGraph(MPSGraph *graph) : MPSCachedGraph(graph) {}
|
||||
MPSGraphTensor* inputTensor_ = nil;
|
||||
MPSGraphTensor* outputTensor_ = nil;
|
||||
};
|
||||
|
||||
MPSStream* stream = getCurrentMPSStream();
|
||||
MPSGraphCache* cache_ = MPSGraphCache::getInstance();
|
||||
|
||||
MPSDataType dstDType = getMPSDataType(dst.scalar_type());
|
||||
MPSDataType srcDType = getMPSDataType(src.scalar_type());
|
||||
MPSShape* dstShape = getMPSShape(dst);
|
||||
MPSShape* srcShape = getMPSShape(src);
|
||||
|
||||
@autoreleasepool {
|
||||
string key = "copy_cast_mps" + getTensorsStringKey({src, dst});
|
||||
CachedGraph* cachedGraph = static_cast<CachedGraph *>(cache_->LookUp(key));
|
||||
|
||||
if (!cachedGraph) {
|
||||
MPSCachedGraph *tmpCachedGraph = cache_->CreateCachedGraph(key, ^ MPSCachedGraph * () {
|
||||
CachedGraph *newCachedGraph = nil;
|
||||
@autoreleasepool {
|
||||
MPSGraph* mpsGraph = make_mps_graph();
|
||||
newCachedGraph = new CachedGraph(mpsGraph);
|
||||
|
||||
MPSGraphTensor* inputTensor = mpsGraphRankedPlaceHolder(mpsGraph, src);
|
||||
MPSGraphTensor* outputTensor = [mpsGraph castTensor:inputTensor toType:dstDType name:@"cast"];
|
||||
|
||||
newCachedGraph->inputTensor_ = inputTensor;
|
||||
newCachedGraph->outputTensor_ = outputTensor;
|
||||
}
|
||||
return newCachedGraph;
|
||||
});
|
||||
cachedGraph = static_cast<CachedGraph *>(tmpCachedGraph);
|
||||
}
|
||||
MPSGraphTensorData* srcData = [[[MPSGraphTensorData alloc]
|
||||
initWithMTLBuffer:sourceBuffer shape:srcShape dataType:srcDType]
|
||||
autorelease];
|
||||
MPSGraphTensorData* dstData = [[[MPSGraphTensorData alloc]
|
||||
initWithMTLBuffer:destBuffer shape:dstShape dataType:dstDType]
|
||||
autorelease];
|
||||
NSDictionary<MPSGraphTensor*, MPSGraphTensorData*>* feeds = @{cachedGraph->inputTensor_: srcData};
|
||||
NSDictionary<MPSGraphTensor*, MPSGraphTensorData*>* results = @{cachedGraph->outputTensor_: dstData};
|
||||
runMPSGraph(stream, cachedGraph->graph(), feeds, results);
|
||||
}
|
||||
}
|
||||
|
||||
static at::Tensor& copy_from_mps_(at::Tensor& dst_, const at::Tensor& src_,
|
||||
bool non_blocking) {
|
||||
|
||||
@ -233,11 +264,6 @@ static at::Tensor& copy_from_mps_(at::Tensor& dst_, const at::Tensor& src_,
|
||||
} else {
|
||||
dst = dst_;
|
||||
}
|
||||
dst._set_conj(dst_.is_conj());
|
||||
src._set_conj(src_.is_conj());
|
||||
|
||||
dst._set_neg(dst_.is_neg());
|
||||
src._set_neg(src_.is_neg());
|
||||
|
||||
auto storage_byte_offset = src_.storage_offset() * src_.itemsize();
|
||||
id<MTLBuffer> sourceBuffer = __builtin_bit_cast(id<MTLBuffer>, src_.storage().data());
|
||||
@ -260,6 +286,11 @@ static at::Tensor& copy_from_mps_(at::Tensor& dst_, const at::Tensor& src_,
|
||||
if (sourceBuffer == nil) return dst_;
|
||||
NSUInteger destOffset = dst.storage_offset() * dst.itemsize();
|
||||
|
||||
// In case of dtype change, first convert src inplace
|
||||
if (src_.dtype() != dst_.dtype()) {
|
||||
copy_cast_mps(dst_, src_, sourceBuffer, sourceBuffer);
|
||||
}
|
||||
|
||||
@autoreleasepool {
|
||||
MTLResourceOptions options = MTLResourceOptionCPUCacheModeDefault | MTLResourceStorageModeShared;
|
||||
NSUInteger alignedLength = 0;
|
||||
@ -313,10 +344,14 @@ static at::Tensor& copy_to_mps_(at::Tensor& dst_, const at::Tensor& src_,
|
||||
id<MTLBuffer> destBuffer = __builtin_bit_cast(id<MTLBuffer>, dst_.storage().data());
|
||||
|
||||
|
||||
if (!src.is_contiguous()) {
|
||||
if (src_.is_view()) {
|
||||
src = src_.to(dst_.dtype()).expand_as(dst_).contiguous();
|
||||
} else {
|
||||
src = src_;
|
||||
if (src.dtype() != dst_.dtype()) {
|
||||
// In case of dtype change, perform conversion on source device
|
||||
src = src.to(dst_.dtype());
|
||||
}
|
||||
}
|
||||
|
||||
if (!dst_.is_contiguous()) {
|
||||
@ -336,6 +371,8 @@ static at::Tensor& copy_to_mps_(at::Tensor& dst_, const at::Tensor& src_,
|
||||
options:options
|
||||
deallocator:nil];
|
||||
sourceOffset = uintptr_t(host_src) - uintptr_t(alignedPtr);
|
||||
if (src_.is_view() || !src_.is_contiguous())
|
||||
sourceOffset += src_.storage_offset() * src_.itemsize();
|
||||
|
||||
dispatch_sync(stream->queue(), ^() {
|
||||
@autoreleasepool {
|
||||
@ -386,7 +423,6 @@ void copy_blit_mps(void* dst, const void* src, size_t size) {
|
||||
|
||||
static at::Tensor& copy_kernel_mps(at::Tensor& dst_, const at::Tensor& src_,
|
||||
bool non_blocking) {
|
||||
MPSStream* stream = getCurrentMPSStream();
|
||||
uint64_t size = src_.nbytes();
|
||||
auto src_byte_offset = src_.storage_offset() * src_.itemsize();
|
||||
id<MTLBuffer> sourceBuffer = __builtin_bit_cast(id<MTLBuffer>, src_.storage().data());
|
||||
@ -414,24 +450,24 @@ static at::Tensor& copy_kernel_mps(at::Tensor& dst_, const at::Tensor& src_,
|
||||
auto dst_byte_offset = dst.storage_offset() * dst.itemsize();
|
||||
id<MTLBuffer> destBuffer = __builtin_bit_cast(id<MTLBuffer>, dst.storage().data());
|
||||
|
||||
dispatch_sync(stream->queue(), ^() {
|
||||
@autoreleasepool {
|
||||
id<MTLCommandBuffer> commandBuffer = stream->commandBuffer();
|
||||
id<MTLBlitCommandEncoder> blitEncoder = [commandBuffer blitCommandEncoder];
|
||||
|
||||
[blitEncoder copyFromBuffer:sourceBuffer
|
||||
sourceOffset:src_byte_offset
|
||||
toBuffer:destBuffer
|
||||
destinationOffset:dst_byte_offset
|
||||
size:size];
|
||||
[blitEncoder endEncoding];
|
||||
if (non_blocking) {
|
||||
stream->commit(true);
|
||||
} else {
|
||||
if (src.dtype() == dst.dtype()) {
|
||||
MPSStream* stream = getCurrentMPSStream();
|
||||
dispatch_sync(stream->queue(), ^() {
|
||||
@autoreleasepool {
|
||||
id<MTLCommandBuffer> commandBuffer = stream->commandBuffer();
|
||||
id<MTLBlitCommandEncoder> blitEncoder = [commandBuffer blitCommandEncoder];
|
||||
[blitEncoder copyFromBuffer:sourceBuffer
|
||||
sourceOffset:src_byte_offset
|
||||
toBuffer:destBuffer
|
||||
destinationOffset:dst_byte_offset
|
||||
size:size];
|
||||
[blitEncoder endEncoding];
|
||||
stream->commitAndWait();
|
||||
}
|
||||
}
|
||||
});
|
||||
});
|
||||
} else {
|
||||
copy_cast_mps(dst_, src_, destBuffer, sourceBuffer);
|
||||
}
|
||||
return dst;
|
||||
}
|
||||
|
||||
|
@ -185,7 +185,7 @@ Tensor& normal_mps_out(const Tensor& mean, const Tensor& std, c10::optional<Gene
|
||||
|
||||
@autoreleasepool {
|
||||
MPSShape* input_shape = getMPSShape(output);
|
||||
string key = "normal_mps_out:" + getMPSShapeString(input_shape) + ":" + getMPSTypeString(output.scalar_type());
|
||||
string key = "normal_mps_out:" + getMPSShapeString(input_shape) + ":" + getMPSTypeString(output.scalar_type()) + ":" + to_string(seed_);
|
||||
CachedGraph* cachedGraph = static_cast<CachedGraph *>(cache_->LookUp(key));
|
||||
|
||||
if(!cachedGraph) {
|
||||
@ -210,6 +210,7 @@ Tensor& normal_mps_out(const Tensor& mean, const Tensor& std, c10::optional<Gene
|
||||
// MPSGenerator
|
||||
MPSGraphTensor* randomTensor = [mpsGraph randomTensorWithShape:input_shape
|
||||
descriptor:desc
|
||||
seed:seed_
|
||||
name:nil];
|
||||
MPSGraphTensor* scaleTensor = [mpsGraph multiplicationWithPrimaryTensor:randomTensor
|
||||
secondaryTensor:stdTensor
|
||||
|
120
aten/src/ATen/native/mps/operations/Eye.mm
Normal file
@ -0,0 +1,120 @@
|
||||
#include <ATen/ATen.h>
|
||||
#include <ATen/Tensor.h>
|
||||
#include <ATen/Utils.h>
|
||||
#include <ATen/mps/MPSStream.h>
|
||||
#include <ATen/native/mps/OperationUtils.h>
|
||||
#include <torch/library.h>
|
||||
#include <c10/util/Optional.h>
|
||||
|
||||
|
||||
// Steps to add op for MPS backend:
|
||||
// 1. Register the op in aten/src/ATen/native/native_functions.yaml with the "MPS" dispatch key
|
||||
// 2. Define the function interface for the MPS backend similar to other
|
||||
// backends depending on whether its structured or non-structured
|
||||
// 3. Add boiler-plate error checking code as expected for the Op
|
||||
// 4. The code structure roughly follows the pattern
|
||||
// a) get the MPS stream handle to encode work onto
|
||||
// b) get an instance of MPSGraphCache and create a key unique to the Graph
|
||||
// needed for implementing this Op. Any shape, dataType or parameter
|
||||
// passed to the MPSGraph during its construction will need to be included
|
||||
// here.
|
||||
// c) Create the graph using make_mps_graph() and add operations to the
|
||||
// instance of MPSGraph. This is if the Cache->lookup() fails.
|
||||
// d) Store the MPSGraphTensors for inputs and output which are needed at
|
||||
// runtime.
|
||||
// e) Use the CachedGraph instance's inputs and output to create Placeholders
|
||||
// You will need to pass in Tensor to create MPSGraphTensorData objects.
|
||||
// f) Using MPSGraphTensor and MPSGraphTensorData instances create a feeds
|
||||
// dictionary.
|
||||
// g) Then call runMPSGraph() with input params and return the result.
|
||||
//
|
||||
|
||||
|
||||
namespace at {
|
||||
namespace native {
|
||||
|
||||
Tensor& eye_out_mps(int64_t n, Tensor& result) {
|
||||
// the default value of `m` equals to `n`
|
||||
return eye_out_mps(n, n, result);
|
||||
}
|
||||
|
||||
Tensor& eye_out_mps(int64_t n, int64_t m, Tensor& result) {
|
||||
|
||||
// This is one example of boiler-plate error checking, taking after CPU/CUDA counterparts
|
||||
TORCH_CHECK(n >= 0, "n must be greater or equal to 0, got ", n);
|
||||
TORCH_CHECK(m >= 0, "m must be greater or equal to 0, got ", m);
|
||||
|
||||
result.resize_({n, m});
|
||||
result.zero_();
|
||||
|
||||
// Handle empty outputs
|
||||
if(result.numel() == 0)
|
||||
return result;
|
||||
|
||||
// Get MPS stream
|
||||
using namespace mps;
|
||||
MPSStream* stream = getCurrentMPSStream();
|
||||
|
||||
// Derive from MPSCachedGraph
|
||||
// This structure is used to cache an MPSGraph with certain keys, so that we don't have to compile the same MPSGraph time and time again for the same operation
|
||||
// The keys of this structure are based on the inputs and outputs needed for the operation
|
||||
// Here, we don't have any input tensors, just an output tensor
|
||||
struct CachedGraph : public MPSCachedGraph
|
||||
{
|
||||
CachedGraph(MPSGraph *graph) : MPSCachedGraph(graph) {}
|
||||
MPSGraphTensor* outputTensor_ = nil;
|
||||
};
|
||||
|
||||
MPSGraphCache* cache_ = MPSGraphCache::getInstance();
|
||||
|
||||
@autoreleasepool {
|
||||
// A key is used to identify the MPSGraph which was created once, and can be reused if the parameters, data types etc match the earlier created MPSGraph
|
||||
string key = "eye_out_mps:" + getTensorsStringKey({result});
|
||||
CachedGraph* cachedGraph = static_cast<CachedGraph *>(cache_->LookUp(key));
|
||||
if(!cachedGraph) {
|
||||
MPSCachedGraph *tmpCachedGraph = cache_->CreateCachedGraph(key, ^ MPSCachedGraph * () {
|
||||
|
||||
CachedGraph *newCachedGraph = nil;
|
||||
|
||||
@autoreleasepool {
|
||||
// Initialize graph
|
||||
MPSGraph* mpsGraph = make_mps_graph();
|
||||
newCachedGraph = new CachedGraph(mpsGraph);
|
||||
MPSGraphTensor* onesTensor = [mpsGraph constantWithScalar:1.0f
|
||||
shape:getMPSShape(result)
|
||||
dataType:getMPSDataType(result.scalar_type())];
|
||||
|
||||
// Here we can call the MPSGraph API needed to execute the operation.
|
||||
// The API details can be found here: https://developer.apple.com/documentation/metalperformanceshadersgraph/mpsgraph
|
||||
MPSGraphTensor* outputTensor = [mpsGraph bandPartWithTensor:onesTensor
|
||||
numLower:0
|
||||
numUpper:0
|
||||
name:nil];
|
||||
newCachedGraph->outputTensor_ = outputTensor;
|
||||
}
|
||||
return newCachedGraph;
|
||||
});
|
||||
cachedGraph = static_cast<CachedGraph *>(tmpCachedGraph);
|
||||
}
|
||||
|
||||
// Create placeholders which use the keys of the CachedGraph to create inputs and outputs of the operation
|
||||
Placeholder outputPlaceholder = Placeholder(cachedGraph->outputTensor_, result);
|
||||
|
||||
// Create dictionary of inputs/feeds and outputs/results
|
||||
// In this case, there are no inputs, so the feeds are nil
|
||||
NSDictionary<MPSGraphTensor*, MPSGraphTensorData*>* feeds = nil;
|
||||
|
||||
NSDictionary<MPSGraphTensor*, MPSGraphTensorData*>* results = @{
|
||||
outputPlaceholder.getMPSGraphTensor() : outputPlaceholder.getMPSGraphTensorData()
|
||||
};
|
||||
|
||||
// Run the graph
|
||||
runMPSGraph(stream, cachedGraph->graph(), feeds, results);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
}
|
@ -18,6 +18,7 @@
|
||||
#include <ATen/native/IndexingUtils.h>
|
||||
#include <c10/util/irange.h>
|
||||
#include <c10/core/QScheme.h>
|
||||
#include <c10/util/SmallVector.h>
|
||||
|
||||
#ifdef __OBJC__
|
||||
#include <MetalPerformanceShaders/MetalPerformanceShaders.h>
|
||||
@ -32,22 +33,22 @@ Tensor index_select_mps(const Tensor & self,
|
||||
IntArrayRef input_shape = self.sizes();
|
||||
auto num_input_dims = input_shape.size();
|
||||
|
||||
IntArrayRef index_shape = index.sizes();
|
||||
auto num_indices = index.numel();
|
||||
TORCH_CHECK_INDEX(index.dim() <= 1, "index_select(): Index is supposed to be a vector");
|
||||
|
||||
dim = maybe_wrap_dim(dim, self.dim());
|
||||
int64_t* shape_data = (int64_t*)malloc(num_input_dims * sizeof(int64_t));
|
||||
std::vector<int64_t> shape_data(num_input_dims);
|
||||
|
||||
// Calculate new shape
|
||||
for(int i = 0; i < num_input_dims; i++) {
|
||||
if(i == dim)
|
||||
for(auto i : c10::irange(num_input_dims)) {
|
||||
if (i == dim) {
|
||||
shape_data[i] = num_indices;
|
||||
else
|
||||
} else {
|
||||
shape_data[i] = input_shape[i];
|
||||
}
|
||||
}
|
||||
|
||||
IntArrayRef output_shape = IntArrayRef(shape_data, num_input_dims);
|
||||
IntArrayRef output_shape = IntArrayRef(shape_data.data(), num_input_dims);
|
||||
|
||||
Tensor result = at::native::empty_mps(
|
||||
output_shape,
|
||||
@ -57,8 +58,6 @@ Tensor index_select_mps(const Tensor & self,
|
||||
c10::nullopt,
|
||||
c10::nullopt);
|
||||
|
||||
free(shape_data);
|
||||
|
||||
index_select_out_mps(self, dim, index, result);
|
||||
return result;
|
||||
}
|
||||
@ -245,13 +244,10 @@ Tensor embedding_dense_backward_mps(
|
||||
IntArrayRef indices_shape = indices.sizes();
|
||||
int64_t num_indices_dims = indices_shape.size();
|
||||
|
||||
int64_t* outgoing_gradient_shape = (int64_t *) malloc(sizeof(int64_t) * 2);
|
||||
int64_t D = incoming_gradient_shape[num_incoming_gradient_dims - 1];
|
||||
outgoing_gradient_shape[0] = num_weights;
|
||||
outgoing_gradient_shape[1] = D;
|
||||
int64_t num_outgoing_gradient_dims = 2;
|
||||
c10::SmallVector<int64_t, 2> outgoing_gradient_shape{num_weights, D};
|
||||
Tensor outgoing_gradient = at::native::empty_mps(
|
||||
IntArrayRef(outgoing_gradient_shape, num_outgoing_gradient_dims),
|
||||
IntArrayRef(outgoing_gradient_shape.data(), outgoing_gradient_shape.size()),
|
||||
grad_.scalar_type(),
|
||||
c10::nullopt,
|
||||
kMPS,
|
||||
@ -288,7 +284,7 @@ Tensor embedding_dense_backward_mps(
|
||||
MPSGraphTensor *outgoingGradTensor;
|
||||
outgoingGradTensor = [mpsGraph scatterNDWithUpdatesTensor:incomingGradTensor
|
||||
indicesTensor:reshapedIndicesTensor
|
||||
shape:native_mps::getMPSShape(IntArrayRef(outgoing_gradient_shape, num_outgoing_gradient_dims))
|
||||
shape:native_mps::getMPSShape(IntArrayRef(outgoing_gradient_shape.data(), outgoing_gradient_shape.size()))
|
||||
batchDimensions:0
|
||||
mode:MPSGraphScatterModeAdd
|
||||
name:@"edb"];
|
||||
@ -316,7 +312,6 @@ Tensor embedding_dense_backward_mps(
|
||||
};
|
||||
native_mps::runMPSGraph(stream, cachedGraph->graph(), feeds, results);
|
||||
}
|
||||
free(outgoing_gradient_shape);
|
||||
return outgoing_gradient;
|
||||
}
|
||||
|
||||
|
@ -338,12 +338,9 @@ Tensor& addmm_out_mps_impl(
|
||||
cachedGraph = static_cast<CachedGraph *>(tmpCachedGraph);
|
||||
}
|
||||
|
||||
Placeholder selfPlaceholder = Placeholder(cachedGraph->selfTensor_, self,
|
||||
nullptr, true);
|
||||
Placeholder otherPlaceholder = Placeholder(cachedGraph->otherTensor_, other,
|
||||
nullptr, true);
|
||||
Placeholder biasPlaceholder = Placeholder(cachedGraph->biasTensor_, bias,
|
||||
nullptr, false);
|
||||
Placeholder selfPlaceholder = Placeholder(cachedGraph->selfTensor_, self);
|
||||
Placeholder otherPlaceholder = Placeholder(cachedGraph->otherTensor_, other);
|
||||
Placeholder biasPlaceholder = Placeholder(cachedGraph->biasTensor_, bias);
|
||||
Placeholder outputPlaceholder = Placeholder(cachedGraph->outputTensor_, output);
|
||||
|
||||
NSDictionary<MPSGraphTensor*, MPSGraphTensorData*>* feeds = @{
|
||||
|
@ -288,6 +288,7 @@ Tensor& bce_loss_out_impl(const Tensor& input, const Tensor& target,
|
||||
Placeholder lossPlaceholder = Placeholder(cachedGraph->lossTensor, loss_squeezed);
|
||||
|
||||
NSMutableDictionary *feeds = [[NSMutableDictionary new] autorelease];
|
||||
|
||||
feeds[inputPlaceholder.getMPSGraphTensor()] = inputPlaceholder.getMPSGraphTensorData();
|
||||
feeds[targetPlaceholder.getMPSGraphTensor()] = targetPlaceholder.getMPSGraphTensorData();
|
||||
if (weight.defined()) {
|
||||
|
@ -7,6 +7,7 @@
|
||||
#include <ATen/mps/MPSStream.h>
|
||||
#include <ATen/native/mps/OperationUtils.h>
|
||||
#include <ATen/native/Pool.h>
|
||||
#include <ATen/native/layer_norm.h>
|
||||
#include <torch/library.h>
|
||||
|
||||
namespace at {
|
||||
@ -69,7 +70,6 @@ std::tuple<Tensor&, Tensor&, Tensor&> batch_norm_mps_out
|
||||
Tensor& save_var) {
|
||||
|
||||
namespace native_mps = at::native::mps;
|
||||
|
||||
struct CachedGraph : public native_mps::MPSCachedGraph
|
||||
{
|
||||
CachedGraph(MPSGraph *graph) : MPSCachedGraph(graph) {}
|
||||
@ -800,5 +800,426 @@ std::tuple<Tensor, Tensor, Tensor> batch_norm_backward_mps
|
||||
|
||||
}
|
||||
|
||||
// Layer norm forward for MPS
|
||||
std::tuple<Tensor, Tensor, Tensor> layer_norm_mps(
|
||||
const Tensor& input,
|
||||
IntArrayRef normalized_shape,
|
||||
const c10::optional<Tensor>& weight_opt,
|
||||
const c10::optional<Tensor>& bias_opt,
|
||||
double eps) {
|
||||
|
||||
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
|
||||
const Tensor& weight = *weight_maybe_owned;
|
||||
c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt);
|
||||
const Tensor& bias = *bias_maybe_owned;
|
||||
|
||||
auto M_N = _check_layer_norm_inputs(input, normalized_shape, weight, bias);
|
||||
auto M = M_N.first;
|
||||
auto X = input.expect_contiguous();
|
||||
auto gamma = weight.expect_contiguous();
|
||||
|
||||
auto input_shape = input.sizes();
|
||||
const auto input_ndim = input.dim();
|
||||
const int normalized_ndim = normalized_shape.size();
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
|
||||
const int axis = input_ndim - normalized_ndim;
|
||||
at::Tensor input_reshaped = input.view({1, M, -1});
|
||||
// Unlike Batch Normalization, which applies scalar scale and bias for each
|
||||
// entire channel/plane with the affine option, Layer Normalization applies
|
||||
// per-element scale and bias. E.g. For input {N, C, H, W}, weight for
|
||||
// batchnorm has shape {C} while weight for layernorm has shape {H, W} or {W}.
|
||||
auto outputs = at::native_batch_norm(
|
||||
input_reshaped, /*weight=*/{}, /*bias=*/{}, /*running_mean=*/{},
|
||||
/*running_var=*/{}, /*training=*/true, /*momentum=*/0, eps);
|
||||
at::Tensor out = std::get<0>(outputs);
|
||||
out = out.view(input_shape);
|
||||
if (weight.defined() && bias.defined()) {
|
||||
out = bias.addcmul(out, weight, 1);
|
||||
} else if (weight.defined()) {
|
||||
out = out.mul(weight);
|
||||
} else if (bias.defined()) {
|
||||
out = out.add(bias);
|
||||
}
|
||||
at::Tensor mean = std::get<1>(outputs);
|
||||
at::Tensor variance = std::get<2>(outputs);
|
||||
|
||||
at::Tensor rstd = at::rsqrt(at::add(variance, eps));
|
||||
|
||||
std::vector<int64_t> stat_shape;
|
||||
for (const auto idx : c10::irange(axis)) {
|
||||
stat_shape.push_back(input_shape[idx]);
|
||||
}
|
||||
for (const auto idx : c10::irange(axis, input.dim())) {
|
||||
(void)idx; // Suppress unused variable
|
||||
stat_shape.push_back(1);
|
||||
}
|
||||
mean = mean.view(stat_shape);
|
||||
rstd = rstd.view(stat_shape);
|
||||
return std::make_tuple(out, mean, rstd);
|
||||
}
|
||||
|
||||
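The layer_norm_mps forward above reuses native_batch_norm on the input viewed as [1, M, -1], then rebuilds mean and rstd = 1 / sqrt(var + eps) in the layer-norm stat shape and applies gamma and beta elementwise. A small plain-C++ reference of what that computes for a single normalized row, assuming one normalized axis and the biased variance that batch-norm training statistics use:

#include <cmath>
#include <iostream>
#include <vector>

// Reference layer norm over one normalized axis:
// y = (x - mean) * rstd * gamma + beta, with rstd = 1 / sqrt(var + eps).
void layer_norm_row(const std::vector<float>& x,
                    const std::vector<float>& gamma,
                    const std::vector<float>& beta,
                    float eps, std::vector<float>& y,
                    float& mean, float& rstd) {
  const size_t n = x.size();
  mean = 0.f;
  for (float v : x) mean += v;
  mean /= n;
  float var = 0.f;
  for (float v : x) var += (v - mean) * (v - mean);
  var /= n;  // biased variance, matching training-mode batch norm statistics
  rstd = 1.f / std::sqrt(var + eps);
  y.resize(n);
  for (size_t i = 0; i < n; ++i)
    y[i] = (x[i] - mean) * rstd * gamma[i] + beta[i];
}

int main() {
  std::vector<float> x{1.f, 2.f, 3.f, 4.f}, gamma(4, 1.f), beta(4, 0.f), y;
  float mean = 0.f, rstd = 0.f;
  layer_norm_row(x, gamma, beta, 1e-5f, y, mean, rstd);
  std::cout << "mean=" << mean << " rstd=" << rstd << "\n";
  for (float v : y) std::cout << v << ' ';
  std::cout << '\n';
}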
std::tuple<Tensor, Tensor, Tensor> layer_norm_backward_mps(
|
||||
const Tensor& grad_out,
|
||||
const Tensor& input,
|
||||
IntArrayRef normalized_shape,
|
||||
const Tensor& mean,
|
||||
const Tensor& rstd,
|
||||
const c10::optional<Tensor>& weight_opt /* optional */,
|
||||
const c10::optional<Tensor>& bias_opt /* optional */,
|
||||
std::array<bool, 3> grad_input_mask) {
|
||||
|
||||
c10::MaybeOwned<Tensor> weight_maybe_owned =
|
||||
at::borrow_from_optional_tensor(weight_opt);
|
||||
const Tensor& weight = *weight_maybe_owned;
|
||||
c10::MaybeOwned<Tensor> bias_maybe_owned =
|
||||
at::borrow_from_optional_tensor(bias_opt);
|
||||
const Tensor& bias = *bias_maybe_owned;
|
||||
|
||||
auto M_N = _check_layer_norm_inputs(input, normalized_shape, weight, bias);
|
||||
auto M = M_N.first;
|
||||
auto N = M_N.second;
|
||||
auto X = input.expect_contiguous();
|
||||
auto gamma = weight.expect_contiguous();
|
||||
auto beta = bias.expect_contiguous();
|
||||
auto dOut = grad_out.expect_contiguous();
|
||||
|
||||
Tensor grad_input;
|
||||
Tensor grad_weight;
|
||||
Tensor grad_bias;
|
||||
if (grad_input_mask[0]) {
|
||||
grad_input = at::native::empty_like(
|
||||
*X,
|
||||
c10::nullopt /* dtype */,
|
||||
c10::nullopt /* layout */,
|
||||
kMPS /* device */,
|
||||
c10::nullopt /* pin_memory */,
|
||||
at::MemoryFormat::Contiguous);
|
||||
}
|
||||
if (grad_input_mask[1]) {
|
||||
grad_weight = M > 0 ? at::native::empty_like(
|
||||
*gamma,
|
||||
c10::nullopt /* dtype */,
|
||||
c10::nullopt /* layout */,
|
||||
kMPS /* device */,
|
||||
c10::nullopt /* pin_memory */,
|
||||
at::MemoryFormat::Contiguous)
|
||||
: at::native::zeros_like(
|
||||
*gamma,
|
||||
c10::nullopt /* dtype */,
|
||||
c10::nullopt /* layout */,
|
||||
kMPS /* device */,
|
||||
c10::nullopt /* pin_memory */,
|
||||
at::MemoryFormat::Contiguous);
|
||||
}
|
||||
if (grad_input_mask[2]) {
|
||||
grad_bias = M > 0 ? at::native::empty_like(
|
||||
*beta,
|
||||
c10::nullopt /* dtype */,
|
||||
c10::nullopt /* layout */,
|
||||
kMPS /* device */,
|
||||
c10::nullopt /* pin_memory */,
|
||||
at::MemoryFormat::Contiguous)
|
||||
: at::native::zeros_like(
|
||||
*beta,
|
||||
c10::nullopt /* dtype */,
|
||||
c10::nullopt /* layout */,
|
||||
kMPS /* device */,
|
||||
c10::nullopt /* pin_memory */,
|
||||
at::MemoryFormat::Contiguous);
|
||||
}
|
||||
if (M > 0) {
|
||||
|
||||
namespace native_mps = at::native::mps;
|
||||
|
||||
// Derive from MPSCachedGraph
|
||||
struct CachedGraph : public native_mps::MPSCachedGraph
|
||||
{
|
||||
CachedGraph(MPSGraph *graph) : MPSCachedGraph(graph) {}
|
||||
MPSGraphTensor* gradOutputTensor_ = nil;
|
||||
MPSGraphTensor* inputTensor_ = nil;
|
||||
MPSGraphTensor* weightTensor_ = nil;
|
||||
MPSGraphTensor* meanTensor_ = nil;
|
||||
MPSGraphTensor* rstdTensor_ = nil;
|
||||
MPSGraphTensor* gradInputTensor_ = nil;
|
||||
MPSGraphTensor* gradWeightTensor_ = nil;
|
||||
MPSGraphTensor* gradBiasTensor_ = nil;
|
||||
};
|
||||
|
||||
native_mps::MPSGraphCache* cache_ = native_mps::MPSGraphCache::getInstance();
|
||||
|
||||
auto stream = at::mps::getCurrentMPSStream();
|
||||
|
||||
const bool has_weight = (weight_opt.has_value() && weight_opt->defined());
|
||||
|
||||
if (grad_input.numel() == 0) {
|
||||
return std::make_tuple(grad_input, grad_weight, grad_bias);
|
||||
}
|
||||
|
||||
// const auto memory_format = input.suggest_memory_format();
|
||||
|
||||
@autoreleasepool {
|
||||
|
||||
MPSShape* input_shape = mps::getMPSShape(*X);
|
||||
MPSShape* gamma_shape = mps::getMPSShape(normalized_shape);
|
||||
|
||||
auto num_normalized_dims = [gamma_shape count];
|
||||
auto num_channel_dims = [input_shape count] - num_normalized_dims;
|
||||
|
||||
NSMutableArray<NSNumber*>* gamma_axes = [NSMutableArray<NSNumber*> arrayWithCapacity:num_channel_dims];
|
||||
|
||||
for(int i = 0; i < num_channel_dims; i++)
|
||||
gamma_axes[i] = [NSNumber numberWithInt:i];
|
||||
|
||||
// Axes along which to reduce to get "batch norm" gradient
|
||||
// This will be applied on shape [1, M, -1]
|
||||
NSMutableArray<NSNumber*>* bn_axes = [NSMutableArray<NSNumber*> arrayWithCapacity:num_normalized_dims];
|
||||
for(int i = 0; i < num_normalized_dims; i++)
|
||||
bn_axes[i] = [NSNumber numberWithInt:(1+1+i)];
|
||||
|
||||
// Shape of input to do "batch norm" backward
|
||||
// This is [1, M, -1]
|
||||
NSMutableArray<NSNumber*>* bn_shape = [NSMutableArray<NSNumber*> arrayWithCapacity:(num_normalized_dims+2)];
|
||||
bn_shape[0] = [NSNumber numberWithInt:1];
|
||||
bn_shape[1] = [NSNumber numberWithInt:M];
|
||||
for(int i = 0; i < num_normalized_dims; i++)
|
||||
bn_shape[i+2] = input_shape[i+num_channel_dims];
|
||||
|
||||
// Shape of mean to do "batch norm" backward
|
||||
// This is [1, M, [1,1,1..1]]
|
||||
NSMutableArray<NSNumber*>* bn_mean_shape = [NSMutableArray<NSNumber*> arrayWithCapacity:(num_normalized_dims+2)];
|
||||
bn_mean_shape[0] = [NSNumber numberWithInt:1];
|
||||
bn_mean_shape[1] = [NSNumber numberWithInt:M];
|
||||
for(int i = 0; i < num_normalized_dims; i++)
|
||||
bn_mean_shape[i+2] = [NSNumber numberWithInt:1];
|
||||
|
||||
// Shape of gamma to multiply with "batch norm" backward
|
||||
// This is [1, 1, -1]
|
||||
NSMutableArray<NSNumber*>* bn_gamma_shape = [NSMutableArray<NSNumber*> arrayWithCapacity:(num_normalized_dims+2)];
|
||||
bn_gamma_shape[0] = [NSNumber numberWithInt:1];
|
||||
bn_gamma_shape[1] = [NSNumber numberWithInt:1];
|
||||
for(int i = 0; i < num_normalized_dims; i++)
|
||||
bn_gamma_shape[i+2] = input_shape[i+num_channel_dims];
|
||||
|
||||
string key = "layer_norm_backward_mps:"
|
||||
+ std::to_string(has_weight) + ":"
|
||||
+ native_mps::getArrayRefString(normalized_shape) + ":"
|
||||
+ native_mps::getArrayRefString((*X).sizes()) + ":"
|
||||
+ native_mps::getMPSTypeString((*X).scalar_type());
|
||||
|
||||
CachedGraph* cachedGraph = static_cast<CachedGraph *>(cache_->LookUp(key));
|
||||
|
||||
if(!cachedGraph) {
|
||||
native_mps::MPSCachedGraph *tmpCachedGraph = cache_->CreateCachedGraph(key, ^ native_mps::MPSCachedGraph * () {
|
||||
|
||||
CachedGraph *newCachedGraph = nil;
|
||||
|
||||
@autoreleasepool {
|
||||
MPSGraph* mpsGraph = native_mps::make_mps_graph();
|
||||
newCachedGraph = new CachedGraph(mpsGraph);
|
||||
|
||||
MPSGraphTensor* inputTensor = native_mps::mpsGraphRankedPlaceHolder(mpsGraph, *X);
|
||||
MPSGraphTensor* gradOutputTensor = native_mps::mpsGraphRankedPlaceHolder(mpsGraph, *dOut);
|
||||
MPSGraphTensor* weightTensor = nil;
|
||||
if(has_weight)
|
||||
weightTensor = native_mps::mpsGraphRankedPlaceHolder(mpsGraph, *gamma);
|
||||
|
||||
// Mean and inv std tensors to be saved and returned
|
||||
MPSGraphTensor* meanTensor = native_mps::mpsGraphRankedPlaceHolder(mpsGraph, mean);
|
||||
MPSGraphTensor* rstdTensor = native_mps::mpsGraphRankedPlaceHolder(mpsGraph, rstd);
|
||||
|
||||
MPSGraphTensor* gradInputTensor = nil;
|
||||
MPSGraphTensor* gradWeightTensor = nil;
|
||||
MPSGraphTensor* gradBiasTensor = nil;
|
||||
|
||||
if(grad_input_mask[1]) {
|
||||
MPSGraphTensor* xMinusMean = [mpsGraph subtractionWithPrimaryTensor:inputTensor
|
||||
secondaryTensor:meanTensor
|
||||
name:nil];
|
||||
MPSGraphTensor* bnForwardTensor = [mpsGraph multiplicationWithPrimaryTensor:xMinusMean
|
||||
secondaryTensor:rstdTensor
|
||||
name:nil];
|
||||
MPSGraphTensor* gradBnMulTensor = [mpsGraph multiplicationWithPrimaryTensor:bnForwardTensor
|
||||
secondaryTensor:gradOutputTensor
|
||||
name:nil];
|
||||
gradWeightTensor = [mpsGraph reductionSumWithTensor:gradBnMulTensor
|
||||
axes:gamma_axes
|
||||
name:nil];
|
||||
}
|
||||
if(grad_input_mask[2]) {
|
||||
gradBiasTensor = [mpsGraph reductionSumWithTensor:gradOutputTensor
|
||||
axes:gamma_axes
|
||||
name:nil];
|
||||
}
|
||||
if(grad_input_mask[0]) {
|
||||
|
||||
// Reshape input to [1, M, -1]
|
||||
// Reshape mean and rstd to [1, M, -1]
|
||||
// Reshape gamma to [1, 1, -1] (-1 has N dims)
|
||||
|
||||
MPSGraphTensor* bnInputTensor = [mpsGraph reshapeTensor:inputTensor
|
||||
withShape:bn_shape
|
||||
name:nil];
|
||||
MPSGraphTensor* bnGradOutputTensor = [mpsGraph reshapeTensor:gradOutputTensor
|
||||
withShape:bn_shape
|
||||
name:nil];
|
||||
// Do this at the end
|
||||
if(has_weight) {
|
||||
MPSGraphTensor* bnGammaTensor = [mpsGraph reshapeTensor:weightTensor
|
||||
withShape:bn_gamma_shape
|
||||
name:nil];
|
||||
bnGradOutputTensor = [mpsGraph multiplicationWithPrimaryTensor:bnGradOutputTensor
|
||||
secondaryTensor:bnGammaTensor
|
||||
name:nil];
|
||||
}
|
||||
MPSGraphTensor* bnMeanTensor = [mpsGraph reshapeTensor:meanTensor
|
||||
withShape:bn_mean_shape
|
||||
name:nil];
|
||||
MPSGraphTensor* bnRstdTensor = [mpsGraph reshapeTensor:rstdTensor
|
||||
withShape:bn_mean_shape
|
||||
name:nil];
|
||||
|
||||
MPSGraphTensor* mulTensor = [mpsGraph constantWithScalar:N
|
||||
shape:@[@1]
|
||||
dataType:MPSDataTypeInt32];
|
||||
|
||||
MPSGraphTensor* numberToReduceTensor = mulTensor;
|
||||
|
||||
MPSGraphTensor* cast2Tensor = [mpsGraph castTensor:numberToReduceTensor
|
||||
toType:bnInputTensor.dataType
|
||||
name:@"cast2Tensor"];
|
||||
|
||||
MPSGraphTensor* sizeReciprocalTensor = [mpsGraph reciprocalWithTensor:cast2Tensor
|
||||
name:nil];
|
||||
|
||||
// TODO: Reduce redundant computation
|
||||
MPSGraphTensor* xMinusMean = [mpsGraph subtractionWithPrimaryTensor:bnInputTensor
|
||||
secondaryTensor:bnMeanTensor
|
||||
name:nil];
|
||||
|
||||
MPSGraphTensor* normalizedTensor = [mpsGraph multiplicationWithPrimaryTensor:xMinusMean
|
||||
secondaryTensor:bnRstdTensor
|
||||
name:nil];
|
||||
|
||||
MPSGraphTensor* bnGradMulTensor = [mpsGraph multiplicationWithPrimaryTensor:bnGradOutputTensor
|
||||
secondaryTensor:normalizedTensor
|
||||
name:nil];
|
||||
|
||||
MPSGraphTensor* gammaGradient = [mpsGraph reductionSumWithTensor:bnGradMulTensor
|
||||
axes:bn_axes
|
||||
name:nil];
|
||||
|
||||
MPSGraphTensor* betaGradient = [mpsGraph reductionSumWithTensor:bnGradOutputTensor
|
||||
axes:bn_axes
|
||||
name:nil];
|
||||
|
||||
MPSGraphTensor* gradient1 = [mpsGraph multiplicationWithPrimaryTensor:bnGradOutputTensor
|
||||
secondaryTensor:bnRstdTensor
|
||||
name:nil];
|
||||
|
||||
MPSGraphTensor* gradient2_1 = [mpsGraph multiplicationWithPrimaryTensor:sizeReciprocalTensor
|
||||
secondaryTensor:xMinusMean
|
||||
name:nil];
|
||||
|
||||
// reverseVariance is square of rstd
|
||||
MPSGraphTensor* reverseVariance = [mpsGraph squareWithTensor:bnRstdTensor
|
||||
name:nil];
|
||||
MPSGraphTensor* gradient2_2 = [mpsGraph multiplicationWithPrimaryTensor:gammaGradient
|
||||
secondaryTensor:reverseVariance
|
||||
name:nil];
|
||||
|
||||
MPSGraphTensor* gradient2 = [mpsGraph multiplicationWithPrimaryTensor:gradient2_1
|
||||
secondaryTensor:gradient2_2
|
||||
name:nil];
|
||||
|
||||
MPSGraphTensor* gradient3_1 = [mpsGraph multiplicationWithPrimaryTensor:sizeReciprocalTensor
|
||||
secondaryTensor:betaGradient
|
||||
name:nil];
|
||||
|
||||
MPSGraphTensor* gradient3 = [mpsGraph multiplicationWithPrimaryTensor:gradient3_1
|
||||
secondaryTensor:bnRstdTensor
|
||||
name:nil];
|
||||
|
||||
MPSGraphTensor* gradient4 = [mpsGraph subtractionWithPrimaryTensor:gradient1
|
||||
secondaryTensor:gradient2
|
||||
name:nil];
|
||||
|
||||
MPSGraphTensor* gradient = [mpsGraph subtractionWithPrimaryTensor:gradient4
|
||||
secondaryTensor:gradient3
|
||||
name:nil];
|
||||
|
||||
gradInputTensor = [mpsGraph reshapeTensor:gradient
|
||||
withShape:input_shape
|
||||
name:nil];
|
||||
|
||||
}
|
||||
|
||||
if(grad_input_mask[1]) {
|
||||
gradWeightTensor = [mpsGraph reshapeTensor:gradWeightTensor
|
||||
withShape:gamma_shape
|
||||
name:nil];
|
||||
}
|
||||
if(grad_input_mask[2]) {
|
||||
gradBiasTensor = [mpsGraph reshapeTensor:gradBiasTensor
|
||||
withShape:gamma_shape
|
||||
name:nil];
|
||||
}
|
||||
|
||||
newCachedGraph->gradOutputTensor_ = gradOutputTensor;
|
||||
newCachedGraph->inputTensor_ = inputTensor;
|
||||
newCachedGraph->weightTensor_ = weightTensor;
|
||||
newCachedGraph->meanTensor_ = meanTensor;
|
||||
newCachedGraph->rstdTensor_ = rstdTensor;
|
||||
newCachedGraph->gradInputTensor_ = gradInputTensor;
|
||||
newCachedGraph->gradWeightTensor_ = gradWeightTensor;
|
||||
newCachedGraph->gradBiasTensor_ = gradBiasTensor;
|
||||
}
|
||||
return newCachedGraph;
|
||||
});
|
||||
cachedGraph = static_cast<CachedGraph *>(tmpCachedGraph);
|
||||
}
|
||||
|
||||
auto inputPlaceholder = native_mps::Placeholder(cachedGraph->inputTensor_, *X);
|
||||
auto gradOutputPlaceholder = native_mps::Placeholder(cachedGraph->gradOutputTensor_, *dOut);
|
||||
auto weightPlaceholder = native_mps::Placeholder();
|
||||
if(has_weight)
|
||||
weightPlaceholder = native_mps::Placeholder(cachedGraph->weightTensor_, *gamma);
|
||||
auto saveMeanPlaceholder = native_mps::Placeholder(cachedGraph->meanTensor_, mean);
|
||||
auto saveVarPlaceholder = native_mps::Placeholder(cachedGraph->rstdTensor_, rstd);
|
||||
|
||||
auto gradInputPlaceholder = native_mps::Placeholder();
|
||||
if(grad_input_mask[0])
|
||||
gradInputPlaceholder = native_mps::Placeholder(cachedGraph->gradInputTensor_, grad_input);
|
||||
auto gradWeightPlaceholder = native_mps::Placeholder();
|
||||
if(grad_input_mask[1])
|
||||
gradWeightPlaceholder = native_mps::Placeholder(cachedGraph->gradWeightTensor_, grad_weight);
|
||||
auto gradBiasPlaceholder = native_mps::Placeholder();
|
||||
if(grad_input_mask[2])
|
||||
gradBiasPlaceholder = native_mps::Placeholder(cachedGraph->gradBiasTensor_, grad_bias);
|
||||
|
||||
NSMutableDictionary *feeds = [[NSMutableDictionary new] autorelease];
|
||||
feeds[inputPlaceholder.getMPSGraphTensor()] = inputPlaceholder.getMPSGraphTensorData();
|
||||
feeds[gradOutputPlaceholder.getMPSGraphTensor()] = gradOutputPlaceholder.getMPSGraphTensorData();
|
||||
if(has_weight)
|
||||
feeds[weightPlaceholder.getMPSGraphTensor()] = weightPlaceholder.getMPSGraphTensorData();
|
||||
feeds[saveMeanPlaceholder.getMPSGraphTensor()] = saveMeanPlaceholder.getMPSGraphTensorData();
|
||||
feeds[saveVarPlaceholder.getMPSGraphTensor()] = saveVarPlaceholder.getMPSGraphTensorData();
|
||||
|
||||
NSMutableDictionary *results = [[NSMutableDictionary new] autorelease];
|
||||
if(grad_input_mask[0])
|
||||
results[gradInputPlaceholder.getMPSGraphTensor()] = gradInputPlaceholder.getMPSGraphTensorData();
|
||||
if(grad_input_mask[1])
|
||||
results[gradWeightPlaceholder.getMPSGraphTensor()] = gradWeightPlaceholder.getMPSGraphTensorData();
|
||||
if(grad_input_mask[2])
|
||||
results[gradBiasPlaceholder.getMPSGraphTensor()] = gradBiasPlaceholder.getMPSGraphTensorData();
|
||||
|
||||
native_mps::runMPSGraph(stream, cachedGraph->graph(), feeds, results);
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
return std::make_tuple(std::move(grad_input), std::move(grad_weight), std::move(grad_bias));
|
||||
|
||||
}
|
||||
|
||||
} // namespace native
|
||||
} // namespace at
|
||||
|
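For reference (outside this diff), a minimal sketch of exercising the layer_norm backward path added above and checking it against the CPU implementation; it assumes an MPS-capable macOS build, and the shapes and tolerance are illustrative only:

import torch
import torch.nn.functional as F

# CPU reference and MPS copies of the same leaf tensors
x_cpu = torch.randn(4, 8, 16, requires_grad=True)
w_cpu = torch.randn(16, requires_grad=True)
b_cpu = torch.randn(16, requires_grad=True)
x_mps = x_cpu.detach().to("mps").requires_grad_(True)
w_mps = w_cpu.detach().to("mps").requires_grad_(True)
b_mps = b_cpu.detach().to("mps").requires_grad_(True)

# Forward + backward on both devices; normalized_shape is the last dim here
F.layer_norm(x_cpu, (16,), w_cpu, b_cpu).sum().backward()
F.layer_norm(x_mps, (16,), w_mps, b_mps).sum().backward()

# grad_input, grad_weight and grad_bias should agree within float32 tolerance
for p_cpu, p_mps in ((x_cpu, x_mps), (w_cpu, w_mps), (b_cpu, b_mps)):
    assert torch.allclose(p_cpu.grad, p_mps.grad.cpu(), atol=1e-4)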
@ -15,26 +15,21 @@ Tensor& addc_mul_div_out_mps(const Tensor& self,
|
||||
const bool is_div,
|
||||
const string op_name)
|
||||
{
|
||||
using scalar_t = double;
|
||||
scalar_t value_scalar = value_opt.to<scalar_t>();
|
||||
if (&output != &self) {
|
||||
output.resize_(output.sizes());
|
||||
}
|
||||
TORCH_CHECK(output.is_mps());
|
||||
MPSStream* mpsStream = getCurrentMPSStream();
|
||||
|
||||
// Derive from MPSCachedGraph
|
||||
struct CachedGraph : public MPSCachedGraph
|
||||
{
|
||||
CachedGraph(MPSGraph *graph) : MPSCachedGraph(graph) {}
|
||||
MPSGraphTensor *inputTensor = nil, *outputTensor = nil;
|
||||
MPSGraphTensor *firstTensor = nil, *secondTensor = nil;
|
||||
MPSGraphTensor *firstTensor = nil, *secondTensor = nil, *valueTensor = nil;
|
||||
};
|
||||
MPSGraphCache* cache_ = MPSGraphCache::getInstance();
|
||||
|
||||
@autoreleasepool {
|
||||
string key = op_name + to_string(value_scalar)
|
||||
+ getTensorsStringKey({self, tensor1, tensor2})+ ":"
|
||||
+ getMPSTypeString(value_opt.type());
|
||||
string key = op_name + getTensorsStringKey({self, tensor1, tensor2}, false);
|
||||
|
||||
CachedGraph* cachedGraph = static_cast<CachedGraph *>(cache_->LookUp(key));
|
||||
|
||||
@ -49,6 +44,7 @@ Tensor& addc_mul_div_out_mps(const Tensor& self,
|
||||
newCachedGraph->inputTensor = mpsGraphRankedPlaceHolder(mpsGraph, self);
|
||||
newCachedGraph->firstTensor = mpsGraphRankedPlaceHolder(mpsGraph, tensor1);
|
||||
newCachedGraph->secondTensor = mpsGraphRankedPlaceHolder(mpsGraph, tensor2);
|
||||
newCachedGraph->valueTensor = mpsGraphUnrankedPlaceHolder(mpsGraph, getMPSScalarType(self.scalar_type()));
|
||||
|
||||
// the tensor to be optionally multiplied by value_scalar
|
||||
MPSGraphTensor *multiplicandTensor = nil;
|
||||
@ -62,15 +58,9 @@ Tensor& addc_mul_div_out_mps(const Tensor& self,
|
||||
name:nil];
|
||||
}
|
||||
// the tensor to be added to input_tensor
|
||||
MPSGraphTensor *addendTensor = multiplicandTensor;
|
||||
// if value_scalar is 1.0, then we don't bother adding another multiply to graph
|
||||
if (value_scalar != 1.0) {
|
||||
MPSGraphTensor* valueTensor = [mpsGraph constantWithScalar:value_scalar
|
||||
dataType:getMPSScalarType(value_opt.type())];
|
||||
addendTensor = [mpsGraph multiplicationWithPrimaryTensor:multiplicandTensor
|
||||
secondaryTensor:valueTensor
|
||||
name:nil];
|
||||
}
|
||||
MPSGraphTensor *addendTensor = [mpsGraph multiplicationWithPrimaryTensor:multiplicandTensor
|
||||
secondaryTensor:newCachedGraph->valueTensor
|
||||
name:nil];
|
||||
newCachedGraph->outputTensor = [mpsGraph additionWithPrimaryTensor:newCachedGraph->inputTensor
|
||||
secondaryTensor:addendTensor
|
||||
name:nil];
|
||||
@ -87,18 +77,18 @@ Tensor& addc_mul_div_out_mps(const Tensor& self,
|
||||
Placeholder outputPlaceholder = Placeholder(cachedGraph->outputTensor, output);
|
||||
|
||||
// Create dictionary of inputs and outputs
|
||||
// Utility to dump out graph : [mpsGraph dump];
|
||||
NSDictionary<MPSGraphTensor*, MPSGraphTensorData*>* feeds = @{
|
||||
selfPlaceholder.getMPSGraphTensor() : selfPlaceholder.getMPSGraphTensorData(),
|
||||
tensor1Placeholder.getMPSGraphTensor() : tensor1Placeholder.getMPSGraphTensorData(),
|
||||
tensor2Placeholder.getMPSGraphTensor() : tensor2Placeholder.getMPSGraphTensorData()
|
||||
tensor2Placeholder.getMPSGraphTensor() : tensor2Placeholder.getMPSGraphTensorData(),
|
||||
cachedGraph->valueTensor : getMPSGraphTensorFromScalar(mpsStream, value_opt, getMPSScalarType(self.scalar_type())),
|
||||
};
|
||||
|
||||
NSDictionary<MPSGraphTensor*, MPSGraphTensorData*>* results = @{
|
||||
outputPlaceholder.getMPSGraphTensor() : outputPlaceholder.getMPSGraphTensorData()
|
||||
};
|
||||
|
||||
runMPSGraph(getCurrentMPSStream(), cachedGraph->graph(), feeds, results);
|
||||
runMPSGraph(mpsStream, cachedGraph->graph(), feeds, results);
|
||||
}
|
||||
|
||||
return output;
|
||||
|
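For context (not part of the change), a short sketch of the ops that route through addc_mul_div_out_mps above; with value now fed as a graph input rather than baked into the cache key, calls that differ only in the scalar can reuse one cached graph. Assumes MPS is available; the scalars are illustrative:

import torch

t  = torch.rand(3, 3, device="mps")
t1 = torch.rand(3, 3, device="mps")
t2 = torch.rand(3, 3, device="mps")

out_mul = torch.addcmul(t, t1, t2, value=0.5)  # t + 0.5 * t1 * t2
out_div = torch.addcdiv(t, t1, t2, value=2.0)  # t + 2.0 * t1 / t2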
@ -13,6 +13,43 @@
|
||||
namespace at {
|
||||
namespace native {
|
||||
|
||||
namespace {
|
||||
struct RangeCachedGraph : public mps::MPSCachedGraph {
|
||||
API_AVAILABLE(macosx(12.3))
|
||||
RangeCachedGraph(MPSGraph *mpsGraph, MPSDataType dataType, int32_t shapeVal, bool needsClamp = false, bool startLessEnd = false): MPSCachedGraph(mpsGraph) {
|
||||
@autoreleasepool {
|
||||
auto shapeTensor = [mpsGraph constantWithData:[NSData dataWithBytes:&shapeVal length:sizeof(int32_t)]
|
||||
shape: @[@1]
|
||||
dataType:MPSDataTypeInt32];
|
||||
auto coordsTensor = [mpsGraph coordinateAlongAxis:0
|
||||
withShapeTensor:shapeTensor
|
||||
name:nil];
|
||||
coordsTensor = [mpsGraph castTensor:coordsTensor toType:dataType name:@"coords"];
|
||||
|
||||
startTensor = mps::mpsGraphRankedPlaceHolder(mpsGraph, dataType, @[@1]);
|
||||
multiplyTensor = mps::mpsGraphRankedPlaceHolder(mpsGraph, dataType, @[@1]);
|
||||
auto scaledCoords = [mpsGraph multiplicationWithPrimaryTensor:coordsTensor
|
||||
secondaryTensor:multiplyTensor
|
||||
name:nil];
|
||||
outputTensor = [mpsGraph additionWithPrimaryTensor:scaledCoords
|
||||
secondaryTensor:startTensor
|
||||
name:nil];
|
||||
if (needsClamp) {
|
||||
endTensor = mps::mpsGraphRankedPlaceHolder(mpsGraph, dataType, @[@1]);
|
||||
outputTensor = [mpsGraph clampWithTensor:outputTensor
|
||||
minValueTensor: startLessEnd? startTensor : endTensor
|
||||
maxValueTensor: startLessEnd? endTensor : startTensor
|
||||
name: nil];
|
||||
}
|
||||
}
|
||||
}
|
||||
MPSGraphTensor *startTensor = nil;
|
||||
MPSGraphTensor *endTensor = nil;
|
||||
MPSGraphTensor *multiplyTensor = nil;
|
||||
MPSGraphTensor *outputTensor = nil;
|
||||
};
|
||||
|
||||
} // anonymous namespace
|
||||
|
||||
Tensor& arange_mps_out(const Scalar& start, const Scalar& end, const Scalar& step, Tensor& result) {
|
||||
AT_DISPATCH_MPS_TYPES(result.scalar_type(), "arange_mps", [&]() {
|
||||
@ -53,8 +90,30 @@ Tensor& arange_mps_out(const Scalar& start, const Scalar& end, const Scalar& ste
|
||||
}
|
||||
bool is_contiguous = result.is_contiguous();
|
||||
Tensor r = !is_contiguous ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;
|
||||
using namespace mps;
|
||||
auto cache_ = MPSGraphCache::getInstance();
|
||||
auto stream = getCurrentMPSStream();
|
||||
auto mpsDataType = getMPSDataType(result.scalar_type());
|
||||
@autoreleasepool {
|
||||
string key = "arange_mps_out:" + getTensorsStringKey({result}) + ":" + to_string(size);
|
||||
auto cachedGraph = static_cast<RangeCachedGraph *>(cache_->LookUp(key));
|
||||
if (!cachedGraph) {
|
||||
auto *tmpCachedGraph = cache_->CreateCachedGraph(key, ^ MPSCachedGraph *() {
|
||||
auto mpsGraph = make_mps_graph();
|
||||
return new RangeCachedGraph(mpsGraph, mpsDataType, size);
|
||||
});
|
||||
cachedGraph = static_cast<RangeCachedGraph *>(tmpCachedGraph);
|
||||
}
|
||||
Placeholder outputPlaceholder = Placeholder(cachedGraph->outputTensor, r);
|
||||
NSMutableDictionary *feeds = [[NSMutableDictionary new] autorelease];
|
||||
feeds[cachedGraph->startTensor] = getMPSGraphTensorFromScalar(stream, start, mpsDataType);
|
||||
feeds[cachedGraph->multiplyTensor] = getMPSGraphTensorFromScalar(stream, Scalar(step), mpsDataType);
|
||||
|
||||
//TODO: Add arange Metal kernel.
|
||||
NSDictionary<MPSGraphTensor*, MPSGraphTensorData*>* results = @{
|
||||
outputPlaceholder.getMPSGraphTensor() : outputPlaceholder.getMPSGraphTensorData()
|
||||
};
|
||||
runMPSGraph(stream, cachedGraph->graph(), feeds, results);
|
||||
}
|
||||
|
||||
if(!is_contiguous) {
|
||||
result.copy_(r);
|
||||
@ -63,4 +122,69 @@ Tensor& arange_mps_out(const Scalar& start, const Scalar& end, const Scalar& ste
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
Tensor& linspace_out_mps(const Scalar& start, const Scalar& end, int64_t steps, Tensor& result) {
|
||||
using namespace mps;
|
||||
|
||||
TORCH_CHECK(steps >= 0, "number of steps must be non-negative");
|
||||
if (result.numel() != steps) {
|
||||
result.resize_({steps});
|
||||
}
|
||||
|
||||
if (steps == 0) {
|
||||
// skip
|
||||
} else if (steps == 1) {
|
||||
result.fill_(start);
|
||||
} else {
|
||||
Tensor r = result.is_contiguous() ? result : result.contiguous();
|
||||
|
||||
// Do the MPSGraph computation
|
||||
MPSGraphCache* cache_ = MPSGraphCache::getInstance();
|
||||
MPSStream* stream = getCurrentMPSStream();
|
||||
|
||||
bool start_less_end = (start.to<double>() <= end.to<double>());
|
||||
|
||||
@autoreleasepool {
|
||||
string key = "linspace_out_mps:" + getTensorsStringKey({result}) + ":" + to_string(steps) + to_string(start_less_end);
|
||||
RangeCachedGraph* cachedGraph = static_cast<RangeCachedGraph *>(cache_->LookUp(key));
|
||||
|
||||
if(!cachedGraph) {
|
||||
MPSCachedGraph *tmpCachedGraph = cache_->CreateCachedGraph(key, ^ MPSCachedGraph * () {
|
||||
|
||||
RangeCachedGraph *newCachedGraph = nil;
|
||||
|
||||
@autoreleasepool {
|
||||
MPSGraph* mpsGraph = make_mps_graph();
|
||||
newCachedGraph = new RangeCachedGraph(mpsGraph, MPSDataTypeFloat32, steps, true, start_less_end);
|
||||
|
||||
if(getMPSDataType(result.scalar_type()) != MPSDataTypeFloat32) {
|
||||
newCachedGraph->outputTensor = [mpsGraph castTensor:newCachedGraph->outputTensor toType:getMPSDataType(result.scalar_type()) name:@"output"];
|
||||
}
|
||||
}
|
||||
return newCachedGraph;
|
||||
});
|
||||
cachedGraph = static_cast<RangeCachedGraph *>(tmpCachedGraph);
|
||||
}
|
||||
|
||||
NSMutableDictionary *feeds = [[NSMutableDictionary new] autorelease];
|
||||
auto multiplyScalar = (end.to<double>() - start.to<double>()) / ((double)steps - 1.0f);
|
||||
Placeholder outputPlaceholder = Placeholder(cachedGraph->outputTensor, r);
|
||||
|
||||
// Create dictionary of inputs and outputs
|
||||
feeds[cachedGraph->startTensor] = getMPSGraphTensorFromScalar(stream, start, MPSDataTypeFloat32);
|
||||
feeds[cachedGraph->endTensor] = getMPSGraphTensorFromScalar(stream, end, MPSDataTypeFloat32);
|
||||
feeds[cachedGraph->multiplyTensor] = getMPSGraphTensorFromScalar(stream, Scalar(multiplyScalar), MPSDataTypeFloat32);
|
||||
|
||||
NSDictionary<MPSGraphTensor*, MPSGraphTensorData*>* results = @{
|
||||
outputPlaceholder.getMPSGraphTensor() : outputPlaceholder.getMPSGraphTensorData()
|
||||
};
|
||||
runMPSGraph(stream, cachedGraph->graph(), feeds, results);
|
||||
}
|
||||
|
||||
if (!result.is_contiguous()) {
|
||||
result.copy_(r);
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
}} // namespace at::native
|
||||
|
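For context (not part of the change), the factories backed by RangeCachedGraph above compute start + step * coords and, for linspace, clamp the result to the [start, end] interval; a minimal usage sketch, assuming MPS is available:

import torch

r = torch.arange(0, 10, 2, device="mps")             # 0, 2, 4, 6, 8
l = torch.linspace(1.0, 0.0, steps=5, device="mps")  # descending range exercises the clamp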
@ -88,6 +88,43 @@ void set_axes(NSMutableArray<NSNumber *> * &axes,
|
||||
}
|
||||
}
|
||||
|
||||
// Helper function to prepare axes and tensor shapes
|
||||
void set_axes_and_shapes(const Tensor& input_t,
|
||||
IntArrayRef dims,
|
||||
NSMutableArray<NSNumber*> * &axes,
|
||||
NSMutableArray<NSNumber*> * &apparent_input_shape,
|
||||
NSMutableArray<NSNumber*> * &apparent_output_shape,
|
||||
NSMutableArray<NSNumber*> * &output_shape) {
|
||||
|
||||
IntArrayRef input_shape = input_t.sizes();
|
||||
|
||||
int64_t num_input_dims = input_shape.size();
|
||||
int64_t num_reduce_dims = dims.size();
|
||||
int64_t num_output_dims;
|
||||
|
||||
num_output_dims = num_reduce_dims == 0 ? 1 : num_input_dims;
|
||||
|
||||
// Reduction axes
|
||||
set_axes(axes, num_reduce_dims, dims, input_shape.size());
|
||||
|
||||
// Shapes
|
||||
set_apparent_shapes(apparent_output_shape,
|
||||
apparent_input_shape,
|
||||
num_reduce_dims,
|
||||
num_input_dims,
|
||||
num_output_dims,
|
||||
input_shape,
|
||||
axes);
|
||||
|
||||
// Squeeze dims for output shape
|
||||
output_shape = [NSMutableArray<NSNumber*> arrayWithCapacity:0];
|
||||
for(int i=0; i < num_output_dims; i++) {
|
||||
if([apparent_output_shape[i] longValue] != 1) {
|
||||
[output_shape addObject:apparent_output_shape[i]];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void reduction_out_mps
|
||||
(const Tensor& input_t,
|
||||
IntArrayRef dim,
|
||||
@ -107,6 +144,13 @@ void reduction_out_mps
|
||||
|
||||
namespace native_mps = at::native::mps;
|
||||
|
||||
NSMutableArray<NSNumber*> *axes = nil;
|
||||
NSMutableArray<NSNumber*> *apparent_input_shape = nil;
|
||||
NSMutableArray<NSNumber*> *apparent_output_shape = nil;
|
||||
NSMutableArray<NSNumber*> *output_shape = nil;
|
||||
|
||||
set_axes_and_shapes(input_t, dim, axes, apparent_input_shape, apparent_output_shape, output_shape);
|
||||
|
||||
// Derive from MPSCachedGraph
|
||||
struct CachedGraph : public native_mps::MPSCachedGraph
|
||||
{
|
||||
@ -117,27 +161,6 @@ void reduction_out_mps
|
||||
|
||||
native_mps::MPSGraphCache* cache_ = native_mps::MPSGraphCache::getInstance();
|
||||
|
||||
int64_t num_input_dims = input_shape.size();
|
||||
int64_t num_reduce_dims = dim.size();
|
||||
int64_t num_output_dims;
|
||||
|
||||
// For output shape calculation, assume that keepdim is true
|
||||
num_output_dims = num_input_dims;
|
||||
NSMutableArray<NSNumber*> *apparent_output_shape = nil;
|
||||
NSMutableArray<NSNumber*> *apparent_input_shape = nil;
|
||||
|
||||
// Reduction axes
|
||||
NSMutableArray<NSNumber *> *axes;
|
||||
set_axes(axes, num_reduce_dims, dim, input_shape.size());
|
||||
|
||||
set_apparent_shapes(apparent_output_shape,
|
||||
apparent_input_shape,
|
||||
num_reduce_dims,
|
||||
num_input_dims,
|
||||
num_output_dims,
|
||||
input_shape,
|
||||
axes);
|
||||
|
||||
if (output_t.numel() == 0 || input_t.numel() == 0) {
|
||||
return;
|
||||
}
|
||||
@ -173,22 +196,34 @@ void reduction_out_mps
|
||||
|
||||
MPSGraphTensor* castOutputTensor = nil;
|
||||
|
||||
if(reduction_type == "sum")
|
||||
if(reduction_type == "sum") {
|
||||
castOutputTensor = [mpsGraph reductionSumWithTensor:castInputTensor
|
||||
axes:axes
|
||||
name:nil];
|
||||
else if(reduction_type == "prod")
|
||||
} else if(reduction_type == "prod") {
|
||||
castOutputTensor = [mpsGraph reductionProductWithTensor:castInputTensor
|
||||
axes:axes
|
||||
name:nil];
|
||||
else if(reduction_type == "mean")
|
||||
} else if(reduction_type == "mean") {
|
||||
castOutputTensor = [mpsGraph meanOfTensor:inputTensor
|
||||
axes:axes
|
||||
name:nil];
|
||||
} else if(reduction_type == "count_nonzero") {
|
||||
MPSGraphTensor* zeros = [mpsGraph constantWithScalar:0
|
||||
dataType:castInputTensor.dataType];
|
||||
|
||||
MPSGraphTensor* nonZeros = [mpsGraph notEqualWithPrimaryTensor:castInputTensor
|
||||
secondaryTensor:zeros
|
||||
name:nil];
|
||||
|
||||
castOutputTensor = [mpsGraph reductionSumWithTensor:nonZeros
|
||||
axes:axes
|
||||
name:nil];
|
||||
}
|
||||
|
||||
MPSGraphTensor* outputTensor = nil;
|
||||
|
||||
if(input_t.scalar_type() != ScalarType::Float)
|
||||
if(output_t.scalar_type() != ScalarType::Float)
|
||||
outputTensor = [mpsGraph castTensor:castOutputTensor
|
||||
toType:(native_mps::getMPSDataType(output_t.scalar_type()))
|
||||
name:@"outputTensor"];
|
||||
@ -281,6 +316,35 @@ Tensor prod_mps(const Tensor &self, c10::optional<ScalarType> opt_dtype) {
|
||||
return output_t;
|
||||
}
|
||||
|
||||
|
||||
Tensor count_nonzero_mps(const Tensor& self, IntArrayRef dims){
|
||||
NSMutableArray<NSNumber*> *axes = nil;
|
||||
NSMutableArray<NSNumber*> *apparent_input_shape = nil;
|
||||
NSMutableArray<NSNumber*> *apparent_output_shape = nil;
|
||||
NSMutableArray<NSNumber*> *output_shape = nil;
|
||||
|
||||
set_axes_and_shapes(self, dims, axes, apparent_input_shape, apparent_output_shape, output_shape);
|
||||
|
||||
int64_t* raw_output_shape = (int64_t *)malloc([output_shape count] * sizeof(int64_t));
|
||||
for(int i=0; i < [output_shape count]; i++) {
|
||||
raw_output_shape[i] = [output_shape[i] longValue];
|
||||
}
|
||||
|
||||
Tensor output_t = at::native::empty_mps(
|
||||
IntArrayRef(raw_output_shape, [output_shape count]),
|
||||
ScalarType::Long,
|
||||
c10::nullopt,
|
||||
kMPS,
|
||||
c10::nullopt,
|
||||
c10::nullopt);
|
||||
|
||||
reduction_out_mps(self, dims, false, self.scalar_type(), const_cast<Tensor&>(output_t), "count_nonzero", "count_nonzero_mps");
|
||||
|
||||
free(raw_output_shape);
|
||||
|
||||
return output_t;
|
||||
}
|
||||
|
||||
TORCH_IMPL_FUNC(mean_out_mps)
|
||||
(const Tensor& input_t,
|
||||
IntArrayRef dim,
|
||||
|
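For context (not part of the change), count_nonzero_mps above reduces with the new "count_nonzero" mode and returns an int64 tensor with the reduced dimensions squeezed out; a small sketch, assuming MPS is available:

import torch

x = torch.tensor([[0.0, 1.0, 2.0],
                  [0.0, 0.0, 3.0]], device="mps")
total   = torch.count_nonzero(x)         # tensor(3)
per_col = torch.count_nonzero(x, dim=0)  # tensor([0, 1, 2])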
@ -74,7 +74,6 @@ Tensor repeat_mps(const Tensor& self, IntArrayRef repeats) {
|
||||
|
||||
TORCH_CHECK(repeats.size() >= (size_t)self.dim(),
|
||||
"Number of dimensions of repeat dims can not be smaller than number of dimensions of tensor");
|
||||
|
||||
struct CachedGraph : public MPSCachedGraph
|
||||
{
|
||||
CachedGraph(MPSGraph *graph) : MPSCachedGraph(graph) {}
|
||||
|
@ -13,9 +13,8 @@ namespace mps {
|
||||
|
||||
typedef MPSGraphTensor* (^UnaryOpBlock)(MPSGraph*, MPSGraphTensor*);
|
||||
|
||||
void unary_op(const Tensor& self_t, const Tensor& output, std::string op_name, UnaryOpBlock unaryBlock)
|
||||
void unary_op(const Tensor& self, const Tensor& output, std::string op_name, UnaryOpBlock unaryBlock)
|
||||
{
|
||||
Tensor self = self_t.contiguous(at::MemoryFormat::Contiguous);
|
||||
if (!output.is_same_size(self)) {
|
||||
output.resize_(self.sizes());
|
||||
}
|
||||
@ -26,7 +25,7 @@ void unary_op(const Tensor& self_t, const Tensor& output, std::string op_name, U
|
||||
};
|
||||
MPSGraphCache* cache_ = MPSGraphCache::getInstance();
|
||||
@autoreleasepool {
|
||||
string key = op_name + getTensorsStringKey({self});
|
||||
string key = op_name + getTensorsStringKey({self}, /*use_scalar_value*/ false);
|
||||
CachedGraph* cachedGraph = static_cast<CachedGraph *>(cache_->LookUp(key));
|
||||
|
||||
if(!cachedGraph) {
|
||||
|
@ -1487,6 +1487,7 @@
|
||||
dispatch:
|
||||
CPU: count_nonzero_cpu
|
||||
CUDA: count_nonzero_cuda
|
||||
MPS: count_nonzero_mps
|
||||
|
||||
- func: count_nonzero(Tensor self, int? dim=None) -> Tensor
|
||||
variants: function, method
|
||||
@ -2176,11 +2177,13 @@
|
||||
dispatch:
|
||||
CPU: eye_out_cpu
|
||||
CUDA: eye_out_cuda
|
||||
MPS: eye_out_mps
|
||||
|
||||
- func: eye.m_out(int n, int m, *, Tensor(a!) out) -> Tensor(a!)
|
||||
dispatch:
|
||||
CPU: eye_out_cpu
|
||||
CUDA: eye_out_cuda
|
||||
MPS: eye_out_mps
|
||||
|
||||
- func: flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)
|
||||
variants: function, method
|
||||
@ -2680,12 +2683,14 @@
|
||||
dispatch:
|
||||
CPU: layer_norm_cpu
|
||||
CUDA: layer_norm_cuda
|
||||
MPS: layer_norm_mps
|
||||
CompositeImplicitAutograd: math_native_layer_norm
|
||||
|
||||
- func: native_layer_norm_backward(Tensor grad_out, Tensor input, int[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
|
||||
dispatch:
|
||||
CPU: layer_norm_backward_cpu
|
||||
CUDA: layer_norm_backward_cuda
|
||||
MPS: layer_norm_backward_mps
|
||||
|
||||
- func: nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor
|
||||
variants: function, method
|
||||
@ -2777,6 +2782,7 @@
|
||||
dispatch:
|
||||
CPU, Meta: linspace_out
|
||||
CUDA: linspace_cuda_out
|
||||
MPS: linspace_out_mps
|
||||
|
||||
- func: log(Tensor self) -> Tensor
|
||||
device_check: NoCheck # TensorIterator
|
||||
@ -9781,6 +9787,7 @@
|
||||
dispatch:
|
||||
CPU: adaptive_max_pool2d_out_cpu
|
||||
CUDA: adaptive_max_pool2d_out_cuda
|
||||
MPS: adaptive_max_pool2d_out_mps
|
||||
|
||||
# Return: (Tensor output, Tensor indices)
|
||||
- func: adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor)
|
||||
@ -9793,6 +9800,7 @@
|
||||
dispatch:
|
||||
CPU: adaptive_max_pool2d_backward_out_cpu
|
||||
CUDA: adaptive_max_pool2d_backward_out_cuda
|
||||
MPS: adaptive_max_pool2d_backward_out_mps
|
||||
|
||||
- func: adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor
|
||||
python_module: nn
|
||||
|
@ -981,8 +981,17 @@ elseif(USE_CUDA)
|
||||
target_link_libraries(torch_cuda_linalg PRIVATE
|
||||
torch_cpu
|
||||
torch_cuda
|
||||
${CUDA_cusolver_LIBRARY}
|
||||
)
|
||||
if($ENV{ATEN_STATIC_CUDA})
|
||||
target_link_libraries(torch_cuda_linalg PRIVATE
|
||||
${CUDA_TOOLKIT_ROOT_DIR}/lib64/libcusolver_static.a
|
||||
${CUDA_TOOLKIT_ROOT_DIR}/lib64/liblapack_static.a # needed for libcusolver_static
|
||||
)
|
||||
else()
|
||||
target_link_libraries(torch_cuda_linalg PRIVATE
|
||||
${CUDA_cusolver_LIBRARY}
|
||||
)
|
||||
endif()
|
||||
# NS: TODO, is this really necessary?
|
||||
if(USE_MAGMA AND CAFFE2_STATIC_LINK_CUDA)
|
||||
target_link_libraries(torch_cuda_linalg PRIVATE
|
||||
|
@ -289,10 +289,7 @@ add_library(caffe2::cublas INTERFACE IMPORTED)
|
||||
if(CAFFE2_STATIC_LINK_CUDA AND NOT WIN32)
|
||||
set_property(
|
||||
TARGET caffe2::cublas PROPERTY INTERFACE_LINK_LIBRARIES
|
||||
"${CUDA_TOOLKIT_ROOT_DIR}/lib64/libcublas_static.a")
|
||||
set_property(
|
||||
TARGET caffe2::cublas APPEND PROPERTY INTERFACE_LINK_LIBRARIES
|
||||
"${CUDA_TOOLKIT_ROOT_DIR}/lib64/libcublasLt_static.a")
|
||||
${CUDA_CUBLAS_LIBRARIES})
|
||||
# Add explicit dependency to cudart_static to fix
|
||||
# libcublasLt_static.a.o): undefined reference to symbol 'cudaStreamWaitEvent'
|
||||
# error adding symbols: DSO missing from command line
|
||||
|
@ -16,7 +16,7 @@ pushd "$(dirname "$0")/../../.."

cp torch/_utils_internal.py tools/shared

python -m torchgen.gen
python -m torchgen.gen --source-path aten/src/ATen

python tools/setup_helpers/generate_code.py \
    --native-functions-path aten/src/ATen/native/native_functions.yaml \

@ -111,7 +111,8 @@ Loading Batched and Non-Batched Data

:class:`~torch.utils.data.DataLoader` supports automatically collating
individual fetched data samples into batches via arguments
:attr:`batch_size`, :attr:`drop_last`, and :attr:`batch_sampler`.
:attr:`batch_size`, :attr:`drop_last`, :attr:`batch_sampler`, and
:attr:`collate_fn` (which has a default function).


Automatic batching (default)
@ -209,7 +210,8 @@ arrays in PyTorch tensors.
**When automatic batching is enabled**, :attr:`collate_fn` is called with a list
of data samples at each time. It is expected to collate the input samples into
a batch for yielding from the data loader iterator. The rest of this section
describes behavior of the default :attr:`collate_fn` in this case.
describes the behavior of the default :attr:`collate_fn`
(:func:`~torch.utils.data.default_collate`).

For instance, if each data sample consists of a 3-channel image and an integral
class label, i.e., each element of the dataset returns a tuple
@ -232,6 +234,10 @@ Users may use customized :attr:`collate_fn` to achieve custom batching, e.g.,
collating along a dimension other than the first, padding sequences of
various lengths, or adding support for custom data types.

If you run into a situation where the outputs of :class:`~torch.utils.data.DataLoader`
have dimensions or type that is different from your expectation, you may
want to check your :attr:`collate_fn`.
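A minimal custom collate_fn sketch to illustrate the paragraph above; the dataset, names, and padding choice here are illustrative and not part of the documented API:

import torch
from torch.utils.data import DataLoader
from torch.nn.utils.rnn import pad_sequence

def pad_collate(batch):
    # batch is a list of (variable-length sequence, label) samples
    seqs, labels = zip(*batch)
    return pad_sequence(list(seqs), batch_first=True), torch.tensor(labels)

dataset = [(torch.randn(n), n % 2) for n in (3, 5, 2, 7)]
loader = DataLoader(dataset, batch_size=2, collate_fn=pad_collate)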

Single- and Multi-process Data Loading
--------------------------------------

@ -56,6 +56,7 @@ Features described in this documentation are classified by release status:

   tensor_view
   torch.amp <amp>
   torch.autograd <autograd>
   torch.library <library>
   cuda
   torch.backends <backends>
   torch.distributed <distributed>

@ -874,6 +874,11 @@ now supported.
        b = 2
        return x, b

Fusion Backends
~~~~~~~~~~~~~~~
There are a couple of fusion backends available to optimize TorchScript execution. The default fuser on CPUs is NNC, which can perform fusions for both CPUs and GPUs. The default fuser on GPUs is NVFuser, which supports a wider range of operators and has demonstrated generated kernels with improved throughput. See the `NVFuser documentation <https://github.com/pytorch/pytorch/blob/release/1.12/torch/csrc/jit/codegen/cuda/README.md>`_ for more details on usage and debugging.


References
~~~~~~~~~~
.. toctree::

42
docs/source/library.rst
Normal file
@ -0,0 +1,42 @@
torch.library
===================================

Python operator registration API provides capabilities for extending PyTorch's core library
of operators with user defined operators. Currently, this can be done in two ways:

#. Creating new libraries

   * Lets you register **new operators** and kernels for various backends and functionalities by specifying the appropriate dispatch keys. For example,

     * Consider registering a new operator ``add`` in your newly created namespace ``foo``. You can access this operator using the ``torch.ops`` API by calling ``torch.ops.foo.add``. You can also access specific registered overloads by calling ``torch.ops.foo.add.{overload_name}``.

     * If you registered a new kernel for the ``CUDA`` dispatch key for this operator, then your custom defined function will be called for CUDA tensor inputs.

   * This can be done by creating Library class objects of ``"DEF"`` kind.

#. Extending existing C++ libraries (e.g., aten)

   * Lets you register kernels for **existing operators** corresponding to various backends and functionalities by specifying the appropriate dispatch keys.

   * This may come in handy to fill up spotty operator support for a feature implemented through a dispatch key. For example,

     * You can add operator support for Meta Tensors (by registering a function to the ``Meta`` dispatch key).

   * This can be done by creating Library class objects of ``"IMPL"`` kind.

A tutorial that walks you through some examples on how to use this API is available on `Google Colab <https://colab.research.google.com/drive/1RRhSfk7So3Cn02itzLWE9K4Fam-8U011?usp=sharing>`_.

.. warning::
   Dispatcher is a complicated PyTorch concept and having a sound understanding of Dispatcher is crucial
   to be able to do anything advanced with this API. `This blog post <http://blog.ezyang.com/2020/09/lets-talk-about-the-pytorch-dispatcher/>`_
   is a good starting point to learn about Dispatcher.

.. currentmodule:: torch.library

.. autoclass:: torch.library.Library
   :members:

We have also added some function decorators to make it convenient to register functions for operators:

* :func:`torch.library.impl`
* :func:`torch.library.define`
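A minimal sketch of the torch.library API described above, assuming a build that ships it (1.12+); the foo/add names are illustrative, and the Library method signatures are assumed from the 1.12 API:

import torch
from torch.library import Library

foo_lib = Library("foo", "DEF")                      # "DEF": create a new namespace
foo_lib.define("add(Tensor x, Tensor y) -> Tensor")  # declare the operator schema

def add_cpu(x, y):
    return x + y

foo_lib.impl("add", add_cpu, "CPU")                  # register a kernel for the CPU key
out = torch.ops.foo.add(torch.ones(2), torch.ones(2))  # tensor([2., 2.])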
@ -120,6 +120,7 @@ Dropout functions

    dropout
    alpha_dropout
    feature_alpha_dropout
    dropout1d
    dropout2d
    dropout3d


@ -250,6 +250,7 @@ Dropout Layers
    :template: classtemplate.rst

    nn.Dropout
    nn.Dropout1d
    nn.Dropout2d
    nn.Dropout3d
    nn.AlphaDropout

@ -59,7 +59,8 @@ Below you can find a small example showcasing this::

TensorFloat-32(TF32) on Ampere devices
--------------------------------------

Starting in PyTorch 1.7, there is a new flag called `allow_tf32` which defaults to true.
Starting in PyTorch 1.7, there is a new flag called `allow_tf32`. This flag
defaults to True in PyTorch 1.7 to PyTorch 1.11, and False in PyTorch 1.12 and later.
This flag controls whether PyTorch is allowed to use the TensorFloat32 (TF32) tensor cores,
available on new NVIDIA GPUs since Ampere, internally to compute matmul (matrix multiplies
and batched matrix multiplies) and convolutions.
@ -72,7 +73,8 @@ matmuls and convolutions are controlled separately, and their corresponding flag

.. code:: python

  # The flag below controls whether to allow TF32 on matmul. This flag defaults to True.
  # The flag below controls whether to allow TF32 on matmul. This flag defaults to False
  # in PyTorch 1.12 and later.
  torch.backends.cuda.matmul.allow_tf32 = True

  # The flag below controls whether to allow TF32 on cuDNN. This flag defaults to True.
@ -95,6 +97,7 @@ To get an idea of the precision and speed, see the example code below:
  b = b_full.float()

  # Do matmul at TF32 mode.
  torch.backends.cuda.matmul.allow_tf32 = True
  ab_tf32 = a @ b  # takes 0.016s on GA100
  error = (ab_tf32 - ab_full).abs().max()  # 0.1747
  relative_error = error / mean  # 0.0022
@ -106,7 +109,7 @@ To get an idea of the precision and speed, see the example code below:
  relative_error = error / mean  # 0.000039

From the above example, we can see that with TF32 enabled, the speed is ~7x faster, relative error
compared to double precision is approximately 2 orders of magnitude larger. If the full FP32 precision
compared to double precision is approximately 2 orders of magnitude larger. If full FP32 precision
is needed, users can disable TF32 by:

.. code:: python

@ -14,27 +14,31 @@ capabilities to setup and run operations on GPU.

To get started, simply move your Tensor and Module to the ``mps`` device:

.. code::
.. code:: python

    # Make sure the current PyTorch binary was built with MPS enabled
    print(torch.backends.mps.is_built())
    # And that the current hardware and MacOS version are sufficient to
    # be able to use MPS
    print(torch.backends.mps.is_available())
    # Check that MPS is available
    if not torch.backends.mps.is_available():
        if not torch.backends.mps.is_built():
            print("MPS not available because the current PyTorch install was not "
                  "built with MPS enabled.")
        else:
            print("MPS not available because the current MacOS version is not 12.3+ "
                  "and/or you do not have an MPS-enabled device on this machine.")

    mps_device = torch.device("mps")
    else:
        mps_device = torch.device("mps")

    # Create a Tensor directly on the mps device
    x = torch.ones(5, device=mps_device)
    # Or
    x = torch.ones(5, device="mps")
        # Create a Tensor directly on the mps device
        x = torch.ones(5, device=mps_device)
        # Or
        x = torch.ones(5, device="mps")

    # Any operation happens on the GPU
    y = x * 2
        # Any operation happens on the GPU
        y = x * 2

    # Move your model to mps just like any other device
    model = YourFavoriteNet()
    model.to(mps_device)
        # Move your model to mps just like any other device
        model = YourFavoriteNet()
        model.to(mps_device)

    # Now every call runs on the GPU
    pred = model(x)
        # Now every call runs on the GPU
        pred = model(x)

@ -54,16 +54,14 @@ datatype. E.g.:

TensorFloat-32(TF32) on Nvidia Ampere devices
---------------------------------------------

On Ampere Nvidia GPUs, PyTorch by default uses TensorFloat32 (TF32) to speed up mathematically
intensive operations, in particular matrix multiplications and convolutions. When operation is performed
using TF32 tensor cores, only the first 10 bits of the input mantissa are read. This leads to less accurate
results, and surprising results such as multiplying a matrix by identity matrix produces
results that are different from the input.
Most neural network workloads have the same convergence behavior when using tf32 as they have
with fp32, however, if better accuracy is desired, TF32 can be turned off with
``torch.backends.cuda.matmul.allow_tf32 = False``
On Ampere Nvidia GPUs, PyTorch can use TensorFloat32 (TF32) to speed up mathematically intensive operations, in particular matrix multiplications and convolutions.
When an operation is performed using TF32 tensor cores, only the first 10 bits of the input mantissa are read.
This elision leads to less accurate results, and surprising results (e.g., multiplying a matrix by the identity matrix produces results that are different from the input).
By default, this option is disabled for matrix multiplications and enabled for convolutions, although most neural network workloads have the same convergence behavior when using TF32 as they have with fp32.
However, if better throughput is desired for matrix multiplications, TF32 can be turned on with ``torch.backends.cuda.matmul.allow_tf32 = True``.
Conversely, if better accuracy is desired for convolutions, TF32 can be turned off with ``torch.backends.cudnn.allow_tf32 = False``.

For more information see :ref:`TensorFloat32<tf32_on_ampere>`
For more information see :ref:`TensorFloat32<tf32_on_ampere>`.

Reduced Precision Reduction for FP16 GEMMs
------------------------------------------

@ -608,7 +608,6 @@ Utilities

    get_float32_matmul_precision
    set_warn_always
    is_warn_always_enabled
    vmap
    _assert

38
setup.py
@ -362,6 +362,33 @@ def check_submodules():
|
||||
'benchmark'), ['CMakeLists.txt'])
|
||||
|
||||
|
||||
# Windows has very bad support for symbolic links.
|
||||
# Instead of using symlinks, we're going to copy files over
|
||||
def mirror_files_into_torchgen():
|
||||
# (new_path, orig_path)
|
||||
# Directories are OK and are recursively mirrored.
|
||||
paths = [
|
||||
('torchgen/packaged/ATen/native/native_functions.yaml', 'aten/src/ATen/native/native_functions.yaml'),
|
||||
('torchgen/packaged/ATen/native/tags.yaml', 'aten/src/ATen/native/tags.yaml'),
|
||||
('torchgen/packaged/ATen/templates', 'aten/src/ATen/templates'),
|
||||
]
|
||||
for new_path, orig_path in paths:
|
||||
# Create the dirs involved in new_path if they don't exist
|
||||
if not os.path.exists(new_path):
|
||||
os.makedirs(os.path.dirname(new_path), exist_ok=True)
|
||||
|
||||
# Copy the files from the orig location to the new location
|
||||
if os.path.isfile(orig_path):
|
||||
shutil.copyfile(orig_path, new_path)
|
||||
continue
|
||||
if os.path.isdir(orig_path):
|
||||
if os.path.exists(new_path):
|
||||
# copytree fails if the tree exists already, so remove it.
|
||||
shutil.rmtree(new_path)
|
||||
shutil.copytree(orig_path, new_path)
|
||||
continue
|
||||
raise RuntimeError("Check the file paths in `mirror_files_into_torchgen()`")
|
||||
|
||||
# all the work we need to do _before_ setup runs
|
||||
def build_deps():
|
||||
report('-- Building version ' + version)
|
||||
@ -911,6 +938,7 @@ if __name__ == '__main__':
|
||||
print(e)
|
||||
sys.exit(1)
|
||||
|
||||
mirror_files_into_torchgen()
|
||||
if RUN_BUILD_DEPS:
|
||||
build_deps()
|
||||
|
||||
@ -1080,7 +1108,15 @@ if __name__ == '__main__':
|
||||
'utils/model_dump/code.js',
|
||||
'utils/model_dump/*.mjs',
|
||||
],
|
||||
'torchgen': [],
|
||||
'torchgen': [
|
||||
# Recursive glob doesn't work in setup.py,
|
||||
# https://github.com/pypa/setuptools/issues/1806
|
||||
# To make this robust we should replace it with some code that
|
||||
# returns a list of everything under packaged/
|
||||
'packaged/ATen/*',
|
||||
'packaged/ATen/native/*',
|
||||
'packaged/ATen/templates/*',
|
||||
],
|
||||
'caffe2': [
|
||||
'python/serialized_test/data/operator_test/*.zip',
|
||||
],
|
||||
|
@ -101,8 +101,8 @@ class TestShardedTensorMatrixOps(ShardedTensorTestBase):
|
||||
enumerable_spec, 10, 10, init_rrefs=False, dtype=torch.double
|
||||
)
|
||||
with self.assertRaisesRegex(
|
||||
NotImplementedError,
|
||||
"Only ChunkShardingSpec supported for 'transpose'",
|
||||
RuntimeError,
|
||||
"not supported",
|
||||
):
|
||||
st.transpose(1, 0)
|
||||
|
||||
|
@ -19,7 +19,7 @@ from torch.distributed._shard.api import (
|
||||
_reshard_output,
|
||||
)
|
||||
from torch.distributed._shard.sharded_tensor import (
|
||||
sharded_op_impl,
|
||||
custom_sharded_op_impl,
|
||||
pre_load_state_dict_hook,
|
||||
state_dict_hook,
|
||||
ShardedTensor,
|
||||
@ -174,7 +174,7 @@ class TestShardParameter(ShardedTensorTestBase):
|
||||
with self.assertRaisesRegex(ValueError, 'does not match with src_rank'):
|
||||
shard_parameter(fc, 'weight', spec, src_rank=self.rank)
|
||||
|
||||
with self.assertRaisesRegex(AttributeError, 'Linear have no attribute'):
|
||||
with self.assertRaisesRegex(AttributeError, 'has no attribute'):
|
||||
shard_parameter(fc, 'foo', spec)
|
||||
|
||||
with self.assertRaisesRegex(ValueError, 'Expected Linear.bias to be a Tensor, but found str'):
|
||||
@ -2463,7 +2463,7 @@ class TestShardedTensorCustomOps(ShardedTensorTestBase):
|
||||
@requires_nccl()
|
||||
def test_custom_op(self):
|
||||
|
||||
@sharded_op_impl(torch.asin)
|
||||
@custom_sharded_op_impl(torch.asin)
|
||||
def my_sharded_asin(types, args, kwargs, process_group):
|
||||
return torch.asin(args[0].local_shards()[0].tensor)
|
||||
|
||||
@ -2491,7 +2491,7 @@ class TestShardedTensorCustomOps(ShardedTensorTestBase):
|
||||
from torch.distributed._shard.sharding_spec.api import custom_sharding_spec_op
|
||||
|
||||
@custom_sharding_spec_op(ChunkShardingSpec, torch.nn.functional.linear)
|
||||
def my_sharded_linear(types, args, kwargs):
|
||||
def my_sharded_linear(types, args, kwargs, process_group):
|
||||
return t
|
||||
|
||||
spec = ChunkShardingSpec(
|
||||
@ -2515,12 +2515,12 @@ class TestShardedTensorCustomOps(ShardedTensorTestBase):
|
||||
def test_custom_op_errors(self):
|
||||
|
||||
with self.assertRaisesRegex(TypeError, 'expects signature'):
|
||||
@sharded_op_impl(torch.nn.functional.linear)
|
||||
@custom_sharded_op_impl(torch.nn.functional.linear)
|
||||
def my_op1(types, args, kwargs, process_group, random_param):
|
||||
pass
|
||||
|
||||
with self.assertRaisesRegex(TypeError, 'expects signature'):
|
||||
@sharded_op_impl(torch.nn.functional.linear)
|
||||
@custom_sharded_op_impl(torch.nn.functional.linear)
|
||||
def my_op2(types):
|
||||
pass
|
||||
|
||||
|
111
test/distributed/fsdp/test_checkpoint_wrapper.py
Normal file
@ -0,0 +1,111 @@
|
||||
# Owner(s): ["oncall: distributed"]
|
||||
|
||||
from copy import deepcopy
|
||||
from functools import partial
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
|
||||
checkpoint_wrapper,
|
||||
CheckpointImpl
|
||||
)
|
||||
|
||||
from torch.utils.checkpoint import checkpoint
|
||||
|
||||
from torch.testing._internal.common_utils import (
|
||||
run_tests,
|
||||
TestCase,
|
||||
)
|
||||
|
||||
import unittest
|
||||
|
||||
class CheckpointWrapperTest(TestCase):
|
||||
def setUp(self):
|
||||
super().setUp()
|
||||
|
||||
def test_load_activation_checkpointed_module(self):
|
||||
lin = nn.Linear(10, 10, bias=False)
|
||||
lin = checkpoint_wrapper(lin)
|
||||
state_dict = deepcopy(lin.state_dict())
|
||||
# Load into non-checkpoint wrapped linear module
|
||||
lin_new = nn.Linear(10, 10, bias=False)
|
||||
lin_new.load_state_dict(state_dict)
|
||||
for p1, p2 in zip(lin.parameters(), lin_new.parameters()):
|
||||
self.assertEqual(p1, p2)
|
||||
self.assertTrue(torch.allclose(p1, p2))
|
||||
|
||||
# Load non-checkpoint wrapped module into checkpoint wrapped one
|
||||
# Make params different
|
||||
for p in lin_new.parameters():
|
||||
with torch.no_grad():
|
||||
p.add_(0.5)
|
||||
|
||||
state_dict = deepcopy(lin_new.state_dict())
|
||||
# Verify checkpoint wrapped linear can load unwrapped linear
|
||||
lin.load_state_dict(state_dict)
|
||||
for p1, p2 in zip(lin.parameters(), lin_new.parameters()):
|
||||
self.assertEqual(p1, p2)
|
||||
|
||||
@unittest.skipIf(not torch.cuda.is_available(), "Test requires CUDA")
|
||||
def test_checkpoint_wrapper_parity(self):
|
||||
"""
|
||||
Tests that using checkpoint_wrapper or the functional
|
||||
torch.utils.checkpoint (with the same reentrant config)
|
||||
results in the same maximum memory usage, i.e. they are
|
||||
equivalent memory usage wise.
|
||||
"""
|
||||
class Model(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
n: int,
|
||||
use_cp: bool,
|
||||
use_wrapper: bool = False,
|
||||
use_reentrant: bool = True
|
||||
):
|
||||
super().__init__()
|
||||
self.layers = nn.ModuleList()
|
||||
self.n = n
|
||||
self.use_cp = use_cp
|
||||
self.use_wrapper = use_wrapper
|
||||
self.use_reentrant = use_reentrant
|
||||
wrp = partial(
|
||||
checkpoint_wrapper,
|
||||
checkpoint_impl=CheckpointImpl.REENTRANT if use_reentrant else CheckpointImpl.NO_REENTRANT
|
||||
)
|
||||
for i in range(self.n):
|
||||
l = nn.Sequential(nn.Linear(256, 256), nn.Linear(256, 256), nn.Linear(256, 256))
|
||||
use_checkpoint_wrapper = self.use_wrapper
|
||||
if use_checkpoint_wrapper:
|
||||
l = wrp(l)
|
||||
self.layers.append(l)
|
||||
|
||||
def forward(self, x):
|
||||
for i in range(self.n):
|
||||
if (
|
||||
self.use_wrapper or
|
||||
not self.use_cp
|
||||
):
|
||||
x = self.layers[i](x)
|
||||
else:
|
||||
x = checkpoint(self.layers[i], x, use_reentrant=self.use_reentrant)
|
||||
return x
|
||||
|
||||
def test(use_checkpointing, use_wrapper, use_reentrant):
|
||||
a = Model(8, use_checkpointing, use_wrapper=use_wrapper, use_reentrant=use_reentrant).cuda()
|
||||
x = torch.randn(10000, 256, requires_grad=True).cuda()
|
||||
torch.cuda.reset_peak_memory_stats()
|
||||
loss = a(x).sum()
|
||||
loss.backward()
|
||||
return torch.cuda.max_memory_allocated()
|
||||
|
||||
functional_no_reentrant = test(use_checkpointing=True, use_wrapper=False, use_reentrant=False)
|
||||
wrapper_no_reentrant = test(use_checkpointing=False, use_wrapper=True, use_reentrant=False)
|
||||
self.assertEqual(functional_no_reentrant, wrapper_no_reentrant)
|
||||
|
||||
functional_reentrant = test(use_checkpointing=True, use_wrapper=False, use_reentrant=True)
|
||||
wrapper_reentrant = test(use_checkpointing=False, use_wrapper=True, use_reentrant=True)
|
||||
self.assertEqual(functional_reentrant, wrapper_reentrant)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
run_tests()
|
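For context (not part of the change), a minimal sketch of the wrapper exercised by the test above; checkpoint_wrapper keeps the wrapped module state_dict-compatible with the unwrapped one, and the layer sizes here are illustrative:

import torch.nn as nn
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
    checkpoint_wrapper,
    CheckpointImpl,
)

block = nn.Sequential(nn.Linear(256, 256), nn.ReLU(), nn.Linear(256, 256))
block = checkpoint_wrapper(block, checkpoint_impl=CheckpointImpl.NO_REENTRANT)
# Activations of `block` are recomputed during backward instead of being stored.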
@ -392,7 +392,7 @@ class TestFSDPOptimState(FSDPTest):
|
||||
``on_gpu=True`` and on CPU if ``on_gpu=False``."""
|
||||
for param_state in osd["state"].values():
|
||||
for value in param_state.values():
|
||||
if torch.is_tensor(value):
|
||||
if torch.is_tensor(value) and value.dim() > 0:
|
||||
if on_gpu:
|
||||
self.assertTrue(value.is_cuda)
|
||||
else:
|
||||
|
@ -1173,8 +1173,15 @@ class TestZeroRedundancyOptimizerDistributed(TestZeroRedundancyOptimizer):
|
||||
layers are assigned to different devices."""
|
||||
if self.rank >= 2:
|
||||
return
|
||||
self.dist_init(self.rank, world_size=2)
|
||||
self._test_zero_model_parallel(parameters_as_bucket_view)
|
||||
# Disable DDP + ReplicatedTensor when `parameter_as_bucket_view=True`
|
||||
# since then ZeroRedundancyOptimizer modifies the model parameters in
|
||||
# place.
|
||||
from torch.nn.parallel._replicated_tensor_ddp_utils import _ddp_replicated_tensor
|
||||
context = _ddp_replicated_tensor(False) if parameters_as_bucket_view \
|
||||
else suppress()
|
||||
with context:
|
||||
self.dist_init(self.rank, world_size=2)
|
||||
self._test_zero_model_parallel(parameters_as_bucket_view)
|
||||
|
||||
def _test_ddp_zero_overlap(
|
||||
self,
|
||||
|
85
test/distributed/rpc/test_share_memory.py
Normal file
@ -0,0 +1,85 @@
|
||||
#!/usr/bin/env python3
|
||||
# Owner(s): ["oncall: distributed"]
|
||||
|
||||
import sys

import torch
|
||||
import torch.distributed as dist
|
||||
|
||||
if not dist.is_available():
|
||||
print("Distributed not available, skipping tests", file=sys.stderr)
|
||||
sys.exit(0)
|
||||
|
||||
import copyreg
|
||||
import os
|
||||
import contextlib
|
||||
|
||||
from torch import multiprocessing
|
||||
import torch.multiprocessing.reductions as TorchMpReductions
|
||||
import torch.distributed.rpc as rpc
|
||||
from torch.distributed.rpc.internal import _InternalRPCPickler
|
||||
from torch.distributed.rpc.api import _use_rpc_pickler
|
||||
from torch.testing._internal.common_utils import TestCase, run_tests
|
||||
|
||||
@contextlib.contextmanager
|
||||
def fs_sharing():
|
||||
prev_strategy = multiprocessing.get_sharing_strategy()
|
||||
multiprocessing.set_sharing_strategy('file_system')
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
multiprocessing.set_sharing_strategy(prev_strategy)
|
||||
|
||||
class ShareMemoryRPCPickler(_InternalRPCPickler):
|
||||
def __init__(self) -> None:
|
||||
super().__init__()
|
||||
self._dispatch_table
|
||||
# pyre-fixme[4]: Attribute must be annotated.
|
||||
self._dispatch_table = copyreg.dispatch_table.copy()
|
||||
|
||||
for t in torch._storage_classes:
|
||||
self._dispatch_table[t] = TorchMpReductions.reduce_storage
|
||||
|
||||
for t in torch._tensor_classes:
|
||||
self._dispatch_table[t] = TorchMpReductions.reduce_tensor
|
||||
self._dispatch_table[torch.Tensor] = TorchMpReductions.reduce_tensor
|
||||
self._dispatch_table[
|
||||
torch.nn.parameter.Parameter
|
||||
] = TorchMpReductions.reduce_tensor
|
||||
|
||||
def worker_loop(a):
|
||||
rpc.init_rpc('worker1', rank=1, world_size=2)
|
||||
rpc.shutdown()
|
||||
|
||||
def worker_fn(m):
|
||||
pass
|
||||
|
||||
class TestRPCPickler(TestCase):
|
||||
def setUp(self):
|
||||
super().setUp()
|
||||
|
||||
def test_case(self):
|
||||
os.environ['MASTER_ADDR'] = 'localhost'
|
||||
os.environ['MASTER_PORT'] = '29500'
|
||||
|
||||
with fs_sharing():
|
||||
r = multiprocessing.spawn(worker_loop, join=False)
|
||||
|
||||
try:
|
||||
with _use_rpc_pickler(ShareMemoryRPCPickler()):
|
||||
rpc.init_rpc(
|
||||
'worker0',
|
||||
rank=0,
|
||||
world_size=2)
|
||||
m = torch.nn.Linear(1, 2)
|
||||
m.share_memory()
|
||||
rref = rpc.remote(
|
||||
'worker1',
|
||||
worker_fn,
|
||||
args=(m,))
|
||||
|
||||
rref.to_here()
|
||||
finally:
|
||||
rpc.shutdown()
|
||||
r.join()
|
||||
|
||||
if __name__ == '__main__':
|
||||
run_tests()
|
@ -2243,6 +2243,15 @@ class DistributedDataParallelTest(
|
||||
),
|
||||
)
|
||||
|
||||
@requires_nccl()
|
||||
@skip_if_lt_x_gpu(2)
|
||||
def test_channels_last_contig(self):
|
||||
store = c10d.FileStore(self.file_name, self.world_size)
|
||||
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
|
||||
device = torch.device(f"cuda:{self.rank}")
|
||||
tensor = torch.ones((2, 16, 768, 1152), dtype=torch.float32, device=device).to(memory_format=torch.channels_last)
|
||||
process_group.broadcast([tensor]).wait()
|
||||
|
||||
|
||||
|
||||
class NcclErrorHandlingTest(MultiProcessTestCase):
|
||||
|
@ -562,6 +562,9 @@ class TestQuantizedOps(TestCase):
|
||||
"""Tests the correctness of the quantized::qnnpack_tanh op."""
|
||||
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
|
||||
qparams=hu.qparams()))
|
||||
@unittest.skip(
|
||||
"this is broken without changes to any relevant code, "
|
||||
"we need to remove hypothesis testing in CI")
|
||||
def test_qtanh(self, X):
|
||||
# Note: QNNPACK is tested separately in TestQNNPackOps
|
||||
X, (scale, zero_point, torch_type) = X
|
||||
@ -4772,6 +4775,9 @@ class TestQuantizedConv(TestCase):
|
||||
Y_zero_point=st.integers(0, 4),
|
||||
use_bias=st.booleans())
|
||||
@override_qengines
|
||||
@unittest.skip(
|
||||
"this is broken without changes to any relevant code, "
|
||||
"we need to remove hypothesis testing in CI")
|
||||
def test_qconv_transpose2d(
|
||||
self,
|
||||
batch_size,
|
||||
@ -4899,6 +4905,9 @@ class TestQuantizedConv(TestCase):
|
||||
Y_zero_point=st.integers(0, 4),
|
||||
use_bias=st.booleans())
|
||||
@override_qengines
|
||||
@unittest.skip(
|
||||
"this is broken without changes to any relevant code, "
|
||||
"we need to remove hypothesis testing in CI")
|
||||
def test_qconv_transpose3d(
|
||||
self,
|
||||
batch_size,
|
||||
|
@ -1151,6 +1151,9 @@ class TestQuantizedTensor(TestCase):
                       qparams=hu.qparams()),
           reduce_range=st.booleans()
           )
    @unittest.skip(
        "this is broken without changes to any relevant code, "
        "we need to remove hypothesis testing in CI")
    def test_choose_qparams(self, X, reduce_range):
        X, (scale, zero_point, torch_type) = X
        X = torch.from_numpy(X)
@ -425,6 +425,9 @@ class TestFakeQuantizeOps(TestCase):
    @given(X=hu.tensor(shapes=hu.array_shapes(1, 5,),
                       elements=hu.floats(-1e3, 1e3, allow_nan=False, allow_infinity=False),
                       qparams=hu.qparams(dtypes=torch.quint8)))
    @unittest.skip(
        "this is broken without changes to any relevant code, "
        "we need to remove hypothesis testing in CI")
    def test_learnable_forward_per_tensor_cpu(self, X):
        X, (_, _, _) = X
        scale_base = torch.normal(mean=0, std=1, size=(1,)).clamp(1e-4, 100)
@ -827,6 +830,9 @@ class TestFakeQuantizeOps(TestCase):
    @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
           X=hu.per_channel_tensor(shapes=hu.array_shapes(1, 5,),
                                   qparams=hu.qparams(dtypes=torch.quint8)))
    @unittest.skip(
        "this is broken without changes to any relevant code, "
        "we need to remove hypothesis testing in CI")
    def test_backward_per_channel(self, device, X):
        r"""Tests the backward method.
        """
@ -936,6 +942,9 @@ class TestFakeQuantizeOps(TestCase):

    @given(X=hu.per_channel_tensor(shapes=hu.array_shapes(2, 5,),
                                   qparams=hu.qparams(dtypes=torch.quint8)))
    @unittest.skip(
        "this is broken without changes to any relevant code, "
        "we need to remove hypothesis testing in CI")
    def test_learnable_backward_per_channel_cpu(self, X):
        torch.random.manual_seed(NP_RANDOM_SEED)
        X, (_, _, axis, _) = X
@ -176,6 +176,7 @@ WINDOWS_BLOCKLIST = [
    "distributed/nn/jit/test_instantiator",
    "distributed/rpc/test_faulty_agent",
    "distributed/rpc/test_tensorpipe_agent",
    "distributed/rpc/test_share_memory",
    "distributed/rpc/cuda/test_tensorpipe_agent",
    "distributed/pipeline/sync/skip/test_api",
    "distributed/pipeline/sync/skip/test_gpipe",
@ -201,6 +202,8 @@ WINDOWS_BLOCKLIST = [
    "distributed/pipeline/sync/test_worker",
    "distributed/elastic/agent/server/test/api_test",
    "distributed/elastic/multiprocessing/api_test",
    "distributed/_shard/checkpoint/test_checkpoint"
    "distributed/_shard/checkpoint/test_file_system_checkpoint"
    "distributed/_shard/sharding_spec/test_sharding_spec",
    "distributed/_shard/sharding_plan/test_sharding_plan",
    "distributed/_shard/sharded_tensor/test_megatron_prototype",
@ -216,8 +219,6 @@ WINDOWS_BLOCKLIST = [
    "distributed/_shard/sharded_tensor/ops/test_math_ops",
    "distributed/_shard/sharded_tensor/ops/test_matrix_ops",
    "distributed/_shard/sharded_tensor/ops/test_softmax",
    "distributed/_shard/sharded_tensor/ops/test_tensor_ops",
    "distributed/_shard/sharding_spec/test_sharding_spec",
    "distributed/_shard/sharded_optim/test_sharded_optim",
    "distributed/_shard/test_partial_tensor",
    "distributed/_shard/test_replicated_tensor",
@ -227,7 +228,10 @@ ROCM_BLOCKLIST = [
    "distributed/nn/jit/test_instantiator",
    "distributed/rpc/test_faulty_agent",
    "distributed/rpc/test_tensorpipe_agent",
    "distributed/rpc/test_share_memory",
    "distributed/rpc/cuda/test_tensorpipe_agent",
    "distributed/_shard/checkpoint/test_checkpoint"
    "distributed/_shard/checkpoint/test_file_system_checkpoint"
    "distributed/_shard/sharding_spec/test_sharding_spec",
    "distributed/_shard/sharding_plan/test_sharding_plan",
    "distributed/_shard/sharded_tensor/test_megatron_prototype",
@ -243,8 +247,6 @@ ROCM_BLOCKLIST = [
    "distributed/_shard/sharded_tensor/ops/test_math_ops",
    "distributed/_shard/sharded_tensor/ops/test_matrix_ops",
    "distributed/_shard/sharded_tensor/ops/test_softmax",
    "distributed/_shard/sharded_tensor/ops/test_tensor_ops",
    "distributed/_shard/sharding_spec/test_sharding_spec",
    "distributed/_shard/sharded_optim/test_sharded_optim",
    "distributed/_shard/test_partial_tensor",
    "distributed/_shard/test_replicated_tensor",
@ -612,6 +614,7 @@ CUSTOM_HANDLERS = {
    "distributed/test_pg_wrapper": get_run_test_with_subprocess_fn(),
    "distributed/rpc/test_faulty_agent": get_run_test_with_subprocess_fn(),
    "distributed/rpc/test_tensorpipe_agent": get_run_test_with_subprocess_fn(),
    "distributed/rpc/test_share_memory": get_run_test_with_subprocess_fn(),
    "distributed/rpc/cuda/test_tensorpipe_agent": get_run_test_with_subprocess_fn(),
}
@ -4434,6 +4434,132 @@ for shape in [(1,), ()]:
            mean_combined = torch.stack(feat_combined).mean()
            mean_combined.backward()

    @unittest.skipIf(not torch.cuda.is_available(), "Test requires CUDA")
    @slowTest
    def test_checkpointing_without_reentrant_memory_savings(self):
        class MyModel(nn.Module):
            def __init__(self, n, use_checkpoint, use_reentrant):
                super().__init__()
                self.n = n
                self.use_checkpoint = use_checkpoint
                self.use_reentrant = use_reentrant
                self.layers = nn.ModuleList()
                for i in range(self.n):
                    layer = nn.Sequential(
                        nn.Linear(256, 256), nn.Linear(256, 256), nn.Linear(256, 256)
                    )
                    self.layers.append(layer)
                # pre-allocate the grad so that increased memory usage is mainly
                # due to activations.
                for layer in self.layers:
                    for lin in layer:
                        lin.weight.grad = torch.ones_like(lin.weight)
                        lin.bias.grad = torch.ones_like(lin.bias)

            def forward(self, x):
                for i in range(self.n):
                    if not self.use_checkpoint:
                        x = self.layers[i](x)
                    else:
                        x = checkpoint(self.layers[i], x, use_reentrant=self.use_reentrant)

                return x

        model_no_checkpoint = MyModel(8, use_checkpoint=False, use_reentrant=False).cuda()
        model_reentrant_checkpoint = MyModel(8, use_checkpoint=True, use_reentrant=True).cuda()
        model_no_reentrant_checkpoint = MyModel(8, use_checkpoint=True, use_reentrant=False).cuda()

        x = torch.randn(100, 256, requires_grad=True, device='cuda')

        torch.cuda.reset_peak_memory_stats()
        loss = model_no_checkpoint(x.clone()).sum()
        loss.backward()
        mem_no_checkpoint = torch.cuda.max_memory_allocated()

        torch.cuda.reset_peak_memory_stats()
        loss = model_reentrant_checkpoint(x.clone()).sum()
        loss.backward()
        mem_reentrant_checkpoint = torch.cuda.max_memory_allocated()

        torch.cuda.reset_peak_memory_stats()
        loss = model_no_reentrant_checkpoint(x.clone()).sum()
        loss.backward()
        mem_no_reentrant_checkpoint = torch.cuda.max_memory_allocated()

        self.assertTrue(mem_reentrant_checkpoint < mem_no_checkpoint)
        self.assertTrue(mem_no_reentrant_checkpoint < mem_no_checkpoint)

    def test_checkpointing_without_reentrant_custom_function_raises(self):
        """
        Accessing ctx.saved_tensors multiple times in a custom function
        backward pass with non-reentrant checkpoint currently throws due to
        saved tensors not being recomputed in between the accesses.
        """
        # For verifying first access to ctx.saved_tensors succeeded.

        _first_saved_tensor_access_succeeded = False

        class MyFunc(torch.autograd.Function):
            @staticmethod
            def forward(ctx, x, y, z):
                w = x * y * z
                out = w + w
                ctx.save_for_backward(x, y, z, w, out)
                return out

            @staticmethod
            def backward(ctx, grad_out):
                x, y, z, w, out = ctx.saved_tensors
                nonlocal _first_saved_tensor_access_succeeded
                _first_saved_tensor_access_succeeded = True
                # Raises issue in non-reentrant checkpointing where
                # second access to saved tensors raises because they were
                # not recomputed.
                x_2, y_2, z_2, w_2, out_2 = ctx.saved_tensors

        x = torch.tensor(1., requires_grad=True)
        y = torch.tensor(2., requires_grad=True)
        z = torch.tensor(3., requires_grad=True)

        def foo(x, y, z):
            x = x * y * z
            y = y * y * z
            z = z * z
            out = MyFunc.apply(x, y, z)
            return out

        out = checkpoint(foo, x, y, z, use_reentrant=False)
        with self.assertRaisesRegex(
            RuntimeError,
            "Attempt to retrieve a tensor saved by autograd multiple times"
        ):
            out.sum().backward()

        self.assertTrue(_first_saved_tensor_access_succeeded)

    def test_access_saved_tensor_twice_without_recomputation_raises(self):
        """
        If using saved tensor hooks based checkpointing and a saved tensor
        is accessed multiple times without triggering recomputation in the
        middle, error is raised indicating so.
        """
        def foo(a):
            b = a * a
            c = a * b
            d = torch.exp(a)
            return d

        a = torch.randn(5, requires_grad=True)
        d = checkpoint(foo, a, use_reentrant=False)
        # First access
        d.grad_fn._saved_result
        # Second access raises error
        with self.assertRaisesRegex(
            RuntimeError,
            "Attempt to retrieve a tensor saved by autograd multiple times"
        ):
            d.grad_fn._saved_result

    @slowTest
    @parametrize("input_requires_grad", [True, False])
    def test_checkpointing_without_reentrant(self, input_requires_grad):
Some files were not shown because too many files have changed in this diff