Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-21 21:49:24 +08:00)
Compare commits: cslpull91...v2.3.0-rc9 (35 commits)
Commits:
12d0e693d0, 38acd812ab, b197f540bc, dc81d19aac, 108305e47b,
a8b009185d, b67b277268, a8f93a5c71, fa07dc5132, 2a82d31f78,
4bb5cb51e6, ef38d0572e, 5a53185e65, bc9e23abb5, 8194fae625,
12acd4c9b3, 857797d148, 233dfe4d6a, e22b534b10, 8602990e3f,
685cc955df, b1c2430fbd, 3002eb2556, e1a846d6b8, 4a9a8c606d,
d3201f48b1, 74832f12fa, 02cdb400d7, 37257774c6, c4e5434423,
b4f90aae1b, 94d6463255, 6a89a753b1, d69c421912, 6725db07ae
@@ -1 +1 @@
-0a22a91d04c2b4a029a69a198eac390089c3e891
+d08e16b738ab550c3af51305df624d5c823dc445
@@ -1 +1 @@
-a9bc1a36470eefafe0e2ab2503b8698f1e89e7e3
+79c6c9b209a5692b9a895398f4f3a033f8f80415
@@ -57,8 +57,21 @@ fi
   # Uncomment the below when resolved to track the latest conda update
   # as_jenkins conda update -y -n base conda

+  if [[ $(uname -m) == "aarch64" ]]; then
+    export SYSROOT_DEP="sysroot_linux-aarch64=2.17"
+  else
+    export SYSROOT_DEP="sysroot_linux-64=2.17"
+  fi
+
   # Install correct Python version
-  as_jenkins conda create -n py_$ANACONDA_PYTHON_VERSION -y python="$ANACONDA_PYTHON_VERSION"
+  # Also ensure sysroot is using a modern GLIBC to match system compilers
+  as_jenkins conda create -n py_$ANACONDA_PYTHON_VERSION -y\
+    python="$ANACONDA_PYTHON_VERSION" \
+    ${SYSROOT_DEP}
+
+  # libstdcxx from conda default channels are too old, we need GLIBCXX_3.4.30
+  # which is provided in libstdcxx 12 and up.
+  conda_install libstdcxx-ng=12.3.0 -c conda-forge

   # Install PyTorch conda deps, as per https://github.com/pytorch/pytorch README
   if [[ $(uname -m) == "aarch64" ]]; then
@@ -110,14 +123,5 @@ fi
     pip_install -r /opt/conda/requirements-docs.txt
   fi

-  # HACK HACK HACK
-  # gcc-9 for ubuntu-18.04 from http://ppa.launchpad.net/ubuntu-toolchain-r/test/ubuntu
-  # Pulls llibstdc++6 13.1.0-8ubuntu1~18.04 which is too new for conda
-  # So remove libstdc++6.so.3.29 installed by https://anaconda.org/anaconda/libstdcxx-ng/files?version=11.2.0
-  # Same is true for gcc-12 from Ubuntu-22.04
-  if grep -e [12][82].04.[623] /etc/issue >/dev/null; then
-    rm /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/lib/libstdc++.so.6
-  fi
-
   popd
 fi
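The libstdcxx-ng pin above exists because the toolchain needs GLIBCXX_3.4.30 from the conda env's libstdc++. A quick diagnostic for which GLIBCXX versions a given libstdc++.so.6 exports, as a minimal Python sketch (the library path is an assumption and varies per environment):

    import re
    import subprocess

    # Hypothetical path; point this at the conda env under inspection.
    LIBSTDCXX = "/opt/conda/envs/py_3.10/lib/libstdc++.so.6"

    # `strings` lists printable strings in the shared object; the GLIBCXX_*
    # entries are the symbol versions the library provides.
    out = subprocess.run(["strings", LIBSTDCXX], capture_output=True, text=True).stdout
    versions = set(re.findall(r"GLIBCXX_\d+(?:\.\d+)*", out))
    newest = max(versions, key=lambda v: tuple(int(p) for p in v.split("_")[1].split(".")))
    print(newest)  # must reach GLIBCXX_3.4.30 for the pinned toolchain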
@@ -1 +1 @@
-3.0.0
+2.3.0
@@ -255,6 +255,11 @@ else
   # or building non-XLA tests.
   if [[ "$BUILD_ENVIRONMENT" != *rocm* &&
        "$BUILD_ENVIRONMENT" != *xla* ]]; then
+    if [[ "$BUILD_ENVIRONMENT" != *py3.8* ]]; then
+      # Install numpy-2.0 release candidate for builds
+      # Which should be backward compatible with Numpy-1.X
+      python -mpip install --pre numpy==2.0.0rc1
+    fi
     WERROR=1 python setup.py bdist_wheel
   else
     python setup.py bdist_wheel
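The numpy==2.0.0rc1 build dependency above relies on NumPy 2.0's documented promise that extensions compiled against its headers keep working under NumPy 1.x at runtime. A small runtime gate, as an illustrative sketch (packaging is assumed available; it ships with modern pip):

    import numpy as np
    from packaging.version import Version

    # Wheels built against NumPy 2.0 headers are expected to import cleanly
    # under both major versions; this only reports which one is active.
    major = "2.x" if Version(np.__version__) >= Version("2") else "1.x"
    print(f"running against NumPy {major} ({np.__version__})")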
@@ -178,7 +178,7 @@ function install_torchrec_and_fbgemm() {

 function clone_pytorch_xla() {
   if [[ ! -d ./xla ]]; then
-    git clone --recursive --quiet https://github.com/pytorch/xla.git
+    git clone --recursive -b r2.3 https://github.com/pytorch/xla.git
     pushd xla
     # pin the xla hash so that we don't get broken by changes to xla
     git checkout "$(cat ../.github/ci_commit_pins/xla.txt)"
@@ -78,7 +78,7 @@ TRITON_VERSION=$(cat $PYTORCH_ROOT/.ci/docker/triton_version.txt)
 if [[ "$PACKAGE_TYPE" =~ .*wheel.* && -n "${PYTORCH_EXTRA_INSTALL_REQUIREMENTS:-}" ]]; then
   # Only linux Python < 3.12 are supported wheels for triton
   TRITON_CONSTRAINT="platform_system == 'Linux' and platform_machine == 'x86_64' and python_version < '3.12'"
-  TRITON_REQUIREMENT="triton==${TRITON_VERSION}; ${TRITON_CONSTRAINT}"
+  TRITON_REQUIREMENT="pytorch-triton==${TRITON_VERSION}; ${TRITON_CONSTRAINT}"
   if [[ -n "$PYTORCH_BUILD_VERSION" && "$PYTORCH_BUILD_VERSION" =~ .*dev.* ]]; then
     TRITON_SHORTHASH=$(cut -c1-10 $PYTORCH_ROOT/.ci/docker/ci_commit_pins/triton.txt)
     TRITON_REQUIREMENT="pytorch-triton==${TRITON_VERSION}+${TRITON_SHORTHASH}; ${TRITON_CONSTRAINT}"
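The TRITON_CONSTRAINT above is a PEP 508 environment marker: pip evaluates it on the installing machine and silently drops the pytorch-triton requirement on non-x86-64 platforms or Python >= 3.12. A minimal sketch of that evaluation using the packaging library:

    from packaging.markers import Marker

    constraint = Marker(
        "platform_system == 'Linux' and platform_machine == 'x86_64' "
        "and python_version < '3.12'"
    )
    # True only on x86-64 Linux with Python < 3.12; elsewhere pip skips
    # a requirement written as "pytorch-triton==2.3.0; <this marker>".
    print(constraint.evaluate())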
2 .github/ci_commit_pins/xla.txt vendored
@@ -1 +1 @@
-707a632930bfde19ffb361cdf5c31a7682af4e67
+r2.3
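With the pin now naming the r2.3 branch rather than a fixed hash, clone_pytorch_xla above still reads this file verbatim and passes it to git checkout, which accepts either form. A minimal sketch of that read-and-checkout step (paths assume the script runs from an xla checkout next to pytorch's .github directory):

    import subprocess
    from pathlib import Path

    # The pin file may hold a commit SHA or a branch name such as "r2.3".
    pin = Path("../.github/ci_commit_pins/xla.txt").read_text().strip()
    subprocess.check_call(["git", "checkout", pin])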
@@ -27,3 +27,6 @@ rockset==1.0.3
 z3-solver==4.12.2.0
 tensorboard==2.13.0
 optree==0.9.1
+# NB: test_hparams_* from test_tensorboard is failing with protobuf 5.26.0 in
+# which the stringify metadata is wrong when escaping double quote
+protobuf==3.20.2
18 .github/scripts/build_triton_wheel.py vendored
@@ -10,9 +10,6 @@ from typing import Optional
 SCRIPT_DIR = Path(__file__).parent
 REPO_DIR = SCRIPT_DIR.parent.parent

-# TODO: Remove me once Triton version is again in sync for vanilla and ROCm
-ROCM_TRITION_VERSION = "2.1.0"
-

 def read_triton_pin(rocm_hash: bool = False) -> str:
     triton_file = "triton.txt" if not rocm_hash else "triton-rocm.txt"
@@ -99,7 +96,14 @@ def build_triton(
     triton_repo = "https://github.com/openai/triton"
     triton_pkg_name = "pytorch-triton"
     check_call(["git", "clone", triton_repo], cwd=tmpdir)
-    check_call(["git", "checkout", commit_hash], cwd=triton_basedir)
+    if release:
+        ver, rev, patch = version.split(".")
+        check_call(
+            ["git", "checkout", f"release/{ver}.{rev}.x"], cwd=triton_basedir
+        )
+    else:
+        check_call(["git", "checkout", commit_hash], cwd=triton_basedir)

     if build_conda:
         with open(triton_basedir / "meta.yaml", "w") as meta:
             print(
@@ -109,7 +113,7 @@ def build_triton(
             print("source:\n path: .\n", file=meta)
             print(
                 "build:\n string: py{{py}}\n number: 1\n script: cd python; "
-                "python setup.py install --record=record.txt\n",
+                "python setup.py install --single-version-externally-managed --record=record.txt\n",
                 " script_env:\n - MAX_JOBS\n",
                 file=meta,
             )
@@ -155,7 +159,7 @@ def build_triton(
         patch_init_py(
             triton_pythondir / "triton" / "__init__.py",
             version=f"{version}",
-            expected_version=ROCM_TRITION_VERSION if build_rocm else None,
+            expected_version=None,
         )

     if build_rocm:
@@ -164,7 +168,7 @@ def build_triton(
             triton_pythondir / "setup.py",
             name=triton_pkg_name,
             version=f"{version}",
-            expected_version=ROCM_TRITION_VERSION,
+            expected_version=None,
         )
         check_call("scripts/amd/setup_rocm_libs.sh", cwd=triton_basedir, shell=True)
         print("ROCm libraries setup for triton installation...")
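The new release code path in build_triton_wheel.py derives a release/X.Y.x branch name from the version string instead of checking out a pinned commit. The derivation, isolated as a sketch (assuming a MAJOR.MINOR.PATCH version, as the diff does):

    def release_branch(version: str) -> str:
        # "2.3.0" -> "release/2.3.x", mirroring the split in the diff above
        ver, rev, _patch = version.split(".")
        return f"release/{ver}.{rev}.x"

    assert release_branch("2.3.0") == "release/2.3.x"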
4 .github/scripts/filter_test_configs.py vendored
@@ -62,9 +62,9 @@ SUPPORTED_PERIODICAL_MODES: Dict[str, Callable[[Optional[str]], bool]] = {
 }

 # The link to the published list of disabled jobs
-DISABLED_JOBS_URL = "https://ossci-metrics.s3.amazonaws.com/disabled-jobs.json"
+DISABLED_JOBS_URL = "https://ossci-metrics.s3.amazonaws.com/disabled-jobs.json?versionId=qO7aEr.Og33PtLXfNq0j0yj.bbLC7SzR"
 # and unstable jobs
-UNSTABLE_JOBS_URL = "https://ossci-metrics.s3.amazonaws.com/unstable-jobs.json"
+UNSTABLE_JOBS_URL = "https://ossci-metrics.s3.amazonaws.com/unstable-jobs.json?versionId=7NhgpqKTtGXVUnL1C79KboTW_5qQx8y5"

 # Some constants used to handle disabled and unstable jobs
 JOB_NAME_SEP = "/"
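Appending a versionId query parameter freezes each URL at one specific S3 object version, so later uploads to the same key can no longer change what release CI downloads. Fetching such a pinned object needs nothing beyond the standard library; a sketch:

    import json
    from urllib.request import urlopen

    URL = (
        "https://ossci-metrics.s3.amazonaws.com/disabled-jobs.json"
        "?versionId=qO7aEr.Og33PtLXfNq0j0yj.bbLC7SzR"
    )
    # S3 serves exactly this object version, regardless of newer uploads.
    with urlopen(URL) as resp:
        disabled_jobs = json.load(resp)
    print(f"{len(disabled_jobs)} disabled-job entries")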
2 .github/templates/common.yml.j2 vendored
@@ -8,7 +8,7 @@
 # NOTE: If testing pytorch/builder changes you can change this variable to change what pytorch/builder reference
 # the binary builds will check out
 {%- set builder_repo = "pytorch/builder" -%}
-{%- set builder_branch = "main" -%}
+{%- set builder_branch = "release/2.3" -%}

 {%- macro concurrency(build_environment) -%}
 concurrency:
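These templates use !{{ ... }} for their own variables so that GitHub Actions' ${{ ... }} expressions pass through the renderer untouched. A minimal rendering sketch with Jinja2's custom delimiters (the real generator script's options may differ):

    from jinja2 import Environment

    # "!{{ }}" marks template variables; "${{ }}" is left for GitHub.
    env = Environment(variable_start_string="!{{", variable_end_string="}}")
    template = env.from_string(
        "uses: pytorch/builder@!{{ builder_branch }}\n"
        "if: ${{ github.event_name == 'push' }}\n"
    )
    print(template.render(builder_branch="release/2.3"))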
@@ -100,8 +100,8 @@ jobs:
       with:
         name: !{{ config["build_name"] }}
         path: "${{ runner.temp }}/artifacts/"
-    !{{ common.checkout(deep_clone=False, directory="pytorch") }}
-    !{{ common.checkout(deep_clone=False, directory="builder", repository=common.builder_repo, branch=common.builder_branch) }}
+    !{{ common.checkout(deep_clone=False, directory="pytorch", checkout_pr_head=False) }}
+    !{{ common.checkout(deep_clone=False, directory="builder", repository=common.builder_repo, branch=common.builder_branch, checkout_pr_head=False) }}
     - name: ROCm set GPU_FLAG
       run: |
         echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri --group-add video --group-add daemon" >> "${GITHUB_ENV}"
@@ -81,8 +81,8 @@ jobs:
           elif [ -d "/Applications/Xcode_13.3.1.app" ]; then
             echo "DEVELOPER_DIR=/Applications/Xcode_13.3.1.app/Contents/Developer" >> "${GITHUB_ENV}"
           fi
-    !{{ common.checkout(deep_clone=False, directory="pytorch") }}
-    !{{ common.checkout(deep_clone=False, directory="builder", repository=common.builder_repo, branch=common.builder_branch) }}
+    !{{ common.checkout(deep_clone=False, directory="pytorch", checkout_pr_head=False) }}
+    !{{ common.checkout(deep_clone=False, directory="builder", repository=common.builder_repo, branch=common.builder_branch, checkout_pr_head=False) }}
     - name: Install sccache (only for non-forked PRs, and pushes to trunk)
       uses: nick-fields/retry@v2.8.2
       if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
@@ -65,8 +65,8 @@ jobs:
     steps:
     !{{ common.setup_ec2_windows() }}
     !{{ set_runner_specific_vars() }}
-    !{{ common.checkout(deep_clone=False, directory="pytorch") }}
-    !{{ common.checkout(deep_clone=False, directory="builder", repository=common.builder_repo, branch=common.builder_branch) }}
+    !{{ common.checkout(deep_clone=False, directory="pytorch", checkout_pr_head=False) }}
+    !{{ common.checkout(deep_clone=False, directory="builder", repository=common.builder_repo, branch=common.builder_branch, checkout_pr_head=False) }}
     - name: Populate binary env
       shell: bash
       run: |
@@ -105,8 +105,8 @@ jobs:
       with:
         name: !{{ config["build_name"] }}
        path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
-    !{{ common.checkout(deep_clone=False, directory="pytorch") }}
-    !{{ common.checkout(deep_clone=False, directory="builder", repository=common.builder_repo, branch=common.builder_branch) }}
+    !{{ common.checkout(deep_clone=False, directory="pytorch", checkout_pr_head=False) }}
+    !{{ common.checkout(deep_clone=False, directory="builder", repository=common.builder_repo, branch=common.builder_branch, checkout_pr_head=False) }}
     - name: Populate binary env
       shell: bash
       run: |
12 .github/workflows/_android-build-test.yml vendored
@@ -37,7 +37,7 @@ jobs:
       keep-going: ${{ steps.filter.outputs.keep-going }}
     steps:
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3
         with:
           fetch-depth: 1
           submodules: false
@@ -59,25 +59,25 @@ jobs:
     runs-on: ${{ matrix.runner }}
     steps:
       - name: Setup SSH (Click me for login details)
-        uses: pytorch/test-infra/.github/actions/setup-ssh@main
+        uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.3
         with:
           github-secret: ${{ secrets.GITHUB_TOKEN }}

       # [see note: pytorch repo ref]
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3

       - name: Setup Linux
         uses: ./.github/actions/setup-linux

       - name: Calculate docker image
         id: calculate-docker-image
-        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
+        uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.3
         with:
           docker-image-name: ${{ inputs.docker-image-name }}

       - name: Pull docker image
-        uses: pytorch/test-infra/.github/actions/pull-docker-image@main
+        uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.3
         with:
           docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}

@@ -141,5 +141,5 @@ jobs:
         if: always()

       - name: Teardown Linux
-        uses: pytorch/test-infra/.github/actions/teardown-linux@main
+        uses: pytorch/test-infra/.github/actions/teardown-linux@release/2.3
         if: always()
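Every workflow diff from here on repeats one mechanical transformation: floating @main action refs become @release/2.3 and -main Docker tags become -2.3, so the release branch stops tracking moving targets. A regex sketch of that rewrite (illustrative only; PyTorch's actual release tooling may differ):

    import re

    RELEASE = "2.3"

    def pin_to_release(text: str) -> str:
        # uses: pytorch/test-infra/.github/actions/foo@main -> ...@release/2.3
        text = re.sub(r"(\buses:\s+\S+)@main\b", rf"\1@release/{RELEASE}", text)
        # DOCKER_IMAGE: pytorch/conda-builder:cpu-main -> ...:cpu-2.3
        text = re.sub(r"(DOCKER_IMAGE:\s+\S+)-main\b", rf"\1-{RELEASE}", text)
        return text

    print(pin_to_release("uses: pytorch/test-infra/.github/actions/setup-ssh@main"))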
12 .github/workflows/_android-full-build-test.yml vendored
@@ -37,7 +37,7 @@ jobs:
       keep-going: ${{ steps.filter.outputs.keep-going }}
     steps:
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3
         with:
           fetch-depth: 1
           submodules: false
@@ -59,25 +59,25 @@ jobs:
     runs-on: ${{ matrix.runner }}
     steps:
       - name: Setup SSH (Click me for login details)
-        uses: pytorch/test-infra/.github/actions/setup-ssh@main
+        uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.3
         with:
           github-secret: ${{ secrets.GITHUB_TOKEN }}

       # [see note: pytorch repo ref]
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3

       - name: Setup Linux
         uses: ./.github/actions/setup-linux

       - name: Calculate docker image
         id: calculate-docker-image
-        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
+        uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.3
         with:
           docker-image-name: ${{ inputs.docker-image-name }}

       - name: Pull docker image
-        uses: pytorch/test-infra/.github/actions/pull-docker-image@main
+        uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.3
         with:
           docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}

@@ -186,5 +186,5 @@ jobs:
         if: always()

       - name: Teardown Linux
-        uses: pytorch/test-infra/.github/actions/teardown-linux@main
+        uses: pytorch/test-infra/.github/actions/teardown-linux@release/2.3
         if: always()
14 .github/workflows/_bazel-build-test.yml vendored
@@ -42,7 +42,7 @@ jobs:
       reenabled-issues: ${{ steps.filter.outputs.reenabled-issues }}
     steps:
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3
         with:
           fetch-depth: 1
           submodules: false
@@ -64,30 +64,30 @@ jobs:
     runs-on: ${{ matrix.runner }}
     steps:
       - name: Setup SSH (Click me for login details)
-        uses: pytorch/test-infra/.github/actions/setup-ssh@main
+        uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.3
         with:
           github-secret: ${{ secrets.GITHUB_TOKEN }}

       # [see note: pytorch repo ref]
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3

       - name: Setup Linux
         uses: ./.github/actions/setup-linux

       - name: Calculate docker image
         id: calculate-docker-image
-        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
+        uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.3
         with:
           docker-image-name: ${{ inputs.docker-image-name }}

       - name: Pull docker image
-        uses: pytorch/test-infra/.github/actions/pull-docker-image@main
+        uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.3
         with:
           docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}

       - name: Install nvidia driver, nvidia-docker runtime, set GPU_FLAG
-        uses: pytorch/test-infra/.github/actions/setup-nvidia@main
+        uses: pytorch/test-infra/.github/actions/setup-nvidia@release/2.3
         if: ${{ inputs.cuda-version != 'cpu' }}

       - name: Output disk space left
@@ -196,5 +196,5 @@ jobs:
           file-suffix: bazel-${{ github.job }}_${{ steps.get-job-id.outputs.job-id }}

       - name: Teardown Linux
-        uses: pytorch/test-infra/.github/actions/teardown-linux@main
+        uses: pytorch/test-infra/.github/actions/teardown-linux@release/2.3
         if: always()
13 .github/workflows/_binary-build-linux.yml vendored
@@ -78,7 +78,7 @@ on:
 jobs:
   build:
     runs-on: ${{ inputs.runs_on }}
-    timeout-minutes: 180
+    timeout-minutes: 210
     env:
       PYTORCH_ROOT: ${{ inputs.PYTORCH_ROOT }}
       BUILDER_ROOT: ${{ inputs.BUILDER_ROOT }}
@@ -139,13 +139,13 @@ jobs:
         run: env

       - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
-        uses: pytorch/test-infra/.github/actions/setup-ssh@main
+        uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.3
         continue-on-error: true
         with:
           github-secret: ${{ secrets.github-token }}

       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3
         with:
           no-sudo: ${{ inputs.build_environment == 'linux-aarch64-binary-manywheel' }}

@@ -173,7 +173,6 @@ jobs:
       - name: Checkout PyTorch to pytorch dir
         uses: malfet/checkout@silent-checkout
         with:
           ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
           submodules: recursive
           path: pytorch
           quiet-checkout: true
@@ -187,7 +186,7 @@ jobs:
       - name: Checkout pytorch/builder to builder dir
         uses: malfet/checkout@silent-checkout
         with:
-          ref: main
+          ref: release/2.3
           submodules: recursive
           repository: pytorch/builder
           path: builder
@@ -213,7 +212,7 @@ jobs:

       - name: Pull Docker image
         if: ${{ steps.filter.outputs.is-test-matrix-empty == 'False' }}
-        uses: pytorch/test-infra/.github/actions/pull-docker-image@main
+        uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.3
         with:
           docker-image: ${{ inputs.DOCKER_IMAGE }}

@@ -270,7 +269,7 @@ jobs:

       - name: Teardown Linux
         if: always()
-        uses: pytorch/test-infra/.github/actions/teardown-linux@main
+        uses: pytorch/test-infra/.github/actions/teardown-linux@release/2.3

       - name: Chown workspace
         if: always()
13 .github/workflows/_binary-test-linux.yml vendored
@@ -127,14 +127,14 @@ jobs:
         } >> "${GITHUB_ENV}"

       - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
-        uses: pytorch/test-infra/.github/actions/setup-ssh@main
+        uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.3
         continue-on-error: true
         with:
           github-secret: ${{ secrets.github-token }}

       # Setup the environment
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3
         with:
           no-sudo: ${{ inputs.build_environment == 'linux-aarch64-binary-manywheel' }}

@@ -155,7 +155,6 @@ jobs:
       - name: Checkout PyTorch to pytorch dir
         uses: malfet/checkout@silent-checkout
         with:
           ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
           submodules: recursive
           path: pytorch

@@ -168,7 +167,7 @@ jobs:
       - name: Checkout pytorch/builder to builder dir
         uses: malfet/checkout@silent-checkout
         with:
-          ref: main
+          ref: release/2.3
           submodules: recursive
           repository: pytorch/builder
           path: builder
@@ -199,12 +198,12 @@ jobs:
           path: "${{ runner.temp }}/artifacts/"

       - name: Install nvidia driver, nvidia-docker runtime, set GPU_FLAG
-        uses: pytorch/test-infra/.github/actions/setup-nvidia@main
+        uses: pytorch/test-infra/.github/actions/setup-nvidia@release/2.3
         if: ${{ inputs.GPU_ARCH_TYPE == 'cuda' && steps.filter.outputs.is-test-matrix-empty == 'False' }}

       - name: Pull Docker image
         if: ${{ steps.filter.outputs.is-test-matrix-empty == 'False' }}
-        uses: pytorch/test-infra/.github/actions/pull-docker-image@main
+        uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.3
         with:
           docker-image: ${{ inputs.DOCKER_IMAGE }}

@@ -214,7 +213,7 @@ jobs:

       - name: Teardown Linux
         if: always()
-        uses: pytorch/test-infra/.github/actions/teardown-linux@main
+        uses: pytorch/test-infra/.github/actions/teardown-linux@release/2.3

       - name: Chown workspace
         if: always()
2 .github/workflows/_binary-upload.yml vendored
@@ -95,7 +95,7 @@ jobs:
       SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
     steps:
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3
         with:
           no-sudo: true
6 .github/workflows/_buck-build-test.yml vendored
@@ -23,7 +23,7 @@ jobs:
       keep-going: ${{ steps.filter.outputs.keep-going }}
     steps:
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3
         with:
           fetch-depth: 1
           submodules: false
@@ -44,7 +44,7 @@ jobs:
     runs-on: ${{ matrix.runner }}
     steps:
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3

       - name: Set up JDK 8
         uses: actions/setup-java@v3
@@ -53,7 +53,7 @@ jobs:
           distribution: 'temurin'

       - name: Setup miniconda
-        uses: pytorch/test-infra/.github/actions/setup-miniconda@main
+        uses: pytorch/test-infra/.github/actions/setup-miniconda@release/2.3
         with:
           python-version: 3.8
           environment-file: .github/requirements/conda-env-${{ runner.os }}-${{ runner.arch }}
10 .github/workflows/_docs.yml vendored
@@ -66,7 +66,7 @@ jobs:
     name: build-docs-${{ matrix.docs_type }}-${{ inputs.push }}
     steps:
       - name: Setup SSH (Click me for login details)
-        uses: pytorch/test-infra/.github/actions/setup-ssh@main
+        uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.3
         with:
           github-secret: ${{ secrets.GITHUB_TOKEN }}
           instructions: |
@@ -77,19 +77,19 @@ jobs:

       # [see note: pytorch repo ref]
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3

       - name: Setup Linux
         uses: ./.github/actions/setup-linux

       - name: Calculate docker image
         id: calculate-docker-image
-        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
+        uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.3
         with:
           docker-image-name: ${{ inputs.docker-image }}

       - name: Pull docker image
-        uses: pytorch/test-infra/.github/actions/pull-docker-image@main
+        uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.3
         with:
           docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}

@@ -187,5 +187,5 @@ jobs:
           s3-prefix: pytorch/pytorch/${{ github.event.pull_request.number }}/functorchdocs

       - name: Teardown Linux
-        uses: pytorch/test-infra/.github/actions/teardown-linux@main
+        uses: pytorch/test-infra/.github/actions/teardown-linux@release/2.3
         if: always()
6 .github/workflows/_ios-build-test.yml vendored
@@ -46,7 +46,7 @@ jobs:
       keep-going: ${{ steps.filter.outputs.keep-going }}
     steps:
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3
         with:
           fetch-depth: 1
           submodules: false
@@ -80,7 +80,7 @@ jobs:
     steps:
       # [see note: pytorch repo ref]
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3

       - name: Populate CI build options
         shell: bash
@@ -102,7 +102,7 @@ jobs:
           brew install libtool

       - name: Setup miniconda for iOS
-        uses: pytorch/test-infra/.github/actions/setup-miniconda@main
+        uses: pytorch/test-infra/.github/actions/setup-miniconda@release/2.3
         with:
           python-version: "3.9"
           environment-file: .github/requirements/conda-env-iOS.txt
10 .github/workflows/_linux-build.yml vendored
@@ -73,7 +73,7 @@ jobs:
       test-matrix: ${{ steps.filter.outputs.test-matrix }}
     steps:
       - name: Setup SSH (Click me for login details)
-        uses: pytorch/test-infra/.github/actions/setup-ssh@main
+        uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.3
         with:
           github-secret: ${{ secrets.GITHUB_TOKEN }}

@@ -82,14 +82,14 @@ jobs:
       # checkout because when we run this action we don't *have* a local
       # checkout. In other cases you should prefer a local checkout.
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3

       - name: Setup Linux
         uses: ./.github/actions/setup-linux

       - name: Calculate docker image
         id: calculate-docker-image
-        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
+        uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.3
         with:
           docker-image-name: ${{ inputs.docker-image-name }}

@@ -103,7 +103,7 @@ jobs:
           echo "docker pull ghcr.io/pytorch/ci-image:${tag/:/-}"

       - name: Pull docker image
-        uses: pytorch/test-infra/.github/actions/pull-docker-image@main
+        uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.3
         with:
           docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}

@@ -209,5 +209,5 @@ jobs:
           path: sccache-stats-*.json

       - name: Teardown Linux
-        uses: pytorch/test-infra/.github/actions/teardown-linux@main
+        uses: pytorch/test-infra/.github/actions/teardown-linux@release/2.3
         if: always()
12 .github/workflows/_linux-test.yml vendored
@@ -57,7 +57,7 @@ jobs:
     timeout-minutes: ${{ matrix.mem_leak_check == 'mem_leak_check' && 600 || inputs.timeout-minutes }}
     steps:
       - name: Setup SSH (Click me for login details)
-        uses: pytorch/test-infra/.github/actions/setup-ssh@main
+        uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.3
         if: ${{ !contains(matrix.runner, 'gcp.a100') }}
         with:
           github-secret: ${{ secrets.GITHUB_TOKEN }}
@@ -66,14 +66,14 @@ jobs:
             docker exec -it $(docker container ps --format '{{.ID}}') bash

       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3

       - name: Setup Linux
         uses: ./.github/actions/setup-linux

       - name: Calculate docker image
         id: calculate-docker-image
-        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
+        uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.3
         with:
           docker-image-name: ${{ inputs.docker-image }}

@@ -87,13 +87,13 @@ jobs:
           echo "docker pull ghcr.io/pytorch/ci-image:${tag/:/-}"

       - name: Pull docker image
-        uses: pytorch/test-infra/.github/actions/pull-docker-image@main
+        uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.3
         with:
           docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}

       - name: Install nvidia driver, nvidia-docker runtime, set GPU_FLAG
         id: install-nvidia-driver
-        uses: pytorch/test-infra/.github/actions/setup-nvidia@main
+        uses: pytorch/test-infra/.github/actions/setup-nvidia@release/2.3
         if: contains(inputs.build-environment, 'cuda') && !contains(matrix.config, 'nogpu')

       - name: Lock NVIDIA A100 40GB Frequency
@@ -307,7 +307,7 @@ jobs:
           path: ./**/core.[1-9]*

       - name: Teardown Linux
-        uses: pytorch/test-infra/.github/actions/teardown-linux@main
+        uses: pytorch/test-infra/.github/actions/teardown-linux@release/2.3
         if: always()

       # NB: We are currently having an intermittent GPU-related issue on G5 runners with
10 .github/workflows/_mac-build.yml vendored
@@ -71,11 +71,11 @@ jobs:
       test-matrix: ${{ steps.filter.outputs.test-matrix }}
     steps:
       - name: Clean up disk space before running MacOS workflow
-        uses: pytorch/test-infra/.github/actions/check-disk-space@main
+        uses: pytorch/test-infra/.github/actions/check-disk-space@release/2.3

       # [see note: pytorch repo ref]
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3

       - name: Set xcode version
         env:
@@ -87,7 +87,7 @@ jobs:

       - name: Setup miniconda
         if: inputs.environment-file == ''
-        uses: pytorch/test-infra/.github/actions/setup-miniconda@main
+        uses: pytorch/test-infra/.github/actions/setup-miniconda@release/2.3
         with:
           python-version: ${{ inputs.python-version }}
           environment-file: .github/requirements/conda-env-${{ runner.os }}-${{ runner.arch }}
@@ -97,7 +97,7 @@ jobs:
       # environment even though the arch is x86-64
       - name: Setup miniconda using the provided environment file
         if: inputs.environment-file != ''
-        uses: pytorch/test-infra/.github/actions/setup-miniconda@main
+        uses: pytorch/test-infra/.github/actions/setup-miniconda@release/2.3
         with:
           python-version: ${{ inputs.python-version }}
           environment-file: ${{ inputs.environment-file }}
@@ -207,4 +207,4 @@ jobs:
       - name: Clean up disk space
         if: always()
         continue-on-error: true
-        uses: pytorch/test-infra/.github/actions/check-disk-space@main
+        uses: pytorch/test-infra/.github/actions/check-disk-space@release/2.3
6 .github/workflows/_mac-test-mps.yml vendored
@@ -40,7 +40,7 @@ jobs:
       reenabled-issues: ${{ steps.filter.outputs.reenabled-issues }}
     steps:
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3
         with:
           submodules: false

@@ -81,7 +81,7 @@ jobs:
           use-gha: true

       - name: Setup miniconda
-        uses: pytorch/test-infra/.github/actions/setup-miniconda@main
+        uses: pytorch/test-infra/.github/actions/setup-miniconda@release/2.3
         with:
           python-version: ${{ inputs.python-version }}
           environment-file: .github/requirements/conda-env-${{ runner.os }}-${{ runner.arch }}
@@ -159,4 +159,4 @@ jobs:
       - name: Clean up disk space
         if: always()
         continue-on-error: true
-        uses: pytorch/test-infra/.github/actions/check-disk-space@main
+        uses: pytorch/test-infra/.github/actions/check-disk-space@release/2.3
8 .github/workflows/_mac-test.yml vendored
@@ -79,11 +79,11 @@ jobs:
           done

       - name: Clean up disk space before running MacOS workflow
-        uses: pytorch/test-infra/.github/actions/check-disk-space@main
+        uses: pytorch/test-infra/.github/actions/check-disk-space@release/2.3

       # [see note: pytorch repo ref]
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3

       - name: Download build artifacts
         uses: ./.github/actions/download-build-artifacts
@@ -98,7 +98,7 @@ jobs:
           use-gha: true

       - name: Setup miniconda
-        uses: pytorch/test-infra/.github/actions/setup-miniconda@main
+        uses: pytorch/test-infra/.github/actions/setup-miniconda@release/2.3
         with:
           python-version: ${{ inputs.python-version }}
           environment-file: .github/requirements/conda-env-${{ runner.os }}-${{ runner.arch }}
@@ -227,4 +227,4 @@ jobs:
       - name: Clean up disk space
         if: always()
         continue-on-error: true
-        uses: pytorch/test-infra/.github/actions/check-disk-space@main
+        uses: pytorch/test-infra/.github/actions/check-disk-space@release/2.3
6 .github/workflows/_rocm-test.yml vendored
@@ -58,7 +58,7 @@ jobs:
     steps:
       # [see note: pytorch repo ref]
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3
         with:
           no-sudo: true

@@ -80,12 +80,12 @@ jobs:

       - name: Calculate docker image
         id: calculate-docker-image
-        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
+        uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.3
         with:
           docker-image-name: ${{ inputs.docker-image }}

       - name: Pull docker image
-        uses: pytorch/test-infra/.github/actions/pull-docker-image@main
+        uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.3
         with:
           docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
6 .github/workflows/_run_android_tests.yml vendored
@@ -23,7 +23,7 @@ jobs:
       keep-going: ${{ steps.filter.outputs.keep-going }}
     steps:
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3
         with:
           fetch-depth: 1
           submodules: false
@@ -54,10 +54,10 @@ jobs:
       SUPPORT_ABI: '${{ matrix.support_abi }}'
     steps:
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3

       - name: Setup miniconda
-        uses: pytorch/test-infra/.github/actions/setup-miniconda@main
+        uses: pytorch/test-infra/.github/actions/setup-miniconda@release/2.3
         with:
           python-version: 3.8
           environment-file: .github/requirements/conda-env-${{ runner.os }}-${{ runner.arch }}.txt
6 .github/workflows/_win-build.yml vendored
@@ -60,10 +60,10 @@ jobs:
           git config --global core.fsmonitor false

       - name: Clean up leftover processes on non-ephemeral Windows runner
-        uses: pytorch/test-infra/.github/actions/cleanup-runner@main
+        uses: pytorch/test-infra/.github/actions/cleanup-runner@release/2.3

       - name: Setup SSH (Click me for login details)
-        uses: pytorch/test-infra/.github/actions/setup-ssh@main
+        uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.3
         with:
           github-secret: ${{ secrets.GITHUB_TOKEN }}
           instructions: |
@@ -78,7 +78,7 @@ jobs:

       # [see note: pytorch repo ref]
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3
         with:
           no-sudo: true
6 .github/workflows/_win-test.yml vendored
@@ -54,10 +54,10 @@ jobs:
           git config --global core.fsmonitor false

       - name: Clean up leftover processes on non-ephemeral Windows runner
-        uses: pytorch/test-infra/.github/actions/cleanup-runner@main
+        uses: pytorch/test-infra/.github/actions/cleanup-runner@release/2.3

       - name: Setup SSH (Click me for login details)
-        uses: pytorch/test-infra/.github/actions/setup-ssh@main
+        uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.3
         with:
           github-secret: ${{ secrets.GITHUB_TOKEN }}
           instructions: |
@@ -73,7 +73,7 @@ jobs:

       # [see note: pytorch repo ref]
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3
         with:
           no-sudo: true
6 .github/workflows/_xpu-test.yml vendored
@@ -54,7 +54,7 @@ jobs:
     steps:
       # [see note: pytorch repo ref]
      - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3

       - name: Setup XPU
         uses: ./.github/actions/setup-xpu
@@ -72,12 +72,12 @@ jobs:

       - name: Calculate docker image
         id: calculate-docker-image
-        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
+        uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.3
         with:
           docker-image-name: ${{ inputs.docker-image }}

       - name: Pull docker image
-        uses: pytorch/test-infra/.github/actions/pull-docker-image@main
+        uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.3
         with:
           docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
18 .github/workflows/build-triton-wheel.yml vendored
@@ -3,7 +3,7 @@ name: Build Triton wheels
 on:
   push:
     branches:
-      - main
+      - release/2.3
     tags:
       # NOTE: Binary build pipelines should only get triggered on release candidate builds
       # Release candidate tags look like: v1.11.0-rc1
@@ -47,12 +47,12 @@ jobs:
       BUILD_DEVICE: ${{ matrix.device }}
     steps:
       - name: Setup SSH (Click me for login details)
-        uses: pytorch/test-infra/.github/actions/setup-ssh@main
+        uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.3
         with:
           github-secret: ${{ secrets.GITHUB_TOKEN }}

       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3
         with:
           submodules: false

@@ -60,7 +60,7 @@ jobs:
         uses: ./.github/actions/setup-linux

       - name: Pull Docker image
-        uses: pytorch/test-infra/.github/actions/pull-docker-image@main
+        uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.3
         with:
           docker-image: ${{ env.DOCKER_IMAGE }}

@@ -125,7 +125,7 @@ jobs:
           path: ${{ runner.temp }}/artifacts/*

       - name: Teardown Linux
-        uses: pytorch/test-infra/.github/actions/teardown-linux@main
+        uses: pytorch/test-infra/.github/actions/teardown-linux@release/2.3
         if: always()

   upload-wheel:
@@ -203,12 +203,12 @@ jobs:
       PY_VERS: ${{ matrix.py_vers }}
     steps:
       - name: Setup SSH (Click me for login details)
-        uses: pytorch/test-infra/.github/actions/setup-ssh@main
+        uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.3
         with:
           github-secret: ${{ secrets.GITHUB_TOKEN }}

       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3
         with:
           submodules: false

@@ -216,7 +216,7 @@ jobs:
         uses: ./.github/actions/setup-linux

       - name: Pull Docker image
-        uses: pytorch/test-infra/.github/actions/pull-docker-image@main
+        uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.3
         with:
           docker-image: ${{ env.DOCKER_IMAGE }}

@@ -252,7 +252,7 @@ jobs:
           path: ${{ runner.temp }}/artifacts/*

       - name: Teardown Linux
-        uses: pytorch/test-infra/.github/actions/teardown-linux@main
+        uses: pytorch/test-infra/.github/actions/teardown-linux@release/2.3
         if: always()

   upload-conda:
2 .github/workflows/check-labels.yml vendored
@@ -31,7 +31,7 @@ jobs:
     runs-on: linux.20_04.4x
     steps:
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3
         with:
           submodules: false
           fetch-depth: 1
@@ -11,7 +11,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3

       - name: Run close_nonexistent_disable_issues.py
         env:
8 .github/workflows/docker-builds.yml vendored
@@ -74,21 +74,21 @@ jobs:
       # [see note: pytorch repo ref]
       # deep clone (fetch-depth 0) required for git merge-base
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3

       - name: Setup Linux
         uses: ./.github/actions/setup-linux

       - name: Build docker image
         id: build-docker-image
-        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
+        uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.3
         with:
           docker-image-name: ${{ matrix.docker-image-name }}
           always-rebuild: true
           push: true

       - name: Pull docker image
-        uses: pytorch/test-infra/.github/actions/pull-docker-image@main
+        uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.3
         with:
           docker-image: ${{ steps.build-docker-image.outputs.docker-image }}

@@ -120,5 +120,5 @@ jobs:
         if: always()

       - name: Teardown Linux
-        uses: pytorch/test-infra/.github/actions/teardown-linux@main
+        uses: pytorch/test-infra/.github/actions/teardown-linux@release/2.3
         if: always()
6 .github/workflows/docker-release.yml vendored
@@ -40,7 +40,7 @@ jobs:
       matrix: ${{ steps.generate-matrix.outputs.matrix }}
     steps:
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3
         with:
           fetch-depth: 1
           submodules: true
@@ -68,7 +68,7 @@ jobs:
       CUDNN_VERSION: ${{ matrix.cudnn_version }}
     steps:
       - name: Setup SSH (Click me for login details)
-        uses: pytorch/test-infra/.github/actions/setup-ssh@main
+        uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.3
         with:
           github-secret: ${{ secrets.GITHUB_TOKEN }}
       # [see note: pytorch repo ref]
@@ -141,5 +141,5 @@ jobs:
             ghcr.io/pytorch/pytorch-nightly:latest
           docker push ghcr.io/pytorch/pytorch-nightly:latest
       - name: Teardown Linux
-        uses: pytorch/test-infra/.github/actions/teardown-linux@main
+        uses: pytorch/test-infra/.github/actions/teardown-linux@release/2.3
         if: always()
30 .github/workflows/generated-linux-aarch64-binary-manywheel-nightly.yml generated vendored
@@ -48,7 +48,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-aarch64
-      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-main
+      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-2.3
      DESIRED_PYTHON: "3.8"
       runs_on: linux.arm64.2xlarge
       ALPINE_IMAGE: "arm64v8/alpine"
@@ -69,7 +69,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-aarch64
-      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-main
+      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-2.3
       DESIRED_PYTHON: "3.8"
       build_name: manywheel-py3_8-cpu-aarch64
       build_environment: linux-aarch64-binary-manywheel
@@ -91,7 +91,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-aarch64
-      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-main
+      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-2.3
       DESIRED_PYTHON: "3.8"
       build_name: manywheel-py3_8-cpu-aarch64
     secrets:
@@ -111,7 +111,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-aarch64
-      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-main
+      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-2.3
       DESIRED_PYTHON: "3.9"
       runs_on: linux.arm64.2xlarge
       ALPINE_IMAGE: "arm64v8/alpine"
@@ -132,7 +132,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-aarch64
-      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-main
+      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-2.3
       DESIRED_PYTHON: "3.9"
       build_name: manywheel-py3_9-cpu-aarch64
       build_environment: linux-aarch64-binary-manywheel
@@ -154,7 +154,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-aarch64
-      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-main
+      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-2.3
       DESIRED_PYTHON: "3.9"
       build_name: manywheel-py3_9-cpu-aarch64
     secrets:
@@ -174,7 +174,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-aarch64
-      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-main
+      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-2.3
       DESIRED_PYTHON: "3.10"
       runs_on: linux.arm64.2xlarge
       ALPINE_IMAGE: "arm64v8/alpine"
@@ -195,7 +195,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-aarch64
-      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-main
+      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-2.3
       DESIRED_PYTHON: "3.10"
       build_name: manywheel-py3_10-cpu-aarch64
       build_environment: linux-aarch64-binary-manywheel
@@ -217,7 +217,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-aarch64
-      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-main
+      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-2.3
       DESIRED_PYTHON: "3.10"
       build_name: manywheel-py3_10-cpu-aarch64
     secrets:
@@ -237,7 +237,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-aarch64
-      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-main
+      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-2.3
       DESIRED_PYTHON: "3.11"
       runs_on: linux.arm64.2xlarge
       ALPINE_IMAGE: "arm64v8/alpine"
@@ -258,7 +258,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-aarch64
-      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-main
+      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-2.3
       DESIRED_PYTHON: "3.11"
       build_name: manywheel-py3_11-cpu-aarch64
       build_environment: linux-aarch64-binary-manywheel
@@ -280,7 +280,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-aarch64
-      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-main
+      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-2.3
       DESIRED_PYTHON: "3.11"
       build_name: manywheel-py3_11-cpu-aarch64
     secrets:
@@ -300,7 +300,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-aarch64
-      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-main
+      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-2.3
       DESIRED_PYTHON: "3.12"
       runs_on: linux.arm64.2xlarge
       ALPINE_IMAGE: "arm64v8/alpine"
@@ -321,7 +321,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-aarch64
-      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-main
+      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-2.3
       DESIRED_PYTHON: "3.12"
       build_name: manywheel-py3_12-cpu-aarch64
       build_environment: linux-aarch64-binary-manywheel
@@ -343,7 +343,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-aarch64
-      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-main
+      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-2.3
       DESIRED_PYTHON: "3.12"
       build_name: manywheel-py3_12-cpu-aarch64
     secrets:
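The generated workflows above encode the same release in the builder image tags. How such a tag is assembled, as a tiny illustrative sketch (names are hypothetical):

    def builder_image(repo: str, arch: str, release: str) -> str:
        # ("pytorch/manylinuxaarch64-builder", "cpu-aarch64", "2.3")
        #   -> "pytorch/manylinuxaarch64-builder:cpu-aarch64-2.3"
        return f"{repo}:{arch}-{release}"

    assert (
        builder_image("pytorch/manylinuxaarch64-builder", "cpu-aarch64", "2.3")
        == "pytorch/manylinuxaarch64-builder:cpu-aarch64-2.3"
    )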
90 .github/workflows/generated-linux-binary-conda-nightly.yml generated vendored
@@ -48,7 +48,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu
-      DOCKER_IMAGE: pytorch/conda-builder:cpu-main
+      DOCKER_IMAGE: pytorch/conda-builder:cpu-2.3
       DESIRED_PYTHON: "3.8"
       build_name: conda-py3_8-cpu
       build_environment: linux-binary-conda
@@ -66,7 +66,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu
-      DOCKER_IMAGE: pytorch/conda-builder:cpu-main
+      DOCKER_IMAGE: pytorch/conda-builder:cpu-2.3
       DESIRED_PYTHON: "3.8"
       build_name: conda-py3_8-cpu
       build_environment: linux-binary-conda
@@ -87,7 +87,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu
-      DOCKER_IMAGE: pytorch/conda-builder:cpu-main
+      DOCKER_IMAGE: pytorch/conda-builder:cpu-2.3
       DESIRED_PYTHON: "3.8"
       build_name: conda-py3_8-cpu
     secrets:
@@ -108,7 +108,7 @@ jobs:
       DESIRED_CUDA: cu118
       GPU_ARCH_VERSION: 11.8
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-main
+      DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-2.3
       DESIRED_PYTHON: "3.8"
       runs_on: linux.24xlarge
       build_name: conda-py3_8-cuda11_8
@@ -128,7 +128,7 @@ jobs:
       DESIRED_CUDA: cu118
       GPU_ARCH_VERSION: 11.8
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-main
+      DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-2.3
       DESIRED_PYTHON: "3.8"
       build_name: conda-py3_8-cuda11_8
       build_environment: linux-binary-conda
@@ -150,7 +150,7 @@ jobs:
       DESIRED_CUDA: cu118
       GPU_ARCH_VERSION: 11.8
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-main
+      DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-2.3
       DESIRED_PYTHON: "3.8"
       build_name: conda-py3_8-cuda11_8
     secrets:
@@ -171,7 +171,7 @@ jobs:
       DESIRED_CUDA: cu121
       GPU_ARCH_VERSION: 12.1
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-main
+      DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-2.3
       DESIRED_PYTHON: "3.8"
       runs_on: linux.24xlarge
       build_name: conda-py3_8-cuda12_1
@@ -191,7 +191,7 @@ jobs:
       DESIRED_CUDA: cu121
       GPU_ARCH_VERSION: 12.1
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-main
+      DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-2.3
       DESIRED_PYTHON: "3.8"
       build_name: conda-py3_8-cuda12_1
       build_environment: linux-binary-conda
@@ -213,7 +213,7 @@ jobs:
       DESIRED_CUDA: cu121
       GPU_ARCH_VERSION: 12.1
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-main
+      DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-2.3
       DESIRED_PYTHON: "3.8"
       build_name: conda-py3_8-cuda12_1
     secrets:
@@ -233,7 +233,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu
-      DOCKER_IMAGE: pytorch/conda-builder:cpu-main
+      DOCKER_IMAGE: pytorch/conda-builder:cpu-2.3
       DESIRED_PYTHON: "3.9"
       build_name: conda-py3_9-cpu
       build_environment: linux-binary-conda
@@ -251,7 +251,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu
-      DOCKER_IMAGE: pytorch/conda-builder:cpu-main
+      DOCKER_IMAGE: pytorch/conda-builder:cpu-2.3
       DESIRED_PYTHON: "3.9"
       build_name: conda-py3_9-cpu
       build_environment: linux-binary-conda
@@ -272,7 +272,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu
-      DOCKER_IMAGE: pytorch/conda-builder:cpu-main
+      DOCKER_IMAGE: pytorch/conda-builder:cpu-2.3
       DESIRED_PYTHON: "3.9"
       build_name: conda-py3_9-cpu
     secrets:
@@ -293,7 +293,7 @@ jobs:
       DESIRED_CUDA: cu118
       GPU_ARCH_VERSION: 11.8
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-main
+      DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-2.3
       DESIRED_PYTHON: "3.9"
       runs_on: linux.24xlarge
       build_name: conda-py3_9-cuda11_8
@@ -313,7 +313,7 @@ jobs:
       DESIRED_CUDA: cu118
       GPU_ARCH_VERSION: 11.8
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-main
+      DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-2.3
       DESIRED_PYTHON: "3.9"
       build_name: conda-py3_9-cuda11_8
       build_environment: linux-binary-conda
@@ -335,7 +335,7 @@ jobs:
       DESIRED_CUDA: cu118
       GPU_ARCH_VERSION: 11.8
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-main
+      DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-2.3
       DESIRED_PYTHON: "3.9"
       build_name: conda-py3_9-cuda11_8
     secrets:
@@ -356,7 +356,7 @@ jobs:
       DESIRED_CUDA: cu121
       GPU_ARCH_VERSION: 12.1
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-main
+      DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-2.3
       DESIRED_PYTHON: "3.9"
       runs_on: linux.24xlarge
       build_name: conda-py3_9-cuda12_1
@@ -376,7 +376,7 @@ jobs:
       DESIRED_CUDA: cu121
       GPU_ARCH_VERSION: 12.1
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-main
+      DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-2.3
       DESIRED_PYTHON: "3.9"
       build_name: conda-py3_9-cuda12_1
       build_environment: linux-binary-conda
@@ -398,7 +398,7 @@ jobs:
       DESIRED_CUDA: cu121
       GPU_ARCH_VERSION: 12.1
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-main
+      DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-2.3
       DESIRED_PYTHON: "3.9"
       build_name: conda-py3_9-cuda12_1
     secrets:
@@ -418,7 +418,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu
-      DOCKER_IMAGE: pytorch/conda-builder:cpu-main
+      DOCKER_IMAGE: pytorch/conda-builder:cpu-2.3
       DESIRED_PYTHON: "3.10"
       build_name: conda-py3_10-cpu
       build_environment: linux-binary-conda
@@ -436,7 +436,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu
-      DOCKER_IMAGE: pytorch/conda-builder:cpu-main
+      DOCKER_IMAGE: pytorch/conda-builder:cpu-2.3
       DESIRED_PYTHON: "3.10"
       build_name: conda-py3_10-cpu
       build_environment: linux-binary-conda
@@ -457,7 +457,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu
-      DOCKER_IMAGE: pytorch/conda-builder:cpu-main
+      DOCKER_IMAGE: pytorch/conda-builder:cpu-2.3
       DESIRED_PYTHON: "3.10"
       build_name: conda-py3_10-cpu
     secrets:
@@ -478,7 +478,7 @@ jobs:
       DESIRED_CUDA: cu118
       GPU_ARCH_VERSION: 11.8
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-main
+      DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-2.3
       DESIRED_PYTHON: "3.10"
       runs_on: linux.24xlarge
       build_name: conda-py3_10-cuda11_8
@@ -498,7 +498,7 @@ jobs:
       DESIRED_CUDA: cu118
       GPU_ARCH_VERSION: 11.8
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-main
+      DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-2.3
       DESIRED_PYTHON: "3.10"
       build_name: conda-py3_10-cuda11_8
       build_environment: linux-binary-conda
@@ -520,7 +520,7 @@ jobs:
       DESIRED_CUDA: cu118
       GPU_ARCH_VERSION: 11.8
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-main
+      DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-2.3
       DESIRED_PYTHON: "3.10"
       build_name: conda-py3_10-cuda11_8
     secrets:
@@ -541,7 +541,7 @@ jobs:
       DESIRED_CUDA: cu121
       GPU_ARCH_VERSION: 12.1
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-main
+      DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-2.3
       DESIRED_PYTHON: "3.10"
       runs_on: linux.24xlarge
       build_name: conda-py3_10-cuda12_1
@@ -561,7 +561,7 @@ jobs:
       DESIRED_CUDA: cu121
       GPU_ARCH_VERSION: 12.1
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-main
+      DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-2.3
       DESIRED_PYTHON: "3.10"
       build_name: conda-py3_10-cuda12_1
       build_environment: linux-binary-conda
|
||||
@ -583,7 +583,7 @@ jobs:
|
||||
DESIRED_CUDA: cu121
|
||||
GPU_ARCH_VERSION: 12.1
|
||||
GPU_ARCH_TYPE: cuda
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-main
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-2.3
|
||||
DESIRED_PYTHON: "3.10"
|
||||
build_name: conda-py3_10-cuda12_1
|
||||
secrets:
|
||||
@ -603,7 +603,7 @@ jobs:
|
||||
# favor of GPU_ARCH_VERSION
|
||||
DESIRED_CUDA: cpu
|
||||
GPU_ARCH_TYPE: cpu
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cpu-main
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cpu-2.3
|
||||
DESIRED_PYTHON: "3.11"
|
||||
build_name: conda-py3_11-cpu
|
||||
build_environment: linux-binary-conda
|
||||
@ -621,7 +621,7 @@ jobs:
|
||||
# favor of GPU_ARCH_VERSION
|
||||
DESIRED_CUDA: cpu
|
||||
GPU_ARCH_TYPE: cpu
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cpu-main
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cpu-2.3
|
||||
DESIRED_PYTHON: "3.11"
|
||||
build_name: conda-py3_11-cpu
|
||||
build_environment: linux-binary-conda
|
||||
@ -642,7 +642,7 @@ jobs:
|
||||
# favor of GPU_ARCH_VERSION
|
||||
DESIRED_CUDA: cpu
|
||||
GPU_ARCH_TYPE: cpu
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cpu-main
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cpu-2.3
|
||||
DESIRED_PYTHON: "3.11"
|
||||
build_name: conda-py3_11-cpu
|
||||
secrets:
|
||||
@ -663,7 +663,7 @@ jobs:
|
||||
DESIRED_CUDA: cu118
|
||||
GPU_ARCH_VERSION: 11.8
|
||||
GPU_ARCH_TYPE: cuda
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-main
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-2.3
|
||||
DESIRED_PYTHON: "3.11"
|
||||
runs_on: linux.24xlarge
|
||||
build_name: conda-py3_11-cuda11_8
|
||||
@ -683,7 +683,7 @@ jobs:
|
||||
DESIRED_CUDA: cu118
|
||||
GPU_ARCH_VERSION: 11.8
|
||||
GPU_ARCH_TYPE: cuda
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-main
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-2.3
|
||||
DESIRED_PYTHON: "3.11"
|
||||
build_name: conda-py3_11-cuda11_8
|
||||
build_environment: linux-binary-conda
|
||||
@ -705,7 +705,7 @@ jobs:
|
||||
DESIRED_CUDA: cu118
|
||||
GPU_ARCH_VERSION: 11.8
|
||||
GPU_ARCH_TYPE: cuda
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-main
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-2.3
|
||||
DESIRED_PYTHON: "3.11"
|
||||
build_name: conda-py3_11-cuda11_8
|
||||
secrets:
|
||||
@ -726,7 +726,7 @@ jobs:
|
||||
DESIRED_CUDA: cu121
|
||||
GPU_ARCH_VERSION: 12.1
|
||||
GPU_ARCH_TYPE: cuda
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-main
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-2.3
|
||||
DESIRED_PYTHON: "3.11"
|
||||
runs_on: linux.24xlarge
|
||||
build_name: conda-py3_11-cuda12_1
|
||||
@ -746,7 +746,7 @@ jobs:
|
||||
DESIRED_CUDA: cu121
|
||||
GPU_ARCH_VERSION: 12.1
|
||||
GPU_ARCH_TYPE: cuda
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-main
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-2.3
|
||||
DESIRED_PYTHON: "3.11"
|
||||
build_name: conda-py3_11-cuda12_1
|
||||
build_environment: linux-binary-conda
|
||||
@ -768,7 +768,7 @@ jobs:
|
||||
DESIRED_CUDA: cu121
|
||||
GPU_ARCH_VERSION: 12.1
|
||||
GPU_ARCH_TYPE: cuda
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-main
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-2.3
|
||||
DESIRED_PYTHON: "3.11"
|
||||
build_name: conda-py3_11-cuda12_1
|
||||
secrets:
|
||||
@ -788,7 +788,7 @@ jobs:
|
||||
# favor of GPU_ARCH_VERSION
|
||||
DESIRED_CUDA: cpu
|
||||
GPU_ARCH_TYPE: cpu
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cpu-main
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cpu-2.3
|
||||
DESIRED_PYTHON: "3.12"
|
||||
build_name: conda-py3_12-cpu
|
||||
build_environment: linux-binary-conda
|
||||
@ -806,7 +806,7 @@ jobs:
|
||||
# favor of GPU_ARCH_VERSION
|
||||
DESIRED_CUDA: cpu
|
||||
GPU_ARCH_TYPE: cpu
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cpu-main
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cpu-2.3
|
||||
DESIRED_PYTHON: "3.12"
|
||||
build_name: conda-py3_12-cpu
|
||||
build_environment: linux-binary-conda
|
||||
@ -827,7 +827,7 @@ jobs:
|
||||
# favor of GPU_ARCH_VERSION
|
||||
DESIRED_CUDA: cpu
|
||||
GPU_ARCH_TYPE: cpu
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cpu-main
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cpu-2.3
|
||||
DESIRED_PYTHON: "3.12"
|
||||
build_name: conda-py3_12-cpu
|
||||
secrets:
|
||||
@ -848,7 +848,7 @@ jobs:
|
||||
DESIRED_CUDA: cu118
|
||||
GPU_ARCH_VERSION: 11.8
|
||||
GPU_ARCH_TYPE: cuda
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-main
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-2.3
|
||||
DESIRED_PYTHON: "3.12"
|
||||
runs_on: linux.24xlarge
|
||||
build_name: conda-py3_12-cuda11_8
|
||||
@ -868,7 +868,7 @@ jobs:
|
||||
DESIRED_CUDA: cu118
|
||||
GPU_ARCH_VERSION: 11.8
|
||||
GPU_ARCH_TYPE: cuda
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-main
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-2.3
|
||||
DESIRED_PYTHON: "3.12"
|
||||
build_name: conda-py3_12-cuda11_8
|
||||
build_environment: linux-binary-conda
|
||||
@ -890,7 +890,7 @@ jobs:
|
||||
DESIRED_CUDA: cu118
|
||||
GPU_ARCH_VERSION: 11.8
|
||||
GPU_ARCH_TYPE: cuda
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-main
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-2.3
|
||||
DESIRED_PYTHON: "3.12"
|
||||
build_name: conda-py3_12-cuda11_8
|
||||
secrets:
|
||||
@ -911,7 +911,7 @@ jobs:
|
||||
DESIRED_CUDA: cu121
|
||||
GPU_ARCH_VERSION: 12.1
|
||||
GPU_ARCH_TYPE: cuda
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-main
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-2.3
|
||||
DESIRED_PYTHON: "3.12"
|
||||
runs_on: linux.24xlarge
|
||||
build_name: conda-py3_12-cuda12_1
|
||||
@ -931,7 +931,7 @@ jobs:
|
||||
DESIRED_CUDA: cu121
|
||||
GPU_ARCH_VERSION: 12.1
|
||||
GPU_ARCH_TYPE: cuda
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-main
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-2.3
|
||||
DESIRED_PYTHON: "3.12"
|
||||
build_name: conda-py3_12-cuda12_1
|
||||
build_environment: linux-binary-conda
|
||||
@ -953,7 +953,7 @@ jobs:
|
||||
DESIRED_CUDA: cu121
|
||||
GPU_ARCH_VERSION: 12.1
|
||||
GPU_ARCH_TYPE: cuda
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-main
|
||||
DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-2.3
|
||||
DESIRED_PYTHON: "3.12"
|
||||
build_name: conda-py3_12-cuda12_1
|
||||
secrets:
|
||||
|
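Net effect of the retags above: the nightly conda builds stop tracking the floating -main builder images and pin to the frozen 2.3 builder images. As an illustrative sanity check (commands are ours, not part of the diff), the pinned tags can be pulled directly:

    docker pull pytorch/conda-builder:cuda11.8-2.3
    docker pull pytorch/conda-builder:cpu-2.3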
.github/workflows/generated-linux-binary-libtorch-cxx11-abi-main.yml (generated, vendored, 4 changes)

@@ -43,7 +43,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu
-      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu-main
+      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu-2.3
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: cxx11-abi
       build_name: libtorch-cpu-shared-with-deps-cxx11-abi
@@ -62,7 +62,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu
-      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu-main
+      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu-2.3
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: cxx11-abi
       build_name: libtorch-cpu-shared-with-deps-cxx11-abi
.github/workflows/generated-linux-binary-libtorch-cxx11-abi-nightly.yml (generated, vendored, 40 changes)

Every pytorch/libtorch-cxx11-builder image tag moves from -main to -2.3 across the cpu, cuda11.8, cuda12.1, rocm5.7 and rocm6.0 build, test and upload jobs, e.g.:

@@ -48,7 +48,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu
-      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu-main
+      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu-2.3
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: cxx11-abi
       build_name: libtorch-cpu-shared-with-deps-cxx11-abi

In the ROCm test jobs (rocm5.7 shown below; rocm6.0 at @@ -383, @@ -395 and @@ -411 is changed identically), the PR-head ref override is dropped from the PyTorch checkout, the pytorch/builder checkout is retargeted to the release branch, and the pulled image is retagged:

@@ -277,7 +277,6 @@ jobs:
       - name: Checkout PyTorch
         uses: malfet/checkout@silent-checkout
         with:
-          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
           submodules: recursive
           path: pytorch
           quiet-checkout: true
@@ -289,7 +288,7 @@ jobs:
       - name: Checkout pytorch/builder
         uses: malfet/checkout@silent-checkout
         with:
-          ref: main
+          ref: release/2.3
           submodules: recursive
           repository: pytorch/builder
           path: builder
@@ -305,7 +304,7 @@ jobs:
       - name: Pull Docker image
         uses: pytorch/test-infra/.github/actions/pull-docker-image@main
         with:
-          docker-image: pytorch/libtorch-cxx11-builder:rocm5.7-main
+          docker-image: pytorch/libtorch-cxx11-builder:rocm5.7-2.3
       - name: Test Pytorch binary
         uses: ./pytorch/.github/actions/test-pytorch-binary
       - name: Teardown ROCm
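For reference, the retargeted builder checkout is equivalent to cloning the release branch locally; a minimal sketch (the exact clone flags are our assumption, the repository and branch come from the hunks above):

    git clone -b release/2.3 --recurse-submodules https://github.com/pytorch/builder.git builder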
.github/workflows/generated-linux-binary-libtorch-pre-cxx11-main.yml (generated, vendored, 4 changes)

@@ -43,7 +43,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu
-      DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
+      DOCKER_IMAGE: pytorch/manylinux-builder:cpu-2.3
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: pre-cxx11
       build_name: libtorch-cpu-shared-with-deps-pre-cxx11
@@ -62,7 +62,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu
-      DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
+      DOCKER_IMAGE: pytorch/manylinux-builder:cpu-2.3
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: pre-cxx11
       build_name: libtorch-cpu-shared-with-deps-pre-cxx11
.github/workflows/generated-linux-binary-libtorch-pre-cxx11-nightly.yml (generated, vendored, 40 changes)

The same pattern as the cxx11-abi nightly workflow, with pytorch/manylinux-builder images and DESIRED_DEVTOOLSET: pre-cxx11: every :cpu-main, :cuda11.8-main, :cuda12.1-main, :rocm5.7-main and :rocm6.0-main tag becomes the corresponding -2.3 tag across the build, test and upload jobs, e.g.:

@@ -48,7 +48,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu
-      DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
+      DOCKER_IMAGE: pytorch/manylinux-builder:cpu-2.3
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: pre-cxx11
       build_name: libtorch-cpu-shared-with-deps-pre-cxx11

The ROCm test jobs receive the same three-step edit as above: the PR-head ref override is removed from the PyTorch checkout (@@ -277,7 +277,6 and @@ -383,7 +382,6), the pytorch/builder checkout moves from ref: main to ref: release/2.3 (@@ -289,7 +288,7 and @@ -395,7 +393,7), and the pulled images become pytorch/manylinux-builder:rocm5.7-2.3 and pytorch/manylinux-builder:rocm6.0-2.3 (@@ -305,7 +304,7 and @@ -411,7 +409,7).
.github/workflows/generated-linux-binary-manywheel-main.yml (generated, vendored, 8 changes)

@@ -44,7 +44,7 @@ jobs:
       DESIRED_CUDA: cu118
       GPU_ARCH_VERSION: 11.8
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-main
+      DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-2.3
       DESIRED_PYTHON: "3.8"
       build_name: manywheel-py3_8-cuda11_8
       build_environment: linux-binary-manywheel

The hunks at @@ -64, @@ -84 and @@ -104 retag the remaining manywheel-py3_8 jobs the same way (cuda11.8 test, cuda12.1 build and test).
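The nightly manywheel file below carries 230 instances of the same substitution. These workflows are regenerated from templates rather than hand-edited, but the change itself is mechanical; a hedged sketch of an equivalent blanket retag (illustrative only, not how the files are actually produced):

    sed -i 's/\(pytorch\/[a-z0-9-]*builder:[a-z0-9.-]*\)-main/\1-2.3/' .github/workflows/generated-linux-binary-*.yml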
.github/workflows/generated-linux-binary-manywheel-nightly.yml (generated, vendored, 230 changes)

Every builder image tag moves from -main to -2.3 across the manywheel jobs for Python 3.8 through 3.11 (the listing is cut off partway through the py3_11 ROCm jobs). Three env patterns recur, shown here for py3_8:

@@ -48,7 +48,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu
-      DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
+      DOCKER_IMAGE: pytorch/manylinux-builder:cpu-2.3
       DESIRED_PYTHON: "3.8"
       build_name: manywheel-py3_8-cpu
       build_environment: linux-binary-manywheel
@@ -107,7 +107,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu-cxx11-abi
       GPU_ARCH_TYPE: cpu-cxx11-abi
-      DOCKER_IMAGE: pytorch/manylinuxcxx11-abi-builder:cpu-cxx11-abi-main
+      DOCKER_IMAGE: pytorch/manylinuxcxx11-abi-builder:cpu-cxx11-abi-2.3
       DESIRED_DEVTOOLSET: cxx11-abi
       DESIRED_PYTHON: "3.8"
       build_name: manywheel-py3_8-cpu-cxx11-abi
@@ -170,7 +170,7 @@ jobs:
       DESIRED_CUDA: cu118
       GPU_ARCH_VERSION: 11.8
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-main
+      DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-2.3
       DESIRED_PYTHON: "3.8"
       build_name: manywheel-py3_8-cuda11_8
       build_environment: linux-binary-manywheel

The ROCm test jobs (rocm5.7 and rocm6.0, for each Python version) get the same checkout and image edits as the libtorch nightlies, shown here for py3_8 / rocm5.7:

@@ -330,7 +330,6 @@ jobs:
       - name: Checkout PyTorch
         uses: malfet/checkout@silent-checkout
         with:
-          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
           submodules: recursive
           path: pytorch
           quiet-checkout: true
@@ -342,7 +341,7 @@ jobs:
       - name: Checkout pytorch/builder
         uses: malfet/checkout@silent-checkout
         with:
-          ref: main
+          ref: release/2.3
           submodules: recursive
           repository: pytorch/builder
           path: builder
@@ -358,7 +357,7 @@ jobs:
       - name: Pull Docker image
         uses: pytorch/test-infra/.github/actions/pull-docker-image@main
         with:
-          docker-image: pytorch/manylinux-builder:rocm5.7-main
+          docker-image: pytorch/manylinux-builder:rocm5.7-2.3
       - name: Test Pytorch binary
         uses: ./pytorch/.github/actions/test-pytorch-binary
       - name: Teardown ROCm

The same retag repeats for the cpu, cpu-cxx11-abi, cuda11.8, cuda12.1, rocm5.7 and rocm6.0 jobs of manywheel-py3_9, manywheel-py3_10 and manywheel-py3_11; the listing breaks off at the py3_11 rocm6.0 test job:

@@ -1792,7 +1785,6 @@ jobs:
       - name: Checkout PyTorch
||||
uses: malfet/checkout@silent-checkout
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
quiet-checkout: true
|
||||
@ -1804,7 +1796,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: malfet/checkout@silent-checkout
|
||||
with:
|
||||
ref: main
|
||||
ref: release/2.3
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -1820,7 +1812,7 @@ jobs:
|
||||
- name: Pull Docker image
|
||||
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
|
||||
with:
|
||||
docker-image: pytorch/manylinux-builder:rocm6.0-main
|
||||
docker-image: pytorch/manylinux-builder:rocm6.0-2.3
|
||||
- name: Test Pytorch binary
|
||||
uses: ./pytorch/.github/actions/test-pytorch-binary
|
||||
- name: Teardown ROCm
|
||||
@ -1840,7 +1832,7 @@ jobs:
|
||||
DESIRED_CUDA: rocm6.0
|
||||
GPU_ARCH_VERSION: 6.0
|
||||
GPU_ARCH_TYPE: rocm
|
||||
DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
|
||||
DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-2.3
|
||||
DESIRED_PYTHON: "3.11"
|
||||
build_name: manywheel-py3_11-rocm6_0
|
||||
secrets:
|
||||
@ -1860,7 +1852,7 @@ jobs:
|
||||
# favor of GPU_ARCH_VERSION
|
||||
DESIRED_CUDA: cpu
|
||||
GPU_ARCH_TYPE: cpu
|
||||
DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
|
||||
DOCKER_IMAGE: pytorch/manylinux-builder:cpu-2.3
|
||||
DESIRED_PYTHON: "3.12"
|
||||
build_name: manywheel-py3_12-cpu
|
||||
build_environment: linux-binary-manywheel
|
||||
@ -1878,7 +1870,7 @@ jobs:
|
||||
# favor of GPU_ARCH_VERSION
|
||||
DESIRED_CUDA: cpu
|
||||
GPU_ARCH_TYPE: cpu
|
||||
DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
|
||||
DOCKER_IMAGE: pytorch/manylinux-builder:cpu-2.3
|
||||
DESIRED_PYTHON: "3.12"
|
||||
build_name: manywheel-py3_12-cpu
|
||||
build_environment: linux-binary-manywheel
|
||||
@ -1899,7 +1891,7 @@ jobs:
|
||||
# favor of GPU_ARCH_VERSION
|
||||
DESIRED_CUDA: cpu
|
||||
GPU_ARCH_TYPE: cpu
|
||||
DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
|
||||
DOCKER_IMAGE: pytorch/manylinux-builder:cpu-2.3
|
||||
DESIRED_PYTHON: "3.12"
|
||||
build_name: manywheel-py3_12-cpu
|
||||
secrets:
|
||||
@ -1919,7 +1911,7 @@ jobs:
|
||||
# favor of GPU_ARCH_VERSION
|
||||
DESIRED_CUDA: cpu-cxx11-abi
|
||||
GPU_ARCH_TYPE: cpu-cxx11-abi
|
||||
DOCKER_IMAGE: pytorch/manylinuxcxx11-abi-builder:cpu-cxx11-abi-main
|
||||
DOCKER_IMAGE: pytorch/manylinuxcxx11-abi-builder:cpu-cxx11-abi-2.3
|
||||
DESIRED_DEVTOOLSET: cxx11-abi
|
||||
DESIRED_PYTHON: "3.12"
|
||||
build_name: manywheel-py3_12-cpu-cxx11-abi
|
||||
@ -1938,7 +1930,7 @@ jobs:
|
||||
# favor of GPU_ARCH_VERSION
|
||||
DESIRED_CUDA: cpu-cxx11-abi
|
||||
GPU_ARCH_TYPE: cpu-cxx11-abi
|
||||
DOCKER_IMAGE: pytorch/manylinuxcxx11-abi-builder:cpu-cxx11-abi-main
|
||||
DOCKER_IMAGE: pytorch/manylinuxcxx11-abi-builder:cpu-cxx11-abi-2.3
|
||||
DESIRED_DEVTOOLSET: cxx11-abi
|
||||
DESIRED_PYTHON: "3.12"
|
||||
build_name: manywheel-py3_12-cpu-cxx11-abi
|
||||
@ -1960,7 +1952,7 @@ jobs:
|
||||
# favor of GPU_ARCH_VERSION
|
||||
DESIRED_CUDA: cpu-cxx11-abi
|
||||
GPU_ARCH_TYPE: cpu-cxx11-abi
|
||||
DOCKER_IMAGE: pytorch/manylinuxcxx11-abi-builder:cpu-cxx11-abi-main
|
||||
DOCKER_IMAGE: pytorch/manylinuxcxx11-abi-builder:cpu-cxx11-abi-2.3
|
||||
DESIRED_DEVTOOLSET: cxx11-abi
|
||||
DESIRED_PYTHON: "3.12"
|
||||
build_name: manywheel-py3_12-cpu-cxx11-abi
|
||||
@ -1982,7 +1974,7 @@ jobs:
|
||||
DESIRED_CUDA: cu118
|
||||
GPU_ARCH_VERSION: 11.8
|
||||
GPU_ARCH_TYPE: cuda
|
||||
DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-main
|
||||
DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-2.3
|
||||
DESIRED_PYTHON: "3.12"
|
||||
build_name: manywheel-py3_12-cuda11_8
|
||||
build_environment: linux-binary-manywheel
|
||||
@ -2002,7 +1994,7 @@ jobs:
|
||||
DESIRED_CUDA: cu118
|
||||
GPU_ARCH_VERSION: 11.8
|
||||
GPU_ARCH_TYPE: cuda
|
||||
DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-main
|
||||
DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-2.3
|
||||
DESIRED_PYTHON: "3.12"
|
||||
build_name: manywheel-py3_12-cuda11_8
|
||||
build_environment: linux-binary-manywheel
|
||||
@ -2024,7 +2016,7 @@ jobs:
|
||||
DESIRED_CUDA: cu118
|
||||
GPU_ARCH_VERSION: 11.8
|
||||
GPU_ARCH_TYPE: cuda
|
||||
DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-main
|
||||
DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-2.3
|
||||
DESIRED_PYTHON: "3.12"
|
||||
build_name: manywheel-py3_12-cuda11_8
|
||||
secrets:
|
||||
@ -2045,7 +2037,7 @@ jobs:
|
||||
DESIRED_CUDA: cu121
|
||||
GPU_ARCH_VERSION: 12.1
|
||||
GPU_ARCH_TYPE: cuda
|
||||
DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.1-main
|
||||
DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.1-2.3
|
||||
DESIRED_PYTHON: "3.12"
|
||||
build_name: manywheel-py3_12-cuda12_1
|
||||
build_environment: linux-binary-manywheel
|
||||
@ -2065,7 +2057,7 @@ jobs:
|
||||
DESIRED_CUDA: cu121
|
||||
GPU_ARCH_VERSION: 12.1
|
||||
GPU_ARCH_TYPE: cuda
|
||||
DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.1-main
|
||||
DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.1-2.3
|
||||
DESIRED_PYTHON: "3.12"
|
||||
build_name: manywheel-py3_12-cuda12_1
|
||||
build_environment: linux-binary-manywheel
|
||||
@ -2087,7 +2079,7 @@ jobs:
|
||||
DESIRED_CUDA: cu121
|
||||
GPU_ARCH_VERSION: 12.1
|
||||
GPU_ARCH_TYPE: cuda
|
||||
DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.1-main
|
||||
DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.1-2.3
|
||||
DESIRED_PYTHON: "3.12"
|
||||
build_name: manywheel-py3_12-cuda12_1
|
||||
secrets:
|
||||
@ -2108,7 +2100,7 @@ jobs:
|
||||
DESIRED_CUDA: rocm5.7
|
||||
GPU_ARCH_VERSION: 5.7
|
||||
GPU_ARCH_TYPE: rocm
|
||||
DOCKER_IMAGE: pytorch/manylinux-builder:rocm5.7-main
|
||||
DOCKER_IMAGE: pytorch/manylinux-builder:rocm5.7-2.3
|
||||
DESIRED_PYTHON: "3.12"
|
||||
build_name: manywheel-py3_12-rocm5_7
|
||||
build_environment: linux-binary-manywheel
|
||||
@ -2129,7 +2121,7 @@ jobs:
|
||||
GPU_ARCH_VERSION: 5.7
|
||||
GPU_ARCH_TYPE: rocm
|
||||
SKIP_ALL_TESTS: 1
|
||||
DOCKER_IMAGE: pytorch/manylinux-builder:rocm5.7-main
|
||||
DOCKER_IMAGE: pytorch/manylinux-builder:rocm5.7-2.3
|
||||
DESIRED_PYTHON: "3.12"
|
||||
steps:
|
||||
- name: Setup ROCm
|
||||
@ -2142,7 +2134,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: malfet/checkout@silent-checkout
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
quiet-checkout: true
|
||||
@ -2154,7 +2145,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: malfet/checkout@silent-checkout
|
||||
with:
|
||||
ref: main
|
||||
ref: release/2.3
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -2170,7 +2161,7 @@ jobs:
|
||||
- name: Pull Docker image
|
||||
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
|
||||
with:
|
||||
docker-image: pytorch/manylinux-builder:rocm5.7-main
|
||||
docker-image: pytorch/manylinux-builder:rocm5.7-2.3
|
||||
- name: Test Pytorch binary
|
||||
uses: ./pytorch/.github/actions/test-pytorch-binary
|
||||
- name: Teardown ROCm
|
||||
@ -2190,7 +2181,7 @@ jobs:
|
||||
DESIRED_CUDA: rocm5.7
|
||||
GPU_ARCH_VERSION: 5.7
|
||||
GPU_ARCH_TYPE: rocm
|
||||
DOCKER_IMAGE: pytorch/manylinux-builder:rocm5.7-main
|
||||
DOCKER_IMAGE: pytorch/manylinux-builder:rocm5.7-2.3
|
||||
DESIRED_PYTHON: "3.12"
|
||||
build_name: manywheel-py3_12-rocm5_7
|
||||
secrets:
|
||||
@ -2211,7 +2202,7 @@ jobs:
|
||||
DESIRED_CUDA: rocm6.0
|
||||
GPU_ARCH_VERSION: 6.0
|
||||
GPU_ARCH_TYPE: rocm
|
||||
DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
|
||||
DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-2.3
|
||||
DESIRED_PYTHON: "3.12"
|
||||
build_name: manywheel-py3_12-rocm6_0
|
||||
build_environment: linux-binary-manywheel
|
||||
@ -2232,7 +2223,7 @@ jobs:
|
||||
GPU_ARCH_VERSION: 6.0
|
||||
GPU_ARCH_TYPE: rocm
|
||||
SKIP_ALL_TESTS: 1
|
||||
DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
|
||||
DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-2.3
|
||||
DESIRED_PYTHON: "3.12"
|
||||
steps:
|
||||
- name: Setup ROCm
|
||||
@ -2245,7 +2236,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: malfet/checkout@silent-checkout
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
quiet-checkout: true
|
||||
@ -2257,7 +2247,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: malfet/checkout@silent-checkout
|
||||
with:
|
||||
ref: main
|
||||
ref: release/2.3
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -2273,7 +2263,7 @@ jobs:
|
||||
- name: Pull Docker image
|
||||
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
|
||||
with:
|
||||
docker-image: pytorch/manylinux-builder:rocm6.0-main
|
||||
docker-image: pytorch/manylinux-builder:rocm6.0-2.3
|
||||
- name: Test Pytorch binary
|
||||
uses: ./pytorch/.github/actions/test-pytorch-binary
|
||||
- name: Teardown ROCm
|
||||
@ -2293,7 +2283,7 @@ jobs:
|
||||
DESIRED_CUDA: rocm6.0
|
||||
GPU_ARCH_VERSION: 6.0
|
||||
GPU_ARCH_TYPE: rocm
|
||||
DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
|
||||
DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-2.3
|
||||
DESIRED_PYTHON: "3.12"
|
||||
build_name: manywheel-py3_12-rocm6_0
|
||||
secrets:
|
||||
25 .github/workflows/generated-macos-arm64-binary-conda-nightly.yml generated vendored
@ -77,7 +77,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -89,7 +88,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -141,7 +140,7 @@ jobs:
# favor of GPU_ARCH_VERSION
DESIRED_CUDA: cpu
GPU_ARCH_TYPE: cpu
DOCKER_IMAGE: pytorch/conda-builder:cpu-main
DOCKER_IMAGE: pytorch/conda-builder:cpu-2.3
DESIRED_PYTHON: "3.8"
build_name: conda-py3_8-cpu
use_s3: False
@ -195,7 +194,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -207,7 +205,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -259,7 +257,7 @@ jobs:
# favor of GPU_ARCH_VERSION
DESIRED_CUDA: cpu
GPU_ARCH_TYPE: cpu
DOCKER_IMAGE: pytorch/conda-builder:cpu-main
DOCKER_IMAGE: pytorch/conda-builder:cpu-2.3
DESIRED_PYTHON: "3.9"
build_name: conda-py3_9-cpu
use_s3: False
@ -313,7 +311,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -325,7 +322,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -377,7 +374,7 @@ jobs:
# favor of GPU_ARCH_VERSION
DESIRED_CUDA: cpu
GPU_ARCH_TYPE: cpu
DOCKER_IMAGE: pytorch/conda-builder:cpu-main
DOCKER_IMAGE: pytorch/conda-builder:cpu-2.3
DESIRED_PYTHON: "3.10"
build_name: conda-py3_10-cpu
use_s3: False
@ -431,7 +428,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -443,7 +439,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -495,7 +491,7 @@ jobs:
# favor of GPU_ARCH_VERSION
DESIRED_CUDA: cpu
GPU_ARCH_TYPE: cpu
DOCKER_IMAGE: pytorch/conda-builder:cpu-main
DOCKER_IMAGE: pytorch/conda-builder:cpu-2.3
DESIRED_PYTHON: "3.11"
build_name: conda-py3_11-cpu
use_s3: False
@ -549,7 +545,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -561,7 +556,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -613,7 +608,7 @@ jobs:
# favor of GPU_ARCH_VERSION
DESIRED_CUDA: cpu
GPU_ARCH_TYPE: cpu
DOCKER_IMAGE: pytorch/conda-builder:cpu-main
DOCKER_IMAGE: pytorch/conda-builder:cpu-2.3
DESIRED_PYTHON: "3.12"
build_name: conda-py3_12-cpu
use_s3: False
5 .github/workflows/generated-macos-arm64-binary-libtorch-cxx11-abi-nightly.yml generated vendored
@ -81,7 +81,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -93,7 +92,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -145,7 +144,7 @@ jobs:
# favor of GPU_ARCH_VERSION
DESIRED_CUDA: cpu
GPU_ARCH_TYPE: cpu
DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu-main
DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu-2.3
LIBTORCH_VARIANT: shared-with-deps
DESIRED_DEVTOOLSET: cxx11-abi
build_name: libtorch-cpu-shared-with-deps-cxx11-abi
25 .github/workflows/generated-macos-arm64-binary-wheel-nightly.yml generated vendored
@ -78,7 +78,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -90,7 +89,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -142,7 +141,7 @@ jobs:
# favor of GPU_ARCH_VERSION
DESIRED_CUDA: cpu
GPU_ARCH_TYPE: cpu
DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
DOCKER_IMAGE: pytorch/manylinux-builder:cpu-2.3
DESIRED_PYTHON: "3.8"
build_name: wheel-py3_8-cpu
use_s3: False
@ -197,7 +196,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -209,7 +207,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -261,7 +259,7 @@ jobs:
# favor of GPU_ARCH_VERSION
DESIRED_CUDA: cpu
GPU_ARCH_TYPE: cpu
DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
DOCKER_IMAGE: pytorch/manylinux-builder:cpu-2.3
DESIRED_PYTHON: "3.9"
build_name: wheel-py3_9-cpu
use_s3: False
@ -316,7 +314,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -328,7 +325,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -380,7 +377,7 @@ jobs:
# favor of GPU_ARCH_VERSION
DESIRED_CUDA: cpu
GPU_ARCH_TYPE: cpu
DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
DOCKER_IMAGE: pytorch/manylinux-builder:cpu-2.3
DESIRED_PYTHON: "3.10"
build_name: wheel-py3_10-cpu
use_s3: False
@ -435,7 +432,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -447,7 +443,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -499,7 +495,7 @@ jobs:
# favor of GPU_ARCH_VERSION
DESIRED_CUDA: cpu
GPU_ARCH_TYPE: cpu
DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
DOCKER_IMAGE: pytorch/manylinux-builder:cpu-2.3
DESIRED_PYTHON: "3.11"
build_name: wheel-py3_11-cpu
use_s3: False
@ -554,7 +550,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -566,7 +561,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -618,7 +613,7 @@ jobs:
# favor of GPU_ARCH_VERSION
DESIRED_CUDA: cpu
GPU_ARCH_TYPE: cpu
DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
DOCKER_IMAGE: pytorch/manylinux-builder:cpu-2.3
DESIRED_PYTHON: "3.12"
build_name: wheel-py3_12-cpu
use_s3: False
90 .github/workflows/generated-windows-binary-conda-nightly.yml generated vendored
@ -93,7 +93,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -105,7 +104,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -210,7 +209,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -222,7 +220,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -336,7 +334,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -348,7 +345,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -454,7 +451,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -466,7 +462,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -581,7 +577,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -593,7 +588,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -699,7 +694,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -711,7 +705,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -825,7 +819,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -837,7 +830,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -942,7 +935,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -954,7 +946,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -1068,7 +1060,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -1080,7 +1071,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -1186,7 +1177,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -1198,7 +1188,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -1313,7 +1303,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -1325,7 +1314,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -1431,7 +1420,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -1443,7 +1431,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -1557,7 +1545,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -1569,7 +1556,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -1674,7 +1661,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -1686,7 +1672,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -1800,7 +1786,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -1812,7 +1797,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -1918,7 +1903,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -1930,7 +1914,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -2045,7 +2029,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -2057,7 +2040,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -2163,7 +2146,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -2175,7 +2157,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -2289,7 +2271,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -2301,7 +2282,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -2406,7 +2387,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -2418,7 +2398,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -2532,7 +2512,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -2544,7 +2523,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -2650,7 +2629,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -2662,7 +2640,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -2777,7 +2755,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -2789,7 +2766,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -2895,7 +2872,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -2907,7 +2883,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -3021,7 +2997,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -3033,7 +3008,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -3138,7 +3113,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -3150,7 +3124,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -3264,7 +3238,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -3276,7 +3249,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -3382,7 +3355,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -3394,7 +3366,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -3509,7 +3481,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -3521,7 +3492,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -3627,7 +3598,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -3639,7 +3609,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
6 .github/workflows/generated-windows-binary-libtorch-debug-main.yml generated vendored
@ -90,7 +90,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -102,7 +101,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -211,7 +210,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -223,7 +221,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
18 .github/workflows/generated-windows-binary-libtorch-debug-nightly.yml generated vendored
@ -97,7 +97,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -109,7 +108,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -218,7 +217,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -230,7 +228,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -352,7 +350,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -364,7 +361,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -474,7 +471,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -486,7 +482,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -609,7 +605,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -621,7 +616,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -731,7 +726,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -743,7 +737,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
6 .github/workflows/generated-windows-binary-libtorch-release-main.yml generated vendored
@ -90,7 +90,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -102,7 +101,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -211,7 +210,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -223,7 +221,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
18 .github/workflows/generated-windows-binary-libtorch-release-nightly.yml generated vendored
@ -97,7 +97,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -109,7 +108,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -218,7 +217,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -230,7 +228,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -352,7 +350,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -364,7 +361,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -474,7 +471,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -486,7 +482,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -609,7 +605,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -621,7 +616,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -731,7 +726,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -743,7 +737,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
90 .github/workflows/generated-windows-binary-wheel-nightly.yml generated vendored
@ -94,7 +94,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -106,7 +105,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -211,7 +210,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -223,7 +221,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -338,7 +336,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -350,7 +347,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -456,7 +453,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -468,7 +464,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -584,7 +580,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -596,7 +591,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -702,7 +697,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -714,7 +708,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -829,7 +823,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -841,7 +834,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -946,7 +939,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -958,7 +950,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -1073,7 +1065,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -1085,7 +1076,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -1191,7 +1182,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -1203,7 +1193,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -1319,7 +1309,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -1331,7 +1320,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -1437,7 +1426,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -1449,7 +1437,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -1564,7 +1552,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -1576,7 +1563,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -1681,7 +1668,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -1693,7 +1679,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -1808,7 +1794,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -1820,7 +1805,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -1926,7 +1911,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -1938,7 +1922,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -2054,7 +2038,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -2066,7 +2049,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
path: builder
@ -2172,7 +2155,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -2184,7 +2166,7 @@ jobs:
- name: Checkout pytorch/builder
uses: malfet/checkout@silent-checkout
with:
ref: main
ref: release/2.3
submodules: recursive
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -2299,7 +2281,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: malfet/checkout@silent-checkout
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
quiet-checkout: true
|
||||
@ -2311,7 +2292,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: malfet/checkout@silent-checkout
|
||||
with:
|
||||
ref: main
|
||||
ref: release/2.3
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -2416,7 +2397,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: malfet/checkout@silent-checkout
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
quiet-checkout: true
|
||||
@ -2428,7 +2408,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: malfet/checkout@silent-checkout
|
||||
with:
|
||||
ref: main
|
||||
ref: release/2.3
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -2543,7 +2523,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: malfet/checkout@silent-checkout
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
quiet-checkout: true
|
||||
@ -2555,7 +2534,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: malfet/checkout@silent-checkout
|
||||
with:
|
||||
ref: main
|
||||
ref: release/2.3
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -2661,7 +2640,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: malfet/checkout@silent-checkout
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
quiet-checkout: true
|
||||
@ -2673,7 +2651,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: malfet/checkout@silent-checkout
|
||||
with:
|
||||
ref: main
|
||||
ref: release/2.3
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -2789,7 +2767,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: malfet/checkout@silent-checkout
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
quiet-checkout: true
|
||||
@ -2801,7 +2778,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: malfet/checkout@silent-checkout
|
||||
with:
|
||||
ref: main
|
||||
ref: release/2.3
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -2907,7 +2884,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: malfet/checkout@silent-checkout
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
quiet-checkout: true
|
||||
@ -2919,7 +2895,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: malfet/checkout@silent-checkout
|
||||
with:
|
||||
ref: main
|
||||
ref: release/2.3
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -3034,7 +3010,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: malfet/checkout@silent-checkout
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
quiet-checkout: true
|
||||
@ -3046,7 +3021,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: malfet/checkout@silent-checkout
|
||||
with:
|
||||
ref: main
|
||||
ref: release/2.3
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -3151,7 +3126,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: malfet/checkout@silent-checkout
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
quiet-checkout: true
|
||||
@ -3163,7 +3137,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: malfet/checkout@silent-checkout
|
||||
with:
|
||||
ref: main
|
||||
ref: release/2.3
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -3278,7 +3252,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: malfet/checkout@silent-checkout
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
quiet-checkout: true
|
||||
@ -3290,7 +3263,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: malfet/checkout@silent-checkout
|
||||
with:
|
||||
ref: main
|
||||
ref: release/2.3
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -3396,7 +3369,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: malfet/checkout@silent-checkout
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
quiet-checkout: true
|
||||
@ -3408,7 +3380,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: malfet/checkout@silent-checkout
|
||||
with:
|
||||
ref: main
|
||||
ref: release/2.3
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -3524,7 +3496,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: malfet/checkout@silent-checkout
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
quiet-checkout: true
|
||||
@ -3536,7 +3507,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: malfet/checkout@silent-checkout
|
||||
with:
|
||||
ref: main
|
||||
ref: release/2.3
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
@ -3642,7 +3613,6 @@ jobs:
|
||||
- name: Checkout PyTorch
|
||||
uses: malfet/checkout@silent-checkout
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
submodules: recursive
|
||||
path: pytorch
|
||||
quiet-checkout: true
|
||||
@ -3654,7 +3624,7 @@ jobs:
|
||||
- name: Checkout pytorch/builder
|
||||
uses: malfet/checkout@silent-checkout
|
||||
with:
|
||||
ref: main
|
||||
ref: release/2.3
|
||||
submodules: recursive
|
||||
repository: pytorch/builder
|
||||
path: builder
|
||||
|
2 .github/workflows/lint-bc.yml vendored
@ -15,7 +15,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Run BC Lint Action
uses: pytorch/test-infra/.github/actions/bc-lint@main
uses: pytorch/test-infra/.github/actions/bc-lint@release/2.3
with:
repo: ${{ github.event.pull_request.head.repo.full_name }}
base_sha: ${{ github.event.pull_request.base.sha }}
19 .github/workflows/lint.yml vendored
@ -16,7 +16,7 @@ permissions: read-all
# When any other step fails, it's job will be retried once by retryBot.
jobs:
lintrunner-clang:
uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
uses: pytorch/test-infra/.github/workflows/linux_job.yml@release/2.3
with:
timeout: 120
runner: linux.2xlarge
@ -32,7 +32,7 @@ jobs:
.github/scripts/lintrunner.sh

lintrunner-noclang:
uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
uses: pytorch/test-infra/.github/workflows/linux_job.yml@release/2.3
with:
timeout: 120
runner: linux.2xlarge
@ -47,7 +47,7 @@ jobs:
.github/scripts/lintrunner.sh

quick-checks:
uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
uses: pytorch/test-infra/.github/workflows/linux_job.yml@release/2.3
with:
runner: linux.2xlarge
docker-image: pytorch-linux-focal-linter
@ -88,7 +88,7 @@ jobs:
if: github.event_name == 'pull_request' && !contains(github.event.pull_request.labels.*.name, 'skip-pr-sanity-checks')
steps:
- name: Checkout PyTorch
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3
with:
submodules: false
fetch-depth: -1
@ -101,7 +101,7 @@ jobs:
bash .github/scripts/pr-sanity-check.sh

workflow-checks:
uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
uses: pytorch/test-infra/.github/workflows/linux_job.yml@release/2.3
with:
runner: linux.2xlarge
docker-image: pytorch-linux-focal-linter
@ -113,6 +113,7 @@ jobs:
CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
conda activate "${CONDA_ENV}"

export RELEASE_VERSION_TAG="2.3"
# Regenerate workflows
.github/scripts/generate_ci_workflows.py

@ -137,7 +138,7 @@ jobs:
exit $RC

toc:
uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
uses: pytorch/test-infra/.github/workflows/linux_job.yml@release/2.3
with:
runner: linux.2xlarge
docker-image: pytorch-linux-focal-linter
@ -175,7 +176,7 @@ jobs:
test-tools:
name: Test tools
if: ${{ github.repository == 'pytorch/pytorch' }}
uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
uses: pytorch/test-infra/.github/workflows/linux_job.yml@release/2.3
with:
runner: linux.2xlarge
docker-image: pytorch-linux-focal-linter
@ -196,7 +197,7 @@ jobs:
runs-on: linux.20_04.4x
steps:
- name: Checkout PyTorch
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3
with:
submodules: false
fetch-depth: 1
@ -226,7 +227,7 @@ jobs:
# [see note: pytorch repo ref]
# deep clone (fetch-depth 0) required, to allow us to use git log
- name: Checkout PyTorch
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3
with:
submodules: false
fetch-depth: 1
@ -21,7 +21,7 @@ jobs:
environment: upload-stats
steps:
- name: Checkout PyTorch
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3
with:
fetch-depth: 1
submodules: false
6 .github/workflows/nightly.yml vendored
@ -41,7 +41,7 @@ jobs:
environment: update-commit-hash
steps:
- name: update-vision-commit-hash
uses: pytorch/test-infra/.github/actions/update-commit-hash@main
uses: pytorch/test-infra/.github/actions/update-commit-hash@release/2.3
if: ${{ github.event_name == 'schedule' }}
with:
repo-name: vision
@ -56,7 +56,7 @@ jobs:
environment: update-commit-hash
steps:
- name: update-audio-commit-hash
uses: pytorch/test-infra/.github/actions/update-commit-hash@main
uses: pytorch/test-infra/.github/actions/update-commit-hash@release/2.3
if: ${{ github.event_name == 'schedule' }}
with:
repo-name: audio
@ -71,7 +71,7 @@ jobs:
environment: update-commit-hash
steps:
- name: update-executorch-commit-hash
uses: pytorch/test-infra/.github/actions/update-commit-hash@main
uses: pytorch/test-infra/.github/actions/update-commit-hash@release/2.3
if: ${{ github.event_name == 'schedule' }}
with:
repo-name: executorch
@ -18,7 +18,7 @@ jobs:

- name: Calculate docker image
id: calculate-docker-image
uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.3
with:
docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9

@ -32,13 +32,13 @@ jobs:
echo "docker pull ghcr.io/pytorch/ci-image:${tag/:/-}"

- name: Pull docker image
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.3
with:
docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}

- name: Install nvidia driver, nvidia-docker runtime, set GPU_FLAG
id: install-nvidia-driver
uses: pytorch/test-infra/.github/actions/setup-nvidia@main
uses: pytorch/test-infra/.github/actions/setup-nvidia@release/2.3

- name: Clone PyTorch
uses: actions/checkout@v3
2 .github/workflows/target_determination.yml vendored
@ -14,7 +14,7 @@ jobs:
# checkout because when we run this action we don't *have* a local
# checkout. In other cases you should prefer a local checkout.
- name: Checkout PyTorch
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3
with:
submodules: false

2 .github/workflows/update-viablestrict.yml vendored
@ -16,7 +16,7 @@ jobs:
environment: ${{ (github.event_name == 'schedule') && 'mergebot' || '' }}
steps:
- name: Update viable/strict
uses: pytorch/test-infra/.github/actions/update-viablestrict@main
uses: pytorch/test-infra/.github/actions/update-viablestrict@release/2.3
with:
repository: pytorch/pytorch
stable-branch: viable/strict
2 .github/workflows/update_pytorch_labels.yml vendored
@ -17,7 +17,7 @@ jobs:
contents: read
steps:
- name: Checkout PyTorch
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3
with:
fetch-depth: 1
submodules: false
2 .github/workflows/upload-alerts.yml vendored
@ -44,7 +44,7 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
uses: pytorch/test-infra/.github/actions/upload-alerts@main
uses: pytorch/test-infra/.github/actions/upload-alerts@release/2.3
with:
alerts: '${{ steps.alert_creation_step.outputs.script-output }}'
organization: "pytorch"
2 .github/workflows/upload-test-stats.yml vendored
@ -39,7 +39,7 @@ jobs:
run: echo "${TRIGGERING_WORKFLOW}"

- name: Checkout PyTorch
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3

- uses: actions/setup-python@v4
with:
@ -29,7 +29,7 @@ jobs:
name: Upload dynamo performance stats for ${{ github.event.workflow_run.id }}, attempt ${{ github.event.workflow_run.run_attempt }}
steps:
- name: Checkout PyTorch
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.3
with:
submodules: false
fetch-depth: 1
4 .github/workflows/weekly.yml vendored
@ -21,7 +21,7 @@ jobs:
fetch-depth: 0
- name: update-xla-commit-hash
continue-on-error: true
uses: pytorch/test-infra/.github/actions/update-commit-hash@main
uses: pytorch/test-infra/.github/actions/update-commit-hash@release/2.3
with:
repo-name: xla
branch: master
@ -30,7 +30,7 @@ jobs:
updatebot-token: ${{ secrets.UPDATEBOT_TOKEN }}
pytorchbot-token: ${{ secrets.GH_PYTORCHBOT_TOKEN }}
- name: update-triton-commit-hash
uses: pytorch/test-infra/.github/actions/update-commit-hash@main
uses: pytorch/test-infra/.github/actions/update-commit-hash@release/2.3
with:
repo-owner: openai
repo-name: triton
@ -13,6 +13,10 @@ namespace at {
TORCH_API ScalarType toScalarType(const DLDataType& dtype);
TORCH_API DLManagedTensor* toDLPack(const Tensor& src);
TORCH_API Tensor fromDLPack(DLManagedTensor* src);
C10_DEPRECATED_MESSAGE("Please migrate to a non-const variant")
inline Tensor fromDLPack(const DLManagedTensor* src) {
return fromDLPack(const_cast<DLManagedTensor*>(src));
}
TORCH_API Tensor
fromDLPack(DLManagedTensor* src, std::function<void(void*)> deleter);
TORCH_API DLDataType getDLDataType(const Tensor& t);
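The hunk above deprecates the const-qualified C++ fromDLPack overload. For orientation only, a minimal Python-level sketch of the DLPack round trip these C++ entry points back (standard torch.utils.dlpack API; not part of this diff):

import torch
from torch.utils import dlpack

t = torch.arange(4.0)
capsule = dlpack.to_dlpack(t)      # wraps the tensor in a DLManagedTensor capsule
t2 = dlpack.from_dlpack(capsule)   # zero-copy: shares storage with t
assert t2.data_ptr() == t.data_ptr()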
@ -303,6 +303,29 @@ Tensor FunctionalInverses::_nested_view_from_buffer_inverse(const Tensor& base,
return Tensor();
}

Tensor FunctionalInverses::_nested_view_from_jagged_inverse(const Tensor& base, const Tensor& mutated_view, InverseReturnMode inverse_return_mode, const Tensor& offsets, const Tensor& dummy, const std::optional<Tensor>& lengths, int64_t ragged_idx) {
auto values = at::_nested_get_values(mutated_view);
if (inverse_return_mode != InverseReturnMode::NeverView) {
return values;
} else {
return values.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}
}

Tensor FunctionalInverses::_nested_get_values_inverse(const Tensor& base, const Tensor& mutated_view, InverseReturnMode inverse_return_mode) {
auto offsets = at::_nested_get_offsets(base);
auto lengths = at::_nested_get_lengths(base);
auto ragged_idx = at::_nested_get_ragged_idx(base);
auto dummy = at::_nested_get_jagged_dummy(base);
auto nt = at::_nested_view_from_jagged(mutated_view, offsets, dummy, lengths, ragged_idx);

if (inverse_return_mode != InverseReturnMode::NeverView) {
return nt;
} else {
return nt.clone(/*memory_format=*/at::MemoryFormat::Contiguous);
}
}

Tensor FunctionalInverses::unsqueeze_inverse(const Tensor& base, const Tensor& mutated_view, InverseReturnMode inverse_return_mode, int64_t dim) {
if (inverse_return_mode != InverseReturnMode::NeverView) {
return at::squeeze(mutated_view, dim);
@ -210,6 +210,9 @@ std::string scalarToMetalTypeString(const c10::ScalarType& scalar_type) {
return "float";
case ScalarType::Half:
return "half";
case ScalarType::BFloat16:
checkSupportsBFloat16();
return "bfloat";
case ScalarType::Int:
return "int";
case ScalarType::Long:
@ -4,6 +4,8 @@
#include <ATen/mps/MPSProfiler.h>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/Resize.h>
// For MTLLanguageVersion_3_1
#include <ATen/native/mps/MPSGraphSonomaOps.h>
#include <ATen/native/mps/OperationUtils.h>

#ifndef AT_PER_OPERATOR_HEADERS
@ -29,7 +31,7 @@ static const char* METAL_LINALG = R"MATMUL_METAL(
using namespace metal;
template<typename T>
T dot_product(constant T *v1, constant T* v2, ulong2 strides, uint32_t size) {
T rc = 0.0;
T rc = T(0.0);
for (uint32_t i = 0; i < size; ++i) {
rc += v1[i * strides.x] * v2[i * strides.y];
}
@ -69,6 +71,9 @@ kernel void naive_matmul<DTYPE>( \

INSTANTIATE_NAIVE_MM(float);
INSTANTIATE_NAIVE_MM(half);
#if __METAL_VERSION__ >= 310
INSTANTIATE_NAIVE_MM(bfloat);
#endif
)MATMUL_METAL";

id<MTLLibrary> compileLinalgOpLibrary(id<MTLDevice> device) {
@ -79,7 +84,8 @@ id<MTLLibrary> compileLinalgOpLibrary(id<MTLDevice> device) {

NSError* error = nil;
MTLCompileOptions* options = [[MTLCompileOptions new] autorelease];
[options setLanguageVersion:MTLLanguageVersion2_3];
[options setLanguageVersion:is_macos_13_or_newer(MacOSVersion::MACOS_VER_14_0_PLUS) ? MTLLanguageVersion3_1
: MTLLanguageVersion2_3];
linalgLibrary = [device newLibraryWithSource:[NSString stringWithCString:METAL_LINALG encoding:NSASCIIStringEncoding]
options:options
error:&error];
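The two hunks above compile the Metal matmul shaders with language version 3.1 on macOS 14+ so that bfloat kernels can be instantiated. A minimal sketch of what this enables at the Python level (assumes an Apple-silicon machine on macOS 14 or newer; not part of this diff):

import torch

if torch.backends.mps.is_available():
    a = torch.randn(8, 8, device="mps", dtype=torch.bfloat16)
    b = torch.randn(8, 8, device="mps", dtype=torch.bfloat16)
    # served by the bfloat naive_matmul instantiation added above
    print((a @ b).dtype)  # torch.bfloat16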
@ -30,41 +30,53 @@ static void clamp_mps_graph(CachedGraph* cachedGraph,
const Tensor& min_tensor,
const Tensor& max_tensor) {
auto input_dtype = input_tensor.scalar_type();
auto min_dtype = input_dtype;
auto max_dtype = input_dtype;
if (cachedGraph->minTensor) {
min_dtype = min_tensor.scalar_type();
}
if (cachedGraph->maxTensor) {
max_dtype = max_tensor.scalar_type();
}
auto min_dtype = cachedGraph->minTensor ? min_tensor.scalar_type() : input_dtype;
auto max_dtype = cachedGraph->maxTensor ? max_tensor.scalar_type() : input_dtype;

MPSGraph* mpsGraph = cachedGraph->graph();

cachedGraph->inputTensor = mpsGraphRankedPlaceHolder(mpsGraph, input_tensor);

MPSGraphTensor* minTensor = cachedGraph->minTensor;
MPSGraphTensor* maxTensor = cachedGraph->maxTensor;
auto minTensor = cachedGraph->minTensor;
auto maxTensor = cachedGraph->maxTensor;

if (input_dtype != min_dtype) {
minTensor = castMPSTensor(mpsGraph, cachedGraph->minTensor, input_dtype);
}
if (input_dtype != max_dtype) {
maxTensor = castMPSTensor(mpsGraph, cachedGraph->maxTensor, input_dtype);
}
if (cachedGraph->minTensor && cachedGraph->maxTensor) {
cachedGraph->outputTensor = [mpsGraph clampWithTensor:cachedGraph->inputTensor
minValueTensor:minTensor
maxValueTensor:maxTensor
name:nil];
} else if (cachedGraph->maxTensor) {
cachedGraph->outputTensor = [mpsGraph minimumWithPrimaryTensor:cachedGraph->inputTensor
secondaryTensor:maxTensor
name:nil];
} else if (cachedGraph->minTensor) {
cachedGraph->outputTensor = [mpsGraph maximumWithPrimaryTensor:cachedGraph->inputTensor
secondaryTensor:minTensor
name:nil];
if (c10::isIntegralType(input_dtype, /*includeBool=*/true)) {
if (minTensor && maxTensor) {
cachedGraph->outputTensor = [mpsGraph clampWithTensor:cachedGraph->inputTensor
minValueTensor:minTensor
maxValueTensor:maxTensor
name:nil];
} else if (maxTensor) {
cachedGraph->outputTensor = [mpsGraph minimumWithPrimaryTensor:cachedGraph->inputTensor
secondaryTensor:maxTensor
name:nil];
} else if (minTensor) {
cachedGraph->outputTensor = [mpsGraph maximumWithPrimaryTensor:cachedGraph->inputTensor
secondaryTensor:minTensor
name:nil];
}
return;
}
// clampWithTensor doesn't propagate NaN through so simulate it as composition of
// maximumWithNaNPropagationWithPrimaryTensor and minimumWithNaNPropagationWithPrimaryTensor
auto outputTensor = cachedGraph->inputTensor;
if (minTensor) {
outputTensor = [mpsGraph maximumWithNaNPropagationWithPrimaryTensor:outputTensor
secondaryTensor:minTensor
name:nil];
}
if (maxTensor) {
outputTensor = [mpsGraph minimumWithNaNPropagationWithPrimaryTensor:outputTensor
secondaryTensor:maxTensor
name:nil];
}
cachedGraph->outputTensor = outputTensor;
}

static void check_min_max_dims(const OptionalTensorRef clamp_opt, const Tensor& input_t, string op_name) {
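The rewrite above makes MPS clamp match the NaN-propagating semantics of the CPU implementation by composing NaN-propagating max/min graph ops for floating-point inputs (integral dtypes keep the plain clamp path). A small illustration of the intended semantics, in plain eager PyTorch (not part of this diff):

import torch

x = torch.tensor([float("nan"), -2.0, 0.5, 3.0])
# NaN must survive clamping rather than being snapped to a bound
print(torch.clamp(x, min=-1.0, max=1.0))  # tensor([nan, -1.0000, 0.5000, 1.0000])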
@ -6154,6 +6154,52 @@
CompositeExplicitAutogradNonFunctional: _nested_view_from_buffer_copy
autogen: _nested_view_from_buffer_copy.out

- func: _nested_view_from_jagged(Tensor(a) self, Tensor offsets, Tensor dummy, Tensor? lengths=None, int ragged_idx=1) -> Tensor(a)
variants: function
device_check: NoCheck
dispatch: {}

- func: _nested_view_from_jagged_copy(Tensor self, Tensor offsets, Tensor dummy, Tensor? lengths=None, int ragged_idx=1) -> Tensor
variants: function
device_check: NoCheck
tags: view_copy
dispatch:
CompositeExplicitAutogradNonFunctional: _nested_view_from_jagged_copy
autogen: _nested_view_from_jagged_copy.out

- func: _nested_get_values(Tensor(a) self) -> Tensor(a)
variants: function
device_check: NoCheck
dispatch: {}

- func: _nested_get_values_copy(Tensor self) -> Tensor
variants: function
device_check: NoCheck
tags: view_copy
dispatch:
CompositeExplicitAutogradNonFunctional: _nested_get_values_copy
autogen: _nested_get_values_copy.out

- func: _nested_get_offsets(Tensor self) -> Tensor
variants: function
device_check: NoCheck
dispatch: {}

# returns undefined Tensor if no lengths present
- func: _nested_get_lengths(Tensor self) -> Tensor
variants: function
device_check: NoCheck
dispatch: {}

- func: _nested_get_ragged_idx(Tensor self) -> int
variants: function
device_check: NoCheck
dispatch: {}

- func: _nested_get_jagged_dummy(Tensor any) -> Tensor
category_override: dummy
dispatch: {}

- func: _trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> Tensor
dispatch:
# calls unsqueeze
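These private _nested_* schemas back the jagged (NJT) nested-tensor layout. For orientation, a minimal sketch of the structure they expose — a packed values buffer plus offsets — via the public nested API (not part of this diff):

import torch

a, b = torch.randn(2, 5), torch.randn(3, 5)
nt = torch.nested.nested_tensor([a, b], layout=torch.jagged)
# values is the packed (2+3, 5) buffer; offsets delimit each component
assert nt.values().shape == (5, 5)
assert nt.offsets().tolist() == [0, 2, 5]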
@ -157,43 +157,6 @@ aotriton::TensorView<Rank> mk_aotensor(const at::Tensor& q, c10::string_view ten
cast_dtype(q.dtype()));
}

template<bool COPY_FROM_INPUT, // For Input Tensor
bool COPY_BACK> // For Output Tensor
class TensorStorageSanitizer {
public:
TensorStorageSanitizer(const at::Tensor& ref,
at::Tensor& to_sanitize)
: ref_(ref), to_sanitize_(to_sanitize)
{
need_sanitize = ref_.strides() != to_sanitize_.strides();
if (!need_sanitize)
return;

temp_ = at::empty_like(ref_);
if (COPY_FROM_INPUT) {
temp_.copy_(to_sanitize_);
}
}

~TensorStorageSanitizer()
{
if (need_sanitize && COPY_BACK)
to_sanitize_.copy_(temp_);
}

at::Tensor& sanitized_tensor()
{
if (need_sanitize)
return temp_;
return to_sanitize_;
}
private:
const at::Tensor& ref_;
at::Tensor& to_sanitize_;
at::Tensor temp_;
bool need_sanitize = false;
};

}

#define CHECK_DEVICE(x) TORCH_CHECK(x.is_cuda(), #x " must be on CUDA")
@ -531,9 +494,6 @@ mha_bwd(const at::Tensor &dout, // batch_size x seqlen_q x num_heads, x head_si
int d_head = head_size_og;
hipError_t err; // TODO: Error handling
{
TensorStorageSanitizer<true, false> dq_s(q_t, dq_t);
TensorStorageSanitizer<true, false> dk_s(k_t, dk_t);
TensorStorageSanitizer<true, false> dv_s(v_t, dv_t);
using aotriton::v2::flash::attn_bwd;
err = attn_bwd(mk_aotensor(q_t, "q"),
mk_aotensor(k_t, "k"),
@ -541,9 +501,9 @@ mha_bwd(const at::Tensor &dout, // batch_size x seqlen_q x num_heads, x head_si
softmax_scale,
mk_aotensor(out_t, "out"),
mk_aotensor(dout_t, "dout"),
mk_aotensor(dq_s.sanitized_tensor(), "dq"),
mk_aotensor(dk_s.sanitized_tensor(), "dk"),
mk_aotensor(dv_s.sanitized_tensor(), "dv"),
mk_aotensor(dq_t, "dq"),
mk_aotensor(dk_t, "dk"),
mk_aotensor(dv_t, "dv"),
mk_aotensor<2>(softmax_lse_cont, "L"),
mk_aotensor<2>(delta, "delta"),
p_dropout,
2 cmake/External/aotriton.cmake vendored
@ -6,7 +6,7 @@ if(NOT __AOTRITON_INCLUDED)
set(__AOTRITON_INSTALL_DIR "${PROJECT_SOURCE_DIR}/torch")
ExternalProject_Add(aotriton_external
GIT_REPOSITORY https://github.com/ROCm/aotriton.git
GIT_TAG 9044fe5eb16130e49a0a1f781ea15037353ad542
GIT_TAG 24a3fe9cb57e5cda3c923df29743f9767194cc27
SOURCE_DIR ${__AOTRITON_SOURCE_DIR}
BINARY_DIR ${__AOTRITON_BUILD_DIR}
PREFIX ${__AOTRITON_INSTALL_DIR}
@ -250,6 +250,44 @@ class DTensorAPITest(DTensorTestBase):
self.assertIsInstance(local_out, torch.Tensor)
self.assertNotIsInstance(local_out, DTensor)

@with_comms
def test_distribute_module_casting(self):
device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))

# check DTensor casting
dt = DTensor.from_local(torch.rand(10), device_mesh, [Replicate()])
dt = dt.to(torch.bfloat16)
self.assertEqual(dt.dtype, torch.bfloat16)
self.assertEqual(dt._local_tensor.dtype, torch.bfloat16)

# check distribute_tensor casting
dt = distribute_tensor(torch.rand(10), device_mesh, [Replicate()])
dt = dt.to(torch.bfloat16)
self.assertEqual(dt.dtype, torch.bfloat16)
self.assertEqual(dt._local_tensor.dtype, torch.bfloat16)

# check distribute_module casting
model = MyModel(10, 10, device=self.device_type)
replica_model = distribute_module(
model,
device_mesh,
)
replica_model = replica_model.to(torch.bfloat16)
self.assertEqual(replica_model.seq[0].weight.dtype, torch.bfloat16)
self.assertEqual(
replica_model.seq[0].weight._local_tensor.dtype, torch.bfloat16
)

# check autocast
dt = distribute_tensor(torch.rand(10), device_mesh, [Replicate()])
replica_model = distribute_module(
model,
device_mesh,
)
with torch.autocast(device_type=self.device_type, dtype=torch.bfloat16):
output = replica_model(dt)
self.assertEqual(output.dtype, torch.bfloat16)

@with_comms
def test_distribute_module_meta(self):
# If the model is too big, the user may first the create entire model on the meta device and then initialize
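The new test above pins down dtype casting through DTensor. A condensed standalone sketch of the same behavior (assumes an already-initialized single-rank process group, e.g. under torchrun; not part of this diff):

import torch
from torch.distributed._tensor import DeviceMesh, DTensor, Replicate

mesh = DeviceMesh("cpu", [0])  # 1-rank mesh, for illustration only
dt = DTensor.from_local(torch.rand(10), mesh, [Replicate()]).to(torch.bfloat16)
# the wrapper dtype and the local shard dtype move together
assert dt.dtype == dt._local_tensor.dtype == torch.bfloat16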
@ -14,6 +14,7 @@ from torch.distributed._tensor.placement_types import _Partial, Shard
from torch.distributed.device_mesh import _mesh_resources, DeviceMesh, init_device_mesh

from torch.distributed.distributed_c10d import (
_world,
get_global_rank,
get_world_size,
init_process_group,
@ -28,7 +29,6 @@ from torch.testing._internal.common_utils import (
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
skip_if_lt_x_gpu,
skip_unless_torch_gpu,
with_comms,
)
@ -79,10 +79,7 @@ class DeviceMeshTest(DTensorTestBase):

@with_comms
@run_with_both_funcol_impls
@skip_unless_torch_gpu
def test_get_group(self):
# TODO: `test_get_group` still periodically timeout on cpu
# remove `@skip_unless_torch_gpu` after the problem is fixed.
mesh_shape = (2, self.world_size // 2)
mesh_2d = init_device_mesh(
self.device_type, mesh_shape, mesh_dim_names=("dp", "tp")
@ -103,10 +100,7 @@ class DeviceMeshTest(DTensorTestBase):

@with_comms
@run_with_both_funcol_impls
@skip_unless_torch_gpu
def test_get_local_rank_raises_exception(self):
# TODO: `test_get_local_rank_raises_exception` still periodically timeout on cpu
# remove `@skip_unless_torch_gpu` after the problem is fixed.
mesh_shape = (2, self.world_size // 2)
mesh_2d = init_device_mesh(
self.device_type, mesh_shape, mesh_dim_names=("dp", "tp")
@ -120,10 +114,7 @@ class DeviceMeshTest(DTensorTestBase):

@with_comms
@run_with_both_funcol_impls
@skip_unless_torch_gpu
def test_get_local_rank(self):
# TODO: `test_get_local_rank_raises_exception` still periodically timeout on cpu
# remove `@skip_unless_torch_gpu` after the problem is fixed.
mesh_shape = (2, self.world_size // 2)
mesh_2d = init_device_mesh(
self.device_type, mesh_shape, mesh_dim_names=("dp", "tp")
@ -276,71 +267,47 @@ class TestDeviceMeshGetItem(DTensorTestBase):

@with_comms
@run_with_both_funcol_impls
def test_raises_invalid_mesh_dim_names(self):
error_msg = "Invalid mesh_dim_name"
# Case 1: the DeviceMesh does not have a mesh_dim_names attribute
with self.assertRaisesRegex(
RuntimeError, "Cannot slice a DeviceMesh without mesh_dim_names."
):
def test_raises_no_mesh_dim_found(self):
with self.assertRaisesRegex(KeyError, "No `mesh_dim_names` found."):
mesh = init_device_mesh(self.device_type, (2, 4))
child_mesh = mesh["DP"]

child_mesh_dim_names = "PP"
with self.assertRaisesRegex(ValueError, error_msg):
@with_comms
@run_with_both_funcol_impls
def test_raises_invalid_mesh_dim_name(self):
child_mesh_dim_name = "PP"
with self.assertRaisesRegex(
KeyError, f"Mesh dimension '{child_mesh_dim_name}' does not exist."
):
mesh_dim_names = ("DP", "TP")
mesh = init_device_mesh(
self.device_type, (2, 4), mesh_dim_names=mesh_dim_names
)
child_mesh = mesh[child_mesh_dim_names]

# Case 2
child_mesh_dim_names = ["PP", "CP"]
with self.assertRaisesRegex(ValueError, error_msg):
mesh_dim_names = ("DP", "TP")
mesh = init_device_mesh(
self.device_type, (2, 4), mesh_dim_names=mesh_dim_names
)
child_mesh = mesh[child_mesh_dim_names]

# Case 3: a given child_mesh_dim_name is not a contiguous subset of the parent mesh's mesh_dim_names.
child_mesh_dim_names = ("TP", "DP")
with self.assertRaisesRegex(ValueError, error_msg):
mesh_dim_names = ("DP", "TP")
mesh = init_device_mesh(
self.device_type, (2, 4), mesh_dim_names=mesh_dim_names
)
child_mesh = mesh[child_mesh_dim_names]

# Case 3
child_mesh_dim_names = ("PP", "TP")
with self.assertRaisesRegex(ValueError, error_msg):
mesh_dim_names = ("PP", "DP", "TP")
mesh = init_device_mesh(
self.device_type, (2, 2, 2), mesh_dim_names=mesh_dim_names
)
child_mesh = mesh[child_mesh_dim_names]
child_mesh = mesh[child_mesh_dim_name]

@with_comms
@run_with_both_funcol_impls
@skip_if_lt_x_gpu(8)
def test_get_item_2d(self):
# TODO: `test_get_item_2d` still periodically timeout on cpu
# remove `@skip_if_lt_x_gpu` after the problem is fixed.
def test_get_item(self):
mesh_shape = (2, 4)
mesh_dim_names = ("DP", "TP")
mesh_2d = init_device_mesh(
self.device_type, mesh_shape, mesh_dim_names=mesh_dim_names
)

pg_ranks_by_dim_name = {}
for mesh_dim_name in mesh_dim_names:
mesh_dim = mesh_dim_names.index(mesh_dim_name)
pg_ranks_by_dim_name[mesh_dim_name] = mesh_2d.mesh.swapdims(
-1, mesh_dim
).reshape(-1, mesh_2d.mesh.size(mesh_dim))

tp_mesh = mesh_2d["TP"]
tp_group = [[0, 1, 2, 3], [4, 5, 6, 7]]
tp_group_idx = self.rank // 4
self.assertEqual(tp_mesh.mesh.tolist(), tp_group[tp_group_idx])
self.assertEqual(tp_mesh.mesh, pg_ranks_by_dim_name["TP"][tp_group_idx])

dp_mesh = mesh_2d["DP"]
dp_group = [[0, 4], [1, 5], [2, 6], [3, 7]]
dp_group_idx = self.rank % 4
self.assertEqual(dp_mesh.mesh.tolist(), dp_group[dp_group_idx])
self.assertEqual(mesh_2d["DP"].mesh, pg_ranks_by_dim_name["DP"][dp_group_idx])

@with_comms
@run_with_both_funcol_impls
@ -351,50 +318,32 @@ class TestDeviceMeshGetItem(DTensorTestBase):
dp_mesh = mesh["dp"]
self.assertEqual(dp_mesh, mesh)

with self.assertRaisesRegex(ValueError, "Invalid mesh_dim_name"):
with self.assertRaisesRegex(RuntimeError, "Invalid mesh_dim_name"):
dp_mesh = mesh["dim0"]

@with_comms
@skip_if_lt_x_gpu(8)
def test_get_item_3d(self):
# TODO: `test_get_item_3d` still periodically timeout on cpu
# remove `@skip_if_lt_x_gpu` after the problem is fixed.
mesh_shape = (2, 2, 2)
mesh_dim_names = ("Replicate", "Shard", "TP")
mesh_3d = init_device_mesh(
self.device_type, mesh_shape, mesh_dim_names=mesh_dim_names
)
@run_with_both_funcol_impls
def test_cache_and_reuse_submesh_slice_result(self):
mesh = init_device_mesh(self.device_type, (2, 4), mesh_dim_names=("dp", "tp"))

tp_group = [[0, 1], [2, 3], [4, 5], [6, 7]]
tp_group_idx = int(self.rank / 2)
self.assertEqual(mesh_3d["TP"].mesh.tolist(), tp_group[tp_group_idx])
dp_mesh = mesh["dp"]
ref_pg_count = _world.group_count

shard_group = [[0, 2], [1, 3], [4, 6], [5, 7]]
shard_group_idx = self.rank % 2 + self.rank // 4 * 2
self.assertEqual(mesh_3d["Shard"].mesh.tolist(), shard_group[shard_group_idx])
# When we call the "dp" slice second time, it should not create any new pg.
# As we are just using the cached result so the pg count should be the same.
dp_mesh_2 = mesh["dp"]
self.assertEqual(ref_pg_count, _world.group_count)

replicate_group = [[0, 4], [1, 5], [2, 6], [3, 7]]
replicate_group_idx = self.rank % 4
self.assertEqual(
mesh_3d["Replicate"].mesh.tolist(), replicate_group[replicate_group_idx]
)

# We support both UX for nD slicing.
# mesh_3d[["Replicate", "Shard"]] or mesh_3d["Replicate", "Shard"]
hsdp_mesh_1 = mesh_3d[["Replicate", "Shard"]]
hsdp_mesh_2 = mesh_3d["Replicate", "Shard"]
hsdp_group = [[[0, 2], [4, 6]], [[1, 3], [5, 7]]]
hsdp_group_idx = self.rank % 2
self.assertEqual(hsdp_mesh_1.mesh.tolist(), hsdp_group[hsdp_group_idx])
self.assertEqual(hsdp_mesh_2.mesh.tolist(), hsdp_group[hsdp_group_idx])
self.assertEqual(hsdp_mesh_1, hsdp_mesh_2)
# When we call the "tp" slice, it should create a new pg, as the "tp" slice is called
# for the first time.
tp_mesh = mesh["tp"]
self.assertTrue(_world.group_count > ref_pg_count)


@instantiate_parametrized_tests
class TestMeshEnv(DTensorTestBase):
@with_comms
@run_with_both_funcol_impls
@skip_unless_torch_gpu
def test_get_parent_mesh(self):
mesh_shape = (2, self.world_size // 2)
mesh_dim_names = ("DP", "TP")
@ -415,7 +364,6 @@ class TestMeshEnv(DTensorTestBase):

@with_comms
@run_with_both_funcol_impls
@skip_unless_torch_gpu
def test_get_parent_mesh_dim_exist(self):
mesh_shape = (2, self.world_size // 2)
mesh_dim_names = ("DP", "TP")
@ -428,7 +376,6 @@ class TestMeshEnv(DTensorTestBase):

@with_comms
@run_with_both_funcol_impls
@skip_unless_torch_gpu
def test_get_parent_mesh_dim_not_exist(self):
mesh_shape = (self.world_size,)
mesh = init_device_mesh(self.device_type, mesh_shape)
@ -437,7 +384,6 @@ class TestMeshEnv(DTensorTestBase):

@with_comms
@run_with_both_funcol_impls
@skip_unless_torch_gpu
def test_get_mesh_dim_by_name(self):
mesh_shape = (2, self.world_size // 2)
mesh_dim_names = ("DP", "TP")
@ -555,9 +501,11 @@ class DeviceMeshCollectiveTest(DTensorTestBase):
torch.chunk(big_tensor, device_mesh.size(), dim=shard_dim)
)
unpadded_list = [
shard_placement._unpad_tensor(big_tensor_chunks[i], pad_sizes[i])
if pad_sizes[i] > 0
else big_tensor_chunks[i]
(
shard_placement._unpad_tensor(big_tensor_chunks[i], pad_sizes[i])
if pad_sizes[i] > 0
else big_tensor_chunks[i]
)
for i, big_tensor in enumerate(big_tensor_chunks)
]
all_gathered_tensor = torch.cat(unpadded_list, dim=shard_dim)
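The reworked tests above exercise named-dimension slicing and the caching of submesh process groups. A minimal sketch of that slicing API (assumes an 8-rank job launched with torchrun; not part of this diff):

from torch.distributed.device_mesh import init_device_mesh

mesh_2d = init_device_mesh("cuda", (2, 4), mesh_dim_names=("dp", "tp"))
tp_mesh = mesh_2d["tp"]   # first slice creates the "tp" subgroup
tp_mesh2 = mesh_2d["tp"]  # second slice reuses the cached submesh, no new group
assert tp_mesh == tp_mesh2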
@ -19,10 +19,10 @@ from torch.fx.experimental.symbolic_shapes import (
StatelessSymbolicContext,
)
from torch.nested._internal.nested_tensor import (
buffer_from_jagged,
jagged_from_list,
jagged_from_tensor_and_lengths,
ViewBufferFromNested,
nested_view_from_values_offsets,
NestedTensor,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
@ -1166,19 +1166,20 @@ class TestNestedTensor(torch._dynamo.test_case.TestCase):
a = torch.randn(2, 3, requires_grad=True, dtype=torch.float64)
b = torch.randn(3, 3, requires_grad=True, dtype=torch.float64)
c = torch.randn(4, 3, requires_grad=True, dtype=torch.float64)
nt, offsets = jagged_from_list([a, b, c], None)
nt2, _ = jagged_from_list([a, b, c], offsets)
nt = torch.nested.as_nested_tensor([a, b, c], layout=torch.jagged)
# TODO: Switch to public API when it exists
nt2, _ = jagged_from_list([a, b, c], nt.offsets())

def fn1(nt1, nt2):
return (nt1 + nt2).sin().cos()

compiled_f = torch.compile(fn1, fullgraph=True, backend=backend, dynamic=True)
out = compiled_f(nt, nt2)
out_buffer = ViewBufferFromNested.apply(out)
out_buffer = out.values()
ga, gb, gc = torch.autograd.grad(out_buffer.sum(), (a, b, c))

out_ref = fn1(nt, nt2)
out_buffer_ref = ViewBufferFromNested.apply(out_ref)
out_buffer_ref = out_ref.values()
ga_ref, gb_ref, gc_ref = torch.autograd.grad(out_buffer_ref.sum(), (a, b, c))

self.assertTrue(torch.allclose(ga, ga_ref))
@ -1218,10 +1219,10 @@ class TestNestedTensor(torch._dynamo.test_case.TestCase):
ret = fn_c(nt, y)[0]
ref = fn(nt_copy, y_copy)[0]

self.assertEqual(buffer_from_jagged(ret), buffer_from_jagged(ref))
self.assertEqual(ret.values(), ref.values())

buffer_from_jagged(ret).sum().backward()
buffer_from_jagged(ref).sum().backward()
ret.values().sum().backward()
ref.values().sum().backward()
for ref_v, res_v in zip(values_copy, values):
self.assertEqual(ref_v.grad, res_v.grad)

@ -1254,83 +1255,112 @@ class TestNestedTensor(torch._dynamo.test_case.TestCase):
self._check_recompiles(fn, (nt,), (nt3,), True)

def _get_views(self):
# There are three cases to consider here based on the logic in
# meta_utils.py
#
# (1) basic case:
# view is not a leaf and has the same requires grad as its basic case
x, _ = self._get_jagged_tensor(((2, 3, 4), 3), None, requires_grad=True)
self.assertEqual(x.is_leaf, False)
yield x.unsqueeze(-1)
# Test all cases with both an NT base and a dense base
# Subclass -> Subclass
# Dense -> Subclass
for base_is_nt in [False, True]:
# There are three cases to consider here based on the logic in
# meta_utils.py
#
# (1) basic case:
# view is not a leaf and has the same requires grad as its basic case
x, _ = self._get_jagged_tensor(((2, 3, 4), 3), None, requires_grad=True)
x = x.clone() if base_is_nt else x
self.assertEqual(x.is_leaf, False)
yield x.unsqueeze(-1)

# (2) leaf view case:
# the view has to be a leaf (w/ requires_grad True or requires_grad False)
# base w/ requires_grad True or requires_grad False
for requires_grad_1, requires_grad_2 in itertools.product(
[True, False], repeat=2
):
x, _ = self._get_jagged_tensor(
((2, 3, 4), 3), None, requires_grad=requires_grad_1
)
# (2) leaf view case:
# the view has to be a leaf (w/ requires_grad True or requires_grad False)
# base w/ requires_grad True or requires_grad False
for requires_grad_1, requires_grad_2 in itertools.product(
[True, False], repeat=2
):
x, _ = self._get_jagged_tensor(
((2, 3, 4), 3), None, requires_grad=requires_grad_1
)
x = x.clone() if base_is_nt else x
with torch.no_grad():
x_view = x.unsqueeze(-1)
# The issue is this doesn't quite work
x_view.requires_grad_(requires_grad_2)
yield x_view

# (3) obscure case:
# view is not a leaf (implies requires_grad True)
# base w/ requires_grad False)
x, _ = self._get_jagged_tensor(((2, 3, 4), 3), None, requires_grad=False)
x = x.clone() if base_is_nt else x
# intermediate leaf view
with torch.no_grad():
x_view = x.unsqueeze(-1)
# The issue is this doesn't quite work
x_view.requires_grad_(requires_grad_2)
yield x_view
x_view.requires_grad_(True)
x_view_view = x_view.unsqueeze(-1)
yield x_view_view

# (3) obscure case:
# view is not a leaf (implies requires_grad True)
# base w/ requires_grad False)
x, _ = self._get_jagged_tensor(((2, 3, 4), 3), None, requires_grad=False)
# intermediate leaf view
with torch.no_grad():
x_view = x.unsqueeze(-1)
x_view.requires_grad_(True)
x_view_view = x_view.unsqueeze(-1)
yield x_view_view
# Subclass -> Dense
x = self._get_jagged_tensor(((2, 3, 4), 3), None, requires_grad=True)[0].clone()
yield x.values()

# Dense -> Subclass -> Dense -> Subclass
values = torch.randn(10, 5)
offsets = torch.tensor([0, 3, 6, 10])
offsets2 = offsets.clone().detach()
yield nested_view_from_values_offsets(
nested_view_from_values_offsets(values, offsets).values(), offsets
)

def _input_view_test(self, nt_view):
def fn(x):
return x.sin()

out_ref = fn(nt_view)
torch._dynamo.reset()
compile_fn = torch.compile(
fn, fullgraph=True, backend="aot_eager", dynamic=True
)
out = compile_fn(nt_view)

# Check metadata and values are correct
self.assertTrue(out.size() == out_ref.size())
self.assertTrue(out.stride() == out_ref.stride())
if out.is_nested:
self.assertTrue(torch.allclose(out.values(), out_ref.values()))
else:
self.assertTrue(torch.allclose(out, out_ref))

# Check that no upper/lower bound guards are incurred
def backend(gm, args):
context = torch._guards.TracingContext.get()
guards = [str(g.expr) for g in context.fake_mode.shape_env.guards]

# varies based on the type of view
guard_str = "\n".join(guards)
if isinstance(nt_view._base, NestedTensor):
self.assertExpectedInline(guard_str, """Eq(s3 - 1, s0)""")
else:
self.assertExpectedInline(guard_str, """""")
return gm

torch._dynamo.reset()
compile_fn = torch.compile(fn, fullgraph=True, backend=backend, dynamic=True)
out = compile_fn(nt_view)

def test_inputs_to_compiled_fn_are_views(self):
for nt_view in self._get_views():
self._input_view_test(nt_view)

def fn(x):
return x.sin()

out_ref = fn(nt_view)
torch._dynamo.reset()
compile_fn = torch.compile(
fn, fullgraph=True, backend="aot_eager", dynamic=True
)
out = compile_fn(nt_view)

# Check metadata and values are correct
self.assertTrue(out.size() == out_ref.size())
self.assertTrue(out.stride() == out_ref.stride())
self.assertTrue(torch.allclose(out.values(), out_ref.values()))

# Check that no upper/lower bound guards are incurred
def backend(gm, args):
context = torch._guards.TracingContext.get()
guards = [str(g.expr) for g in context.fake_mode.shape_env.guards]
ranges = [
f"{s}: [{vr.lower}, {vr.upper}]"
for s, vr in context.fake_mode.shape_env.var_to_range.items()
]
self.assertExpectedInline("\n".join(guards), """Eq(s3 - 1, s0)""")
self.assertExpectedInline(
"\n".join(ranges),
"""\
s0: [2, 9223372036854775805]
s2: [2, 9223372036854775806]
s3: [3, 9223372036854775806]
s5: [2, 9223372036854775806]""",
)
return gm

torch._dynamo.reset()
compile_fn = torch.compile(
fn, fullgraph=True, backend=backend, dynamic=True
)
out = compile_fn(nt_view)
# NJT1 -> Dense -> NJT2 -> Dense view
# During view replay, the Dense -> NJT2 part will construct an intermediate,
# symbolically-sized NJT that is immediately deconstructed to return the final dense
# view. To construct this intermediate properly, we need the associated nested int
# to be symbolic. This view is expected to fail compilation until symbolic nested ints
# are cached onto fake offsets to solve this problem.
@unittest.expectedFailure
def test_subclass_dense_subclass_dense_view(self):
x = self._get_jagged_tensor(((2, 3, 4), 3), None, requires_grad=True)[0].clone()
offsets2 = x.offsets().clone().detach()
nt_view = nested_view_from_values_offsets(x.values(), offsets2).values()
self._input_view_test(nt_view)


if __name__ == "__main__":
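The _get_views() rewrite above enumerates view cases with both nested and dense bases, including the new Subclass -> Dense case. A minimal sketch of that case using the public jagged API (not part of this diff):

import torch

nt = torch.nested.nested_tensor(
    [torch.randn(2, 4), torch.randn(3, 4)], layout=torch.jagged
)
dense = nt.values()   # Subclass -> Dense: a true view of the nested tensor
assert dense.shape == (5, 4)
assert dense._base is not None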
@ -435,6 +435,13 @@ aten::_nested_from_padded
aten::_nested_from_padded.out
aten::_nested_from_padded_and_nested_example
aten::_nested_from_padded_and_nested_example.out
aten::_nested_get_jagged_dummy
aten::_nested_get_lengths
aten::_nested_get_offsets
aten::_nested_get_ragged_idx
aten::_nested_get_values
aten::_nested_get_values_copy
aten::_nested_get_values_copy.out
aten::_nested_select_backward
aten::_nested_sum_backward
aten::_nested_tensor_from_mask
@ -452,6 +459,9 @@ aten::_nested_tensor_strides.out
aten::_nested_view_from_buffer
aten::_nested_view_from_buffer_copy
aten::_nested_view_from_buffer_copy.out
aten::_nested_view_from_jagged
aten::_nested_view_from_jagged_copy
aten::_nested_view_from_jagged_copy.out
aten::_new_zeros_with_same_feature_meta
aten::_new_zeros_with_same_feature_meta.out
aten::_nnpack_spatial_convolution
@ -72,12 +72,28 @@ from torch.export import export


torch.library.define("testlib::returns_tensor_symint", "(Tensor x) -> (Tensor, SymInt)")
torch.library.define(
    "testlib::foo",
    "(Tensor(a!) x, Tensor(b!) z) -> (Tensor, Tensor, Tensor)",
    tags=torch.Tag.pt2_compliant_tag,
)

@torch.library.impl("testlib::returns_tensor_symint", "cpu")
@torch.library.impl_abstract("testlib::returns_tensor_symint")
def returns_tensor_symint_impl(x):
    return x, x.shape[0]

@torch.library.impl("testlib::foo", "cpu")
@torch._dynamo.disable
def foo_impl(x, z):
    x.add_(5)
    z.add_(5)
    return x, z, x + z

@torch.library.impl_abstract("testlib::foo")
def foo_abstract(x, z):
    return x, z, x + z


@unittest.skipIf(not torchdynamo.is_dynamo_supported(), "dynamo isn't supported")
class TestDynamismExpression(TestCase):
@ -3551,6 +3567,30 @@ def forward(self, arg0_1, arg1_1, arg2_1):

        self._test_export_same_as_eager(Module(), (torch.randn(4, 4),))

    def test_custom_op_auto_functionalize(self):
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()

            def forward(self, x, z):
                return torch.ops.testlib.foo(x, z)

        inps = (torch.ones(5), torch.ones(5))
        inps_for_export = (torch.ones(5), torch.ones(5))
        inps_for_export_with_decomp = (torch.ones(5), torch.ones(5))

        ep = torch.export.export(M(), inps_for_export)
        x_new_eager, z_new_eager, legit_eager = M()(*inps)
        x_new_export, z_new_export, legit_export = ep.module()(*inps_for_export)
        self.assertTrue(torch.allclose(x_new_eager, x_new_export))
        self.assertTrue(torch.allclose(z_new_eager, z_new_export))
        self.assertTrue(torch.allclose(legit_eager, legit_export))

        ep = ep.run_decompositions()
        x_new_export, z_new_export, legit_export = ep.module()(*inps_for_export_with_decomp)
        self.assertTrue(torch.allclose(x_new_eager, x_new_export))
        self.assertTrue(torch.allclose(z_new_eager, z_new_export))
        self.assertTrue(torch.allclose(legit_eager, legit_export))

@unittest.skipIf(not torchdynamo.is_dynamo_supported(), "dynamo isn't supported")
class TestOneOffModelExportResult(TestCase):
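The `testlib::foo` definitions above are the moving parts behind `test_custom_op_auto_functionalize`: a custom op whose schema declares mutable arguments (`Tensor(a!)`, `Tensor(b!)`) is auto-functionalized by export instead of being rejected. A minimal standalone sketch of the same pattern -- the `mylib::inc_` name, shapes, and printout are illustrative, not taken from the diff:

import torch

# Hypothetical library/op name, for illustration only.
torch.library.define("mylib::inc_", "(Tensor(a!) x) -> Tensor")

@torch.library.impl("mylib::inc_", "cpu")
def inc_impl(x):
    x.add_(1)      # in-place mutation declared by the (a!) annotation
    return x + 0   # plus a fresh, non-aliasing functional result

@torch.library.impl_abstract("mylib::inc_")
def inc_abstract(x):
    return torch.empty_like(x)  # meta/fake kernel: shapes only, no mutation

class M(torch.nn.Module):
    def forward(self, x):
        return torch.ops.mylib.inc_(x)

# Export rewrites the mutating call into a functional auto_functionalized node;
# running the exported module still matches eager numerics.
ep = torch.export.export(M(), (torch.zeros(3),))
out = ep.module()(torch.zeros(3))
print(out)  # tensor([1., 1., 1.]) -- same as eager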
@ -7,8 +7,8 @@ with test_functionalization_with_native_python_assertion)
import math
import operator
import unittest
from typing import List, Set
from re import escape
from typing import List, Set

import torch
from functorch.experimental.control_flow import cond
@ -17,22 +17,38 @@ from torch._export.pass_base import _ExportPassBaseDeprecatedDoNotUse
from torch._export.passes.functionalize_side_effectful_ops_pass import (
    _FunctionalizeSideEffectfulOpsPass,
)
from torch._export.passes.replace_set_grad_with_hop_pass import (
    _is_set_grad_enabled_node,
    _is_set_grad_enabled_sub_mod,
)
from torch._export.passes.replace_view_ops_with_view_copy_ops_pass import (
    get_view_copy_of_view_op,
    is_view_op,
    ReplaceViewOpsWithViewCopyOpsPass,
)
from torch._export.utils import (
    node_inline_,
    nodes_count,
    nodes_filter,
    nodes_map,
    sequential_split,
)
from torch._higher_order_ops.auto_functionalize import auto_functionalized
from torch.export import export
from torch.export._remove_auto_functionalized_pass import (
    unsafe_remove_auto_functionalized_pass,
)
from torch.fx.passes.infra.partitioner import Partition
from torch.fx.passes.operator_support import OperatorSupport
from torch.library import impl, _scoped_library
from torch.testing import FileCheck
from torch.testing._internal.common_utils import run_tests, TestCase, skipIfTorchDynamo, IS_WINDOWS
from torch.utils import _pytree as pytree
from torch._export.utils import sequential_split, nodes_filter, nodes_map, node_inline_, nodes_count
from torch._export.passes.replace_set_grad_with_hop_pass import (
    _is_set_grad_enabled_node, _is_set_grad_enabled_sub_mod
)
from torch.testing._internal.common_utils import (
    IS_WINDOWS,
    run_tests,
    skipIfTorchDynamo,
    TestCase,
)

from torch.utils import _pytree as pytree


def count_call_function(graph: torch.fx.Graph, target: torch.ops.OpOverload) -> int:
    count = 0
@ -620,5 +636,96 @@ def forward(self, sin, cos):
        self.assertEqual(before_str, after_inline_str)
        self.assertEqual(gm(*args), new_gm(*args))

    def test_remove_auto_functionalized_pass(self) -> None:
        with _scoped_library("DO_NOT_USE_TEST_ONLY", "DEF") as lib:

            lib.define("custom_mutator(Tensor x, Tensor(a!) y) -> Tensor")

            @impl(lib, "custom_mutator", "Meta")
            def custom_mutator_meta(
                x: torch.Tensor,
                y: torch.Tensor,
            ) -> torch.Tensor:
                return torch.empty_like(x)

            @impl(lib, "custom_mutator", "CompositeExplicitAutograd")
            def custom_mutator(
                x: torch.Tensor,
                y: torch.Tensor,
            ) -> torch.Tensor:
                return x + y.add_(1)

            class M(torch.nn.Module):
                def __init__(self):
                    super().__init__()
                    self.register_buffer("state", torch.zeros(1))

                def forward(self, x):
                    return torch.ops.DO_NOT_USE_TEST_ONLY.custom_mutator(x, self.state)

            mod = M()
            x = torch.randn([3, 3])
            ep = export(mod, (x,))
            inplace_ep = unsafe_remove_auto_functionalized_pass(ep)
            nodes = inplace_ep.graph.nodes
            for node in nodes:
                if node.op == "call_function":
                    self.assertFalse(node.target is auto_functionalized)
                    self.assertFalse(node.target is operator.getitem)

            for spec in inplace_ep.graph_signature.output_specs:
                self.assertFalse("getitem" in spec.arg.name)

    def test_remove_auto_functionalized_pass_tuple(self) -> None:
        with _scoped_library("DO_NOT_USE_TEST_ONLY", "DEF") as lib:

            lib.define("custom_mutator_tuple(Tensor x, Tensor(a!) y) -> (Tensor, Tensor)")

            @impl(lib, "custom_mutator_tuple", "Meta")
            def custom_mutator_tuple_meta(
                x: torch.Tensor,
                y: torch.Tensor,
            ):
                return (torch.empty_like(x), torch.empty_like(x))

            @impl(lib, "custom_mutator_tuple", "CompositeExplicitAutograd")
            def custom_mutator_tuple(
                x: torch.Tensor,
                y: torch.Tensor,
            ):
                return (x, x + y.add_(1))

            class M(torch.nn.Module):
                def __init__(self):
                    super().__init__()
                    self.register_buffer("state", torch.zeros(1))

                def forward(self, x):
                    return torch.ops.DO_NOT_USE_TEST_ONLY.custom_mutator_tuple(
                        x, self.state
                    )

            mod = M()
            x = torch.randn([3, 3])
            ep = export(mod, (x,))
            inplace_ep = unsafe_remove_auto_functionalized_pass(ep)

            nodes = inplace_ep.graph.nodes
            getitems = 0
            for node in nodes:
                if node.op == "call_function":
                    self.assertFalse(node.target is auto_functionalized)
                    if node.target is operator.getitem:
                        getitems += 1
            self.assertEqual(getitems, 2)  # tuple return of len 2

            out_specs = inplace_ep.graph_signature.output_specs
            self.assertEqual(out_specs[0].arg.name, "arg0_1")  # state
            self.assertEqual(out_specs[1].arg.name, "getitem")  # tuple return 1
            self.assertEqual(out_specs[2].arg.name, "getitem_1")  # tuple return 2


if __name__ == '__main__':
    run_tests()
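The two tests above lean on `_scoped_library`, which creates a temporary operator library that is torn down when the `with` block exits, so test-only ops do not leak into the process-wide registry. A minimal sketch of that pattern -- the `MY_SCRATCH` library name and `twice` op are hypothetical:

import torch
from torch.library import impl, _scoped_library

with _scoped_library("MY_SCRATCH", "DEF") as lib:  # hypothetical library name
    lib.define("twice(Tensor x) -> Tensor")

    @impl(lib, "twice", "CompositeExplicitAutograd")
    def twice(x: torch.Tensor) -> torch.Tensor:
        return x * 2

    # Usable like any other registered op while the scope is alive.
    print(torch.ops.MY_SCRATCH.twice(torch.ones(2)))  # tensor([2., 2.])

# Outside the block the registration is gone, so nothing leaks between tests.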
@ -322,6 +322,7 @@ if RUN_CPU:
        BaseTest("test_relu"),  # multiple inputs
        BaseTest("test_repeat_interleave", "", test_cpu_repro.CPUReproTests()),
        BaseTest("test_scalar_input"),
        BaseTest("test_scalar_output"),
        BaseTest("test_scaled_dot_product_attention"),
        BaseTest("test_scatter1"),
        BaseTest("test_scatter2"),
@ -415,6 +415,12 @@ def check_model(
    if check_has_compiled:
        assert called, "Ran graph without calling compile_fx"
    assert type(actual) == type(correct)
    if isinstance(actual, (tuple, list)):
        assert len(actual) == len(correct)
        assert all(
            type(actual_item) == type(correct_item)
            for actual_item, correct_item in zip(actual, correct)
        )

    correct_flat, correct_spec = tree_flatten(correct)
    actual_flat = pytree.tree_leaves(actual)
@ -2452,6 +2458,20 @@ class CommonTemplate:

        self.common(fn, [torch.randint(5, (1, 8)), 5400])

    @torch._dynamo.config.patch(dynamic_shapes=True)
    @torch._dynamo.config.patch(assume_static_by_default=False)
    def test_scalar_output(self):
        def fn(arg0_1, arg2_1):
            arg1_1 = arg2_1.size(1)
            view = torch.ops.aten.view.default(arg2_1, [-1, arg1_1])
            embedding = torch.ops.aten.embedding.default(arg0_1, view)
            full = torch.ops.aten.full.default([1, arg1_1], 1, dtype=torch.float32)
            return (full, arg1_1, embedding)

        arg0_1 = rand_strided((32128, 768), (768, 1), device="cpu", dtype=torch.float32)
        arg2_1 = rand_strided((1, 22), (22, 1), device="cpu", dtype=torch.int64)
        self.common(fn, [arg0_1, arg2_1])

    def test_shape_prop_torch_ones(self):
        class Model(torch.nn.Module):
            def forward(self, attention_scores):
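`torch._dynamo.config.patch`, used as a decorator above, scopes a Dynamo config override to a single test; it also works as a context manager. A small sketch of the same dynamic-shape setup outside a test class (the function being compiled is illustrative):

import torch

def f(x):
    # Returning a Python int derived from a runtime size exercises the
    # scalar-output path that the new inductor test covers.
    return x.size(1) * 2

# Scope the dynamic-shape setting to this block only.
with torch._dynamo.config.patch(assume_static_by_default=False):
    compiled = torch.compile(f)
    print(compiled(torch.ones(1, 22)))  # 44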
@ -5,7 +5,7 @@ import unittest

import torch

from torch.testing._internal.common_utils import IS_LINUX
from torch.testing._internal.common_utils import IS_LINUX, skipIfRocm
from torch.testing._internal.inductor_utils import HAS_GPU

try:
@ -71,6 +71,7 @@ class TestTritonHeuristics(TestCase):
        ]
        self.assertEqual(forward(*args), foo_c(*args))

    @skipIfRocm
    def test_artificial_zgrid(self):
        self._test_artificial_zgrid()
@ -8822,7 +8822,7 @@ get_out().sum().backward()
            self.assertEqual(a.device, b.device)
            self.assertEqual(a.dtype, b.dtype)

        def _test_fn(fn, inp, *args):
        def _test_fn(fn, inp, *args, use_unsafe_view_func=False):
            outs = fn(inp, *args)
            # handle functions that return multiple views (e.g. split)
            if isinstance(outs, torch.Tensor):
@ -8835,7 +8835,10 @@ get_out().sum().backward()
                # forward view_func
                new_inp = inp.clone()
                _assert_match_metadata(new_inp, inp)
                new_out = out._view_func(new_inp)
                if use_unsafe_view_func:
                    new_out = out._view_func_unsafe(new_inp)
                else:
                    new_out = out._view_func(new_inp)
                _assert_match_metadata(new_out, out)
                self.assertEqual(new_out, out)

@ -8901,6 +8904,33 @@ get_out().sum().backward()

        _test_fn(chain_with_only_current_view_func, torch.randn(2, 3, 4))

        # TODO: Move this somewhere else
        # test NT views
        from torch.nested._internal.nested_tensor import nested_view_from_values_offsets

        values = torch.randn(10, 5)
        offsets = torch.tensor([0, 3, 6, 10])
        _test_fn(nested_view_from_values_offsets, values, offsets)

        nt = nested_view_from_values_offsets(values, offsets).clone().detach()
        _test_fn(torch.ops.aten._nested_get_values.default, nt, use_unsafe_view_func=True)

        def chain_nt_to_dense_back_and_forth(nt):
            # NJT1 -> dense -> NJT2 -> dense
            offsets2 = nt.offsets().clone().detach()
            return nested_view_from_values_offsets(nt.values(), offsets2).values()

        _test_fn(chain_nt_to_dense_back_and_forth, nt, use_unsafe_view_func=True)

        def chain_dense_to_nt_back_and_forth(values, offsets):
            offsets2 = offsets.clone().detach()
            # dense -> NJT1 -> dense -> NJT2
            return nested_view_from_values_offsets(
                nested_view_from_values_offsets(values, offsets).values(),
                offsets2)

        _test_fn(chain_dense_to_nt_back_and_forth, values, offsets, use_unsafe_view_func=True)

    def test_view_func_replay_with_modified_state(self):
        with torch.autograd._force_original_view_tracking(True):
            base = torch.randn(3, 4, 5)
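`_view_func` replays the chain of views that produced a tensor onto a fresh base; the `_view_func_unsafe` variant added here skips metadata checks so it also works for the jagged-tensor views above. A tiny sketch of plain replay -- this is private API, so treat the exact surface as version-specific:

import torch

base = torch.randn(3, 4)
view = base.transpose(0, 1)[1:]   # a chain of two views

new_base = torch.randn(3, 4)
replayed = view._view_func(new_base)  # private API: replays transpose + slice

# The replayed tensor is the same view, taken of the new base.
print(replayed.shape)                                        # torch.Size([3, 3])
print(torch.equal(replayed, new_base.transpose(0, 1)[1:]))   # True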
@ -5777,6 +5777,25 @@ class TestMPS(TestCaseMPS):

        self.assertEqual(clamp_result_mps, clamp_result_cpu)

    def test_clamp_nan(self):
        t_mps = torch.tensor([torch.nan, 1, 2], device="mps")
        t_cpu = torch.tensor([torch.nan, 1, 2], device="cpu")

        clamp_min_max_mps = torch.clamp(t_mps, min=-100, max=100)
        clamp_min_max_cpu = torch.clamp(t_cpu, min=-100, max=100)

        self.assertEqual(clamp_min_max_mps, clamp_min_max_cpu)

        clamp_min_mps = torch.clamp(t_mps, min=-100)
        clamp_min_cpu = torch.clamp(t_cpu, min=-100)

        self.assertEqual(clamp_min_mps, clamp_min_cpu)

        clamp_max_mps = torch.clamp(t_mps, max=100)
        clamp_max_cpu = torch.clamp(t_cpu, max=100)

        self.assertEqual(clamp_max_mps, clamp_max_cpu)

    # Test clamp_min
    def test_clamp_min(self):
        def helper(n, c, h, w):
@ -6918,9 +6937,9 @@ class TestMPS(TestCaseMPS):
            # See https://github.com/pytorch/pytorch/issues/116769#issuecomment-1888302095
            self.assertNotEqual(torch.mm(x, y[:, 16384:32768]).abs().max().item(), 0.0)

        def compare_mm(m, n, k):
            x = torch.rand(m, n, device="mps")
            y = torch.rand(n, k, device="mps")
        def compare_mm(m, n, k, dtype=torch.float):
            x = torch.rand(m, n, device="mps", dtype=dtype)
            y = torch.rand(n, k, device="mps", dtype=dtype)
            z = torch.mm(x, y).cpu()
            z_cpu = torch.mm(x.cpu(), y.cpu())
            self.assertEqual(z, z_cpu)
@ -6931,6 +6950,10 @@ class TestMPS(TestCaseMPS):
        # see https://github.com/pytorch/pytorch/issues/116769#issuecomment-1920066984
        compare_mm(32769, 1, 1025)

        if product_version >= 14.0:
            # Test bfloat16 mm
            compare_mm(1024, 1, 32769, torch.bfloat16)

    # Test flip
    def test_flip(self):
        def helper(shape, dims):
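The MPS tests above all follow the same parity pattern: run an op on `device="mps"`, run it on CPU, and compare. A standalone sketch of the clamp/NaN check, guarded so it only runs where an MPS device exists:

import torch

if torch.backends.mps.is_available():
    t = torch.tensor([float("nan"), 1.0, 2.0])
    # clamp should propagate NaN identically on both backends
    out_mps = torch.clamp(t.to("mps"), min=-100, max=100).cpu()
    out_cpu = torch.clamp(t, min=-100, max=100)
    # NaN != NaN, so compare with equal_nan=True
    assert torch.allclose(out_mps, out_cpu, equal_nan=True)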
@ -37,18 +37,18 @@ from torch.testing._internal.common_utils import (
    parametrize,
    run_tests,
    skipIfSlowGradcheckEnv,
    skipIfTorchDynamo,
    markDynamoStrictTest,
    xfailIfTorchDynamo,
    skipIfTorchDynamo,
    subtest,
    TEST_WITH_ROCM,
    TestCase,
)

from torch.nested._internal.nested_tensor import (
    buffer_from_jagged,
    jagged_from_list,
    NestedTensor,
    nested_view_from_values_offsets,
)

# Tests are ported from pytorch/nestedtensor.
@ -3014,7 +3014,8 @@ class TestNestedTensorSubclass(TestCase):
        a = torch.randn(2, 3, requires_grad=True, dtype=torch.float64, device=device)
        b = torch.randn(3, 3, requires_grad=True, dtype=torch.float64, device=device)
        c = torch.randn(4, 3, requires_grad=True, dtype=torch.float64, device=device)
        nt, _offsets = jagged_from_list([a, b, c], None)
        nt = torch.nested.as_nested_tensor([a, b, c], layout=torch.jagged)
        _offsets = nt.offsets()

        for op in (
            torch.ops.aten.is_non_overlapping_and_dense.default,
@ -3043,9 +3044,9 @@ class TestNestedTensorSubclass(TestCase):
        weight = torch.randn(4, 3, requires_grad=True, dtype=torch.float64, device=device)

        def grad_test_func(a, b, c, weight):
            nt, _ = jagged_from_list([a, b, c], None)
            nt = torch.nested.as_nested_tensor([a, b, c], layout=torch.jagged)
            out = torch.nn.functional.linear(nt, weight)
            return buffer_from_jagged(out)
            return out.values()

        gradcheck(grad_test_func, inputs=(a, b, c, weight), check_batched_grad=False)

@ -3055,9 +3056,9 @@ class TestNestedTensorSubclass(TestCase):
        c = torch.randn(4, 3, requires_grad=True, dtype=torch.float64, device=device)

        def grad_test_func(a, b, c):
            nt, _ = jagged_from_list([a, b, c], None)
            nt = torch.nested.as_nested_tensor([a, b, c], layout=torch.jagged)
            out = torch.nn.functional.silu(nt.sin().cos())
            return buffer_from_jagged(out)
            return out.values()

        gradcheck(grad_test_func, inputs=(a, b, c), check_batched_grad=False)

@ -3066,7 +3067,7 @@ class TestNestedTensorSubclass(TestCase):
            torch.randn(i + 2, 5, requires_grad=True, dtype=torch.float64, device=device) for i in range(3)
        )

        nt, _ = jagged_from_list([a.detach(), b.detach(), c.detach()], None)
        nt = torch.nested.nested_tensor([a.detach(), b.detach(), c.detach()], layout=torch.jagged)
        nt_t = nt.transpose(1, 2)
        self.assertFalse(nt_t.is_contiguous())
        out = torch.nn.functional.silu(nt_t.sin().cos())
@ -3079,10 +3080,10 @@ class TestNestedTensorSubclass(TestCase):
        )

        def grad_test_func(a, b, c):
            nt, _ = jagged_from_list([a, b, c], None)
            nt = torch.nested.as_nested_tensor([a, b, c], layout=torch.jagged)
            nt_t = nt.transpose(1, 2)
            out = torch.nn.functional.silu(nt_t.sin().cos())
            return buffer_from_jagged(out)
            return out.values()

        gradcheck(grad_test_func, inputs=(a, b, c), check_batched_grad=False)

@ -3094,8 +3095,8 @@ class TestNestedTensorSubclass(TestCase):

        # Incorrect usage: shape check will fail if the offsets tensor are not
        # the same exact tensor object
        nt1, _ = jagged_from_list([a, b, c], None)
        nt2, _ = jagged_from_list([a, b, c], None)
        nt1 = torch.nested.as_nested_tensor([a, b, c], layout=torch.jagged)
        nt2 = torch.nested.as_nested_tensor([a, b, c], layout=torch.jagged)

        self.assertRaisesRegex(
            RuntimeError,
@ -3104,10 +3105,11 @@ class TestNestedTensorSubclass(TestCase):

        # Correct usage: chain the calls using the same offsets tensor object
        def grad_test_func(a, b, c):
            nt1, offsets = jagged_from_list([a, b, c], None)
            nt2, offsets = jagged_from_list([a, b, c], offsets)
            nt1 = torch.nested.as_nested_tensor([a, b, c], layout=torch.jagged)
            # TODO: Switch to public API that takes in (values, offsets) once it exists
            nt2, offsets = jagged_from_list([a, b, c], nt1.offsets())
            out = nt1 * nt2
            return buffer_from_jagged(out)
            return out.values()

        gradcheck(grad_test_func, inputs=(a, b, c), check_batched_grad=False)

@ -3122,10 +3124,10 @@ class TestNestedTensorSubclass(TestCase):
        nt1_t = nt1.transpose(1, 2)
        nt2_t = nt2.transpose(1, 2)

        out = nt1_t * nt2_t
        self.assertFalse(nt1_t.is_contiguous())
        self.assertEqual(out.is_contiguous(), (b.transpose(-1, -2) * b.transpose(-1, -2)).is_contiguous())
        self.assertEqual(out.shape, nt1_t.shape)
        # out = nt1_t * nt2_t
        # self.assertFalse(nt1_t.is_contiguous())
        # self.assertEqual(out.is_contiguous(), (b.transpose(-1, -2) * b.transpose(-1, -2)).is_contiguous())
        # self.assertEqual(out.shape, nt1_t.shape)

        self.assertRaisesRegex(
            RuntimeError,
@ -3144,7 +3146,7 @@ class TestNestedTensorSubclass(TestCase):
            nt1_t = nt1.transpose(1, 2)
            nt2_t = nt2.transpose(1, 2)
            out = nt1_t * nt2_t
            return buffer_from_jagged(out)
            return out.values()

        gradcheck(grad_test_func, inputs=(a, b, c), check_batched_grad=False)
@ -3153,14 +3155,16 @@ class TestNestedTensorSubclass(TestCase):
        b = torch.randn(3, 3, requires_grad=True, dtype=torch.float64, device=device)
        c = torch.randn(4, 3, requires_grad=True, dtype=torch.float64, device=device)

        nt, _ = jagged_from_list([a, b, c], None)
        nt = torch.nested.as_nested_tensor([a, b, c], layout=torch.jagged)
        out = torch.split(nt, 2, -1)
        self.assertEqual(len(out), 2)
        self.assertEqual(
            out[0], jagged_from_list([a[:, 0:2], b[:, 0:2], c[:, 0:2]], None)[0]
            out[0],
            torch.nested.as_nested_tensor([a[:, 0:2], b[:, 0:2], c[:, 0:2]], layout=torch.jagged)
        )
        self.assertEqual(
            out[1], jagged_from_list([a[:, 2:], b[:, 2:], c[:, 2:]], None)[0]
            out[1],
            torch.nested.as_nested_tensor([a[:, 2:], b[:, 2:], c[:, 2:]], layout=torch.jagged)
        )

        with self.assertRaisesRegex(
@ -3174,14 +3178,16 @@ class TestNestedTensorSubclass(TestCase):
        b = torch.randn(3, 3, requires_grad=True, dtype=torch.float64, device=device)
        c = torch.randn(4, 3, requires_grad=True, dtype=torch.float64, device=device)

        nt, _ = jagged_from_list([a, b, c], None)
        nt = torch.nested.as_nested_tensor([a, b, c], layout=torch.jagged)
        out = torch.split(nt, [1, 2], -1)
        self.assertEqual(len(out), 2)
        self.assertEqual(
            out[0], jagged_from_list([a[:, 0:1], b[:, 0:1], c[:, 0:1]], None)[0]
            out[0],
            torch.nested.as_nested_tensor([a[:, 0:1], b[:, 0:1], c[:, 0:1]], layout=torch.jagged)
        )
        self.assertEqual(
            out[1], jagged_from_list([a[:, 1:], b[:, 1:], c[:, 1:]], None)[0]
            out[1],
            torch.nested.as_nested_tensor([a[:, 1:], b[:, 1:], c[:, 1:]], layout=torch.jagged)
        )
        with self.assertRaisesRegex(
            RuntimeError,
@ -3207,13 +3213,12 @@ class TestNestedTensorSubclass(TestCase):
        view = nt.expand(-1, -1, 5)
        self.assertEqual(nt.shape[:2], view.shape[:2])

    @skipIfTorchDynamo("skipped until proper view support for NT")
    def test_view_ragged_idx_not_one(self, device):
        nt = random_nt_from_dims([2, None, 20], device=device, dtype=torch.float32, layout=torch.jagged)

        view_transposed = nt.transpose(1, 2).view(2, 20, nt.size(1))
        self.assertEqual((2, 20, nt.size(1)), (view_transposed.size()))
        self.assertEqual(view_transposed._base, nt)
        self.assertEqual(view_transposed._base, nt._base)

    def test_unsafe_view(self, device):
        nt = random_nt_from_dims([4, None, 8, 10], device=device, dtype=torch.float32, layout=torch.jagged)
@ -3232,14 +3237,13 @@ class TestNestedTensorSubclass(TestCase):
    @xfailIfTorchDynamo
    @parametrize("requires_grad", [False, True])
    def test_reshape_decomp(self, device, requires_grad):
        # contiguous NT should result in view
        # contiguous NT should result in view.
        nt = random_nt_from_dims(
            [3, None, 10],
            device=device,
            dtype=torch.float32,
            layout=torch.jagged,
            requires_grad=requires_grad
        )
        ).detach().requires_grad_(requires_grad)
        view = nt.reshape(-1, -1, 5, 2)
        self.assertEqual(view.shape[:2], nt.shape[:2])
        self.assertTrue(view._is_view() and view._base is nt)
@ -3355,9 +3359,9 @@ class TestNestedTensorSubclass(TestCase):
        )

        def grad_test_func(t, *ts):
            nt, _ = jagged_from_list(ts, None)
            nt = torch.nested.as_nested_tensor(list(ts), layout=torch.jagged)
            out = nt + t
            return buffer_from_jagged(out)
            return out.values()

        for t_size in t_sizes:
            t = torch.rand(t_size, requires_grad=True, device=device, dtype=torch.float64)
@ -3369,13 +3373,13 @@ class TestNestedTensorSubclass(TestCase):

        nt1, offsets = jagged_from_list(ts1, None)
        nt2, offsets = jagged_from_list(ts2, offsets)
        buf1 = buffer_from_jagged(nt1).detach().clone()
        buf2 = buffer_from_jagged(nt2).detach().clone()
        buf1 = nt1.values().detach().clone()
        buf2 = nt2.values().detach().clone()

        res_nt = torch.ops.aten.threshold_backward(nt1, nt2, 0.0)
        res_dense = torch.ops.aten.threshold_backward(buf1, buf2, 0.0)

        self.assertEqual(res_dense, buffer_from_jagged(res_nt))
        self.assertEqual(res_dense, res_nt.values())


    @parametrize("keepdim", [False, True])
@ -3399,11 +3403,11 @@ class TestNestedTensorSubclass(TestCase):
            with self.assertRaisesRegex(
                    RuntimeError,
                    "applying over the ragged dimension, but not the batch dimension"):
                nt, _ = jagged_from_list(ts, None)
                nt = torch.nested.as_nested_tensor(ts, layout=torch.jagged)
                out = torch.sum(nt, dim=rd, keepdim=keepdim)
            continue

        nt, _ = jagged_from_list(ts, None)
        nt = torch.nested.as_nested_tensor(ts, layout=torch.jagged)
        out = torch.sum(nt, dim=rd, keepdim=keepdim)
        ref_shape = ref_shape_keepdim if keepdim else ref_shape_no_keepdim
        self.assertEqual(len(out.shape), len(ref_shape))
@ -3415,7 +3419,7 @@ class TestNestedTensorSubclass(TestCase):

        # Check values correctness
        # raggedness not reduced
        nt, _ = jagged_from_list(ts, None)
        nt = torch.nested.as_nested_tensor(ts, layout=torch.jagged)
        out = torch.sum(nt, dim=(2, 3), keepdim=keepdim)
        out_ref = torch.sum(nt.values(), dim=(1, 2))
        self.assertIsInstance(out, NestedTensor)
@ -3423,7 +3427,7 @@ class TestNestedTensorSubclass(TestCase):
        self.assertTrue(torch.allclose(out.values().view(-1), out_ref.view(-1)))

        # raggedness reduced away
        nt, _ = jagged_from_list(ts, None)
        nt = torch.nested.as_nested_tensor(ts, layout=torch.jagged)
        out = torch.sum(nt, dim=(0, 1), keepdim=keepdim)
        out_ref = torch.sum(nt.values(), dim=(0,))
        self.assertNotIsInstance(out, NestedTensor)
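The sum tests above pin down which reductions a jagged NT supports: reducing only non-ragged dims keeps a NestedTensor, reducing batch and ragged dims together collapses to a dense tensor, and reducing the ragged dim without the batch dim raises. A compact sketch of the dense-collapse case, mirroring the test:

import torch

# Three variable-length rows of 4-dim features.
ts = [torch.randn(2, 4), torch.randn(3, 4), torch.randn(4, 4)]
nt = torch.nested.as_nested_tensor(ts, layout=torch.jagged)  # (3, ragged, 4)

dense = torch.sum(nt, dim=(0, 1))  # batch + ragged reduced -> plain Tensor
print(dense.shape)                 # torch.Size([4])
print(torch.allclose(dense, torch.sum(nt.values(), dim=0)))  # True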
@ -3469,24 +3473,25 @@ class TestNestedTensorSubclass(TestCase):
        self.assertIs(pinned, pinned.pin_memory())
        self.assertEqual(pinned.data_ptr(), pinned.pin_memory().data_ptr())

    def _validate_nt(self, nt, tensor_list, device, dtype, requires_grad):
    @torch.compiler.disable
    def _validate_nt(self, nt, device, dtype, layout, requires_grad, dim, batch_size, base=None):
        # Validate a bunch of properties after NT construction.
        device = torch.device(device)
        first_t = torch.as_tensor(tensor_list[0])
        expected_dim = first_t.dim() + 1
        batch_size = len(tensor_list)
        self.assertEqual(nt.dim(), expected_dim)
        self.assertEqual(nt.dim(), dim)
        self.assertEqual(nt.device, device)
        self.assertEqual(nt.dtype, dtype)
        self.assertEqual(nt.layout, torch.jagged)
        self.assertEqual(nt.layout, layout)
        self.assertEqual(nt.requires_grad, requires_grad)
        self.assertEqual(nt.values().device, device)
        self.assertEqual(nt.offsets().device, device)
        self.assertEqual(nt.shape[0], batch_size)
        self.assertTrue(isinstance(nt.shape[1], torch.SymInt))
        self.assertEqual(nt.shape[2:], first_t.shape[1:])

    @xfailIfTorchDynamo
        if layout == torch.jagged:
            self.assertEqual(nt._values.device, device)
            self.assertEqual(nt._offsets.device, device)
            self.assertEqual(nt.shape[0], batch_size)
            self.assertTrue(isinstance(nt.shape[1], torch.SymInt))

        if base is not None:
            self.assertTrue(nt._is_view() and nt._base is base)

    @dtypes(torch.float, torch.double, torch.half)
    @parametrize("requires_grad", [False, True])
    @parametrize("components_require_grad", [False, True])
@ -3500,7 +3505,11 @@ class TestNestedTensorSubclass(TestCase):
            dtype=dtype,
            layout=torch.jagged,
            requires_grad=requires_grad)
        self._validate_nt(nt, tensor_list, device, dtype, requires_grad)

        expected_dim = torch.as_tensor(tensor_list[0]).dim() + 1
        expected_batch_size = len(tensor_list)
        self._validate_nt(
            nt, device, dtype, torch.jagged, requires_grad, expected_dim, expected_batch_size)

        # Make sure grads -don't- flow back into original tensors for nested_tensor()
        if requires_grad:
@ -3509,7 +3518,6 @@ class TestNestedTensorSubclass(TestCase):
            t = t if isinstance(t, torch.Tensor) else torch.as_tensor(t)
            self.assertTrue(t.grad is None)

    @xfailIfTorchDynamo
    @dtypes(torch.float, torch.double, torch.half)
    @parametrize("components_require_grad", [False, True])
    def test_jagged_layout_construction_as_nested_tensor(
@ -3524,7 +3532,16 @@ class TestNestedTensorSubclass(TestCase):
            layout=torch.jagged)

        # nt.requires_grad=True should be set if at least one component requires grad
        self._validate_nt(nt, tensor_list, device, dtype, components_require_grad)
        expected_dim = tensor_list[0].dim() + 1
        expected_batch_size = len(tensor_list)
        self._validate_nt(
            nt,
            device,
            dtype,
            torch.jagged,
            components_require_grad,
            expected_dim,
            expected_batch_size)

        # Make sure grads flow back into original tensors for as_nested_tensor()
        if components_require_grad:
@ -3546,9 +3563,53 @@ class TestNestedTensorSubclass(TestCase):
            device="cpu",
            pin_memory=True)

        self._validate_nt(nt, tensor_list, "cpu", torch.float32, requires_grad=False)
        expected_dim = torch.as_tensor(tensor_list[0]).dim() + 1
        expected_batch_size = len(tensor_list)
        self._validate_nt(
            nt,
            device="cpu",
            dtype=torch.float32,
            layout=torch.jagged,
            requires_grad=False,
            dim=expected_dim,
            batch_size=expected_batch_size)
        self.assertTrue(nt.is_pinned())

    @dtypes(torch.float, torch.double, torch.half)
    @parametrize("requires_grad", [False, True])
    @parametrize("values_is_view", [False, True])
    def test_jagged_view_from_values_offsets(self, device, dtype, requires_grad, values_is_view):
        if values_is_view:
            # make values a view of base
            base = torch.randn(
                2, 3, 4, 5, 6, device=device, dtype=dtype, requires_grad=requires_grad)
            values = base.flatten(0, -2)
        else:
            values = torch.randn(10, 5, device=device, dtype=dtype, requires_grad=requires_grad)
        offsets = torch.tensor([0, 2, 4, 6, 10], device=device, dtype=torch.int64)

        nt = nested_view_from_values_offsets(values, offsets)

        expected_dim = values.dim() + 1
        expected_batch_size = offsets.shape[0] - 1
        expected_base = base if values_is_view else values
        self._validate_nt(
            nt, device, dtype, torch.jagged, requires_grad, expected_dim, expected_batch_size,
            # ensure NT is a proper view
            base=expected_base
        )

        if requires_grad:
            # Make sure grads flow back
            (nt * 2).backward(torch.ones_like(nt))

            @torch.compiler.disable
            def _check_grad(t):
                self.assertTrue(t.grad is not None)
                self.assertEqual(t.grad, torch.ones_like(t) * 2)

            _check_grad(base if values_is_view else values)

    @dtypes(torch.double, torch.half)
    @onlyCUDA
    def test_device_dtype_transfer_maintains_offsets(self, device, dtype):
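`nested_view_from_values_offsets` is the internal constructor the new test exercises: it wraps a dense `values` buffer as a jagged NT that is a true autograd view of that buffer. A small sketch, mirroring the test's shapes -- this is internal API and subject to change:

import torch
from torch.nested._internal.nested_tensor import nested_view_from_values_offsets

values = torch.randn(10, 5, requires_grad=True)
offsets = torch.tensor([0, 2, 4, 6, 10])   # 4 rows of lengths 2, 2, 2, 4

nt = nested_view_from_values_offsets(values, offsets)
print(nt.shape[0], nt.shape[2])              # 4 5  (dim 1 is the ragged SymInt)
print(nt._is_view() and nt._base is values)  # True

(nt * 2).backward(torch.ones_like(nt))       # grads flow to the dense buffer
print(torch.equal(values.grad, torch.full_like(values, 2)))  # True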
@ -3587,9 +3648,9 @@ class TestNestedTensorSubclass(TestCase):
        bias = torch.randn(3, requires_grad=False, dtype=torch.float64, device=device)

        def grad_test_func(a, b, c, bias):
            nt, _ = jagged_from_list([a, b, c], None)
            nt = torch.nested.as_nested_tensor([a, b, c], layout=torch.jagged)
            out = torch.nn.functional.layer_norm(nt, (nt.shape[-1],), bias=bias)
            return buffer_from_jagged(out)
            return out.values()

        gradcheck(
            grad_test_func, inputs=(*test_tensor_list, bias), check_batched_grad=False
@ -3599,20 +3660,26 @@ class TestNestedTensorSubclass(TestCase):
            RuntimeError,
            r"layer_norm\(\): normalizing over ragged dim not supported for nested tensors",
        ):
            nt, _ = jagged_from_list(test_tensor_list, None)
            nt = torch.nested.as_nested_tensor(test_tensor_list, layout=torch.jagged)
            _ = torch.nn.functional.layer_norm(nt, (nt.shape[-2], nt.shape[-1]))

    def test_narrow(self, device):
        starts = torch.tensor([0, 1, 2, 3, 4], device=device, dtype=torch.int64)
        lengths = torch.tensor([3, 2, 2, 1, 5], device=device, dtype=torch.int64)
        buffer = (
            torch.arange(0, 10, device=device, dtype=torch.int64)
            .unsqueeze(0).expand(5, -1).clone().detach()
        )
        nt = torch.nested.narrow(
            torch.arange(0, 10, device=device, dtype=torch.int64).unsqueeze(0).expand(5, -1).clone().detach(),
            buffer,
            1,
            starts,
            lengths,
            layout=torch.jagged
        )

        self.assertTrue(nt._is_view() and nt._base is buffer)

        # TODO: Use this approach when unbind is functional
        # unbinded_nt = nt.unbind()
        # for i in range(starts.shape[0]):
@ -3627,7 +3694,7 @@ class TestNestedTensorSubclass(TestCase):
        a = torch.randn(2, 3, requires_grad=True, dtype=torch.float64, device=device)
        b = torch.randn(3, 3, requires_grad=True, dtype=torch.float64, device=device)
        c = torch.randn(4, 3, requires_grad=True, dtype=torch.float64, device=device)
        nt_contiguous, _ = jagged_from_list([a, b, c], None)
        nt_contiguous = torch.nested.as_nested_tensor([a, b, c], layout=torch.jagged)

        starts_nc = torch.tensor([0, 1, 2, 3, 4], device=device, dtype=torch.int64)
        lengths_nc = torch.tensor([3, 2, 2, 1, 5], device=device, dtype=torch.int64)
@ -3688,7 +3755,7 @@ class TestNestedTensorSubclass(TestCase):
        a = torch.randn(2, 3, 4, requires_grad=True, dtype=torch.float64, device=device)
        b = torch.randn(3, 3, 4, requires_grad=True, dtype=torch.float64, device=device)
        c = torch.randn(4, 3, 4, requires_grad=True, dtype=torch.float64, device=device)
        nt, _ = jagged_from_list([a, b, c], None)
        nt = torch.nested.nested_tensor([a, b, c], layout=torch.jagged)
        # transpose ragged dim
        transposed = nt.transpose(1, 2)
        self.assertFalse(transposed.is_contiguous())
@ -3712,8 +3779,9 @@ class TestNestedTensorSubclass(TestCase):
        check_nt_equality(detached, transposed)

    def test_to_copy(self, device):
        nt, _ = jagged_from_list(
            [torch.randn(i + 2, 3, 4, requires_grad=True, dtype=torch.float64, device=device) for i in range(3)], None
        nt = torch.nested.nested_tensor(
            [torch.randn(i + 2, 3, 4, requires_grad=True, dtype=torch.float64, device=device)
             for i in range(3)], layout=torch.jagged
        )

        nt_copy_dtype = torch.ops.aten._to_copy(nt, dtype=torch.float16)
@ -3745,7 +3813,6 @@ class TestNestedTensorSubclass(TestCase):

    # Note 1: Math fallback doesn't work with bfloat16 on CUDA
    # Note 2: ROCm doesn't support flash attention or mem_efficient attention for NT
    @xfailIfTorchDynamo
    @unittest.skipIf(
        TEST_WITH_ROCM,
        "ROCm doesn't support flash attention or mem_efficient attention for NT",
@ -3843,7 +3910,7 @@ class TestNestedTensorSubclass(TestCase):
        self.assertEqual(attn_d1, attn_nts[0].unsqueeze(0), atol=output_ref_atol, rtol=output_ref_rtol)
        self.assertEqual(attn_d2, attn_nts[1].unsqueeze(0), atol=output_ref_atol, rtol=output_ref_rtol)

        nt_grads = torch.autograd.grad(buffer_from_jagged(attn_nt).sum(), (q_nt, k_nt, v_nt))
        nt_grads = torch.autograd.grad(attn_nt.values().sum(), (q_nt, k_nt, v_nt))
        for nt_grad, d1_grad, d2_grad, grad_atol, grad_rtol in zip(nt_grads, d1_grads, d2_grads, grad_atols, grad_rtols):
            unbound_nt_grads = nt_grad.unbind()
            self.assertEqual(d1_grad, unbound_nt_grads[0].unsqueeze(0), atol=grad_atol, rtol=grad_rtol)
@ -3863,7 +3930,7 @@ class TestNestedTensorSubclass(TestCase):
        if not (str(device).startswith("cuda") and dtype == torch.bfloat16):
            check_forward_backward()

    @skipIfTorchDynamo("skipped until proper view support for NT")
    @skipIfTorchDynamo("SDPA test compiles internally")
    @unittest.skipIf(IS_WINDOWS, reason="Windows not yet supported for torch.compile")
    @skipCUDAIf(not SM70OrLater, "GPU capability is < SM70")
    # Guarding with sqrt() doesn't work on ROCm?
@ -3937,7 +4004,6 @@ class TestNestedTensorSubclass(TestCase):
        output_dense = F.scaled_dot_product_attention(query._values, key._values, value._values)
        self.assertEqual(output._values, output_dense)

    @skipIfTorchDynamo("skipped until proper view support for NT")
    @onlyCUDA
    @unittest.skipIf(
        not PLATFORM_SUPPORTS_FUSED_ATTENTION,
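`torch.nested.narrow` with `layout=torch.jagged` (exercised by `test_narrow` above) carves a per-row window out of a dense buffer and, after this change, returns a true view of that buffer. A minimal sketch mirroring the test's setup:

import torch

buffer = torch.arange(0, 10).unsqueeze(0).expand(5, -1).clone()
starts = torch.tensor([0, 1, 2, 3, 4])
lengths = torch.tensor([3, 2, 2, 1, 5])

# Row i of the result is buffer[i, starts[i] : starts[i] + lengths[i]].
nt = torch.nested.narrow(buffer, 1, starts, lengths, layout=torch.jagged)
print(nt._is_view() and nt._base is buffer)  # True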
@ -2384,7 +2384,6 @@ class TestSDPACudaOnly(NNTestCase):
        # Cast up and compare
        self.assertEqual(qkv.grad, qkv_lp.grad.to(torch.float64), atol=1e-5, rtol=1e-5)

    @skipIfRocm  # TODO: Packed QKV
    @unittest.skipIf(not PLATFORM_SUPPORTS_FLASH_ATTENTION, "Flash Attention was not built for this system")
    @parametrize("contiguous_inputs", [True, False])
    @parametrize("is_causal", [True, False])
@ -2435,6 +2434,8 @@ class TestSDPACudaOnly(NNTestCase):
        # Bump down the tolerance for bfloat16
        atol = 7e-4 if dtype == torch.float16 else 7e-3
        rtol = 7e-4 if dtype == torch.float16 else 7e-3
        if TEST_WITH_ROCM:
            atol = 9e-4 if dtype == torch.float16 else 9e-3
        self.assertEqual(qkv.grad, qkv_lp.grad.to(torch.float64), atol=atol, rtol=rtol)

    @skipIfRocm  # Missing nested and EFFICIENT_ATTENTION
2  third_party/ideep  vendored
Submodule third_party/ideep updated: 6c581ef0fd...8a6cc4e09d

4  third_party/mkl-dnn.BUILD  vendored
@ -64,8 +64,8 @@ template_rule(
    substitutions = {
        "@DNNL_VERSION_MAJOR@": "3",
        "@DNNL_VERSION_MINOR@": "3",
        "@DNNL_VERSION_PATCH@": "2",
        "@DNNL_VERSION_HASH@": "2dc95a2ad0841e29db8b22fbccaf3e5da7992b01",
        "@DNNL_VERSION_PATCH@": "6",
        "@DNNL_VERSION_HASH@": "86e6af5974177e513fd3fee58425e1063e7f1361",
    },
)

2  third_party/onnx  vendored
Submodule third_party/onnx updated: ccde5da813...990217f043

2  third_party/pybind11  vendored
Submodule third_party/pybind11 updated: 8a099e44b3...3e9dfa2866
@ -2779,6 +2779,15 @@
  nested_size: non_differentiable
  nested_strides: non_differentiable

- name: _nested_view_from_jagged(Tensor(a) self, Tensor offsets, Tensor dummy, Tensor? lengths=None, int ragged_idx=1) -> Tensor(a)
  self: grad.values()
  offsets: non_differentiable
  lengths: non_differentiable
  dummy: non_differentiable

- name: _nested_get_values(Tensor(a) self) -> Tensor(a)
  self: _nested_view_from_jagged(grad, at::_nested_get_offsets(self), at::_nested_get_jagged_dummy(self), at::_nested_get_lengths(self), at::_nested_get_ragged_idx(self))

# Transformers
- name: _scaled_dot_product_efficient_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_bias, bool compute_log_sumexp, float dropout_p=0.0, bool is_causal=False, *, float? scale=None) -> (Tensor output, Tensor log_sumexp, Tensor philox_seed, Tensor philox_offset)
  output_differentiability: [True, False, False, False]
@ -59,7 +59,9 @@ VIEW_FUNCTIONS_WITH_METADATA_CHANGE = [
    "view_as_real",
    "_conj",
    "_neg_view",
    "_nested_get_values",
    "_nested_view_from_buffer",
    "_nested_view_from_jagged",
]

VIEW_FUNCTIONS = {
@ -363,6 +365,7 @@ def emit_view_func(
        BaseCType(intArrayRefT),
        BaseCType(symIntArrayRefT),
        ConstRefCType(BaseCType(tensorT)),
        ConstRefCType(OptionalCType(BaseCType(tensorT))),
    ]
    for binding in bindings:
        arg, arg_type = binding.name, binding.nctype.type
@ -392,7 +395,9 @@ def emit_view_func(
                arg=arg, val=arg_value, default="0"
            )
            updated_args.append(arg_value)
        elif arg_type == ConstRefCType(BaseCType(tensorT)):
        elif arg_type == ConstRefCType(BaseCType(tensorT)) or arg_type == ConstRefCType(
            OptionalCType(BaseCType(tensorT))
        ):
            # NB: Closing over a tensor. If a user modifies this tensor, this will be silently
            # incorrect. The proper thing to do is to store the version counter and copy on write.
            updated_args.append(arg)
@ -76,7 +76,7 @@ def fetch_and_cache(
def get_slow_tests(
    dirpath: str, filename: str = SLOW_TESTS_FILE
) -> Optional[Dict[str, float]]:
    url = "https://ossci-metrics.s3.amazonaws.com/slow-tests.json"
    url = "https://ossci-metrics.s3.amazonaws.com/slow-tests.json?versionId=Zw9Db41MTHlq3T.gc9Si4xX8D.FAvyDC"
    try:
        return fetch_and_cache(dirpath, filename, url, lambda x: x)
    except Exception:
@ -116,7 +116,7 @@ def get_disabled_tests(
        return disabled_test_from_issues

    try:
        url = "https://ossci-metrics.s3.amazonaws.com/disabled-tests-condensed.json"
        url = "https://ossci-metrics.s3.amazonaws.com/disabled-tests-condensed.json?versionId=80AmWqs8KiHyamnY4uoxMdVIVThFKCPU"
        return fetch_and_cache(dirpath, filename, url, process_disabled_test)
    except Exception:
        print("Couldn't download test skip set, leaving all tests enabled...")
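Appending `?versionId=...` pins each S3 object to a fixed version, so the release branch keeps fetching the same slow-test and disabled-test snapshots even after the bucket's latest objects move on. A sketch of what the fetch boils down to, using plain urllib (the caching logic in `fetch_and_cache` lives in the surrounding file):

import json
import urllib.request

# Version-pinned URL: the payload cannot change underneath the release branch.
url = ("https://ossci-metrics.s3.amazonaws.com/slow-tests.json"
       "?versionId=Zw9Db41MTHlq3T.gc9Si4xX8D.FAvyDC")

with urllib.request.urlopen(url) as resp:
    slow_tests = json.load(resp)  # mapping of test name -> runtime in seconds
print(len(slow_tests), "slow tests pinned for this release")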
@ -2570,11 +2570,12 @@ torch_non_c_binding_in_graph_functions = dict.fromkeys(
        "torch.mps.set_per_process_memory_fraction",
        "torch.mps.set_rng_state",
        "torch.mps.synchronize",
        "torch.nested._internal.nested_tensor.buffer_from_jagged",
        "torch.nested._internal.nested_tensor.get_tensor_symint",
        "torch.nested._internal.nested_tensor.is_expandable_to",
        "torch.nested._internal.nested_tensor.jagged_from_list",
        "torch.nested._internal.nested_tensor.jagged_from_tensor_and_lengths",
        "torch.nested._internal.nested_tensor.nested_view_from_values_offsets",
        "torch.nested._internal.nested_tensor.nested_view_from_values_offsets_lengths",
        "torch.nested.as_nested_tensor",
        "torch.nested.narrow",
        "torch.nested.nested_tensor",
@ -40,7 +40,6 @@ from torch.fx.experimental.symbolic_shapes import (
    SymbolicContext,
)
from torch.fx.immutable_collections import immutable_list
from torch.nested._internal.nested_tensor import NestedTensor
from torch.utils._python_dispatch import is_traceable_wrapper_subclass
from torch.utils.weak import TensorWeakRef
from .. import config, mutation_guard, replay_record, trace_rules
@ -1062,7 +1061,7 @@ class VariableBuilder:
        if (
            isinstance(value, torch.Tensor)
            and value.is_nested
            and not isinstance(value, NestedTensor)
            and not isinstance(value, torch.nested._internal.nested_tensor.NestedTensor)
        ):
            unimplemented("torch.compile does not support strided NestedTensor")
@ -251,3 +251,11 @@ def do_auto_functionalize(
            ctx.sync(orig_arg)

    return ctx.wrap_tensors(unwrapped_actual_out)  # type: ignore[arg-type]


@auto_functionalized.py_functionalize_impl
def auto_functionalized_func(ctx, _mutable_op, **kwargs):
    unwrapped_kwargs = ctx.unwrap_tensors(kwargs)
    with ctx.redispatch_to_next():
        result = auto_functionalized(_mutable_op, **unwrapped_kwargs)
    return ctx.wrap_tensors(result)
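Conceptually, `auto_functionalized` turns a mutating op into a pure one: it runs the op on fresh copies and returns the would-be-mutated tensors as extra outputs, leaving the caller (or a later pass such as `unsafe_remove_auto_functionalized_pass`) to copy results back. A toy model of that contract -- this is illustrative, not the real higher-order-op machinery:

import torch

def auto_functionalized_sketch(op, **kwargs):
    # Clone every tensor argument so `op` can mutate freely without
    # touching the caller's values.
    clones = {
        k: v.clone() if isinstance(v, torch.Tensor) else v
        for k, v in kwargs.items()
    }
    out = op(**clones)
    # Return the functional result plus the post-mutation tensor states.
    return out, clones

def inc_(x):          # stand-in for a mutating custom op
    x.add_(1)
    return x.sum()

x = torch.zeros(3)
out, new_state = auto_functionalized_sketch(inc_, x=x)
print(x)               # tensor([0., 0., 0.]) -- caller's tensor untouched
print(new_state["x"])  # tensor([1., 1., 1.]) -- mutated copy returned
print(out)             # tensor(3.)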
@ -95,7 +95,6 @@ def generate_ttir(kernel, kwargs):
    """
    Uses Triton's internal code generation to create TTIR
    """
    import triton
    from triton.compiler.compiler import ASTSource
    from triton.runtime.autotuner import Autotuner
    from triton.runtime.jit import JITFunction
@ -145,15 +144,21 @@ def generate_ttir(kernel, kwargs):
        if i not in kernel.constexprs
    }

    context = triton._C.libtriton.ir.context()
    target = triton.runtime.driver.active.get_current_target()
    backend = triton.compiler.compiler.make_backend(target)
    def get_backend():
        from triton.compiler.backends.cuda import CUDABackend
        from triton.runtime.driver import driver

        target = driver.get_current_target()
        return CUDABackend(target)

    backend = get_backend()

    options = backend.parse_options(dict())
    triton._C.libtriton.ir.load_dialects(context)
    backend.load_dialects(context)
    # triton._C.libtriton.triton.ir.load_dialects(context)
    # backend.load_dialects(context)

    src = ASTSource(kernel, signature, constants, specialization)
    ttir_module = src.make_ir(options, context)
    ttir_module = src.make_ir(options)
    if not ttir_module.verify():
        raise Exception("Verification for TTIR module has failed")