Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-22 22:25:10 +08:00)
Compare commits: main...v2.6.0-rc8 (58 commits)
Commits compared (SHA1):
3207040966
ca3c3a63b8
7be6b5db47
dcb8ad070f
8d4b8a920a
9c34a2076b
cd15d7b29f
a2639bc255
1d2c22157e
232eb253fa
e19c13d89d
4658a06320
a61b5b1d6a
574210ee5b
e2067a6f50
6e30474706
eb30434c97
47f4e56498
983ea0eee5
518294705e
a99cc48bcd
4d9de27d56
d155d8ad6a
e1858b614e
be126bccee
8c03454867
7092dc521b
f35ab0e353
3a3de27475
7d3292c0d3
478a99c59b
4e4182dbd0
929efb4531
f01a678e02
23e390c711
41811ae689
d9eeddd49f
5eb54f6ebf
b1a10ecad9
31b520a599
f61bf202b3
4b9b7def3d
9b688182f7
22775e0e8c
c953e748eb
6628b70f02
0cdf8b1d09
46f5510d20
f9e99fc62f
1d3ffeb7ea
c92f6871e6
2b84debd97
5fbc4aa90a
5363f7d9fd
f3c0886c05
aad1c160a7
af92bad804
c69eae32ba
@@ -6,19 +6,6 @@ GPU_ARCH_VERSION=${GPU_ARCH_VERSION:-}
 SCRIPTPATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
 source $SCRIPTPATH/aarch64_ci_setup.sh
 
-tagged_version() {
-  GIT_DESCRIBE="git --git-dir /pytorch/.git describe --tags --match v[0-9]*.[0-9]*.[0-9]*"
-  if ${GIT_DESCRIBE} --exact >/dev/null; then
-    ${GIT_DESCRIBE}
-  else
-    return 1
-  fi
-}
-
-if tagged_version >/dev/null; then
-  export OVERRIDE_PACKAGE_VERSION="$(tagged_version | sed -e 's/^v//' -e 's/-.*$//')"
-fi
-
 ###############################################################################
 # Run aarch64 builder python
 ###############################################################################
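For reference, the deleted helper derived OVERRIDE_PACKAGE_VERSION from an exact release tag; a minimal standalone sketch of the same logic (assuming a checkout at /pytorch):

GIT_DESCRIBE="git --git-dir /pytorch/.git describe --tags --match v[0-9]*.[0-9]*.[0-9]*"
if ${GIT_DESCRIBE} --exact >/dev/null 2>&1; then
  # e.g. v2.6.0-rc8 -> 2.6.0
  export OVERRIDE_PACKAGE_VERSION="$(${GIT_DESCRIBE} | sed -e 's/^v//' -e 's/-.*$//')"
fi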
@@ -5,16 +5,14 @@ set -eux -o pipefail
 # By creating symlinks from desired /opt/python to /usr/local/bin/
 
 NUMPY_VERSION=2.0.2
-PYGIT2_VERSION=1.15.1
-if [[ "$DESIRED_PYTHON" == "3.13" ]]; then
+if [[ "$DESIRED_PYTHON" == "3.13" || "$DESIRED_PYTHON" == "3.13t" ]]; then
     NUMPY_VERSION=2.1.2
-    PYGIT2_VERSION=1.16.0
 fi
 
 SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )"
 source $SCRIPTPATH/../manywheel/set_desired_python.sh
 
-pip install -q numpy==${NUMPY_VERSION} pyyaml==6.0.2 scons==4.7.0 ninja==1.11.1 patchelf==0.17.2 pygit2==${PYGIT2_VERSION}
+pip install -q numpy==${NUMPY_VERSION} pyyaml==6.0.2 scons==4.7.0 ninja==1.11.1 patchelf==0.17.2
 
 for tool in python python3 pip pip3 ninja scons patchelf; do
     ln -sf ${DESIRED_PYTHON_BIN_DIR}/${tool} /usr/local/bin;
@@ -6,8 +6,6 @@ import shutil
 from subprocess import check_call, check_output
 from typing import List
 
-from pygit2 import Repository
-
 
 def list_dir(path: str) -> List[str]:
     """'
@@ -171,10 +169,9 @@ if __name__ == "__main__":
     args = parse_arguments()
     enable_mkldnn = args.enable_mkldnn
     enable_cuda = args.enable_cuda
-    repo = Repository("/pytorch")
-    branch = repo.head.name
-    if branch == "HEAD":
-        branch = "master"
+    branch = check_output(
+        ["git", "rev-parse", "--abbrev-ref", "HEAD"], cwd="/pytorch"
+    ).decode()
 
     print("Building PyTorch wheel")
     build_vars = "MAX_JOBS=5 CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000 "
@@ -186,7 +183,7 @@ if __name__ == "__main__":
         build_vars += (
             f"BUILD_TEST=0 PYTORCH_BUILD_VERSION={version} PYTORCH_BUILD_NUMBER=1 "
         )
-    elif branch in ["nightly", "master"]:
+    elif branch in ["nightly", "main"]:
         build_date = (
             check_output(["git", "log", "--pretty=format:%cs", "-1"], cwd="/pytorch")
            .decode()
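The pygit2 Repository lookup is replaced by a plain git call; the shell equivalent of the new branch detection is simply (repo path as in the diff):

# Prints the current branch name, or the literal string "HEAD" on a detached checkout
git -C /pytorch rev-parse --abbrev-ref HEAD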
@@ -63,7 +63,7 @@ case ${CUDA_VERSION} in
         if [[ "$GPU_ARCH_TYPE" = "cuda-aarch64" ]]; then
             TORCH_CUDA_ARCH_LIST="9.0"
         else
-            TORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST};9.0+PTX"
+            TORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST};9.0"
         fi
         EXTRA_CAFFE2_CMAKE_FLAGS+=("-DATEN_NO_TEST=ON")
         ;;
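Dropping the +PTX suffix means sm_90 ships as SASS only, without forward-compatible PTX embedded in the fat binary. A quick way to inspect which architectures a built wheel actually targets (sketch, requires a CUDA build of torch):

# Lists e.g. ['sm_80', 'sm_90'] for a CUDA-enabled build
python -c "import torch; print(torch.cuda.get_arch_list())"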
@@ -192,7 +192,7 @@ function install_torchrec_and_fbgemm() {
 
 function clone_pytorch_xla() {
   if [[ ! -d ./xla ]]; then
-    git clone --recursive --quiet https://github.com/pytorch/xla.git
+    git clone --recursive -b r2.6 https://github.com/pytorch/xla.git
     pushd xla
     # pin the xla hash so that we don't get broken by changes to xla
     git checkout "$(cat ../.github/ci_commit_pins/xla.txt)"
@@ -180,7 +180,7 @@ def smoke_test_cuda(
     # torch.compile is available on macos-arm64 and Linux for python 3.8-3.13
     if (
         torch_compile_check == "enabled"
-        and sys.version_info < (3, 13, 0)
+        and sys.version_info < (3, 14, 0)
         and target_os in ["linux", "linux-aarch64", "macos-arm64", "darwin"]
     ):
         smoke_test_compile("cuda" if torch.cuda.is_available() else "cpu")
@@ -173,8 +173,10 @@ conda create ${EXTRA_CONDA_INSTALL_FLAGS} -yn "$tmp_env_name" python="$desired_p
 source activate "$tmp_env_name"
 
 pip install -q "numpy=${NUMPY_PINNED_VERSION}" "pyyaml${PYYAML_PINNED_VERSION}" requests
-retry conda install ${EXTRA_CONDA_INSTALL_FLAGS} -yq llvm-openmp=14.0.6 cmake ninja "setuptools${SETUPTOOLS_PINNED_VERSION}" typing_extensions
 retry pip install -qr "${pytorch_rootdir}/requirements.txt" || true
+# TODO : Remove me later (but in the interim, use Anaconda cmake, to find Anaconda installed OpenMP)
+retry pip uninstall -y cmake
+retry conda install ${EXTRA_CONDA_INSTALL_FLAGS} -yq llvm-openmp=14.0.6 cmake ninja "setuptools${SETUPTOOLS_PINNED_VERSION}" typing_extensions
 
 # For USE_DISTRIBUTED=1 on macOS, need libuv and pkg-config to find libuv.
 export USE_DISTRIBUTED=1
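The reordering uninstalls pip's cmake so the conda-installed cmake, which can locate Anaconda's llvm-openmp, wins on PATH; a sanity check after the swap (sketch):

which cmake   # should resolve inside the conda env's bin directory
cmake --version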
@@ -94,6 +94,8 @@ if [[ "\$GPU_ARCH_TYPE" != *s390x* && "\$GPU_ARCH_TYPE" != *xpu* && "\$GPU_ARCH_
 python /pytorch/.ci/pytorch/smoke_test/smoke_test.py --package=torchonly --torch-compile-check disabled
 fi
 
+# Clean temp files
+cd /pytorch/.ci/pytorch/ && git clean -ffdx
 
 # =================== The above code will be executed inside Docker container ===================
 EOL
@@ -75,9 +75,8 @@ export PYTORCH_BUILD_NUMBER=1
 TRITON_VERSION=$(cat $PYTORCH_ROOT/.ci/docker/triton_version.txt)
 
 # Here PYTORCH_EXTRA_INSTALL_REQUIREMENTS is already set for the all the wheel builds hence append TRITON_CONSTRAINT
-TRITON_CONSTRAINT="platform_system == 'Linux' and platform_machine == 'x86_64' and python_version < '3.13'"
-if [[ "$PACKAGE_TYPE" =~ .*wheel.* && -n "${PYTORCH_EXTRA_INSTALL_REQUIREMENTS:-}" ]]; then
-    # Only linux Python < 3.13 are supported wheels for triton
+TRITON_CONSTRAINT="platform_system == 'Linux' and platform_machine == 'x86_64'"
+if [[ "$PACKAGE_TYPE" =~ .*wheel.* && -n "${PYTORCH_EXTRA_INSTALL_REQUIREMENTS:-}" && ! "$PYTORCH_BUILD_VERSION" =~ .*xpu.* ]]; then
     TRITON_REQUIREMENT="triton==${TRITON_VERSION}; ${TRITON_CONSTRAINT}"
     if [[ -n "$PYTORCH_BUILD_VERSION" && "$PYTORCH_BUILD_VERSION" =~ .*dev.* ]]; then
         TRITON_SHORTHASH=$(cut -c1-8 $PYTORCH_ROOT/.ci/docker/ci_commit_pins/triton.txt)
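TRITON_CONSTRAINT is a standard PEP 508 environment marker, so the generated requirement only applies on matching platforms; pip accepts the same syntax directly (the version number below is illustrative, not from the diff):

pip install "triton==3.1.0; platform_system == 'Linux' and platform_machine == 'x86_64'"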
.github/ci_commit_pins/xla.txt (2 changes, vendored)
@@ -1 +1 @@
-73f54ba5bd7fb83d7ba81fe6f5e05fb6ee815d6f
+r2.6
.github/scripts/filter_test_configs.py (4 changes, vendored)
@@ -39,9 +39,9 @@ SUPPORTED_PERIODICAL_MODES: Dict[str, Callable[[Optional[str]], bool]] = {
 }
 
 # The link to the published list of disabled jobs
-DISABLED_JOBS_URL = "https://ossci-metrics.s3.amazonaws.com/disabled-jobs.json"
+DISABLED_JOBS_URL = "https://ossci-metrics.s3.amazonaws.com/disabled-jobs.json?versionId=pQg1WJZKNqoisT5kAGG9Wmbuns5zBdBc"
 # and unstable jobs
-UNSTABLE_JOBS_URL = "https://ossci-metrics.s3.amazonaws.com/unstable-jobs.json"
+UNSTABLE_JOBS_URL = "https://ossci-metrics.s3.amazonaws.com/unstable-jobs.json?versionId=ddADM6lf9NqVTA0APn69zl3M7nMda4DH"
 
 # Some constants used to handle disabled and unstable jobs
 JOB_NAME_SEP = "/"
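Appending versionId pins each URL to one immutable S3 object version, so the release branch keeps reading the same job lists even after the files are rewritten for main. The pinned object fetches like any other URL:

curl -s "https://ossci-metrics.s3.amazonaws.com/disabled-jobs.json?versionId=pQg1WJZKNqoisT5kAGG9Wmbuns5zBdBc" | head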
@@ -345,7 +345,6 @@ def generate_wheels_matrix(
         if (
             gpu_arch_type in ["xpu", "cpu-s390x"]
             or os == "macos-arm64"
-            or os == "linux-aarch64"
             or os == "windows"
         ) and python_version == "3.13t":
             continue
.github/templates/common.yml.j2 (4 changes, vendored)
@@ -8,7 +8,7 @@
 # NOTE: If testing pytorch/builder changes you can change this variable to change what pytorch/builder reference
 # the binary builds will check out
 {%- set builder_repo = "pytorch/builder" -%}
-{%- set builder_branch = "main" -%}
+{%- set builder_branch = "release/2.6" -%}
 
 {%- macro concurrency(build_environment) -%}
 concurrency:
@@ -36,7 +36,7 @@ concurrency:
 {%- macro setup_ec2_windows() -%}
 !{{ display_ec2_information() }}
 - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
-  uses: pytorch/test-infra/.github/actions/setup-ssh@main
+  uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
   continue-on-error: true
   with:
     github-secret: ${{ secrets.GITHUB_TOKEN }}
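The same @main to @release/2.6 retargeting repeats across every workflow and template below; a sketch of how such a sweep can be scripted (GNU sed assumed; this is not the actual release tooling):

grep -rl --include='*.yml*' '@main' .github/workflows .github/templates \
  | xargs sed -i 's|\(pytorch/\(pytorch\|test-infra\)/[^@ ]*\)@main|\1@release/2.6|g'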
@@ -55,7 +55,7 @@ jobs:
   get-label-type:
     if: github.repository_owner == 'pytorch'
     name: get-label-type
-    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
+    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
     with:
       triggering_actor: ${{ github.triggering_actor }}
       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
@@ -145,9 +145,9 @@ jobs:
         with:
           name: !{{ config["build_name"] }}
           path: "${{ runner.temp }}/artifacts/"
-      !{{ common.checkout(deep_clone=False, directory="pytorch") }}
+      !{{ common.checkout(deep_clone=False, directory="pytorch", checkout_pr_head=False) }}
       - name: Pull Docker image
-        uses: pytorch/test-infra/.github/actions/pull-docker-image@main
+        uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.6
         with:
           docker-image: !{{ config["container_image"] }}
       - name: Test Pytorch binary
@@ -166,12 +166,12 @@ jobs:
         with:
           name: !{{ config["build_name"] }}
           path: "${{ runner.temp }}/artifacts/"
-      !{{ common.checkout(deep_clone=False, directory="pytorch") }}
+      !{{ common.checkout(deep_clone=False, directory="pytorch", checkout_pr_head=False) }}
       - name: ROCm set GPU_FLAG
         run: |
           echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri --group-add video --group-add daemon" >> "${GITHUB_ENV}"
       - name: Pull Docker image
-        uses: pytorch/test-infra/.github/actions/pull-docker-image@main
+        uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.6
         with:
           docker-image: !{{ config["container_image"] }}
       - name: Test Pytorch binary
@@ -78,7 +78,7 @@ jobs:
         elif [ -d "/Applications/Xcode_13.3.1.app" ]; then
           echo "DEVELOPER_DIR=/Applications/Xcode_13.3.1.app/Contents/Developer" >> "${GITHUB_ENV}"
         fi
-      !{{ common.checkout(deep_clone=False, directory="pytorch") }}
+      !{{ common.checkout(deep_clone=False, directory="pytorch", checkout_pr_head=False) }}
       - name: Install sccache (only for non-forked PRs, and pushes to trunk)
         uses: nick-fields/retry@v3.0.0
         if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
@@ -56,7 +56,7 @@ jobs:
   get-label-type:
     if: github.repository_owner == 'pytorch'
     name: get-label-type
-    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
+    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
     with:
       triggering_actor: ${{ github.triggering_actor }}
       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
@@ -80,7 +80,7 @@ jobs:
     steps:
       !{{ common.setup_ec2_windows() }}
       !{{ set_runner_specific_vars() }}
-      !{{ common.checkout(deep_clone=False, directory="pytorch") }}
+      !{{ common.checkout(deep_clone=False, directory="pytorch", checkout_pr_head=False) }}
       - name: Populate binary env
         shell: bash
         run: |
@@ -121,7 +121,7 @@ jobs:
         with:
          name: !{{ config["build_name"] }}
          path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
-      !{{ common.checkout(deep_clone=False, directory="pytorch") }}
+      !{{ common.checkout(deep_clone=False, directory="pytorch", checkout_pr_head=False) }}
       - name: Populate binary env
         shell: bash
         run: |
.github/workflows/_bazel-build-test.yml (14 changes, vendored)
@@ -47,7 +47,7 @@ jobs:
       reenabled-issues: ${{ steps.filter.outputs.reenabled-issues }}
     steps:
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
         with:
           fetch-depth: 1
           submodules: false
@@ -69,25 +69,25 @@ jobs:
     runs-on: ${{ matrix.runner }}
     steps:
       - name: Setup SSH (Click me for login details)
-        uses: pytorch/test-infra/.github/actions/setup-ssh@main
+        uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
         with:
           github-secret: ${{ secrets.GITHUB_TOKEN }}
 
       # [see note: pytorch repo ref]
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
 
       - name: Setup Linux
         uses: ./.github/actions/setup-linux
 
       - name: Calculate docker image
         id: calculate-docker-image
-        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
+        uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.6
         with:
           docker-image-name: ${{ inputs.docker-image-name }}
 
       - name: Pull docker image
-        uses: pytorch/test-infra/.github/actions/pull-docker-image@main
+        uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.6
         with:
           docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
 
@@ -97,7 +97,7 @@ jobs:
         run: echo "IN_CONTAINER_RUNNER=$(if [ -f /.inarc ] || [ -f /.incontainer ]; then echo true ; else echo false; fi)" >> "$GITHUB_OUTPUT"
 
       - name: Install nvidia driver, nvidia-docker runtime, set GPU_FLAG
-        uses: pytorch/test-infra/.github/actions/setup-nvidia@main
+        uses: pytorch/test-infra/.github/actions/setup-nvidia@release/2.6
         if: ${{ inputs.cuda-version != 'cpu' && steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'false' }}
 
       - name: Output disk space left
@@ -209,5 +209,5 @@ jobs:
           file-suffix: bazel-${{ github.job }}_${{ steps.get-job-id.outputs.job-id }}
 
       - name: Teardown Linux
-        uses: pytorch/test-infra/.github/actions/teardown-linux@main
+        uses: pytorch/test-infra/.github/actions/teardown-linux@release/2.6
         if: always()
.github/workflows/_binary-build-linux.yml (30 changes, vendored)
@@ -159,13 +159,13 @@ jobs:
 
       - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
         if: inputs.build_environment != 'linux-s390x-binary-manywheel'
-        uses: pytorch/test-infra/.github/actions/setup-ssh@main
+        uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
         continue-on-error: true
         with:
           github-secret: ${{ secrets.github-token }}
 
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
         with:
           no-sudo: ${{ inputs.build_environment == 'linux-aarch64-binary-manywheel' || inputs.build_environment == 'linux-s390x-binary-manywheel' }}
 
@@ -195,7 +195,6 @@ jobs:
       - name: Checkout PyTorch to pytorch dir
         uses: malfet/checkout@silent-checkout
         with:
-          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
           submodules: recursive
           path: pytorch
           quiet-checkout: true
@@ -206,21 +205,6 @@ jobs:
           git clean -fxd
         working-directory: pytorch
 
-      - name: Checkout pytorch/builder to builder dir
-        uses: malfet/checkout@silent-checkout
-        with:
-          ref: main
-          submodules: recursive
-          repository: pytorch/builder
-          path: builder
-          quiet-checkout: true
-
-      - name: Clean pytorch/builder checkout
-        run: |
-          # Remove any artifacts from the previous checkouts
-          git clean -fxd
-        working-directory: builder
-
       - name: Check if the job is disabled
         id: filter
         uses: ./pytorch/.github/actions/filter-test-configs
@@ -235,7 +219,7 @@ jobs:
 
       - name: Pull Docker image
         if: ${{ steps.filter.outputs.is-test-matrix-empty == 'False' && inputs.build_environment != 'linux-s390x-binary-manywheel' }}
-        uses: pytorch/test-infra/.github/actions/pull-docker-image@main
+        uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.6
         with:
           docker-image: ${{ inputs.DOCKER_IMAGE }}
 
@@ -246,7 +230,6 @@ jobs:
           mkdir -p artifacts/
           container_name=$(docker run \
             -e BINARY_ENV_FILE \
-            -e BUILDER_ROOT \
             -e BUILD_ENVIRONMENT \
             -e DESIRED_CUDA \
             -e DESIRED_DEVTOOLSET \
@@ -264,7 +247,6 @@ jobs:
             --tty \
             --detach \
             -v "${GITHUB_WORKSPACE}/pytorch:/pytorch" \
-            -v "${GITHUB_WORKSPACE}/builder:/builder" \
            -v "${RUNNER_TEMP}/artifacts:/artifacts" \
            -w / \
            "${DOCKER_IMAGE}"
@@ -272,10 +254,8 @@ jobs:
           docker exec -t -w "${PYTORCH_ROOT}" "${container_name}" bash -c "bash .circleci/scripts/binary_populate_env.sh"
           if [[ ${BUILD_ENVIRONMENT} == *"aarch64"* ]]; then
             docker exec -t "${container_name}" bash -c "source ${BINARY_ENV_FILE} && bash /pytorch/.ci/aarch64_linux/aarch64_ci_build.sh"
-          elif [[ ${{ inputs.PACKAGE_TYPE }} == "manywheel" || ${{ inputs.PACKAGE_TYPE }} == "libtorch" ]]; then
-            docker exec -t "${container_name}" bash -c "source ${BINARY_ENV_FILE} && bash /pytorch/.ci/${{ inputs.PACKAGE_TYPE }}/build.sh"
           else
-            docker exec -t "${container_name}" bash -c "source ${BINARY_ENV_FILE} && bash /builder/${{ inputs.PACKAGE_TYPE }}/build.sh"
+            docker exec -t "${container_name}" bash -c "source ${BINARY_ENV_FILE} && bash /pytorch/.ci/${{ inputs.PACKAGE_TYPE }}/build.sh"
           fi
 
       - name: Chown artifacts
@@ -295,7 +275,7 @@ jobs:
 
       - name: Teardown Linux
         if: always() && inputs.build_environment != 'linux-s390x-binary-manywheel'
-        uses: pytorch/test-infra/.github/actions/teardown-linux@main
+        uses: pytorch/test-infra/.github/actions/teardown-linux@release/2.6
 
       - name: Chown workspace
         if: always() && inputs.build_environment != 'linux-s390x-binary-manywheel'
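With the pytorch/builder checkout and the /builder mount gone, every package type now builds from scripts vendored under /pytorch/.ci; for example, the manywheel case reduces to the following (container name and env file as set up by the surrounding workflow):

docker exec -t "${container_name}" \
  bash -c "source ${BINARY_ENV_FILE} && bash /pytorch/.ci/manywheel/build.sh"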
.github/workflows/_binary-test-linux.yml (11 changes, vendored)
@@ -142,14 +142,14 @@ jobs:
 
       - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
         if: inputs.build_environment != 'linux-s390x-binary-manywheel'
-        uses: pytorch/test-infra/.github/actions/setup-ssh@main
+        uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
         continue-on-error: true
         with:
           github-secret: ${{ secrets.github-token }}
 
       # Setup the environment
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
         with:
           no-sudo: ${{ inputs.build_environment == 'linux-aarch64-binary-manywheel' || inputs.build_environment == 'linux-s390x-binary-manywheel' }}
 
@@ -172,7 +172,6 @@ jobs:
       - name: Checkout PyTorch to pytorch dir
         uses: malfet/checkout@silent-checkout
         with:
-          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
           submodules: recursive
           path: pytorch
 
@@ -202,12 +201,12 @@ jobs:
           path: "${{ runner.temp }}/artifacts/"
 
       - name: Install nvidia driver, nvidia-docker runtime, set GPU_FLAG
-        uses: pytorch/test-infra/.github/actions/setup-nvidia@main
+        uses: pytorch/test-infra/.github/actions/setup-nvidia@release/2.6
         if: ${{ inputs.GPU_ARCH_TYPE == 'cuda' && steps.filter.outputs.is-test-matrix-empty == 'False' }}
 
       - name: Pull Docker image
         if: ${{ steps.filter.outputs.is-test-matrix-empty == 'False' && inputs.build_environment != 'linux-s390x-binary-manywheel' }}
-        uses: pytorch/test-infra/.github/actions/pull-docker-image@main
+        uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.6
         with:
           docker-image: ${{ inputs.DOCKER_IMAGE }}
 
@@ -217,7 +216,7 @@ jobs:
 
       - name: Teardown Linux
         if: always() && inputs.build_environment != 'linux-s390x-binary-manywheel'
-        uses: pytorch/test-infra/.github/actions/teardown-linux@main
+        uses: pytorch/test-infra/.github/actions/teardown-linux@release/2.6
 
       - name: Chown workspace
         if: always() && inputs.build_environment != 'linux-s390x-binary-manywheel'
.github/workflows/_binary-upload.yml (2 changes, vendored)
@@ -103,7 +103,7 @@ jobs:
       USE_SPLIT_BUILD: ${{ inputs.use_split_build }}
     steps:
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
         with:
           no-sudo: true
.github/workflows/_docs.yml (10 changes, vendored)
@@ -84,7 +84,7 @@ jobs:
     name: build-docs-${{ matrix.docs_type }}-${{ inputs.push }}
     steps:
       - name: Setup SSH (Click me for login details)
-        uses: pytorch/test-infra/.github/actions/setup-ssh@main
+        uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
         with:
           github-secret: ${{ secrets.GITHUB_TOKEN }}
           instructions: |
@@ -95,7 +95,7 @@ jobs:
 
       # [see note: pytorch repo ref]
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
 
       - name: Setup Linux
         uses: ./.github/actions/setup-linux
@@ -110,12 +110,12 @@ jobs:
 
       - name: Calculate docker image
         id: calculate-docker-image
-        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
+        uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.6
         with:
           docker-image-name: ${{ inputs.docker-image }}
 
       - name: Pull docker image
-        uses: pytorch/test-infra/.github/actions/pull-docker-image@main
+        uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.6
         with:
           docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
 
@@ -222,5 +222,5 @@ jobs:
           s3-prefix: pytorch/pytorch/${{ github.event.pull_request.number }}/functorchdocs
 
       - name: Teardown Linux
-        uses: pytorch/test-infra/.github/actions/teardown-linux@main
+        uses: pytorch/test-infra/.github/actions/teardown-linux@release/2.6
         if: always()
.github/workflows/_linux-build.yml (10 changes, vendored)
@@ -108,7 +108,7 @@ jobs:
       test-matrix: ${{ steps.filter.outputs.test-matrix }}
     steps:
       - name: Setup SSH (Click me for login details)
-        uses: pytorch/test-infra/.github/actions/setup-ssh@main
+        uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
         if: inputs.build-environment != 'linux-s390x-binary-manywheel'
         with:
           github-secret: ${{ secrets.GITHUB_TOKEN }}
@@ -118,7 +118,7 @@ jobs:
       # checkout because when we run this action we don't *have* a local
       # checkout. In other cases you should prefer a local checkout.
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
         with:
           no-sudo: true
 
@@ -136,7 +136,7 @@ jobs:
 
       - name: Calculate docker image
         id: calculate-docker-image
-        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
+        uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.6
         if: inputs.build-environment != 'linux-s390x-binary-manywheel'
         with:
           docker-image-name: ${{ inputs.docker-image-name }}
@@ -152,7 +152,7 @@ jobs:
           echo "docker pull ghcr.io/pytorch/ci-image:${tag/:/-}"
 
       - name: Pull docker image
-        uses: pytorch/test-infra/.github/actions/pull-docker-image@main
+        uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.6
         if: inputs.build-environment != 'linux-s390x-binary-manywheel'
         with:
           docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
@@ -320,7 +320,7 @@ jobs:
           build-time: ${{ steps.build.outputs.build_time }}
 
       - name: Teardown Linux
-        uses: pytorch/test-infra/.github/actions/teardown-linux@main
+        uses: pytorch/test-infra/.github/actions/teardown-linux@release/2.6
         if: always() && inputs.build-environment != 'linux-s390x-binary-manywheel'
 
       - name: Cleanup docker
.github/workflows/_linux-test.yml (14 changes, vendored)
@@ -80,7 +80,7 @@ jobs:
     timeout-minutes: ${{ matrix.mem_leak_check == 'mem_leak_check' && 600 || inputs.timeout-minutes }}
     steps:
       - name: Setup SSH (Click me for login details)
-        uses: pytorch/test-infra/.github/actions/setup-ssh@main
+        uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
         if: ${{ !contains(matrix.runner, 'gcp.a100') }}
         with:
           github-secret: ${{ secrets.GITHUB_TOKEN }}
@@ -89,7 +89,7 @@ jobs:
             docker exec -it $(docker container ps --format '{{.ID}}') bash
 
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
         with:
           no-sudo: true
 
@@ -106,7 +106,7 @@ jobs:
 
       - name: Calculate docker image
         id: calculate-docker-image
-        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
+        uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.6
         with:
           docker-image-name: ${{ inputs.docker-image }}
 
@@ -120,7 +120,7 @@ jobs:
           echo "docker pull ghcr.io/pytorch/ci-image:${tag/:/-}"
 
       - name: Pull docker image
-        uses: pytorch/test-infra/.github/actions/pull-docker-image@main
+        uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.6
         with:
           docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
 
@@ -131,7 +131,7 @@ jobs:
 
       - name: Install nvidia driver, nvidia-docker runtime, set GPU_FLAG
         id: install-nvidia-driver
-        uses: pytorch/test-infra/.github/actions/setup-nvidia@main
+        uses: pytorch/test-infra/.github/actions/setup-nvidia@release/2.6
         if: ${{ contains(inputs.build-environment, 'cuda') && !contains(matrix.config, 'nogpu') && steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'false' }}
 
       - name: Setup GPU_FLAG for docker run
@@ -331,7 +331,7 @@ jobs:
           job_identifier: ${{ github.workflow }}_${{ inputs.build-environment }}
 
       - name: Upload the benchmark results
-        uses: pytorch/test-infra/.github/actions/upload-benchmark-results@main
+        uses: pytorch/test-infra/.github/actions/upload-benchmark-results@release/2.6
         with:
           benchmark-results-dir: test/test-reports
           dry-run: false
@@ -377,7 +377,7 @@ jobs:
           path: ./**/core.[1-9]*
 
       - name: Teardown Linux
-        uses: pytorch/test-infra/.github/actions/teardown-linux@main
+        uses: pytorch/test-infra/.github/actions/teardown-linux@release/2.6
         if: always() && steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'false'
 
       # NB: We are currently having an intermittent GPU-related issue on G5 runners with
.github/workflows/_mac-build.yml (10 changes, vendored)
@@ -71,11 +71,11 @@ jobs:
       test-matrix: ${{ steps.filter.outputs.test-matrix }}
     steps:
       - name: Clean up disk space before running MacOS workflow
-        uses: pytorch/test-infra/.github/actions/check-disk-space@main
+        uses: pytorch/test-infra/.github/actions/check-disk-space@release/2.6
 
       # [see note: pytorch repo ref]
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
 
       - name: Set xcode version
         env:
@@ -87,7 +87,7 @@ jobs:
 
       - name: Setup miniconda
         if: inputs.environment-file == ''
-        uses: pytorch/test-infra/.github/actions/setup-miniconda@main
+        uses: pytorch/test-infra/.github/actions/setup-miniconda@release/2.6
         with:
           python-version: ${{ inputs.python-version }}
           environment-file: .github/requirements/conda-env-${{ runner.os }}-${{ runner.arch }}
@@ -97,7 +97,7 @@ jobs:
       # environment even though the arch is x86-64
       - name: Setup miniconda using the provided environment file
         if: inputs.environment-file != ''
-        uses: pytorch/test-infra/.github/actions/setup-miniconda@main
+        uses: pytorch/test-infra/.github/actions/setup-miniconda@release/2.6
         with:
           python-version: ${{ inputs.python-version }}
           environment-file: ${{ inputs.environment-file }}
@@ -207,4 +207,4 @@ jobs:
       - name: Clean up disk space
         if: always()
         continue-on-error: true
-        uses: pytorch/test-infra/.github/actions/check-disk-space@main
+        uses: pytorch/test-infra/.github/actions/check-disk-space@release/2.6
.github/workflows/_mac-test-mps.yml (6 changes, vendored)
@@ -41,7 +41,7 @@ jobs:
       reenabled-issues: ${{ steps.filter.outputs.reenabled-issues }}
     steps:
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
         with:
           submodules: false
 
@@ -82,7 +82,7 @@ jobs:
           use-gha: true
 
       - name: Setup miniconda
-        uses: pytorch/test-infra/.github/actions/setup-miniconda@main
+        uses: pytorch/test-infra/.github/actions/setup-miniconda@release/2.6
         with:
           python-version: ${{ inputs.python-version }}
           environment-file: .github/requirements/conda-env-${{ runner.os }}-${{ runner.arch }}
@@ -169,4 +169,4 @@ jobs:
       - name: Clean up disk space
         if: always()
         continue-on-error: true
-        uses: pytorch/test-infra/.github/actions/check-disk-space@main
+        uses: pytorch/test-infra/.github/actions/check-disk-space@release/2.6
.github/workflows/_mac-test.yml (10 changes, vendored)
@@ -82,11 +82,11 @@ jobs:
           done
 
       - name: Clean up disk space before running MacOS workflow
-        uses: pytorch/test-infra/.github/actions/check-disk-space@main
+        uses: pytorch/test-infra/.github/actions/check-disk-space@release/2.6
 
       # [see note: pytorch repo ref]
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
 
       - name: Start monitoring script
         id: monitor-script
@@ -109,7 +109,7 @@ jobs:
           use-gha: true
 
       - name: Setup miniconda
-        uses: pytorch/test-infra/.github/actions/setup-miniconda@main
+        uses: pytorch/test-infra/.github/actions/setup-miniconda@release/2.6
         with:
           python-version: ${{ inputs.python-version }}
           environment-file: .github/requirements/conda-env-${{ runner.os }}-${{ runner.arch }}
@@ -224,7 +224,7 @@ jobs:
           file-suffix: ${{ github.job }}-${{ matrix.config }}-${{ matrix.shard }}-${{ matrix.num_shards }}-${{ matrix.runner }}_${{ steps.get-job-id.outputs.job-id }}
 
       - name: Upload the benchmark results
-        uses: pytorch/test-infra/.github/actions/upload-benchmark-results@main
+        uses: pytorch/test-infra/.github/actions/upload-benchmark-results@release/2.6
         with:
           benchmark-results-dir: test/test-reports
           dry-run: false
@@ -234,4 +234,4 @@ jobs:
       - name: Clean up disk space
         if: always()
         continue-on-error: true
-        uses: pytorch/test-infra/.github/actions/check-disk-space@main
+        uses: pytorch/test-infra/.github/actions/check-disk-space@release/2.6
.github/workflows/_rocm-test.yml (6 changes, vendored)
@@ -66,7 +66,7 @@ jobs:
     steps:
       # [see note: pytorch repo ref]
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
         with:
           no-sudo: true
 
@@ -88,12 +88,12 @@ jobs:
 
       - name: Calculate docker image
         id: calculate-docker-image
-        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
+        uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.6
         with:
           docker-image-name: ${{ inputs.docker-image }}
 
       - name: Pull docker image
-        uses: pytorch/test-infra/.github/actions/pull-docker-image@main
+        uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.6
         with:
           docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
.github/workflows/_runner-determinator.yml (2 changes, vendored)
@@ -54,7 +54,7 @@ jobs:
       PR_NUMBER: ${{ github.event.pull_request.number }}
     steps:
       # - name: Checkout PyTorch
-      #   uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+      #   uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
       #   with:
       #     fetch-depth: 1
       #     submodules: true
.github/workflows/_win-build.yml (6 changes, vendored)
@@ -84,10 +84,10 @@ jobs:
           git config --global core.fsmonitor false
 
       - name: Clean up leftover processes on non-ephemeral Windows runner
-        uses: pytorch/test-infra/.github/actions/cleanup-runner@main
+        uses: pytorch/test-infra/.github/actions/cleanup-runner@release/2.6
 
       - name: Setup SSH (Click me for login details)
-        uses: pytorch/test-infra/.github/actions/setup-ssh@main
+        uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
         with:
           github-secret: ${{ secrets.GITHUB_TOKEN }}
           instructions: |
@@ -102,7 +102,7 @@ jobs:
 
       # [see note: pytorch repo ref]
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
         with:
           no-sudo: true
.github/workflows/_win-test.yml (6 changes, vendored)
@@ -66,10 +66,10 @@ jobs:
           git config --global core.fsmonitor false
 
       - name: Clean up leftover processes on non-ephemeral Windows runner
-        uses: pytorch/test-infra/.github/actions/cleanup-runner@main
+        uses: pytorch/test-infra/.github/actions/cleanup-runner@release/2.6
 
       - name: Setup SSH (Click me for login details)
-        uses: pytorch/test-infra/.github/actions/setup-ssh@main
+        uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
         with:
           github-secret: ${{ secrets.GITHUB_TOKEN }}
           instructions: |
@@ -85,7 +85,7 @@ jobs:
 
       # [see note: pytorch repo ref]
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
         with:
           no-sudo: true
.github/workflows/_xpu-test.yml (6 changes, vendored)
@@ -62,7 +62,7 @@ jobs:
     steps:
       # [see note: pytorch repo ref]
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
 
       - name: Setup XPU
         uses: ./.github/actions/setup-xpu
@@ -80,12 +80,12 @@ jobs:
 
       - name: Calculate docker image
         id: calculate-docker-image
-        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
+        uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.6
         with:
           docker-image-name: ${{ inputs.docker-image }}
 
       - name: Pull docker image
-        uses: pytorch/test-infra/.github/actions/pull-docker-image@main
+        uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.6
         with:
           docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
.github/workflows/build-almalinux-images.yml (4 changes, vendored)
@@ -41,12 +41,12 @@ jobs:
       CUDA_VERSION: ${{ matrix.cuda_version }}
     steps:
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
         with:
           submodules: false
       - name: Calculate docker image
         if: env.WITH_PUSH == 'false'
-        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
+        uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.6
         with:
           docker-image-name: almalinux-builder${{ matrix.cuda_version == 'cpu' && '-' || '-cuda' }}${{matrix.cuda_version}}
           docker-build-dir: .ci/docker/almalinux
.github/workflows/build-libtorch-images.yml (14 changes, vendored)
@@ -32,7 +32,7 @@ jobs:
   get-label-type:
     if: github.repository_owner == 'pytorch'
     name: get-label-type
-    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
+    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
     with:
       triggering_actor: ${{ github.triggering_actor }}
       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
@@ -51,12 +51,12 @@ jobs:
       GPU_ARCH_VERSION: ${{ matrix.cuda_version }}
     steps:
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
         with:
           submodules: false
       - name: Calculate docker image
         if: env.WITH_PUSH == 'false'
-        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
+        uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.6
         with:
           docker-image-name: libtorch-cxx11-builder-cuda${{matrix.cuda_version}}
           docker-build-dir: .ci/docker/libtorch
@@ -93,12 +93,12 @@ jobs:
       GPU_ARCH_VERSION: ${{ matrix.rocm_version }}
     steps:
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
         with:
           submodules: false
       - name: Calculate docker image
         if: env.WITH_PUSH == 'false'
-        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
+        uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.6
         with:
           docker-image-name: libtorch-cxx11-builder-rocm${{matrix.rocm_version}}
           docker-build-dir: .ci/docker/libtorch
@@ -129,12 +129,12 @@ jobs:
     runs-on: "${{ needs.get-label-type.outputs.label-type }}linux.9xlarge.ephemeral"
     steps:
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
         with:
           submodules: false
       - name: Calculate docker image
         if: env.WITH_PUSH == 'false'
-        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
+        uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.6
         with:
           docker-image-name: libtorch-cxx11-builder-cpu
           docker-build-dir: .ci/docker/libtorch
@@ -41,7 +41,7 @@ jobs:
       GPU_ARCH_TYPE: cpu-s390x
     steps:
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
         with:
           submodules: false
           no-sudo: true
.github/workflows/build-manywheel-images.yml (40 changes, vendored)
@@ -36,7 +36,7 @@ jobs:
   get-label-type:
     if: github.repository_owner == 'pytorch'
     name: get-label-type
-    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
+    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
     with:
       triggering_actor: ${{ github.triggering_actor }}
       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
@@ -57,12 +57,12 @@ jobs:
       - name: Purge tools folder (free space for build)
         run: rm -rf /opt/hostedtoolcache
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
         with:
           submodules: false
       - name: Calculate docker image
         if: env.WITH_PUSH == 'false'
-        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
+        uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.6
         with:
           docker-image-name: manylinux-builder-cuda${{matrix.cuda_version}}
           docker-build-dir: .ci/docker/manywheel
@@ -102,12 +102,12 @@ jobs:
       - name: Purge tools folder (free space for build)
         run: rm -rf /opt/hostedtoolcache
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
         with:
           submodules: false
       - name: Calculate docker image
         if: env.WITH_PUSH == 'false'
-        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
+        uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.6
         with:
           docker-image-name: manylinux2_28-builder-cuda${{matrix.cuda_version}}
           docker-build-dir: .ci/docker/manywheel
@@ -147,7 +147,7 @@ jobs:
         uses: actions/checkout@v3
       - name: Calculate docker image
         if: env.WITH_PUSH == 'false'
-        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
+        uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.6
         with:
           docker-image-name: manylinuxaarch64-builder-cuda${{matrix.cuda_version}}
           docker-build-dir: .ci/docker/manywheel
@@ -184,12 +184,12 @@ jobs:
       GPU_ARCH_VERSION: ${{ matrix.rocm_version }}
     steps:
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
         with:
           submodules: false
       - name: Calculate docker image
         if: env.WITH_PUSH == 'false'
-        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
+        uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.6
         with:
           docker-image-name: manylinux2_28-builder-rocm${{matrix.rocm_version}}
           docker-build-dir: .ci/docker/manywheel
@@ -220,12 +220,12 @@ jobs:
     runs-on: "${{ needs.get-label-type.outputs.label-type }}linux.9xlarge.ephemeral"
     steps:
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
         with:
           submodules: false
       - name: Calculate docker image
         if: env.WITH_PUSH == 'false'
-        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
+        uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.6
         with:
           docker-image-name: manylinux-builder-cpu
           docker-build-dir: .ci/docker/manywheel
@@ -258,12 +258,12 @@ jobs:
       GPU_ARCH_TYPE: cpu-manylinux_2_28
     steps:
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
         with:
           submodules: false
       - name: Calculate docker image
         if: env.WITH_PUSH == 'false'
-        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
+        uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.6
         with:
           docker-image-name: manylinux2_28-builder-cpu
           docker-build-dir: .ci/docker/manywheel
@@ -296,12 +296,12 @@ jobs:
       GPU_ARCH_TYPE: cpu-aarch64
     steps:
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
         with:
           submodules: false
       - name: Calculate docker image
         if: env.WITH_PUSH == 'false'
-        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
+        uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.6
         with:
           docker-image-name: manylinuxaarch64-builder-cpu-aarch64
           docker-build-dir: .ci/docker/manywheel
@@ -334,12 +334,12 @@ jobs:
       GPU_ARCH_TYPE: cpu-aarch64-2_28
     steps:
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
         with:
           submodules: false
       - name: Calculate docker image
         if: env.WITH_PUSH == 'false'
-        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
+        uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.6
         with:
           docker-image-name: manylinux2_28_aarch64-builder-cpu-aarch64
           docker-build-dir: .ci/docker/manywheel
@@ -375,12 +375,12 @@ jobs:
       GPU_ARCH_TYPE: cpu-cxx11-abi
     steps:
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
         with:
           submodules: false
       - name: Calculate docker image
         if: env.WITH_PUSH == 'false'
-        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
+        uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.6
         with:
           docker-image-name: manylinuxcxx11-abi-builder-cpu-cxx11-abi
           docker-build-dir: .ci/docker/manywheel
@@ -413,12 +413,12 @@ jobs:
       GPU_ARCH_TYPE: xpu
     steps:
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
         with:
           submodules: false
       - name: Calculate docker image
         if: env.WITH_PUSH == 'false'
-        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
+        uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.6
         with:
           docker-image-name: manylinux2_28-builder-xpu
           docker-build-dir: .ci/docker/manywheel
.github/workflows/build-triton-wheel.yml (17 changes, vendored)
@@ -3,7 +3,7 @@ name: Build Triton wheels
 on:
   push:
     branches:
-      - main
+      - release/2.6
     tags:
       # NOTE: Binary build pipelines should only get triggered on release candidate builds
       # Release candidate tags look like: v1.11.0-rc1
@@ -30,7 +30,7 @@ jobs:
   get-label-type:
     if: github.repository_owner == 'pytorch'
     name: get-label-type
-    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
+    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
     with:
       triggering_actor: ${{ github.triggering_actor }}
       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
@@ -44,7 +44,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        py_vers: [ "3.9", "3.10", "3.11", "3.12", "3.13" ]
+        py_vers: [ "3.9", "3.10", "3.11", "3.12", "3.13", "3.13t" ]
         device: ["cuda", "rocm", "xpu"]
         docker-image: ["pytorch/manylinux-builder:cpu", "pytorch/manylinux2_28-builder:cpu"]
         exclude:
@@ -65,12 +65,12 @@ jobs:
       PLATFORM: ${{ contains(matrix.docker-image, '2_28') && 'manylinux_2_28_x86_64' || 'manylinux2014_x86_64' }}
     steps:
       - name: Setup SSH (Click me for login details)
-        uses: pytorch/test-infra/.github/actions/setup-ssh@main
+        uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
         with:
           github-secret: ${{ secrets.GITHUB_TOKEN }}
 
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
         with:
           submodules: false
 
@@ -78,7 +78,7 @@ jobs:
         uses: ./.github/actions/setup-linux
 
       - name: Pull Docker image
-        uses: pytorch/test-infra/.github/actions/pull-docker-image@main
+        uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.6
         with:
           docker-image: ${{ env.DOCKER_IMAGE }}
 
@@ -114,6 +114,9 @@ jobs:
             3.13)
               PYTHON_EXECUTABLE=/opt/python/cp313-cp313/bin/python
               ;;
+            3.13t)
+              PYTHON_EXECUTABLE=/opt/python/cp313-cp313t/bin/python
+              ;;
             *)
               echo "Unsupported python version ${PY_VERS}"
               exit 1
@@ -154,7 +157,7 @@ jobs:
           path: ${{ runner.temp }}/artifacts/wheelhouse/*
 
       - name: Teardown Linux
-        uses: pytorch/test-infra/.github/actions/teardown-linux@main
+        uses: pytorch/test-infra/.github/actions/teardown-linux@release/2.6
         if: always()
 
   upload-wheel:
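cp313-cp313t is the free-threaded (no-GIL) CPython 3.13 build shipped in the manylinux images; a quick check that a given interpreter is the free-threaded one (sketch):

/opt/python/cp313-cp313t/bin/python -c "import sysconfig; print(sysconfig.get_config_var('Py_GIL_DISABLED'))"  # prints 1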
.github/workflows/check-labels.yml (2 changes, vendored)
@@ -38,7 +38,7 @@ jobs:
     runs-on: linux.20_04.4x
     steps:
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
         with:
           submodules: false
           fetch-depth: 1
@@ -11,7 +11,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
         with:
           submodules: false
           fetch-depth: 1
.github/workflows/create_release.yml (2 changes, vendored)
@@ -19,7 +19,7 @@ jobs:
   get-label-type:
     if: github.repository_owner == 'pytorch'
     name: get-label-type
-    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
+    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
     with:
       triggering_actor: ${{ github.triggering_actor }}
       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
.github/workflows/docker-builds.yml (10 changes, vendored)
@@ -33,7 +33,7 @@ jobs:
   get-label-type:
     if: github.repository_owner == 'pytorch'
     name: get-label-type
-    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
+    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
     with:
       triggering_actor: ${{ github.triggering_actor }}
       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
@@ -99,21 +99,21 @@ jobs:
       # [see note: pytorch repo ref]
       # deep clone (fetch-depth 0) required for git merge-base
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
 
       - name: Setup Linux
         uses: ./.github/actions/setup-linux
 
       - name: Build docker image
         id: build-docker-image
-        uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
+        uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.6
         with:
           docker-image-name: ${{ matrix.docker-image-name }}
           always-rebuild: true
           push: true
 
       - name: Pull docker image
-        uses: pytorch/test-infra/.github/actions/pull-docker-image@main
+        uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.6
         with:
           docker-image: ${{ steps.build-docker-image.outputs.docker-image }}
 
@@ -145,5 +145,5 @@ jobs:
         if: always()
 
       - name: Teardown Linux
-        uses: pytorch/test-infra/.github/actions/teardown-linux@main
+        uses: pytorch/test-infra/.github/actions/teardown-linux@release/2.6
         if: always()

.github/workflows/docker-release.yml (vendored; 12 changed lines)

@@ -37,7 +37,7 @@ jobs:
   get-label-type:
     if: github.repository_owner == 'pytorch'
     name: get-label-type
-    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
+    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
     with:
       triggering_actor: ${{ github.triggering_actor }}
       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
@@ -52,7 +52,7 @@ jobs:
       matrix: ${{ steps.generate-matrix.outputs.matrix }}
     steps:
       - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+        uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
         with:
           fetch-depth: 1
           submodules: true
@@ -82,7 +82,7 @@ jobs:
       CUDNN_VERSION: ${{ matrix.cudnn_version }}
     steps:
       - name: Setup SSH (Click me for login details)
-        uses: pytorch/test-infra/.github/actions/setup-ssh@main
+        uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
         with:
           github-secret: ${{ secrets.GITHUB_TOKEN }}
       # [see note: pytorch repo ref]
@@ -160,12 +160,12 @@ jobs:
           fi

       - name: Teardown Linux
-        uses: pytorch/test-infra/.github/actions/teardown-linux@main
+        uses: pytorch/test-infra/.github/actions/teardown-linux@release/2.6
         if: always()

   validate:
     needs: build
-    uses: pytorch/builder/.github/workflows/validate-docker-images.yml@main
+    uses: pytorch/test-infra/.github/workflows/validate-docker-images.yml@release/2.6
     with:
-      channel: nightly
+      channel: test
       ref: main
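
Two changes in docker-release.yml go beyond branch pinning: the image-validation workflow now lives in pytorch/test-infra rather than pytorch/builder, and release candidates are validated against the test channel instead of nightly. The resulting job, assembled directly from the hunk above:

    validate:
      needs: build
      uses: pytorch/test-infra/.github/workflows/validate-docker-images.yml@release/2.6
      with:
        channel: test
        ref: main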

.github/workflows/generated-linux-aarch64-binary-manywheel-nightly.yml (generated, vendored; 175 changed lines)

@@ -40,7 +40,7 @@ jobs:
   get-label-type:
     if: github.repository_owner == 'pytorch'
     name: get-label-type
-    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
+    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
     with:
       triggering_actor: ${{ github.triggering_actor }}
       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
@@ -58,7 +58,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-aarch64
-      DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main
+      DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-2.6
       DESIRED_DEVTOOLSET: cxx11-abi
       use_split_build: False
       DESIRED_PYTHON: "3.9"
@@ -84,7 +84,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-aarch64
-      DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main
+      DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-2.6
       DESIRED_DEVTOOLSET: cxx11-abi
       use_split_build: False
       DESIRED_PYTHON: "3.9"
@@ -109,7 +109,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-aarch64
-      DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main
+      DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-2.6
       DESIRED_DEVTOOLSET: cxx11-abi
       use_split_build: False
       DESIRED_PYTHON: "3.9"
@@ -132,7 +132,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cu126
       GPU_ARCH_TYPE: cuda-aarch64
-      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.6-main
+      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.6-2.6
       DESIRED_DEVTOOLSET: cxx11-abi
       use_split_build: False
       DESIRED_PYTHON: "3.9"
@@ -158,7 +158,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cu126
       GPU_ARCH_TYPE: cuda-aarch64
-      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.6-main
+      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.6-2.6
       DESIRED_DEVTOOLSET: cxx11-abi
       use_split_build: False
       DESIRED_PYTHON: "3.9"
@@ -181,7 +181,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-aarch64
-      DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main
+      DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-2.6
       DESIRED_DEVTOOLSET: cxx11-abi
       use_split_build: False
       DESIRED_PYTHON: "3.10"
@@ -207,7 +207,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-aarch64
-      DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main
+      DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-2.6
       DESIRED_DEVTOOLSET: cxx11-abi
       use_split_build: False
       DESIRED_PYTHON: "3.10"
@@ -232,7 +232,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-aarch64
-      DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main
+      DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-2.6
       DESIRED_DEVTOOLSET: cxx11-abi
       use_split_build: False
       DESIRED_PYTHON: "3.10"
@@ -255,7 +255,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cu126
       GPU_ARCH_TYPE: cuda-aarch64
-      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.6-main
+      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.6-2.6
       DESIRED_DEVTOOLSET: cxx11-abi
       use_split_build: False
       DESIRED_PYTHON: "3.10"
@@ -281,7 +281,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cu126
       GPU_ARCH_TYPE: cuda-aarch64
-      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.6-main
+      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.6-2.6
       DESIRED_DEVTOOLSET: cxx11-abi
       use_split_build: False
       DESIRED_PYTHON: "3.10"
@@ -304,7 +304,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-aarch64
-      DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main
+      DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-2.6
       DESIRED_DEVTOOLSET: cxx11-abi
       use_split_build: False
       DESIRED_PYTHON: "3.11"
@@ -330,7 +330,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-aarch64
-      DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main
+      DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-2.6
       DESIRED_DEVTOOLSET: cxx11-abi
       use_split_build: False
       DESIRED_PYTHON: "3.11"
@@ -355,7 +355,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-aarch64
-      DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main
+      DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-2.6
       DESIRED_DEVTOOLSET: cxx11-abi
       use_split_build: False
       DESIRED_PYTHON: "3.11"
@@ -378,7 +378,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cu126
       GPU_ARCH_TYPE: cuda-aarch64
-      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.6-main
+      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.6-2.6
       DESIRED_DEVTOOLSET: cxx11-abi
       use_split_build: False
       DESIRED_PYTHON: "3.11"
@@ -404,7 +404,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cu126
       GPU_ARCH_TYPE: cuda-aarch64
-      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.6-main
+      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.6-2.6
       DESIRED_DEVTOOLSET: cxx11-abi
       use_split_build: False
       DESIRED_PYTHON: "3.11"
@@ -427,7 +427,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-aarch64
-      DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main
+      DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-2.6
       DESIRED_DEVTOOLSET: cxx11-abi
       use_split_build: False
       DESIRED_PYTHON: "3.12"
@@ -453,7 +453,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-aarch64
-      DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main
+      DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-2.6
       DESIRED_DEVTOOLSET: cxx11-abi
       use_split_build: False
       DESIRED_PYTHON: "3.12"
@@ -478,7 +478,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-aarch64
-      DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main
+      DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-2.6
       DESIRED_DEVTOOLSET: cxx11-abi
       use_split_build: False
       DESIRED_PYTHON: "3.12"
@@ -501,7 +501,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cu126
       GPU_ARCH_TYPE: cuda-aarch64
-      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.6-main
+      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.6-2.6
       DESIRED_DEVTOOLSET: cxx11-abi
       use_split_build: False
       DESIRED_PYTHON: "3.12"
@@ -527,7 +527,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cu126
       GPU_ARCH_TYPE: cuda-aarch64
-      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.6-main
+      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.6-2.6
       DESIRED_DEVTOOLSET: cxx11-abi
       use_split_build: False
       DESIRED_PYTHON: "3.12"
@@ -550,7 +550,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-aarch64
-      DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main
+      DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-2.6
       DESIRED_DEVTOOLSET: cxx11-abi
       use_split_build: False
       DESIRED_PYTHON: "3.13"
@@ -576,7 +576,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-aarch64
-      DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main
+      DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-2.6
       DESIRED_DEVTOOLSET: cxx11-abi
       use_split_build: False
       DESIRED_PYTHON: "3.13"
@@ -601,7 +601,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-aarch64
-      DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main
+      DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-2.6
       DESIRED_DEVTOOLSET: cxx11-abi
       use_split_build: False
       DESIRED_PYTHON: "3.13"
@@ -624,7 +624,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cu126
       GPU_ARCH_TYPE: cuda-aarch64
-      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.6-main
+      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.6-2.6
       DESIRED_DEVTOOLSET: cxx11-abi
       use_split_build: False
       DESIRED_PYTHON: "3.13"
@@ -650,7 +650,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cu126
       GPU_ARCH_TYPE: cuda-aarch64
-      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.6-main
+      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.6-2.6
       DESIRED_DEVTOOLSET: cxx11-abi
       use_split_build: False
       DESIRED_PYTHON: "3.13"
@@ -660,3 +660,126 @@ jobs:
       conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
       conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
     uses: ./.github/workflows/_binary-upload.yml
+
+  manywheel-py3_13t-cpu-aarch64-build:
+    if: ${{ github.repository_owner == 'pytorch' }}
+    uses: ./.github/workflows/_binary-build-linux.yml
+    needs: get-label-type
+    with:
+      PYTORCH_ROOT: /pytorch
+      BUILDER_ROOT: /builder
+      PACKAGE_TYPE: manywheel
+      # TODO: This is a legacy variable that we eventually want to get rid of in
+      # favor of GPU_ARCH_VERSION
+      DESIRED_CUDA: cpu
+      GPU_ARCH_TYPE: cpu-aarch64
+      DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-2.6
+      DESIRED_DEVTOOLSET: cxx11-abi
+      use_split_build: False
+      DESIRED_PYTHON: "3.13t"
+      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
+      runs_on: linux.arm64.m7g.4xlarge.ephemeral
+      ALPINE_IMAGE: "arm64v8/alpine"
+      build_name: manywheel-py3_13t-cpu-aarch64
+      build_environment: linux-aarch64-binary-manywheel
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.4.127; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.4.127; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.4.127; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.4.5.8; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.2.1.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.5.147; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.6.1.9; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.3.1.170; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.21.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.4.127; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.4.127; platform_system == 'Linux' and platform_machine == 'x86_64'
+    secrets:
+      github-token: ${{ secrets.GITHUB_TOKEN }}
+  manywheel-py3_13t-cpu-aarch64-test:  # Testing
+    if: ${{ github.repository_owner == 'pytorch' }}
+    needs:
+      - manywheel-py3_13t-cpu-aarch64-build
+      - get-label-type
+    uses: ./.github/workflows/_binary-test-linux.yml
+    with:
+      PYTORCH_ROOT: /pytorch
+      BUILDER_ROOT: /builder
+      PACKAGE_TYPE: manywheel
+      # TODO: This is a legacy variable that we eventually want to get rid of in
+      # favor of GPU_ARCH_VERSION
+      DESIRED_CUDA: cpu
+      GPU_ARCH_TYPE: cpu-aarch64
+      DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-2.6
+      DESIRED_DEVTOOLSET: cxx11-abi
+      use_split_build: False
+      DESIRED_PYTHON: "3.13t"
+      build_name: manywheel-py3_13t-cpu-aarch64
+      build_environment: linux-aarch64-binary-manywheel
+      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
+      runs_on: linux.arm64.2xlarge
+      ALPINE_IMAGE: "arm64v8/alpine"
+    secrets:
+      github-token: ${{ secrets.GITHUB_TOKEN }}
+  manywheel-py3_13t-cpu-aarch64-upload:  # Uploading
+    if: ${{ github.repository_owner == 'pytorch' }}
+    permissions:
+      id-token: write
+      contents: read
+    needs: manywheel-py3_13t-cpu-aarch64-test
+    with:
+      PYTORCH_ROOT: /pytorch
+      BUILDER_ROOT: /builder
+      PACKAGE_TYPE: manywheel
+      # TODO: This is a legacy variable that we eventually want to get rid of in
+      # favor of GPU_ARCH_VERSION
+      DESIRED_CUDA: cpu
+      GPU_ARCH_TYPE: cpu-aarch64
+      DOCKER_IMAGE: pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-2.6
+      DESIRED_DEVTOOLSET: cxx11-abi
+      use_split_build: False
+      DESIRED_PYTHON: "3.13t"
+      build_name: manywheel-py3_13t-cpu-aarch64
+    secrets:
+      github-token: ${{ secrets.GITHUB_TOKEN }}
+      conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
+      conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
+    uses: ./.github/workflows/_binary-upload.yml
+
+  manywheel-py3_13t-cuda-aarch64-build:
+    if: ${{ github.repository_owner == 'pytorch' }}
+    uses: ./.github/workflows/_binary-build-linux.yml
+    needs: get-label-type
+    with:
+      PYTORCH_ROOT: /pytorch
+      BUILDER_ROOT: /builder
+      PACKAGE_TYPE: manywheel
+      # TODO: This is a legacy variable that we eventually want to get rid of in
+      # favor of GPU_ARCH_VERSION
+      DESIRED_CUDA: cu126
+      GPU_ARCH_TYPE: cuda-aarch64
+      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.6-2.6
+      DESIRED_DEVTOOLSET: cxx11-abi
+      use_split_build: False
+      DESIRED_PYTHON: "3.13t"
+      runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
+      runs_on: linux.arm64.m7g.4xlarge.ephemeral
+      ALPINE_IMAGE: "arm64v8/alpine"
+      build_name: manywheel-py3_13t-cuda-aarch64
+      build_environment: linux-aarch64-binary-manywheel
+      timeout-minutes: 420
+    secrets:
+      github-token: ${{ secrets.GITHUB_TOKEN }}
+  manywheel-py3_13t-cuda-aarch64-upload:  # Uploading
+    if: ${{ github.repository_owner == 'pytorch' }}
+    permissions:
+      id-token: write
+      contents: read
+    needs: manywheel-py3_13t-cuda-aarch64-build
+    with:
+      PYTORCH_ROOT: /pytorch
+      BUILDER_ROOT: /builder
+      PACKAGE_TYPE: manywheel
+      # TODO: This is a legacy variable that we eventually want to get rid of in
+      # favor of GPU_ARCH_VERSION
+      DESIRED_CUDA: cu126
+      GPU_ARCH_TYPE: cuda-aarch64
+      DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cuda12.6-2.6
+      DESIRED_DEVTOOLSET: cxx11-abi
+      use_split_build: False
+      DESIRED_PYTHON: "3.13t"
+      build_name: manywheel-py3_13t-cuda-aarch64
+    secrets:
+      github-token: ${{ secrets.GITHUB_TOKEN }}
+      conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
+      conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
+    uses: ./.github/workflows/_binary-upload.yml
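
Apart from the mechanical docker-tag re-pinning, this file gains new jobs for the Python 3.13 free-threaded ABI ("3.13t"). A condensed sketch of the wiring, not the literal generated text (environment and secrets blocks elided, and uses: moved next to needs: for readability): the CPU jobs chain build, test, and upload through needs:, while the CUDA 12.6 variant uploads straight from the build job and allows the build a 420-minute timeout:

    manywheel-py3_13t-cpu-aarch64-build:
      uses: ./.github/workflows/_binary-build-linux.yml
      needs: get-label-type
    manywheel-py3_13t-cpu-aarch64-test:
      uses: ./.github/workflows/_binary-test-linux.yml
      needs:
        - manywheel-py3_13t-cpu-aarch64-build
        - get-label-type
    manywheel-py3_13t-cpu-aarch64-upload:
      uses: ./.github/workflows/_binary-upload.yml
      needs: manywheel-py3_13t-cpu-aarch64-test
    manywheel-py3_13t-cuda-aarch64-build:
      uses: ./.github/workflows/_binary-build-linux.yml
      needs: get-label-type
      with:
        timeout-minutes: 420  # passed as a workflow input in the generated file
    manywheel-py3_13t-cuda-aarch64-upload:
      uses: ./.github/workflows/_binary-upload.yml
      needs: manywheel-py3_13t-cuda-aarch64-build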

.github/workflows/generated-linux-binary-libtorch-cxx11-abi-main.yml (generated, vendored; 6 changed lines)

@@ -35,7 +35,7 @@ jobs:
   get-label-type:
     if: github.repository_owner == 'pytorch'
     name: get-label-type
-    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
+    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
     with:
       triggering_actor: ${{ github.triggering_actor }}
       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
@@ -53,7 +53,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu
-      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu-main
+      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu-2.6
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: cxx11-abi
       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
@@ -75,7 +75,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu
-      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu-main
+      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu-2.6
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: cxx11-abi
       build_name: libtorch-cpu-shared-with-deps-cxx11-abi

.github/workflows/generated-linux-binary-libtorch-cxx11-abi-nightly.yml (generated, vendored; 48 changed lines)

@@ -40,7 +40,7 @@ jobs:
   get-label-type:
     if: github.repository_owner == 'pytorch'
     name: get-label-type
-    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
+    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
     with:
       triggering_actor: ${{ github.triggering_actor }}
       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
@@ -58,7 +58,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu
-      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu-main
+      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu-2.6
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: cxx11-abi
       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
@@ -80,7 +80,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu
-      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu-main
+      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu-2.6
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: cxx11-abi
       build_name: libtorch-cpu-shared-with-deps-cxx11-abi
@@ -103,7 +103,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu
-      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu-main
+      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu-2.6
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: cxx11-abi
       build_name: libtorch-cpu-shared-with-deps-cxx11-abi
@@ -126,7 +126,7 @@ jobs:
       DESIRED_CUDA: cu118
       GPU_ARCH_VERSION: 11.8
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda11.8-main
+      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda11.8-2.6
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: cxx11-abi
       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
@@ -149,7 +149,7 @@ jobs:
       DESIRED_CUDA: cu118
       GPU_ARCH_VERSION: 11.8
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda11.8-main
+      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda11.8-2.6
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: cxx11-abi
       build_name: libtorch-cuda11_8-shared-with-deps-cxx11-abi
@@ -173,7 +173,7 @@ jobs:
       DESIRED_CUDA: cu118
       GPU_ARCH_VERSION: 11.8
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda11.8-main
+      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda11.8-2.6
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: cxx11-abi
       build_name: libtorch-cuda11_8-shared-with-deps-cxx11-abi
@@ -196,7 +196,7 @@ jobs:
       DESIRED_CUDA: cu124
       GPU_ARCH_VERSION: 12.4
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda12.4-main
+      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda12.4-2.6
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: cxx11-abi
       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
@@ -219,7 +219,7 @@ jobs:
       DESIRED_CUDA: cu124
       GPU_ARCH_VERSION: 12.4
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda12.4-main
+      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda12.4-2.6
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: cxx11-abi
       build_name: libtorch-cuda12_4-shared-with-deps-cxx11-abi
@@ -243,7 +243,7 @@ jobs:
       DESIRED_CUDA: cu124
       GPU_ARCH_VERSION: 12.4
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda12.4-main
+      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda12.4-2.6
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: cxx11-abi
       build_name: libtorch-cuda12_4-shared-with-deps-cxx11-abi
@@ -266,7 +266,7 @@ jobs:
       DESIRED_CUDA: cu126
       GPU_ARCH_VERSION: 12.6
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda12.6-main
+      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda12.6-2.6
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: cxx11-abi
       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
@@ -289,7 +289,7 @@ jobs:
       DESIRED_CUDA: cu126
       GPU_ARCH_VERSION: 12.6
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda12.6-main
+      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda12.6-2.6
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: cxx11-abi
       build_name: libtorch-cuda12_6-shared-with-deps-cxx11-abi
@@ -313,7 +313,7 @@ jobs:
       DESIRED_CUDA: cu126
       GPU_ARCH_VERSION: 12.6
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda12.6-main
+      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda12.6-2.6
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: cxx11-abi
       build_name: libtorch-cuda12_6-shared-with-deps-cxx11-abi
@@ -336,7 +336,7 @@ jobs:
       DESIRED_CUDA: rocm6.1
       GPU_ARCH_VERSION: 6.1
       GPU_ARCH_TYPE: rocm
-      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.1-main
+      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.1-2.6
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: cxx11-abi
       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
@@ -361,7 +361,7 @@ jobs:
       GPU_ARCH_VERSION: 6.1
       GPU_ARCH_TYPE: rocm
       SKIP_ALL_TESTS: 1
-      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.1-main
+      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.1-2.6
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: cxx11-abi
     steps:
@@ -375,7 +375,6 @@ jobs:
       - name: Checkout PyTorch
         uses: malfet/checkout@silent-checkout
         with:
-          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
           submodules: recursive
           path: pytorch
           quiet-checkout: true
@@ -388,9 +387,9 @@ jobs:
         run: |
           echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri --group-add video --group-add daemon" >> "${GITHUB_ENV}"
       - name: Pull Docker image
-        uses: pytorch/test-infra/.github/actions/pull-docker-image@main
+        uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.6
         with:
-          docker-image: pytorch/libtorch-cxx11-builder:rocm6.1-main
+          docker-image: pytorch/libtorch-cxx11-builder:rocm6.1-2.6
       - name: Test Pytorch binary
         uses: ./pytorch/.github/actions/test-pytorch-binary
       - name: Teardown ROCm
@@ -410,7 +409,7 @@ jobs:
       DESIRED_CUDA: rocm6.1
       GPU_ARCH_VERSION: 6.1
       GPU_ARCH_TYPE: rocm
-      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.1-main
+      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.1-2.6
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: cxx11-abi
       build_name: libtorch-rocm6_1-shared-with-deps-cxx11-abi
@@ -433,7 +432,7 @@ jobs:
       DESIRED_CUDA: rocm6.2.4
       GPU_ARCH_VERSION: 6.2.4
       GPU_ARCH_TYPE: rocm
-      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.2.4-main
+      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.2.4-2.6
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: cxx11-abi
       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
@@ -458,7 +457,7 @@ jobs:
       GPU_ARCH_VERSION: 6.2.4
       GPU_ARCH_TYPE: rocm
       SKIP_ALL_TESTS: 1
-      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.2.4-main
+      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.2.4-2.6
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: cxx11-abi
     steps:
@@ -472,7 +471,6 @@ jobs:
       - name: Checkout PyTorch
         uses: malfet/checkout@silent-checkout
         with:
-          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
           submodules: recursive
           path: pytorch
           quiet-checkout: true
@@ -485,9 +483,9 @@ jobs:
         run: |
          echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri --group-add video --group-add daemon" >> "${GITHUB_ENV}"
       - name: Pull Docker image
-        uses: pytorch/test-infra/.github/actions/pull-docker-image@main
+        uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.6
         with:
-          docker-image: pytorch/libtorch-cxx11-builder:rocm6.2.4-main
+          docker-image: pytorch/libtorch-cxx11-builder:rocm6.2.4-2.6
       - name: Test Pytorch binary
         uses: ./pytorch/.github/actions/test-pytorch-binary
       - name: Teardown ROCm
@@ -507,7 +505,7 @@ jobs:
       DESIRED_CUDA: rocm6.2.4
       GPU_ARCH_VERSION: 6.2.4
       GPU_ARCH_TYPE: rocm
-      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.2.4-main
+      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.2.4-2.6
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: cxx11-abi
       build_name: libtorch-rocm6_2_4-shared-with-deps-cxx11-abi
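
The -375,7 +375,6 style hunks in this file, and the matching ones in the macOS and Windows workflows below, are pure one-line deletions: the hunk counts drop from seven lines to six, and the three context lines on each side bracket the ref: override, so that appears to be the line the release branch removes. The checkout step after the change, reconstructed from the surrounding context:

    - name: Checkout PyTorch
      uses: malfet/checkout@silent-checkout
      with:
        submodules: recursive
        path: pytorch
        quiet-checkout: true

Without the explicit ref:, the action falls back to its default checkout behavior for the triggering commit.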

.github/workflows/generated-linux-binary-libtorch-pre-cxx11-main.yml (generated, vendored; 6 changed lines)

@@ -35,7 +35,7 @@ jobs:
   get-label-type:
     if: github.repository_owner == 'pytorch'
     name: get-label-type
-    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
+    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
     with:
       triggering_actor: ${{ github.triggering_actor }}
       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
@@ -53,7 +53,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu
-      DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
+      DOCKER_IMAGE: pytorch/manylinux-builder:cpu-2.6
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: pre-cxx11
       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
@@ -75,7 +75,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu
-      DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
+      DOCKER_IMAGE: pytorch/manylinux-builder:cpu-2.6
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: pre-cxx11
       build_name: libtorch-cpu-shared-with-deps-pre-cxx11

.github/workflows/generated-linux-binary-libtorch-pre-cxx11-nightly.yml (generated, vendored; 26 changed lines)

@@ -40,7 +40,7 @@ jobs:
   get-label-type:
     if: github.repository_owner == 'pytorch'
     name: get-label-type
-    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
+    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
     with:
       triggering_actor: ${{ github.triggering_actor }}
       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
@@ -58,7 +58,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu
-      DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
+      DOCKER_IMAGE: pytorch/manylinux-builder:cpu-2.6
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: pre-cxx11
       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
@@ -80,7 +80,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu
-      DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
+      DOCKER_IMAGE: pytorch/manylinux-builder:cpu-2.6
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: pre-cxx11
       build_name: libtorch-cpu-shared-with-deps-pre-cxx11
@@ -103,7 +103,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu
-      DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
+      DOCKER_IMAGE: pytorch/manylinux-builder:cpu-2.6
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: pre-cxx11
       build_name: libtorch-cpu-shared-with-deps-pre-cxx11
@@ -126,7 +126,7 @@ jobs:
       DESIRED_CUDA: cu118
       GPU_ARCH_VERSION: 11.8
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-main
+      DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-2.6
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: pre-cxx11
       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
@@ -149,7 +149,7 @@ jobs:
       DESIRED_CUDA: cu118
       GPU_ARCH_VERSION: 11.8
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-main
+      DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-2.6
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: pre-cxx11
       build_name: libtorch-cuda11_8-shared-with-deps-pre-cxx11
@@ -173,7 +173,7 @@ jobs:
       DESIRED_CUDA: cu118
       GPU_ARCH_VERSION: 11.8
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-main
+      DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-2.6
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: pre-cxx11
       build_name: libtorch-cuda11_8-shared-with-deps-pre-cxx11
@@ -196,7 +196,7 @@ jobs:
       DESIRED_CUDA: cu124
       GPU_ARCH_VERSION: 12.4
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.4-main
+      DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.4-2.6
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: pre-cxx11
       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
@@ -219,7 +219,7 @@ jobs:
       DESIRED_CUDA: cu124
       GPU_ARCH_VERSION: 12.4
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.4-main
+      DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.4-2.6
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: pre-cxx11
       build_name: libtorch-cuda12_4-shared-with-deps-pre-cxx11
@@ -243,7 +243,7 @@ jobs:
       DESIRED_CUDA: cu124
       GPU_ARCH_VERSION: 12.4
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.4-main
+      DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.4-2.6
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: pre-cxx11
       build_name: libtorch-cuda12_4-shared-with-deps-pre-cxx11
@@ -266,7 +266,7 @@ jobs:
       DESIRED_CUDA: cu126
       GPU_ARCH_VERSION: 12.6
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.6-main
+      DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.6-2.6
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: pre-cxx11
       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
@@ -289,7 +289,7 @@ jobs:
       DESIRED_CUDA: cu126
       GPU_ARCH_VERSION: 12.6
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.6-main
+      DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.6-2.6
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: pre-cxx11
       build_name: libtorch-cuda12_6-shared-with-deps-pre-cxx11
@@ -313,7 +313,7 @@ jobs:
       DESIRED_CUDA: cu126
       GPU_ARCH_VERSION: 12.6
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.6-main
+      DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.6-2.6
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: pre-cxx11
       build_name: libtorch-cuda12_6-shared-with-deps-pre-cxx11

.github/workflows/generated-linux-binary-manywheel-main.yml (generated, vendored; 14 changed lines)

@@ -35,7 +35,7 @@ jobs:
   get-label-type:
     if: github.repository_owner == 'pytorch'
     name: get-label-type
-    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
+    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
     with:
       triggering_actor: ${{ github.triggering_actor }}
       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
@@ -54,7 +54,7 @@ jobs:
       DESIRED_CUDA: cu118
       GPU_ARCH_VERSION: 11.8
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-main
+      DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-2.6
       use_split_build: False
       DESIRED_PYTHON: "3.9"
       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
@@ -78,7 +78,7 @@ jobs:
       DESIRED_CUDA: cu118
       GPU_ARCH_VERSION: 11.8
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-main
+      DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-2.6
       use_split_build: False
       DESIRED_PYTHON: "3.9"
       build_name: manywheel-py3_9-cuda11_8
@@ -101,7 +101,7 @@ jobs:
       DESIRED_CUDA: cu124
       GPU_ARCH_VERSION: 12.4
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.4-main
+      DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.4-2.6
       use_split_build: False
       DESIRED_PYTHON: "3.9"
       runner_prefix: "${{ needs.get-label-type.outputs.label-type }}"
@@ -125,7 +125,7 @@ jobs:
       DESIRED_CUDA: cu124
       GPU_ARCH_VERSION: 12.4
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.4-main
+      DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.4-2.6
       use_split_build: False
       DESIRED_PYTHON: "3.9"
       build_name: manywheel-py3_9-cuda12_4
@@ -148,7 +148,7 @@ jobs:
       DESIRED_CUDA: cu126
       GPU_ARCH_VERSION: 12.6
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/manylinux2_28-builder:cuda12.6-main
+      DOCKER_IMAGE: pytorch/manylinux2_28-builder:cuda12.6-2.6
       DESIRED_DEVTOOLSET: cxx11-abi
       use_split_build: False
       DESIRED_PYTHON: "3.9"
@@ -173,7 +173,7 @@ jobs:
       DESIRED_CUDA: cu126
       GPU_ARCH_VERSION: 12.6
       GPU_ARCH_TYPE: cuda
-      DOCKER_IMAGE: pytorch/manylinux2_28-builder:cuda12.6-main
+      DOCKER_IMAGE: pytorch/manylinux2_28-builder:cuda12.6-2.6
       DESIRED_DEVTOOLSET: cxx11-abi
       use_split_build: False
       DESIRED_PYTHON: "3.9"

.github/workflows/generated-linux-binary-manywheel-nightly.yml (generated, vendored; 375 changed lines)

File diff suppressed because it is too large.

.github/workflows/generated-linux-s390x-binary-manywheel-nightly.yml (generated, vendored; 32 changed lines)

@@ -40,7 +40,7 @@ jobs:
   get-label-type:
     if: github.repository_owner == 'pytorch'
     name: get-label-type
-    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
+    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
     with:
       triggering_actor: ${{ github.triggering_actor }}
       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
@@ -58,7 +58,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-s390x
-      DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
+      DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-2.6
       use_split_build: False
       DESIRED_PYTHON: "3.9"
       runs_on: linux.s390x
@@ -82,7 +82,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-s390x
-      DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
+      DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-2.6
       use_split_build: False
       DESIRED_PYTHON: "3.9"
       build_name: manywheel-py3_9-cpu-s390x
@@ -105,7 +105,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-s390x
-      DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
+      DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-2.6
       use_split_build: False
       DESIRED_PYTHON: "3.9"
       build_name: manywheel-py3_9-cpu-s390x
@@ -127,7 +127,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-s390x
-      DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
+      DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-2.6
       use_split_build: False
       DESIRED_PYTHON: "3.10"
       runs_on: linux.s390x
@@ -151,7 +151,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-s390x
-      DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
+      DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-2.6
       use_split_build: False
       DESIRED_PYTHON: "3.10"
       build_name: manywheel-py3_10-cpu-s390x
@@ -174,7 +174,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-s390x
-      DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
+      DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-2.6
       use_split_build: False
       DESIRED_PYTHON: "3.10"
       build_name: manywheel-py3_10-cpu-s390x
@@ -196,7 +196,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-s390x
-      DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
+      DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-2.6
       use_split_build: False
       DESIRED_PYTHON: "3.11"
       runs_on: linux.s390x
@@ -220,7 +220,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-s390x
-      DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
+      DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-2.6
       use_split_build: False
       DESIRED_PYTHON: "3.11"
       build_name: manywheel-py3_11-cpu-s390x
@@ -243,7 +243,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-s390x
-      DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
+      DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-2.6
       use_split_build: False
       DESIRED_PYTHON: "3.11"
       build_name: manywheel-py3_11-cpu-s390x
@@ -265,7 +265,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-s390x
-      DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
+      DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-2.6
       use_split_build: False
       DESIRED_PYTHON: "3.12"
       runs_on: linux.s390x
@@ -289,7 +289,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-s390x
-      DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
+      DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-2.6
       use_split_build: False
       DESIRED_PYTHON: "3.12"
       build_name: manywheel-py3_12-cpu-s390x
@@ -312,7 +312,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-s390x
-      DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
+      DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-2.6
       use_split_build: False
       DESIRED_PYTHON: "3.12"
       build_name: manywheel-py3_12-cpu-s390x
@@ -334,7 +334,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-s390x
-      DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
+      DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-2.6
       use_split_build: False
       DESIRED_PYTHON: "3.13"
       runs_on: linux.s390x
@@ -358,7 +358,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-s390x
-      DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
+      DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-2.6
       use_split_build: False
       DESIRED_PYTHON: "3.13"
       build_name: manywheel-py3_13-cpu-s390x
@@ -381,7 +381,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu-s390x
-      DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
+      DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-2.6
       use_split_build: False
       DESIRED_PYTHON: "3.13"
       build_name: manywheel-py3_13-cpu-s390x

.github/workflows/generated-macos-arm64-binary-libtorch-cxx11-abi-nightly.yml (generated, vendored; 3 changed lines)

@@ -78,7 +78,6 @@ jobs:
       - name: Checkout PyTorch
         uses: malfet/checkout@silent-checkout
         with:
-          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
           submodules: recursive
           path: pytorch
           quiet-checkout: true
@@ -129,7 +128,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu
-      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu-main
+      DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu-2.6
       LIBTORCH_VARIANT: shared-with-deps
       DESIRED_DEVTOOLSET: cxx11-abi
       build_name: libtorch-cpu-shared-with-deps-cxx11-abi

.github/workflows/generated-macos-arm64-binary-wheel-nightly.yml (generated, vendored; 15 changed lines)

@@ -75,7 +75,6 @@ jobs:
       - name: Checkout PyTorch
         uses: malfet/checkout@silent-checkout
         with:
-          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
           submodules: recursive
           path: pytorch
           quiet-checkout: true
@@ -126,7 +125,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu
-      DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
+      DOCKER_IMAGE: pytorch/manylinux-builder:cpu-2.6
       DESIRED_PYTHON: "3.9"
       build_name: wheel-py3_9-cpu
       use_s3: False
@@ -178,7 +177,6 @@ jobs:
       - name: Checkout PyTorch
         uses: malfet/checkout@silent-checkout
         with:
-          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
           submodules: recursive
           path: pytorch
           quiet-checkout: true
@@ -229,7 +227,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu
-      DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
+      DOCKER_IMAGE: pytorch/manylinux-builder:cpu-2.6
       DESIRED_PYTHON: "3.10"
       build_name: wheel-py3_10-cpu
       use_s3: False
@@ -281,7 +279,6 @@ jobs:
       - name: Checkout PyTorch
         uses: malfet/checkout@silent-checkout
         with:
-          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
           submodules: recursive
           path: pytorch
           quiet-checkout: true
@@ -332,7 +329,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu
-      DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
+      DOCKER_IMAGE: pytorch/manylinux-builder:cpu-2.6
       DESIRED_PYTHON: "3.11"
       build_name: wheel-py3_11-cpu
       use_s3: False
@@ -384,7 +381,6 @@ jobs:
       - name: Checkout PyTorch
         uses: malfet/checkout@silent-checkout
         with:
-          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
           submodules: recursive
           path: pytorch
           quiet-checkout: true
@@ -435,7 +431,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu
-      DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
+      DOCKER_IMAGE: pytorch/manylinux-builder:cpu-2.6
       DESIRED_PYTHON: "3.12"
       build_name: wheel-py3_12-cpu
       use_s3: False
@@ -487,7 +483,6 @@ jobs:
       - name: Checkout PyTorch
         uses: malfet/checkout@silent-checkout
         with:
-          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
           submodules: recursive
           path: pytorch
           quiet-checkout: true
@@ -538,7 +533,7 @@ jobs:
       # favor of GPU_ARCH_VERSION
       DESIRED_CUDA: cpu
       GPU_ARCH_TYPE: cpu
-      DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
+      DOCKER_IMAGE: pytorch/manylinux-builder:cpu-2.6
       DESIRED_PYTHON: "3.13"
       build_name: wheel-py3_13-cpu
       use_s3: False

.github/workflows/generated-windows-binary-libtorch-debug-main.yml (generated, vendored; 8 changed lines)

@@ -28,7 +28,7 @@ jobs:
   get-label-type:
     if: github.repository_owner == 'pytorch'
     name: get-label-type
-    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
+    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
     with:
       triggering_actor: ${{ github.triggering_actor }}
       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
@@ -69,7 +69,7 @@ jobs:
           echo "instance-type: $(get_ec2_metadata instance-type)"
           echo "system info $(uname -a)"
       - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
-        uses: pytorch/test-infra/.github/actions/setup-ssh@main
+        uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
         continue-on-error: true
         with:
           github-secret: ${{ secrets.GITHUB_TOKEN }}
@@ -110,7 +110,6 @@ jobs:
       - name: Checkout PyTorch
         uses: malfet/checkout@silent-checkout
         with:
-          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
           submodules: recursive
           path: pytorch
           quiet-checkout: true
@@ -184,7 +183,7 @@ jobs:
           echo "instance-type: $(get_ec2_metadata instance-type)"
           echo "system info $(uname -a)"
       - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
-        uses: pytorch/test-infra/.github/actions/setup-ssh@main
+        uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
         continue-on-error: true
         with:
           github-secret: ${{ secrets.GITHUB_TOKEN }}
@@ -230,7 +229,6 @@ jobs:
       - name: Checkout PyTorch
         uses: malfet/checkout@silent-checkout
         with:
-          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
           submodules: recursive
           path: pytorch
           quiet-checkout: true

.github/workflows/generated-windows-binary-libtorch-debug-nightly.yml (generated, vendored; 26 changed lines)

@@ -35,7 +35,7 @@ jobs:
   get-label-type:
     if: github.repository_owner == 'pytorch'
     name: get-label-type
-    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
+    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
     with:
       triggering_actor: ${{ github.triggering_actor }}
       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
@@ -76,7 +76,7 @@ jobs:
           echo "instance-type: $(get_ec2_metadata instance-type)"
           echo "system info $(uname -a)"
       - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
-        uses: pytorch/test-infra/.github/actions/setup-ssh@main
+        uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
         continue-on-error: true
         with:
           github-secret: ${{ secrets.GITHUB_TOKEN }}
@@ -117,7 +117,6 @@ jobs:
       - name: Checkout PyTorch
         uses: malfet/checkout@silent-checkout
         with:
-          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
           submodules: recursive
           path: pytorch
           quiet-checkout: true
@@ -191,7 +190,7 @@ jobs:
           echo "instance-type: $(get_ec2_metadata instance-type)"
           echo "system info $(uname -a)"
       - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
-        uses: pytorch/test-infra/.github/actions/setup-ssh@main
+        uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
         continue-on-error: true
         with:
           github-secret: ${{ secrets.GITHUB_TOKEN }}
@@ -237,7 +236,6 @@ jobs:
       - name: Checkout PyTorch
         uses: malfet/checkout@silent-checkout
         with:
-          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
           submodules: recursive
           path: pytorch
           quiet-checkout: true
@@ -328,7 +326,7 @@ jobs:
           echo "instance-type: $(get_ec2_metadata instance-type)"
           echo "system info $(uname -a)"
       - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
-        uses: pytorch/test-infra/.github/actions/setup-ssh@main
+        uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
         continue-on-error: true
         with:
           github-secret: ${{ secrets.GITHUB_TOKEN }}
@@ -369,7 +367,6 @@ jobs:
       - name: Checkout PyTorch
         uses: malfet/checkout@silent-checkout
         with:
-          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
           submodules: recursive
           path: pytorch
           quiet-checkout: true
@@ -444,7 +441,7 @@ jobs:
           echo "instance-type: $(get_ec2_metadata instance-type)"
           echo "system info $(uname -a)"
       - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
-        uses: pytorch/test-infra/.github/actions/setup-ssh@main
+        uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
         continue-on-error: true
         with:
           github-secret: ${{ secrets.GITHUB_TOKEN }}
@@ -490,7 +487,6 @@ jobs:
       - name: Checkout PyTorch
         uses: malfet/checkout@silent-checkout
         with:
-          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
           submodules: recursive
           path: pytorch
           quiet-checkout: true
@@ -582,7 +578,7 @@ jobs:
           echo "instance-type: $(get_ec2_metadata instance-type)"
           echo "system info $(uname -a)"
       - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
-        uses: pytorch/test-infra/.github/actions/setup-ssh@main
+        uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
         continue-on-error: true
         with:
           github-secret: ${{ secrets.GITHUB_TOKEN }}
@@ -623,7 +619,6 @@ jobs:
       - name: Checkout PyTorch
         uses: malfet/checkout@silent-checkout
         with:
-          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
           submodules: recursive
           path: pytorch
           quiet-checkout: true
@@ -698,7 +693,7 @@ jobs:
           echo "instance-type: $(get_ec2_metadata instance-type)"
           echo "system info $(uname -a)"
       - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
-        uses: pytorch/test-infra/.github/actions/setup-ssh@main
+        uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
         continue-on-error: true
         with:
           github-secret: ${{ secrets.GITHUB_TOKEN }}
@@ -744,7 +739,6 @@ jobs:
       - name: Checkout PyTorch
         uses: malfet/checkout@silent-checkout
         with:
-          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
           submodules: recursive
           path: pytorch
           quiet-checkout: true
@@ -836,7 +830,7 @@ jobs:
           echo "instance-type: $(get_ec2_metadata instance-type)"
           echo "system info $(uname -a)"
       - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
-        uses: pytorch/test-infra/.github/actions/setup-ssh@main
+        uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
         continue-on-error: true
         with:
           github-secret: ${{ secrets.GITHUB_TOKEN }}
@@ -877,7 +871,6 @@ jobs:
       - name: Checkout PyTorch
         uses: malfet/checkout@silent-checkout
         with:
-          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
           submodules: recursive
           path: pytorch
           quiet-checkout: true
@@ -952,7 +945,7 @@ jobs:
           echo "instance-type: $(get_ec2_metadata instance-type)"
           echo "system info $(uname -a)"
       - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
-        uses: pytorch/test-infra/.github/actions/setup-ssh@main
+        uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
         continue-on-error: true
         with:
           github-secret: ${{ secrets.GITHUB_TOKEN }}
@@ -998,7 +991,6 @@ jobs:
       - name: Checkout PyTorch
         uses: malfet/checkout@silent-checkout
         with:
-          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
           submodules: recursive
           path: pytorch
           quiet-checkout: true

.github/workflows/generated-windows-binary-libtorch-release-main.yml (generated, vendored; 8 changed lines)

@@ -28,7 +28,7 @@ jobs:
   get-label-type:
     if: github.repository_owner == 'pytorch'
     name: get-label-type
-    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
+    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
     with:
       triggering_actor: ${{ github.triggering_actor }}
       issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
@@ -69,7 +69,7 @@ jobs:
           echo "instance-type: $(get_ec2_metadata instance-type)"
           echo "system info $(uname -a)"
       - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
-        uses: pytorch/test-infra/.github/actions/setup-ssh@main
+        uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
         continue-on-error: true
         with:
           github-secret: ${{ secrets.GITHUB_TOKEN }}
@@ -110,7 +110,6 @@ jobs:
       - name: Checkout PyTorch
         uses: malfet/checkout@silent-checkout
         with:
-          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
           submodules: recursive
           path: pytorch
           quiet-checkout: true
@@ -184,7 +183,7 @@ jobs:
           echo "instance-type: $(get_ec2_metadata instance-type)"
           echo "system info $(uname -a)"
       - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
-        uses: pytorch/test-infra/.github/actions/setup-ssh@main
+        uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
         continue-on-error: true
         with:
           github-secret: ${{ secrets.GITHUB_TOKEN }}
@@ -230,7 +229,6 @@ jobs:
       - name: Checkout PyTorch
         uses: malfet/checkout@silent-checkout
         with:
-          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
           submodules: recursive
           path: pytorch
           quiet-checkout: true

26 changes: .github/workflows/generated-windows-binary-libtorch-release-nightly.yml (generated, vendored)

@ -35,7 +35,7 @@ jobs:
get-label-type:
if: github.repository_owner == 'pytorch'
name: get-label-type
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
with:
triggering_actor: ${{ github.triggering_actor }}
issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
@ -76,7 +76,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -117,7 +117,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -191,7 +190,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -237,7 +236,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -328,7 +326,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -369,7 +367,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -444,7 +441,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -490,7 +487,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -582,7 +578,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -623,7 +619,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -698,7 +693,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -744,7 +739,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -836,7 +830,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -877,7 +871,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -952,7 +945,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -998,7 +991,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true

152 changes: .github/workflows/generated-windows-binary-wheel-nightly.yml (generated, vendored)

@ -35,7 +35,7 @@ jobs:
get-label-type:
if: github.repository_owner == 'pytorch'
name: get-label-type
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
with:
triggering_actor: ${{ github.triggering_actor }}
issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
@ -73,7 +73,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -114,7 +114,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -184,7 +183,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -230,7 +229,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -314,7 +312,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -355,7 +353,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -426,7 +423,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -472,7 +469,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -557,7 +553,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -598,7 +594,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -669,7 +664,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -715,7 +710,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -800,7 +794,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -841,7 +835,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -912,7 +905,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -958,7 +951,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -1042,7 +1034,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -1083,7 +1075,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -1153,7 +1144,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -1199,7 +1190,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -1282,7 +1272,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -1323,7 +1313,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -1393,7 +1382,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -1439,7 +1428,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -1523,7 +1511,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -1564,7 +1552,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -1635,7 +1622,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -1681,7 +1668,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -1766,7 +1752,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -1807,7 +1793,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -1878,7 +1863,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -1924,7 +1909,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -2009,7 +1993,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -2050,7 +2034,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -2121,7 +2104,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -2167,7 +2150,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -2251,7 +2233,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -2292,7 +2274,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -2362,7 +2343,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -2408,7 +2389,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -2491,7 +2471,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -2532,7 +2512,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -2602,7 +2581,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -2648,7 +2627,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -2732,7 +2710,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -2773,7 +2751,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -2844,7 +2821,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -2890,7 +2867,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -2975,7 +2951,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -3016,7 +2992,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -3087,7 +3062,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -3133,7 +3108,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -3218,7 +3192,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -3259,7 +3233,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -3330,7 +3303,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -3376,7 +3349,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -3460,7 +3432,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -3501,7 +3473,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -3571,7 +3542,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -3617,7 +3588,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -3700,7 +3670,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -3741,7 +3711,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -3811,7 +3780,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -3857,7 +3826,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -3941,7 +3909,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -3982,7 +3950,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -4053,7 +4020,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -4099,7 +4066,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -4184,7 +4150,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -4225,7 +4191,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -4296,7 +4261,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -4342,7 +4307,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -4427,7 +4391,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -4468,7 +4432,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -4539,7 +4502,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -4585,7 +4548,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -4669,7 +4631,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -4710,7 +4672,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -4780,7 +4741,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -4826,7 +4787,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -4909,7 +4869,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -4950,7 +4910,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -5020,7 +4979,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -5066,7 +5025,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -5150,7 +5108,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -5191,7 +5149,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -5262,7 +5219,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -5308,7 +5265,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -5393,7 +5349,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -5434,7 +5390,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -5505,7 +5460,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -5551,7 +5506,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -5636,7 +5590,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -5677,7 +5631,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -5748,7 +5701,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -5794,7 +5747,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -5878,7 +5830,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -5919,7 +5871,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -5989,7 +5940,7 @@ jobs:
echo "instance-type: $(get_ec2_metadata instance-type)"
echo "system info $(uname -a)"
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
uses: pytorch/test-infra/.github/actions/setup-ssh@main
uses: pytorch/test-infra/.github/actions/setup-ssh@release/2.6
continue-on-error: true
with:
github-secret: ${{ secrets.GITHUB_TOKEN }}
@ -6035,7 +5986,6 @@ jobs:
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
with:
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
submodules: recursive
path: pytorch
quiet-checkout: true
@ -18,7 +18,7 @@ permissions: read-all
jobs:
get-default-label-prefix:
name: get-default-label-prefix
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }}
with:
triggering_actor: ${{ github.triggering_actor }}
@ -28,7 +28,7 @@ jobs:

get-a100-test-label-type:
name: get-a100-test-label-type
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }}
with:
triggering_actor: ${{ github.triggering_actor }}

4 changes: .github/workflows/inductor-perf-compare.yml (vendored)

@ -16,7 +16,7 @@ jobs:
get-default-label-prefix:
if: github.repository_owner == 'pytorch'
name: get-default-label-prefix
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
with:
triggering_actor: ${{ github.triggering_actor }}
issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
@ -25,7 +25,7 @@ jobs:

get-test-label-type:
name: get-test-label-type
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
if: github.repository_owner == 'pytorch'
with:
triggering_actor: ${{ github.triggering_actor }}
@ -50,7 +50,7 @@ permissions: read-all
jobs:
get-label-type:
name: get-label-type
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }}
with:
triggering_actor: ${{ github.triggering_actor }}
@ -50,7 +50,7 @@ permissions: read-all
jobs:
get-label-type:
name: get-label-type
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }}
with:
triggering_actor: ${{ github.triggering_actor }}
@ -68,7 +68,7 @@ permissions: read-all
jobs:
get-label-type:
name: get-label-type
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }}
with:
triggering_actor: ${{ github.triggering_actor }}

4 changes: .github/workflows/inductor-periodic.yml (vendored)

@ -20,7 +20,7 @@ permissions: read-all
jobs:
get-default-label-prefix:
name: get-default-label-prefix
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }}
with:
triggering_actor: ${{ github.triggering_actor }}
@ -30,7 +30,7 @@ jobs:

get-a100-test-label-type:
name: get-a100-test-label-type
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }}
with:
triggering_actor: ${{ github.triggering_actor }}

2 changes: .github/workflows/inductor-rocm.yml (vendored)

@ -51,7 +51,7 @@ permissions: read-all
jobs:
get-label-type:
name: get-label-type
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }}
with:
triggering_actor: ${{ github.triggering_actor }}

2 changes: .github/workflows/inductor-unittest.yml (vendored)

@ -17,7 +17,7 @@ permissions: read-all
jobs:
get-label-type:
name: get-label-type
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }}
with:
triggering_actor: ${{ github.triggering_actor }}

2 changes: .github/workflows/inductor.yml (vendored)

@ -33,7 +33,7 @@ jobs:

get-label-type:
name: get-label-type
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }}
with:
triggering_actor: ${{ github.triggering_actor }}

4 changes: .github/workflows/lint-autoformat.yml (vendored)

@ -15,12 +15,12 @@ jobs:
if: ${{ github.repository_owner == 'pytorch' && github.event.pull_request.user.login != 'ezyang' && github.event.pull_request.user.login != 'malfet' && !startsWith(github.head_ref, 'export-') }}
steps:
- name: Checkout pytorch
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
with:
submodules: true
fetch-depth: 0
- name: Setup miniconda
uses: pytorch/test-infra/.github/actions/setup-miniconda@main
uses: pytorch/test-infra/.github/actions/setup-miniconda@release/2.6
with:
python-version: "3.10"
- name: Run lintrunner (nonretryable)

2 changes: .github/workflows/lint-bc.yml (vendored)

@ -20,7 +20,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Run BC Lint Action
uses: pytorch/test-infra/.github/actions/bc-lint@main
uses: pytorch/test-infra/.github/actions/bc-lint@release/2.6
with:
repo: ${{ github.event.pull_request.head.repo.full_name }}
base_sha: ${{ github.event.pull_request.base.sha }}

21 changes: .github/workflows/lint.yml (vendored)

@ -18,14 +18,14 @@ jobs:
get-label-type:
if: github.repository_owner == 'pytorch'
name: get-label-type
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
with:
triggering_actor: ${{ github.triggering_actor }}
issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
curr_branch: ${{ github.head_ref || github.ref_name }}

lintrunner-clang:
uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
uses: pytorch/test-infra/.github/workflows/linux_job.yml@release/2.6
needs: get-label-type
with:
timeout: 120
@ -42,7 +42,7 @@ jobs:
.github/scripts/lintrunner.sh

lintrunner-noclang:
uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
uses: pytorch/test-infra/.github/workflows/linux_job.yml@release/2.6
needs: get-label-type
with:
timeout: 120
@ -58,7 +58,7 @@ jobs:
.github/scripts/lintrunner.sh

quick-checks:
uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
uses: pytorch/test-infra/.github/workflows/linux_job.yml@release/2.6
needs: get-label-type
with:
timeout: 120
@ -102,7 +102,7 @@ jobs:
if: github.event_name == 'pull_request' && !contains(github.event.pull_request.labels.*.name, 'skip-pr-sanity-checks')
steps:
- name: Checkout PyTorch
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
with:
submodules: false
fetch-depth: -1
@ -115,7 +115,7 @@ jobs:
bash .github/scripts/pr-sanity-check.sh

workflow-checks:
uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
uses: pytorch/test-infra/.github/workflows/linux_job.yml@release/2.6
needs: get-label-type
with:
timeout: 120
@ -130,6 +130,7 @@ jobs:
conda activate "${CONDA_ENV}"

# Regenerate workflows
export RELEASE_VERSION_TAG=2.6
.github/scripts/generate_ci_workflows.py

RC=0
@ -153,7 +154,7 @@ jobs:
exit $RC

toc:
uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
uses: pytorch/test-infra/.github/workflows/linux_job.yml@release/2.6
needs: get-label-type
with:
timeout: 120
@ -193,7 +194,7 @@ jobs:
test-tools:
name: Test tools
if: ${{ github.repository == 'pytorch/pytorch' }}
uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
uses: pytorch/test-infra/.github/workflows/linux_job.yml@release/2.6
needs: get-label-type
with:
timeout: 120
@ -216,7 +217,7 @@ jobs:
runs-on: linux.20_04.4x
steps:
- name: Checkout PyTorch
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
with:
submodules: false
fetch-depth: 1
@ -247,7 +248,7 @@ jobs:
# [see note: pytorch repo ref]
# deep clone (fetch-depth 0) required, to allow us to use git log
- name: Checkout PyTorch
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
with:
submodules: false
fetch-depth: 1

2 changes: .github/workflows/linux-aarch64.yml (vendored)

@ -19,7 +19,7 @@ jobs:
get-label-type:
if: github.repository_owner == 'pytorch'
name: get-label-type
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
with:
triggering_actor: ${{ github.triggering_actor }}
issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}

6 changes: .github/workflows/llm_td_retrieval.yml (vendored)

@ -12,7 +12,7 @@ jobs:
name: get-label-type
# Don't run on forked repos
if: github.repository_owner == 'pytorch'
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
with:
triggering_actor: ${{ github.triggering_actor }}
issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
@ -51,7 +51,7 @@ jobs:
path: llm-target-determinator

- name: Setup miniconda
uses: pytorch/test-infra/.github/actions/setup-miniconda@main
uses: pytorch/test-infra/.github/actions/setup-miniconda@release/2.6
with:
python-version: "3.9"

@ -120,5 +120,5 @@ jobs:
AWS_REGION: ""

- name: Teardown Linux
uses: pytorch/test-infra/.github/actions/teardown-linux@main
uses: pytorch/test-infra/.github/actions/teardown-linux@release/2.6
if: always()
2
.github/workflows/nightly-s3-uploads.yml
vendored
2
.github/workflows/nightly-s3-uploads.yml
vendored
@ -23,7 +23,7 @@ jobs:
|
||||
environment: upload-stats
|
||||
steps:
|
||||
- name: Checkout PyTorch
|
||||
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
|
||||
uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
|
||||
with:
|
||||
fetch-depth: 1
|
||||
submodules: false
|
||||
|
8
.github/workflows/nightly.yml
vendored
8
.github/workflows/nightly.yml
vendored
@ -19,7 +19,7 @@ concurrency:
|
||||
jobs:
|
||||
get-label-type:
|
||||
name: get-label-type
|
||||
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
|
||||
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
|
||||
if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }}
|
||||
with:
|
||||
triggering_actor: ${{ github.triggering_actor }}
|
||||
@ -58,7 +58,7 @@ jobs:
|
||||
if: ${{ github.event_name == 'schedule' && github.repository_owner == 'pytorch' }}
|
||||
steps:
|
||||
- name: update-vision-commit-hash
|
||||
uses: pytorch/test-infra/.github/actions/update-commit-hash@main
|
||||
uses: pytorch/test-infra/.github/actions/update-commit-hash@release/2.6
|
||||
with:
|
||||
repo-name: vision
|
||||
branch: main
|
||||
@ -73,7 +73,7 @@ jobs:
|
||||
if: ${{ github.event_name == 'schedule' && github.repository_owner == 'pytorch' }}
|
||||
steps:
|
||||
- name: update-audio-commit-hash
|
||||
uses: pytorch/test-infra/.github/actions/update-commit-hash@main
|
||||
uses: pytorch/test-infra/.github/actions/update-commit-hash@release/2.6
|
||||
with:
|
||||
repo-name: audio
|
||||
branch: main
|
||||
@ -88,7 +88,7 @@ jobs:
|
||||
if: ${{ github.event_name == 'schedule' && github.repository_owner == 'pytorch' }}
|
||||
steps:
|
||||
- name: update-executorch-commit-hash
|
||||
uses: pytorch/test-infra/.github/actions/update-commit-hash@main
|
||||
uses: pytorch/test-infra/.github/actions/update-commit-hash@release/2.6
|
||||
with:
|
||||
repo-name: executorch
|
||||
branch: main
|
||||
|
2
.github/workflows/nitpicker.yml
vendored
2
.github/workflows/nitpicker.yml
vendored
@ -19,7 +19,7 @@ jobs:
|
||||
if: ${{ github.event.pull_request.number != 26921 && github.repository_owner == 'pytorch' }}
|
||||
steps:
|
||||
- name: Checkout PyTorch
|
||||
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
|
||||
uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
|
||||
- uses: ethanis/nitpicker@v1
|
||||
with:
|
||||
nitpicks: '.github/nitpicks.yml'
|
||||
|
2
.github/workflows/periodic.yml
vendored
2
.github/workflows/periodic.yml
vendored
@ -41,7 +41,7 @@ jobs:
|
||||
|
||||
get-label-type:
|
||||
name: get-label-type
|
||||
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
|
||||
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
|
||||
if: (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch'
|
||||
with:
|
||||
triggering_actor: ${{ github.triggering_actor }}
|
||||
|
2
.github/workflows/pull.yml
vendored
2
.github/workflows/pull.yml
vendored
@ -38,7 +38,7 @@ jobs:
|
||||
|
||||
get-label-type:
|
||||
name: get-label-type
|
||||
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
|
||||
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
|
||||
if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }}
|
||||
with:
|
||||
triggering_actor: ${{ github.triggering_actor }}
|
||||
|
2
.github/workflows/slow.yml
vendored
2
.github/workflows/slow.yml
vendored
@ -39,7 +39,7 @@ jobs:
|
||||
|
||||
get-label-type:
|
||||
name: get-label-type
|
||||
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
|
||||
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
|
||||
if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }}
|
||||
with:
|
||||
triggering_actor: ${{ github.triggering_actor }}
|
||||
|
@ -13,7 +13,7 @@ jobs:
|
||||
get-label-type:
|
||||
if: github.repository_owner == 'pytorch'
|
||||
name: get-label-type
|
||||
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
|
||||
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
|
||||
with:
|
||||
triggering_actor: ${{ github.triggering_actor }}
|
||||
issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
|
||||
@ -35,7 +35,7 @@ jobs:
|
||||
|
||||
- name: Calculate docker image
|
||||
id: calculate-docker-image
|
||||
uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
|
||||
uses: pytorch/test-infra/.github/actions/calculate-docker-image@release/2.6
|
||||
with:
|
||||
docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9
|
||||
working-directory: pytorch
|
||||
@ -50,13 +50,13 @@ jobs:
|
||||
echo "docker pull ghcr.io/pytorch/ci-image:${tag/:/-}"
|
||||
|
||||
- name: Pull docker image
|
||||
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
|
||||
uses: pytorch/test-infra/.github/actions/pull-docker-image@release/2.6
|
||||
with:
|
||||
docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
|
||||
|
||||
- name: Install nvidia driver, nvidia-docker runtime, set GPU_FLAG
|
||||
id: install-nvidia-driver
|
||||
uses: pytorch/test-infra/.github/actions/setup-nvidia@main
|
||||
uses: pytorch/test-infra/.github/actions/setup-nvidia@release/2.6
|
||||
|
||||
- name: Clone CodeLlama
|
||||
uses: actions/checkout@v3
|
||||
@ -147,7 +147,7 @@ jobs:
|
||||
"s3://target-determinator-assets/indexes/latest/${ZIP_NAME}"
|
||||
|
||||
- name: Teardown Linux
|
||||
uses: pytorch/test-infra/.github/actions/teardown-linux@main
|
||||
uses: pytorch/test-infra/.github/actions/teardown-linux@release/2.6
|
||||
if: always()
|
||||
|
||||
concurrency:
|
||||
|
4
.github/workflows/target_determination.yml
vendored
4
.github/workflows/target_determination.yml
vendored
@ -9,7 +9,7 @@ jobs:
|
||||
name: get-label-type
|
||||
# Don't run on forked repos
|
||||
if: github.repository_owner == 'pytorch'
|
||||
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
|
||||
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
|
||||
with:
|
||||
triggering_actor: ${{ github.triggering_actor }}
|
||||
issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
|
||||
@ -27,7 +27,7 @@ jobs:
|
||||
# checkout because when we run this action we don't *have* a local
|
||||
# checkout. In other cases you should prefer a local checkout.
|
||||
- name: Checkout PyTorch
|
||||
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
|
||||
uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
|
||||
with:
|
||||
submodules: false
|
||||
|
||||
|
4
.github/workflows/torchbench.yml
vendored
4
.github/workflows/torchbench.yml
vendored
@ -14,7 +14,7 @@ jobs:
|
||||
get-default-label-prefix:
|
||||
if: github.repository_owner == 'pytorch'
|
||||
name: get-default-label-prefix
|
||||
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
|
||||
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
|
||||
with:
|
||||
triggering_actor: ${{ github.triggering_actor }}
|
||||
issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
|
||||
@ -24,7 +24,7 @@ jobs:
|
||||
get-a100-test-label-type:
|
||||
if: github.repository_owner == 'pytorch'
|
||||
name: get-a100-test-label-type
|
||||
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
|
||||
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
|
||||
with:
|
||||
triggering_actor: ${{ github.triggering_actor }}
|
||||
issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
|
||||
|
2
.github/workflows/trunk.yml
vendored
2
.github/workflows/trunk.yml
vendored
@ -37,7 +37,7 @@ jobs:
|
||||
|
||||
get-label-type:
|
||||
name: get-label-type
|
||||
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
|
||||
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
|
||||
if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }}
|
||||
with:
|
||||
triggering_actor: ${{ github.triggering_actor }}
|
||||
|
2
.github/workflows/update-viablestrict.yml
vendored
2
.github/workflows/update-viablestrict.yml
vendored
@ -18,7 +18,7 @@ jobs:
|
||||
environment: ${{ (github.event_name == 'schedule') && 'mergebot' || '' }}
|
||||
steps:
|
||||
- name: Update viable/strict
|
||||
uses: pytorch/test-infra/.github/actions/update-viablestrict@main
|
||||
uses: pytorch/test-infra/.github/actions/update-viablestrict@release/2.6
|
||||
id: update_viablestrict
|
||||
with:
|
||||
repository: pytorch/pytorch
|
||||
|
2
.github/workflows/update_pytorch_labels.yml
vendored
2
.github/workflows/update_pytorch_labels.yml
vendored
@ -17,7 +17,7 @@ jobs:
|
||||
contents: read
|
||||
steps:
|
||||
- name: Checkout PyTorch
|
||||
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
|
||||
uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
|
||||
with:
|
||||
fetch-depth: 1
|
||||
submodules: false
|
||||
|
@ -16,7 +16,7 @@ jobs:
|
||||
runs-on: linux.2xlarge
|
||||
steps:
|
||||
- name: Checkout PyTorch
|
||||
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
|
||||
uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
|
||||
with:
|
||||
fetch-depth: 1
|
||||
submodules: false
|
||||
@ -25,7 +25,7 @@ jobs:
|
||||
uses: ./.github/actions/setup-linux
|
||||
|
||||
- name: Setup miniconda
|
||||
uses: pytorch/test-infra/.github/actions/setup-miniconda@main
|
||||
uses: pytorch/test-infra/.github/actions/setup-miniconda@release/2.6
|
||||
with:
|
||||
python-version: "3.10"
|
||||
|
||||
|
2
.github/workflows/upload-test-stats.yml
vendored
2
.github/workflows/upload-test-stats.yml
vendored
@ -38,7 +38,7 @@ jobs:
|
||||
run: echo "${TRIGGERING_WORKFLOW}"
|
||||
|
||||
- name: Checkout PyTorch
|
||||
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
|
||||
uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
|
||||
|
||||
- name: Configure aws credentials
|
||||
uses: aws-actions/configure-aws-credentials@v3
|
||||
|
@ -31,7 +31,7 @@ jobs:
|
||||
name: Upload dynamo performance stats for ${{ github.event.workflow_run.id }}, attempt ${{ github.event.workflow_run.run_attempt }}
|
||||
steps:
|
||||
- name: Checkout PyTorch
|
||||
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
|
||||
uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
|
||||
with:
|
||||
submodules: false
|
||||
fetch-depth: 1
|
||||
|
@ -17,7 +17,7 @@ jobs:
|
||||
environment: upload-stats
|
||||
steps:
|
||||
- name: Checkout PyTorch
|
||||
uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
|
||||
uses: pytorch/pytorch/.github/actions/checkout-pytorch@release/2.6
|
||||
with:
|
||||
fetch-depth: 1
|
||||
submodules: false
|
||||
|
4
.github/workflows/weekly.yml
vendored
4
.github/workflows/weekly.yml
vendored
@ -22,7 +22,7 @@ jobs:
|
||||
fetch-depth: 0
|
||||
- name: update-xla-commit-hash
|
||||
continue-on-error: true
|
||||
uses: pytorch/test-infra/.github/actions/update-commit-hash@main
|
||||
uses: pytorch/test-infra/.github/actions/update-commit-hash@release/2.6
|
||||
with:
|
||||
repo-name: xla
|
||||
branch: master
|
||||
@ -31,7 +31,7 @@ jobs:
|
||||
updatebot-token: ${{ secrets.UPDATEBOT_TOKEN }}
|
||||
pytorchbot-token: ${{ secrets.GH_PYTORCHBOT_TOKEN }}
|
||||
- name: update-triton-commit-hash
|
||||
uses: pytorch/test-infra/.github/actions/update-commit-hash@main
|
||||
uses: pytorch/test-infra/.github/actions/update-commit-hash@release/2.6
|
||||
with:
|
||||
repo-owner: openai
|
||||
repo-name: triton
|
||||
|
2
.github/workflows/xpu.yml
vendored
2
.github/workflows/xpu.yml
vendored
@ -15,7 +15,7 @@ jobs:
|
||||
get-label-type:
|
||||
if: github.repository_owner == 'pytorch'
|
||||
name: get-label-type
|
||||
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
|
||||
uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@release/2.6
|
||||
with:
|
||||
triggering_actor: ${{ github.triggering_actor }}
|
||||
issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
|
||||
|
@ -1,8 +1,6 @@
#include <ATen/Context.h>
#include <ATen/DeviceAccelerator.h>
#include <c10/core/impl/VirtualGuardImpl.h>

namespace at::accelerator {
namespace at {

std::optional<c10::DeviceType> getAccelerator(bool checked) {
#define DETECT_AND_ASSIGN_ACCELERATOR(device_name) \
@ -39,8 +37,8 @@ std::optional<c10::DeviceType> getAccelerator(bool checked) {
#undef DETECT_AND_ASSIGN_ACCELERATOR
}

bool isAccelerator(c10::DeviceType device_type) {
  switch (device_type) {
bool isAccelerator(c10::DeviceType d) {
  switch (d) {
    case at::kCUDA:
    case at::kMTIA:
    case at::kXPU:
@ -54,50 +52,4 @@ bool isAccelerator(c10::DeviceType device_type) {
  }
}

c10::DeviceIndex deviceCount() {
  const auto device_type = getAccelerator(false);
  if (!device_type.has_value()) {
    return static_cast<c10::DeviceIndex>(0);
  }
  c10::impl::VirtualGuardImpl impl(device_type.value());
  return static_cast<c10::DeviceIndex>(impl.deviceCount());
}

void setDeviceIndex(c10::DeviceIndex device_index) {
  const auto device_type = getAccelerator(true).value();
  c10::impl::VirtualGuardImpl impl(device_type);
  impl.setDevice({device_type, device_index});
}

c10::DeviceIndex getDeviceIndex() {
  const auto device_type = getAccelerator(true).value();
  c10::impl::VirtualGuardImpl impl(device_type);
  return static_cast<c10::DeviceIndex>(impl.getDevice().index());
}

void setCurrentStream(c10::Stream stream) {
  const auto device_type = getAccelerator(true).value();
  TORCH_CHECK(
      device_type == stream.device_type(),
      "stream's device type ",
      c10::DeviceTypeName(stream.device_type()),
      " doesn't match the current accelerator ",
      c10::DeviceTypeName(device_type));
  c10::impl::VirtualGuardImpl impl(device_type);
  impl.exchangeStream(stream);
}

c10::Stream getCurrentStream(c10::DeviceIndex device_index) {
  const auto device_type = getAccelerator(true).value();
  c10::impl::VirtualGuardImpl impl(device_type);
  return impl.getStream({device_type, device_index});
}

void synchronizeDevice(c10::DeviceIndex device_index) {
  const auto device_type = getAccelerator(true).value();
  c10::impl::VirtualGuardImpl impl(device_type);
  // impl.synchronizeDevice should can be safely called from any device
  impl.synchronizeDevice(device_index);
}

} // namespace at::accelerator
} // namespace at
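Every function removed from the release branch in this hunk follows the same shape: resolve the active accelerator once, then route the call through c10::impl::VirtualGuardImpl so the same code serves CUDA, XPU, MPS, and so on. A minimal standalone sketch of that pattern (the function name current_accelerator_device is ours, not part of the diff):

```cpp
#include <ATen/DeviceAccelerator.h>
#include <c10/core/impl/VirtualGuardImpl.h>

// Sketch of the dispatch pattern used by the removed functions above.
c10::DeviceIndex current_accelerator_device() {
  // getAccelerator(true) asserts that an accelerator backend is available
  // and returns its DeviceType.
  const auto device_type = at::getAccelerator(/*checked=*/true).value();
  // VirtualGuardImpl virtualizes the per-backend DeviceGuardImpl, so the
  // same call works for whichever accelerator is compiled in.
  c10::impl::VirtualGuardImpl impl(device_type);
  return impl.getDevice().index();
}
```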
@ -6,8 +6,6 @@
#include <ATen/detail/MTIAHooksInterface.h>
#include <optional>

namespace at::accelerator {

// This file defines the top level Accelerator concept for PyTorch.
// A device is an accelerator per the definition here if:
// - It is mutually exclusive with all other accelerators
@ -17,39 +15,13 @@ namespace at::accelerator {
// As of today, accelerator devices are (in no particular order):
// CUDA, MTIA, XPU, HIP, MPS, PrivateUse1

namespace at {

// Ensures that only one accelerator is available (at
// compile time if possible) and return it.
// When checked is true, the returned optional always has a value.
TORCH_API std::optional<c10::DeviceType> getAccelerator(bool checked = false);

// Check if the given device type is an accelerator.
TORCH_API bool isAccelerator(c10::DeviceType device_type);
TORCH_API bool isAccelerator(c10::DeviceType d);

// Return the number of the device available. Note that this is *REQUIRED* to
// not raise any exception.
TORCH_API c10::DeviceIndex deviceCount();

// Set the current device index to the given device index.
TORCH_API void setDeviceIndex(c10::DeviceIndex device_index);

// Get the current device index.
TORCH_API c10::DeviceIndex getDeviceIndex();

// Set the current stream to a given stream. Note that this API doesn't change
// the current device index.
TORCH_API void setCurrentStream(c10::Stream stream);

// Get the current stream of the given device index.
TORCH_API c10::Stream getCurrentStream(c10::DeviceIndex device_index);

// Wait (by blocking the calling thread) until all the work previously enqueued
// on the given device index has been completed.
TORCH_API void synchronizeDevice(c10::DeviceIndex device_index);

} // namespace at::accelerator

namespace at {
// Keep BC only
using at::accelerator::getAccelerator;
using at::accelerator::isAccelerator;
} // namespace at
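For callers, the point of this header is that device-generic code never names a backend. A hedged usage sketch, using the at::accelerator:: spellings from the left-hand side of the hunk (on the release branch the same functions live directly in at::); the function wait_for_all_work is our own name:

```cpp
#include <ATen/DeviceAccelerator.h>

// Synchronize every device of whatever accelerator this build exposes.
void wait_for_all_work() {
  if (!at::accelerator::getAccelerator(/*checked=*/false).has_value()) {
    return;  // CPU-only build; deviceCount() below would report 0 anyway.
  }
  for (c10::DeviceIndex i = 0; i < at::accelerator::deviceCount(); ++i) {
    at::accelerator::synchronizeDevice(i);  // blocks until device i is idle
  }
}
```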
@ -3,10 +3,6 @@
namespace at::mps {

static const char* SCATTER_OPS_TEMPLATE = R"METAL_SCATTER(
struct __attribute__ ((packed)) packed_uint5{{
  uint32_t x; uint32_t y; uint32_t z; uint32_t w; uint32_t u;
}};

template<typename Y, typename X>
Y cast(const X x);

@ -15,32 +11,26 @@ template<>
  return {2};
}}

kernel void scatter_kernel_5(uint linear_index [[thread_position_in_grid]],
                             constant void * src_ [[buffer(0)]],
                             device void * dst_ [[buffer(1)]],
                             constant packed_uint5 & size [[buffer(2)]],
                             constant packed_uint5 & stride [[buffer(3)]],
                             constant uint32_t & numel [[buffer(4)]]) {{
kernel void scatter_kernel_n(uint linear_index [[thread_position_in_grid]],
                             constant void * src_ [[buffer(0)]],
                             device void * dst_ [[buffer(1)]],
                             constant uint32_t * size [[buffer(2)]],
                             constant uint32_t * stride [[buffer(3)]],
                             constant uint32_t & numel [[buffer(4)]],
                             constant int32_t & ndim [[buffer(5)]]) {{
  if (linear_index >= numel) return;

  constant {0} * src = (constant {0} *)src_;
  device {1} * dst = (device {1} *)dst_;

  packed_uint5 local_index;
  local_index.x = linear_index / (size.u * size.w * size.z * size.y) % size.x;
  local_index.y = linear_index / (size.u * size.w * size.z) % size.y;
  local_index.z = linear_index / (size.u * size.w) % size.z;
  local_index.w = linear_index / size.u % size.w;
  local_index.u = linear_index % size.u;
  uint64_t dst_offs = 0;
  auto dst_idx = linear_index;
  for(int dim = ndim - 1; dim >= 0; --dim) {{
    dst_offs += stride[dim] * (dst_idx % size[dim]);
    dst_idx /= size[dim];
  }}

  packed_uint5 strided_index;
  strided_index.x = local_index.x * stride.x;
  strided_index.y = local_index.y * stride.y;
  strided_index.z = local_index.z * stride.z;
  strided_index.w = local_index.w * stride.w;
  strided_index.u = local_index.u * stride.u;

  dst[strided_index.x + strided_index.y + strided_index.z + strided_index.w + strided_index.u] = cast<{1}>(src[linear_index]);
  dst[dst_offs] = cast<{1}>(src[linear_index]);
}}

kernel void scatter_kernel_4(uint linear_index [[thread_position_in_grid]],
@ -121,10 +111,6 @@ kernel void scatter_kernel_1(uint linear_index [[thread_position_in
)METAL_SCATTER";

static const char* GATHER_OPS_TEMPLATE = R"METAL_GATHER(
struct __attribute__ ((packed)) packed_uint5{{
  uint32_t x; uint32_t y; uint32_t z; uint32_t w; uint32_t u;
}};

template<typename Y, typename X>
Y cast(const X x);

@ -133,33 +119,26 @@ template<>
  return {2};
}}

kernel void gather_kernel_5(uint linear_index [[thread_position_in_grid]],
                            constant void * src_ [[buffer(0)]],
                            device void * dst_ [[buffer(1)]],
                            constant packed_uint5 & size [[buffer(2)]],
                            constant packed_uint5 & stride [[buffer(3)]],
                            constant uint32_t & numel [[buffer(4)]]) {{
kernel void gather_kernel_n(uint linear_index [[thread_position_in_grid]],
                            constant void * src_ [[buffer(0)]],
                            device void * dst_ [[buffer(1)]],
                            constant uint32_t * size [[buffer(2)]],
                            constant uint32_t * stride [[buffer(3)]],
                            constant uint32_t & numel [[buffer(4)]],
                            constant int32_t & ndim [[buffer(5)]]) {{
  if (linear_index >= numel) return;

  constant {0} * src = (constant {0} *)src_;
  device {1} * dst = (device {1} *)dst_;

  uint64_t src_offs = 0;
  auto src_idx = linear_index;
  for(int dim = ndim - 1; dim >= 0; --dim) {{
    src_offs += stride[dim] * (src_idx % size[dim]);
    src_idx /= size[dim];
  }}

  packed_uint5 local_index;
  local_index.x = linear_index / (size.u * size.w * size.z * size.y) % size.x;
  local_index.y = linear_index / (size.u * size.w * size.z) % size.y;
  local_index.z = linear_index / (size.u * size.w) % size.z;
  local_index.w = linear_index / size.u % size.w;
  local_index.u = linear_index % size.u;

  packed_uint5 strided_index;
  strided_index.x = local_index.x * stride.x;
  strided_index.y = local_index.y * stride.y;
  strided_index.z = local_index.z * stride.z;
  strided_index.w = local_index.w * stride.w;
  strided_index.u = local_index.u * stride.u;

  dst[linear_index] = cast<{1}>(src[strided_index.x + strided_index.y + strided_index.z + strided_index.w + strided_index.u]);
  dst[linear_index] = cast<{1}>(src[src_offs]);
}}

kernel void gather_kernel_4(uint linear_index [[thread_position_in_grid]],
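The rank-generic kernel_n variants replace the fixed five-component index arithmetic with a single loop: peel the linear index one dimension at a time (innermost first) and accumulate stride[dim] * coordinate. A host-side C++ sketch of the same mapping, with names of our choosing:

```cpp
#include <cstdint>
#include <vector>

// Maps a linear (contiguous) element index to a strided offset, exactly the
// per-thread loop the kernel_n scatter/gather variants run above.
uint64_t strided_offset(uint64_t linear_index,
                        const std::vector<uint32_t>& size,
                        const std::vector<uint32_t>& stride) {
  uint64_t offs = 0;
  for (int dim = static_cast<int>(size.size()) - 1; dim >= 0; --dim) {
    offs += static_cast<uint64_t>(stride[dim]) * (linear_index % size[dim]);
    linear_index /= size[dim];
  }
  return offs;
}
// e.g. size {2, 3}, stride {3, 1}: linear index 4 -> coords (1, 1) -> offset 4.
```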
@ -1085,6 +1085,7 @@ Tensor randn_like(
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ randperm ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

namespace {

template <typename scalar_t>
void randperm_cpu(Tensor& result, int64_t n, CPUGeneratorImpl* generator) {
  scalar_t *r__data = result.data_ptr<scalar_t>();
@ -1092,20 +1093,40 @@ void randperm_cpu(Tensor& result, int64_t n, CPUGeneratorImpl* generator) {
  result.resize_({n});
  int64_t r__stride_0 = result.stride(0);

  at::parallel_for(0, n, internal::GRAIN_SIZE,
                  [&r__data, &r__stride_0](int64_t p_begin, int64_t p_end) {
    for (const auto i : c10::irange(p_begin, p_end)) {
      r__data[i*r__stride_0] = static_cast<scalar_t>(i);
    }
  });
  // for small n, preserve old behavior
  if (n < std::numeric_limits<uint32_t>::max() / 20) {
    at::parallel_for(
        0,
        n,
        internal::GRAIN_SIZE,
        [&r__data, &r__stride_0](int64_t p_begin, int64_t p_end) {
          for (const auto i : c10::irange(p_begin, p_end)) {
            r__data[i * r__stride_0] = static_cast<scalar_t>(i);
          }
        });

  for(int64_t i = 0; i < n - 1; i++)
  {
    // NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.rand)
    int64_t z = generator->random() % (n-i);
    scalar_t sav = r__data[i*r__stride_0];
    r__data[i*r__stride_0] = r__data[(z+i)*r__stride_0];
    r__data[(z+i)*r__stride_0] = sav;
    for (int64_t i = 0; i < n - 1; i++) {
      // NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.rand)
      int64_t z = generator->random() % (n - i);
      scalar_t sav = r__data[i * r__stride_0];
      r__data[i * r__stride_0] = r__data[(z + i) * r__stride_0];
      r__data[(z + i) * r__stride_0] = sav;
    }
    return;
  }

  // we need to pick a number uniformly distributed between 0 and n
  // when n is of the same order of magnitude as the biggest number returned by
  // random the % result is not uniformly distributed
  // so we use random64(), you'd run out of RAM before you
  // start seeing the skew
  // use no-initialization Fischer-Yates variant
  // https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle#The_.22inside-out.22_algorithm
  for (int64_t i = 0; i < n; i++) {
    int64_t z = (int64_t)(generator->random64() % (i + 1));
    r__data[i * r__stride_0] = i;
    r__data[i * r__stride_0] = r__data[z * r__stride_0];
    r__data[z * r__stride_0] = i;
  }
}
} // namespace
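The large-n branch added here is the "inside-out" Fisher-Yates variant the comment links to: position i is filled by swapping with a uniformly chosen z in [0, i], so no separate initialization pass is needed, and drawing z from a 64-bit generator avoids the modulo bias a 32-bit random() % (n - i) develops once n approaches the generator's range. A self-contained sketch, with std::mt19937_64 standing in for ATen's CPUGeneratorImpl:

```cpp
#include <cstdint>
#include <random>
#include <vector>

// Inside-out Fisher-Yates: builds a uniform random permutation of 0..n-1
// without a separate initialization pass, mirroring the large-n branch of
// randperm_cpu above.
std::vector<int64_t> randperm(int64_t n, std::mt19937_64& gen) {
  std::vector<int64_t> r(n);
  for (int64_t i = 0; i < n; ++i) {
    const int64_t z = static_cast<int64_t>(gen() % (i + 1));  // uniform in [0, i]
    r[i] = r[z];  // move the displaced value into the new slot
    r[z] = i;     // place i at the randomly chosen position
  }
  return r;
}
```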
@ -295,12 +295,15 @@ Tensor mkldnn_linear_pointwise_binary(
        input_reshaped.size(0), weight_t.size(0)};
    output = output.reshape(output_size_reshaped);
    other_reshaped = other_reshaped.reshape(output_size_reshaped);
    TORCH_CHECK(
        output.sizes() == other_reshaped.sizes(),
        "linear_binary_run expects the size of output and other tensor to be the same");
  } else {
    TORCH_CHECK(
        output.dim() == other_reshaped.dim(),
        "linear_binary_run expects the dimension of output and other tensor to be the same");
  }

  TORCH_CHECK(
      output.dim() == other_reshaped.dim(),
      "linear_binary_run expects the dimension of output and other tensor to be the same");

  c10::impl::ExcludeDispatchKeyGuard edkg(c10::autograd_dispatch_keyset);
  ideep::tensor mkldnn_output = itensor_from_tensor(output);
  const ideep::tensor mkldnn_other = itensor_from_tensor(other_reshaped);
@ -81,10 +81,6 @@ std::string getArrayRefString(const IntArrayRef s);
// use has_storage() on the returned tensor to determine if src actually is a view
Tensor gatherViewTensor(const Tensor& src, Tensor& dst);
Tensor& scatterViewTensor(const Tensor& src, Tensor& output);
bool canSliceViewTensor(const TensorBase& src, MPSShape* mpsShape);
MPSGraphTensorData* getMPSGraphTensorDataForView(const TensorBase& src,
                                                 MPSShape* mpsShape,
                                                 const MPSDataType mpsDataType);
MPSGraphTensor* castToIHFTypes(MPSGraph* mpsGraph,
                               MPSGraphTensor* inputTensor,
                               const TensorBase& input,
@ -347,6 +343,15 @@ template <typename encoder_t,
          typename = std::enable_if_t<std::is_same_v<id<MTLComputeCommandEncoder>, encoder_t> ||
                                      std::is_same_v<id<MTLArgumentEncoder>, encoder_t>>>
static inline void mtl_setBuffer(encoder_t encoder, const TensorBase& t, unsigned idx) {
  if (C10_UNLIKELY(t.device().type() == kCPU)) {
    if constexpr (std::is_same_v<id<MTLComputeCommandEncoder>, encoder_t>) {
      TORCH_CHECK(t.dim() == 0, "Passed CPU tensor to MPS op");
      [encoder setBytes:t.storage().data() length:t.element_size() atIndex:idx];
    } else {
      TORCH_CHECK(false, "Passed CPU tensor to MPS op");
    }
    return;
  }
  [encoder setBuffer:getMTLBufferStorage(t) offset:t.storage_offset() * t.element_size() atIndex:idx];
}
@ -974,7 +974,6 @@ void MetalKernelFunction::dispatch(c10::ArrayRef<uint64_t> length, c10::Optional
}

void MetalKernelFunction::setArg(unsigned idx, const at::TensorBase& t) {
  TORCH_CHECK(t.device().type() == kMPS, "Tensor must be on GPU");
  mtl_setBuffer(encoder, t, idx);
}
@ -138,28 +138,16 @@ kernel void bitwise_not(constant uint& length [[buffer(0)]],
)METAL",
    3);

static const std::string& getMetalType(const c10::ScalarType& t) {
  // Mapping from c10::ScalarType to integral type that can be used for bitwise ops
  // As bitwise ops sign-agnostic map signed/unsigned char and boolean to the same type
  static std::unordered_map<c10::ScalarType, std::string> scalar_to_metal_type = {
      {c10::ScalarType::Long, "long"},
      {c10::ScalarType::Int, "int"},
      {c10::ScalarType::Short, "short"},
      {c10::ScalarType::Byte, "char"},
      {c10::ScalarType::Char, "char"},
      {c10::ScalarType::Bool, "char"},
  };

  auto it = scalar_to_metal_type.find(t);
  TORCH_CHECK(it != scalar_to_metal_type.end(), "Unsupported type ", t);
  return it->second;
static inline std::string getMetalType(const c10::ScalarType scalar_type) {
  TORCH_CHECK(c10::isIntegralType(scalar_type, /*includesBool=*/true), "Unsupported type");
  return scalarToMetalTypeString(scalar_type);
}

static const std::string& getMetalType(const Tensor& t) {
static inline std::string getMetalType(const Tensor& t) {
  return getMetalType(t.scalar_type());
}

static const std::string& getMetalType(const c10::Scalar& s) {
static inline std::string getMetalType(const c10::Scalar& s) {
  return getMetalType(s.type());
}
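Both versions implement the same contract: bitwise kernels are sign-agnostic, so every integral input collapses onto one of four Metal integer type names, with signed and unsigned 8-bit and bool all sharing "char". One side keeps a local table while the other defers to the shared scalarToMetalTypeString helper. An illustrative standalone version of that mapping (the enum and function name are ours, not PyTorch API):

```cpp
#include <stdexcept>
#include <string>

enum class ScalarKind { Long, Int, Short, Byte, Char, Bool };

// Bitwise ops ignore signedness, so the three 8-bit-ish kinds share "char",
// mirroring the lookup table in the hunk above.
std::string metal_type_for_bitwise(ScalarKind k) {
  switch (k) {
    case ScalarKind::Long:  return "long";
    case ScalarKind::Int:   return "int";
    case ScalarKind::Short: return "short";
    case ScalarKind::Byte:
    case ScalarKind::Char:
    case ScalarKind::Bool:  return "char";
  }
  throw std::invalid_argument("Unsupported type");
}
```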
@ -372,7 +372,6 @@ static Tensor mps_convolution_backward_input(IntArrayRef input_size,
  using namespace at::native::mps;
  using namespace mps;
  bool is3DConv = grad_output_t.dim() == 5;

  if (!is_macos_13_or_newer(MacOSVersion::MACOS_VER_15_1_PLUS)) {
    // On macOS < 15.1, MPS convolution kernel does not support output channels > 2^16
    for (auto elem : grad_output_t.sizes()) {
@ -417,36 +416,29 @@ static Tensor mps_convolution_backward_input(IntArrayRef input_size,
      assert(0 && "Check should have been done earlier\n");
  }

  MPSShape* gradOutputShape = getMPSShape(grad_output_t, memory_format);
  MPSShape* mps_input_shape = getMPSShape(input_size);
  NSString* ns_shape_key = [[gradOutputShape valueForKey:@"description"] componentsJoinedByString:@","];
  string key;
  if (is3DConv) {
    key = "mps_3d_convolution_backward_input:" + std::to_string(stride[0]) + ":" + std::to_string(stride[1]) + ":" +
        ":" + std::to_string(stride[2]) + std::to_string(dilation[0]) + ":" + std::to_string(dilation[1]) + ":" +
        std::to_string(dilation[2]) + ":" + std::to_string(padding[0]) + ":" + std::to_string(padding[1]) + ":" +
        std::to_string(padding[2]) + ":" + std::to_string(groups) + ":" + mem_format_key +
        getTensorsStringKey({grad_output_t, weight_t}) + ":" + string([ns_shape_key UTF8String]);
        getTensorsStringKey({grad_output_t, weight_t});

  } else {
    key = "mps_convolution_backward_input:" + std::to_string(stride[0]) + ":" + std::to_string(stride[1]) + ":" +
        std::to_string(dilation[0]) + ":" + std::to_string(dilation[1]) + ":" + std::to_string(padding[0]) + ":" +
        std::to_string(padding[1]) + ":" + std::to_string(groups) + ":" + mem_format_key +
        getTensorsStringKey({grad_output_t, weight_t}) + ":" + string([ns_shape_key UTF8String]);
        getTensorsStringKey({grad_output_t, weight_t});
  }
  auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
    MPSGraphTensor* gradOutputTensor =
        mpsGraphRankedPlaceHolder(mpsGraph, getMPSScalarType(grad_output_t), gradOutputShape);
    MPSGraphTensor* weightTensor = mpsGraphRankedPlaceHolder(mpsGraph, weight_t);
    auto gradOutputTensor = mpsGraphRankedPlaceHolder(mpsGraph, grad_output_t);
    auto weightTensor = mpsGraphRankedPlaceHolder(mpsGraph, weight_t);

    MPSGraphTensor* gradOutputTensorTranspose = gradOutputTensor;
    if (is_channels_last) {
      gradOutputTensorTranspose = mps::convertNHWCtoNCHW(mpsGraph, gradOutputTensorTranspose);
    }
    MPSGraphTensor* gradInputTensor;
    MPSShape* weightOutputShape = mps::getMPSShape(weight_t);
    // Depthwise conv is input feature channels = groups. So I in OIHW has to be 1.
    bool isDepthwiseConv = ((groups > 1 && (weightOutputShape[1].intValue == 1)) && gradOutputShape.count >= 4 &&
    bool isDepthwiseConv = ((groups > 1 && (weightOutputShape[1].intValue == 1)) && grad_output_t.ndimension() >= 4 &&
                            weightOutputShape.count >= 4 && !is_channels_last);

    if (is3DConv) {
@ -462,7 +454,7 @@ static Tensor mps_convolution_backward_input(IntArrayRef input_size,
                                             padding[1],
                                             padding[0],
                                             groups);
      gradInputTensor = [mpsGraph convolution3DDataGradientWithIncomingGradientTensor:gradOutputTensorTranspose
      gradInputTensor = [mpsGraph convolution3DDataGradientWithIncomingGradientTensor:gradOutputTensor
                                                                        weightsTensor:weightTensor
                                                                          outputShape:mps_input_shape
                                                         forwardConvolutionDescriptor:conv3dDescriptor_
@ -484,7 +476,7 @@ static Tensor mps_convolution_backward_input(IntArrayRef input_size,
                                                   withDimension:-4
                                                            name:nil];
      gradInputTensor =
          [mpsGraph depthwiseConvolution3DDataGradientWithIncomingGradientTensor:gradOutputTensorTranspose
          [mpsGraph depthwiseConvolution3DDataGradientWithIncomingGradientTensor:gradOutputTensor
                                                                   weightsTensor:weightTransposeTensor
                                                                     outputShape:mps_input_shape
                                                                      descriptor:depthWiseConv3dDescriptor_
@ -501,7 +493,7 @@ static Tensor mps_convolution_backward_input(IntArrayRef input_size,
                                            at::MemoryFormat::Contiguous,
                                            groups);

      gradInputTensor = [mpsGraph convolution2DDataGradientWithIncomingGradientTensor:gradOutputTensorTranspose
      gradInputTensor = [mpsGraph convolution2DDataGradientWithIncomingGradientTensor:gradOutputTensor
                                                                        weightsTensor:weightTensor
                                                                          outputShape:mps_input_shape
                                                         forwardConvolutionDescriptor:conv2dDescriptor_
@ -513,7 +505,7 @@ static Tensor mps_convolution_backward_input(IntArrayRef input_size,
    newCachedGraph->gradInputTensor_ = gradInputTensor;
  });

  auto gradOutputPlaceholder = Placeholder(cachedGraph->gradOutputTensor_, grad_output_t, gradOutputShape);
  auto gradOutputPlaceholder = Placeholder(cachedGraph->gradOutputTensor_, grad_output_t);
  auto weightsPlaceholder = Placeholder(cachedGraph->weightTensor_, weight_t);
  auto outputPlaceholder = Placeholder(cachedGraph->gradInputTensor_, *grad_input);
@ -490,17 +490,28 @@ static Tensor& tiled_bmm_out_mps_impl(const Tensor& batch1, const Tensor& batch2
      MPSDataType dtype = getMPSDataType(batch1);

      uint64_t elemInMatrix = resRows * resCols;
      // if largest supported batch size is zero, we need to split up the computation more
      uint64_t largestSupportedBatchSize = floor(pow(2, 32) / elemInMatrix);
      uint64_t batchSize = std::min(largestSupportedBatchSize, originalBatchSize);
      bool tileEachMatmul = largestSupportedBatchSize == 0;
      uint64_t batchSize = largestSupportedBatchSize > 0 ? std::min(largestSupportedBatchSize, originalBatchSize) : 1;
      uint64_t lastBatchSize = originalBatchSize % batchSize;

      uint64_t aRowsTiled = aRows;
      uint64_t resRowsTiled = resRows;
      if (tileEachMatmul) {
        uint64_t maxNumRows = floor(pow(2, 32) / resCols);
        aRowsTiled = std::min(uint64_t(512), maxNumRows);
        resRowsTiled = aRowsTiled;
      }
      uint64_t lastTileSize = aRows % aRowsTiled;

      id<MTLCommandBuffer> commandBuffer = mpsStream->commandBuffer();

      auto matmul = [[MPSNDArrayMatrixMultiplication alloc] initWithDevice:device sourceCount:2];

      MPSShape* aShape = @[ @(batchSize), @(aRows), @(aCols) ];
      MPSShape* aShape = @[ @(batchSize), @(aRowsTiled), @(aCols) ];
      MPSShape* bShape = @[ @(batchSize), @(bRows), @(bCols) ];
      MPSShape* resShape = @[ @(batchSize), @(resRows), @(resCols) ];
      MPSShape* resShape = @[ @(batchSize), @(resRowsTiled), @(resCols) ];
      auto aDesc_ = [MPSNDArrayDescriptor descriptorWithDataType:dtype shape:aShape];
      aDesc_.preferPackedRows = true;
      auto bDesc_ = [MPSNDArrayDescriptor descriptorWithDataType:dtype shape:bShape];
@ -515,18 +526,30 @@ static Tensor& tiled_bmm_out_mps_impl(const Tensor& batch1, const Tensor& batch2
      //.matrices is a readonly property so we need a separate descriptor.
      MPSNDArrayDescriptor *aDescLastBatch_, *bDescLastBatch_, *resDescLastBatch_;
      if (lastBatchSize != 0) {
        aDescLastBatch_ = [MPSNDArrayDescriptor descriptorWithDataType:dtype
                                                                 shape:@[ @(lastBatchSize), @(aRows), @(aCols) ]];
        aDescLastBatch_ =
            [MPSNDArrayDescriptor descriptorWithDataType:dtype shape:@[ @(lastBatchSize), @(aRowsTiled), @(aCols) ]];
        aDescLastBatch_.preferPackedRows = true;
        bDescLastBatch_ = [MPSNDArrayDescriptor descriptorWithDataType:dtype
                                                                 shape:@[ @(lastBatchSize), @(bRows), @(bCols) ]];
        bDescLastBatch_.preferPackedRows = true;
        resDescLastBatch_ =
            [MPSNDArrayDescriptor descriptorWithDataType:dtype shape:@[ @(lastBatchSize), @(resRows), @(resCols) ]];
            [MPSNDArrayDescriptor descriptorWithDataType:dtype
                                                   shape:@[ @(lastBatchSize), @(resRowsTiled), @(resCols) ]];
        resDescLastBatch_.preferPackedRows = true;
      }

      MPSNDArrayDescriptor *aDescLastTile_, *resDescLastTile_;
      if (lastTileSize != 0) {
        aDescLastTile_ = [MPSNDArrayDescriptor descriptorWithDataType:dtype
                                                                shape:@[ @(batchSize), @(lastTileSize), @(aCols) ]];
        aDescLastTile_.preferPackedRows = true;
        resDescLastTile_ =
            [MPSNDArrayDescriptor descriptorWithDataType:dtype shape:@[ @(batchSize), @(lastTileSize), @(resCols) ]];
        resDescLastTile_.preferPackedRows = true;
      }

      uint64_t requiredIterations = ceil(float(originalBatchSize) / batchSize);
      uint64_t requiredTileIterations = ceil(float(aRows) / aRowsTiled);
      auto aDesc = aDesc_;
      auto bDesc = bDesc_;
      auto resDesc = resDesc_;
@ -536,24 +559,30 @@ static Tensor& tiled_bmm_out_mps_impl(const Tensor& batch1, const Tensor& batch2
          bDesc = bDescLastBatch_;
          resDesc = resDescLastBatch_;
        }
        const uint64_t aArrayOffset = i * batchSize * aRows * aCols;
        const uint64_t bArrayOffset = i * batchSize * bRows * bCols;
        const uint64_t resArrayOffset = i * batchSize * resRows * resCols;
        for (const auto j : c10::irange(requiredTileIterations)) {
          if (j == requiredTileIterations - 1 && lastTileSize != 0) {
            aDesc = aDescLastTile_;
            resDesc = resDescLastTile_;
          }
          const uint64_t aArrayOffset = i * batchSize * aCols * aRows + j * aRowsTiled * aCols;
          const uint64_t bArrayOffset = i * batchSize * bCols * bRows;
          const uint64_t resArrayOffset = i * batchSize * resCols * resRows + j * resRowsTiled * resCols;

        auto aMatrix = [[[MPSNDArray alloc] initWithBuffer:aBuffer
                                                    offset:(batch1.storage_offset() + aArrayOffset) * aElemSize
                                                descriptor:aDesc] autorelease];
        auto bMatrix = [[[MPSNDArray alloc] initWithBuffer:bBuffer
                                                    offset:(batch2.storage_offset() + bArrayOffset) * bElemSize
                                                descriptor:bDesc] autorelease];
        auto resMatrix = [[[MPSNDArray alloc] initWithBuffer:resBuffer
                                                      offset:(result.storage_offset() + resArrayOffset) * resElemSize
                                                  descriptor:resDesc] autorelease];

        [matmul encodeToCommandEncoder:computeEncoder
                         commandBuffer:commandBuffer
                          sourceArrays:@[ aMatrix, bMatrix ]
                      destinationArray:resMatrix];
          auto aMatrix = [[[MPSNDArray alloc] initWithBuffer:aBuffer
                                                      offset:(batch1.storage_offset() + aArrayOffset) * aElemSize
                                                  descriptor:aDesc] autorelease];
          auto bMatrix = [[[MPSNDArray alloc] initWithBuffer:bBuffer
                                                      offset:(batch2.storage_offset() + bArrayOffset) * bElemSize
                                                  descriptor:bDesc] autorelease];
          auto resMatrix =
              [[[MPSNDArray alloc] initWithBuffer:resBuffer
                                           offset:(result.storage_offset() + resArrayOffset) * resElemSize
                                       descriptor:resDesc] autorelease];
          [matmul encodeToCommandEncoder:computeEncoder
                           commandBuffer:commandBuffer
                            sourceArrays:@[ aMatrix, bMatrix ]
                        destinationArray:resMatrix];
        }
      }
    }
  });
@ -568,15 +597,11 @@ static Tensor& bmm_out_mps_impl(const Tensor& batch1, const Tensor& batch2, Tens

  TORCH_CHECK(supportedFloatingOrComplexType(batch1), "MPS device does not support bmm for non-float inputs");

  // Currently unsupported if the matmul output goes over the 32-bit indexing limit
  TORCH_CHECK(
      batch1.size(1) * batch2.size(2) <= pow(2, 32),
      "Output size of the matrix multiplication is larger than currently supported by the MPS backend: ",
      batch1.size(1),
      ",",
      batch2.size(2),
      ", needs to be less than 2**32 elements.",
      "File a feature request for this use case against the MPS backend at https://github.com/pytorch/pytorch/issues");
  // Matmul not supported if any output dimension size is larger than 2**32
  for (auto elem : result.sizes()) {
    TORCH_CHECK_NOT_IMPLEMENTED(elem <= pow(2, 32),
                                "Output dim sizes larger than 2**32 elements for matmul not supported on MPS device.");
  }

  if (batch1.numel() == 0 || batch2.numel() == 0) {
    result.zero_();
@ -607,7 +632,7 @@ static Tensor& bmm_out_mps_impl(const Tensor& batch1, const Tensor& batch2, Tens
    }
  }

  // Check if we need to split the batch to do the computation
  // Call tiled implementation if the number of elements exceeds 2^32
  uint64_t resultSize = batch1.size(0) * batch1.size(1) * batch2.size(2);
  if (resultSize > pow(2, 32)) {
    result = tiled_bmm_out_mps_impl(batch1, batch2, result);
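The reworked tiling handles the case the old single TORCH_CHECK rejected outright: when even one result matrix exceeds the 2^32-element MPS NDArray limit (largestSupportedBatchSize == 0), batches are processed one matrix at a time and each matmul is further split into row tiles of at most 512 rows. A small sketch of that size arithmetic under the same assumptions (the struct and function names are ours):

```cpp
#include <algorithm>
#include <cstdint>

struct TilePlan {
  uint64_t batchSize;    // matrices per MPS dispatch
  uint64_t rowsPerTile;  // rows of A (and of the result) per dispatch
};

// Mirrors the tiled_bmm size computation above: keep every dispatched
// NDArray under 2^32 elements, tiling rows only when one matrix is too big.
TilePlan plan_bmm_tiles(uint64_t batch, uint64_t aRows, uint64_t resCols) {
  const uint64_t limit = 1ull << 32;
  const uint64_t elemsPerMatrix = aRows * resCols;
  const uint64_t maxBatch = limit / elemsPerMatrix;  // floor; may be 0
  if (maxBatch > 0) {
    return {std::min(maxBatch, batch), aRows};
  }
  // Single matrix already too large: one matrix per dispatch, tiled by rows.
  const uint64_t maxRows = limit / resCols;
  return {1, std::min<uint64_t>(512, maxRows)};
}
```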
@ -20,634 +20,6 @@
|
||||
namespace at::native {
|
||||
namespace mps {
|
||||
|
||||
struct ViewCachedGraph : public MPSCachedGraph {
|
||||
ViewCachedGraph(MPSGraph* graph) : MPSCachedGraph(graph) {}
|
||||
MPSGraphTensor* inputTensor = nil;
|
||||
MPSGraphTensor* outputTensor = nil;
|
||||
MPSGraphTensor* updatesTensor = nil;
|
||||
MPSGraphTensor* storageOffsetTensor = nil;
|
||||
std::vector<MPSGraphTensor*> strideTensors;
|
||||
};
|
||||
|
||||
static std::string getStridedKey(const ScalarType& self_dtype,
|
||||
const ScalarType& updates_dtype,
|
||||
const IntArrayRef& base_shape,
|
||||
const IntArrayRef& new_shape,
|
||||
const IntArrayRef& stride,
|
||||
int64_t storage_offset,
|
||||
bool is_scatter) {
|
||||
std::string dtype_key = getMPSTypeString(self_dtype);
|
||||
if (is_scatter) {
|
||||
dtype_key += ":" + getMPSTypeString(updates_dtype);
|
||||
}
|
||||
|
||||
return (is_scatter ? "scatter:" : "gather:") + dtype_key + "[" + getArrayRefString(base_shape) + "]:[" +
|
||||
getArrayRefString(new_shape) + "]:[" + getArrayRefString(stride) + "]:[" + std::to_string(storage_offset) + "]";
|
||||
}
|
||||
|
||||
// initializes the MTLBuffers for tensor data and runs the MPSGraph for the view op
|
||||
static Tensor& runViewGraph(ViewCachedGraph* cachedGraph, const at::Tensor& src, Tensor& output, bool needsScatter) {
|
||||
const id<MTLBuffer> sourceBuffer = getMTLBufferStorage(src);
|
||||
const id<MTLBuffer> outputBuffer = getMTLBufferStorage(output);
|
||||
|
||||
const IntArrayRef& strides = needsScatter ? output.strides() : src.strides();
|
||||
const IntArrayRef& sizes = needsScatter ? output.sizes() : src.sizes();
|
||||
const int64_t storage_offset = needsScatter ? output.storage_offset() : src.storage_offset();
|
||||
const MPSDataType inputType = [cachedGraph->inputTensor dataType];
|
||||
|
||||
MPSShape* inputShape = [cachedGraph->inputTensor shape];
|
||||
MPSShape* outputShape = needsScatter ? inputShape : getMPSShape(src);
|
||||
|
||||
MPSStream* stream = getCurrentMPSStream();
|
||||
@autoreleasepool {
|
||||
NSMutableDictionary* feeds = [[NSMutableDictionary new] autorelease];
|
||||
// in case of scatter, we use output tensor as input buffer and write the results back to the source buffer
|
||||
feeds[cachedGraph->inputTensor] =
|
||||
[[[MPSGraphTensorData alloc] initWithMTLBuffer:needsScatter ? outputBuffer : sourceBuffer
|
||||
shape:inputShape
|
||||
dataType:inputType] autorelease];
|
||||
if (needsScatter) {
|
||||
auto updatesType = getMPSScalarType(src.scalar_type());
|
||||
if (updatesType == MPSDataTypeUInt8) {
|
||||
updatesType = MPSDataTypeInt8;
|
||||
}
|
||||
|
||||
feeds[cachedGraph->updatesTensor] = [[[MPSGraphTensorData alloc] initWithMTLBuffer:sourceBuffer
|
||||
shape:getMPSShape(src.numel())
|
||||
dataType:updatesType] autorelease];
|
||||
}
|
||||
MPSScalar storageOffsetScalar = getMPSScalar(storage_offset, ScalarType::Int);
|
||||
feeds[cachedGraph->storageOffsetTensor] = getMPSGraphTensorFromScalar(stream, storageOffsetScalar);
|
||||
|
||||
std::vector<MPSScalar> strideScalars(sizes.size());
|
||||
for (const auto i : c10::irange(sizes.size())) {
|
||||
strideScalars[i] = getMPSScalar(strides[i], ScalarType::Int);
|
||||
feeds[cachedGraph->strideTensors[i]] = getMPSGraphTensorFromScalar(stream, strideScalars[i]);
|
||||
}
|
||||
// Workaround for MPSShaderLibrary bug in macOS Monterey
|
||||
// This is fixed in macOS Ventura
|
||||
auto outputType = getMPSScalarType(output.scalar_type());
|
||||
if (outputType == MPSDataTypeUInt8) {
|
||||
outputType = MPSDataTypeInt8;
|
||||
}
|
||||
MPSGraphTensorData* outputTensorData = [[[MPSGraphTensorData alloc] initWithMTLBuffer:outputBuffer
|
||||
shape:outputShape
|
||||
dataType:outputType] autorelease];
|
||||
auto results = @{cachedGraph->outputTensor : outputTensorData};
|
||||
runMPSGraph(stream, cachedGraph->graph(), feeds, results);
|
||||
}
|
||||
return output;
|
||||
}
|
||||
|
||||
static MPSGraphTensor* permuteTensor(MPSGraph* graph, MPSGraphTensor* inputTensor, NSArray* permuteOrder) {
|
||||
NSUInteger srcRank = [[inputTensor shape] count];
|
||||
if (srcRank != [permuteOrder count]) {
|
||||
return nil;
|
||||
}
|
||||
|
||||
MPSGraphTensor* outputTensor = inputTensor;
|
||||
std::vector<NSUInteger> dimensionOrder(srcRank);
|
||||
std::iota(std::begin(dimensionOrder), std::end(dimensionOrder), 0);
|
||||
|
||||
for (const auto i : c10::irange(srcRank)) {
|
||||
NSUInteger axis = [permuteOrder[i] integerValue];
|
||||
auto axisIter = std::find(dimensionOrder.begin(), dimensionOrder.end(), axis);
|
||||
NSUInteger axis1 = i;
|
||||
NSUInteger axis2 = axisIter - dimensionOrder.begin();
|
||||
iter_swap(dimensionOrder.begin() + i, axisIter);
|
||||
|
||||
outputTensor = [graph transposeTensor:outputTensor dimension:axis1 withDimension:axis2 name:nil];
|
||||
}
|
||||
|
||||
return outputTensor;
|
||||
}
|
||||
|
||||
static NSDictionary* getStrideToDimLengthOffsetDict(MPSGraphTensor* tensor, NSUInteger rank, NSUInteger offset) {
|
||||
// Assuming input tensor has default strides
|
||||
NSInteger stride = 1;
|
||||
NSMutableDictionary* strideToDimLengthOffset = [[NSMutableDictionary alloc] init];
|
||||
for (NSInteger srcDim = rank - 1; srcDim >= 0; srcDim--) {
|
||||
NSUInteger size = [[tensor shape][srcDim] integerValue];
|
||||
NSDictionary* entry = @{
|
||||
@"dim" : [NSNumber numberWithInteger:srcDim],
|
||||
@"length" : [tensor shape][srcDim],
|
||||
@"offset" : [NSNumber numberWithInteger:offset % size] // offset is determined traversing backwards through stride
|
||||
};
|
||||
[strideToDimLengthOffset setValue:entry forKey:[NSString stringWithFormat:@"%ld", stride]];
|
||||
offset /= size;
|
||||
stride *= size;
|
||||
}
|
||||
return strideToDimLengthOffset;
|
||||
}
|
||||
|
||||
// Detect only expand dims, allows for duplicate strides
|
||||
static MPSGraphTensor* asStridedLayer_expandDimsPattern(MPSGraph* graph,
|
||||
MPSGraphTensor* inputTensor,
|
||||
size_t dstRank,
|
||||
const IntArrayRef& dstSizes,
|
||||
const IntArrayRef& dstStrides,
|
||||
int offset) {
|
||||
NSUInteger srcRank = [[inputTensor shape] count];
|
||||
// Not an expand dims
|
||||
if (srcRank >= dstRank)
|
||||
return nil;
|
||||
|
||||
NSMutableArray* expandAxes = [[NSMutableArray alloc] init];
|
||||
|
||||
BOOL isValidExpand = YES;
|
||||
NSInteger currSrcDim = (NSInteger)srcRank - 1;
|
||||
NSUInteger currSrcStride = 1;
|
||||
for (NSInteger dstDim = dstRank - 1; dstDim >= 0 && isValidExpand; dstDim--) {
|
||||
NSUInteger currDimLength = dstSizes[dstDim];
|
||||
NSUInteger currStride = dstStrides[dstDim];
|
||||
NSUInteger currSrcDimLength = currSrcDim >= 0 ? [[inputTensor shape][currSrcDim] integerValue] : 1;
|
||||
|
||||
NSUInteger targetDimLength = currSrcDimLength;
|
||||
if (currDimLength != targetDimLength) {
|
||||
targetDimLength = 1;
|
||||
}
|
||||
if (currDimLength != targetDimLength || currStride != currSrcStride) {
|
||||
isValidExpand = NO;
|
||||
}
|
||||
if (currSrcDim >= 0 && currSrcDimLength == targetDimLength) {
|
||||
currSrcStride *= currSrcDimLength;
|
||||
currSrcDim--;
|
||||
} else {
|
||||
[expandAxes addObject:[NSNumber numberWithInt:dstDim]];
|
||||
}
|
||||
}
|
||||
|
||||
// Did not use every dimension of source
|
||||
if (!isValidExpand || currSrcDim >= 0) {
|
||||
[expandAxes release];
|
||||
return nil;
|
||||
}
|
||||
|
||||
MPSGraphTensor* expandTensor = inputTensor;
|
||||
if ([expandAxes count]) {
|
||||
expandTensor = [graph expandDimsOfTensor:expandTensor axes:expandAxes name:nil];
|
||||
}
|
||||
[expandAxes release];
|
||||
|
||||
return expandTensor;
|
||||
}
|
||||
|
||||
// Detect contiguous reshapes, no slicing
|
||||
static MPSGraphTensor* asStridedLayer_reshapePattern(MPSGraph* graph,
|
||||
MPSGraphTensor* inputTensor,
|
||||
size_t dstRank,
|
||||
const IntArrayRef& dstSizes,
|
||||
const IntArrayRef& dstStrides,
|
||||
int offset) {
|
||||
NSUInteger srcRank = [[inputTensor shape] count];
|
||||
// Not a reshape
|
||||
if (srcRank <= dstRank)
|
||||
return nil;
|
||||
|
||||
NSMutableArray* dstShape = [[NSMutableArray alloc] init];
|
||||
|
||||
BOOL isValidReshape = YES;
|
||||
NSInteger srcDim = srcRank - 1;
|
||||
NSUInteger srcStride = 1;
|
||||
for (NSInteger dstDim = dstRank - 1; dstDim >= 0 && isValidReshape; dstDim--) {
|
||||
NSUInteger currDimLength = dstSizes[dstDim];
|
||||
NSUInteger currStride = dstStrides[dstDim];
|
||||
[dstShape insertObject:[NSNumber numberWithInteger:currDimLength] atIndex:0];
|
||||
|
||||
NSUInteger targetDimLength = currDimLength;
|
||||
NSUInteger currReshapeSize = 1;
|
||||
NSUInteger innerStride = srcStride;
|
||||
|
||||
while (currReshapeSize != targetDimLength && srcDim >= 0) {
|
||||
NSUInteger srcDimLength = [[inputTensor shape][srcDim] integerValue];
|
||||
currReshapeSize *= srcDimLength;
|
||||
srcStride *= srcDimLength;
|
||||
srcDim--;
|
||||
};
|
||||
|
||||
isValidReshape &= (currReshapeSize == targetDimLength && currStride == innerStride);
|
||||
}
|
||||
isValidReshape &= (srcDim < 0);
|
||||
|
||||
MPSGraphTensor* outputTensor = nil;
|
||||
if (isValidReshape)
|
||||
outputTensor = [graph reshapeTensor:inputTensor withShape:dstShape name:nil];
|
||||
[dstShape release];
|
||||
return outputTensor;
|
||||
}
|
||||
|
||||
static MPSGraphTensor* asStridedLayer_genericPattern(MPSGraph* graph,
|
||||
MPSGraphTensor* inputTensor,
|
||||
size_t dstRank,
|
||||
const IntArrayRef& dstSizes,
|
||||
const IntArrayRef& dstStrides,
|
||||
int offset) {
|
||||
// Duplicate strides cannot be done
|
||||
{
|
||||
BOOL allUnique = YES;
|
||||
NSMutableSet* uniqueStrides = [[NSMutableSet alloc] init];
|
||||
for (NSUInteger dstDim = 0; (dstDim < dstRank) && allUnique; dstDim++) {
|
||||
int stride = dstStrides[dstDim];
|
||||
NSNumber* strideObj = [NSNumber numberWithInt:stride];
|
||||
allUnique &= (stride == 0 || ![uniqueStrides containsObject:strideObj]);
|
||||
[uniqueStrides addObject:strideObj];
|
||||
}
|
||||
[uniqueStrides release];
|
||||
if (!allUnique)
|
||||
return nil;
|
||||
|
||||
// Skip for zero in dst shape
|
||||
for (NSUInteger dstDim = 0; dstDim < dstRank; dstDim++)
|
||||
if (dstSizes[dstDim] == 0) {
|
||||
return nil;
|
||||
}
|
||||
}
|
||||
|
||||
  // 1. Flatten the inputTensor if necessary
  MPSGraphTensor* flatInputTensor = inputTensor;
  {
    // Flatten inputs to remove duplicate strides.
    NSMutableArray* squeezeAxes = [[NSMutableArray alloc] init];
    for (NSUInteger srcDim = 1; srcDim < [[flatInputTensor shape] count]; srcDim++) {
      if ([[flatInputTensor shape][srcDim] intValue] == 1)
        [squeezeAxes addObject:[NSNumber numberWithInteger:srcDim]];
    }
    // We have to leave at least 1 dimension, if all input dims are 1
    if ([squeezeAxes count])
      flatInputTensor = [graph squeezeTensor:flatInputTensor axes:squeezeAxes name:nil];
    [squeezeAxes release];
  }

  int srcRank = (int)[[flatInputTensor shape] count];
  NSDictionary* srcStrideToDimLengthOffset = getStrideToDimLengthOffsetDict(flatInputTensor, srcRank, offset);

  // Populate the dimension order, slice info, and broadcast info
  NSMutableArray* dstDimOrder = [[NSMutableArray alloc] init];
  std::vector<int32_t> dstDimToSliceLength(dstRank);
  std::vector<int32_t> dstDimToSliceOffset(dstRank);
  bool needsBroadcast = false;
  {
    for (auto dstDim = dstRank - 1; dstDim >= 0; dstDim--) {
      if (dstStrides[dstDim] == 0) {
        // This dimension should be a broadcast
        needsBroadcast = true;
        dstDimToSliceLength[dstDim] = dstSizes[dstDim];
        dstDimToSliceOffset[dstDim] = 0;
      } else {
        // Find what dimension and native length was for the specified stride
        NSDictionary* srcDimLengthOffset =
            srcStrideToDimLengthOffset[[NSString stringWithFormat:@"%lld", dstStrides[dstDim]]];

        dstDimToSliceLength[dstDim] = dstSizes[dstDim];
        dstDimToSliceOffset[dstDim] = [srcDimLengthOffset[@"offset"] intValue];

        // Stride does not exist in source tensor, or the specified size is too long. Not possible
        // TODO: Longer length with same stride + removal of dim(s) above this is a flatten/reshape. Consider adding
        // support
        if (!srcDimLengthOffset ||
            // the offset + length of destination should not be larger than source's length when slicing
            dstDimToSliceOffset[dstDim] + dstDimToSliceLength[dstDim] > [srcDimLengthOffset[@"length"] intValue]) {
          return nil;
        }
        // Get the src dimension corresponding to the requested stride
        NSNumber* srcDim = srcDimLengthOffset[@"dim"];
        [dstDimOrder insertObject:srcDim atIndex:0];
      }
    }
  }

  // 2. Slice out any unused dimensions
  NSMutableArray* missingSrcDims = [[NSMutableArray alloc] init];
  MPSGraphTensor* slicedUnusedTensor = flatInputTensor;
  {
    // Find any src strides/dims that are not present in the dst
    NSMutableArray* missingSrcStrides = [[NSMutableArray alloc] init];
    {
      NSUInteger stride = 1;
      for (NSInteger srcDim = [[flatInputTensor shape] count] - 1; srcDim >= 0; srcDim--) {
        [missingSrcStrides addObject:[NSNumber numberWithInteger:stride]];
        stride *= [[flatInputTensor shape][srcDim] integerValue];
      }
      for (NSUInteger dstDim = 0; dstDim < dstRank; dstDim++) {
        [missingSrcStrides removeObject:[NSNumber numberWithInteger:dstStrides[dstDim]]];
      }
    }
    for (NSUInteger i = 0; i < [missingSrcStrides count]; i++) {
      NSUInteger stride = [missingSrcStrides[i] integerValue];
      NSDictionary* srcDimLengthOffset = srcStrideToDimLengthOffset[[NSString stringWithFormat:@"%ld", stride]];
      NSNumber* missingSrcDim = srcDimLengthOffset[@"dim"];
      [missingSrcDims addObject:missingSrcDim];
      [dstDimOrder insertObject:missingSrcDim atIndex:0];

      slicedUnusedTensor = [graph sliceTensor:slicedUnusedTensor
                                    dimension:[missingSrcDim intValue]
                                        start:[srcDimLengthOffset[@"offset"] intValue]
                                       length:1
                                         name:nil];
    }
    [missingSrcStrides release];
  }

  // 3. Transpose if necessary
  MPSGraphTensor* transposedTensor = slicedUnusedTensor;
  {
    // TODO: Use Transpose API
    BOOL needsTranspose = NO;
    for (NSUInteger dstDim = 0; dstDim < [dstDimOrder count] && !needsTranspose; dstDim++)
      needsTranspose |= ([dstDimOrder[dstDim] intValue] != static_cast<int>(dstDim));
    if (needsTranspose)
      transposedTensor = permuteTensor(graph, transposedTensor, dstDimOrder);
  }

  // 4. Squeeze any unused dimensions following transpose
  MPSGraphTensor* squeezedTensor = transposedTensor;
  {
    // Transpose the missing dims back
    NSMutableArray* transposedMissingSrcDims = [[NSMutableArray alloc] init];
    for (NSUInteger dstDim = 0; dstDim < [dstDimOrder count]; dstDim++) {
      NSNumber* srcDim = dstDimOrder[dstDim];
      if ([missingSrcDims containsObject:srcDim])
        [transposedMissingSrcDims addObject:[NSNumber numberWithInt:dstDim]];
    }
    if ([transposedMissingSrcDims count])
      squeezedTensor = [graph squeezeTensor:squeezedTensor axes:transposedMissingSrcDims name:nil];
    [transposedMissingSrcDims release];
  }

  // 5. Slice
  MPSGraphTensor* slicedTensor = squeezedTensor;
  {
    NSUInteger currDstDim = 0;
    for (NSUInteger dstDim = 0; dstDim < dstRank; dstDim++) {
      // Only dstDims with nonzero stride are in the current tensor, skip broadcasts
      if (dstStrides[dstDim] != 0) {
        int start = dstDimToSliceOffset[dstDim];
        int length = dstDimToSliceLength[dstDim];
        if (length != [[slicedTensor shape][currDstDim] intValue])
          slicedTensor = [graph sliceTensor:slicedTensor dimension:currDstDim start:start length:length name:nil];
        currDstDim++;
      }
    }
  }

  // 6. Expand then broadcast the source tensor
  MPSGraphTensor* broadcastTensor = slicedTensor;
  if (needsBroadcast) {
    NSMutableArray* broadcastShape = [[NSMutableArray alloc] init];
    NSMutableArray* expandAxes = [[NSMutableArray alloc] init];
    for (NSUInteger dstDim = 0; dstDim < dstRank; dstDim++) {
      [broadcastShape addObject:[NSNumber numberWithInt:dstSizes[dstDim]]];
      if (dstStrides[dstDim] == 0)
        [expandAxes addObject:[NSNumber numberWithInt:dstDim]];
    }

    if ([expandAxes count]) {
      MPSGraphTensor* expandTensor = [graph expandDimsOfTensor:broadcastTensor axes:expandAxes name:nil];
      broadcastTensor = [graph broadcastTensor:expandTensor toShape:broadcastShape name:nil];
    }
    [broadcastShape release];
    [expandAxes release];
  }

  [srcStrideToDimLengthOffset release];
  [dstDimOrder release];
  [missingSrcDims release];

  return broadcastTensor;
}

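// Worked example for asStridedLayer_genericPattern above (illustrative values):
// viewing a contiguous [3, 4] tensor with dstSizes = [4, 3], dstStrides = [1, 4],
// offset = 0 (a transpose). The stride->dim lookup maps stride 4 -> src dim 0
// and stride 1 -> src dim 1, so dstDimOrder becomes [1, 0]; no src dims are
// unused (step 2), step 3 permutes with [1, 0], and steps 4-6 are no-ops since
// nothing is squeezed, sliced, or broadcast. A request whose stride has no
// match in the source (e.g. dstStrides = [3, 1]) bails out with nil and is
// handled by the gather fallback instead.
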
static MPSGraphTensor* asStridedLayer_pattern(MPSGraph* graph,
                                              MPSGraphTensor* inputTensor,
                                              size_t dstRank,
                                              const IntArrayRef& dstSizes,
                                              const IntArrayRef& dstStrides,
                                              int offset) {
  if (!dstRank)
    return nil;

  MPSGraphTensor* outputTensor = nil;
  outputTensor = asStridedLayer_expandDimsPattern(graph, inputTensor, dstRank, dstSizes, dstStrides, offset);
  if (!outputTensor)
    outputTensor = asStridedLayer_reshapePattern(graph, inputTensor, dstRank, dstSizes, dstStrides, offset);
  if (!outputTensor)
    outputTensor = asStridedLayer_genericPattern(graph, inputTensor, dstRank, dstSizes, dstStrides, offset);

  return outputTensor;
}

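// The three patterns above are tried from cheapest to most general: pure
// expand-dims, then reshape, then the permute/slice/broadcast pattern. A nil
// result means none applies, and chainViewOperation below falls back to a
// gather along computed flat indices. Illustrative caller-side views (example
// values, assuming an MPS-enabled libtorch build):
//   auto t = torch::rand({3, 4}, torch::kMPS);
//   auto a = t.as_strided({4, 3}, {1, 4});  // expressible as a pattern
//   auto b = t.as_strided({2, 2}, {5, 2});  // stride 5 matches nothing: gather fallback
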
static std::vector<int64_t> getViewShape(const TensorBase& src, MPSShape* mpsShape, const bool squeeze) {
  bool hasMPSShape = (mpsShape != nil);
  std::vector<int64_t> src_view_shape;
  if (hasMPSShape) {
    int src_ndim_view = [mpsShape count];
    if (squeeze) {
      for (const auto i : c10::irange(src_ndim_view)) {
        if ([mpsShape[i] intValue] == 1)
          continue;
        src_view_shape.emplace_back([mpsShape[i] intValue]);
      }
    } else {
      src_view_shape.resize(src_ndim_view);
      for (const auto i : c10::irange(src_ndim_view)) {
        src_view_shape[i] = [mpsShape[i] intValue];
      }
    }
  } else {
    if (squeeze) {
      IntArrayRef src_shape = src.sizes();
      size_t src_ndim_view = src_shape.size();
      for (const auto i : c10::irange(src_ndim_view)) {
        if (src_shape[i] == 1)
          continue;
        src_view_shape.emplace_back(src_shape[i]);
      }
    } else {
      src_view_shape = src.sizes().vec();
    }
  }

  return src_view_shape;
}

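// e.g. squeeze == true turns an MPSShape of @[ @1, @3, @1, @4 ] (or tensor
// sizes [1, 3, 1, 4]) into {3, 4}; squeeze == false copies the shape verbatim.
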
static std::vector<int64_t> getSqueezedBaseShape(const Tensor& src, IntArrayRef shape) {
  std::vector<int64_t> src_base_shape;
  for (const auto i : c10::irange(shape.size())) {
    if (shape[i] == 1)
      continue;
    src_base_shape.emplace_back(shape[i]);
  }

  return src_base_shape;
}

bool canSliceViewTensor(const TensorBase& src, MPSShape* mpsShape) {
  if (!src.is_contiguous()) {
    return false;
  }

  IntArrayRef src_base_shape = getIMPSAllocator()->getBufferShape(src.storage().data());
  size_t src_ndim_base = src_base_shape.size();
  std::vector<int64_t> src_view_shape = getViewShape(src, mpsShape, false);
  size_t src_ndim_view = src_view_shape.size();

  if (src_ndim_base != src_ndim_view) {
    return false;
  }

  for (const auto i : c10::irange(src_ndim_base)) {
    if (src_view_shape[i] > src_base_shape[i]) {
      return false;
    }
  }
  return true;
}

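// Example: a contiguous tensor whose buffer was registered with base shape
// [5, 4] can serve a [2, 4] (or [5, 4]) view as a zero-copy NDArray slice, but
// a [6, 4] view (longer than the base in some dim) or a [20] view (different
// rank) is rejected here, as is any non-contiguous source.
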
MPSGraphTensorData* getMPSGraphTensorDataForView(const TensorBase& src,
                                                 MPSShape* mpsShape,
                                                 const MPSDataType mpsDataType) {
  IntArrayRef src_base_shape = getIMPSAllocator()->getBufferShape(src.storage().data());
  size_t src_ndim_base = src_base_shape.size();
  std::vector<int64_t> src_view_shape = getViewShape(src, mpsShape, false);
  size_t src_ndim_view = src_view_shape.size();

  MPSNDArray* srcTensorNDArrayView = nil;
  MPSNDArrayDescriptor* srcTensorNDArrayDesc = nil;
  MPSNDArray* srcTensorNDArray = nil;
  id<MTLCommandBuffer> commandBuffer = getCurrentMPSStream()->commandBuffer();
  size_t base_idx = 0;

  std::vector<int64_t> src_base_shape_vec;

  if (src_ndim_view != src_ndim_base) {
    src_base_shape_vec.reserve(src_ndim_view);
    for (const auto i : c10::irange(src_ndim_view)) {
      if (src_view_shape[i] == 1 && src_base_shape[base_idx] != 1) {
        src_base_shape_vec.emplace_back(1);
      } else {
        src_base_shape_vec.emplace_back(src_base_shape[base_idx]);
        if (base_idx < src_ndim_base - 1)
          base_idx += 1;
      }
    }
    src_base_shape = IntArrayRef(src_base_shape_vec);
    src_ndim_base = src_base_shape.size();
  }

  srcTensorNDArray = ndArrayFromTensor(src, getMPSShape(src_base_shape), mpsDataType);
  srcTensorNDArrayDesc = srcTensorNDArray.descriptor;

  size_t firstDimToSlice = 0;
  while (src_base_shape[firstDimToSlice] == src_view_shape[firstDimToSlice]) {
    firstDimToSlice++;
  }

  int64_t view_numel = 1;
  for (const auto i : c10::irange(firstDimToSlice + 1, src_base_shape.size())) {
    view_numel *= src_base_shape[i];
  }

  int64_t sliceOffset = src.storage_offset() / view_numel;
  [srcTensorNDArrayDesc
      sliceDimension:src_ndim_base - 1 - firstDimToSlice
        withSubrange:{static_cast<NSUInteger>(sliceOffset), static_cast<NSUInteger>(src.sizes()[firstDimToSlice])}];

  // Slice any remaining dimensions
  for (const auto crtSliceOffset : c10::irange(firstDimToSlice + 1, src_base_shape.size())) {
    if (src_view_shape[crtSliceOffset] != src_base_shape[crtSliceOffset]) {
      if (crtSliceOffset == src_base_shape.size() - 1) {
        sliceOffset = src.storage_offset() % src_base_shape[src_base_shape.size() - 1];
      } else {
        sliceOffset = (src.storage_offset() % view_numel) / (view_numel / src_base_shape[crtSliceOffset]);
      }
      [srcTensorNDArrayDesc
          sliceDimension:src_ndim_base - 1 - crtSliceOffset
            withSubrange:{static_cast<NSUInteger>(sliceOffset), static_cast<NSUInteger>(src.sizes()[crtSliceOffset])}];
    }
  }
  srcTensorNDArrayView = [srcTensorNDArray arrayViewWithCommandBuffer:commandBuffer
                                                           descriptor:srcTensorNDArrayDesc
                                                             aliasing:MPSAliasingStrategyShallAlias];

  return [[[MPSGraphTensorData alloc] initWithMPSNDArray:srcTensorNDArrayView] autorelease];
}

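// Offset arithmetic above on concrete values: base shape [5, 4, 3], view shape
// [2, 4, 3], storage_offset 24. Then firstDimToSlice = 0, view_numel = 4 * 3 = 12,
// and sliceOffset = 24 / 12 = 2: the descriptor slices rows 2..3 of the
// outermost dim (MPSNDArray dimensions are indexed innermost-first, hence the
// src_ndim_base - 1 - firstDimToSlice flip) and the view aliases the buffer
// without a copy.
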
static MPSGraphTensor* chainViewOperation(ViewCachedGraph* cachedGraph,
                                          const IntArrayRef& size,
                                          const IntArrayRef& stride,
                                          int64_t offset,
                                          const IntArrayRef& base_shape,
                                          bool needsScatter,
                                          MPSGraphTensor* updatesTensor) {
  MPSGraph* mpsGraph = cachedGraph->graph();
  MPSGraphTensor* outputTensor = nil;
  const size_t shape_size = size.size();

  @autoreleasepool {
    std::vector<int32_t> sizeArray(shape_size);
    const int64_t int_max = std::numeric_limits<int32_t>::max();
    for (const auto i : c10::irange(shape_size)) {
      TORCH_CHECK(size[i] <= int_max);
      sizeArray[i] = static_cast<int32_t>(size[i]);
    }
    NSData* shapeData = [NSData dataWithBytes:sizeArray.data() length:shape_size * sizeof(int32_t)];
    MPSGraphTensor* shapeTensor = [mpsGraph constantWithData:shapeData
                                                       shape:@[ [NSNumber numberWithUnsignedInteger:shape_size] ]
                                                    dataType:MPSDataTypeInt32];
    MPSGraphTensor* indicesTensor = nil;
    // create stride Tensors for each rank of the input tensor
    for (int i = 0; i < static_cast<int>(shape_size); i++) {
      MPSGraphTensor* rangeTensor = [mpsGraph coordinateAlongAxis:(-i - 1) withShapeTensor:shapeTensor name:nil];
      MPSGraphTensor* strideTensor = cachedGraph->strideTensors[shape_size - i - 1];
      MPSGraphTensor* indexTensor = [mpsGraph multiplicationWithPrimaryTensor:rangeTensor
                                                              secondaryTensor:strideTensor
                                                                         name:nil];
      if (!indicesTensor) {
        indicesTensor = indexTensor;
      } else {
        indicesTensor = [mpsGraph additionWithPrimaryTensor:indexTensor secondaryTensor:indicesTensor name:nil];
      }
    }

    indicesTensor = [mpsGraph additionWithPrimaryTensor:indicesTensor
                                        secondaryTensor:cachedGraph->storageOffsetTensor
                                                   name:nil];
    MPSGraphTensor* inputTensor = cachedGraph->inputTensor;

    if (!needsScatter) {
      MPSGraphTensor* outputTensor = asStridedLayer_pattern(mpsGraph, inputTensor, shape_size, size, stride, offset);
      if (outputTensor) {
        return outputTensor;
      }
    }

    MPSGraphTensor* reshapedInputTensor = [mpsGraph reshapeTensor:inputTensor withShape:@[ @-1 ] name:nil];
    MPSGraphTensor* reshapedIndicesTensor = [mpsGraph reshapeTensor:indicesTensor withShape:@[ @-1 ] name:nil];
    if (needsScatter) {
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wobjc-method-access"
      MPSGraphTensor* scatteredTensor = [mpsGraph scatterAlongAxis:(NSInteger)0
                                                    withDataTensor:reshapedInputTensor
                                                     updatesTensor:updatesTensor
                                                     indicesTensor:reshapedIndicesTensor
                                                              mode:MPSGraphScatterModeSet
                                                              name:nil];
#pragma clang diagnostic pop
      outputTensor = [mpsGraph reshapeTensor:scatteredTensor withShape:getMPSShape(base_shape) name:nil];
    } else {
      // Call gather to coalesce the needed values. Result will be of same shape as flattened indices tensor
      MPSGraphTensor* gatheredTensor = [mpsGraph gatherWithUpdatesTensor:reshapedInputTensor
                                                           indicesTensor:reshapedIndicesTensor
                                                                    axis:0
                                                         batchDimensions:0
                                                                    name:nil];
      // Reshape the data to desired size
      outputTensor = [mpsGraph reshapeTensor:gatheredTensor withShapeTensor:shapeTensor name:nil];
    }
  }
  return outputTensor;
}

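// The gather fallback materializes the flat index of every output element:
//   index(i_0, ..., i_{k-1}) = storage_offset + sum_d i_d * stride_d
// e.g. size = [2, 2], stride = [1, 2], offset = 0 over a contiguous 2x2 input
// yields indicesTensor [[0, 2], [1, 3]]; gathering those flat indices produces
// the transpose. The scatter direction uses the same indices to write updates
// back into the base storage.
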
static IntArrayRef updateTensorBaseShape(const Tensor& self) {
  IntArrayRef base_shape = getIMPSAllocator()->getBufferShape(self.storage().data());
  // if there's no base_shape stored in MPSAllocator, then infer it from tensor's size and store it
@@ -666,69 +38,30 @@ static IntArrayRef updateTensorBaseShape(const Tensor& self) {
  return base_shape;
}

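// The base shape recorded by the allocator (the shape the MTLBuffer was first
// registered with) is what the view graphs below are keyed on and resolved
// against, so chains of views always map back to the original storage layout.
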
// There are a few cases we need to consider:
// Here the nodes are the Tensors and the edges are the operations performed on
// the Tensor. As a result of the operation performed, the result can be a View
// Tensor (View T) or a Non view tensor (NonView T). The difference is whether
// it is mapped to the same underlying storage ptr or a new MTLBuffer was
// allocated.
//                T = Tensor
//                 ----------
//                 | Orig T |
//                 ----------
//                /     |     \
//             View T  View T NonView T
//             /      /    \      |
//            View T /      \     |
//            |     /        \    |
//            |    /          \   |
//            |   /            \  |
//            NonView T         NonView T
static ViewCachedGraph* createViewGraph(const Tensor& self,
                                        const Tensor& updates,
                                        IntArrayRef size,
                                        IntArrayRef stride,
                                        int64_t storage_offset,
                                        bool needsScatter) {
  IntArrayRef base_shape = updateTensorBaseShape(self);

  @autoreleasepool {
    string key = getStridedKey(
        self.scalar_type(), updates.scalar_type(), base_shape, size, stride, storage_offset, needsScatter);
    return LookUpOrCreateCachedGraph<ViewCachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
      MPSGraphTensor* updatesTensor = nil;
      // Workaround for MPSShaderLibrary bug in macOS Monterey
      // This is fixed in macOS Ventura
      auto inputType = getMPSScalarType(self.scalar_type());
      if (inputType == MPSDataTypeUInt8) {
        inputType = MPSDataTypeInt8;
      }

      // Self is the input tensor we are creating the view of
      newCachedGraph->inputTensor = mpsGraphRankedPlaceHolder(mpsGraph, inputType, getMPSShape(base_shape));
      newCachedGraph->storageOffsetTensor = mpsGraphRankedPlaceHolder(mpsGraph, MPSDataTypeInt32, @[ @1 ]);
      for ([[maybe_unused]] const auto i : c10::irange(size.size())) {
        newCachedGraph->strideTensors.push_back(mpsGraphRankedPlaceHolder(mpsGraph, MPSDataTypeInt32, @[ @1 ]));
      }
      if (needsScatter) {
        auto updatesType = getMPSScalarType(updates.scalar_type());
        if (updatesType == MPSDataTypeUInt8) {
          updatesType = MPSDataTypeInt8;
        }
        newCachedGraph->updatesTensor = mpsGraphRankedPlaceHolder(mpsGraph, updatesType, getMPSShape(self.numel()));
        updatesTensor = newCachedGraph->updatesTensor;
        if (inputType != updatesType) {
          updatesTensor = [mpsGraph castTensor:updatesTensor toType:inputType name:@"castUpdatesTensor"];
        }
      }
      newCachedGraph->outputTensor =
          chainViewOperation(newCachedGraph, size, stride, storage_offset, base_shape, needsScatter, updatesTensor);
    });
  }
}

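// Graphs are cached on the tuple produced by getStridedKey (input/updates
// dtypes, base shape, view size/stride/offset, and the scatter flag), so
// repeated identical views of same-shaped storage reuse one compiled graph;
// strides and the storage offset are fed in at run time as Int32 placeholders.
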
// For both the scatter and gather kernels, there are 4 specialized ones (for 1D to 4D tensors)
// and one generic one for 5+D tensors. The assumption (to be tested) about the specialized
// kernels is that reduction of an n-dimensional vector, where n is 2, should be slower
// than reduction of a 2D one, as n is not known at compile time, so the compiler
// cannot unroll the loop; that is
// float sum(float* v, int n) {
//   float rc = 0;
//   for (int idx = 0; idx < n; idx++)
//     rc += v[idx];
//   return rc;
// }
// would be slower than
// float sum2(float* v) { return v[0] + v[1]; }
//
// TODOs:
//  - Benchmark whether or not this is really the case
//  - Instantiate the specialized kernels from a template
//  - Have proper error checking for 64-bit tensors
//  - Add flavors for 64-bit tensors
//  - Merge the scatter and gather templates together, as they are more or less alike

static std::string getGatherScatterFunctionName(ScalarType scalarType, int64_t dim, bool needsScatter) {
  std::string kernelName = needsScatter ? "scatter" : "gather";
  return kernelName + "_kernel_" + (dim < 5 ? std::to_string(dim == 0 ? 1 : dim) : "n");
}

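// e.g. the naming above gives getGatherScatterFunctionName(kFloat, 3, false)
// -> "gather_kernel_3"; dim 0 (a scalar) shares the 1D kernel
// ("gather_kernel_1"), and any dim >= 5 selects the generic variant
// ("gather_kernel_n"). The scalarType argument is unused in the name itself.
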
static std::string genScatterGatherCvtFunc(const std::string& dtypeSrc, const std::string& dtypeDst, bool needsConj) {
@@ -777,12 +110,6 @@ Tensor gatherViewTensor(const at::Tensor& src, at::Tensor& dst) {
    return dst;
  }

  if (src.dim() > 5) {
    ViewCachedGraph* cachedGraph =
        createViewGraph(src, dst, src.sizes(), src.strides(), src.storage_offset(), /*needsScatter*/ false);
    return runViewGraph(cachedGraph, src, dst.has_storage() ? dst : output, /*needsScatter*/ false);
  }

  uint32_t numThreads = output.numel();

  MPSStream* mpsStream = getCurrentMPSStream();
@@ -813,6 +140,9 @@ Tensor gatherViewTensor(const at::Tensor& src, at::Tensor& dst) {

  [computeEncoder setComputePipelineState:gatherPSO];
  mtl_setArgs(computeEncoder, src, dst.has_storage() ? dst : output, src_sizes, src_strides, numThreads);
  if (src.dim() > 4) {
    mtl_setBytes<int32_t>(computeEncoder, src.dim(), 5);
  }
  mtl_dispatch1DJob(computeEncoder, gatherPSO, numThreads);

  getMPSProfiler().endProfileKernel(gatherPSO);
@@ -822,15 +152,6 @@ Tensor gatherViewTensor(const at::Tensor& src, at::Tensor& dst) {
}

Tensor& scatterViewTensor(const at::Tensor& src, at::Tensor& output) {
  if (output.dim() > 5) {
    ViewCachedGraph* cachedGraph = createViewGraph(output.is_complex() ? at::view_as_real(output) : output,
                                                   src,
                                                   output.sizes(),
                                                   output.strides(),
                                                   output.storage_offset(),
                                                   /*needsScatter*/ true);
    return runViewGraph(cachedGraph, src, output, /*needsScatter*/ true);
  }
  if (src.numel() == 0 || output.numel() == 0) {
    return output;
  }
@@ -865,6 +186,9 @@ Tensor& scatterViewTensor(const at::Tensor& src, at::Tensor& output) {

  [computeEncoder setComputePipelineState:scatterPSO];
  mtl_setArgs(computeEncoder, src, output, output_sizes, output_strides, numThreads);
  if (output.dim() > 4) {
    mtl_setBytes<int32_t>(computeEncoder, output.dim(), 5);
  }
  mtl_dispatch1DJob(computeEncoder, scatterPSO, numThreads);

  getMPSProfiler().endProfileKernel(scatterPSO);

@@ -705,6 +705,14 @@ bool can_use_mem_efficient_attention(sdp_params const& params, bool debug) {
  }

#ifdef USE_ROCM
  if (params.attn_mask.has_value()) {
    const auto q_dtype = params.query.dtype();
    const auto bias_dtype = params.attn_mask.value().dtype();
    if (bias_dtype != at::kBool && bias_dtype != q_dtype) {
      TORCH_WARN("Efficient attention on ROCm requires attn_mask to be boolean, or to have the same datatype as q, k, and v");
      return false;
    }
  }
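  // e.g. float16 q/k/v with a float32 attn_mask is rejected above, while a
  // bool mask (or one cast to the q/k/v dtype) passes on to the dtype check below.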
  return check_tensor_dtype(params, aotriton_mem_efficient_dtypes, debug);
#else
  auto dprop = at::cuda::getCurrentDeviceProperties();