Make M1 tests green (#82213)

This skips all the currently failing tests and adds a new master job to test on M1. The skips all follow the same pattern, sketched below.
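
A minimal self-contained sketch of that pattern: the `IS_ARM64` flag and the `skipIf` decorator are copied from this PR's diff (see `torch/testing/_internal/common_utils.py` and the test files below); the `ExampleTest` class and `test_example` method are hypothetical, for illustration only.

    import platform
    import unittest

    # Added to torch/testing/_internal/common_utils.py in this PR:
    # Apple Silicon machines report 'arm64' from platform.machine()
    IS_ARM64 = platform.machine() == 'arm64'

    class ExampleTest(unittest.TestCase):
        # Each known-failing test gets this decorator until it is fixed on M1
        @unittest.skipIf(IS_ARM64, "Not working on arm")
        def test_example(self):
            pass

    if __name__ == "__main__":
        unittest.main()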

Pull Request resolved: https://github.com/pytorch/pytorch/pull/82213
Approved by: https://github.com/seemethere, https://github.com/soulitzer, https://github.com/malfet
albanD
2022-08-05 16:12:06 +00:00
committed by PyTorch MergeBot
parent 1cafb1027f
commit 2255911f8a
20 changed files with 175 additions and 74 deletions

View File

@@ -27,6 +27,12 @@ on:
description: |
If this is set, our linter will use this to make sure that every other
job with the same `sync-tag` is identical.
python_version:
required: false
type: string
default: "3.8"
description: |
The python version to be used. Will be 3.8 by default
secrets:
MACOS_SCCACHE_S3_ACCESS_KEY_ID:
@@ -68,7 +74,7 @@ jobs:
uses: conda-incubator/setup-miniconda@v2
with:
auto-update-conda: true
python-version: 3.8
python-version: ${{ inputs.python_version }}
activate-environment: build
miniconda-version: 4.7.12

View File

@@ -41,7 +41,7 @@ jobs:
- name: Install PyTorch
env:
ENV_NAME: conda-test-env-${{ github.run_id }}
PY_VERS: 3.8
PY_VERS: 3.9
shell: arch -arch arm64 bash {0}
run: |
# shellcheck disable=SC1090

View File

@@ -18,6 +18,11 @@ on:
description: |
If this is set, our linter will use this to make sure that every other
job with the same `sync-tag` is identical.
arch:
required: true
type: string
description: |
Contains the architecture to run the tests with
secrets:
AWS_OSSCI_METRICS_V2_ACCESS_KEY_ID:
@@ -27,15 +32,16 @@ on:
required: true
description: secret access key for test stats upload
# For setup-miniconda, see https://github.com/conda-incubator/setup-miniconda/issues/179
defaults:
run:
shell: bash -e -l {0}
jobs:
test:
# Don't run on forked repos.
if: github.repository_owner == 'pytorch'
# For setup-miniconda, see https://github.com/conda-incubator/setup-miniconda/issues/179
# Also ensure that we always run with the right architecture
defaults:
run:
shell: arch -arch ${{ inputs.arch }} bash -e -l {0}
strategy:
matrix: ${{ fromJSON(inputs.test-matrix) }}
fail-fast: false
@@ -57,7 +63,6 @@ jobs:
- name: Start monitoring script
id: monitor-script
shell: bash
run: |
python3 -m pip install psutil==5.9.1
python3 -m pip install pynvml==11.4.1
@@ -70,7 +75,8 @@ jobs:
name: ${{ inputs.build-environment }}
use-gha: true
- name: Setup miniconda
- name: Setup miniconda for x86
if: inputs.build-environment == 'macos-12-py3-x86-64'
uses: conda-incubator/setup-miniconda@v2
with:
auto-update-conda: true
@@ -78,6 +84,16 @@ jobs:
activate-environment: build
miniconda-version: 4.7.12
- name: Setup miniconda for arm64
if: inputs.build-environment == 'macos-12-py3-arm64'
run: |
# Conda is already installed and setup for bash here
# Cleanup lingering conda environment and create
# a new one for this run
conda env remove -n build
conda create -n build python=3.9.12
conda list
- name: Install macOS homebrew dependencies
run: |
# Install dependencies
@@ -87,6 +103,12 @@ jobs:
id: parse-ref
run: .github/scripts/parse_ref.py
- name: Pre-process arm64 wheels
if: inputs.build-environment == 'macos-12-py3-arm64'
run: |
# As wheels are cross-compiled they are reported as x86_64 ones
ORIG_WHLNAME=$(ls -1 dist/*.whl); ARM_WHLNAME=${ORIG_WHLNAME/x86_64/arm64}; mv "${ORIG_WHLNAME}" "${ARM_WHLNAME}"
- name: Test
id: test
run: |
@@ -103,10 +125,21 @@ jobs:
# wreak havoc internally
export COMMIT_MESSAGES="${COMMIT_MESSAGES//[\'\"]}"
export PR_BODY="${PR_BODY//[\'\"]}"
arch
# This is a no-op for x86
conda activate build
python3 -mpip install dist/*.whl
.jenkins/pytorch/macos-test.sh
- name: Cleanup miniconda for arm64
if: inputs.build-environment == 'macos-12-py3-arm64'
run: |
# Cleanup conda env
conda deactivate
conda env remove -n build
- name: Get workflow job id
id: get-job-id
uses: ./.github/actions/get-workflow-job-id
@@ -115,8 +148,7 @@ jobs:
github-token: ${{ secrets.GITHUB_TOKEN }}
- name: Stop monitoring script
if: always() && steps.monitor-script.outputs.monitor-script-pid
shell: bash
if: always() && ${{ steps.monitor-script.outputs.monitor-script-pid }}
env:
MONITOR_SCRIPT_PID: ${{ steps.monitor-script.outputs.monitor-script-pid }}
run: |
@@ -148,7 +180,6 @@ jobs:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_OSSCI_METRICS_V2_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_OSSCI_METRICS_V2_SECRET_ACCESS_KEY }}
GHA_WORKFLOW_JOB_ID: ${{ steps.get-job-id.outputs.job-id }}
shell: bash
run: |
set -x
python3 -m pip install -r requirements.txt

View File

@@ -157,6 +157,7 @@ jobs:
{ config: "default", shard: 2, num_shards: 2, runner: "macos-12" },
{ config: "functorch", shard: 1, num_shards: 1, runner: "macos-12" },
]}
arch: x86_64
secrets:
AWS_OSSCI_METRICS_V2_ACCESS_KEY_ID: ${{ secrets.AWS_OSSCI_METRICS_V2_ACCESS_KEY_ID }}
AWS_OSSCI_METRICS_V2_SECRET_ACCESS_KEY: ${{ secrets.AWS_OSSCI_METRICS_V2_SECRET_ACCESS_KEY }}
@@ -181,17 +182,35 @@ jobs:
xcode-version: "13.3.1"
runner-type: macos-12-xl
build-generates-artifacts: true
# To match the one pre-installed in the m1 runners
python_version: 3.9.12
secrets:
MACOS_SCCACHE_S3_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
MACOS_SCCACHE_S3_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
macos-12-py3-arm64-mps-test:
name: macos-12-py3-arm64
uses: ./.github/workflows/_mac-test-arm64.yml
name: macos-12-py3-arm64-mps
uses: ./.github/workflows/_mac-test-mps.yml
needs: macos-12-py3-arm64-build
with:
build-environment: macos-12-py3-arm64
macos-12-py3-arm64-test:
name: macos-12-py3-arm64
uses: ./.github/workflows/_mac-test.yml
needs: macos-12-py3-arm64-build
with:
build-environment: macos-12-py3-arm64
test-matrix: |
{ include: [
{ config: "default", shard: 1, num_shards: 2, runner: "macos-m1-12" },
{ config: "default", shard: 2, num_shards: 2, runner: "macos-m1-12" },
]}
arch: arm64
secrets:
AWS_OSSCI_METRICS_V2_ACCESS_KEY_ID: ${{ secrets.AWS_OSSCI_METRICS_V2_ACCESS_KEY_ID }}
AWS_OSSCI_METRICS_V2_SECRET_ACCESS_KEY: ${{ secrets.AWS_OSSCI_METRICS_V2_SECRET_ACCESS_KEY }}
win-vs2019-cuda11_6-py3-build:
name: win-vs2019-cuda11.6-py3
uses: ./.github/workflows/_win-build.yml

View File

@@ -7,19 +7,34 @@ source "$(dirname "${BASH_SOURCE[0]}")/common.sh"
sysctl -a | grep machdep.cpu
# NOTE: mkl 2021.3.0+ cmake requires sub-command PREPEND, may break the build
retry conda install -y \
mkl=2021.2.0 \
mkl-include=2021.2.0 \
numpy=1.18.5 \
pyyaml=5.3 \
setuptools=46.0.0 \
cmake=3.19 \
cffi \
ninja \
typing_extensions \
dataclasses \
pip
if [[ ${BUILD_ENVIRONMENT} = *arm64* ]]; then
# We use different versions here as the arm build/tests runs on python 3.9
# while the x86 one runs on python 3.8
retry conda install -y \
numpy=1.22.3 \
pyyaml=6.0 \
setuptools=61.2.0 \
cmake=3.22.1 \
cffi \
ninja \
typing_extensions \
dataclasses \
pip
else
# NOTE: mkl 2021.3.0+ cmake requires sub-command PREPEND, may break the build
retry conda install -y \
mkl=2021.2.0 \
mkl-include=2021.2.0 \
numpy=1.18.5 \
pyyaml=5.3 \
setuptools=46.0.0 \
cmake=3.19 \
cffi \
ninja \
typing_extensions \
dataclasses \
pip
fi
# The torch.hub tests make requests to GitHub.
#

View File

@@ -5,7 +5,11 @@
source "$(dirname "${BASH_SOURCE[0]}")/macos-common.sh"
conda install -y six
pip install -q hypothesis "expecttest==0.1.3" "librosa>=0.6.2" "numba<=0.49.1" psutil "scipy==1.6.3"
if [[ ${BUILD_ENVIRONMENT} = *arm64* ]]; then
pip install hypothesis "expecttest==0.1.3" "librosa>=0.6.2" "numba==0.56.0" psutil "scipy==1.9.0"
else
pip install hypothesis "expecttest==0.1.3" "librosa>=0.6.2" "numba<=0.49.1" psutil "scipy==1.6.3"
fi
# TODO move this to docker
# Pin unittest-xml-reporting to freeze printing test summary logic, related: https://github.com/pytorch/pytorch/issues/69014
@@ -32,14 +36,15 @@ if [ -z "${CI}" ]; then
7z x "${IMAGE_COMMIT_TAG}".7z -o"${WORKSPACE_DIR}/miniconda3/lib/python3.6/site-packages"
fi
# Test that OpenMP is enabled
pushd test
if [[ ! $(python -c "import torch; print(int(torch.backends.openmp.is_available()))") == "1" ]]; then
echo "Build should have OpenMP enabled, but torch.backends.openmp.is_available() is False"
exit 1
# Test that OpenMP is enabled for non-arm64 build
if [[ ${BUILD_ENVIRONMENT} != *arm64* ]]; then
pushd test
if [[ ! $(python -c "import torch; print(int(torch.backends.openmp.is_available()))") == "1" ]]; then
echo "Build should have OpenMP enabled, but torch.backends.openmp.is_available() is False"
exit 1
fi
popd
fi
popd
setup_test_python() {
# The CircleCI worker hostname doesn't resolve to an address.

View File

@@ -1,6 +1,7 @@
# Owner(s): ["oncall: quantization"]
import re
import unittest
from pathlib import Path
import torch
@@ -10,6 +11,7 @@ from torch.testing._internal.common_quantization import (
QuantizationTestCase,
SingleLayerLinearModel,
)
from torch.testing._internal.common_utils import IS_ARM64
class TestQuantizationDocs(QuantizationTestCase):
@@ -106,18 +108,21 @@ class TestQuantizationDocs(QuantizationTestCase):
expr = compile(code, "test", "exec")
exec(expr, global_inputs)
@unittest.skipIf(IS_ARM64, "Not working on arm")
def test_quantization_doc_ptdq(self):
path_from_pytorch = "docs/source/quantization.rst"
unique_identifier = "PTDQ API Example::"
code = self._get_code(path_from_pytorch, unique_identifier)
self._test_code(code)
@unittest.skipIf(IS_ARM64, "Not working on arm")
def test_quantization_doc_ptsq(self):
path_from_pytorch = "docs/source/quantization.rst"
unique_identifier = "PTSQ API Example::"
code = self._get_code(path_from_pytorch, unique_identifier)
self._test_code(code)
@unittest.skipIf(IS_ARM64, "Not working on arm")
def test_quantization_doc_qat(self):
path_from_pytorch = "docs/source/quantization.rst"
unique_identifier = "QAT API Example::"
@@ -131,6 +136,7 @@ class TestQuantizationDocs(QuantizationTestCase):
code = self._get_code(path_from_pytorch, unique_identifier)
self._test_code(code, global_inputs)
@unittest.skipIf(IS_ARM64, "Not working on arm")
def test_quantization_doc_fx(self):
path_from_pytorch = "docs/source/quantization.rst"
unique_identifier = "FXPTQ API Example::"
@@ -141,6 +147,7 @@ class TestQuantizationDocs(QuantizationTestCase):
code = self._get_code(path_from_pytorch, unique_identifier)
self._test_code(code, global_inputs)
@unittest.skipIf(IS_ARM64, "Not working on arm")
def test_quantization_doc_custom(self):
path_from_pytorch = "docs/source/quantization.rst"
unique_identifier = "Custom API Example::"

View File

@@ -1,5 +1,6 @@
# Owner(s): ["oncall: quantization"]
import unittest
import torch
import torch.nn as nn
import torch.nn.quantized as nnq
@@ -35,7 +36,7 @@ from torch.testing._internal.common_quantization import (
skip_if_no_torchvision,
)
from torch.testing._internal.common_quantized import override_qengines
from torch.testing._internal.common_utils import IS_ARM64
class SubModule(torch.nn.Module):
def __init__(self):
@@ -574,11 +575,13 @@ class TestNumericSuiteEager(QuantizationTestCase):
act_compare_dict = get_matching_activations(float_model, qmodel)
@skip_if_no_torchvision
@unittest.skipIf(IS_ARM64, "Not working on arm right now")
def test_mobilenet_v2(self):
from torchvision.models.quantization import mobilenet_v2
self._test_vision_model(mobilenet_v2(pretrained=True, quantize=False))
@skip_if_no_torchvision
@unittest.skipIf(IS_ARM64, "Not working on arm right now")
def test_mobilenet_v3(self):
from torchvision.models.quantization import mobilenet_v3_large
self._test_vision_model(mobilenet_v3_large(pretrained=True, quantize=False))

View File

@@ -167,7 +167,7 @@ from torch.testing._internal.common_quantized import (
override_quantized_engine,
)
from torch.testing._internal.common_utils import TemporaryFileName
from torch.testing._internal.common_utils import TemporaryFileName, IS_ARM64
from torch.testing._internal.common_quantization import NodeSpec as ns
@@ -7237,6 +7237,7 @@ class TestQuantizeFxModels(QuantizationTestCase):
self.assertEqual(out.device.type, device_after)
@skip_if_no_torchvision
@unittest.skipIf(IS_ARM64, "Not working on arm")
def test_model_dropout(self):
from torchvision import models
m = models.mobilenet_v3_small()

View File

@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
# Owner(s): ["module: unknown"]
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.common_utils import run_tests, IS_ARM64
# Kernels
from ao.sparsity.test_kernels import TestQuantizedSparseKernels # noqa: F401
@@ -22,8 +22,9 @@ from ao.sparsity.test_pruner import TestBasePruner # noqa: F401
from ao.sparsity.test_scheduler import TestScheduler # noqa: F401
# Composability
from ao.sparsity.test_composability import TestComposability # noqa: F401
from ao.sparsity.test_composability import TestFxComposability # noqa: F401
if not IS_ARM64:
from ao.sparsity.test_composability import TestComposability # noqa: F401
from ao.sparsity.test_composability import TestFxComposability # noqa: F401
# Utilities
from ao.sparsity.test_sparsity_utils import TestSparsityUtilFunctions # noqa: F401

View File

@@ -27,35 +27,36 @@ class TestCppApiParity(common.TestCase):
expected_test_params_dicts = []
for test_params_dicts, test_instance_class in [
(sample_module.module_tests, common_nn.NewModuleTest),
(sample_functional.functional_tests, common_nn.NewModuleTest),
(common_nn.module_tests, common_nn.NewModuleTest),
(common_nn.new_module_tests, common_nn.NewModuleTest),
(common_nn.criterion_tests, common_nn.CriterionTest),
]:
for test_params_dict in test_params_dicts:
if test_params_dict.get('test_cpp_api_parity', True):
if is_torch_nn_functional_test(test_params_dict):
functional_impl_check.write_test_to_test_class(
TestCppApiParity, test_params_dict, test_instance_class, parity_table, devices)
else:
module_impl_check.write_test_to_test_class(
TestCppApiParity, test_params_dict, test_instance_class, parity_table, devices)
expected_test_params_dicts.append(test_params_dict)
if not common.IS_ARM64:
for test_params_dicts, test_instance_class in [
(sample_module.module_tests, common_nn.NewModuleTest),
(sample_functional.functional_tests, common_nn.NewModuleTest),
(common_nn.module_tests, common_nn.NewModuleTest),
(common_nn.new_module_tests, common_nn.NewModuleTest),
(common_nn.criterion_tests, common_nn.CriterionTest),
]:
for test_params_dict in test_params_dicts:
if test_params_dict.get('test_cpp_api_parity', True):
if is_torch_nn_functional_test(test_params_dict):
functional_impl_check.write_test_to_test_class(
TestCppApiParity, test_params_dict, test_instance_class, parity_table, devices)
else:
module_impl_check.write_test_to_test_class(
TestCppApiParity, test_params_dict, test_instance_class, parity_table, devices)
expected_test_params_dicts.append(test_params_dict)
# Assert that all NN module/functional test dicts appear in the parity test
assert len([name for name in TestCppApiParity.__dict__ if 'test_torch_nn_' in name]) == \
len(expected_test_params_dicts) * len(devices)
# Assert that all NN module/functional test dicts appear in the parity test
assert len([name for name in TestCppApiParity.__dict__ if 'test_torch_nn_' in name]) == \
len(expected_test_params_dicts) * len(devices)
# Assert that there exists auto-generated tests for `SampleModule` and `sample_functional`.
# 4 == 2 (number of test dicts that are not skipped) * 2 (number of devices)
assert len([name for name in TestCppApiParity.__dict__ if 'SampleModule' in name]) == 4
# 4 == 2 (number of test dicts that are not skipped) * 2 (number of devices)
assert len([name for name in TestCppApiParity.__dict__ if 'sample_functional' in name]) == 4
# Assert that there exists auto-generated tests for `SampleModule` and `sample_functional`.
# 4 == 2 (number of test dicts that are not skipped) * 2 (number of devices)
assert len([name for name in TestCppApiParity.__dict__ if 'SampleModule' in name]) == 4
# 4 == 2 (number of test dicts that are not skipped) * 2 (number of devices)
assert len([name for name in TestCppApiParity.__dict__ if 'sample_functional' in name]) == 4
module_impl_check.build_cpp_tests(TestCppApiParity, print_cpp_source=PRINT_CPP_SOURCE)
functional_impl_check.build_cpp_tests(TestCppApiParity, print_cpp_source=PRINT_CPP_SOURCE)
module_impl_check.build_cpp_tests(TestCppApiParity, print_cpp_source=PRINT_CPP_SOURCE)
functional_impl_check.build_cpp_tests(TestCppApiParity, print_cpp_source=PRINT_CPP_SOURCE)
if __name__ == "__main__":
common.run_tests()

View File

@@ -15,7 +15,7 @@ import torch
import torch.backends.cudnn
import torch.utils.cpp_extension
from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
from torch.testing._internal.common_utils import gradcheck, skipIfSlowGradcheckEnv
from torch.testing._internal.common_utils import gradcheck, skipIfSlowGradcheckEnv, IS_ARM64
TEST_CUDA = torch.cuda.is_available() and CUDA_HOME is not None
@@ -39,6 +39,7 @@ def remove_build_path():
# There's only one test that runs gradcheck, run slow mode manually
@skipIfSlowGradcheckEnv
@unittest.skipIf(IS_ARM64, "Does not work on arm")
class TestCppExtensionJIT(common.TestCase):
"""Tests just-in-time cpp extensions.
Don't confuse this with the PyTorch JIT (aka TorchScript).

View File

@@ -3,8 +3,10 @@
import os
import shutil
import sys
import unittest
import torch.testing._internal.common_utils as common
from torch.testing._internal.common_utils import IS_ARM64
import torch
import torch.utils.cpp_extension
from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
@@ -18,7 +20,6 @@ if TEST_CUDA and torch.version.cuda is not None: # the skip CUDNN test for ROCm
TEST_CUDNN = (
TEST_CUDA and CUDNN_HEADER_EXISTS and torch.backends.cudnn.is_available()
)
IS_WINDOWS = sys.platform == "win32"
def remove_build_path():
@@ -54,6 +55,7 @@ class TestCppExtensionOpenRgistration(common.TestCase):
def tearDownClass(cls):
remove_build_path()
@unittest.skipIf(IS_ARM64, "Does not work on arm")
def test_open_device_registration(self):
module = torch.utils.cpp_extension.load(
name="custom_device_extension",

View File

@@ -36,7 +36,7 @@ from torch._utils import ExceptionWrapper
from torch.testing._internal.common_utils import (TestCase, run_tests, TEST_NUMPY, IS_WINDOWS,
IS_CI, NO_MULTIPROCESSING_SPAWN, skipIfRocm, slowTest,
load_tests, TEST_WITH_ASAN, TEST_WITH_TSAN, IS_SANDCASTLE,
IS_MACOS)
IS_MACOS, IS_ARM64)
try:
@@ -1461,6 +1461,7 @@ except RuntimeError as e:
reference, list(self._get_data_loader(ds_cls(counting_ds_n), multiprocessing_context=ctx, **dl_common_args)))
@skipIfNoNumpy
@unittest.skipIf(IS_ARM64, "Not working on arm")
def test_multiprocessing_iterdatapipe(self):
# Testing to make sure that function from global scope (e.g. imported from library) can be serialized
# and used with multiprocess DataLoader
@@ -2228,6 +2229,7 @@ except RuntimeError as e:
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestDataLoader2(TestCase):
@skipIfNoDill
@unittest.skipIf(IS_ARM64, "Not working on arm")
def test_basics(self):
# TODO(VitalyFedyunin): This test will start breaking if we remove guaranteed order
# of traversing workers

View File

@@ -1,12 +1,13 @@
# Owner(s): ["module: dispatch"]
import torch._C as C
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.common_utils import TestCase, run_tests, IS_ARM64
from torch._python_dispatcher import PythonDispatcher
from collections import namedtuple
import itertools
import os
import unittest
import re
import torch.utils.cpp_extension
@@ -767,6 +768,7 @@ CompositeImplicitAutograd[alias] (inactive): fn1 :: (Tensor _0) -> Tensor _0 [ b
msg=f"Expect zero dangling impls, but found: {dangling_impls}"
)
@unittest.skipIf(IS_ARM64, "Not working on arm")
def test_find_dangling_impls_ext(self):
extension_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'cpp_extensions', 'dangling_impl_extension.cpp')
module = torch.utils.cpp_extension.load(

View File

@@ -18,7 +18,7 @@ from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ASAN, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices,
make_fullrank_matrices_with_distinct_singular_values,
freeze_rng_state)
freeze_rng_state, IS_ARM64)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, has_cusolver,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
@@ -52,7 +52,7 @@ def setLinalgBackendsToDefaultFinally(fn):
torch.backends.cuda.preferred_linalg_library('default')
return _fn
@unittest.skipIf(IS_ARM64, "Issue with numpy version on arm")
class TestLinalg(TestCase):
def setUp(self):
super(self.__class__, self).setUp()

View File

@@ -33,6 +33,7 @@ from torch.testing._internal.common_utils import (
first_sample,
parametrize,
skipIfSlowGradcheckEnv,
IS_ARM64
)
from torch.testing._internal.common_methods_invocations import (
op_db,
@@ -1230,6 +1231,7 @@ class TestCommon(TestCase):
self.fail(msg)
@unittest.skipIf(IS_ARM64, "Not working on arm")
class TestCompositeCompliance(TestCase):
# Checks if the operator (if it is composite) is written to support most
# backends and Tensor subclasses. See "CompositeImplicitAutograd Compliance"

View File

@@ -12,7 +12,7 @@ import expecttest
import torch
from torch._C._autograd import _ExtraFields_PyCall, _ExtraFields_PyCCall
from torch.testing._internal.common_utils import (
TestCase, run_tests, IS_WINDOWS, TEST_WITH_CROSSREF)
TestCase, run_tests, IS_WINDOWS, TEST_WITH_CROSSREF, IS_ARM64)
# These functions can vary based on platform and build (e.g. with CUDA)
# and generally distract from, rather than add to, the test.
@@ -154,6 +154,7 @@ class ProfilerTree:
caller_name = to_string(extra_fields.caller)
assert parent_name == caller_name, f"{parent_name} vs. {caller_name}"
@unittest.skipIf(IS_ARM64, "Not working on ARM")
class TestProfilerTree(TestCase):
def assertTreesMatch(self, actual: str, expected: str, allow_failure: bool = False):
# Warning: Here be dragons

View File

@@ -18,7 +18,7 @@ from torch.utils.checkpoint import checkpoint, checkpoint_sequential
import torch.utils.cpp_extension
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_utils import load_tests, IS_SANDCASTLE, IS_WINDOWS
from torch.testing._internal.common_utils import load_tests, IS_SANDCASTLE, IS_WINDOWS, IS_ARM64
# load_tests from torch.testing._internal.common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
@@ -623,6 +623,7 @@ class TestAssert(TestCase):
@unittest.skipIf(IS_SANDCASTLE, "cpp_extension is OSS only")
class TestStandaloneCPPJIT(TestCase):
@unittest.skipIf(IS_ARM64, "Not working on arm")
def test_load_standalone(self):
build_dir = tempfile.mkdtemp()
try:

View File

@@ -758,6 +758,7 @@ IS_WINDOWS = sys.platform == "win32"
IS_MACOS = sys.platform == "darwin"
IS_PPC = platform.machine() == "ppc64le"
IS_X86 = platform.machine() in ('x86_64', 'i386')
IS_ARM64 = platform.machine() == 'arm64'
def is_avx512_vnni_supported():
if sys.platform != 'linux':
@@ -832,7 +833,7 @@ TEST_NUMBA = _check_module_exists('numba')
TEST_DILL = _check_module_exists('dill')
TEST_LIBROSA = _check_module_exists('librosa')
TEST_LIBROSA = _check_module_exists('librosa') and not IS_ARM64
BUILD_WITH_CAFFE2 = torch.onnx._CAFFE2_ATEN_FALLBACK