Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 21:14:14 +08:00)
[BE] Delete skipIfMPSOnMacOS13 (#163515)

As PyTorch needs macOS 14 or newer to use MPS, the skipIfMPSOnMacOS13 test decorator can never fire; delete it and its remaining uses.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/163515
Approved by: https://github.com/Skylion007
Committed by: PyTorch MergeBot
Parent: 8e62d01f7a
Commit: 4027e97791
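For context, a minimal runnable sketch of why the decorator is now dead code. The decorator body is reproduced from the removed code in common_utils.py below; TEST_MPS and MACOS_VERSION stand in for the real common_utils globals, and the values assigned here are illustrative assumptions, not PyTorch's actual detection logic.

    import unittest
    from functools import wraps

    # Assumed stand-ins for the torch.testing._internal.common_utils globals:
    # TEST_MPS is True only when an MPS device is usable, and MACOS_VERSION
    # holds the host's macOS version as a number (e.g. 14.0).
    TEST_MPS = True
    MACOS_VERSION = 14.0

    def skipIfMPSOnMacOS13(fn):
        # The deleted decorator: skip a test only when running MPS on macOS 13.
        @wraps(fn)
        def wrapper(*args, **kwargs):
            if TEST_MPS and int(MACOS_VERSION) == 13:
                raise unittest.SkipTest("Test crashes MPSGraph on MacOS13")
            else:
                fn(*args, **kwargs)
        return wrapper

    # Since MPS support itself now requires macOS 14+, TEST_MPS can never be
    # True while int(MACOS_VERSION) == 13, so the skip branch is unreachable
    # and every remaining use of the decorator was a no-op wrapper.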
torch/testing/_internal/common_modules.py

@@ -24,7 +24,7 @@ from torch.testing._internal.common_nn import (
     marginrankingloss_reference, multimarginloss_reference, multilabelmarginloss_reference,
     nllloss_reference, nlllossNd_reference, smoothl1loss_reference, softmarginloss_reference, get_reduction)
 from torch.testing._internal.common_utils import (
-    freeze_rng_state, skipIfMPS, skipIfMPSOnMacOS13, GRADCHECK_NONDET_TOL, TEST_WITH_ROCM, IS_WINDOWS,
+    freeze_rng_state, skipIfMPS, GRADCHECK_NONDET_TOL, TEST_WITH_ROCM, IS_WINDOWS,
     skipIfTorchDynamo)
 from types import ModuleType
 import operator
@@ -3413,11 +3413,8 @@ module_db: list[ModuleInfo] = [
                        'TestModule',
                        'test_memory_format',
                        active_if=operator.itemgetter('training'),
-                       device_type='cuda',
-                   ),
-                   # error: input types 'tensor<f32>' and 'tensor<15x10xf16>' are not broadcast compatible
-                   DecorateInfo(skipIfMPSOnMacOS13, 'TestModule', dtypes=[torch.float16], device_type='mps',),),
-               ),
+                       device_type='cuda',),
+               ),),
     ModuleInfo(torch.nn.AvgPool3d,
                module_inputs_func=module_inputs_torch_nn_AvgPool3d,
                gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
@@ -3496,14 +3493,6 @@ module_db: list[ModuleInfo] = [
                module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=1, lazy=False),
                gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
                module_memformat_affects_out=True,
-               skips=(
-                   # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32'
-                   # xfail does not work due to Fatal Python error: Aborted
-                   DecorateInfo(skipIfMPSOnMacOS13, "TestModule", "test_memory_format",
-                                device_type='mps', dtypes=[torch.float16]),
-                   DecorateInfo(skipIfMPSOnMacOS13, "TestModule", "test_non_contiguous_tensors",
-                                device_type='mps', dtypes=[torch.float16]),
-               ),
                decorators=(
                    DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'),
                )),
@@ -3519,12 +3508,6 @@ module_db: list[ModuleInfo] = [
                    # Fails with channels last test on MPS backend
                    DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format",
                                 device_type='mps', dtypes=[torch.float32, torch.float16]),
-                   # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32'
-                   # xfail does not work due to Fatal Python error: Aborted
-                   DecorateInfo(skipIfMPSOnMacOS13, "TestModule", "test_memory_format",
-                                device_type='mps', dtypes=[torch.float16]),
-                   DecorateInfo(skipIfMPSOnMacOS13, "TestModule", "test_non_contiguous_tensors",
-                                device_type='mps', dtypes=[torch.float16]),
                ),
                decorators=(
                    DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'),
@@ -3552,12 +3535,7 @@ module_db: list[ModuleInfo] = [
                    # Not implemented for chalf on CPU
                    DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_cpu_gpu_parity',
                                 dtypes=(torch.chalf,), device_type='cuda'),
-                   # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32'
-                   # xfail does not work due to Fatal Python error: Aborted
-                   DecorateInfo(skipIfMPSOnMacOS13, "TestModule", "test_memory_format",
-                                device_type='mps', dtypes=[torch.float16]),
-                   DecorateInfo(skipIfMPSOnMacOS13, "TestModule", "test_non_contiguous_tensors",
-                                device_type='mps', dtypes=[torch.float16]),),
+               ),
                decorators=(
                    DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'),
                    DecorateInfo(precisionOverride({torch.chalf: 5e-03}), 'TestModule', 'test_memory_format'),
@@ -3581,12 +3559,6 @@ module_db: list[ModuleInfo] = [
                    # Not implemented for chalf on CPU
                    DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_cpu_gpu_parity',
                                 dtypes=(torch.chalf,), device_type='cuda'),
-                   # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32'
-                   # xfail does not work due to Fatal Python error: Aborted
-                   DecorateInfo(skipIfMPSOnMacOS13, "TestModule", "test_memory_format",
-                                device_type='mps', dtypes=[torch.float16]),
-                   DecorateInfo(skipIfMPSOnMacOS13, "TestModule", "test_non_contiguous_tensors",
-                                device_type='mps', dtypes=[torch.float16]),
                ),
                decorators=(
                    DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'),
@@ -3665,12 +3637,6 @@ module_db: list[ModuleInfo] = [
                    # Lazy modules don't currently play well with ModuleInfo tests on the meta device.
                    # See https://github.com/pytorch/pytorch/issues/70505 for more info.
                    DecorateInfo(skipMeta),
-                   # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32'
-                   # xfail does not work due to Fatal Python error: Aborted
-                   DecorateInfo(skipIfMPSOnMacOS13, "TestModule", "test_memory_format",
-                                device_type='mps', dtypes=[torch.float16]),
-                   DecorateInfo(skipIfMPSOnMacOS13, "TestModule", "test_non_contiguous_tensors",
-                                device_type='mps', dtypes=[torch.float16]),
                ),
                decorators=(
                    DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'),
@@ -3690,12 +3656,6 @@ module_db: list[ModuleInfo] = [
                    # Fails with channels last test on MPS backend
                    DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format",
                                 device_type='mps', dtypes=[torch.float32, torch.float16]),
-                   # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32'
-                   # xfail does not work due to Fatal Python error: Aborted
-                   DecorateInfo(skipIfMPSOnMacOS13, "TestModule", "test_memory_format",
-                                device_type='mps', dtypes=[torch.float16]),
-                   DecorateInfo(skipIfMPSOnMacOS13, "TestModule", "test_non_contiguous_tensors",
-                                device_type='mps', dtypes=[torch.float16]),
                ),
                decorators=(
                    DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'),
@@ -3725,12 +3685,6 @@ module_db: list[ModuleInfo] = [
                    # Lazy modules don't currently play well with ModuleInfo tests on the meta device.
                    # See https://github.com/pytorch/pytorch/issues/70505 for more info.
                    DecorateInfo(skipMeta),
-                   # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32'
-                   # xfail does not work due to Fatal Python error: Aborted
-                   DecorateInfo(skipIfMPSOnMacOS13, "TestModule", "test_memory_format",
-                                device_type='mps', dtypes=[torch.float16]),
-                   DecorateInfo(skipIfMPSOnMacOS13, "TestModule", "test_non_contiguous_tensors",
-                                device_type='mps', dtypes=[torch.float16]),
                ),
                decorators=(
                    DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'),
@@ -3750,12 +3704,6 @@ module_db: list[ModuleInfo] = [
                    # Fails with channels last test on MPS backend
                    DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format",
                                 device_type='mps', dtypes=[torch.float32, torch.float16]),
-                   # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32'
-                   # xfail does not work due to Fatal Python error: Aborted
-                   DecorateInfo(skipIfMPSOnMacOS13, "TestModule", "test_memory_format",
-                                device_type='mps', dtypes=[torch.float16]),
-                   DecorateInfo(skipIfMPSOnMacOS13, "TestModule", "test_non_contiguous_tensors",
-                                device_type='mps', dtypes=[torch.float16]),
                ),
                decorators=(
                    DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'),
@@ -3853,9 +3801,6 @@ module_db: list[ModuleInfo] = [
                skips=(
                    # No channels_last support for loss functions.
                    DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),
-                   # See #119108: input types 'tensor<f32>' and 'tensor<15x10xf16>' are not broadcast compatible
-                   DecorateInfo(skipIfMPSOnMacOS13, 'TestModule', 'test_non_contiguous_tensors',
-                                device_type='mps', dtypes=[torch.float16],),
                    # See #119108: tolerance issue
                    DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward",
                                 device_type='mps', dtypes=[torch.float16]),)
torch/testing/_internal/common_utils.py

@@ -1990,16 +1990,6 @@ def skipIfMPS(fn):
     return wrapper


-def skipIfMPSOnMacOS13(fn):
-    @wraps(fn)
-    def wrapper(*args, **kwargs):
-        if TEST_MPS and int(MACOS_VERSION) == 13:
-            raise unittest.SkipTest("Test crashes MPSGraph on MacOS13")
-        else:
-            fn(*args, **kwargs)
-    return wrapper
-
-
 def skipIfHpu(fn):
     @wraps(fn)
     def wrapper(*args, **kwargs):
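With the macOS 13 decorator gone, MPS-specific gating in these tests reduces to the decorators that remain, such as skipIfMPS (still imported in the common_modules.py hunk above). A minimal usage sketch; ExampleTest and test_fp16_op are hypothetical names invented for illustration:

    import unittest
    from torch.testing._internal.common_utils import skipIfMPS

    class ExampleTest(unittest.TestCase):  # hypothetical test class
        @skipIfMPS  # skips on every MPS run; no macOS-13-only carve-out remains
        def test_fp16_op(self):  # hypothetical test name
            self.assertTrue(True)

    if __name__ == "__main__":
        unittest.main()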