diff --git a/test/functorch/test_ops.py b/test/functorch/test_ops.py
index d277ce7716d8..8c6752386ae4 100644
--- a/test/functorch/test_ops.py
+++ b/test/functorch/test_ops.py
@@ -9,6 +9,7 @@
 import itertools
 import unittest
 
+from torch.testing._internal.common_utils import unMarkDynamoStrictTest
 from torch.testing._internal.common_utils import TestCase, run_tests, is_iterable_of_tensors, IS_MACOS, \
     IS_X86, parametrize, TEST_WITH_ASAN, noncontiguous_like
 from torch.testing._internal.common_utils import skipIfRocm, runOnRocm
@@ -369,6 +370,7 @@ aliasing_ops_list_return = {
 
 
 @unittest.skipIf(TEST_WITH_ASAN, "tests time out with asan, are probably redundant")
+@unMarkDynamoStrictTest
 class TestOperators(TestCase):
     @with_tf32_off  # https://github.com/pytorch/pytorch/issues/86798
     @ops(op_db + additional_op_db + autograd_function_db, allowed_dtypes=(torch.float,))
diff --git a/test/functorch/test_vmap.py b/test/functorch/test_vmap.py
index 3748b789e341..dacadecc0f4d 100644
--- a/test/functorch/test_vmap.py
+++ b/test/functorch/test_vmap.py
@@ -63,6 +63,7 @@ from torch._functorch.make_functional import functional_init_with_buffers
 from torch.testing._internal.autograd_function_db import autograd_function_db
 from torch._functorch.vmap import restore_vmap
 from torch.utils import _pytree as pytree
+from torch.testing._internal.common_utils import unMarkDynamoStrictTest
 
 FALLBACK_REGEX = 'There is a performance drop'
 
@@ -3397,6 +3398,7 @@ def discover_variants(opinfo):
 
 # TODO: enable this when we get a bit closer to getting torch.vmap x torch.compile working.
 # @markDynamoStrictTest
+@unMarkDynamoStrictTest
 class TestVmapOperatorsOpInfo(TestCase):
     def vmap_outplace_test(self, func, args, kwargs, in_dims, check_shape_only=False,
diff --git a/test/test_decomp.py b/test/test_decomp.py
index 10df8b8be8e8..878715f325df 100644
--- a/test/test_decomp.py
+++ b/test/test_decomp.py
@@ -10,6 +10,7 @@ from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
 from torch.utils import _pytree as pytree
 from torch.testing import make_tensor
 from torch.testing._internal.common_cuda import tf32_off
+from torch.testing._internal.common_utils import unMarkDynamoStrictTest
 from torch.testing._internal.common_utils import (
     is_iterable_of_tensors,
     TestCase,
@@ -481,6 +482,7 @@ if not TEST_WITH_SLOW:
     })
 
 
+@unMarkDynamoStrictTest
 class TestDecomp(TestCase):
     longMessage = True
 
diff --git a/test/test_meta.py b/test/test_meta.py
index d489f0df31de..e856025cda80 100644
--- a/test/test_meta.py
+++ b/test/test_meta.py
@@ -13,6 +13,7 @@ import torch.utils._python_dispatch
 from torch._dispatch.python import enable_python_dispatcher
 from torch._ops import OpOverload, OpOverloadPacket
 from torch.testing import make_tensor
+from torch.testing._internal.common_utils import unMarkDynamoStrictTest
 from torch.testing._internal.common_utils import (
     TestCase,
     skipIfCrossRef,
@@ -1107,6 +1108,7 @@ class MetaCrossRefDispatchMode(torch.utils._python_dispatch.TorchDispatchMode):
 # inconsistencies between CUDA and CPU, and running on CUDA makes it easier
 # to ignore the CPU case when inconsistencies arise. Ideally we deal
 # with the inconsistencies but this takes time.
+@unMarkDynamoStrictTest
 class TestMeta(TestCase):
     # Copies inputs to inplace operations to avoid inplace modifications
     # to leaves requiring gradient
diff --git a/test/test_ops.py b/test/test_ops.py
index 4a2729a6cd19..969cb5d9ee8e 100644
--- a/test/test_ops.py
+++ b/test/test_ops.py
@@ -43,6 +43,7 @@ from torch.testing._internal.common_utils import (
     parametrize,
     skipIfTorchInductor,
     slowTest,
+    unMarkDynamoStrictTest,
 )
 from torch.testing._internal.common_methods_invocations import (
     op_db,
@@ -125,6 +126,7 @@ aten = torch.ops.aten
 
 # Tests that apply to all operators and aren't related to any particular
 # system
+@unMarkDynamoStrictTest
 class TestCommon(TestCase):
     exact_dtype = True
 
@@ -1470,6 +1472,7 @@ class TestCommon(TestCase):
         )
 
 
+@unMarkDynamoStrictTest
 class TestCompositeCompliance(TestCase):
     # Checks if the operator (if it is composite) is written to support most
     # backends and Tensor subclasses. See "CompositeImplicitAutograd Compliance"
@@ -1526,6 +1529,7 @@ class TestCompositeCompliance(TestCase):
             op.get_op(), args, kwargs, op.gradcheck_wrapper, self.assertEqual)
 
 
+@unMarkDynamoStrictTest
 class TestMathBits(TestCase):
     # Tests that
     # 1. The operator's output for physically conjugated/negated tensors and conjugate/negative view tensors
@@ -1747,6 +1751,7 @@ class TestTagsMode(TorchDispatchMode):
         return rs
 
 # Test to verify the correctness for tags in `tags.yaml`, also available for access through `torch.Tags`
+@unMarkDynamoStrictTest
 class TestTags(TestCase):
     @onlyCPU
     @ops(ops_and_refs, dtypes=OpDTypes.any_one)
@@ -1766,6 +1771,7 @@ class TestTags(TestCase):
                 check_inplace_view(opoverloadpacket, input, rs, old_size, old_stride)
 
 
+@unMarkDynamoStrictTest
 class TestRefsOpsInfo(TestCase):
     import_paths = ["_refs", "_refs.special", "_refs.nn.functional", "_refs.fft", "_refs._conversions"]
 
@@ -2040,6 +2046,7 @@ fake_autocast_backward_xfails = {
     skip('pinverse'),
 }
 
+@unMarkDynamoStrictTest
 class TestFakeTensor(TestCase):
     def _test_fake_helper(self, device, dtype, op, context):
         name = op.name
diff --git a/test/test_ops_fwd_gradients.py b/test/test_ops_fwd_gradients.py
index 6ae463319d20..30748aa001d6 100644
--- a/test/test_ops_fwd_gradients.py
+++ b/test/test_ops_fwd_gradients.py
@@ -5,6 +5,7 @@ import platform
 from unittest import skipIf as skipif
 
 import torch
+from torch.testing._internal.common_utils import unMarkDynamoStrictTest
 from torch.testing._internal.common_utils import (
     TestGradients, run_tests, skipIfTorchInductor, IS_MACOS, TestCase)
 from torch.testing._internal.common_methods_invocations import op_db
@@ -21,6 +22,7 @@ if IS_MACOS:
 _gradcheck_ops = partial(ops, dtypes=OpDTypes.supported,
                          allowed_dtypes=[torch.double, torch.cdouble])
 
+@unMarkDynamoStrictTest
 class TestFwdGradients(TestGradients):
     # Test that forward-over-reverse gradgrad is computed correctly
     @_gradcheck_ops(op_db)
diff --git a/test/test_ops_gradients.py b/test/test_ops_gradients.py
index 93db89ab7dd8..55d4112be91b 100644
--- a/test/test_ops_gradients.py
+++ b/test/test_ops_gradients.py
@@ -9,11 +9,13 @@ from torch.testing._internal.control_flow_opinfo_db import control_flow_opinfo_d
 from torch.testing._internal.custom_op_db import custom_op_db
 from torch.testing._internal.common_device_type import \
     (instantiate_device_type_tests, ops, OpDTypes)
+from torch.testing._internal.common_utils import unMarkDynamoStrictTest
 
 # gradcheck requires double precision
 _gradcheck_ops = partial(ops, dtypes=OpDTypes.supported,
                          allowed_dtypes=[torch.double, torch.cdouble])
 
+@unMarkDynamoStrictTest
 class TestBwdGradients(TestGradients):
     # Tests that gradients are computed correctly
     @_gradcheck_ops(op_db + control_flow_opinfo_db + custom_op_db)
diff --git a/test/test_ops_jit.py b/test/test_ops_jit.py
index d8e80048bb21..758f6d47dfe1 100644
--- a/test/test_ops_jit.py
+++ b/test/test_ops_jit.py
@@ -13,7 +13,7 @@ from torch.testing._internal.common_device_type import instantiate_device_type_t
 from torch.testing._internal.common_jit import JitCommonTestCase, check_against_reference
 from torch.testing._internal.jit_metaprogramming_utils import create_script_fn, create_traced_fn, check_alias_annotation
 from torch.testing._internal.jit_utils import disable_autodiff_subgraph_inlining, is_lambda
-
+from torch.testing._internal.common_utils import unMarkDynamoStrictTest
 
 # variant testing is only done with torch.float and torch.cfloat to avoid
 # excessive test times and maximize signal to noise ratio
@@ -27,6 +27,7 @@ _variant_ops = partial(ops, dtypes=OpDTypes.supported,
 # autodifferentiation behavior.
 # Inherits from JitCommonTestCase instead of TestCase directly to share
 # functionality with original test_jit.py method operator tests
+@unMarkDynamoStrictTest
 class TestJit(JitCommonTestCase):
     exact_dtype = True