Revert "markDynamoStrictTest some more (#115885)"

This reverts commit 55ce4693ff2c0b6e50b8af323f36ecc7ff929638.

Reverted https://github.com/pytorch/pytorch/pull/115885 on behalf of https://github.com/atalman due to OSSCI oncall, broke inductor ([comment](https://github.com/pytorch/pytorch/pull/115885#issuecomment-1858409669))
Author: PyTorch MergeBot
Date:   2023-12-15 19:51:24 +00:00
Parent: 61abacf829
Commit: c006c8b50e
8 changed files with 0 additions and 73 deletions
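
For context, markDynamoStrictTest (defined in torch.testing._internal.common_utils, as visible in the hunks below) is the decorator the reverted PR had applied to whole test classes so that their tests run in Dynamo's strict mode when the suite is executed under Dynamo (e.g. with PYTORCH_TEST_WITH_DYNAMO=1). A minimal, illustrative sketch of the pattern this revert strips back out; only the decorator usage comes from the diff, the test body is invented for the example:

    import torch
    from torch.testing._internal.common_utils import TestCase, run_tests

    # The decorator removed throughout this revert: it marks every test in
    # the class as "Dynamo strict" when the suite runs under Dynamo.
    @torch.testing._internal.common_utils.markDynamoStrictTest
    class TestConstantPadNd(TestCase):
        def test_constant_pad_nd(self):
            a = torch.tensor([[1, 2], [3, 4]])
            # pad the last dim by one element on each side
            padded = torch.constant_pad_nd(a, pad=(1, 1), value=0)
            self.assertEqual(padded.shape, (2, 4))

    if __name__ == "__main__":
        run_tests()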

View File

@@ -12,7 +12,6 @@ from torch.testing._internal.common_device_type import (
 )
 from torch.testing._internal.common_utils import parametrize, run_tests, TestCase
 
-@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestMHADeviceType(TestCase):
     @torch.no_grad()
     def _test_transform_bias_rescale_qkv_impl(

View File

@@ -71,7 +71,6 @@ if TEST_NUMPY:
 # update test/run_test.py to list it, otherwise it will NOT be run in
 # CI.
-@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestNN(NNTestCase):
     _do_cuda_memory_leak_check = True
     _do_cuda_non_default_stream = True
@@ -2116,7 +2115,6 @@ tensor(..., device='meta', size=(1,), requires_grad=True)""")
     @unittest.skipUnless('fbgemm' in torch.backends.quantized.supported_engines,
                          'Linear_FP16_weight requires FBGEMM. FBGEMM is only optimized for CPUs'
                          ' with instruction set support avx2 or newer.')
-    @skipIfTorchDynamo("how do i install fbgemm?")
     def test_fb_fc_packed(self):
         X = np.random.rand(16, 16).astype(np.float32) - 0.5
         W = np.random.rand(16, 16).astype(np.float32) - 0.5
@@ -7345,7 +7343,6 @@ tensor(..., device='meta', size=(1,), requires_grad=True)""")
         with self.assertRaises(RuntimeError):
             res = arg_class(*arg_4)
 
-@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestFusionEval(TestCase):
     @set_default_dtype(torch.double)
     @given(X=hu.tensor(shapes=((5, 3, 5, 5),), dtype=np.double),
@@ -7386,7 +7383,6 @@ class TestFusionEval(TestCase):
         self.assertEqual(Y_ref, Y_hat, msg="Conv+BN(non-affine) fusion results are off")
 
 
-@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestConstantPadNd(TestCase):
     def test_constant_pad_nd(self):
         a = torch.tensor([[1, 2], [3, 4]])
@@ -7408,7 +7404,6 @@ class TestConstantPadNd(TestCase):
         self.assertTrue(nhwc_padded.is_contiguous(memory_format=torch.channels_last))
 
 
-@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestAddRelu(TestCase):
     def test_add_relu(self):
         a = torch.rand((7, 11))
@@ -7862,7 +7857,6 @@ def _buildEquivalentAffineTransforms3d(device, input_size, output_size, angle_ra
 # end TestNN.test_affine_* helpers
 
 
-@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestNNDeviceType(NNTestCase):
     def _test_InstanceNorm_general(self, cls, input, device, dtype=torch.float):
         # default case track_running_stats=False
@@ -12900,7 +12894,6 @@ class TestNNDeviceType(NNTestCase):
         self.assertEqual(y, y_ref)
 
 
-@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestFunctionalPickle(TestCase):
     # issue gh-38137
@@ -12909,7 +12902,6 @@ class TestFunctionalPickle(TestCase):
         s = pickle.dumps(F.softsign)
 
 
-@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestFusionUtils(TestCase):
     def test_fuse_conv_bn_requires_grad(self):
         conv = torch.nn.Conv2d(3, 3, 3)

View File

@@ -34,7 +34,6 @@ if TEST_SCIPY:
 NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor"
 GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition"
 
-@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestPrims(TestCase):
     @onlyCUDA
     @dtypes(torch.float32)
@@ -322,7 +321,6 @@ class TestPrims(TestCase):
         self.assertEqual(ref1, res3)
         self.assertEqual(ref2, res4)
 
-@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestPrimsBasic(TestCase):
     def test_torch_ops(self):
         r = make_tensor((2,), device='cpu', dtype=torch.float)
@@ -347,7 +345,6 @@ $1: f32[2] = torch._ops.prims.sin.default($0)""")
 instantiate_device_type_tests(TestPrims, globals())
 
 
-@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestRefs(TestCase):
     @dtypes(torch.float32)
     def test_constant_pad_nd_memory_format(self, device, dtype):
@@ -412,7 +409,6 @@ class TestRefs(TestCase):
 instantiate_device_type_tests(TestRefs, globals())
 
 
-@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestDecomp(TestCase):
     @ops([op for op in op_db if op.supports_varargs], dtypes=OpDTypes.any_one)
     def test_decomposition_method_vararg(self, device, dtype, op):

View File

@@ -95,7 +95,6 @@ class IncorrectAliasTensor(torch.Tensor):
         return tree_map(wrap, out)
 
 # Tests various schema checking functionalities.
-@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestSchemaCheck(JitTestCase):
     # Tests that SchemaCheckMode records operator order with grad
     def test_schema_check_mode_operator_order(self):
@@ -491,7 +490,6 @@ class TestSchemaCheck(JitTestCase):
             x.add(x)
 
 
-@torch.testing._internal.common_utils.unMarkDynamoStrictTest
 class TestSchemaCheckModeOpInfo(JitTestCase):
     @ops(op_db, dtypes=OpDTypes.supported)
     def test_schema_correctness(self, device, dtype, op):

View File

@@ -785,7 +785,6 @@ class serialization_method:
         torch.save = self.torch_save
 
 @unittest.skipIf(IS_WINDOWS, "NamedTemporaryFile on windows")
-@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestBothSerialization(TestCase):
     @parametrize("weights_only", (True, False))
     def test_serialization_new_format_old_format_compat(self, device, weights_only):
@@ -808,7 +807,6 @@ class TestBothSerialization(TestCase):
         self.assertTrue(len(w) == 0, msg=f"Expected no warnings but got {[str(x) for x in w]}")
 
 
-@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestOldSerialization(TestCase, SerializationMixin):
     # unique_key is necessary because on Python 2.7, if a warning passed to
     # the warning module is the same, it is not raised again.
@@ -906,7 +904,6 @@ class TestOldSerialization(TestCase, SerializationMixin):
         return super().run(*args, **kwargs)
 
 
-@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestSerialization(TestCase, SerializationMixin):
     @parametrize('weights_only', (True, False))
     def test_serialization_zipfile(self, weights_only):
@@ -4017,7 +4014,6 @@ class TestEmptySubclass(torch.Tensor):
     ...
 
 
-@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestSubclassSerialization(TestCase):
     def test_tensor_subclass_wrapper_serialization(self):
         wrapped_tensor = torch.rand(2)

View File

@@ -8,7 +8,6 @@ import torch.utils.show_pickle
 from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS
 
 
-@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestShowPickle(TestCase):
     @unittest.skipIf(IS_WINDOWS, "Can't re-open temp file on Windows")

View File

@@ -18,7 +18,6 @@ from torch.testing._internal.common_device_type import \
 # TODO: remove this
 SIZE = 100
 
-@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestSortAndSelect(TestCase):
     def assertIsOrdered(self, order, x, mxx, ixx, task):

View File

@@ -346,56 +346,4 @@ dynamo_expected_failures = {
     "TestMultiprocessing.test_fs_is_shared",
     "TestMultiprocessing.test_inherit_tensor",
     "TestMultiprocessing.test_is_shared",
-    "TestNN.test_ModuleList",
-    "TestNN.test_ParameterDict_replication",
-    "TestNN.test_ParameterList_replication",
-    "TestNN.test_affine_grid",
-    "TestNN.test_affine_grid_3d",
-    "TestNN.test_Sequential_append",
-    "TestNN.test_Sequential_extend",
-    "TestNN.test_Sequential_iadd",
-    "TestNN.test_Sequential_imul",
-    "TestNNDeviceTypeCPU.test_hardsigmoid_grad_cpu",
-    "TestNNDeviceTypeCPU.test_hardswish_grad_cpu",
-    "TestNNDeviceTypeCPU.test_invalid_reduction_strings_cpu",
-    "TestNN.test_interpolate",
-    "TestNN.test_interpolate_buffer_overflow",
-    "TestNNDeviceTypeCPU.test_module_to_empty_cpu_float32",
-    "TestNNDeviceTypeCPU.test_module_to_empty_cpu_float64",
-    "TestNNDeviceTypeCPU.test_nll_loss_all_ignored_cpu",
-    "TestNNDeviceTypeCPU.test_nll_loss_byte_target_matches_long_cpu",
-    "TestNNDeviceTypeCPU.test_nll_loss_empty_tensor_reduction_mean_cpu",
-    "TestNNDeviceTypeCPU.test_nll_loss_empty_tensor_reduction_none_cpu",
-    "TestNNDeviceTypeCPU.test_nll_loss_empty_tensor_reduction_sum_cpu",
-    "TestNNDeviceTypeCPU.test_nll_loss_total_weight_is_zero_cpu",
-    "TestNNDeviceTypeCPU.test_threshold_inplace_overlap_cpu",
-    "TestNN.test_overwrite_module_params_on_conversion",
-    "TestNNDeviceTypeCPU.test_triplet_margin_with_distance_loss_cpu",
-    "TestNN.test_upsamplingLinear1d",
-    "TestNN.test_upsampling_bfloat16",
-    "TestNNDeviceTypeCPU.test_upsamplingBiMode2d_antialias_True_align_corners_False_mode_bicubic_memory_format0_cpu",
-    "TestNNDeviceTypeCPU.test_upsamplingBiMode2d_antialias_True_align_corners_False_mode_bicubic_memory_format1_cpu",
-    "TestNNDeviceTypeCPU.test_upsamplingBiMode2d_antialias_True_align_corners_True_mode_bicubic_memory_format0_cpu",
-    "TestNNDeviceTypeCPU.test_upsamplingBiMode2d_antialias_True_align_corners_True_mode_bicubic_memory_format1_cpu",
-    "TestFusionEval.test_fuse_module_eval_numerics",
-    "TestNNDeviceTypeCPU.test_CTCLoss_no_batch_dim_reduction_mean_use_module_form_False_cpu",
-    "TestNNDeviceTypeCPU.test_CTCLoss_no_batch_dim_reduction_none_use_module_form_False_cpu",
-    "TestNNDeviceTypeCPU.test_CTCLoss_no_batch_dim_reduction_sum_use_module_form_False_cpu",
-    "TestNNDeviceTypeCPU.test_GRU_grad_and_gradgrad_cpu_float64",
-    "TestNNDeviceTypeCPU.test_LSTM_grad_and_gradgrad_cpu_float64",
-    "TestNNDeviceTypeCPU.test_batchnorm_grad_cpu",
-    "TestNNDeviceTypeCPU.test_upsamplingTrilinear3d_align_corners_False_cpu",
-    "TestNNDeviceTypeCPU.test_upsamplingTrilinear3d_align_corners_True_cpu",
-    "TestSubclassSerialization.test_tensor_subclass_deepcopy",
-    "TestSubclassSerialization.test_tensor_subclass_getstate_overwrite",
-    "TestSubclassSerialization.test_tensor_subclass_wrapper_serialization",
-    "TestSortAndSelectCPU.test_isin_cpu_int8",
-    "TestSortAndSelectCPU.test_isin_cpu_uint8",
-    "TestSortAndSelectCPU.test_isin_cpu_float32",
-    "TestSortAndSelectCPU.test_isin_cpu_float64",
-    "TestSortAndSelectCPU.test_isin_cpu_int16",
-    "TestSortAndSelectCPU.test_isin_cpu_int32",
-    "TestSortAndSelectCPU.test_isin_cpu_int64",
-    "TestSortAndSelectCPU.test_sort_overflow_cpu_int16",
-    "TestSortAndSelectCPU.test_topk_arguments_cpu",
 }
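
The last hunk above shrinks the dynamo_expected_failures set: entries the reverted PR had added (tests expected to fail once their classes ran in Dynamo strict mode) are deleted along with the decorators. For readers unfamiliar with the mechanism, here is a hedged sketch of how such a set is typically consumed; the helper maybe_expect_dynamo_failure is hypothetical, and the real consumer lives in PyTorch's test infrastructure and may differ:

    import unittest

    # A tiny stand-in for the real set in the diff above.
    dynamo_expected_failures = {
        "TestMultiprocessing.test_fs_is_shared",
    }

    def maybe_expect_dynamo_failure(cls):
        # If a test's "ClassName.method_name" id is in the expected-failure
        # set, wrap it with unittest.expectedFailure so the runner records
        # an expected failure under Dynamo instead of a hard failure.
        for name in list(vars(cls)):
            if f"{cls.__name__}.{name}" in dynamo_expected_failures:
                setattr(cls, name, unittest.expectedFailure(getattr(cls, name)))
        return cls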