markDynamoStrictTest some more (#115885)
Featuring test_native_mha.py, test_nn.py, test_prims.py, test_schema_check.py, test_serialization.py, test_show_pickle.py, and test_sort_and_select.py.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/115885
Approved by: https://github.com/voznesenskym
ghstack dependencies: #115845, #115855, #115856, #115857, #115858, #115870, #115871, #115879
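Every hunk below applies the same pattern: a class-level decorator from torch.testing._internal.common_utils that opts a test class into Dynamo strict mode when the suite is run with the PYTORCH_TEST_WITH_DYNAMO=1 environment variable set. A minimal sketch of the pattern is below; the decorator path and the TestCase/run_tests imports are exactly what the hunks show, while the TestExample class and test_add method are hypothetical, for illustration only.

import torch
from torch.testing._internal.common_utils import TestCase, run_tests

# Class-level marker, applied exactly as the hunks below add it.
# TestExample/test_add are hypothetical names, not from this commit.
@torch.testing._internal.common_utils.markDynamoStrictTest
class TestExample(TestCase):
    def test_add(self):
        self.assertEqual(torch.ones(2) + torch.ones(2), torch.full((2,), 2.0))

if __name__ == "__main__":
    run_tests()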
test/test_native_mha.py
@@ -12,6 +12,7 @@ from torch.testing._internal.common_device_type import (
 )
 from torch.testing._internal.common_utils import parametrize, run_tests, TestCase

+@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestMHADeviceType(TestCase):
     @torch.no_grad()
     def _test_transform_bias_rescale_qkv_impl(
test/test_nn.py
@@ -71,6 +71,7 @@ if TEST_NUMPY:
 # update test/run_test.py to list it, otherwise it will NOT be run in
 # CI.

+@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestNN(NNTestCase):
     _do_cuda_memory_leak_check = True
     _do_cuda_non_default_stream = True
@@ -2115,6 +2116,7 @@ tensor(..., device='meta', size=(1,), requires_grad=True)""")
     @unittest.skipUnless('fbgemm' in torch.backends.quantized.supported_engines,
                          'Linear_FP16_weight requires FBGEMM. FBGEMM is only optimized for CPUs'
                          ' with instruction set support avx2 or newer.')
+    @skipIfTorchDynamo("how do i install fbgemm?")
     def test_fb_fc_packed(self):
         X = np.random.rand(16, 16).astype(np.float32) - 0.5
         W = np.random.rand(16, 16).astype(np.float32) - 0.5
@@ -7343,6 +7345,7 @@ tensor(..., device='meta', size=(1,), requires_grad=True)""")
         with self.assertRaises(RuntimeError):
             res = arg_class(*arg_4)

+@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestFusionEval(TestCase):
     @set_default_dtype(torch.double)
     @given(X=hu.tensor(shapes=((5, 3, 5, 5),), dtype=np.double),
@@ -7383,6 +7386,7 @@ class TestFusionEval(TestCase):
         self.assertEqual(Y_ref, Y_hat, msg="Conv+BN(non-affine) fusion results are off")


+@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestConstantPadNd(TestCase):
     def test_constant_pad_nd(self):
         a = torch.tensor([[1, 2], [3, 4]])
@@ -7404,6 +7408,7 @@ class TestConstantPadNd(TestCase):
         self.assertTrue(nhwc_padded.is_contiguous(memory_format=torch.channels_last))


+@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestAddRelu(TestCase):
     def test_add_relu(self):
         a = torch.rand((7, 11))
@@ -7857,6 +7862,7 @@ def _buildEquivalentAffineTransforms3d(device, input_size, output_size, angle_ra
 # end TestNN.test_affine_* helpers


+@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestNNDeviceType(NNTestCase):
     def _test_InstanceNorm_general(self, cls, input, device, dtype=torch.float):
         # default case track_running_stats=False
@@ -12894,6 +12900,7 @@ class TestNNDeviceType(NNTestCase):
         self.assertEqual(y, y_ref)


+@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestFunctionalPickle(TestCase):

     # issue gh-38137
@@ -12902,6 +12909,7 @@ class TestFunctionalPickle(TestCase):
         s = pickle.dumps(F.softsign)


+@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestFusionUtils(TestCase):
     def test_fuse_conv_bn_requires_grad(self):
         conv = torch.nn.Conv2d(3, 3, 3)
test/test_prims.py
@@ -34,6 +34,7 @@ if TEST_SCIPY:
 NVPRIM_ATEN_FALLBACK_WARNING = "fallback to aten executor"
 GET_ISOLATED_GRAPHMODULE_ERROR = "get_isolated_graphmodule failed on decomposition"

+@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestPrims(TestCase):
     @onlyCUDA
     @dtypes(torch.float32)
@@ -321,6 +322,7 @@ class TestPrims(TestCase):
         self.assertEqual(ref1, res3)
         self.assertEqual(ref2, res4)

+@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestPrimsBasic(TestCase):
     def test_torch_ops(self):
         r = make_tensor((2,), device='cpu', dtype=torch.float)
@@ -345,6 +347,7 @@ $1: f32[2] = torch._ops.prims.sin.default($0)""")
 instantiate_device_type_tests(TestPrims, globals())


+@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestRefs(TestCase):
     @dtypes(torch.float32)
     def test_constant_pad_nd_memory_format(self, device, dtype):
@@ -409,6 +412,7 @@ class TestRefs(TestCase):
 instantiate_device_type_tests(TestRefs, globals())


+@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestDecomp(TestCase):
     @ops([op for op in op_db if op.supports_varargs], dtypes=OpDTypes.any_one)
     def test_decomposition_method_vararg(self, device, dtype, op):
test/test_schema_check.py
@@ -95,6 +95,7 @@ class IncorrectAliasTensor(torch.Tensor):
         return tree_map(wrap, out)

 # Tests various schema checking functionalities.
+@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestSchemaCheck(JitTestCase):
     # Tests that SchemaCheckMode records operator order with grad
     def test_schema_check_mode_operator_order(self):
@@ -490,6 +491,7 @@ class TestSchemaCheck(JitTestCase):
         x.add(x)


+@torch.testing._internal.common_utils.unMarkDynamoStrictTest
 class TestSchemaCheckModeOpInfo(JitTestCase):
     @ops(op_db, dtypes=OpDTypes.supported)
     def test_schema_correctness(self, device, dtype, op):
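Note the asymmetry in the test_schema_check.py hunks above: TestSchemaCheck is marked strict while the OpInfo-driven TestSchemaCheckModeOpInfo is explicitly unmarked. A plausible reading is that the decorator pair simply flips a per-class flag that the test runner consults; the sketch below is written under that assumption, and the dynamo_strict attribute is a guess at the mechanism, not something this diff confirms (the real implementations live in torch.testing._internal.common_utils and may differ).

# Hedged sketch of a mark/unmark decorator pair; not PyTorch's actual code.
def markDynamoStrictTest(cls):
    cls.dynamo_strict = True   # runner would treat every test in cls as strict
    return cls

def unMarkDynamoStrictTest(cls):
    cls.dynamo_strict = False  # explicit opt-out, e.g. for broad OpInfo sweeps
    return cls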
test/test_serialization.py
@@ -785,6 +785,7 @@ class serialization_method:
         torch.save = self.torch_save

 @unittest.skipIf(IS_WINDOWS, "NamedTemporaryFile on windows")
+@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestBothSerialization(TestCase):
     @parametrize("weights_only", (True, False))
     def test_serialization_new_format_old_format_compat(self, device, weights_only):
@@ -807,6 +808,7 @@ class TestBothSerialization(TestCase):
         self.assertTrue(len(w) == 0, msg=f"Expected no warnings but got {[str(x) for x in w]}")


+@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestOldSerialization(TestCase, SerializationMixin):
     # unique_key is necessary because on Python 2.7, if a warning passed to
     # the warning module is the same, it is not raised again.
@@ -904,6 +906,7 @@ class TestOldSerialization(TestCase, SerializationMixin):
         return super().run(*args, **kwargs)


+@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestSerialization(TestCase, SerializationMixin):
     @parametrize('weights_only', (True, False))
     def test_serialization_zipfile(self, weights_only):
@@ -4014,6 +4017,7 @@ class TestEmptySubclass(torch.Tensor):
     ...


+@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestSubclassSerialization(TestCase):
     def test_tensor_subclass_wrapper_serialization(self):
         wrapped_tensor = torch.rand(2)
test/test_show_pickle.py
@@ -8,6 +8,7 @@ import torch.utils.show_pickle

 from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS

+@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestShowPickle(TestCase):

     @unittest.skipIf(IS_WINDOWS, "Can't re-open temp file on Windows")
test/test_sort_and_select.py
@@ -18,6 +18,7 @@ from torch.testing._internal.common_device_type import \
 # TODO: remove this
 SIZE = 100

+@torch.testing._internal.common_utils.markDynamoStrictTest
 class TestSortAndSelect(TestCase):

     def assertIsOrdered(self, order, x, mxx, ixx, task):
torch/testing/_internal/dynamo_test_failures.py
@@ -346,4 +346,56 @@ dynamo_expected_failures = {
     "TestMultiprocessing.test_fs_is_shared",
     "TestMultiprocessing.test_inherit_tensor",
     "TestMultiprocessing.test_is_shared",
+    "TestNN.test_ModuleList",
+    "TestNN.test_ParameterDict_replication",
+    "TestNN.test_ParameterList_replication",
+    "TestNN.test_affine_grid",
+    "TestNN.test_affine_grid_3d",
+    "TestNN.test_Sequential_append",
+    "TestNN.test_Sequential_extend",
+    "TestNN.test_Sequential_iadd",
+    "TestNN.test_Sequential_imul",
+    "TestNNDeviceTypeCPU.test_hardsigmoid_grad_cpu",
+    "TestNNDeviceTypeCPU.test_hardswish_grad_cpu",
+    "TestNNDeviceTypeCPU.test_invalid_reduction_strings_cpu",
+    "TestNN.test_interpolate",
+    "TestNN.test_interpolate_buffer_overflow",
+    "TestNNDeviceTypeCPU.test_module_to_empty_cpu_float32",
+    "TestNNDeviceTypeCPU.test_module_to_empty_cpu_float64",
+    "TestNNDeviceTypeCPU.test_nll_loss_all_ignored_cpu",
+    "TestNNDeviceTypeCPU.test_nll_loss_byte_target_matches_long_cpu",
+    "TestNNDeviceTypeCPU.test_nll_loss_empty_tensor_reduction_mean_cpu",
+    "TestNNDeviceTypeCPU.test_nll_loss_empty_tensor_reduction_none_cpu",
+    "TestNNDeviceTypeCPU.test_nll_loss_empty_tensor_reduction_sum_cpu",
+    "TestNNDeviceTypeCPU.test_nll_loss_total_weight_is_zero_cpu",
+    "TestNNDeviceTypeCPU.test_threshold_inplace_overlap_cpu",
+    "TestNN.test_overwrite_module_params_on_conversion",
+    "TestNNDeviceTypeCPU.test_triplet_margin_with_distance_loss_cpu",
+    "TestNN.test_upsamplingLinear1d",
+    "TestNN.test_upsampling_bfloat16",
+    "TestNNDeviceTypeCPU.test_upsamplingBiMode2d_antialias_True_align_corners_False_mode_bicubic_memory_format0_cpu",
+    "TestNNDeviceTypeCPU.test_upsamplingBiMode2d_antialias_True_align_corners_False_mode_bicubic_memory_format1_cpu",
+    "TestNNDeviceTypeCPU.test_upsamplingBiMode2d_antialias_True_align_corners_True_mode_bicubic_memory_format0_cpu",
+    "TestNNDeviceTypeCPU.test_upsamplingBiMode2d_antialias_True_align_corners_True_mode_bicubic_memory_format1_cpu",
+    "TestFusionEval.test_fuse_module_eval_numerics",
+    "TestNNDeviceTypeCPU.test_CTCLoss_no_batch_dim_reduction_mean_use_module_form_False_cpu",
+    "TestNNDeviceTypeCPU.test_CTCLoss_no_batch_dim_reduction_none_use_module_form_False_cpu",
+    "TestNNDeviceTypeCPU.test_CTCLoss_no_batch_dim_reduction_sum_use_module_form_False_cpu",
+    "TestNNDeviceTypeCPU.test_GRU_grad_and_gradgrad_cpu_float64",
+    "TestNNDeviceTypeCPU.test_LSTM_grad_and_gradgrad_cpu_float64",
+    "TestNNDeviceTypeCPU.test_batchnorm_grad_cpu",
+    "TestNNDeviceTypeCPU.test_upsamplingTrilinear3d_align_corners_False_cpu",
+    "TestNNDeviceTypeCPU.test_upsamplingTrilinear3d_align_corners_True_cpu",
+    "TestSubclassSerialization.test_tensor_subclass_deepcopy",
+    "TestSubclassSerialization.test_tensor_subclass_getstate_overwrite",
+    "TestSubclassSerialization.test_tensor_subclass_wrapper_serialization",
+    "TestSortAndSelectCPU.test_isin_cpu_int8",
+    "TestSortAndSelectCPU.test_isin_cpu_uint8",
+    "TestSortAndSelectCPU.test_isin_cpu_float32",
+    "TestSortAndSelectCPU.test_isin_cpu_float64",
+    "TestSortAndSelectCPU.test_isin_cpu_int16",
+    "TestSortAndSelectCPU.test_isin_cpu_int32",
+    "TestSortAndSelectCPU.test_isin_cpu_int64",
+    "TestSortAndSelectCPU.test_sort_overflow_cpu_int16",
+    "TestSortAndSelectCPU.test_topk_arguments_cpu",
 }
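The final hunk grows dynamo_expected_failures, a flat set keyed by "TestClass.test_method" strings (the key shape is visible in the entries themselves). A runner can consult such a set with a plain membership check; the sketch below illustrates that idea under stated assumptions and is not PyTorch's actual lookup code. The two sample keys are copied from the hunk; is_expected_dynamo_failure is a hypothetical helper name.

import unittest

# Set shaped like the one in the hunk above, with two sample entries.
dynamo_expected_failures = {
    "TestNN.test_ModuleList",
    "TestSortAndSelectCPU.test_topk_arguments_cpu",
}

def is_expected_dynamo_failure(test: unittest.TestCase) -> bool:
    # unittest exposes the running method's name as _testMethodName.
    key = f"{type(test).__name__}.{test._testMethodName}"
    return key in dynamo_expected_failures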