unMarkDynamoStrictTest on OpInfo-based tests (#115856)

These take too long to run under strict mode. We'll worry about them
later. Note that these decorators don't do anything yet (unless we flip
the default from non-strict to strict).
Pull Request resolved: https://github.com/pytorch/pytorch/pull/115856
Approved by: https://github.com/voznesenskym
ghstack dependencies: #115845, #115855
This commit is contained in:
rzou
2023-12-14 10:59:59 -08:00
committed by PyTorch MergeBot
parent 0722ce35f5
commit 3477a2ee03
8 changed files with 21 additions and 1 deletion

View File

@ -9,11 +9,13 @@ from torch.testing._internal.control_flow_opinfo_db import control_flow_opinfo_d
from torch.testing._internal.custom_op_db import custom_op_db
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, OpDTypes)
from torch.testing._internal.common_utils import unMarkDynamoStrictTest
# gradcheck requires double precision
# Pre-bind the `ops` device-type decorator so gradient-check tests only run
# on operators whose supported dtypes include double/cdouble — numerical
# gradcheck needs float64 precision to keep finite-difference error tolerable.
_gradcheck_ops = partial(ops, dtypes=OpDTypes.supported,
allowed_dtypes=[torch.double, torch.cdouble])
@unMarkDynamoStrictTest
class TestBwdGradients(TestGradients):
# Tests that gradients are computed correctly
@_gradcheck_ops(op_db + control_flow_opinfo_db + custom_op_db)