Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 12:54:11 +08:00
unMarkDynamoStrictTest on OpInfo-based tests (#115856)
These take too long to run under strict mode. We'll worry about them later. Note that these decorators don't do anything yet (unless we flip the default from non-strict to strict).

Pull Request resolved: https://github.com/pytorch/pytorch/pull/115856
Approved by: https://github.com/voznesenskym
ghstack dependencies: #115845, #115855
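For illustration, a minimal sketch of how a strict-mode marker decorator of this kind could work; the attribute name dynamo_strict and the harness function run_class_under_dynamo below are assumptions made for this sketch, not the actual implementation in torch.testing._internal.common_utils:

# Minimal sketch, assuming a class-attribute-based marker; the names
# `dynamo_strict` and `run_class_under_dynamo` are hypothetical.
def markDynamoStrictTest(cls):
    # Opt a test class in to strict-mode Dynamo compilation.
    cls.dynamo_strict = True
    return cls

def unMarkDynamoStrictTest(cls):
    # Opt a test class out of strict mode (only matters once strict is the default).
    cls.dynamo_strict = False
    return cls

def run_class_under_dynamo(test_cls, default_strict=False):
    # While the default stays non-strict, @unMarkDynamoStrictTest is a no-op:
    # getattr falls back to the same value the decorator would have set.
    return getattr(test_cls, "dynamo_strict", default_strict)

With default_strict=False, decorating a class changes nothing, which matches the note above that these decorators do nothing until the default flips from non-strict to strict.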
@@ -9,11 +9,13 @@ from torch.testing._internal.control_flow_opinfo_db import control_flow_opinfo_db
 from torch.testing._internal.custom_op_db import custom_op_db
 from torch.testing._internal.common_device_type import \
     (instantiate_device_type_tests, ops, OpDTypes)
+from torch.testing._internal.common_utils import unMarkDynamoStrictTest
 
 # gradcheck requires double precision
 _gradcheck_ops = partial(ops, dtypes=OpDTypes.supported,
                          allowed_dtypes=[torch.double, torch.cdouble])
 
+@unMarkDynamoStrictTest
 class TestBwdGradients(TestGradients):
     # Tests that gradients are computed correctly
     @_gradcheck_ops(op_db + control_flow_opinfo_db + custom_op_db)
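For orientation, a hedged sketch of how the decorated class above is typically driven; the method name test_fn_grad and the gradcheck body are illustrative assumptions, not lines from this commit, and the sketch relies on the imports already shown in the diff:

# Illustrative sketch only, not the file's actual test body.
from torch.autograd import gradcheck

@unMarkDynamoStrictTest
class TestBwdGradients(TestGradients):
    # `ops`-based decorators parametrize the method over (device, dtype, op);
    # the allowed_dtypes filter on _gradcheck_ops restricts it to double precision.
    @_gradcheck_ops(op_db + control_flow_opinfo_db + custom_op_db)
    def test_fn_grad(self, device, dtype, op):  # method name is illustrative
        for sample in op.sample_inputs(device, dtype, requires_grad=True):
            gradcheck(op.get_op(), (sample.input, *sample.args))

# Expands the class into per-device variants (e.g. TestBwdGradientsCPU)
# in this module's namespace.
instantiate_device_type_tests(TestBwdGradients, globals())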