Remove unnecessary skips in test_dispatch.py (#85557)
The functorch dangling impls have been fixed, I hope CI passes.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/85557
Approved by: https://github.com/ezyang
Committed by: PyTorch MergeBot
Parent: b0eeffdf6f
Commit: 60d98821c5
test/test_dispatch.py

@@ -1,7 +1,7 @@
 # Owner(s): ["module: dispatch"]
 
 import torch._C as C
-from torch.testing._internal.common_utils import TestCase, run_tests, skipIfTorchDynamo
+from torch.testing._internal.common_utils import TestCase, run_tests
 from torch._python_dispatcher import PythonDispatcher
 
 from collections import namedtuple
@@ -763,7 +763,6 @@ CompositeImplicitAutograd[alias] (inactive): fn1 :: (Tensor _0) -> Tensor _0 [ b
     # function but not a def() for it. This is usually a bug, e.g. someone
     # misspelled an operator name, or someone registered an impl for an op that
     # no longer exists
-    @skipIfTorchDynamo("Installing functorch reveals a dangling impl - aten::postive_")
     def test_find_dangling_impls(self):
         dangling_impls = C._dispatch_find_dangling_impls()
         self.assertEqual(
@@ -772,7 +771,6 @@ CompositeImplicitAutograd[alias] (inactive): fn1 :: (Tensor _0) -> Tensor _0 [ b
             msg=f"Expect zero dangling impls, but found: {dangling_impls}"
         )
 
-    @skipIfTorchDynamo("Installing functorch reveals a dangling impl - aten::positive_")
    def test_find_dangling_impls_ext(self):
         extension_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'cpp_extensions', 'dangling_impl_extension.cpp')
         module = torch.utils.cpp_extension.load(
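For context, here is a minimal standalone sketch of the check that the un-skipped tests perform, built around the same C._dispatch_find_dangling_impls() call and assertion message that appear in the diff above. It is illustrative only and is not part of this commit.

# Illustrative sketch (not part of the commit): report any dispatcher impls
# that were registered via impl() without a matching def(), which is what
# test_find_dangling_impls asserts against.
import torch._C as C

dangling_impls = C._dispatch_find_dangling_impls()
assert len(dangling_impls) == 0, (
    f"Expect zero dangling impls, but found: {dangling_impls}"
)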