mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-20 21:14:14 +08:00
Add shim.h C API to call dispatcher on our own aten ops (#148832)
This PR still needs testing through a C++ extension test. Pull Request resolved: https://github.com/pytorch/pytorch/pull/148832 Approved by: https://github.com/albanD, https://github.com/atalman ghstack dependencies: #148124
This commit is contained in:
committed by
PyTorch MergeBot
parent
cf19efd3d9
commit
e6ef0620cc
@ -270,6 +270,23 @@ class TestCppExtensionAOT(common.TestCase):
|
||||
curr_mem = torch.cuda.memory_allocated(device)
|
||||
self.assertEqual(curr_mem, init_mem)
|
||||
|
||||
# (3) test calling our dispatcher on ones_like
|
||||
t = torch.rand(32, 16, device=device)
|
||||
cpu_t = libtorch_agnostic.ops.my_abs(t)
|
||||
self.assertEqual(cpu_t, torch.abs(t))
|
||||
|
||||
def _make_cuda_tensors(prior_mem):
    # Run the dispatcher-backed abs op on the closed-over CUDA tensor `t`,
    # check that device memory grew past `prior_mem` (i.e. a fresh CUDA
    # tensor was actually allocated), and check the result against torch.abs.
    # NOTE(review): relies on `t`, `device`, `self`, and `libtorch_agnostic`
    # from the enclosing test method.
    result = libtorch_agnostic.ops.my_abs(t)
    allocated_now = torch.cuda.memory_allocated(device)
    self.assertGreater(allocated_now, prior_mem)
    self.assertEqual(result, torch.abs(t))
|
||||
|
||||
if t.is_cuda:
|
||||
init_mem = torch.cuda.memory_allocated(device)
|
||||
for _ in range(3):
|
||||
_make_cuda_tensors(init_mem)
|
||||
curr_mem = torch.cuda.memory_allocated(device)
|
||||
self.assertEqual(curr_mem, init_mem)
|
||||
|
||||
|
||||
@torch.testing._internal.common_utils.markDynamoStrictTest
|
||||
class TestPybindTypeCasters(common.TestCase):
|
||||
|
Reference in New Issue
Block a user