Add a basic shim and stable::Tensor is_contiguous API (#156228)
Adds a limited is_contiguous to the shim and to the stable::Tensor API, with a test case.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/156228
Approved by: https://github.com/desertfire
committed by PyTorch MergeBot
parent 49ee1e7106
commit 55dae0bf7a
@@ -237,3 +237,20 @@ STABLE_TORCH_LIBRARY_FRAGMENT(libtorch_agnostic, m) {
 STABLE_TORCH_LIBRARY_IMPL(libtorch_agnostic, CompositeExplicitAutograd, m) {
   m.impl("divide_neg_exp", &boxed_divide_neg_exp);
 }
+
+bool is_contiguous(Tensor t) {
+  return t.is_contiguous();
+}
+
+void boxed_is_contiguous(StableIValue* stack, uint64_t num_args, uint64_t num_outputs) {
+  bool res = is_contiguous(to<Tensor>(stack[0]));
+  stack[0] = from(res);
+}
+
+STABLE_TORCH_LIBRARY_FRAGMENT(libtorch_agnostic, m) {
+  m.def("is_contiguous(Tensor t) -> bool");
+}
+
+STABLE_TORCH_LIBRARY_IMPL(libtorch_agnostic, CompositeExplicitAutograd, m) {
+  m.impl("is_contiguous", &boxed_is_contiguous);
+}
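The hunk above is the full recipe for exposing a C++ function through the stable ABI: a plain function, a boxed wrapper that unboxes arguments from the StableIValue stack and boxes the result back onto it, a schema registration, and a kernel registration. Purely as an illustration, here is a minimal sketch of a second op following the identical pattern; is_noncontiguous and boxed_is_noncontiguous are hypothetical names, and the sketch assumes the same headers and using-declarations already present in this file (Tensor, StableIValue, to<>, from, and the STABLE_TORCH_LIBRARY_* macros all appear in the diff above).

// Hypothetical companion op, shown only to illustrate the boxed convention.
bool is_noncontiguous(Tensor t) {
  return !t.is_contiguous();
}

// Boxed calling convention: the caller passes num_args StableIValues on the
// stack; the kernel unboxes them, computes, and writes num_outputs boxed
// results back starting at stack[0].
void boxed_is_noncontiguous(StableIValue* stack, uint64_t num_args, uint64_t num_outputs) {
  bool res = is_noncontiguous(to<Tensor>(stack[0]));
  stack[0] = from(res);
}

STABLE_TORCH_LIBRARY_FRAGMENT(libtorch_agnostic, m) {
  m.def("is_noncontiguous(Tensor t) -> bool");
}

STABLE_TORCH_LIBRARY_IMPL(libtorch_agnostic, CompositeExplicitAutograd, m) {
  m.impl("is_noncontiguous", &boxed_is_noncontiguous);
}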
@@ -104,3 +104,15 @@ def divide_neg_exp(t) -> Tensor:
     Returns: divide(neg(t), exp(t))
     """
     return torch.ops.libtorch_agnostic.divide_neg_exp.default(t)
+
+
+def is_contiguous(t) -> bool:
+    """
+    Returns a bool indicating if the input tensor is contiguous
+
+    Args:
+        t: Tensor
+
+    Returns: is_contiguous(t)
+    """
+    return torch.ops.libtorch_agnostic.is_contiguous.default(t)
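For callers, the wrapper above is thin sugar over the dispatcher entry created by the C++ registration in the previous hunk. A minimal sketch of both call forms, assuming the libtorch_agnostic test extension has been built and is importable:

import torch
import libtorch_agnostic  # importing the extension runs the registrations

t = torch.rand(2, 7)

# Python wrapper defined above:
print(libtorch_agnostic.ops.is_contiguous(t))                # True

# Equivalent direct dispatcher call; .default selects the sole overload
# of the "is_contiguous(Tensor t) -> bool" schema:
print(torch.ops.libtorch_agnostic.is_contiguous.default(t))  # True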
@@ -142,6 +142,13 @@ if not IS_WINDOWS:
             curr_mem = torch.cuda.memory_allocated(device)
             self.assertEqual(curr_mem, init_mem)
 
+        def test_is_contiguous(self, device):
+            import libtorch_agnostic
+
+            t = torch.rand(2, 7, device=device)
+            self.assertTrue(libtorch_agnostic.ops.is_contiguous(t))
+            self.assertFalse(libtorch_agnostic.ops.is_contiguous(t.transpose(0, 1)))
+
         # TODO: Debug this:
         # torch._dynamo.exc.TorchRuntimeError: Dynamo failed to run FX node with fake tensors:
         # call_function libtorch_agnostic.my_ones_like.default(*(FakeTensor(..., size=(3, 1)), 'cpu'),
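For context on the second assertion in the test: transpose returns a view that swaps strides without moving any data, so the result no longer walks memory in row-major order and is therefore not contiguous. A quick illustration using only stock PyTorch (no extension needed):

import torch

t = torch.rand(2, 7)
print(t.stride(), t.is_contiguous())    # (7, 1) True: row-major layout
tt = t.transpose(0, 1)
print(tt.stride(), tt.is_contiguous())  # (1, 7) False: same storage, swapped strides
print(tt.contiguous().is_contiguous())  # True: .contiguous() materializes a copy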