Add skipLazy marker for tests and use it for tests not working with LazyTensor (#107382)

[This PR](https://github.com/pytorch/pytorch/pull/80251/files#diff-87e1d4e98eab994c977a57be29c716d3dc0f76d5b5e98cbf23cfcbd48ae625a4) marked some tests in `test/test_view_ops.py` with `@onlyNativeDeviceTypes`, because they'd fail if run on the `'lazy'` device type.
However, that marker is overly restrictive, because it prevents all devices outside of the native ones from running those tests.
This PR adds a `@skipLazy` marker (analogous to the existing ones for the other devices), and marks the tests from the mentioned PR so that they're skipped only for the `'lazy'` device type.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/107382
Approved by: https://github.com/ezyang
This commit is contained in:
Manuele Sigona
2023-08-22 22:34:33 +00:00
committed by PyTorch MergeBot
parent 4d13422997
commit a711679527
2 changed files with 16 additions and 6 deletions

View File

@@ -13,7 +13,7 @@ from torch.testing._internal.common_utils import (
numpy_to_torch_dtype_dict, skipIfTorchDynamo
)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, onlyCPU, dtypes, onlyNativeDeviceTypes, skipMeta)
(instantiate_device_type_tests, onlyCPU, dtypes, onlyNativeDeviceTypes, skipLazy, skipMeta, skipXLA)
from torch.testing._internal.common_dtype import (
all_types_and_complex_and, complex_types, all_types_and, floating_and_complex_types_and,
)
@@ -476,7 +476,7 @@ class TestViewOps(TestCase):
self.assertEqual(t[2, 0], v[0])
# Lazy hasn't implemented unbind yet.
@onlyNativeDeviceTypes
@skipLazy
def test_unbind_view(self, device) -> None:
t = torch.zeros((5, 5), device=device)
tup = torch.unbind(t)
@@ -509,7 +509,7 @@ class TestViewOps(TestCase):
# TODO: Fix this test for LTC. There is an interaction with dynamic shapes here that is broken,
# causing asserts to trigger.
@onlyNativeDeviceTypes
@skipLazy
def test_expand_view(self, device) -> None:
t = torch.ones((5, 1), device=device)
v = t.expand(5, 5)
@@ -724,7 +724,7 @@ class TestViewOps(TestCase):
@skipMeta
# self.is_view_of reports false positives for lazy
@onlyNativeDeviceTypes
@skipLazy
def test_contiguous_nonview(self, device):
t = torch.ones(5, 5, device=device)
nv = t.t().contiguous()
@@ -752,7 +752,7 @@ class TestViewOps(TestCase):
@skipMeta
# self.is_view_of reports false positives for lazy
@onlyNativeDeviceTypes
@skipLazy
def test_reshape_nonview(self, device):
t = torch.ones(5, 5, device=device)
nv = torch.reshape(t.t(), (25,))
@@ -763,7 +763,8 @@ class TestViewOps(TestCase):
# This test use as_strided to construct a tensor with overlapping memory,
# which is not handled by the functionalization pass.
@onlyNativeDeviceTypes
@skipLazy
@skipXLA
def test_flatten_view(self, device):
def test_writes_propagate(t, v):
idx_t = (0,) * t.ndim