remove more no longer needed torch._check_is_size calls 1 (#164630)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/164630
Approved by: https://github.com/Skylion007
ghstack dependencies: #164627
Committed by: PyTorch MergeBot
Parent: 8c728e129d
Commit: 5ed4270440
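For context, torch._check_is_size(u) asserts that a data-dependent integer (typically produced by .item() or .tolist()) is a valid, non-negative size and marks it as size-like for the symbolic-shape machinery. The tests below used it defensively around such values; this PR drops the calls that are no longer required. A minimal sketch of the pattern being removed, with an illustrative helper name that is not taken from the PR:

import torch

# Sketch only: make_buffer is an illustrative name, not code from this PR.
def make_buffer(n_tensor: torch.Tensor) -> torch.Tensor:
    n = n_tensor.item()          # becomes an unbacked SymInt when traced/compiled
    # torch._check_is_size(n)    # the kind of call this PR removes from tests
    return torch.empty(n)        # n flows into a tensor size

print(make_buffer(torch.tensor(3)).shape)  # torch.Size([3])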
@@ -1121,10 +1121,8 @@ class CompileTest(TestCase):
     @unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
     @fresh_cache()
     def test_inductor_all_to_all_single(self):
-        def _tolist_with_constrain_as_size(tensor):
+        def _tolist(tensor):
             lst = tensor.tolist()
-            for elem in lst:
-                torch._check_is_size(elem)
             return lst

         def func(
@@ -1134,8 +1132,8 @@ class CompileTest(TestCase):
         ) -> torch.Tensor:
             output = funcol.all_to_all_single(
                 input,
-                _tolist_with_constrain_as_size(output_split_sizes),
-                _tolist_with_constrain_as_size(input_split_sizes),
+                _tolist(output_split_sizes),
+                _tolist(input_split_sizes),
                 "0",
             )
             return funcol.wait_tensor(output)
@@ -53,10 +53,8 @@ from torch.testing._internal.inductor_utils import HAS_GPU
 from torch.utils._python_dispatch import TorchDispatchMode


-def _tolist_with_constrain_as_size(tensor):
+def _tolist(tensor):
     lst = tensor.tolist()
-    for elem in lst:
-        torch._check_is_size(elem)
     return lst


@@ -537,10 +535,8 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
             ranks,
             group_size,
         ):
-            input_split_sizes = _tolist_with_constrain_as_size(input_split_sizes_tensor)
-            output_split_sizes = _tolist_with_constrain_as_size(
-                output_split_sizes_tensor
-            )
+            input_split_sizes = _tolist(input_split_sizes_tensor)
+            output_split_sizes = _tolist(output_split_sizes_tensor)
             a2a = torch.ops.c10d_functional.all_to_all_single(
                 inp,
                 output_split_sizes,
@@ -700,10 +696,8 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
             ranks,
             group_size,
         ):
-            input_split_sizes = _tolist_with_constrain_as_size(input_split_sizes_tensor)
-            output_split_sizes = _tolist_with_constrain_as_size(
-                output_split_sizes_tensor
-            )
+            input_split_sizes = _tolist(input_split_sizes_tensor)
+            output_split_sizes = _tolist(output_split_sizes_tensor)
             a2a = torch.ops.custom_ns.alltoall_autograd.default(
                 inp,
                 output_split_sizes,
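The call sites above all follow the same shape: split sizes arrive as tensors, are converted to Python ints with tolist(), and are handed to an all-to-all collective. A rough sketch of that pattern, assuming an already-initialized process group; it is not runnable stand-alone, and the group name "0" simply mirrors the tests above:

import torch
import torch.distributed._functional_collectives as funcol

def all_to_all(inp, output_split_sizes_tensor, input_split_sizes_tensor):
    # Under torch.compile the tolist() results become unbacked ints; after this
    # PR no per-element torch._check_is_size is applied to them.
    out = funcol.all_to_all_single(
        inp,
        output_split_sizes_tensor.tolist(),
        input_split_sizes_tensor.tolist(),
        "0",  # group name, as in the tests above
    )
    return funcol.wait_tensor(out)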
@@ -2754,7 +2754,6 @@ def forward(self, x):
     def test_exported_graph_serialization(self):
         def f(x, y):
             b = x.item()
-            torch._check_is_size(b)
             return torch.empty((b, y.shape[0]))

         x = torch.tensor([3])
@@ -4669,7 +4668,6 @@ class ExportTestsDevice(torch._dynamo.test_case.TestCase):
         class MyModel(torch.nn.Module):
             def forward(self, numel, scalar):
                 u0 = numel.item()
-                torch._check_is_size(u0)
                 x = torch.ones(u0 + 1)
                 return scalar - x

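The export test above follows the same pattern, with the unbacked value flowing into torch.ones(u0 + 1). A hedged sketch of exporting such a module; the export entry point the test actually uses is outside the diff context shown, so torch.export.export below is an assumption:

import torch

class MyModel(torch.nn.Module):
    def forward(self, numel, scalar):
        u0 = numel.item()        # unbacked, data-dependent value
        x = torch.ones(u0 + 1)   # used as a size without _check_is_size
        return scalar - x

# Assumption: torch.export.export as the export path; the test's own call is
# not visible in the hunk above.
ep = torch.export.export(MyModel(), (torch.tensor(4), torch.tensor(1.0)))
print(ep)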
@@ -1128,9 +1128,6 @@ def forward(self, y_1, x_1):
             a = _a.item()
             b = _b.item()
             stride = _stride.item()
-            torch._check_is_size(a)
-            torch._check_is_size(b)
-            torch._check_is_size(stride)
             ta = torch.randn(a * stride)
             tb = torch.randn(b * stride)
             r = torch.cat([ta, tb])
@@ -1476,9 +1473,9 @@ def forward(self, x_1, y_1):
         # See https://github.com/pytorch/pytorch/issues/123651
         def f(x):
             i0 = x.item()
-            torch._check_is_size(i0)
             # To trigger the original issue, the max bound has to
             # be chosen such that 448 / 447 < 2 (which it is.)
+            torch._check(i0 > 0)
             torch._check(i0 <= 448)
             return torch.zeros(256 * i0).view(-1, 447)
         make_fx(f, tracing_mode="symbolic")(torch.tensor(256 * 447, device="cuda"))
@@ -1559,9 +1556,6 @@ def forward(self, x_1, y_1):
         def f(lengths, values):
             # tolist not directly supported atm
             sizes = [lengths[i].item() for i in range(lengths.size(0))]
-            for s in sizes:
-                # TODO(avik): no assertion generated with torch._check_is_size?
-                torch._constrain_as_size(s)
             return torch.split(values, sizes)

         r = str(make_fx(f, tracing_mode="symbolic")(
@@ -1576,9 +1570,6 @@ def forward(self, lengths_1, values_1):
     _local_scalar_dense_1 = torch.ops.aten._local_scalar_dense.default(select_1); select_1 = None
     select_2 = torch.ops.aten.select.int(lengths_1, 0, 2); lengths_1 = None
     _local_scalar_dense_2 = torch.ops.aten._local_scalar_dense.default(select_2); select_2 = None
-    sym_constrain_range_for_size = torch.ops.aten.sym_constrain_range_for_size.default(_local_scalar_dense); sym_constrain_range_for_size = None
-    sym_constrain_range_for_size_1 = torch.ops.aten.sym_constrain_range_for_size.default(_local_scalar_dense_1); sym_constrain_range_for_size_1 = None
-    sym_constrain_range_for_size_2 = torch.ops.aten.sym_constrain_range_for_size.default(_local_scalar_dense_2); sym_constrain_range_for_size_2 = None
     split_with_sizes = torch.ops.aten.split_with_sizes.default(values_1, [_local_scalar_dense, _local_scalar_dense_1, _local_scalar_dense_2]); values_1 = _local_scalar_dense = _local_scalar_dense_1 = _local_scalar_dense_2 = None
     getitem = split_with_sizes[0]
     getitem_1 = split_with_sizes[1]
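Putting the last two hunks together: the test traces a data-dependent split with make_fx in symbolic mode, and after this change the expected graph keeps the _local_scalar_dense nodes produced by .item() but no longer contains sym_constrain_range_for_size nodes. A small consolidated sketch of that test pattern; the input values are illustrative, and the split sizes must sum to the length of values:

import torch
from torch.fx.experimental.proxy_tensor import make_fx

def f(lengths, values):
    # tolist not directly supported atm, so take item() per element (as in the test)
    sizes = [lengths[i].item() for i in range(lengths.size(0))]
    return torch.split(values, sizes)

gm = make_fx(f, tracing_mode="symbolic")(torch.tensor([2, 3, 5]), torch.randn(10))
# Expect _local_scalar_dense and split_with_sizes in gm.code, with no
# sym_constrain_range_for_size nodes.
print(gm.code)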