Use OpOverload instead of OpOverloadPacket for size/stride/etc slots (#112119)

Signed-off-by: Edward Z. Yang <ezyang@meta.com>

Pull Request resolved: https://github.com/pytorch/pytorch/pull/112119
Approved by: https://github.com/yanboliang
This commit is contained in:
Edward Z. Yang
2023-10-27 07:29:36 -07:00
committed by PyTorch MergeBot
parent ab20bab729
commit dd24e92949
9 changed files with 66 additions and 67 deletions

View File

@@ -2096,9 +2096,9 @@ $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), p
fx_g = make_fx(trace_fn, tracing_mode="symbolic")(x)
self.assertExpectedInline(fx_g.code.strip(), """\
def forward(self, x_1):
-        sym_size = torch.ops.aten.sym_size(x_1, 0)
-        sym_size_1 = torch.ops.aten.sym_size(x_1, 1); x_1 = None
-        return ((sym_size, sym_size_1), (sym_size, sym_size_1))""")
+        sym_size_int = torch.ops.aten.sym_size.int(x_1, 0)
+        sym_size_int_1 = torch.ops.aten.sym_size.int(x_1, 1); x_1 = None
+        return ((sym_size_int, sym_size_int_1), (sym_size_int, sym_size_int_1))""")
def test_data_ptr_respects_numel_slow_path(self):
data = torch.randn(6, 2)