deprecate check_is_size and guard_size_oblivious (#167198)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/167198
Approved by: https://github.com/bobrenjc93
Committed by: PyTorch MergeBot
Parent: aa504d4d2a
Commit: f2e6f94081
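Note on the migration itself: below is a minimal sketch of the replacements this PR points to. The helper names old_style/new_style are illustrative only, and the choice between guard_or_false and guard_or_true depends on which fallback a given call site wants when the expression contains unbacked symbols.

import torch
from torch.fx.experimental.symbolic_shapes import guard_or_false

def old_style(u0, cond):
    torch._check_is_size(u0)          # deprecated by this PR
    # if guard_size_oblivious(cond):  # deprecated by this PR
    #     ...

def new_style(u0, cond):
    torch._check(u0 >= 0)             # the suggested direct replacement
    if guard_or_false(cond):          # explicitly assume False when undecidable
        ...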
@@ -90,12 +90,12 @@ class GraphModule(torch.nn.Module):
             """\
 class GraphModule(torch.nn.Module):
     def forward(self, primals_1: "Sym(u0)", primals_2: "Sym(u1)", primals_3: "Sym(u2)", primals_4: "f32[u0, u1, u2]"):
-        ge_1: "Sym(u0 >= 0)" = primals_1 >= 0
-        _assert_scalar = torch.ops.aten._assert_scalar.default(ge_1, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge_1 = _assert_scalar = None
-        ge_3: "Sym(u1 >= 0)" = primals_2 >= 0
-        _assert_scalar_1 = torch.ops.aten._assert_scalar.default(ge_3, "Runtime assertion failed for expression u1 >= 0 on node 'ge_1'"); ge_3 = _assert_scalar_1 = None
-        ge_5: "Sym(u2 >= 0)" = primals_3 >= 0
-        _assert_scalar_2 = torch.ops.aten._assert_scalar.default(ge_5, "Runtime assertion failed for expression u2 >= 0 on node 'ge_2'"); ge_5 = _assert_scalar_2 = None
+        ge: "Sym(u0 >= 0)" = primals_1 >= 0
+        _assert_scalar = torch.ops.aten._assert_scalar.default(ge, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge = _assert_scalar = None
+        ge_1: "Sym(u1 >= 0)" = primals_2 >= 0
+        _assert_scalar_1 = torch.ops.aten._assert_scalar.default(ge_1, "Runtime assertion failed for expression u1 >= 0 on node 'ge_1'"); ge_1 = _assert_scalar_1 = None
+        ge_2: "Sym(u2 >= 0)" = primals_3 >= 0
+        _assert_scalar_2 = torch.ops.aten._assert_scalar.default(ge_2, "Runtime assertion failed for expression u2 >= 0 on node 'ge_2'"); ge_2 = _assert_scalar_2 = None
 
         floordiv: "Sym((u0//2))" = primals_1 // 2
 
@@ -727,7 +727,7 @@ class GraphModule(torch.nn.Module):
         x = torch.randn(3)
         arg_count = ifdynstaticdefault(4, 5)
         # when compiled with dynamic, we don't have upper bound runtime assertions for u0
-        expected_op_count = ifdynstaticdefault(10, 8)
+        expected_op_count = ifdynstaticdefault(9, 7)
         out_graph = self._test_wrap_simple(
             f,
             default_args_generator((x,)),
@@ -747,7 +747,6 @@ class GraphModule(torch.nn.Module):
         c: "i64[u0, 1]" = l_x_.nonzero()
 
         sym_size_int_1: "Sym(u0)" = torch.ops.aten.sym_size.int(c, 0)
-        _check_is_size = torch._check_is_size(sym_size_int_1); _check_is_size = None
 
         ge: "Sym(u0 >= 0)" = sym_size_int_1 >= 0
         _assert_scalar_default = torch.ops.aten._assert_scalar.default(ge, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge = _assert_scalar_default = None
@@ -784,7 +783,6 @@ class GraphModule(torch.nn.Module):
         c: "i64[u0, 1]" = l_x_.nonzero()
 
         sym_size_int_1: "Sym(u0)" = torch.ops.aten.sym_size.int(c, 0)
-        _check_is_size = torch._check_is_size(sym_size_int_1); _check_is_size = None
 
         ge: "Sym(u0 >= 0)" = sym_size_int_1 >= 0
         _assert_scalar_default = torch.ops.aten._assert_scalar.default(ge, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge = _assert_scalar_default = None
@@ -883,7 +881,7 @@ class GraphModule(torch.nn.Module):
         x = torch.randn(3)
         arg_count = ifdynstaticdefault(4, 5)
         # when compiled with dynamic, we don't have upper bound runtime assertions for u0
-        expected_op_count = ifdynstaticdefault(10, 8)
+        expected_op_count = ifdynstaticdefault(9, 7)
         out_graph = self._test_wrap_simple(
             f,
             default_args_generator((x,)),
@@ -905,7 +903,6 @@ class GraphModule(torch.nn.Module):
         c: "i64[u0, 1]" = l_x_.nonzero()
 
         sym_size_int: "Sym(u0)" = torch.ops.aten.sym_size.int(c, 0)
-        _check_is_size = torch._check_is_size(sym_size_int); _check_is_size = None
 
         ge: "Sym(u0 >= 0)" = sym_size_int >= 0
         _assert_scalar_default = torch.ops.aten._assert_scalar.default(ge, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge = _assert_scalar_default = None
@@ -956,7 +953,7 @@ class GraphModule(torch.nn.Module):
         y = torch.randn(3)
         arg_count = ifdynstaticdefault(5, 6)
         # when compiled with dynamic, we don't have upper bound runtime assertions for u0 and u1
-        expected_op_count = ifdynstaticdefault(17, 13)
+        expected_op_count = ifdynstaticdefault(15, 11)
         out_graph = self._test_wrap_simple(
             f,
             default_args_generator((x, y)),
@@ -977,7 +974,6 @@ class GraphModule(torch.nn.Module):
         c: "i64[u0, 1]" = l_x_.nonzero()
 
         sym_size_int_2: "Sym(u0)" = torch.ops.aten.sym_size.int(c, 0)
-        _check_is_size = torch._check_is_size(sym_size_int_2); _check_is_size = None
 
         ge: "Sym(u0 >= 0)" = sym_size_int_2 >= 0
         _assert_scalar_default = torch.ops.aten._assert_scalar.default(ge, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge = _assert_scalar_default = None
@@ -987,7 +983,6 @@ class GraphModule(torch.nn.Module):
         d: "i64[u1, 1]" = l_y_.nonzero(); l_y_ = None
 
         sym_size_int_3: "Sym(u1)" = torch.ops.aten.sym_size.int(d, 0)
-        _check_is_size_1 = torch._check_is_size(sym_size_int_3); _check_is_size_1 = None
 
         ge_1: "Sym(u1 >= 0)" = sym_size_int_3 >= 0
         _assert_scalar_default_2 = torch.ops.aten._assert_scalar.default(ge_1, "Runtime assertion failed for expression u1 >= 0 on node 'ge_1'"); ge_1 = _assert_scalar_default_2 = None
@@ -3081,15 +3081,12 @@ def forward(self, x, y):
     foo = torch.ops.export.foo.default(x, y); x = None
     sym_size_int = torch.ops.aten.sym_size.int(foo, 0)
     sym_size_int_1 = torch.ops.aten.sym_size.int(foo, 1)
-    sym_constrain_range_for_size_default = torch.ops.aten.sym_constrain_range_for_size.default(sym_size_int); sym_constrain_range_for_size_default = None
     ge = sym_size_int >= 0; sym_size_int = None
     _assert_scalar_default = torch.ops.aten._assert_scalar.default(ge, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge = _assert_scalar_default = None
-    sym_constrain_range_for_size_default_1 = torch.ops.aten.sym_constrain_range_for_size.default(sym_size_int_1); sym_constrain_range_for_size_default_1 = None
     ge_1 = sym_size_int_1 >= 0; sym_size_int_1 = None
     _assert_scalar_default_1 = torch.ops.aten._assert_scalar.default(ge_1, "Runtime assertion failed for expression u1 >= 0 on node 'ge_1'"); ge_1 = _assert_scalar_default_1 = None
     bar = torch.ops.export.bar.default(y); y = None
     sym_size_int_2 = torch.ops.aten.sym_size.int(bar, 0)
-    sym_constrain_range_for_size_default_2 = torch.ops.aten.sym_constrain_range_for_size.default(sym_size_int_2); sym_constrain_range_for_size_default_2 = None
     ge_2 = sym_size_int_2 >= 0; sym_size_int_2 = None
     _assert_scalar_default_2 = torch.ops.aten._assert_scalar.default(ge_2, "Runtime assertion failed for expression u2 >= 0 on node 'ge_2'"); ge_2 = _assert_scalar_default_2 = None
     return (foo, bar)""",
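For context, graphs like the one above come from exporting ops with data-dependent output shapes. A hedged repro sketch using aten.nonzero in place of the test-local export.foo/export.bar ops (the printed graph text varies by version):

import torch

class M(torch.nn.Module):
    def forward(self, x):
        # nonzero has a data-dependent output size, so export binds it to an
        # unbacked symbol (u0) and inserts runtime asserts like those above.
        return x.nonzero()

ep = torch.export.export(M(), (torch.randn(8),))
print(ep.graph_module.code)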
@@ -17743,7 +17740,6 @@ class TestExportCustomClass(TorchTestCase):
 def forward(self, x, mask):
     masked_select = torch.ops.aten.masked_select.default(x, mask); x = mask = None
     sym_size_int_1 = torch.ops.aten.sym_size.int(masked_select, 0)
-    sym_constrain_range_for_size_default = torch.ops.aten.sym_constrain_range_for_size.default(sym_size_int_1); sym_constrain_range_for_size_default = None
     ge = sym_size_int_1 >= 0
     _assert_scalar_default = torch.ops.aten._assert_scalar.default(ge, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge = _assert_scalar_default = None
     le = sym_size_int_1 <= 1188864
@@ -1492,8 +1492,8 @@ def forward(self, arg0_1: "Sym(s77)", arg1_1: "f32[s77][1]cpu"):
     clone: "f32[s77][1]cpu" = torch.ops.aten.clone.default(arg1_1)
     nonzero: "i64[u0, 1][1, u0]cpu" = torch.ops.aten.nonzero.default(clone); clone = None
     sym_size_int_1: "Sym(u0)" = torch.ops.aten.sym_size.int(nonzero, 0)
-    ge_1: "Sym(u0 >= 0)" = sym_size_int_1 >= 0; sym_size_int_1 = None
-    _assert_scalar = torch.ops.aten._assert_scalar.default(ge_1, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge_1 = _assert_scalar = None
+    ge: "Sym(u0 >= 0)" = sym_size_int_1 >= 0; sym_size_int_1 = None
+    _assert_scalar = torch.ops.aten._assert_scalar.default(ge, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge = _assert_scalar = None
     _to_copy: "f32[u0, 1][1, u0]cpu" = torch.ops.aten._to_copy.default(nonzero, dtype = torch.float32); nonzero = None
     auto_functionalized_v2 = torch.ops.higher_order.auto_functionalized_v2(torch.ops.mylib.foo.default, _x_base_index = 0, _x_alias = True, _y_base_index = 1, _y_alias = True, _all_bases = [arg1_1, _to_copy]); _to_copy = None
     getitem_1: "f32[s77][1]cpu" = auto_functionalized_v2[1]
@@ -1513,8 +1513,8 @@ def forward(self, arg0_1: "f32[2][1]cpu"):
     clone: "f32[2][1]cpu" = torch.ops.aten.clone.default(arg0_1)
     nonzero: "i64[u0, 1][1, u0]cpu" = torch.ops.aten.nonzero.default(clone); clone = None
     sym_size_int: "Sym(u0)" = torch.ops.aten.sym_size.int(nonzero, 0)
-    ge_1: "Sym(u0 >= 0)" = sym_size_int >= 0
-    _assert_scalar = torch.ops.aten._assert_scalar.default(ge_1, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge_1 = _assert_scalar = None
+    ge: "Sym(u0 >= 0)" = sym_size_int >= 0
+    _assert_scalar = torch.ops.aten._assert_scalar.default(ge, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge = _assert_scalar = None
     le: "Sym(u0 <= 2)" = sym_size_int <= 2; sym_size_int = None
     _assert_scalar_1 = torch.ops.aten._assert_scalar.default(le, "Runtime assertion failed for expression u0 <= 2 on node 'le'"); le = _assert_scalar_1 = None
     _to_copy: "f32[u0, 1][1, u0]cpu" = torch.ops.aten._to_copy.default(nonzero, dtype = torch.float32); nonzero = None
@@ -1538,8 +1538,8 @@ def forward(self, arg0_1: "f32[2][1]cpu"):
 def forward(self, arg0_1: "Sym(s77)", arg1_1: "f32[s77][1]cpu"):
     nonzero: "i64[u0, 1][1, u0]cpu" = torch.ops.aten.nonzero.default(arg1_1)
     sym_size_int_1: "Sym(u0)" = torch.ops.aten.sym_size.int(nonzero, 0)
-    ge_1: "Sym(u0 >= 0)" = sym_size_int_1 >= 0; sym_size_int_1 = None
-    _assert_scalar = torch.ops.aten._assert_scalar.default(ge_1, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge_1 = _assert_scalar = None
+    ge: "Sym(u0 >= 0)" = sym_size_int_1 >= 0; sym_size_int_1 = None
+    _assert_scalar = torch.ops.aten._assert_scalar.default(ge, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge = _assert_scalar = None
     convert_element_type: "f32[u0, 1][1, u0]cpu" = torch.ops.prims.convert_element_type.default(nonzero, torch.float32); nonzero = None
     alias_default: "f32[s77][1]cpu" = torch.ops.aten.alias.default(arg1_1)
     alias_default_1: "f32[u0, 1][1, u0]cpu" = torch.ops.aten.alias.default(convert_element_type)
@@ -1557,8 +1557,8 @@ def forward(self, arg0_1: "Sym(s77)", arg1_1: "f32[s77][1]cpu"):
 def forward(self, arg0_1: "f32[2][1]cpu"):
     nonzero: "i64[u0, 1][1, u0]cpu" = torch.ops.aten.nonzero.default(arg0_1)
     sym_size_int: "Sym(u0)" = torch.ops.aten.sym_size.int(nonzero, 0)
-    ge_1: "Sym(u0 >= 0)" = sym_size_int >= 0
-    _assert_scalar = torch.ops.aten._assert_scalar.default(ge_1, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge_1 = _assert_scalar = None
+    ge: "Sym(u0 >= 0)" = sym_size_int >= 0
+    _assert_scalar = torch.ops.aten._assert_scalar.default(ge, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge = _assert_scalar = None
     le: "Sym(u0 <= 2)" = sym_size_int <= 2; sym_size_int = None
     _assert_scalar_1 = torch.ops.aten._assert_scalar.default(le, "Runtime assertion failed for expression u0 <= 2 on node 'le'"); le = _assert_scalar_1 = None
     convert_element_type: "f32[u0, 1][1, u0]cpu" = torch.ops.prims.convert_element_type.default(nonzero, torch.float32); nonzero = None
@@ -3532,11 +3532,11 @@ class TestUbackedOps(TestCase):
             aot_graphs,
             """\
 def forward(self, arg0_1: "i64[1][1]cpu", arg1_1: "Sym(u1)", arg2_1: "Sym(s7)", arg3_1: "i64[u1][s7]cpu"):
-    ge_1: "Sym(u1 >= 0)" = arg1_1 >= 0
-    _assert_scalar = torch.ops.aten._assert_scalar.default(ge_1, "Runtime assertion failed for expression u1 >= 0 on node 'ge'"); ge_1 = _assert_scalar = None
+    ge: "Sym(u1 >= 0)" = arg1_1 >= 0
+    _assert_scalar = torch.ops.aten._assert_scalar.default(ge, "Runtime assertion failed for expression u1 >= 0 on node 'ge'"); ge = _assert_scalar = None
     _local_scalar_dense: "Sym(u0)" = torch.ops.aten._local_scalar_dense.default(arg0_1); arg0_1 = None
-    ge_2: "Sym(u0 >= 0)" = _local_scalar_dense >= 0
-    _assert_scalar_1 = torch.ops.aten._assert_scalar.default(ge_2, "Runtime assertion failed for expression u0 >= 0 on node 'ge_1'"); ge_2 = _assert_scalar_1 = None
+    ge_1: "Sym(u0 >= 0)" = _local_scalar_dense >= 0
+    _assert_scalar_1 = torch.ops.aten._assert_scalar.default(ge_1, "Runtime assertion failed for expression u0 >= 0 on node 'ge_1'"); ge_1 = _assert_scalar_1 = None
     pow_1: "Sym(u0**2)" = _local_scalar_dense ** 2
     eq: "Sym(Eq(u1, u0**2))" = arg1_1 == pow_1; arg1_1 = pow_1 = None
     _assert_scalar_2 = torch.ops.aten._assert_scalar.default(eq, "Runtime assertion failed for expression Eq(u1, u0**2) on node 'eq'"); eq = _assert_scalar_2 = None
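The aot_graphs text compared above can be captured with the TORCH_LOGS=aot_graphs logging channel. A hedged sketch with a toy function (capture_scalar_outputs is needed so .item() stays in the graph; the exact printed graph differs by version):

import torch

torch._dynamo.config.capture_scalar_outputs = True

@torch.compile(fullgraph=True)
def f(x):
    n = x.item()          # becomes an unbacked SymInt (u0)
    torch._check(n >= 0)  # non-deprecated spelling, per this PR
    return torch.zeros(n)

f(torch.tensor([4]))  # run as: TORCH_LOGS=aot_graphs python repro.py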
@@ -3573,11 +3573,11 @@ def forward(self, arg0_1: "i64[1][1]cpu", arg1_1: "Sym(u1)", arg2_1: "Sym(s7)", arg3_1: "i64[u1][s7]cpu"):
             aot_graphs,
             """\
 def forward(self, arg0_1: "i64[1][1]cpu", arg1_1: "Sym(u1)", arg2_1: "i64[u1][1]cpu"):
-    ge_1: "Sym(u1 >= 0)" = arg1_1 >= 0
-    _assert_scalar = torch.ops.aten._assert_scalar.default(ge_1, "Runtime assertion failed for expression u1 >= 0 on node 'ge'"); ge_1 = _assert_scalar = None
+    ge: "Sym(u1 >= 0)" = arg1_1 >= 0
+    _assert_scalar = torch.ops.aten._assert_scalar.default(ge, "Runtime assertion failed for expression u1 >= 0 on node 'ge'"); ge = _assert_scalar = None
     _local_scalar_dense: "Sym(u0)" = torch.ops.aten._local_scalar_dense.default(arg0_1); arg0_1 = None
-    ge_2: "Sym(u0 >= 0)" = _local_scalar_dense >= 0
-    _assert_scalar_1 = torch.ops.aten._assert_scalar.default(ge_2, "Runtime assertion failed for expression u0 >= 0 on node 'ge_1'"); ge_2 = _assert_scalar_1 = None
+    ge_1: "Sym(u0 >= 0)" = _local_scalar_dense >= 0
+    _assert_scalar_1 = torch.ops.aten._assert_scalar.default(ge_1, "Runtime assertion failed for expression u0 >= 0 on node 'ge_1'"); ge_1 = _assert_scalar_1 = None
     pow_1: "Sym(u0**2)" = _local_scalar_dense ** 2
     eq: "Sym(Eq(u1, u0**2))" = arg1_1 == pow_1; arg1_1 = pow_1 = None
     _assert_scalar_2 = torch.ops.aten._assert_scalar.default(eq, "Runtime assertion failed for expression Eq(u1, u0**2) on node 'eq'"); eq = _assert_scalar_2 = None
@@ -3632,21 +3632,21 @@ def forward(self, arg0_1: "i64[1][1]cpu", arg1_1: "Sym(u1)", arg2_1: "i64[u1][1]cpu"):
             aot_graphs,
             """\
 def forward(self, arg0_1: "i64[2][1]cpu", arg1_1: "Sym(u2)", arg2_1: "Sym(u3)", arg3_1: "f32[u2, u3][1, u2]cpu"):
-    ge_1: "Sym(u2 >= 0)" = arg1_1 >= 0
-    _assert_scalar = torch.ops.aten._assert_scalar.default(ge_1, "Runtime assertion failed for expression u2 >= 0 on node 'ge'"); ge_1 = _assert_scalar = None
-    ge_3: "Sym(u3 >= 0)" = arg2_1 >= 0
-    _assert_scalar_1 = torch.ops.aten._assert_scalar.default(ge_3, "Runtime assertion failed for expression u3 >= 0 on node 'ge_1'"); ge_3 = _assert_scalar_1 = None
+    ge: "Sym(u2 >= 0)" = arg1_1 >= 0
+    _assert_scalar = torch.ops.aten._assert_scalar.default(ge, "Runtime assertion failed for expression u2 >= 0 on node 'ge'"); ge = _assert_scalar = None
+    ge_1: "Sym(u3 >= 0)" = arg2_1 >= 0
+    _assert_scalar_1 = torch.ops.aten._assert_scalar.default(ge_1, "Runtime assertion failed for expression u3 >= 0 on node 'ge_1'"); ge_1 = _assert_scalar_1 = None
     select: "i64[][]cpu" = torch.ops.aten.select.int(arg0_1, 0, 0)
     _local_scalar_dense: "Sym(u0)" = torch.ops.aten._local_scalar_dense.default(select); select = None
-    ge_4: "Sym(u0 >= 0)" = _local_scalar_dense >= 0
-    _assert_scalar_2 = torch.ops.aten._assert_scalar.default(ge_4, "Runtime assertion failed for expression u0 >= 0 on node 'ge_2'"); ge_4 = _assert_scalar_2 = None
+    ge_2: "Sym(u0 >= 0)" = _local_scalar_dense >= 0
+    _assert_scalar_2 = torch.ops.aten._assert_scalar.default(ge_2, "Runtime assertion failed for expression u0 >= 0 on node 'ge_2'"); ge_2 = _assert_scalar_2 = None
     sym_sum: "Sym(u0 + 1)" = torch.sym_sum((1, _local_scalar_dense))
     gt: "Sym(u0 + 1 > 0)" = sym_sum > 0; sym_sum = None
     _assert_scalar_3 = torch.ops.aten._assert_scalar.default(gt, "Runtime assertion failed for expression 0 < u0 + 1 on node 'gt'"); gt = _assert_scalar_3 = None
     select_1: "i64[][]cpu" = torch.ops.aten.select.int(arg0_1, 0, 1); arg0_1 = None
     _local_scalar_dense_1: "Sym(u1)" = torch.ops.aten._local_scalar_dense.default(select_1); select_1 = None
-    ge_5: "Sym(u1 >= 0)" = _local_scalar_dense_1 >= 0
-    _assert_scalar_4 = torch.ops.aten._assert_scalar.default(ge_5, "Runtime assertion failed for expression u1 >= 0 on node 'ge_3'"); ge_5 = _assert_scalar_4 = None
+    ge_3: "Sym(u1 >= 0)" = _local_scalar_dense_1 >= 0
+    _assert_scalar_4 = torch.ops.aten._assert_scalar.default(ge_3, "Runtime assertion failed for expression u1 >= 0 on node 'ge_3'"); ge_3 = _assert_scalar_4 = None
     sym_sum_1: "Sym(u1 + 1)" = torch.sym_sum((1, _local_scalar_dense_1))
     gt_1: "Sym(u1 + 1 > 0)" = sym_sum_1 > 0; sym_sum_1 = None
     _assert_scalar_5 = torch.ops.aten._assert_scalar.default(gt_1, "Runtime assertion failed for expression 0 < u1 + 1 on node 'gt_1'"); gt_1 = _assert_scalar_5 = None
@@ -4068,10 +4068,10 @@ def forward(self, arg0_1: "i64[2][1]cpu", arg1_1: "Sym(u2)", arg2_1: "Sym(u3)", arg3_1: "f32[u2, u3][1, u2]cpu"):
         self.assertExpectedInline(
             output,
             """\
-ge_1: "Sym(u0 >= 0)" = arg0_1 >= 0; arg0_1 = None
-_assert_scalar = torch.ops.aten._assert_scalar.default(ge_1, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge_1 = _assert_scalar = None
-ge_3: "Sym(u1 >= 0)" = arg1_1 >= 0; arg1_1 = None
-_assert_scalar_1 = torch.ops.aten._assert_scalar.default(ge_3, "Runtime assertion failed for expression u1 >= 0 on node 'ge_1'"); ge_3 = _assert_scalar_1 = None
+ge: "Sym(u0 >= 0)" = arg0_1 >= 0; arg0_1 = None
+_assert_scalar = torch.ops.aten._assert_scalar.default(ge, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge = _assert_scalar = None
+ge_1: "Sym(u1 >= 0)" = arg1_1 >= 0; arg1_1 = None
+_assert_scalar_1 = torch.ops.aten._assert_scalar.default(ge_1, "Runtime assertion failed for expression u1 >= 0 on node 'ge_1'"); ge_1 = _assert_scalar_1 = None
 clone: "f32[u0, u1][Max(1, u1), 1]cpu" = torch.ops.aten.clone.default(arg2_1, memory_format = torch.contiguous_format); arg2_1 = None
 add_3: "f32[u0, u1][Max(1, u1), 1]cpu" = torch.ops.aten.add.Tensor(clone, 1); clone = None
 mul_6: "f32[u0, u1][Max(1, u1), 1]cpu" = torch.ops.aten.mul.Tensor(add_3, 100); add_3 = None
@@ -4097,10 +4097,10 @@ def forward(self, arg0_1: "i64[2][1]cpu", arg1_1: "Sym(u2)", arg2_1: "Sym(u3)", arg3_1: "f32[u2, u3][1, u2]cpu"):
         self.assertExpectedInline(
             output,
             """\
-ge_1: "Sym(u0 >= 0)" = arg0_1 >= 0; arg0_1 = None
-_assert_scalar = torch.ops.aten._assert_scalar.default(ge_1, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge_1 = _assert_scalar = None
-ge_3: "Sym(u1 >= 0)" = arg1_1 >= 0; arg1_1 = None
-_assert_scalar_1 = torch.ops.aten._assert_scalar.default(ge_3, "Runtime assertion failed for expression u1 >= 0 on node 'ge_1'"); ge_3 = _assert_scalar_1 = None
+ge: "Sym(u0 >= 0)" = arg0_1 >= 0; arg0_1 = None
+_assert_scalar = torch.ops.aten._assert_scalar.default(ge, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge = _assert_scalar = None
+ge_1: "Sym(u1 >= 0)" = arg1_1 >= 0; arg1_1 = None
+_assert_scalar_1 = torch.ops.aten._assert_scalar.default(ge_1, "Runtime assertion failed for expression u1 >= 0 on node 'ge_1'"); ge_1 = _assert_scalar_1 = None
 add: "f32[u0, u1][Max(1, u1), 1]cpu" = torch.ops.aten.add.Tensor(arg2_1, 1); arg2_1 = None
 mul_5: "f32[u0, u1][Max(1, u1), 1]cpu" = torch.ops.aten.mul.Tensor(add, 100); add = None
 return (mul_5,)""",  # noqa: B950
@@ -4283,11 +4283,11 @@ def forward(self, arg0_1: "i64[2][1]cpu", arg1_1: "Sym(u2)", arg2_1: "Sym(u3)", arg3_1: "f32[u2, u3][1, u2]cpu"):
             aot_graphs,
             """\
 def forward(self, arg0_1: "i64[1][1]cpu", arg1_1: "Sym(u1)", arg2_1: "Sym(s7)", arg3_1: "i64[u1][s7]cpu"):
-    ge_1: "Sym(u1 >= 0)" = arg1_1 >= 0
-    _assert_scalar = torch.ops.aten._assert_scalar.default(ge_1, "Runtime assertion failed for expression u1 >= 0 on node 'ge'"); ge_1 = _assert_scalar = None
+    ge: "Sym(u1 >= 0)" = arg1_1 >= 0
+    _assert_scalar = torch.ops.aten._assert_scalar.default(ge, "Runtime assertion failed for expression u1 >= 0 on node 'ge'"); ge = _assert_scalar = None
     _local_scalar_dense: "Sym(u0)" = torch.ops.aten._local_scalar_dense.default(arg0_1); arg0_1 = None
-    ge_2: "Sym(u0 >= 0)" = _local_scalar_dense >= 0
-    _assert_scalar_1 = torch.ops.aten._assert_scalar.default(ge_2, "Runtime assertion failed for expression u0 >= 0 on node 'ge_1'"); ge_2 = _assert_scalar_1 = None
+    ge_1: "Sym(u0 >= 0)" = _local_scalar_dense >= 0
+    _assert_scalar_1 = torch.ops.aten._assert_scalar.default(ge_1, "Runtime assertion failed for expression u0 >= 0 on node 'ge_1'"); ge_1 = _assert_scalar_1 = None
     pow_1: "Sym(u0**2)" = _local_scalar_dense ** 2
     eq: "Sym(Eq(u1, u0**2))" = arg1_1 == pow_1; arg1_1 = pow_1 = None
     _assert_scalar_2 = torch.ops.aten._assert_scalar.default(eq, "Runtime assertion failed for expression Eq(u1, u0**2) on node 'eq'"); eq = _assert_scalar_2 = None
@@ -4319,11 +4319,11 @@ def forward(self, arg0_1: "i64[1][1]cpu", arg1_1: "Sym(u1)", arg2_1: "Sym(s7)", arg3_1: "i64[u1][s7]cpu"):
             aot_graphs,
             """\
 def forward(self, arg0_1: "i64[1][1]cpu", arg1_1: "Sym(u1)", arg2_1: "i64[u1][1]cpu"):
-    ge_1: "Sym(u1 >= 0)" = arg1_1 >= 0
-    _assert_scalar = torch.ops.aten._assert_scalar.default(ge_1, "Runtime assertion failed for expression u1 >= 0 on node 'ge'"); ge_1 = _assert_scalar = None
+    ge: "Sym(u1 >= 0)" = arg1_1 >= 0
+    _assert_scalar = torch.ops.aten._assert_scalar.default(ge, "Runtime assertion failed for expression u1 >= 0 on node 'ge'"); ge = _assert_scalar = None
     _local_scalar_dense: "Sym(u0)" = torch.ops.aten._local_scalar_dense.default(arg0_1); arg0_1 = None
-    ge_2: "Sym(u0 >= 0)" = _local_scalar_dense >= 0
-    _assert_scalar_1 = torch.ops.aten._assert_scalar.default(ge_2, "Runtime assertion failed for expression u0 >= 0 on node 'ge_1'"); ge_2 = _assert_scalar_1 = None
+    ge_1: "Sym(u0 >= 0)" = _local_scalar_dense >= 0
+    _assert_scalar_1 = torch.ops.aten._assert_scalar.default(ge_1, "Runtime assertion failed for expression u0 >= 0 on node 'ge_1'"); ge_1 = _assert_scalar_1 = None
     pow_1: "Sym(u0**2)" = _local_scalar_dense ** 2
     eq: "Sym(Eq(u1, u0**2))" = arg1_1 == pow_1; arg1_1 = pow_1 = None
     _assert_scalar_2 = torch.ops.aten._assert_scalar.default(eq, "Runtime assertion failed for expression Eq(u1, u0**2) on node 'eq'"); eq = _assert_scalar_2 = None
@@ -121,7 +121,7 @@ class TestOpaqueObject(TestCase):
         def size_impl_fake(q: OpaqueQueue) -> int:
             ctx = torch._custom_op.impl.get_ctx()
             u0 = ctx.new_dynamic_size()
-            torch._check_is_size(u0)
+            torch._check(u0 >= 0)
             return u0
 
         torch.library.define(
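The same replacement applies in fake (meta) kernels for custom ops with data-dependent output sizes. A minimal sketch against the public torch.library API; the op name mylib::bounded_nonzero is hypothetical:

import torch

torch.library.define("mylib::bounded_nonzero", "(Tensor x) -> Tensor")

@torch.library.register_fake("mylib::bounded_nonzero")
def _(x):
    ctx = torch.library.get_ctx()
    u0 = ctx.new_dynamic_size()  # fresh unbacked size symbol
    torch._check(u0 >= 0)        # instead of the deprecated torch._check_is_size(u0)
    return x.new_empty(u0)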
@@ -33,7 +33,11 @@ from typing import (
     TypeVar as _TypeVar,
     Union as _Union,
 )
-from typing_extensions import ParamSpec as _ParamSpec, TypeIs as _TypeIs
+from typing_extensions import (
+    deprecated as _deprecated,
+    ParamSpec as _ParamSpec,
+    TypeIs as _TypeIs,
+)
 
 
 # As a bunch of torch.packages internally still have this check
@@ -1735,7 +1739,10 @@ def _check(cond, message=None): # noqa: F811
     _check_with(RuntimeError, cond, message) # pyrefly: ignore [bad-argument-type]
 
 
-# TODO add deprecation annotation
+@_deprecated(
+    "_check_is_size will be removed in a future PyTorch release along with guard_size_oblivious. \
+Use _check(i >= 0) instead."
+)
 def _check_is_size(i, message=None, *, max=None):
     """Checks that a given integer is a valid size (i.e., is non-negative).
     You should use this over ``_check(i >= 0)`` because it can prevent
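With the decorator in place, calling the helper should still perform the check but now emit a DeprecationWarning (typing_extensions.deprecated warns with DeprecationWarning by default). A hedged sanity-check sketch:

import warnings

import torch

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    torch._check_is_size(3)  # still validates 3 >= 0, but now warns

assert any(issubclass(w.category, DeprecationWarning) for w in caught)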
@@ -470,6 +470,10 @@ def has_static_value(a: Union[SymBool, SymFloat, SymInt, bool, float, int]) -> bool:
     return a.node.shape_env.bound_sympy(a.node.expr).is_singleton() # type: ignore[union-attr]
 
 
+@deprecated(
+    "guard_size_oblivious will be removed. Consider using explicit unbacked handling \
+potentially utilizing guard_or_false, guard_or_true, or statically_known_true"
+)
 def guard_size_oblivious(expr: Union[torch.SymBool, bool]) -> bool:
     """
     Perform a guard on a symbolic boolean expression in a size oblivious way.
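A hedged example of the suggested replacements: rather than a size-oblivious guard, pick the fallback branch explicitly when the expression may involve unbacked symbols (maybe_squeeze is an illustrative name):

import torch
from torch.fx.experimental.symbolic_shapes import guard_or_false

def maybe_squeeze(x):
    # Old: if guard_size_oblivious(x.shape[0] == 1): ...
    # New: if the comparison cannot be decided (unbacked), take the False branch.
    if guard_or_false(x.shape[0] == 1):
        return x.squeeze(0)
    return x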
@@ -576,17 +576,6 @@ def insert_deferred_runtime_asserts(
             if i0 in constrained_unbacked_symbols:
                 continue # constrain symbol just once
 
-            if i0 in shape_env.size_like:
-                if export:
-                    graph.call_function(
-                        torch.ops.aten.sym_constrain_range_for_size.default,
-                        (expr_to_proxy[i0].node,),
-                    )
-                else:
-                    graph.call_function(
-                        torch._check_is_size, (expr_to_proxy[i0].node,)
-                    )
-
             vr = shape_env.var_to_range[i0]
             if vr.is_int and vr.upper == sys.maxsize - 1:
                 # treat upper bound == sys.maxsize - 1 for int symbols as +oo
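After this deletion, size-like unbacked symbols no longer get a dedicated sym_constrain_range_for_size / torch._check_is_size node here; the u0 >= 0 facts are carried by the ordinary deferred _assert_scalar asserts, which is what the test-expectation changes above reflect. A paraphrased sketch of the surviving control flow (an illustration, not a verbatim copy of the function):

import sys

def constrain_symbol_once(i0, shape_env, constrained_unbacked_symbols):
    # Sketch: each unbacked symbol is constrained at most once, and an int
    # upper bound of sys.maxsize - 1 is treated as unbounded (+oo).
    if i0 in constrained_unbacked_symbols:
        return
    vr = shape_env.var_to_range[i0]
    if vr.is_int and vr.upper == sys.maxsize - 1:
        pass  # treated as +oo: no explicit upper-bound assert is emitted
    constrained_unbacked_symbols.add(i0)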