diff --git a/test/custom_operator/my_custom_ops.py b/test/custom_operator/my_custom_ops.py
index 50f63c3370db..0eedcb49c2c5 100644
--- a/test/custom_operator/my_custom_ops.py
+++ b/test/custom_operator/my_custom_ops.py
@@ -6,7 +6,7 @@ import torch
 torch.ops.load_library(get_custom_op_library_path())
 
 
-@torch.library.impl_abstract("custom::nonzero")
+@torch.library.register_fake("custom::nonzero")
 def nonzero_abstract(x):
     n = x.dim()
     ctx = torch.library.get_ctx()
diff --git a/test/custom_operator/my_custom_ops2.py b/test/custom_operator/my_custom_ops2.py
index 31cbb01baac2..2a7f4b825f47 100644
--- a/test/custom_operator/my_custom_ops2.py
+++ b/test/custom_operator/my_custom_ops2.py
@@ -6,6 +6,6 @@ import torch
 torch.ops.load_library(get_custom_op_library_path())
 
 
-@torch.library.impl_abstract("custom::sin")
+@torch.library.register_fake("custom::sin")
 def sin_abstract(x):
     return torch.empty_like(x)
diff --git a/test/custom_operator/pointwise.py b/test/custom_operator/pointwise.py
index f1cd1a3f5374..53335fdb0267 100644
--- a/test/custom_operator/pointwise.py
+++ b/test/custom_operator/pointwise.py
@@ -8,12 +8,12 @@ torch.ops.load_library(get_custom_op_library_path())
 
 # NB: The impl_abstract_pystub for cos actually
 # specifies it should live in the my_custom_ops2 module.
-@torch.library.impl_abstract("custom::cos")
+@torch.library.register_fake("custom::cos")
 def cos_abstract(x):
     return torch.empty_like(x)
 
 
 # NB: There is no impl_abstract_pystub for tan
-@torch.library.impl_abstract("custom::tan")
+@torch.library.register_fake("custom::tan")
 def tan_abstract(x):
     return torch.empty_like(x)
diff --git a/test/export/test_converter.py b/test/export/test_converter.py
index d5611ad2d579..9d872f87d60a 100644
--- a/test/export/test_converter.py
+++ b/test/export/test_converter.py
@@ -911,7 +911,7 @@ class TestConverter(TestCase):
                 return x + x
 
             # Meta function of the custom op.
-            @torch.library.impl_abstract(
+            @torch.library.register_fake(
                 "mylib::foo",
                 lib=lib,
             )
diff --git a/test/export/test_export.py b/test/export/test_export.py
index 31be248bd10f..a46a8815f93e 100755
--- a/test/export/test_export.py
+++ b/test/export/test_export.py
@@ -146,7 +146,7 @@ torch.library.define(
 
 
 @torch.library.impl("testlib::returns_tensor_symint", "cpu")
-@torch.library.impl_abstract("testlib::returns_tensor_symint")
+@torch.library.register_fake("testlib::returns_tensor_symint")
 def returns_tensor_symint_impl(x):
     return x, x.shape[0]
 
@@ -159,7 +159,7 @@ def foo_impl(x, z):
     return x, z, x + z
 
 
-@torch.library.impl_abstract("testlib::foo")
+@torch.library.register_fake("testlib::foo")
 def foo_abstract(x, z):
     return x, z, x + z
 
diff --git a/test/export/test_serialize.py b/test/export/test_serialize.py
index d174405dd8e0..f4f7b68a494a 100644
--- a/test/export/test_serialize.py
+++ b/test/export/test_serialize.py
@@ -795,7 +795,7 @@ class TestDeserialize(TestCase):
         )
 
         @torch.library.impl("mylib::foo", "cpu", lib=lib)
-        @torch.library.impl_abstract("mylib::foo")
+        @torch.library.register_fake("mylib::foo")
         def foo_impl(a, b, c):
             res2 = None
             if c is not None:
@@ -884,21 +884,21 @@ class TestDeserialize(TestCase):
         )
 
         @torch.library.impl("mylib::foo1", "cpu", lib=lib)
-        @torch.library.impl_abstract("mylib::foo1")
+        @torch.library.register_fake("mylib::foo1")
         def foo1_impl(x, y, z, w, n):
             x.add_(y[0] + w)
             z.add_(y[1] + n)
             return n + n
 
         @torch.library.impl("mylib::foo2", "cpu", lib=lib)
-        @torch.library.impl_abstract("mylib::foo2")
+        @torch.library.register_fake("mylib::foo2")
         def foo2_impl(x, y, z, w, n):
             x.add_(y[0] + w)
             z.add_(y[1] + n)
             return (n + n, n * n)
 
         @torch.library.impl("mylib::foo3", "cpu", lib=lib)
-        @torch.library.impl_abstract("mylib::foo3")
+        @torch.library.register_fake("mylib::foo3")
         def foo3_impl(x, y, z, w, n):
             x.add_(y[0] + w)
             z.add_(y[1] + n)
diff --git a/test/higher_order_ops/test_with_effects.py b/test/higher_order_ops/test_with_effects.py
index 9adf4f2dd471..67facfb127d8 100644
--- a/test/higher_order_ops/test_with_effects.py
+++ b/test/higher_order_ops/test_with_effects.py
@@ -328,7 +328,7 @@ def forward(self, arg0_1, arg1_1, arg2_1):
             return
 
         # Meta function of the custom op
-        @torch.library.impl_abstract(
+        @torch.library.register_fake(
             "mylib::record_scalar_tensor",
             lib=lib,
         )
diff --git a/test/inductor/test_aot_inductor.py b/test/inductor/test_aot_inductor.py
index 890ee8621dc2..6052a9ba2ea6 100644
--- a/test/inductor/test_aot_inductor.py
+++ b/test/inductor/test_aot_inductor.py
@@ -4315,7 +4315,7 @@ class AOTInductorTestsTemplate:
             def foo(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
                 return a[: b.item()]
 
-            @torch.library.impl_abstract("mylib::foo", lib=lib)
+            @torch.library.register_fake("mylib::foo", lib=lib)
             def foo_fake_impl(a, b):
                 ctx = torch.library.get_ctx()
                 u = ctx.new_dynamic_size()
diff --git a/test/inductor/test_auto_functionalize.py b/test/inductor/test_auto_functionalize.py
index 65df4912a41c..e05bbedbb95a 100644
--- a/test/inductor/test_auto_functionalize.py
+++ b/test/inductor/test_auto_functionalize.py
@@ -217,7 +217,7 @@ def forward(self, arg0_1: "f32[3][1]cpu", arg1_1: "f32[3][1]cpu", arg2_1: "f32[3
             z.add_(y[1] + n)
             return y[0] + w, y[1] + n
 
-        @torch.library.impl_abstract("mylib::foo", lib=lib)
+        @torch.library.register_fake("mylib::foo", lib=lib)
         def foo_abstract(x, y, z, w, n):
             return y[0] + w, y[1] + n
 
@@ -495,7 +495,7 @@ def forward(self, arg0_1: "f32[3][1]cpu", arg1_1: "f32[3][1]cpu", arg2_1: "f32[3
             z.add_(y[1] + n)
             return y[0] + w, y[1] + n
 
-        @torch.library.impl_abstract("mylib::foo", lib=lib)
+        @torch.library.register_fake("mylib::foo", lib=lib)
         def foo_abstract(x, y, z, w, n):
             return y[0] + w, y[1] + n
 
diff --git a/test/test_custom_ops.py b/test/test_custom_ops.py
index 4742c69211e5..b713edeb7a95 100644
--- a/test/test_custom_ops.py
+++ b/test/test_custom_ops.py
@@ -1608,7 +1608,7 @@ class TestCustomOp(CustomOpTestCaseBase):
         lib = self.lib()
         lib.define("sin.blah(Tensor x) -> Tensor")
 
-        torch.library.impl_abstract(
+        torch.library.register_fake(
             f"{self.test_ns}::sin.blah", torch.empty_like, lib=lib
         )
 
@@ -1621,7 +1621,7 @@ class TestCustomOp(CustomOpTestCaseBase):
         def foo(x: torch.Tensor, dim: int) -> torch.Tensor:
             raise NotImplementedError
 
-        @torch.library.impl_abstract(f"{TestCustomOp.test_ns}::foo", lib=self.lib())
+        @torch.library.register_fake(f"{TestCustomOp.test_ns}::foo", lib=self.lib())
         def foo_meta(x, dim):
             output_shape = list(x.shape)
             del output_shape[dim]
@@ -1637,7 +1637,7 @@ class TestCustomOp(CustomOpTestCaseBase):
         def foo(x: torch.Tensor, dim: int) -> torch.Tensor:
             raise NotImplementedError
 
-        @torch.library.impl_abstract(f"{TestCustomOp.test_ns}::foo", lib=self.lib())
+        @torch.library.register_fake(f"{TestCustomOp.test_ns}::foo", lib=self.lib())
         def foo_meta(x, dim):
             output_shape = list(x.shape)
             del output_shape[dim]
@@ -1645,7 +1645,7 @@
 
         with self.assertRaisesRegex(RuntimeError, r"test_custom_ops.py:\d+"):
 
-            @torch.library.impl_abstract(f"{TestCustomOp.test_ns}::foo", lib=self.lib())
+            @torch.library.register_fake(f"{TestCustomOp.test_ns}::foo", lib=self.lib())
             def foo_meta2(x, dim):
                 output_shape = list(x.shape)
                 del output_shape[dim]
@@ -1656,7 +1656,7 @@ class TestCustomOp(CustomOpTestCaseBase):
         def foo(x: torch.Tensor) -> torch.Tensor:
             raise NotImplementedError
 
-        @torch.library.impl_abstract(f"{TestCustomOp.test_ns}::foo", lib=self.lib())
+        @torch.library.register_fake(f"{TestCustomOp.test_ns}::foo", lib=self.lib())
         def foo_meta(x):
             ctx = torch.library.get_ctx()
             r = ctx.new_dynamic_size(min=1)
@@ -1683,7 +1683,7 @@ class TestCustomOp(CustomOpTestCaseBase):
         def foo(x: torch.Tensor) -> torch.Tensor:
             raise NotImplementedError
 
-        @torch.library.impl_abstract(f"{TestCustomOp.test_ns}::foo", lib=self.lib())
+        @torch.library.register_fake(f"{TestCustomOp.test_ns}::foo", lib=self.lib())
         def foo_meta(x):
             return x.sum()
 
@@ -1827,7 +1827,7 @@ Dynamic shape operator
         lib.define("foo(Tensor x) -> Tensor")
         qualname = f"{self.test_ns}::foo"
 
-        @torch.library.impl_abstract(qualname, lib=self.lib())
+        @torch.library.register_fake(qualname, lib=self.lib())
        def foo_impl(x):
             return x.sin()
 
@@ -1850,7 +1850,7 @@ Dynamic shape operator
         op = self.get_op(qualname)
 
         with self.assertRaisesRegex(RuntimeError, r"already has .*Meta implementation"):
-            torch.library.impl_abstract(qualname, func=foo_impl, lib=self.lib())
+            torch.library.register_fake(qualname, foo_impl, lib=self.lib())
 
     def test_abstract_impl_on_existing_op_with_CompositeImplicitAutograd(self):
         lib = self.lib()
@@ -1864,7 +1864,7 @@ Dynamic shape operator
         op = self.get_op(qualname)
 
         with self.assertRaisesRegex(RuntimeError, "CompositeImplicitAutograd"):
-            torch.library.impl_abstract(qualname, func=foo_impl, lib=self.lib())
+            torch.library.register_fake(qualname, foo_impl, lib=self.lib())
 
     def test_abstract_impl_on_existing_op_with_CompositeExplicitAutograd(self):
         lib = self.lib()
@@ -1877,7 +1877,7 @@ Dynamic shape operator
         lib.impl("foo", foo_impl, "CompositeExplicitAutograd")
         op = self.get_op(qualname)
 
-        torch.library.impl_abstract(qualname, func=lambda x: x.sum(), lib=self.lib())
+        torch.library.register_fake(qualname, lambda x: x.sum(), lib=self.lib())
         with torch._subclasses.FakeTensorMode():
             x = torch.randn(10)
             result = op(x)
diff --git a/torch/_library/custom_ops.py b/torch/_library/custom_ops.py
index 1d8d0fc5377b..bd8acb2789e1 100644
--- a/torch/_library/custom_ops.py
+++ b/torch/_library/custom_ops.py
@@ -403,7 +403,7 @@ class CustomOpDef:
         (sizes/strides/storage_offset/device), it specifies what the properties of
         the output Tensors are.
 
-        Please see :func:`torch.library.impl_abstract` for more details.
+        Please see :func:`torch.library.register_fake` for more details.
 
         Args:
             fn (Callable): The function to register as the FakeTensor
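
Note: the patch above only renames the registration API; call sites keep the same shape. For reference, below is a minimal, self-contained sketch of the new spelling. The "demolib::bounded_nonzero" library and op names are hypothetical, invented for this illustration; torch.library.register_fake, torch.library.get_ctx, and ctx.new_dynamic_size are the real APIs exercised by the tests in this diff.

    import torch

    # Hypothetical library and op, defined only for this illustration.
    lib = torch.library.Library("demolib", "FRAGMENT")
    lib.define("bounded_nonzero(Tensor x) -> Tensor")


    # The fake (meta) impl runs under FakeTensorMode (e.g. during torch.export
    # or torch.compile) and must describe output metadata without real data.
    @torch.library.register_fake("demolib::bounded_nonzero", lib=lib)
    def _(x):
        ctx = torch.library.get_ctx()
        # The output length is data-dependent, so allocate an unbacked SymInt
        # instead of computing a concrete size.
        n = ctx.new_dynamic_size()
        return x.new_empty(n, x.dim(), dtype=torch.long)

The old name, torch.library.impl_abstract, remains available as a deprecated alias, so out-of-tree registrations keep working while callers migrate to register_fake.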