diff --git a/.flake8 b/.flake8
index 7f40e3ace9e4..be0e741a51f0 100644
--- a/.flake8
+++ b/.flake8
@@ -2,7 +2,7 @@
 # NOTE: **Mirror any changes** to this file the [tool.ruff] config in pyproject.toml
 # before we can fully move to use ruff
 enable-extensions = G
-select = B,C,E,F,G,P,SIM1,T4,W,B9,TOR0,TOR1,TOR2
+select = B,C,E,F,G,P,SIM1,T4,W,B9,TOR0,TOR1,TOR2,TOR9
 max-line-length = 120
 # C408 ignored because we like the dict keyword argument syntax
 # E501 is not flexible enough, we're using B950 instead
@@ -27,6 +27,9 @@ ignore =
     # TODO(kit1980): fix all TOR102 issues
     # `torch.load` without `weights_only` parameter is unsafe
     TOR102,
+    # TODO(kit1980): resolve all TOR003 issues
+    # pass `use_reentrant` explicitly to `checkpoint`.
+    TOR003
 per-file-ignores =
     __init__.py: F401
     test/**: F821
@@ -36,6 +39,21 @@ per-file-ignores =
     torchgen/executorch/api/types/__init__.py: F401,F403
     test/dynamo/test_higher_order_ops.py: B950
     torch/testing/_internal/dynamo_test_failures.py: B950
+    # TOR901 is only for test, we want to ignore it for everything else.
+    # It's not easy to configure this without affecting other per-file-ignores,
+    # so we explicitly list every file where it's violated outside of test.
+    torch/__init__.py: F401,TOR901
+    torch/_custom_op/impl.py: TOR901
+    torch/_export/serde/upgrade.py: TOR901
+    torch/_functorch/vmap.py: TOR901
+    torch/_inductor/test_operators.py: TOR901
+    torch/_library/abstract_impl.py: TOR901
+    torch/_meta_registrations.py: TOR901
+    torch/_prims/__init__.py: F401,TOR901
+    torch/_prims/rng_prims.py: TOR901
+    torch/ao/quantization/fx/_decomposed.py: TOR901
+    torch/distributed/_functional_collectives.py: TOR901
+    torch/distributed/_spmd/data_parallel.py: TOR901
 optional-ascii-coding = True
 exclude =
     ./.git,
diff --git a/.lintrunner.toml b/.lintrunner.toml
index 635e90d089d8..36362eb128e5 100644
--- a/.lintrunner.toml
+++ b/.lintrunner.toml
@@ -46,7 +46,7 @@ init_command = [
     'mccabe==0.7.0',
     'pycodestyle==2.11.1',
     'pyflakes==3.1.0',
-    'torchfix==0.2.0',
+    'torchfix==0.4.0',
 ]
diff --git a/requirements-flake8.txt b/requirements-flake8.txt
index dc289b4e036f..7dd48815469a 100644
--- a/requirements-flake8.txt
+++ b/requirements-flake8.txt
@@ -8,4 +8,4 @@ flake8-pyi==20.5.0
 mccabe==0.6.1
 pycodestyle==2.6.0
 pyflakes==2.2.0
-torchfix==0.2.0
+torchfix==0.4.0
diff --git a/test/distributed/_spmd/test_tracing.py b/test/distributed/_spmd/test_tracing.py
index 555d955da172..025167c32f58 100644
--- a/test/distributed/_spmd/test_tracing.py
+++ b/test/distributed/_spmd/test_tracing.py
@@ -256,7 +256,7 @@ def ddm_backward(grad: torch.Tensor) -> torch.Tensor:
     return grad


-dummy_lib = torch.library.Library("dummy", "DEF")
+dummy_lib = torch.library.Library("dummy", "DEF")  # noqa: TOR901
 dummy_lib.define("ddm(Tensor x) -> Tensor")
 dummy_lib.impl("ddm", ddm, "CompositeExplicitAutograd")
 dummy_lib.define("ddm_backward(Tensor x) -> Tensor")
diff --git a/test/dynamo/test_aot_autograd.py b/test/dynamo/test_aot_autograd.py
index 80bf187c5c3d..b92bf9e8a84a 100644
--- a/test/dynamo/test_aot_autograd.py
+++ b/test/dynamo/test_aot_autograd.py
@@ -25,7 +25,7 @@ def maybe_dupe_op(x):


 aten = torch.ops.aten
-lib = torch.library.Library("custom", "DEF")
+lib = torch.library.Library("custom", "DEF")  # noqa: TOR901
 lib.define("maybe_dupe_op(Tensor a) -> (Tensor, Tensor)")
 lib.impl("maybe_dupe_op", maybe_dupe_op, "CPU")
 lib.impl("maybe_dupe_op", maybe_dupe_op, "Meta")
diff --git a/test/dynamo/test_decorators.py b/test/dynamo/test_decorators.py
index 5bc5cc0ec635..daeba65924c4 100644
--- a/test/dynamo/test_decorators.py
+++ b/test/dynamo/test_decorators.py
@@ -39,7 +39,7 @@ class DecoratorTests(torch._dynamo.test_case.TestCase):
         import torch.library
         from torch.library import Library

-        foo = Library("foo", "DEF")
+        foo = Library("foo", "DEF")  # noqa: TOR901
         foo.define("custom(Tensor self) -> Tensor")

         # Dynamic shape data dependent operator. For static shape compilation, Dynamo
diff --git a/test/dynamo/test_repros.py b/test/dynamo/test_repros.py
index c00ec244b0d1..be0f6511292b 100644
--- a/test/dynamo/test_repros.py
+++ b/test/dynamo/test_repros.py
@@ -43,7 +43,7 @@ from torch.testing._internal.common_utils import (
 _orig_module_call = torch.nn.Module.__call__

 # Custom operator that only supports CPU and Meta
-lib = torch.library.Library("test_sample", "DEF")
+lib = torch.library.Library("test_sample", "DEF")  # noqa: TOR901
 lib.define("foo(Tensor self) -> Tensor")

 lib.impl("foo", torch.sin, "CPU")
diff --git a/test/export/test_serialize.py b/test/export/test_serialize.py
index 07670a6d87d9..dbdca09e0885 100644
--- a/test/export/test_serialize.py
+++ b/test/export/test_serialize.py
@@ -326,7 +326,7 @@ class TestDeserialize(TestCase):

     def test_auto_functionalize(self):
         try:
-            lib = torch.library.Library("mylib", "FRAGMENT")
+            lib = torch.library.Library("mylib", "FRAGMENT")  # noqa: TOR901
             torch.library.define(
                 "mylib::foo1",
                 "(Tensor(a!) x, Tensor[] y, Tensor(b!) z, SymInt w, Tensor n) -> Tensor",
@@ -522,7 +522,7 @@ class TestDeserialize(TestCase):
     def test_tensor_tensor_list(self):
         try:
             from torch.library import Library
-            lib = Library("_export", "FRAGMENT")
+            lib = Library("_export", "FRAGMENT")  # noqa: TOR901
             lib.define(
                 "_test_tensor_tensor_list_output(Tensor x, Tensor y) -> (Tensor, Tensor[])",
                 tags=torch.Tag.pt2_compliant_tag)
diff --git a/test/inductor/test_custom_lowering.py b/test/inductor/test_custom_lowering.py
index 6740108298fa..b97da059d096 100644
--- a/test/inductor/test_custom_lowering.py
+++ b/test/inductor/test_custom_lowering.py
@@ -17,9 +17,15 @@ class TestCustomLowering(TorchTestCase):
     @classmethod
     def setUpClass(cls):
         super().setUpClass()
-        cls.test_inductor_ops = torch.library.Library("test_inductor_ops", "DEF")
-        cls.impl_cuda = torch.library.Library("test_inductor_ops", "IMPL", "CUDA")
-        cls.impl_meta = torch.library.Library("test_inductor_ops", "IMPL", "Meta")
+        cls.test_inductor_ops = torch.library.Library(  # noqa: TOR901
+            "test_inductor_ops", "DEF"
+        )
+        cls.impl_cuda = torch.library.Library(  # noqa: TOR901
+            "test_inductor_ops", "IMPL", "CUDA"
+        )
+        cls.impl_meta = torch.library.Library(  # noqa: TOR901
+            "test_inductor_ops", "IMPL", "Meta"
+        )
         cls._register_jagged_to_padded_dense()

     @classmethod
diff --git a/test/inductor/test_torchinductor.py b/test/inductor/test_torchinductor.py
index 94d61a9c0d4e..9c7159e5019e 100644
--- a/test/inductor/test_torchinductor.py
+++ b/test/inductor/test_torchinductor.py
@@ -123,7 +123,7 @@ skip_if_x86_mac = functools.partial(
 )
 vec_dtypes = [torch.float, torch.bfloat16, torch.float16]

-libtest = torch.library.Library("test", "FRAGMENT")
+libtest = torch.library.Library("test", "FRAGMENT")  # noqa: TOR901
 ids = set()

 f32 = torch.float32
diff --git a/test/inductor/test_torchinductor_dynamic_shapes.py b/test/inductor/test_torchinductor_dynamic_shapes.py
index 078fe0b7a810..267a8ec5d823 100644
--- a/test/inductor/test_torchinductor_dynamic_shapes.py
+++ b/test/inductor/test_torchinductor_dynamic_shapes.py
@@ -275,31 +275,30 @@ class TestInductorDynamic(TestCase):
     @torch._dynamo.config.patch(capture_scalar_outputs=True)
     @torch._inductor.config.patch(implicit_fallbacks=True)
     def test_item_to_inputs_kernel_nobreak(self, device):
-        lib = torch.library.Library("test", "DEF")
+        with torch.library._scoped_library("test", "DEF") as lib:
+            try:

-        try:
+                @custom_ops.custom_op("test::foo")
+                def foo(x: torch.Tensor, y: int) -> torch.Tensor:
+                    raise NotImplementedError()

-            @custom_ops.custom_op("test::foo")
-            def foo(x: torch.Tensor, y: int) -> torch.Tensor:
-                raise NotImplementedError()
+                @custom_ops.impl("test::foo")
+                def foo_impl(x: torch.Tensor, y: int) -> torch.Tensor:
+                    return x.clone()

-            @custom_ops.impl("test::foo")
-            def foo_impl(x: torch.Tensor, y: int) -> torch.Tensor:
-                return x.clone()
+                @torch.library.impl_abstract("test::foo", lib=lib)
+                def foo_meta(x: torch.Tensor, y: int) -> torch.Tensor:
+                    return x.clone()

-            @torch.library.impl_abstract("test::foo", lib=lib)
-            def foo_meta(x: torch.Tensor, y: int) -> torch.Tensor:
-                return x.clone()
+                @torch.compile(fullgraph=True)
+                def f(x, r):
+                    y = x.item()
+                    return torch.ops.test.foo(r, y)

-            @torch.compile(fullgraph=True)
-            def f(x, r):
-                y = x.item()
-                return torch.ops.test.foo(r, y)
+                f(torch.tensor([3], device=device), torch.randn(10, device=device))

-            f(torch.tensor([3], device=device), torch.randn(10, device=device))
-
-        finally:
-            custom_ops._destroy("test::foo")
+            finally:
+                custom_ops._destroy("test::foo")

     @torch._dynamo.config.patch(
         capture_scalar_outputs=True, capture_dynamic_output_shape_ops=True
     )
@@ -359,35 +358,34 @@ class TestInductorDynamic(TestCase):
     )
     @torch._inductor.config.patch(implicit_fallbacks=True)
     def test_dynamic_stride_nobreak(self, device):
-        lib = torch.library.Library("test", "DEF")
+        with torch.library._scoped_library("test", "DEF") as lib:
+            try:

-        try:
+                @custom_ops.custom_op("test::foo")
+                def foo(x: torch.Tensor) -> torch.Tensor:
+                    raise NotImplementedError()

-            @custom_ops.custom_op("test::foo")
-            def foo(x: torch.Tensor) -> torch.Tensor:
-                raise NotImplementedError()
+                @custom_ops.impl("test::foo")
+                def foo_impl(x: torch.Tensor) -> torch.Tensor:
+                    stride = x.item()
+                    return torch.empty_strided((1,), (stride,), device=x.device)

-            @custom_ops.impl("test::foo")
-            def foo_impl(x: torch.Tensor) -> torch.Tensor:
-                stride = x.item()
-                return torch.empty_strided((1,), (stride,), device=x.device)
+                @torch.library.impl_abstract("test::foo", lib=lib)
+                def foo_meta(x: torch.Tensor) -> torch.Tensor:
+                    ctx = torch.library.get_ctx()
+                    stride = ctx.new_dynamic_size()
+                    return torch.empty_strided((1,), (stride,), device=x.device)

-            @torch.library.impl_abstract("test::foo", lib=lib)
-            def foo_meta(x: torch.Tensor) -> torch.Tensor:
-                ctx = torch.library.get_ctx()
-                stride = ctx.new_dynamic_size()
-                return torch.empty_strided((1,), (stride,), device=x.device)
+                @torch.compile(fullgraph=True)
+                def f(x):
+                    r = torch.ops.test.foo(x)
+                    y = r.stride(0)
+                    return torch.empty(y, device=x.device)

-            @torch.compile(fullgraph=True)
-            def f(x):
-                r = torch.ops.test.foo(x)
-                y = r.stride(0)
-                return torch.empty(y, device=x.device)
+                f(torch.tensor([3], device=device))

-            f(torch.tensor([3], device=device))
-
-        finally:
-            custom_ops._destroy("test::foo")
+            finally:
+                custom_ops._destroy("test::foo")

     @torch._inductor.config.patch(disable_cpp_codegen=True)
     def test_floor(self):
diff --git a/test/quantization/pt2e/test_quantize_pt2e.py b/test/quantization/pt2e/test_quantize_pt2e.py
index 99232aaac823..af40c8725033 100644
--- a/test/quantization/pt2e/test_quantize_pt2e.py
+++ b/test/quantization/pt2e/test_quantize_pt2e.py
@@ -1744,7 +1744,7 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
     def test_observer_callback(self):
         from torch.library import Library, impl

-        test_lib = Library("test_int4", "DEF")
+        test_lib = Library("test_int4", "DEF")  # noqa: TOR901
         test_lib.define("quantize_per_tensor_int4(Tensor input, float scale, int zero_point) -> Tensor")

         @impl(test_lib, "quantize_per_tensor_int4", "CompositeExplicitAutograd")
diff --git a/test/test_autograd_fallback.py b/test/test_autograd_fallback.py
index 86a8a968ac9c..ec6f01c4c2df 100644
--- a/test/test_autograd_fallback.py
+++ b/test/test_autograd_fallback.py
@@ -5,7 +5,7 @@ import warnings
 import numpy as np

 import torch
-from torch.library import Library
+from torch.library import _scoped_library, Library
 from torch.testing._internal.common_utils import (
     instantiate_parametrized_tests,
     parametrize,
@@ -38,7 +38,7 @@ class TestAutogradFallback(TestCase):
         return getattr(getattr(torch.ops, self.test_ns), name).default

     def get_lib(self):
-        lib = Library(self.test_ns, "FRAGMENT")
+        lib = Library(self.test_ns, "FRAGMENT")  # noqa: TOR901
         self.lib = lib
         return lib

@@ -146,166 +146,167 @@ class TestAutogradFallback(TestCase):
             # To be clear, none of these situations are OK and will lead
             # to other problems down the line. We're testing them because
             # it is fairly common to actually do these things.
-            lib = Library(self.test_ns, "FRAGMENT")
-            lib.define("foo(Tensor self) -> Tensor")
-            lib.impl("foo", lambda x: x, "CPU")
-            op = self.get_op("foo")
+            with _scoped_library(self.test_ns, "FRAGMENT") as lib:
+                lib.define("foo(Tensor self) -> Tensor")
+                lib.impl("foo", lambda x: x, "CPU")
+                op = self.get_op("foo")

-            x = torch.randn(3, requires_grad=True)
-            y = op(x).sum()
-            with self._check_ctx(mode):
-                y.backward()
-            self.assertEqual(x.grad, torch.ones_like(x))
+                x = torch.randn(3, requires_grad=True)
+                y = op(x).sum()
+                with self._check_ctx(mode):
+                    y.backward()
+                self.assertEqual(x.grad, torch.ones_like(x))

-            lib.define("bar(Tensor(a!) self) -> Tensor(a!)")
-            lib.impl("bar", lambda x: x, "CPU")
-            op = self.get_op("bar")
+                lib.define("bar(Tensor(a!) self) -> Tensor(a!)")
+                lib.impl("bar", lambda x: x, "CPU")
+                op = self.get_op("bar")

-            x = torch.randn(3, requires_grad=True)
-            y = op(x).sum()
-            with self._check_ctx(mode):
-                y.backward()
-            self.assertEqual(x.grad, torch.ones_like(x))
+                x = torch.randn(3, requires_grad=True)
+                y = op(x).sum()
+                with self._check_ctx(mode):
+                    y.backward()
+                self.assertEqual(x.grad, torch.ones_like(x))

     @parametrize("mode", ("nothing", "warn"))
     def test_composite_registered_to_cpu(self, mode):
         with autograd_fallback_mode(mode):
-            lib = Library(self.test_ns, "FRAGMENT")
-            lib.define("foo(Tensor self) -> Tensor")
-            lib.impl("foo", lambda x: x.sin().sum(), "CPU")
-            op = self.get_op("foo")
+            with _scoped_library(self.test_ns, "FRAGMENT") as lib:
+                lib.define("foo(Tensor self) -> Tensor")
+                lib.impl("foo", lambda x: x.sin().sum(), "CPU")
+                op = self.get_op("foo")

-            x = torch.randn(3, requires_grad=True)
-            y = op(x)
-            with self._check_ctx(mode):
-                y.backward()
-            self.assertEqual(x.grad, x.cos())
+                x = torch.randn(3, requires_grad=True)
+                y = op(x)
+                with self._check_ctx(mode):
+                    y.backward()
+                self.assertEqual(x.grad, x.cos())

     @parametrize("mode", ("nothing", "warn"))
     def test_autograd_function_registered_to_cpu(self, mode):
         with autograd_fallback_mode(mode):
-            lib = Library(self.test_ns, "FRAGMENT")
-            lib.define("foo(Tensor self) -> Tensor")
+            with _scoped_library(self.test_ns, "FRAGMENT") as lib:
+                lib.define("foo(Tensor self) -> Tensor")

-            class NumpySin(torch.autograd.Function):
-                @staticmethod
-                def forward(ctx, x):
-                    ctx.save_for_backward(x)
-                    return torch.tensor(np.sin(x.cpu().numpy()))
+                class NumpySin(torch.autograd.Function):
+                    @staticmethod
+                    def forward(ctx, x):
+                        ctx.save_for_backward(x)
+                        return torch.tensor(np.sin(x.cpu().numpy()))

-                @staticmethod
-                def backward(ctx, gx):
-                    (x,) = ctx.saved_tensors
-                    return gx * x.cos()
+                    @staticmethod
+                    def backward(ctx, gx):
+                        (x,) = ctx.saved_tensors
+                        return gx * x.cos()

-            lib.impl("foo", NumpySin.apply, "CPU")
-            op = self.get_op("foo")
+                lib.impl("foo", NumpySin.apply, "CPU")
+                op = self.get_op("foo")

-            x = torch.randn(3, requires_grad=True)
-            y = op(x).sum()
-            with self._check_ctx(mode):
-                y.backward()
-            self.assertEqual(x.grad, x.cos())
+                x = torch.randn(3, requires_grad=True)
+                y = op(x).sum()
+                with self._check_ctx(mode):
+                    y.backward()
+                self.assertEqual(x.grad, x.cos())

     @parametrize("mode", ("nothing", "warn"))
     def test_inplace_autograd_function_registered_to_cpu(self, mode):
         with autograd_fallback_mode(mode):
-            lib = Library(self.test_ns, "FRAGMENT")
-            lib.define("foo(Tensor(a!) self) -> Tensor(a!)")
+            with _scoped_library(self.test_ns, "FRAGMENT") as lib:
+                lib.define("foo(Tensor(a!) self) -> Tensor(a!)")

-            class NumpySin_(torch.autograd.Function):
-                @staticmethod
-                def forward(ctx, x):
-                    ctx.save_for_backward(x.clone())
-                    x_np = x.detach().numpy()
-                    np.sin(x_np, out=x_np)
-                    ctx.mark_dirty(x)
-                    return x
+                class NumpySin_(torch.autograd.Function):
+                    @staticmethod
+                    def forward(ctx, x):
+                        ctx.save_for_backward(x.clone())
+                        x_np = x.detach().numpy()
+                        np.sin(x_np, out=x_np)
+                        ctx.mark_dirty(x)
+                        return x

-                @staticmethod
-                def backward(ctx, gx):
-                    (x,) = ctx.saved_tensors
-                    return gx * x.cos()
+                    @staticmethod
+                    def backward(ctx, gx):
+                        (x,) = ctx.saved_tensors
+                        return gx * x.cos()

-            lib.impl("foo", NumpySin_.apply, "CPU")
-            op = self.get_op("foo")
+                lib.impl("foo", NumpySin_.apply, "CPU")
+                op = self.get_op("foo")

-            x = torch.randn(3, requires_grad=True)
-            z = x.clone()
-            w = z[0]
-            y = op(w)
+                x = torch.randn(3, requires_grad=True)
+                z = x.clone()
+                w = z[0]
+                y = op(w)

-            expected = torch.zeros_like(x)
-            expected[0] = x[0].cos()
-            with self._check_ctx(mode):
-                (gx,) = torch.autograd.grad(y, x, torch.ones_like(y), retain_graph=True)
-            self.assertEqual(gx, expected)
+                expected = torch.zeros_like(x)
+                expected[0] = x[0].cos()
+                with self._check_ctx(mode):
+                    (gx,) = torch.autograd.grad(
+                        y, x, torch.ones_like(y), retain_graph=True
+                    )
+                self.assertEqual(gx, expected)

-            expected = torch.ones_like(x)
-            expected[0] = x[0].cos()
-            with self._check_ctx(mode):
-                (gx,) = torch.autograd.grad(z, x, torch.ones_like(z))
-            self.assertEqual(gx, expected)
+                expected = torch.ones_like(x)
+                expected[0] = x[0].cos()
+                with self._check_ctx(mode):
+                    (gx,) = torch.autograd.grad(z, x, torch.ones_like(z))
+                self.assertEqual(gx, expected)

     @parametrize("mode", ("nothing", "warn"))
     def test_inplace_on_tensor_that_does_not_require_grad(self, mode):
         # We don't do anything special (that is, we don't rebase history).
         # See NOTE [autograd fallback and in-place operations] for why
         with autograd_fallback_mode(mode):
-            lib = Library(self.test_ns, "FRAGMENT")
+            with _scoped_library(self.test_ns, "FRAGMENT") as lib:
+                # Correct usage of (a!)
+                lib.define("foo(Tensor(a!) self, Tensor other) -> Tensor(a!)")

-            # Correct usage of (a!)
-            lib.define("foo(Tensor(a!) self, Tensor other) -> Tensor(a!)")
+                def foo_impl(x, y):
+                    x_d = x.detach()
+                    y = y.detach()
+                    x_d.add_(y)
+                    return x

-            def foo_impl(x, y):
-                x_d = x.detach()
-                y = y.detach()
-                x_d.add_(y)
-                return x
+                lib.impl("foo", foo_impl, "CPU")
+                foo = self.get_op("foo")

-            lib.impl("foo", foo_impl, "CPU")
-            foo = self.get_op("foo")
+                # Incorrect usage of (a!): user doesn't return tensor as-is
+                lib.define("bar(Tensor(a!) self, Tensor other) -> Tensor(a!)")

-            # Incorrect usage of (a!): user doesn't return tensor as-is
-            lib.define("bar(Tensor(a!) self, Tensor other) -> Tensor(a!)")
+                def bar_impl(x, y):
+                    x_d = x.detach()
+                    y = y.detach()
+                    x_d.add_(y)
+                    return x_d.clone()

-            def bar_impl(x, y):
-                x_d = x.detach()
-                y = y.detach()
-                x_d.add_(y)
-                return x_d.clone()
+                lib.impl("bar", bar_impl, "CPU")
+                bar = self.get_op("bar")

-            lib.impl("bar", bar_impl, "CPU")
-            bar = self.get_op("bar")
+                # User mutated input tensor but didn't return it.
+                lib.define("baz(Tensor(a!) self, Tensor other) -> ()")

-            # User mutated input tensor but didn't return it.
-            lib.define("baz(Tensor(a!) self, Tensor other) -> ()")
+                def baz_impl(x, y):
+                    x_d = x.detach()
+                    y = y.detach()
+                    x_d.add_(y)

-            def baz_impl(x, y):
-                x_d = x.detach()
-                y = y.detach()
-                x_d.add_(y)
+                lib.impl("baz", baz_impl, "CPU")
+                baz = self.get_op("baz")

-            lib.impl("baz", baz_impl, "CPU")
-            baz = self.get_op("baz")
+                # Test in-place on non-view
+                for op in (foo, bar, baz):
+                    x = torch.randn(3)
+                    y = torch.randn(3, requires_grad=True)
+                    with self.assertRaisesRegex(RuntimeError, "does not require grad"):
+                        z = x.clone()
+                        op(z, y)
+                        torch.autograd.grad(z, y, torch.ones_like(z), allow_unused=True)

-            # Test in-place on non-view
-            for op in (foo, bar, baz):
-                x = torch.randn(3)
-                y = torch.randn(3, requires_grad=True)
-                with self.assertRaisesRegex(RuntimeError, "does not require grad"):
-                    z = x.clone()
-                    op(z, y)
-                    torch.autograd.grad(z, y, torch.ones_like(z), allow_unused=True)
-
-            # Test in-place on view
-            for op in (foo, bar, baz):
-                x = torch.randn(3)
-                y = torch.randn(3, requires_grad=True)
-                with self.assertRaisesRegex(RuntimeError, "does not require grad"):
-                    z = x[:]
-                    op(z, y)
-                    torch.autograd.grad(z, x, torch.ones_like(z), allow_unused=True)
+                # Test in-place on view
+                for op in (foo, bar, baz):
+                    x = torch.randn(3)
+                    y = torch.randn(3, requires_grad=True)
+                    with self.assertRaisesRegex(RuntimeError, "does not require grad"):
+                        z = x[:]
+                        op(z, y)
+                        torch.autograd.grad(z, x, torch.ones_like(z), allow_unused=True)

     @parametrize("mode", ("nothing", "warn"))
     def test_post_autograd_returns_leaf(self, mode):
diff --git a/test/test_custom_ops.py b/test/test_custom_ops.py
index 6ee0498587f3..828220643244 100644
--- a/test/test_custom_ops.py
+++ b/test/test_custom_ops.py
@@ -46,7 +46,7 @@ class CustomOpTestCaseBase(TestCase):
         return getattr(torch.ops, self.test_ns)

     def lib(self):
-        result = torch.library.Library(self.test_ns, "FRAGMENT")
+        result = torch.library.Library(self.test_ns, "FRAGMENT")  # noqa: TOR901
         self.libraries.append(result)
         return result

diff --git a/test/test_fake_tensor.py b/test/test_fake_tensor.py
index 975e1b9fa808..d84311aa4bd0 100644
--- a/test/test_fake_tensor.py
+++ b/test/test_fake_tensor.py
@@ -87,7 +87,7 @@ class FakeTensorTest(TestCase):
     def test_custom_op_fallback(self):
         from torch.library import Library, impl

-        test_lib = Library("my_test_op", "DEF")
+        test_lib = Library("my_test_op", "DEF")  # noqa: TOR901
         test_lib.define('foo(Tensor self) -> Tensor')

         @impl(test_lib, 'foo', 'CPU')
diff --git a/test/test_fx_passes.py b/test/test_fx_passes.py
index c5952df961e0..21400c41c1ae 100644
--- a/test/test_fx_passes.py
+++ b/test/test_fx_passes.py
@@ -743,7 +743,7 @@ class MultiOutputWithWithInvalidMatches:
 class QuantizationFp8Pattern:
     @classmethod
     def setup(cls):
-        cls.quantization = torch.library.Library("fp8_quantization", "DEF")
+        cls.quantization = torch.library.Library("fp8_quantization", "DEF")  # noqa: TOR901
         cls.quantization.define("quantize_per_tensor_affine_fp8(Tensor self, int dtype, float scale) -> Tensor")
         cls.quantization.define("dequantize_per_tensor_affine_fp8(Tensor self, int dtype, float scale) -> Tensor")

diff --git a/test/test_meta.py b/test/test_meta.py
index 989fafaa666d..afcc9d37f982 100644
--- a/test/test_meta.py
+++ b/test/test_meta.py
@@ -1325,26 +1325,22 @@ class TestMeta(TestCase):

     @onlyCPU
     def test_meta_autograd_no_error(self):
-        lib = torch.library.Library("meta_test", "DEF")
-        impl_cpu = torch.library.Library("meta_test", "IMPL", "CPU")
-        impl_meta = torch.library.Library("meta_test", "IMPL", "Meta")
+        with torch.library._scoped_library("meta_test", "DEF") as lib:
+            with torch.library._scoped_library("meta_test", "IMPL", "CPU") as impl_cpu:
+                with torch.library._scoped_library("meta_test", "IMPL", "Meta") as impl_meta:
+                    def foo_impl(x):
+                        return x + 1

-        def foo_impl(x):
-            return x + 1
+                    lib.define("foo(Tensor a) -> Tensor")
+                    impl_meta.impl("foo", foo_impl)
+                    impl_cpu.impl("foo", foo_impl)

-        lib.define("foo(Tensor a) -> Tensor")
-        impl_meta.impl("foo", foo_impl)
-        impl_cpu.impl("foo", foo_impl)
-
-        a = torch.ones(2, device='meta')
-        # The point of the test is that this should not error:
-        # We have a fallthrough kernel registered to the AutogradMeta
-        # key for custom ops, so it's fine that `foo()` doesn't have
-        # an autograd kernel.
-        b = torch.ops.meta_test.foo.default(a)
-        del impl_meta
-        del impl_cpu
-        del lib
+                    a = torch.ones(2, device='meta')
+                    # The point of the test is that this should not error:
+                    # We have a fallthrough kernel registered to the AutogradMeta
+                    # key for custom ops, so it's fine that `foo()` doesn't have
+                    # an autograd kernel.
+                    b = torch.ops.meta_test.foo.default(a)

     def test_huber_loss_backward(self):
         inps = [torch.rand(2**52, device='meta') for _ in range(3)]
diff --git a/test/test_proxy_tensor.py b/test/test_proxy_tensor.py
index 5e816cbcb9b4..4676aab6f541 100644
--- a/test/test_proxy_tensor.py
+++ b/test/test_proxy_tensor.py
@@ -955,7 +955,7 @@ class TestSymbolicTracing(TestCase):
         import torch.library
         from torch.library import Library

-        foo = Library("foo", "DEF")
+        foo = Library("foo", "DEF")  # noqa: TOR901
         foo.define("foo(Tensor self) -> Tensor")

         # Operator where meta and cpu disagree on strides
diff --git a/test/test_python_dispatch.py b/test/test_python_dispatch.py
index 572fa3aebd9f..d35e76e21c2c 100644
--- a/test/test_python_dispatch.py
+++ b/test/test_python_dispatch.py
@@ -63,10 +63,9 @@ class TestPythonRegistration(TestCase):
         # RuntimeError: impl("aten::neg", ...):
         # Explicitly provided namespace (aten) in operator name does not match ...
         with self.assertRaisesRegex(RuntimeError, "operator name does not match namespace"):
-            my_lib3 = Library("foo", "DEF")
-            my_lib3.define("neg(Tensor self) -> Tensor")
-            my_lib3.impl(torch.ops.aten.neg.default, my_neg, "AutogradCPU")
-            del my_lib3
+            with _scoped_library("foo", "DEF") as my_lib3:
+                my_lib3.define("neg(Tensor self) -> Tensor")
+                my_lib3.impl(torch.ops.aten.neg.default, my_neg, "AutogradCPU")

         # Example 2
         def my_mul(*args, **kwargs):
@@ -92,12 +91,12 @@ class TestPythonRegistration(TestCase):

     def test_error_if_fn_not_callable(self):
         with self.assertRaisesRegex(TypeError, "Input function is required to be a callable"):
-            my_lib = Library("aten", "IMPL")
-            my_lib.impl(torch.ops.aten.neg.default, [], "AutogradCPU")
+            with _scoped_library("aten", "IMPL") as my_lib:
+                my_lib.impl(torch.ops.aten.neg.default, [], "AutogradCPU")

     def test_finalizer(self):
         impls_refcnt = sys.getrefcount(torch.library._impls)
-        lib = Library(self.test_ns, "FRAGMENT")
+        lib = Library(self.test_ns, "FRAGMENT")  # noqa: TOR901
         lib.define("foo123(Tensor x) -> Tensor")

         # 1 for `lib`, 1 for sys.getrefcount
@@ -142,12 +141,11 @@ class TestPythonRegistration(TestCase):
             run[0] = True
             return args[0].clone()

-        my_lib1 = Library("aten", "IMPL")
-        my_lib1.impl('aten::sum', my_sum, "CPU")
-        x = torch.tensor([1, 2])
-        self.assertEqual(torch.sum(x), x)
-        self.assertTrue(run[0])
-        del my_lib1
+        with _scoped_library("aten", "IMPL") as my_lib1:
+            my_lib1.impl('aten::sum', my_sum, "CPU")
+            x = torch.tensor([1, 2])
+            self.assertEqual(torch.sum(x), x)
+            self.assertTrue(run[0])

         # Validate that the old behavior is restored for sum
         self.assertEqual(torch.sum(x), torch.tensor(3))
@@ -168,17 +166,16 @@ class TestPythonRegistration(TestCase):
             return jitted_where(*args, **kwargs)

         # overriding where's cuda kernel with Jiterator generated kernel
-        my_lib = Library("aten", "IMPL")
-        my_lib.impl('aten::where.self', inverted_where, "CUDA")
+        with _scoped_library("aten", "IMPL") as my_lib:
+            my_lib.impl('aten::where.self', inverted_where, "CUDA")

-        device = 'cuda'
-        cond = torch.tensor([True, True, False], device=device, dtype=torch.bool)
-        x = torch.tensor([1, 2, 3], device=device)
-        y = torch.tensor([-1, -2, -3], device=device)
+            device = 'cuda'
+            cond = torch.tensor([True, True, False], device=device, dtype=torch.bool)
+            x = torch.tensor([1, 2, 3], device=device)
+            y = torch.tensor([-1, -2, -3], device=device)

-        self.assertEqual(torch.where(cond, x, y), torch.tensor([-1, -2, 3]))
-        self.assertTrue(CALLED[0])
-        del my_lib
+            self.assertEqual(torch.where(cond, x, y), torch.tensor([-1, -2, 3]))
+            self.assertTrue(CALLED[0])

         # behavior restored after deregistration
         self.assertEqual(torch.where(cond, x, y), torch.tensor([1, 2, -3]))
@@ -199,13 +196,12 @@ class TestPythonRegistration(TestCase):
             return jitted_gelu(*args, **kwargs)

         # overriding gelu's cuda kernel with Jiterator generated relu kernel
-        my_lib = Library("aten", "IMPL")
-        my_lib.impl('aten::gelu', fast_gelu, "CUDA")
+        with _scoped_library("aten", "IMPL") as my_lib:
+            my_lib.impl('aten::gelu', fast_gelu, "CUDA")

-        x = torch.rand([3, 3], device='cuda', dtype=torch.float)
-        self.assertEqual(torch.nn.functional.gelu(x), torch.nn.functional.relu(x))
-        self.assertTrue(CALLED[0])
-        del my_lib
+            x = torch.rand([3, 3], device='cuda', dtype=torch.float)
+            self.assertEqual(torch.nn.functional.gelu(x), torch.nn.functional.relu(x))
+            self.assertTrue(CALLED[0])

         # behavior restored after deregistration
         self.assertNotEqual(torch.nn.functional.gelu(x), torch.nn.functional.relu(x))
@@ -226,13 +222,12 @@ class TestPythonRegistration(TestCase):
             return jitted_exp(*args, **kwargs)

         # overriding exp's cuda kernel with clipped_exp kernel
-        my_lib = Library("aten", "IMPL")
-        my_lib.impl('aten::exp', clipped_exp, "CUDA")
+        with _scoped_library("aten", "IMPL") as my_lib:
+            my_lib.impl('aten::exp', clipped_exp, "CUDA")

-        x = torch.tensor([0.0, 100.0], device='cuda', dtype=torch.float16)
-        self.assertEqual(torch.exp(x), torch.tensor([1.0, 22026.4657948], dtype=torch.float16))
-        self.assertTrue(CALLED[0])
-        del my_lib
+            x = torch.tensor([0.0, 100.0], device='cuda', dtype=torch.float16)
+            self.assertEqual(torch.exp(x), torch.tensor([1.0, 22026.4657948], dtype=torch.float16))
+            self.assertTrue(CALLED[0])

         # behavior restored after deregistration
         self.assertEqual(torch.exp(x), torch.tensor([1.0, torch.inf], dtype=torch.float16))
@@ -252,18 +247,17 @@ class TestPythonRegistration(TestCase):
             CALLED[0] = True
             return jitted_add(*args, **kwargs)

-        my_lib = Library("aten", "IMPL")
-        my_lib.impl('aten::add.Tensor', buggy_add, "CUDA")
+        with _scoped_library("aten", "IMPL") as my_lib:
+            my_lib.impl('aten::add.Tensor', buggy_add, "CUDA")

-        x_cpu = torch.rand([3, 3], device='cpu')
-        y_cpu = torch.rand([3], device='cpu')
+            x_cpu = torch.rand([3, 3], device='cpu')
+            y_cpu = torch.rand([3], device='cpu')

-        x_cuda = x_cpu.cuda()
-        y_cuda = y_cpu.cuda()
+            x_cuda = x_cpu.cuda()
+            y_cuda = y_cpu.cuda()

-        self.assertEqual(x_cuda + y_cuda, x_cpu + y_cpu + 1)
-        self.assertTrue(CALLED[0])
-        del my_lib
+            self.assertEqual(x_cuda + y_cuda, x_cpu + y_cpu + 1)
+            self.assertTrue(CALLED[0])

         # behavior restored after deregistration
         self.assertEqual(x_cuda + y_cuda, x_cpu + y_cpu)
@@ -277,97 +271,80 @@ class TestPythonRegistration(TestCase):
     def test_extend_library_with_dispatch_key_arg(self):
         def my_sum(*args, **kwargs):
             return args[0].clone()
-        my_lib1 = Library("aten", "IMPL", dispatch_key="CPU")
-
-        # RuntimeError: Explicitly provided dispatch key (Conjugate) is
-        # inconsistent with the dispatch key of the enclosing TORCH_LIBRARY_IMPL block
-        with self.assertRaisesRegex(RuntimeError, "inconsistent with the dispatch key"):
-            my_lib1.impl('sum', my_sum, "Conjugate")
-        my_lib1.impl('aten::sum', my_sum)
-        x = torch.tensor([1, 2])
-        self.assertEqual(torch.sum(x), x)
-        del my_lib1
+        with _scoped_library("aten", "IMPL", dispatch_key="CPU") as my_lib1:
+            # RuntimeError: Explicitly provided dispatch key (Conjugate) is
+            # inconsistent with the dispatch key of the enclosing TORCH_LIBRARY_IMPL block
+            with self.assertRaisesRegex(RuntimeError, "inconsistent with the dispatch key"):
+                my_lib1.impl('sum', my_sum, "Conjugate")
+            my_lib1.impl('aten::sum', my_sum)
+            x = torch.tensor([1, 2])
+            self.assertEqual(torch.sum(x), x)

     def test_create_new_library(self) -> None:
-        my_lib1 = Library(self.test_ns, "DEF")
+        with _scoped_library(self.test_ns, "DEF") as my_lib1:
+            my_lib1.define("sum(Tensor self) -> Tensor")

-        my_lib1.define("sum(Tensor self) -> Tensor")
-
-        # Example 1
-        @torch.library.impl(my_lib1, "sum", "CPU")
-        def my_sum(*args, **kwargs):
-            return args[0].clone()
-
-        x = torch.tensor([1, 2])
-        op = getattr(torch.ops, self.test_ns).sum
-        self.assertEqual(op(x), x)
-
-        my_lib2 = Library(self.test_ns, "IMPL")
-
-        # Example 2
-        @torch.library.impl(my_lib2, op.default, "ZeroTensor")
-        def my_sum_zt(*args, **kwargs):
-            if args[0]._is_zerotensor():
-                return torch._efficientzerotensor(args[0].shape)
-            else:
+            # Example 1
+            @torch.library.impl(my_lib1, "sum", "CPU")
+            def my_sum(*args, **kwargs):
                 return args[0].clone()

-        y = torch._efficientzerotensor(3)
-        self.assertTrue(op(y)._is_zerotensor())
-        self.assertEqual(op(x), x)
+            x = torch.tensor([1, 2])
+            op = getattr(torch.ops, self.test_ns).sum
+            self.assertEqual(op(x), x)

-        del my_lib2
-        del my_lib1
+            with _scoped_library(self.test_ns, "IMPL") as my_lib2:
+                # Example 2
+                @torch.library.impl(my_lib2, op.default, "ZeroTensor")
+                def my_sum_zt(*args, **kwargs):
+                    if args[0]._is_zerotensor():
+                        return torch._efficientzerotensor(args[0].shape)
+                    else:
+                        return args[0].clone()
+
+                y = torch._efficientzerotensor(3)
+                self.assertTrue(op(y)._is_zerotensor())
+                self.assertEqual(op(x), x)

     def test_create_new_library_fragment_no_existing(self):
-        my_lib = Library(self.test_ns, "FRAGMENT")
+        with _scoped_library(self.test_ns, "FRAGMENT") as my_lib:
+            my_lib.define("sum2(Tensor self) -> Tensor")

-        my_lib.define("sum2(Tensor self) -> Tensor")
+            @torch.library.impl(my_lib, "sum2", "CPU")
+            def my_sum(*args, **kwargs):
+                return args[0]

-        @torch.library.impl(my_lib, "sum2", "CPU")
-        def my_sum(*args, **kwargs):
-            return args[0]
-
-        x = torch.tensor([1, 2])
-        self.assertEqual(getattr(torch.ops, self.test_ns).sum2(x), x)
-
-        del my_lib
+            x = torch.tensor([1, 2])
+            self.assertEqual(getattr(torch.ops, self.test_ns).sum2(x), x)

     def test_create_new_library_fragment_with_existing(self):
-        my_lib1 = Library(self.test_ns, "DEF")
+        with _scoped_library(self.test_ns, "DEF") as my_lib1:
+            # Create a fragment
+            with _scoped_library(self.test_ns, "FRAGMENT") as my_lib2:
+                my_lib2.define("sum4(Tensor self) -> Tensor")

-        # Create a fragment
-        my_lib2 = Library(self.test_ns, "FRAGMENT")
+                @torch.library.impl(my_lib2, "sum4", "CPU")
+                def my_sum4(*args, **kwargs):
+                    return args[0]

-        my_lib2.define("sum4(Tensor self) -> Tensor")
+                x = torch.tensor([1, 2])
+                self.assertEqual(getattr(torch.ops, self.test_ns).sum4(x), x)

-        @torch.library.impl(my_lib2, "sum4", "CPU")
-        def my_sum4(*args, **kwargs):
-            return args[0]
+            # Create another fragment
+            with _scoped_library(self.test_ns, "FRAGMENT") as my_lib3:
+                my_lib3.define("sum3(Tensor self) -> Tensor")

-        x = torch.tensor([1, 2])
-        self.assertEqual(getattr(torch.ops, self.test_ns).sum4(x), x)
+                @torch.library.impl(my_lib3, "sum3", "CPU")
+                def my_sum3(*args, **kwargs):
+                    return args[0]

-        # Create another fragment
-        my_lib3 = Library(self.test_ns, "FRAGMENT")
-
-        my_lib3.define("sum3(Tensor self) -> Tensor")
-
-        @torch.library.impl(my_lib3, "sum3", "CPU")
-        def my_sum3(*args, **kwargs):
-            return args[0]
-
-        x = torch.tensor([1, 2])
-        self.assertEqual(getattr(torch.ops, self.test_ns).sum3(x), x)
-
-        del my_lib1
-        del my_lib2
-        del my_lib3
+                x = torch.tensor([1, 2])
+                self.assertEqual(getattr(torch.ops, self.test_ns).sum3(x), x)

     @unittest.skipIf(IS_WINDOWS, "Skipped under Windows")
     def test_alias_analysis(self):
         def test_helper(alias_analysis=""):
-            my_lib1 = Library(self.test_ns, "DEF")
+            my_lib1 = Library(self.test_ns, "DEF")  # noqa: TOR901

             called = [0]

@@ -388,11 +365,11 @@ class TestPythonRegistration(TestCase):

     def test_error_for_unsupported_ns_or_kind(self) -> None:
         with self.assertRaisesRegex(ValueError, "Unsupported kind"):
-            my_lib1 = Library("myns", "BLA")
+            my_lib1 = Library("myns", "BLA")  # noqa: TOR901

         for kind in ('DEF', 'FRAGMENT'):
             with self.assertRaisesRegex(ValueError, "reserved namespace"):
-                my_lib1 = Library("prim", kind)
+                my_lib1 = Library("prim", kind)  # noqa: TOR901

     def test_returning_symint(self) -> None:
         shape_env = ShapeEnv()
@@ -402,15 +379,15 @@ class TestPythonRegistration(TestCase):

         s0, s1 = ft.shape

-        tlib = Library(self.test_ns, "DEF")
-        tlib.define("sqsum(SymInt a, SymInt b) -> SymInt")
+        with _scoped_library(self.test_ns, "DEF") as tlib:
+            tlib.define("sqsum(SymInt a, SymInt b) -> SymInt")

-        @impl(tlib, "sqsum", "CompositeExplicitAutograd")
-        def sqsum(a: SymInt, b: SymInt):
-            return a * a + b * b
+            @impl(tlib, "sqsum", "CompositeExplicitAutograd")
+            def sqsum(a: SymInt, b: SymInt):
+                return a * a + b * b

-        out = getattr(torch.ops, self.test_ns).sqsum.default(s0, s1)
-        out_val = shape_env.evaluate_expr(out.node.expr)
+            out = getattr(torch.ops, self.test_ns).sqsum.default(s0, s1)
+            out_val = shape_env.evaluate_expr(out.node.expr)
         self.assertEqual(out_val, 13)

     def test_register_functional_op_error_cases(self):
@@ -566,8 +543,7 @@ class TestPythonRegistration(TestCase):
             getattr(torch.ops, self.test_ns).foo_functional.default, (x, y, z, w))

     def test_register_fallthrough(self):
-        try:
-            my_lib = Library('aten', 'IMPL')
+        with _scoped_library('aten', 'IMPL') as my_lib:
             my_lib.impl("mm", fallthrough_kernel, "AutocastCPU")

             a = torch.randn(2, 3, device='cpu', dtype=torch.float32)
@@ -577,8 +553,6 @@ class TestPythonRegistration(TestCase):
             self.assertEqual(torch.mm(a, b).dtype, torch.float32)
             # ops that don't have a fallthrough registered should not be affected
             self.assertEqual(torch.matmul(a, b).dtype, torch.bfloat16)
-        finally:
-            del my_lib

         with torch.autocast(device_type="cpu", dtype=torch.bfloat16):
             # default behavior should have been restored
@@ -694,13 +668,13 @@ $5: f32[2] = torch._ops.aten.clone.default($4, memory_format=torch.contiguous_fo
             print("woof")
             return torch.empty(())

-        my_lib = Library("my_lib", "DEF")
-        my_lib.define("weird(Tensor?[] self) -> Tensor")
-        my_lib.impl("weird", weird, "CPU")
-        with capture_logs() as logs:
-            x = LoggingTensor(torch.ones(2, 2))
-            log_input("x", x)
-            torch.ops.my_lib.weird.default([None, x])
+        with _scoped_library("my_lib", "DEF") as my_lib:
+            my_lib.define("weird(Tensor?[] self) -> Tensor")
+            my_lib.impl("weird", weird, "CPU")
+            with capture_logs() as logs:
+                x = LoggingTensor(torch.ones(2, 2))
+                log_input("x", x)
+                torch.ops.my_lib.weird.default([None, x])

         self.assertExpectedInline('\n'.join(logs), '''\
 $0: f32[2, 2] = input('x')
@@ -1485,28 +1459,29 @@ $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), p
             t.record_stream(s)

     def test_return_stream(self) -> None:
-        l_def = torch.library.Library("test_return_stream", "DEF")
-        l_def.define("return_stream(Tensor self) -> Stream")
-        l_impl = torch.library.Library("test_return_stream", "IMPL", "CPU")
-        l_impl.impl("return_stream", lambda _: torch.Stream(stream_id=0, device_index=1, device_type=2))
+        with _scoped_library("test_return_stream", "DEF") as l_def:
+            l_def.define("return_stream(Tensor self) -> Stream")
+            with _scoped_library("test_return_stream", "IMPL", "CPU") as l_impl:
+                l_impl.impl("return_stream",
+                            lambda _: torch.Stream(stream_id=0, device_index=1, device_type=2))

-        class TestMode(TorchDispatchMode):
-            def __torch_dispatch__(self, func, types, args=(), kwargs=None):
-                return torch.Stream(stream_id=1, device_index=2, device_type=3)
+                class TestMode(TorchDispatchMode):
+                    def __torch_dispatch__(self, func, types, args=(), kwargs=None):
+                        return torch.Stream(stream_id=1, device_index=2, device_type=3)

-        t = torch.tensor(5.)
-        s = torch.ops.test_return_stream.return_stream(t)
-        self.assertIsInstance(s, torch.Stream)
-        self.assertEqual(s.stream_id, 0)
-        self.assertEqual(s.device_index, 1)
-        self.assertEqual(s.device_type, 2)
+                t = torch.tensor(5.)
+                s = torch.ops.test_return_stream.return_stream(t)
+                self.assertIsInstance(s, torch.Stream)
+                self.assertEqual(s.stream_id, 0)
+                self.assertEqual(s.device_index, 1)
+                self.assertEqual(s.device_type, 2)

-        with TestMode():
-            s = torch.ops.test_return_stream.return_stream(t)
-            self.assertIsInstance(s, torch.Stream)
-            self.assertEqual(s.stream_id, 1)
-            self.assertEqual(s.device_index, 2)
-            self.assertEqual(s.device_type, 3)
+                with TestMode():
+                    s = torch.ops.test_return_stream.return_stream(t)
+                    self.assertIsInstance(s, torch.Stream)
+                    self.assertEqual(s.stream_id, 1)
+                    self.assertEqual(s.device_index, 2)
+                    self.assertEqual(s.device_type, 3)

     def test_subclass_autograd_device_check(self) -> None:
         class NonWrapperSubclass(torch.Tensor):
diff --git a/test/test_schema_check.py b/test/test_schema_check.py
index 177493d4a09e..07e6b7b001b4 100644
--- a/test/test_schema_check.py
+++ b/test/test_schema_check.py
@@ -26,17 +26,17 @@ def secretly_mutating(x):
 def output_is_input(x):
     return x

-custom_lib = torch.library.Library("bad_schemas", "DEF")
+custom_lib = torch.library.Library("bad_schemas", "DEF")  # noqa: TOR901
 custom_lib.define("secretly_aliasing(Tensor x) -> Tensor")
 custom_lib.define("secretly_mutating(Tensor x) -> Tensor")
 custom_lib.define("output_is_input(Tensor(a) x) -> Tensor(a)")

-custom_lib_cpu = torch.library.Library("bad_schemas", "IMPL", "CPU")
+custom_lib_cpu = torch.library.Library("bad_schemas", "IMPL", "CPU")  # noqa: TOR901
 custom_lib_cpu.impl("secretly_aliasing", secretly_aliasing)
 custom_lib_cpu.impl("secretly_mutating", secretly_mutating)
 custom_lib_cpu.impl("output_is_input", output_is_input)

-custom_lib_meta = torch.library.Library("bad_schemas", "IMPL", "Meta")
+custom_lib_meta = torch.library.Library("bad_schemas", "IMPL", "Meta")  # noqa: TOR901
 custom_lib_meta.impl("secretly_aliasing", secretly_aliasing)
 custom_lib_meta.impl("secretly_mutating", secretly_mutating)
 custom_lib_meta.impl("output_is_input", output_is_input)
diff --git a/test/test_torch.py b/test/test_torch.py
index 99c8f81b0575..dbab8da4230e 100644
--- a/test/test_torch.py
+++ b/test/test_torch.py
@@ -10205,7 +10205,7 @@ tensor([[[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j],
         from torch.library import Library, impl

         global _my_storage
-        my_lib = Library("my_lib", "DEF")
+        my_lib = Library("my_lib", "DEF")  # noqa: TOR901
         my_lib.define('my_func() -> None')

         a = torch.tensor([1.])
diff --git a/torch/testing/_internal/dynamo_test_failures.py b/torch/testing/_internal/dynamo_test_failures.py
index 0413da5caaab..e7200d72be80 100644
--- a/torch/testing/_internal/dynamo_test_failures.py
+++ b/torch/testing/_internal/dynamo_test_failures.py
@@ -2089,7 +2089,6 @@ dynamo_expected_failures = {
     "TestPythonRegistration.test_alias_analysis",  # test_python_dispatch
     "TestWrapperSubclassAliasingCPU.test_wrapper_subclass_aliasing_conv2d_cpu",  # test_python_dispatch
     "TestPythonRegistration.test_finalizer",  # test_python_dispatch
-    "TestPythonRegistration.test_override_cpu_sum",  # test_python_dispatch
     "TestPythonDispatch.test_subclass_autograd_device_check",  # test_python_dispatch
     "TestPythonDispatch.test_make_subclass_with_modes",  # test_python_dispatch
     "LoggingTests.test_trace_source_nested",  # dynamo/test_logging
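The test-file hunks above are mechanical applications of two fixes for the new TOR901 check (direct construction of torch.library.Library): registrations that must outlive a single test keep the Library object and gain a `# noqa: TOR901` suppression, while per-test registrations move under torch.library._scoped_library, a context manager that deregisters everything the library defined when the block exits, replacing the manual `del my_lib` / `try ... finally` cleanup. A minimal sketch of the scoped pattern follows; the namespace `my_ns` and the toy operator `twice` are illustrative and not taken from the patch:

import torch
from torch.library import _scoped_library

with _scoped_library("my_ns", "DEF") as lib:
    # Definitions and kernels registered here live only inside the block.
    lib.define("twice(Tensor x) -> Tensor")
    lib.impl("twice", lambda x: x * 2, "CPU")
    assert torch.equal(torch.ops.my_ns.twice(torch.ones(3)), torch.full((3,), 2.0))
# On exit the registrations are torn down even if the block raised,
# so torch.ops.my_ns.twice no longer resolves here.

The context-manager form is why several hunks (test_meta.py, test_python_dispatch.py, test_autograd_fallback.py) are dominated by pure re-indentation: the original statements are unchanged except for moving one or more levels deeper under the `with` blocks.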