diff --git a/test/inductor/test_aot_inductor_utils.py b/test/inductor/test_aot_inductor_utils.py
index 6868928957a2..5ec97d969a14 100644
--- a/test/inductor/test_aot_inductor_utils.py
+++ b/test/inductor/test_aot_inductor_utils.py
@@ -16,7 +16,7 @@ from torch._dynamo.testing import same
 from torch._inductor import config
 from torch._inductor.test_case import TestCase
 from torch.testing import FileCheck
-from torch.testing._internal.common_utils import IS_FBCODE
+from torch.testing._internal.common_utils import IS_FBCODE, run_tests
 from torch.testing._internal.inductor_utils import clone_preserve_strides_offset
 from torch.utils import _pytree as pytree
 
@@ -205,11 +205,14 @@ def check_model(
     atol=None,
     rtol=None,
 ):
-    with torch.no_grad(), config.patch(
-        {
-            "aot_inductor.allow_stack_allocation": self.allow_stack_allocation,
-            "aot_inductor.use_minimal_arrayref_interface": self.use_minimal_arrayref_interface,
-        }
+    with (
+        torch.no_grad(),
+        config.patch(
+            {
+                "aot_inductor.allow_stack_allocation": self.allow_stack_allocation,
+                "aot_inductor.use_minimal_arrayref_interface": self.use_minimal_arrayref_interface,
+            }
+        ),
     ):
         torch.manual_seed(0)
         if not isinstance(model, types.FunctionType):
@@ -248,11 +251,14 @@ def check_model_with_multiple_inputs(
     options=None,
     dynamic_shapes=None,
 ):
-    with torch.no_grad(), config.patch(
-        {
-            "aot_inductor.allow_stack_allocation": self.allow_stack_allocation,
-            "aot_inductor.use_minimal_arrayref_interface": self.use_minimal_arrayref_interface,
-        }
+    with (
+        torch.no_grad(),
+        config.patch(
+            {
+                "aot_inductor.allow_stack_allocation": self.allow_stack_allocation,
+                "aot_inductor.use_minimal_arrayref_interface": self.use_minimal_arrayref_interface,
+            }
+        ),
     ):
         torch.manual_seed(0)
         model = model.to(self.device)
@@ -275,11 +281,14 @@ def code_check_count(
     target_str: str,
     target_count: int,
 ):
-    with torch.no_grad(), config.patch(
-        {
-            "aot_inductor.allow_stack_allocation": self.allow_stack_allocation,
-            "aot_inductor.use_minimal_arrayref_interface": self.use_minimal_arrayref_interface,
-        }
+    with (
+        torch.no_grad(),
+        config.patch(
+            {
+                "aot_inductor.allow_stack_allocation": self.allow_stack_allocation,
+                "aot_inductor.use_minimal_arrayref_interface": self.use_minimal_arrayref_interface,
+            }
+        ),
     ):
         package_path = torch._export.aot_compile(model, example_inputs)
 
@@ -290,3 +299,7 @@ def code_check_count(
         target_count,
         exactly=True,
     ).run(src_code)
+
+
+if __name__ == "__main__":
+    run_tests()
diff --git a/test/lazy/test_bindings.py b/test/lazy/test_bindings.py
index 39466b33a168..f84763f69594 100644
--- a/test/lazy/test_bindings.py
+++ b/test/lazy/test_bindings.py
@@ -1,8 +1,13 @@
 # Owner(s): ["oncall: jit"]
 
 import torch._lazy.metrics
+from torch.testing._internal.common_utils import run_tests
 
 
 def test_metrics():
     names = torch._lazy.metrics.counter_names()
     assert len(names) == 0, f"Expected no counter names, but got {names}"
+
+
+if __name__ == "__main__":
+    run_tests()
diff --git a/test/lazy/test_extract_compiled_graph.py b/test/lazy/test_extract_compiled_graph.py
index 79359ddb769a..1ea0219066d4 100644
--- a/test/lazy/test_extract_compiled_graph.py
+++ b/test/lazy/test_extract_compiled_graph.py
@@ -206,3 +206,10 @@ class OptimizeTest(unittest.TestCase):
     test_return_multi = maketest(ModuleReturnMulti)
     test_return_dup_tensor = maketest(ModuleReturnDupTensor)
     test_inplace_update = maketest(ModuleInplaceUpdate)
+
+
+if __name__ == "__main__":
+    raise RuntimeError(
+        "This test is not currently used and should be "
+        "enabled in discover_tests.py if required."
+    )
diff --git a/test/lazy/test_meta_kernel.py b/test/lazy/test_meta_kernel.py
index e212fca89ba4..e0922b88fc28 100644
--- a/test/lazy/test_meta_kernel.py
+++ b/test/lazy/test_meta_kernel.py
@@ -37,3 +37,10 @@ class TestMetaKernel(TestCase):
     def test_add_invalid_device(self):
         with self.assertRaisesRegex(RuntimeError, ".*not a lazy tensor.*"):
             _ = torch.tensor([1], device="cpu") + torch.tensor([1], device="lazy")
+
+
+if __name__ == "__main__":
+    raise RuntimeError(
+        "This test is not currently used and should be "
+        "enabled in discover_tests.py if required."
+    )
diff --git a/test/onnx/test_models_quantized_onnxruntime.py b/test/onnx/test_models_quantized_onnxruntime.py
index 81a180ea01fd..991bb878df22 100644
--- a/test/onnx/test_models_quantized_onnxruntime.py
+++ b/test/onnx/test_models_quantized_onnxruntime.py
@@ -10,6 +10,7 @@ import torchvision
 
 import torch
 from torch import nn
+from torch.testing._internal import common_utils
 
 
 def _get_test_image_tensor():
@@ -95,3 +96,7 @@ class TestQuantizedModelsONNXRuntime(onnx_test_common._TestONNXRuntime):
             pretrained=True, quantize=True
         )
         self.run_test(model, _get_test_image_tensor())
+
+
+if __name__ == "__main__":
+    common_utils.run_tests()
diff --git a/test/onnx/test_onnxscript_no_runtime.py b/test/onnx/test_onnxscript_no_runtime.py
index fcac54d948d8..17e92f0e0117 100644
--- a/test/onnx/test_onnxscript_no_runtime.py
+++ b/test/onnx/test_onnxscript_no_runtime.py
@@ -160,3 +160,10 @@ class TestONNXScriptExport(common_utils.TestCase):
         )
         loop_selu_proto = onnx.load(io.BytesIO(saved_model.getvalue()))
         self.assertEqual(len(loop_selu_proto.functions), 1)
+
+
+if __name__ == "__main__":
+    raise RuntimeError(
+        "This test is not currently used and should be "
+        "enabled in discover_tests.py if required."
+    )
diff --git a/test/test_bundled_images.py b/test/test_bundled_images.py
index 1919e1cd4fe3..c8b6a6140025 100644
--- a/test/test_bundled_images.py
+++ b/test/test_bundled_images.py
@@ -92,3 +92,10 @@ class TestBundledImages(TestCase):
         im2_tensor = torch.ops.fb.image_decode_to_NCHW(byte_tensor, weight, bias)
         self.assertEqual(raw_data.shape, im2_tensor.shape)
         self.assertEqual(raw_data, im2_tensor, atol=0.1, rtol=1e-01)
+
+
+if __name__ == "__main__":
+    raise RuntimeError(
+        "This test is not currently used and should be "
+        "enabled in discover_tests.py if required."
+    )
diff --git a/test/test_hub.py b/test/test_hub.py
index 1447b3dc4a76..2add5926d2c4 100644
--- a/test/test_hub.py
+++ b/test/test_hub.py
@@ -8,7 +8,12 @@ from unittest.mock import patch
 
 import torch
 import torch.hub as hub
-from torch.testing._internal.common_utils import IS_SANDCASTLE, retry, TestCase
+from torch.testing._internal.common_utils import (
+    IS_SANDCASTLE,
+    retry,
+    run_tests,
+    TestCase,
+)
 
 
 def sum_of_state_dict(state_dict):
@@ -307,3 +312,7 @@ class TestHub(TestCase):
         torch.hub.load("ailzhang/torchhub_example", "mnist_zip_1_6", trust_repo="check")
 
         self._assert_trusted_list_is_empty()
+
+
+if __name__ == "__main__":
+    run_tests()
diff --git a/tools/testing/discover_tests.py b/tools/testing/discover_tests.py
index 614d036b45a9..28ff5bc3ff29 100644
--- a/tools/testing/discover_tests.py
+++ b/tools/testing/discover_tests.py
@@ -104,7 +104,10 @@ TESTS = discover_tests(
         "distributed/test_c10d_spawn",
         "distributions/test_transforms",
         "distributions/test_utils",
+        "lazy/test_meta_kernel",
+        "lazy/test_extract_compiled_graph",
         "test/inductor/test_aot_inductor_utils",
+        "onnx/test_onnxscript_no_runtime",
         "onnx/test_pytorch_onnx_onnxruntime_cuda",
         "onnx/test_models",
         # These are not C++ tests