diff --git a/test/ao/sparsity/test_activation_sparsifier.py b/test/ao/sparsity/test_activation_sparsifier.py index a0a41f6ed0a4..1a59aa0ac40b 100644 --- a/test/ao/sparsity/test_activation_sparsifier.py +++ b/test/ao/sparsity/test_activation_sparsifier.py @@ -20,7 +20,7 @@ logging.basicConfig( class Model(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = nn.Conv2d(1, 32, kernel_size=3) self.conv2 = nn.Conv2d(32, 32, kernel_size=3) diff --git a/test/ao/sparsity/test_data_sparsifier.py b/test/ao/sparsity/test_data_sparsifier.py index 7c0fc62909c9..90b204aec780 100644 --- a/test/ao/sparsity/test_data_sparsifier.py +++ b/test/ao/sparsity/test_data_sparsifier.py @@ -686,7 +686,7 @@ class TestNormDataSparsifiers(_NormDataSparsifierTestCase): class Model(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.emb1 = nn.Embedding(100, 3) self.embbag1 = nn.EmbeddingBag(200, 32) diff --git a/test/ao/sparsity/test_structured_sparsifier.py b/test/ao/sparsity/test_structured_sparsifier.py index f7faeea8fc55..ff4ffa4a308a 100644 --- a/test/ao/sparsity/test_structured_sparsifier.py +++ b/test/ao/sparsity/test_structured_sparsifier.py @@ -912,7 +912,7 @@ class TestFPGMPruner(TestCase): """ class SimpleConvFPGM(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv2d1 = nn.Conv2d( in_channels=1, out_channels=3, kernel_size=3, padding=1, bias=False diff --git a/test/bottleneck_test/test_cuda.py b/test/bottleneck_test/test_cuda.py index 012b61daaa45..5a28fe87a174 100644 --- a/test/bottleneck_test/test_cuda.py +++ b/test/bottleneck_test/test_cuda.py @@ -5,7 +5,7 @@ import torch.nn as nn class Model(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = nn.Linear(20, 20) diff --git a/test/cpp/aoti_inference/compile_model.py b/test/cpp/aoti_inference/compile_model.py index 1bbf39df2519..4542a9c8f9f8 100644 --- a/test/cpp/aoti_inference/compile_model.py +++ b/test/cpp/aoti_inference/compile_model.py @@ -19,7 +19,7 @@ class SimpleModule(torch.nn.Module): a simple module to be compiled """ - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc = torch.nn.Linear(4, 6) self.relu = torch.nn.ReLU() diff --git a/test/cpp/aoti_inference/test.py b/test/cpp/aoti_inference/test.py index cb428bcb3cce..ea3f6f042d3c 100644 --- a/test/cpp/aoti_inference/test.py +++ b/test/cpp/aoti_inference/test.py @@ -20,7 +20,7 @@ class Net(torch.nn.Module): class NetWithTensorConstants(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.w = torch.randn(30, 1, device="cuda") diff --git a/test/cpp/jit/test_lite_interpreter.cpp b/test/cpp/jit/test_lite_interpreter.cpp index 45e13c2e9320..21bdf60a250b 100644 --- a/test/cpp/jit/test_lite_interpreter.cpp +++ b/test/cpp/jit/test_lite_interpreter.cpp @@ -1098,7 +1098,7 @@ TEST(RunTimeTest, ParseBytecode) { // class Module(torch.nn.Module): // - // def __init__(self): + // def __init__(self) -> None: // super().__init__() // // def forward(self, x: int, h: int, xfirst: bool): @@ -1169,7 +1169,7 @@ TEST(RunTimeTest, ParseOperator) { // PyTorch program: // class Add(torch.nn.Module): - // def __init__(self): + // def __init__(self) -> None: // super().__init__() // def forward(self, a, b): diff --git a/test/cpp/jit/tests_setup.py b/test/cpp/jit/tests_setup.py index d0ddf3153295..aa3df8fd07de 100644 --- a/test/cpp/jit/tests_setup.py +++ b/test/cpp/jit/tests_setup.py @@ -26,7 +26,7 @@ 
class EvalModeForLoadedModule(FileSetup): def setup(self): class Model(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.dropout = torch.nn.Dropout(0.1) diff --git a/test/cpp/lite_interpreter_runtime/test_lite_interpreter_runtime.cpp b/test/cpp/lite_interpreter_runtime/test_lite_interpreter_runtime.cpp index f26cfcb2b520..088a4eb04c99 100644 --- a/test/cpp/lite_interpreter_runtime/test_lite_interpreter_runtime.cpp +++ b/test/cpp/lite_interpreter_runtime/test_lite_interpreter_runtime.cpp @@ -22,21 +22,21 @@ TEST(RunTimeTest, LoadAndForward) { // sequence.ptl source code: // class A(torch.nn.Module): - // def __init__(self): + // def __init__(self) -> None: // super().__init__() // // def forward(self, x): // return x + 1 // // class B(torch.nn.Module): - // def __init__(self): + // def __init__(self) -> None: // super().__init__() // // def forward(self, x): // return x + 2 // // class C(torch.nn.Module): - // def __init__(self): + // def __init__(self) -> None: // super().__init__() // self.A0 = A() // self.B0 = B() diff --git a/test/create_dummy_torchscript_model.py b/test/create_dummy_torchscript_model.py index 4a95b34bbe4c..89aae430f74c 100644 --- a/test/create_dummy_torchscript_model.py +++ b/test/create_dummy_torchscript_model.py @@ -6,7 +6,7 @@ from torch import nn class NeuralNetwork(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.flatten = nn.Flatten() self.linear_relu_stack = nn.Sequential( diff --git a/test/custom_operator/model.py b/test/custom_operator/model.py index ce763bdcce5d..f250506cf965 100644 --- a/test/custom_operator/model.py +++ b/test/custom_operator/model.py @@ -18,7 +18,7 @@ def get_custom_op_library_path(): class Model(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.p = torch.nn.Parameter(torch.eye(5)) diff --git a/test/export/opinfo_schema.py b/test/export/opinfo_schema.py index f19c29831576..dba401e0e5c2 100644 --- a/test/export/opinfo_schema.py +++ b/test/export/opinfo_schema.py @@ -32,7 +32,7 @@ class PreDispatchSchemaCheckMode(SchemaCheckMode): later decompose and become functional. """ - def __init__(self): + def __init__(self) -> None: self._dispatch_key = torch._C.DispatchKey.PreDispatch super().__init__() diff --git a/test/export/test_converter.py b/test/export/test_converter.py index 52bc30806aae..4e939c4b21f4 100644 --- a/test/export/test_converter.py +++ b/test/export/test_converter.py @@ -1010,7 +1010,7 @@ class TestConverter(TestCase): # Since self.data is only read but not written, it is lifted as # constant tensors. 
class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.data = torch.randn(3, 2) @@ -1018,7 +1018,7 @@ class TestConverter(TestCase): return x + self.data class Goo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.data = torch.randn(3, 2) self.foo = Foo() @@ -1032,7 +1032,7 @@ class TestConverter(TestCase): def test_prim_SetAttr(self): class Module(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.data = torch.nn.Buffer(torch.ones(3, 2)) @@ -1046,7 +1046,7 @@ class TestConverter(TestCase): ) class Module(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.data = torch.nn.Buffer(torch.ones(3, 2)) @@ -1064,7 +1064,7 @@ class TestConverter(TestCase): # In converter, we change tensor constants that are assigned as a buffer automatically, # since it might be hard to manually register them as buffers. class Module(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.data = torch.ones(3, 2) @@ -1082,7 +1082,7 @@ class TestConverter(TestCase): ) class Module(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.count = 0 @@ -1165,7 +1165,7 @@ class TestConverter(TestCase): def test_context_manager(self): class ContextManager: - def __init__(self): + def __init__(self) -> None: self.count = 0 return @@ -1211,7 +1211,7 @@ class TestConverter(TestCase): def test_ts2ep_multi_outputs_on_call_ops(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.pool = torch.nn.AdaptiveMaxPool2d((2, 2), return_indices=True) diff --git a/test/export/test_experimental.py b/test/export/test_experimental.py index 42d20a5652aa..468ee296e36e 100644 --- a/test/export/test_experimental.py +++ b/test/export/test_experimental.py @@ -18,7 +18,7 @@ class TestExperiment(TestCase): def test_with_buffer_as_submodule(self): @_mark_strict_experimental class B(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.buffer1 = torch.nn.Buffer(torch.ones(3)) @@ -31,7 +31,7 @@ class TestExperiment(TestCase): return x.sum() + y.sum() + buffer_updated.sum() class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.submodule = B() @@ -86,7 +86,7 @@ def forward(self, arg0_1, arg1_1): def test_mark_strict_with_container_type(self): @_mark_strict_experimental class B(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x): @@ -94,7 +94,7 @@ def forward(self, arg0_1, arg1_1): return x0.sum() class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.submodule = B() @@ -194,7 +194,7 @@ def forward(self, arg0_1, arg1_1): def test_joint_basic(self) -> None: class Module(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(3, 3) self.loss = torch.nn.CrossEntropyLoss() @@ -266,7 +266,7 @@ def forward(self, arg0_1, arg1_1): from torch.export import Dim class Module(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.y = torch.nn.Parameter(torch.randn(3)) diff --git a/test/export/test_export.py b/test/export/test_export.py index d1379bd4ee7b..5fe1725729e6 100644 --- a/test/export/test_export.py +++ b/test/export/test_export.py @@ -359,7 +359,7 @@ graph(): 
return x + x class Basic(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.external_add = ExternalMethod().add @@ -373,7 +373,7 @@ graph(): def test_colon_parameter(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.register_parameter("foo:bar", torch.nn.Parameter(torch.ones(3, 3))) @@ -445,7 +445,7 @@ graph(): def test_basic_non_strict_real_tensor(self): class Basic(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.param = torch.nn.Parameter(torch.randn(1, 3)) @@ -459,7 +459,7 @@ graph(): def test_basic_non_strict_fake_tensor(self): class Basic(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.param = torch.nn.Parameter(torch.randn(3, 2)) @@ -476,7 +476,7 @@ graph(): def test_non_strict_dynamic_shapes(self): class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.u = torch.nn.Buffer(torch.ones(1)) self.v = torch.nn.Buffer(torch.ones(1)) @@ -591,7 +591,7 @@ graph(): def test_state_tensors(self): class M(torch.nn.Module): # simple with register buffer - def __init__(self): + def __init__(self) -> None: super().__init__() self.buf = torch.nn.Buffer(torch.ones(2, 3), persistent=False) @@ -615,7 +615,7 @@ graph(): ) class M(torch.nn.Module): # simple without register buffer - def __init__(self): + def __init__(self) -> None: super().__init__() self.buf = torch.ones(2, 3) @@ -635,7 +635,7 @@ graph(): torch.export.export(M(), (torch.randn(2, 3),), strict=False) class M(torch.nn.Module): # complex with register buffer - def __init__(self): + def __init__(self) -> None: super().__init__() tensors = [torch.ones(2, 3), torch.ones(2, 3)] for i, tensor in enumerate(tensors): @@ -666,7 +666,7 @@ graph(): ) class M(torch.nn.Module): # complex without register buffer - def __init__(self): + def __init__(self) -> None: super().__init__() self.tensors = [torch.ones(2, 3), torch.ones(2, 3)] @@ -694,7 +694,7 @@ graph(): def test_state_primitives(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.x = 1 self.y = {"k": 2} @@ -713,7 +713,7 @@ graph(): def test_torch_fn(self): class M1(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(3, 3) self.relu = torch.nn.ReLU() @@ -741,7 +741,7 @@ graph(): self.assertEqual(actual_result, expected_result) class M2(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x, weight, bias): @@ -803,7 +803,7 @@ graph(): def test_export_preserve_linear_at_aot_level(self): class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(3, 3) @@ -852,7 +852,7 @@ def forward(self, p_linear_weight, p_linear_bias, x): return self.foo(x) class CondBranchClassMethod(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.subm = MySubModule() @@ -1191,7 +1191,7 @@ def forward(self, p_linear_weight, p_linear_bias, x): def test_keep_composite_ops_invalid(self): class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(3, 3) @@ -1228,7 +1228,7 @@ def forward(self, p_linear_weight, p_linear_bias, x): def test_keep_composite_ops_linear_convd(self): class MyLinear(torch.nn.Module): - def __init__(self): + def 
__init__(self) -> None: super().__init__() self.weight = torch.randn(20, 98) self.bias = torch.randn(20) @@ -1237,7 +1237,7 @@ def forward(self, p_linear_weight, p_linear_bias, x): return torch.nn.functional.linear(x, self.weight, self.bias) class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(16, 33, 3) self.conv1d = torch.nn.Conv1d(16, 33, 3) @@ -1313,7 +1313,7 @@ def forward(self, p_conv_weight, p_conv_bias, p_conv1d_weight, p_conv1d_bias, c_ def test_keep_composite_ops_linear_convd_for_training_ir(self): class MyLinear(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.weight = torch.nn.Buffer(torch.randn(20, 98)) self.bias = torch.nn.Buffer(torch.randn(20)) @@ -1322,7 +1322,7 @@ def forward(self, p_conv_weight, p_conv_bias, p_conv1d_weight, p_conv1d_bias, c_ return torch.nn.functional.linear(x, self.weight, self.bias) class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(16, 33, 3) self.conv1d = torch.nn.Conv1d(16, 33, 3) @@ -1460,7 +1460,7 @@ def forward(self, p_conv_weight, p_conv_bias, p_conv1d_weight, p_conv1d_bias, b_ def test_simple_export_for_training(self): class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(2, 2) @@ -1496,7 +1496,7 @@ def forward(self, x): def test_export_for_training_with_mutation(self): class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.buffer = torch.nn.Buffer(torch.ones(4, 4)) @@ -1540,7 +1540,7 @@ def forward(self, x): def test_export_for_training_with_dynamic_shapes(self): class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.buffer = torch.nn.Buffer(torch.ones(4, 4)) @@ -1577,7 +1577,7 @@ def forward(self, x): def test_export_for_training_with_container_type(self): class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.buffer = torch.nn.Buffer(torch.ones(4, 4)) @@ -1605,7 +1605,7 @@ def forward(self, x): def test_export_for_training_run_decomp(self): class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.buffer = torch.nn.Buffer(torch.ones(2, 2)) self.linear = torch.nn.Linear(2, 2) @@ -1672,7 +1672,7 @@ def forward(self, p_linear_weight, p_linear_bias, b_buffer, x): def test_static_dim_constraints(self): class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.l = torch.nn.Linear(6, 4) @@ -1896,7 +1896,7 @@ def forward(self, p_linear_weight, p_linear_bias, b_buffer, x): # Just to introduce some indirection: N is a top-level module N that calls # module M, defined next. 
class N(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.m = M() @@ -2408,7 +2408,7 @@ def forward(self, p_linear_weight, p_linear_bias, b_buffer, x): return y[:b] class M2(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.m1 = M1() self.m3 = M3() @@ -2462,7 +2462,7 @@ def forward(self, p_linear_weight, p_linear_bias, b_buffer, x): @testing.expectedFailureTrainingIRToRunDecompNonStrict def test_linear_conv(self): class MyLinear(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.weight = torch.randn(20, 98) self.bias = torch.randn(20) @@ -2471,7 +2471,7 @@ def forward(self, p_linear_weight, p_linear_bias, b_buffer, x): return torch.nn.functional.linear(x, self.weight, self.bias) class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(16, 33, 3) self.linear = MyLinear() @@ -3100,7 +3100,7 @@ def forward(self, p_linear_weight, p_linear_bias, b_buffer, x): def test_param_util(self): class Basic(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.lin = torch.nn.Linear(10, 1) @@ -3137,7 +3137,7 @@ def forward(self, p_linear_weight, p_linear_bias, b_buffer, x): def test_export_dynamo_config(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.lstm = torch.nn.LSTM(input_size=4, hidden_size=5, num_layers=1) @@ -3251,7 +3251,7 @@ def forward(self, p_linear_weight, p_linear_bias, b_buffer, x): def test_module(self): class MyLinear(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.weight = torch.randn(20, 98) self.bias = torch.randn(20) @@ -3260,7 +3260,7 @@ def forward(self, p_linear_weight, p_linear_bias, b_buffer, x): return torch.nn.functional.linear(x, self.weight, self.bias) class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(16, 33, 3) self.linear = MyLinear() @@ -3296,7 +3296,7 @@ def forward(self, p_linear_weight, p_linear_bias, b_buffer, x): def test_module_with_dict_container_inp_out(self): class MyLinear(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.weight = torch.randn(20, 98) self.bias = torch.randn(20) @@ -3305,7 +3305,7 @@ def forward(self, p_linear_weight, p_linear_bias, b_buffer, x): return torch.nn.functional.linear(x, self.weight, self.bias) class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(16, 33, 3) self.linear = MyLinear() @@ -3364,7 +3364,7 @@ def forward(self, p_linear_weight, p_linear_bias, b_buffer, x): def test_decomp_batch_norm_functional_predispatch(self): class ConvBatchnorm(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(1, 3, 1, 1) self.bn = torch.nn.BatchNorm2d(3) @@ -3605,7 +3605,7 @@ def forward(self, x): def test_constrain_decomp(self) -> None: class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.freq = torch.ones(5, 5) @@ -3701,7 +3701,7 @@ def forward(self, x): def test_to_module_with_mutated_buffer(self): class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.buf = torch.nn.Buffer(torch.zeros(1)) @@ -3730,7 +3730,7 @@ def forward(self, x): def 
test_to_module_with_mutated_buffer_multiple(self): class Bar(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.buf = torch.nn.Buffer(torch.ones(1)) @@ -3739,7 +3739,7 @@ def forward(self, x): return x.sum() + self.buf.sum() class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.buf = torch.nn.Buffer(torch.zeros(1)) self.bar = Bar() @@ -3829,7 +3829,7 @@ def forward(self, x): def test_to_module_with_mutated_buffer_multiple_update_sub_later(self): class Bar(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.buf = torch.nn.Buffer(torch.ones(1)) @@ -3838,7 +3838,7 @@ def forward(self, x): return x.sum() + self.buf.sum() class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.buf = torch.nn.Buffer(torch.zeros(1)) self.bar = Bar() @@ -3882,7 +3882,7 @@ def forward(self, x): def test_retracable_ep(self): class Bar(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.buf = torch.nn.Buffer(torch.ones(1)) @@ -3891,7 +3891,7 @@ def forward(self, x): return x.sum() + self.buf.sum() class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.buf = torch.nn.Buffer(torch.zeros(1)) self.bar = Bar() @@ -3938,7 +3938,7 @@ def forward(self, x): def test_export_cond_symbool_pred(self): class A(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.buffer = torch.nn.Buffer(torch.ones(6, 4)) @@ -3946,7 +3946,7 @@ def forward(self, x): return self.buffer.cos() class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.a = A() @@ -3991,7 +3991,7 @@ def forward(self, b_a_buffer, x): def test_cond_buffers(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.register_parameter( "param", torch.nn.Parameter(torch.ones(2, 3), requires_grad=False) @@ -4024,7 +4024,7 @@ def forward(self, b_a_buffer, x): @unittest.expectedFailure def test_map_buffers(self): class M1(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.register_parameter( "param", torch.nn.Parameter(torch.tensor(5), requires_grad=False) @@ -4060,7 +4060,7 @@ def forward(self, b_a_buffer, x): @testing.expectedFailureTrainingIRToRunDecompNonStrict def test_retrace_graph_level_meta_preservation(self): class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x): @@ -4118,7 +4118,7 @@ def forward(self, b_a_buffer, x): def test_train_eval_on_exported_preautograd_module(self): class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x): @@ -4150,7 +4150,7 @@ def forward(self, b_a_buffer, x): self.assertEqual(len(ep.constants), 1) class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.a = torch.tensor(3) @@ -4252,7 +4252,7 @@ def forward(self, b_a_buffer, x): def test_export_decomps_simple(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.lin = torch.nn.Linear(10, 1) @@ -4278,7 +4278,7 @@ def forward(self, b_a_buffer, x): def test_export_decomps_dynamic(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.lin = torch.nn.Linear(10, 1) @@ -4369,7 
+4369,7 @@ def forward(self, b_a_buffer, x): def test_constant_output(self): class ModuleConstant(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.b = torch.randn(3, 2) @@ -4377,7 +4377,7 @@ def forward(self, b_a_buffer, x): return self.b class ModuleNestedConstant(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.bff = torch.randn(3, 2) @@ -4486,7 +4486,7 @@ graph(): def test_nested_module_with_init_buffer(self): class M1(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.b = torch.ones(3, 3) @@ -4522,7 +4522,7 @@ graph(): @testing.expectedFailureRetraceability # Retracing tensor constants results in buffers def test_nested_module_with_constant_buffer(self): class M1(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.b = torch.tensor(5) @@ -4572,7 +4572,7 @@ graph(): def test_nested_module_with_parameter(self): class M1(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.a = torch.nn.Parameter(torch.ones(3, 3)) self.b = torch.nn.Parameter(torch.tensor(5.0)) @@ -4630,7 +4630,7 @@ graph(): def test_retrace_pre_autograd(self): class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.buffer = torch.nn.Buffer(torch.ones(4, 4)) @@ -4684,7 +4684,7 @@ graph(): @unittest.skip("Test is only supposed to work with non-strict mode") def test_issue_113041(self): class TestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.a = torch.tensor(1.0) @@ -4699,7 +4699,7 @@ graph(): handle = seq.register_forward_hook(forward_hook) class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.seq = seq @@ -4791,7 +4791,7 @@ graph(): def test_run_decomposition_supports_user_input_mutation(self): class SingleOp(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.op = torch.ops.aten.native_batch_norm @@ -5052,7 +5052,7 @@ graph(): def test_check_specialized_int(self): class SingleOp(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.op = torch.ops.aten.scatter_add @@ -5085,7 +5085,7 @@ graph(): return x / x class Child1(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.nested = NestedChild() self.register_parameter( @@ -5097,7 +5097,7 @@ graph(): return x + self.child1param class Child2(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.child2buffer = torch.nn.Buffer(torch.ones(2, 3)) @@ -5105,7 +5105,7 @@ graph(): return x - self.child2buffer class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.foo = Child1() self.bar = Child2() @@ -5148,7 +5148,7 @@ graph(): def test_nn_module_stack(self): class Leaf(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(4, 4) @@ -5156,7 +5156,7 @@ graph(): return self.linear(x) class Bar(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.leaf = Leaf() self.buffer = torch.nn.Buffer(torch.randn(4, 4)) @@ -5165,7 +5165,7 @@ graph(): return self.buffer.sum() + self.leaf(x).sum() class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.bar = Bar() @@ -5205,7 
+5205,7 @@ graph(): def test_nn_module_stack_shared_submodule(self): class Leaf(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(4, 4) @@ -5213,7 +5213,7 @@ graph(): return self.linear(x) class Bar(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.leaf = Leaf() self.buffer = torch.nn.Buffer(torch.randn(4, 4)) @@ -5222,7 +5222,7 @@ graph(): return self.buffer.sum() + self.leaf(x).sum() class BarDifferent(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.leaf = Leaf() @@ -5232,7 +5232,7 @@ graph(): return a + b class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.bar = Bar() self.bar_different = BarDifferent() @@ -5286,7 +5286,7 @@ graph(): def test_stack_trace(self): class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(4, 4) @@ -5318,7 +5318,7 @@ graph(): # Guard validation upsets the guard def test_cond_with_module_stack_export_with(self): class Bar(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(4, 4) @@ -5332,7 +5332,7 @@ graph(): return torch.cond(x.sum() > 4, true_fn, false_fn, [x]) class CondExport(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.bar = Bar() @@ -5371,7 +5371,7 @@ def forward(self, p_bar_linear_weight, p_bar_linear_bias, x): @unittest.expectedFailure def test_cond_with_module_stack_export_with_unflatten(self): class Bar(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(4, 4) @@ -5385,7 +5385,7 @@ def forward(self, p_bar_linear_weight, p_bar_linear_bias, x): return torch.cond(x.shape[0] > 4, true_fn, false_fn, [x]) class CondExport(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.bar = Bar() @@ -5412,7 +5412,7 @@ def forward(self, p_bar_linear_weight, p_bar_linear_bias, x): def test_predispatch_cond(self): class Model(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.pred = torch.nn.Buffer(torch.tensor(False)) self.t = torch.nn.Buffer(torch.tensor(10)) @@ -5525,7 +5525,7 @@ def forward(self, x, b_t, y): N, C, H, W = 1, 2, 2, 3 class Module(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() layer = torch.nn.LayerNorm([C, H, W]) self.norms = torch.nn.ModuleList( @@ -5548,7 +5548,7 @@ def forward(self, x, b_t, y): def test_non_persistent_buffer(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.foo = torch.nn.Buffer(torch.rand(2, 3), persistent=False) @@ -5556,7 +5556,7 @@ def forward(self, x, b_t, y): return self.foo + x class MyOuterModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.inner = MyModule() @@ -5607,7 +5607,7 @@ def forward(self, x, b_t, y): def test_nonstrict_retrace_preserves_metadata(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(4, 4) @@ -5625,7 +5625,7 @@ def forward(self, x, b_t, y): def test_fake_weights(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.foo = torch.nn.Parameter(torch.randn(4, 4)) 
self.bar = torch.nn.Buffer(torch.randn(4, 4), persistent=False) @@ -5645,7 +5645,7 @@ def forward(self, x, b_t, y): def test_fake_inputs(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.foo = torch.nn.Parameter(torch.randn(4, 4)) @@ -5664,7 +5664,7 @@ def forward(self, x, b_t, y): def test_trace_under_fake(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.foo = torch.nn.Parameter(torch.randn(4, 4)) @@ -5714,7 +5714,7 @@ def forward(self, x, b_t, y): def test_user_input_and_buffer_mutation(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.foo = torch.nn.Buffer(torch.randn(4, 4)) @@ -5739,7 +5739,7 @@ def forward(self, x, b_t, y): def test_custom_op_auto_functionalize(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x, z): @@ -5766,7 +5766,7 @@ def forward(self, x, b_t, y): def test_custom_op_auto_functionalize_pre_dispatch(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x): @@ -5800,7 +5800,7 @@ def forward(self, x): def test_custom_op_auto_warn_pre_dispatch(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x): @@ -5851,7 +5851,7 @@ def forward(self, x): # test collisions between user inputs and params, buffers, constants class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.param = torch.nn.Parameter(torch.randn(4)) self.alpha = torch.nn.Buffer(torch.randn(4), persistent=True) @@ -6197,7 +6197,7 @@ def forward(self, x, y): @testing.expectedFailureSerDer def test_preserve_requires_grad_placeholders(self): class Module(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.p = torch.nn.Parameter(torch.randn(3, 3)) @@ -6216,7 +6216,7 @@ def forward(self, x, y): def test_reshape_view_helper(self): # see: https://github.com/pytorch/pytorch/issues/126607 class Model(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x): @@ -6326,7 +6326,7 @@ def forward(self, x, y): return x + self.foo + self.m2(x) class M2(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.foo = torch.ones(3, 3) @@ -6352,7 +6352,7 @@ def forward(self, x, y): @testing.expectedFailureRetraceability def test_unused_aliases(self): class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() # param self.alpha = torch.nn.Parameter(torch.randn(4)) @@ -6512,7 +6512,7 @@ def forward(self, x, y): return y[d:] class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.m1 = M1() @@ -6539,7 +6539,7 @@ def forward(self, x, y): def test_split_const_gm_with_lifted_constants(self): class Model(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.w_pre = torch.randn(4, 4) self.b = torch.randn(4) @@ -6599,7 +6599,7 @@ class TestOneOffModelExportResult(TestCase): """ class ScaledDotProductAttention(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, q, k, v): @@ -6640,7 +6640,7 @@ class TestOneOffModelExportResult(TestCase): """ class ScaledDotProductAttention(torch.nn.Module): 
- def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, q, k, v): @@ -6866,7 +6866,7 @@ def forward(self, x): def test_constant_fqn(self): class Nested(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.constant = torch.rand(2, 3) self.parameter = torch.nn.Parameter(torch.rand(2, 3)) @@ -6875,7 +6875,7 @@ def forward(self, x): return x + self.constant class Mod(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.nested = Nested() @@ -6889,7 +6889,7 @@ def forward(self, x): def test_constant_name(self): class Nested(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.constant = torch.rand(2, 3) self.parameter = torch.nn.Parameter(torch.rand(2, 3)) @@ -6898,7 +6898,7 @@ def forward(self, x): return x + self.constant class Mod(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.nested_1 = Nested() self.nested_2 = Nested() @@ -6933,7 +6933,7 @@ def forward(self, x): def test_nested_retrace(self): class Nested(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.param = torch.nn.Parameter(torch.randn(3)) @@ -6941,7 +6941,7 @@ def forward(self, x): return x + self.param class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.nested = Nested() diff --git a/test/export/test_lift_unlift.py b/test/export/test_lift_unlift.py index 5d9437de6d92..c027fc557178 100644 --- a/test/export/test_lift_unlift.py +++ b/test/export/test_lift_unlift.py @@ -29,7 +29,7 @@ from torch.testing._internal.common_utils import ( class GraphBuilder: - def __init__(self): + def __init__(self) -> None: self.graph = torch.fx.Graph() self.nodes = {} self.values = {} @@ -354,7 +354,7 @@ class TestLift(TestCase): def test_unlift_nonpersistent_buffer(self): class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.register_buffer( "non_persistent_buf", torch.zeros(1), persistent=False diff --git a/test/export/test_pass_infra.py b/test/export/test_pass_infra.py index 7f7776cc5988..832f1ef1e93f 100644 --- a/test/export/test_pass_infra.py +++ b/test/export/test_pass_infra.py @@ -44,7 +44,7 @@ class TestPassInfra(TestCase): @unittest.skipIf(IS_WINDOWS, "Windows not supported") def test_cond(self) -> None: class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, pred, x, y): @@ -74,7 +74,7 @@ class TestPassInfra(TestCase): # Tests that graph nodes stay the same for nodes that are not touched # during transformation class CustomModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() # Define a parameter @@ -110,7 +110,7 @@ class TestPassInfra(TestCase): # Checks that pass infra correctly updates graph signature # after transformations. 
class CustomModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.my_parameter = torch.nn.Parameter(torch.tensor(2.0)) @@ -152,7 +152,7 @@ class TestPassInfra(TestCase): def test_replace_hook_basic(self) -> None: class CustomModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.my_parameter = torch.nn.Parameter(torch.tensor(2.0)) diff --git a/test/export/test_passes.py b/test/export/test_passes.py index c6cdc5d35078..7455dd31f24c 100644 --- a/test/export/test_passes.py +++ b/test/export/test_passes.py @@ -87,12 +87,12 @@ def _get_output_names(gm: torch.fx.GraphModule) -> List[str]: class ModelsWithScriptObjectAttr: class Simple(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.attr = torch.classes._TorchScriptTesting._Foo(10, 20) class SimpleWithAttrInContainer(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.attr = torch.classes._TorchScriptTesting._Foo(10, 20) self.pytree_attr2 = [ @@ -104,7 +104,7 @@ class ModelsWithScriptObjectAttr: ] class NestedWithAttrInContainer(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.attr = torch.classes._TorchScriptTesting._Foo(10, 20) self.pytree_attr2 = [ @@ -118,7 +118,7 @@ class ModelsWithScriptObjectAttr: self.sub_mod2 = ModelsWithScriptObjectAttr.SimpleWithAttrInContainer() class MoreNestedWithAttrInContainer(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.attr = torch.classes._TorchScriptTesting._Foo(10, 20) self.pytree_attr2 = [ @@ -267,7 +267,7 @@ class TestPasses(TestCase): def test_runtime_assert_one_dim(self) -> None: class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x): @@ -290,7 +290,7 @@ class TestPasses(TestCase): def test_runtime_assert_multiple_dims(self) -> None: class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x, y): @@ -320,7 +320,7 @@ class TestPasses(TestCase): def test_runtime_assert_some_dims_not_specified(self) -> None: class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x, y): @@ -357,7 +357,7 @@ class TestPasses(TestCase): def test_runtime_assert_some_inps_not_used(self) -> None: class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x, y): @@ -389,7 +389,7 @@ class TestPasses(TestCase): def test_view_to_view_copy(self) -> None: class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x): @@ -444,7 +444,7 @@ class TestPasses(TestCase): def test_custom_obj_tuple_out(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.attr = torch.classes._TorchScriptTesting._Foo(10, 20) @@ -471,7 +471,7 @@ class TestPasses(TestCase): def test_remove_effect_token_kwargs(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.attr = torch.classes._TorchScriptTesting._Foo(10, 20) @@ -546,7 +546,7 @@ def forward(self, token, obj_attr, x): def test_runtime_assert_inline_constraints_for_item(self) -> None: class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x): @@ -569,7 +569,7 @@ def 
forward(self, token, obj_attr, x): def test_runtime_assert_inline_constraints_for_nonzero(self) -> None: class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x): @@ -613,7 +613,7 @@ def forward(self, token, obj_attr, x): # TODO(pianpwk): add back runtime asserts to subgraphs def test_runtime_assert_inline_constraints_for_cond(self) -> None: class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, pred, x, y): @@ -870,7 +870,7 @@ def forward(self, sin, cos): return x + y.add_(1) class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.state = torch.nn.Buffer(torch.zeros(1)) @@ -911,7 +911,7 @@ def forward(self, sin, cos): return (x, x + y.add_(1)) class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.state = torch.nn.Buffer(torch.zeros(1)) diff --git a/test/export/test_serialize.py b/test/export/test_serialize.py index 367ba530f1b6..79290848830e 100644 --- a/test/export/test_serialize.py +++ b/test/export/test_serialize.py @@ -121,7 +121,7 @@ class TestSerialize(TestCase): def test_predispatch_export_with_autograd_op(self): class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x): @@ -148,7 +148,7 @@ class TestSerialize(TestCase): class MyModule(torch.nn.Module): """A test module with that has multiple args and uses kwargs""" - def __init__(self): + def __init__(self) -> None: super().__init__() self.p = torch.nn.Parameter(torch.ones(2, 3)) @@ -178,7 +178,7 @@ class TestSerialize(TestCase): # Tests that modules with more complicated layer patterns can be serialized # and deserialized correctly. 
class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.layers = torch.nn.Sequential( torch.nn.SiLU(), @@ -209,7 +209,7 @@ class TestSerialize(TestCase): def test_serialize_constant_outputs(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x): @@ -231,7 +231,7 @@ class TestSerialize(TestCase): def test_serialize_multiple_returns_from_node(self) -> None: class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x, w, b): @@ -267,7 +267,7 @@ class TestSerialize(TestCase): def test_serialize_list_returns(self) -> None: class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x): @@ -309,7 +309,7 @@ class TestSerialize(TestCase): """ class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x): @@ -636,7 +636,7 @@ class TestDeserialize(TestCase): """ class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x, w, b): @@ -657,7 +657,7 @@ class TestDeserialize(TestCase): def test_basic(self) -> None: class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x): @@ -671,7 +671,7 @@ class TestDeserialize(TestCase): def test_dynamic(self) -> None: class DynamicShapeSimpleModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, a, b, c) -> torch.Tensor: @@ -709,7 +709,7 @@ class TestDeserialize(TestCase): def test_module(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear1 = torch.nn.Linear(3, 3) self.relu = torch.nn.ReLU() @@ -727,7 +727,7 @@ class TestDeserialize(TestCase): def test_module_meta(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.p = torch.nn.Parameter(torch.ones(3, 3)) @@ -803,7 +803,7 @@ class TestDeserialize(TestCase): def test_list_of_optional_tensors(self) -> None: class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x, y, z): @@ -906,7 +906,7 @@ def forward(self, x): @unittest.skipIf(not torch.cuda.is_available(), "Requires cuda") def test_device(self) -> None: class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 16, 3, stride=1, bias=True) self.relu = torch.nn.ReLU() @@ -923,7 +923,7 @@ def forward(self, x): def test_custom_obj_tuple_out(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.attr = torch.classes._TorchScriptTesting._Foo(10, 20) @@ -939,7 +939,7 @@ def forward(self, x): def test_custom_obj(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.attr = torch.classes._TorchScriptTesting._Foo(10, 20) @@ -954,7 +954,7 @@ def forward(self, x): def test_custom_obj_list_out(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.attr = torch.classes._TorchScriptTesting._Foo(10, 20) @@ -970,7 +970,7 @@ def forward(self, x): def test_export_no_inputs(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) 
-> None: super().__init__() self.p = torch.ones(3, 3) @@ -1019,7 +1019,7 @@ class TestSaveLoad(TestCase): inp = (torch.tensor([0.1, 0.1]),) class Module(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(2, 2) @@ -1118,7 +1118,7 @@ class TestSaveLoad(TestCase): def test_save_constants(self): class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.a = torch.tensor(3) @@ -1192,7 +1192,7 @@ class TestSerializeCustomClass(TestCase): def test_custom_class_containing_fake_tensor(self): class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.custom_obj = torch.classes._TorchScriptTesting._ContainsTensor( torch.rand(2, 3) diff --git a/test/export/test_sparse.py b/test/export/test_sparse.py index c9662fc5db94..3708c272dd81 100644 --- a/test/export/test_sparse.py +++ b/test/export/test_sparse.py @@ -60,7 +60,7 @@ class SumNet(torch.nn.Module): class EltwiseNet(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.relu = torch.nn.ReLU() diff --git a/test/export/test_tools.py b/test/export/test_tools.py index 654ee12579e7..9110591e0139 100644 --- a/test/export/test_tools.py +++ b/test/export/test_tools.py @@ -44,7 +44,7 @@ class TestExportTools(TestCase): return x.sin() class Module(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.unsupported = Unsupported() self.supported = Supported() diff --git a/test/export/test_torchbind.py b/test/export/test_torchbind.py index 251ab9d29b59..119064e1dd56 100644 --- a/test/export/test_torchbind.py +++ b/test/export/test_torchbind.py @@ -162,7 +162,7 @@ class TestExportTorchbind(TestCase): @parametrize("pre_dispatch", [True, False]) def test_none(self, pre_dispatch): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.attr = torch.classes._TorchScriptTesting._Foo(10, 20) @@ -212,7 +212,7 @@ def forward(self, token, obj_attr, x, n): @parametrize("pre_dispatch", [True, False]) def test_attribute(self, pre_dispatch): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.attr = torch.classes._TorchScriptTesting._Foo(10, 20) @@ -246,7 +246,7 @@ def forward(self, token, obj_attr, x): @parametrize("pre_dispatch", [True, False]) def test_attribute_as_custom_op_argument(self, pre_dispatch): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.attr = torch.classes._TorchScriptTesting._Foo(10, 20) @@ -282,7 +282,7 @@ def forward(self, token, obj_attr, x): cc = torch.classes._TorchScriptTesting._Foo(10, 20) class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x, cc): @@ -320,7 +320,7 @@ def forward(self, token, x, cc): cc = torch.classes._TorchScriptTesting._Foo(10, 20) class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x, cc): @@ -381,7 +381,7 @@ def forward(self, token, x, cc): return x + torch.ops._TorchScriptTesting.takes_foo(self.foo, x) class F1(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.alpha = torch.classes._TorchScriptTesting._Foo(10, 20) self.beta = self.alpha @@ -417,7 +417,7 @@ def forward(self, token, x, cc): @parametrize("pre_dispatch", [True, False]) def 
test_unlift_custom_obj(self, pre_dispatch): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.attr = torch.classes._TorchScriptTesting._Foo(10, 20) @@ -458,7 +458,7 @@ def forward(self, token, obj_attr, x): @parametrize("pre_dispatch", [True, False]) def test_custom_obj_list_out(self, pre_dispatch): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.attr = torch.classes._TorchScriptTesting._Foo(10, 20) @@ -510,7 +510,7 @@ def forward(self, token, obj_attr, x): @parametrize("pre_dispatch", [True, False]) def test_custom_obj_tuple_out(self, pre_dispatch): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.attr = torch.classes._TorchScriptTesting._Foo(10, 20) @@ -559,7 +559,7 @@ def forward(self, token, obj_attr, x): test = self class Model(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(3, 2) self.check_tq_is_fake = True @@ -617,7 +617,7 @@ def forward(self, arg0_1, arg1_1): test = self class Model(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(3, 2) self.check_tq_is_fake = True @@ -674,7 +674,7 @@ def forward(self, arg0_1, arg1_1): def test_non_strict_export_methods(self): class Model(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(2, 2) @@ -857,7 +857,7 @@ def forward(self, token, safe_obj): @parametrize("fallthrough_via", ["lib_impl", "py_impl"]) def test_make_fx_tensor_queue_operators(self, fallthrough_via): class Model(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, tq, x): @@ -932,7 +932,7 @@ def forward(self, arg0_1, arg1_1): def test_aot_export_tensor_queue_operators(self): class Model(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, tq, x): @@ -1072,7 +1072,7 @@ class TestCompileTorchbind(TestCase): backend = EagerAndRecordGraphs() class Model(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.check_tq_is_fake = True @@ -1133,7 +1133,7 @@ class TestCompileTorchbind(TestCase): @parametrize("backend", ["eager", "aot_eager"]) def test_compile_script_object_input_guards(self, backend): class Model(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.check_tq_is_fake = True @@ -1184,7 +1184,7 @@ class TestCompileTorchbind(TestCase): def test_compile_script_object_input_automatic_dynamic_shape(self): class Model(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.check_tq_is_fake = True @@ -1221,7 +1221,7 @@ class TestCompileTorchbind(TestCase): backend = EagerAndRecordGraphs() class Model(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.check_tq_is_fake = True @@ -1418,7 +1418,7 @@ def forward(self, token, obj, x): backend = EagerAndRecordGraphs() class Model(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.tq = _empty_tensor_queue() @@ -1482,7 +1482,7 @@ class TestRegisterFakeClass(TestCase): @torch._library.register_fake_class("_TorchScriptTesting::_Foo") class InvalidFakeFoo: - def __init__(self): + def __init__(self) -> None: pass def 
test_register_fake_class_from_real_not_classmethod(self): diff --git a/test/export/test_unflatten.py b/test/export/test_unflatten.py index 2b0efb823529..0de2896cbd9e 100644 --- a/test/export/test_unflatten.py +++ b/test/export/test_unflatten.py @@ -65,7 +65,7 @@ class TestUnflatten(TestCase): return x / x class Child1(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.nested = NestedChild() self.register_parameter( @@ -77,7 +77,7 @@ class TestUnflatten(TestCase): return x + self.child1param class Child2(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.child2buffer = torch.nn.Buffer(torch.ones(2, 3)) @@ -85,7 +85,7 @@ class TestUnflatten(TestCase): return x - self.child2buffer class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.foo = Child1() self.bar = Child2() @@ -119,7 +119,7 @@ class TestUnflatten(TestCase): def test_unflatten_buffer_mutation(self): class Child(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.child2buffer = torch.nn.Buffer(torch.ones(2, 3)) @@ -128,7 +128,7 @@ class TestUnflatten(TestCase): return x - self.child2buffer class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.foo = Child() self.register_parameter( @@ -155,7 +155,7 @@ class TestUnflatten(TestCase): def test_unflatten_nested_access(self): class Child(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.child2buffer = torch.nn.Buffer(torch.ones(2, 3)) @@ -163,7 +163,7 @@ class TestUnflatten(TestCase): return x - self.child2buffer class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.foo = Child() self.register_parameter( @@ -184,7 +184,7 @@ class TestUnflatten(TestCase): def test_unflatten_shared_submodule(self): class Shared(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() layernorm = torch.nn.LayerNorm(10) self.sub_net = torch.nn.Sequential( @@ -218,7 +218,7 @@ class TestUnflatten(TestCase): return {"x": y["key"] + zx[1], "w": y["key"] * zx[1]} class Child1(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.nested = NestedChild() @@ -228,14 +228,14 @@ class TestUnflatten(TestCase): return xw["w"] + z - xw["x"] class Child2(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x): return x - 1 class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.foo = Child1() self.bar = Child2() @@ -287,7 +287,7 @@ class TestUnflatten(TestCase): def test_unflatten_param_list_dict(self): class Mod(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.param_list = torch.nn.ParameterList() self.param_dict = torch.nn.ParameterDict() @@ -317,7 +317,7 @@ class TestUnflatten(TestCase): return x + a, b class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.m1 = M1() @@ -337,7 +337,7 @@ class TestUnflatten(TestCase): def test_unflatten_wrong_input(self): class Mod(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.param_list = torch.nn.ParameterList() self.param_dict = torch.nn.ParameterDict() @@ -374,7 +374,7 @@ class TestUnflatten(TestCase): return x / x class 
Child1(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.nested = NestedChild() self.register_parameter( @@ -386,7 +386,7 @@ class TestUnflatten(TestCase): return x + self.child1param class Child2(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.child2buffer = torch.nn.Buffer(torch.ones(2, 3)) @@ -394,7 +394,7 @@ class TestUnflatten(TestCase): return x - self.child2buffer class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.foo = Child1() self.bar = Child2() @@ -420,7 +420,7 @@ class TestUnflatten(TestCase): def test_fx_trace(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x, y): @@ -439,14 +439,14 @@ class TestUnflatten(TestCase): def test_double_nested_submodule(self): class SubSubMod(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x): return x * x class SubMod(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.subsubmod = SubSubMod() @@ -454,7 +454,7 @@ class TestUnflatten(TestCase): return x - x class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.submod = SubMod() @@ -470,7 +470,7 @@ class TestUnflatten(TestCase): def test_unflatten_container_type(self): class Leaf(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(4, 4) @@ -478,7 +478,7 @@ class TestUnflatten(TestCase): return self.linear(x) class Bar(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.leaf = Leaf() self.buffer = torch.nn.Buffer(torch.randn(4, 4)) @@ -487,7 +487,7 @@ class TestUnflatten(TestCase): return self.buffer.sum() + self.leaf(x).sum() + z[0].sum() + z[1].sum() class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.bar = Bar() @@ -506,14 +506,14 @@ class TestUnflatten(TestCase): def test_unflattened_module_nodes_has_meta_val(self): class SubMod(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x): return x + x, x * x class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.submod = SubMod() @@ -538,7 +538,7 @@ class TestUnflatten(TestCase): def test_placeholder_and_get_attr_ordering_after_unflattened(self): class TransposeModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 1, 3, stride=2) @@ -564,7 +564,7 @@ class TestUnflatten(TestCase): def test_unflatten_constant_tensor(self): class SubMod(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.initializer = 0.1 @@ -572,7 +572,7 @@ class TestUnflatten(TestCase): return x + torch.tensor(self.initializer) class Mod(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.submod = SubMod() @@ -604,7 +604,7 @@ class TestUnflatten(TestCase): return (self.x + self.y) * z class SubMod(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.attr = torch.classes._TorchScriptTesting._Foo(10, 20) @@ -612,7 +612,7 @@ class TestUnflatten(TestCase): return x + self.attr.add_tensor(x) class Mod(torch.nn.Module): - def __init__(self): + def 
__init__(self) -> None: super().__init__() self.submod = SubMod() @@ -635,7 +635,7 @@ class TestUnflatten(TestCase): return x + 1 class Nested(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.leaf = Leaf() @@ -643,7 +643,7 @@ class TestUnflatten(TestCase): return self.leaf(x) + 2 class TopLevel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.nested = Nested() @@ -661,7 +661,7 @@ class TestUnflatten(TestCase): def test_unflatten_submodule_ordering(self): class Module2(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.buffer = torch.nn.Buffer(torch.rand(3, 4)) self.register_parameter("param", torch.nn.Parameter(torch.rand(3, 4))) @@ -670,7 +670,7 @@ class TestUnflatten(TestCase): return x + self.buffer + self.param class Module1(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.buffer = torch.nn.Buffer(torch.rand(3, 4)) self.register_parameter("param", torch.nn.Parameter(torch.rand(3, 4))) @@ -679,7 +679,7 @@ class TestUnflatten(TestCase): return x + self.buffer + self.param class Module(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.mod2 = Module2() self.mod3 = self.mod2 @@ -704,7 +704,7 @@ class TestUnflatten(TestCase): N, C, H, W = 1, 2, 2, 3 class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() layer = torch.nn.LayerNorm([C, H, W]) self.norms = torch.nn.ModuleList( @@ -735,7 +735,7 @@ class TestUnflatten(TestCase): def test_simple_alias(self): # handle weight sharing, check tensor ids after unflattening class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() # alias param self.bias = torch.nn.Parameter(torch.randn(4)) @@ -753,7 +753,7 @@ class TestUnflatten(TestCase): # handle aliasing where one alias is unused class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.bias = torch.nn.Parameter(torch.randn(4)) self.m = torch.nn.Linear(4, 4) @@ -809,7 +809,7 @@ class TestUnflatten(TestCase): return y[:d] class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.m1 = M1() self.m2 = M2() diff --git a/test/export/test_verifier.py b/test/export/test_verifier.py index a6cffdf3e665..5a1a5eb13010 100644 --- a/test/export/test_verifier.py +++ b/test/export/test_verifier.py @@ -139,7 +139,7 @@ class TestVerifier(TestCase): def test_ep_verifier_invalid_buffer(self) -> None: class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.a = torch.tensor(3.0) @@ -160,7 +160,7 @@ class TestVerifier(TestCase): def test_ep_verifier_buffer_mutate(self) -> None: class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.my_parameter = torch.nn.Parameter(torch.tensor(2.0)) @@ -183,7 +183,7 @@ class TestVerifier(TestCase): def test_ep_verifier_invalid_output(self) -> None: class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.my_parameter = torch.nn.Parameter(torch.tensor(2.0)) diff --git a/test/functorch/test_aotdispatch.py b/test/functorch/test_aotdispatch.py index 4e120bf03206..ccc1287b3352 100644 --- a/test/functorch/test_aotdispatch.py +++ b/test/functorch/test_aotdispatch.py @@ -526,7 +526,7 @@ class TestAOTAutograd(AOTTestCase): inp = [torch.randn(1, 10, 10, 
dtype=torch.complex64)] class F(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = nn.Linear(10, 10, dtype=torch.complex64) @@ -540,7 +540,7 @@ class TestAOTAutograd(AOTTestCase): # test that this works even though the sparse tensor has no storage. class F(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.emb = torch.nn.EmbeddingBag(100, 8, sparse=True) @@ -1004,7 +1004,7 @@ def forward(self, primals_1): @skipIfTorchDynamo("This test suite already uses dynamo") def test_composite_impl_compile(self): class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(3, 3) @@ -3584,7 +3584,7 @@ def forward(self, tangents_1): def test_buffer_copied_in_graph(self): class MyModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.buf = torch.nn.Buffer(torch.zeros(1)) self.w1 = torch.nn.Parameter(torch.zeros(1)) @@ -3639,7 +3639,7 @@ def forward(self, primals_1, primals_2, primals_3, primals_4): def test_buffer_copied_in_graph_with_different_shapes(self): class MyModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.buf = torch.nn.Buffer(torch.ones(4, 4)) self.w = torch.nn.Parameter( @@ -3694,7 +3694,7 @@ def forward(self, primals_1, primals_2, primals_3): def test_buffer_batch_norm(self): class MyModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.m = torch.nn.BatchNorm1d(100) @@ -3816,7 +3816,7 @@ def forward(self, tangents_1): from functorch.experimental import functionalize class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(5, 5) @@ -3865,7 +3865,7 @@ def forward(self, tangents_1): def test_real_weights_in_symbolic_mode_with_inplace_ops(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.buffer = torch.nn.Buffer(torch.ones(4, 5)) @@ -4142,7 +4142,7 @@ def forward(self, arg0_1, arg1_1): def test_aot_export_predispatch_composite_implicit_linear(self): class MM(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(2, 2) @@ -4219,7 +4219,7 @@ def forward(self, arg0_1, arg1_1): def test_aot_export_predispatch_buffer_mutation_metadata(self): class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.foo = torch.nn.Buffer(torch.zeros(2, 2)) @@ -4287,7 +4287,7 @@ def forward(self, arg0_1, arg1_1): ) def test_aot_export_predispatch_with_cond_nested(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x): @@ -4365,7 +4365,7 @@ def forward(self, arg0_1): ) def test_aot_export_predispatch_map_1(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x, y): @@ -4449,7 +4449,7 @@ def forward(self, arg0_1, arg1_1): def test_aot_export_predispatch_map_2(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x, y): @@ -4492,7 +4492,7 @@ def forward(self, arg0_1, arg1_1): ) def test_aot_export_predispatch_with_cond(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x): @@ -4540,7 +4540,7 @@ def forward(self, 
arg0_1): def test_aot_export_predispatch_conv_and_bn(self): class ConvBatchnorm(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(1, 3, 1, 1) self.bn = torch.nn.BatchNorm2d(3) @@ -4607,7 +4607,7 @@ def forward(self, arg0_1): def test_aot_export_module_joint(self): class ConvBatchnormRelu(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(1, 3, 1, 1) self.bn = torch.nn.BatchNorm2d(3) @@ -4793,7 +4793,7 @@ class (torch.nn.Module): def test_aot_export_forward_mutation_no_buffer_mut(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.buffer1 = torch.nn.Buffer(torch.ones(6, 4)) @@ -4819,7 +4819,7 @@ def forward(self, arg0_1, arg1_1): def test_aot_export_forward_mutation_multiple_mut(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.buffer1 = torch.nn.Buffer(torch.ones(6, 4)) @@ -4928,7 +4928,7 @@ def forward(self, arg0_1, arg1_1, arg2_1): ) def test_aot_export_with_torch_cond(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x): @@ -5065,7 +5065,7 @@ class TestPartitioning(AOTTestCase): # Following module results in inplace ops while tracing. The test checks # that the meta tensor information is stored for inplace ops. class MockModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.weight = torch.nn.Parameter( torch.randn(3072, 768, requires_grad=True) @@ -5800,7 +5800,7 @@ def forward(self, tangents_1, tangents_2): class TestAOTModuleSimplified(AOTTestCase): def test_aot_module_simplified(self): class MockModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(20, 30) @@ -5829,7 +5829,7 @@ class TestAOTModuleSimplified(AOTTestCase): def test_aot_module_simplified_dynamic(self): class MockModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(20, 30) @@ -5892,7 +5892,7 @@ class TestAOTModuleSimplified(AOTTestCase): def test_inference_python_dispatcher(self): # Extracted from unet class MockModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.upsample = torch.nn.Upsample( scale_factor=2, mode="bilinear", align_corners=True @@ -5911,7 +5911,7 @@ class TestAOTModuleSimplified(AOTTestCase): def test_aot_module_simplified_preserves_stack_trace(self): class MockModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(20, 30) @@ -5952,7 +5952,7 @@ class TestAOTModuleSimplified(AOTTestCase): def test_aot_module_simplified_preserves_stack_trace_from_mutation(self): class MockModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x): @@ -6390,7 +6390,7 @@ class MockFXGraphCache: In memory version of FXGraphCache so we can isolate testing for FXGraphCache """ - def __init__(self): + def __init__(self) -> None: self.cache = {} def save(self, key, gm): diff --git a/test/functorch/test_control_flow.py b/test/functorch/test_control_flow.py index 1226c372bd83..0563172cfc2d 100644 --- a/test/functorch/test_control_flow.py +++ b/test/functorch/test_control_flow.py @@ -135,7 +135,7 @@ def _while_loop_tests(): return while_loop(cond_fn, 
body_fn, (ci, cj, a, b)) class SimpleWithLinear(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(2, 2) self.dec = torch.nn.Buffer(torch.tensor(1)) @@ -150,7 +150,7 @@ def _while_loop_tests(): return while_loop(cond_fn, body_fn, (iter, x)) class NestedWithLinear(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.mod = SimpleWithLinear() self.outer_linear = torch.nn.Linear(2, 2) @@ -834,7 +834,7 @@ def forward(self, pred_1, x_1): def test_cond_autograd_user_nn_module(self): class User_nn_module(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, input): @@ -2964,7 +2964,7 @@ def forward(self, arg0_1, arg1_1, arg2_1): def test_cond_with_module_param_closure(self): class Mod(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.register_parameter( "param", torch.nn.Parameter(torch.ones(2, 3), requires_grad=False) diff --git a/test/functorch/test_eager_transforms.py b/test/functorch/test_eager_transforms.py index 4db11884263c..87140432bac9 100644 --- a/test/functorch/test_eager_transforms.py +++ b/test/functorch/test_eager_transforms.py @@ -3661,7 +3661,7 @@ class TestMakeFunctional(TestCase): @parametrize("disable_autograd_tracking", [True, False]) def test_disable_autograd_tracking(self, disable_autograd_tracking): class Foo(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = nn.Linear(3, 3) @@ -3679,7 +3679,7 @@ class TestMakeFunctional(TestCase): def test_parameter_tying(self): class Foo(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.bias = nn.Parameter(torch.randn(3)) self.linear = nn.Linear(3, 3) @@ -3708,7 +3708,7 @@ class TestMakeFunctional(TestCase): def test_buffer_tying(self): class Foo(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.bias = nn.Parameter(torch.randn(3)) self.linear = nn.Linear(3, 3) @@ -3740,7 +3740,7 @@ class TestMakeFunctional(TestCase): @parametrize("disable_autograd_tracking", [True, False]) def test_with_buffers_disable_autograd_tracking(self, disable_autograd_tracking): class Foo(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = nn.Linear(3, 3) self.buffer = nn.Buffer(torch.randn(3)) @@ -3762,7 +3762,7 @@ class TestMakeFunctional(TestCase): @parametrize("detach_params", [True, False]) def test_using_detach_functional_call(self, detach_params): class Foo(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = nn.Linear(3, 3) self.buffer = nn.Buffer(torch.randn(3)) @@ -3788,7 +3788,7 @@ class TestMakeFunctional(TestCase): def test_parameter_tying_grad(self): class Foo(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = nn.Linear(3, 3) self.weight = self.linear.weight @@ -3820,7 +3820,7 @@ class TestMakeFunctional(TestCase): def test_parameter_tying_ensemble(self): class Foo(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = nn.Linear(3, 3) self.weight = self.linear.weight @@ -3854,7 +3854,7 @@ class TestMakeFunctional(TestCase): @parametrize("mechanism", ["make_functional", "functional_call"]) def test_correctness_mnist(self, mechanism): class Net(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = nn.Conv2d(1, 10, 
kernel_size=5) self.conv2 = nn.Conv2d(10, 20, kernel_size=5) @@ -3965,7 +3965,7 @@ class TestMakeFunctional(TestCase): @parametrize("mechanism", ["make_functional", "functional_call"]) def test_make_functional_state_correctly_returned_after_forward(self, mechanism): class Net(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = nn.Linear(3, 3) @@ -4021,7 +4021,7 @@ class TestExamplesCorrectness(TestCase): @parametrize("mechanism", ["make_functional", "functional_call"]) def test_maml_regression(self, device, mechanism): class ThreeLayerNet(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc1 = nn.Linear(1, 40) self.relu1 = nn.ReLU() diff --git a/test/functorch/test_minifier.py b/test/functorch/test_minifier.py index c354cedc4e05..014117431ffa 100644 --- a/test/functorch/test_minifier.py +++ b/test/functorch/test_minifier.py @@ -86,7 +86,7 @@ class TestMinifier(TestCase): def test_module(self): class MockModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.relu = torch.nn.ReLU() diff --git a/test/fx/test_dce_pass.py b/test/fx/test_dce_pass.py index 134c3e49fff7..2cdda722c2d1 100644 --- a/test/fx/test_dce_pass.py +++ b/test/fx/test_dce_pass.py @@ -89,7 +89,7 @@ class TestDCE(TestCase): """ class TestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.attr_1 = torch.nn.Parameter(torch.tensor([-0.9])) @@ -105,7 +105,7 @@ class TestDCE(TestCase): """ class TestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.attr_1 = torch.nn.Parameter(torch.tensor([-0.9])) @@ -122,7 +122,7 @@ class TestDCE(TestCase): """ class TestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.attr_1 = torch.nn.Parameter(torch.tensor([-0.9])) @@ -169,7 +169,7 @@ class TestDCE(TestCase): _is_impure = True class TestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.relu = ReLUImpure() diff --git a/test/fx/test_fx_const_fold.py b/test/fx/test_fx_const_fold.py index 9641a1f9ba97..c1e5929ca301 100644 --- a/test/fx/test_fx_const_fold.py +++ b/test/fx/test_fx_const_fold.py @@ -61,7 +61,7 @@ class TestConstFold(TestCase): """ class ConstFoldTestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.attr_1 = torch.nn.Parameter(torch.tensor([[-0.9]])) self.attr_2 = torch.nn.Parameter(torch.tensor([[17.1]])) @@ -106,7 +106,7 @@ class TestConstFold(TestCase): """ class ConstFoldTestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() # Note: Named as such to result in name collision. 
self.add_1__CF = torch.nn.Parameter(torch.tensor([[1.0]])) @@ -168,7 +168,7 @@ class TestConstFold(TestCase): """ class ConstFoldTestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.attr1 = torch.nn.Parameter(torch.tensor([[-0.9]])) @@ -211,7 +211,7 @@ class TestConstFold(TestCase): """ class ConstFoldTestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.attr1 = torch.nn.Parameter(torch.tensor([[-0.9]])) self.attr1 = torch.nn.Parameter(torch.tensor([[1.32]])) @@ -254,7 +254,7 @@ class TestConstFold(TestCase): """ class ConstFoldTestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.attr1 = torch.nn.Parameter(torch.randn(2, 3)) self.attr2 = torch.nn.Parameter(torch.randn(2, 3)) @@ -301,7 +301,7 @@ class TestConstFold(TestCase): """ class ConstFoldTestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.attr1 = torch.nn.Parameter(torch.randn(4, 4)) self.attr2 = torch.nn.Parameter(torch.randn(4, 4)) @@ -332,7 +332,7 @@ class TestConstFold(TestCase): """ class TracedThroughModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.internal_attr = torch.nn.Parameter(torch.randn(2, 3)) @@ -340,7 +340,7 @@ class TestConstFold(TestCase): return self.internal_attr class ConstFoldTestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.my_mod = TracedThroughModule() self.attr = torch.nn.Parameter(torch.randn(2, 3)) @@ -364,7 +364,7 @@ class TestConstFold(TestCase): """ class ConstFoldTestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.attr = torch.nn.Parameter(torch.randn(2, 3)) @@ -413,7 +413,7 @@ class TestConstFold(TestCase): def test_const_fold_has_inlined_call_module_node(self): class ConstFoldTestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.attr = torch.nn.Parameter(torch.randn(2, 3)) self.mod = torch.nn.Identity() @@ -434,7 +434,7 @@ class TestConstFold(TestCase): def test_const_fold_module_attr(self): class ConstFoldTestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.const = torch.nn.Parameter(torch.randn(2, 3)) self.mod = torch.nn.Identity() @@ -456,7 +456,7 @@ class TestConstFold(TestCase): def test_const_fold_unused_placeholder(self): class ConstFoldTestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.const = torch.nn.Parameter(torch.randn(2, 3)) @@ -475,7 +475,7 @@ class TestConstFold(TestCase): def test_dict_output(self): class ConstFoldTestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.const = torch.nn.Parameter(torch.randn(2, 3)) @@ -494,7 +494,7 @@ class TestConstFold(TestCase): def test_two_outputs(self): class ConstFoldTestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.const = torch.nn.Parameter(torch.randn(2, 3)) @@ -514,7 +514,7 @@ class TestConstFold(TestCase): def test_three_outputs(self): class ConstFoldTestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.const = torch.nn.Parameter(torch.randn(2, 3)) @@ -540,7 +540,7 @@ class TestConstFold(TestCase): """ class ConstFoldTestModule(torch.nn.Module): - def __init__(self): + def 
__init__(self) -> None: super().__init__() self.attr = torch.nn.Parameter(torch.randn(2, 3)) @@ -572,7 +572,7 @@ class TestConstFold(TestCase): """ class ConstFoldTestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.attr = torch.nn.Parameter(torch.randn(2, 3)) @@ -605,7 +605,7 @@ class TestConstFold(TestCase): """ class ConstFoldTestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.weight = torch.nn.Parameter(torch.randn(4, 4)) self.bias = torch.nn.Parameter(torch.randn(4)) @@ -650,7 +650,7 @@ class TestConstFold(TestCase): """ class ConstFoldTestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.lin_input = torch.nn.Parameter(torch.randn(4, 4)) self.lin = torch.nn.Linear(4, 4) @@ -676,7 +676,7 @@ class TestConstFold(TestCase): """ class ConstFoldTestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.attr_1 = torch.nn.Parameter(torch.tensor([[-0.9]]), requires_grad) self.attr_2 = torch.nn.Parameter(torch.tensor([[17.1]]), requires_grad) diff --git a/test/fx/test_fx_split.py b/test/fx/test_fx_split.py index e93d0b9665c0..12862cc1774d 100644 --- a/test/fx/test_fx_split.py +++ b/test/fx/test_fx_split.py @@ -154,7 +154,7 @@ class TestSplitByTags(TestCase): class TestSplitOutputType(TestCase): class TestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 16, 3, stride=1, bias=True) self.relu = torch.nn.ReLU() diff --git a/test/fx/test_gradual_type.py b/test/fx/test_gradual_type.py index 31f28dc45ee1..01a76eaf98a8 100644 --- a/test/fx/test_gradual_type.py +++ b/test/fx/test_gradual_type.py @@ -672,7 +672,7 @@ class TypeCheckerTest(TestCase): def test_type_check_conv2D_maxpool2d_flatten(self): class BasicBlock(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(3, 6, 5) @@ -761,7 +761,7 @@ class TypeCheckerTest(TestCase): def test_type_typechecl_maxpool2d_3dinput(self): class BasicBlock(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.pool = torch.nn.MaxPool2d(5, 8) @@ -1119,7 +1119,7 @@ class TypeCheckerTest(TestCase): def test_type_check_symbolic_inferenceconv2D_maxpool2d_flatten(self): class BasicBlock(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(3, 6, 5) diff --git a/test/fx/test_matcher_utils.py b/test/fx/test_matcher_utils.py index def8a253e386..83db868e4d5a 100644 --- a/test/fx/test_matcher_utils.py +++ b/test/fx/test_matcher_utils.py @@ -34,7 +34,7 @@ class WrapperModule(torch.nn.Module): class TestMatcher(JitTestCase): def test_subgraph_matcher_with_attributes(self): class LargeModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self._weight = torch.nn.Parameter(torch.ones(3, 3)) self._bias = torch.nn.Parameter(torch.ones(3, 3)) @@ -53,7 +53,7 @@ class TestMatcher(JitTestCase): large_model_graph = symbolic_trace(LargeModel()).graph class PatternModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self._weight_1 = torch.nn.Parameter(torch.ones(5, 5)) self._bias_1 = torch.nn.Parameter(torch.ones(5, 5)) @@ -228,7 +228,7 @@ class TestMatcher(JitTestCase): """Testing SubgraphMatcherWithNameNodeMap with module pattern""" class M(torch.nn.Module): - def 
__init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(5, 5) @@ -236,7 +236,7 @@ class TestMatcher(JitTestCase): return self.linear(x) class Pattern(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(5, 5) diff --git a/test/fx/test_shape_inference.py b/test/fx/test_shape_inference.py index 8e6ff1185295..1caa4847bc24 100644 --- a/test/fx/test_shape_inference.py +++ b/test/fx/test_shape_inference.py @@ -86,7 +86,7 @@ class TestShapeInference(unittest.TestCase): def test_infer_shape(self): class TestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.w_1 = torch.empty([256, 328]) self.b_1 = torch.empty([256]) diff --git a/test/fx/test_source_matcher_utils.py b/test/fx/test_source_matcher_utils.py index 835f77063ba8..1ece2e2cd10b 100644 --- a/test/fx/test_source_matcher_utils.py +++ b/test/fx/test_source_matcher_utils.py @@ -22,7 +22,7 @@ class TestSourceMatcher(JitTestCase): @unittest.skipIf(not is_dynamo_supported(), "Dynamo not supported") def test_module_partitioner_linear_relu_linear(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear1 = torch.nn.Linear(3, 3) self.relu = torch.nn.ReLU() @@ -140,7 +140,7 @@ class TestSourceMatcher(JitTestCase): @unittest.skipIf(not is_dynamo_supported(), "Dynamo not supported") def test_module_partitioner_functional_conv_relu_conv(self): class FunctionalConv2d(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.stride = (1, 1) self.padding = (0, 0) @@ -159,7 +159,7 @@ class TestSourceMatcher(JitTestCase): ) class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = FunctionalConv2d() self.conv2 = FunctionalConv2d() @@ -184,7 +184,7 @@ class TestSourceMatcher(JitTestCase): @unittest.skipIf(not is_dynamo_supported(), "Dynamo not supported") def test_module_partitioner_functional_linear_relu_linear(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x, weight, bias): diff --git a/test/fx/test_subgraph_rewriter.py b/test/fx/test_subgraph_rewriter.py index 2dd296cd262d..a4e14fbfab44 100644 --- a/test/fx/test_subgraph_rewriter.py +++ b/test/fx/test_subgraph_rewriter.py @@ -400,7 +400,7 @@ class TestSubgraphRewriter(JitTestCase): """ class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.dtype = torch.float16 @@ -439,7 +439,7 @@ class TestSubgraphRewriter(JitTestCase): def test_subgraph_rewriter_replaces_referenced_submodules(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.sigmoid = torch.nn.Sigmoid() self.submod = torch.nn.ReLU() @@ -449,7 +449,7 @@ class TestSubgraphRewriter(JitTestCase): return self.submod(self.sigmoid(x)) class Pattern(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.sigmoid = torch.nn.Sigmoid() self.submod = torch.nn.ReLU() @@ -458,7 +458,7 @@ class TestSubgraphRewriter(JitTestCase): return self.submod(self.sigmoid(x)) class Replacement(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.tanh = torch.nn.Tanh() self.submod = torch.nn.ReLU() @@ -467,7 +467,7 @@ class TestSubgraphRewriter(JitTestCase): return self.submod(self.tanh(x)) class Comparison(torch.nn.Module): - def 
__init__(self): + def __init__(self) -> None: super().__init__() self.tanh = torch.nn.Tanh() self.submod = torch.nn.ReLU() @@ -904,7 +904,7 @@ def forward(self, x): def test_replacement_with_attrs(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.a = torch.tensor([1]) self.b = torch.tensor([2]) @@ -913,7 +913,7 @@ def forward(self, x): return x + self.a - self.b class Pattern(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.a = torch.tensor([1]) @@ -921,7 +921,7 @@ def forward(self, x): return x + self.a class Replacement(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.c = torch.tensor([3]) diff --git a/test/fx/test_z3_gradual_types.py b/test/fx/test_z3_gradual_types.py index c49ab79120eb..be5fd3d73f61 100644 --- a/test/fx/test_z3_gradual_types.py +++ b/test/fx/test_z3_gradual_types.py @@ -391,7 +391,7 @@ class HFOperations(unittest.TestCase): def test_layer_norm(self): class BasicBlock(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.l = torch.nn.LayerNorm((1024,)) @@ -723,7 +723,7 @@ class HFOperations(unittest.TestCase): def test_embedding(self): class BasicBlock(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.embedding = torch.nn.Embedding(256008, 1024, padding_idx=1) @@ -881,7 +881,7 @@ class HFOperations(unittest.TestCase): def test_view_mul(self): class BasicBlock(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.embed_tokens = torch.nn.Embedding(256008, 1024, padding_idx=1) @@ -1003,7 +1003,7 @@ class HFOperations(unittest.TestCase): """ class BasicBlock(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.embed_tokens = torch.nn.Embedding(256008, 1024, padding_idx=1) @@ -1068,7 +1068,7 @@ class HFOperations(unittest.TestCase): """ class BasicBlock(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.embed_tokens = torch.nn.Embedding(256008, 1024, padding_idx=1) @@ -1531,7 +1531,7 @@ class GradualTypes(unittest.TestCase): class TestSingleOperation(unittest.TestCase): def test_conv_wrong_example(self): class BasicBlock(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d( in_channels=2, @@ -2188,7 +2188,7 @@ class TestSingleOperation(unittest.TestCase): def test_conv2D_maxpool2d_flatten(self): class BasicBlock(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(3, 6, 5) @@ -2225,7 +2225,7 @@ class TestSingleOperation(unittest.TestCase): def test_conv2D_maxpool2d_flatten_unsat(self): class BasicBlock(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(3, 6, 5) @@ -2258,7 +2258,7 @@ class TestSingleOperation(unittest.TestCase): def test_conv2D_maxpool2d_flatten_dyn(self): class BasicBlock(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(3, 6, 5) diff --git a/test/higher_order_ops/test_with_effects.py b/test/higher_order_ops/test_with_effects.py index a71f50fd559c..b69f25910a6e 100644 --- a/test/higher_order_ops/test_with_effects.py +++ b/test/higher_order_ops/test_with_effects.py @@ -82,7 +82,7 @@ def forward(self, arg1_1): def test_torchbind_custom_op(self): 
class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.attr = torch.classes._TorchScriptTesting._Foo(10, 20) @@ -108,7 +108,7 @@ def forward(self, arg0_1, arg1_1): def test_print_with_buffer_mutations(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.buf = torch.nn.Buffer(torch.ones(3)) @@ -143,7 +143,7 @@ def forward(self, arg0_1, arg1_1, arg2_1): def test_print_with_input_mutations(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x): @@ -304,7 +304,7 @@ def forward(self, arg0_1, arg1_1, arg2_1): return torch.nn.functional.linear(x, self.weight, self.bias) class MockModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = MyLinear(10, 10) self.register_buffer( diff --git a/test/lazy/test_extract_compiled_graph.py b/test/lazy/test_extract_compiled_graph.py index 26e7caa95317..d62a99b7b6df 100644 --- a/test/lazy/test_extract_compiled_graph.py +++ b/test/lazy/test_extract_compiled_graph.py @@ -45,7 +45,7 @@ class ModuleReturnMulti(nn.Module): # The default fx tracer will convert torch.randn to a constant.. We may need # a custom tracer. # class ModuleEagerTensor(nn.Module): -# def __init__(self): +# def __init__(self) -> None: # super().__init__() # # def forward(self, a): @@ -60,7 +60,7 @@ class ModuleReturnMulti(nn.Module): # Unfortunately, the default fx tracer convert the return value of the forward # method to a constant.. Comment out for now # class ModuleReturnEagerTensorOnDefaultDevice(nn.Module): -# def __init__(self): +# def __init__(self) -> None: # super().__init__() # # def forward(self): diff --git a/test/lazy/test_functionalization.py b/test/lazy/test_functionalization.py index 1ec8058d9c36..bcd21ffd24ec 100644 --- a/test/lazy/test_functionalization.py +++ b/test/lazy/test_functionalization.py @@ -22,7 +22,7 @@ class LazyFuncionalizationTest(TestCase): metrics.reset() class Model(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc1 = torch.nn.Linear(4, 2, bias=False) diff --git a/test/mkldnn_verbose.py b/test/mkldnn_verbose.py index a2feb29ba398..ac9d34e27bee 100644 --- a/test/mkldnn_verbose.py +++ b/test/mkldnn_verbose.py @@ -4,7 +4,7 @@ import torch class Module(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(1, 10, 5, 1) diff --git a/test/mobile/lightweight_dispatch/test_codegen_unboxing.cpp b/test/mobile/lightweight_dispatch/test_codegen_unboxing.cpp index 1b879118b5b8..8b80b98a2856 100644 --- a/test/mobile/lightweight_dispatch/test_codegen_unboxing.cpp +++ b/test/mobile/lightweight_dispatch/test_codegen_unboxing.cpp @@ -197,7 +197,7 @@ TEST(LiteInterpreterTest, MultipleOps) { auto testModelFile = "ModelWithMultipleOps.ptl"; // class ModelWithMultipleOps(torch.nn.Module): - // def __init__(self): + // def __init__(self) -> None: // super().__init__() // self.ops = torch.nn.Sequential( // torch.nn.ReLU(), diff --git a/test/mobile/lightweight_dispatch/tests_setup.py b/test/mobile/lightweight_dispatch/tests_setup.py index 22def584bce0..81ac0fffb7b9 100644 --- a/test/mobile/lightweight_dispatch/tests_setup.py +++ b/test/mobile/lightweight_dispatch/tests_setup.py @@ -74,7 +74,7 @@ class ModelWithScalarList(torch.nn.Module): # upsample_linear1d.vec(Tensor input, int[]? output_size, bool align_corners, float[]? 
scale_factors) -> Tensor @save_model class ModelWithFloatList(torch.nn.Upsample): - def __init__(self): + def __init__(self) -> None: super().__init__( scale_factor=(2.0,), mode="linear", @@ -95,7 +95,7 @@ class ModelWithListOfOptionalTensors(torch.nn.Module): # int groups=1) -> Tensor @save_model class ModelWithArrayOfInt(torch.nn.Conv2d): - def __init__(self): + def __init__(self) -> None: super().__init__(1, 2, (2, 2), stride=(1, 1), padding=(1, 1)) @@ -120,7 +120,7 @@ class ModelWithStringOptional(torch.nn.Module): @save_model class ModelWithMultipleOps(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.ops = torch.nn.Sequential( torch.nn.ReLU(), diff --git a/test/mobile/model_test/nn_ops.py b/test/mobile/model_test/nn_ops.py index 5db34f2e560d..fb6530daad87 100644 --- a/test/mobile/model_test/nn_ops.py +++ b/test/mobile/model_test/nn_ops.py @@ -5,7 +5,7 @@ import torch.nn.functional as F # https://pytorch.org/docs/stable/nn.html class NNConvolutionModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.input1d = torch.randn(1, 4, 36) self.input2d = torch.randn(1, 4, 30, 10) @@ -42,7 +42,7 @@ class NNConvolutionModule(torch.nn.Module): class NNPoolingModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.input1d = torch.randn(1, 16, 50) self.module1d = nn.ModuleList( @@ -90,7 +90,7 @@ class NNPoolingModule(torch.nn.Module): class NNPaddingModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.input1d = torch.randn(1, 4, 50) self.module1d = nn.ModuleList( @@ -131,7 +131,7 @@ class NNPaddingModule(torch.nn.Module): class NNNormalizationModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.input1d = torch.randn(1, 4, 50) self.module1d = nn.ModuleList( @@ -172,7 +172,7 @@ class NNNormalizationModule(torch.nn.Module): class NNActivationModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.activations = nn.ModuleList( [ @@ -215,7 +215,7 @@ class NNActivationModule(torch.nn.Module): class NNRecurrentModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.rnn = nn.ModuleList( [ @@ -245,7 +245,7 @@ class NNRecurrentModule(torch.nn.Module): class NNTransformerModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.transformers = nn.ModuleList( [ @@ -271,7 +271,7 @@ class NNTransformerModule(torch.nn.Module): class NNLinearModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linears = nn.ModuleList( [ @@ -329,7 +329,7 @@ class NNDistanceModule(torch.nn.Module): class NNLossFunctionModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.x = torch.FloatTensor([[0.1, 0.2, 0.4, 0.8]]) self.y = torch.LongTensor([[3, 0, -1, 1]]) @@ -368,7 +368,7 @@ class NNLossFunctionModule(torch.nn.Module): class NNVisionModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.input = torch.randn(1, 4, 9, 9) self.vision_modules = nn.ModuleList( @@ -398,7 +398,7 @@ class NNVisionModule(torch.nn.Module): class NNShuffleModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.shuffle = nn.ChannelShuffle(2) @@ -409,7 +409,7 @@ class NNShuffleModule(torch.nn.Module): class 
NNUtilsModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.flatten = nn.Sequential(nn.Linear(50, 50), nn.Unflatten(1, (2, 5, 5))) diff --git a/test/mobile/model_test/quantization_ops.py b/test/mobile/model_test/quantization_ops.py index 2e80886c144e..140894fddc4c 100644 --- a/test/mobile/model_test/quantization_ops.py +++ b/test/mobile/model_test/quantization_ops.py @@ -3,7 +3,7 @@ import torch.nn as nn class GeneralQuantModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.embedding = torch.ao.nn.quantized.Embedding( num_embeddings=10, embedding_dim=12 @@ -47,7 +47,7 @@ class GeneralQuantModule(torch.nn.Module): class DynamicQuantModule: - def __init__(self): + def __init__(self) -> None: super().__init__() self.module = self.M() @@ -55,7 +55,7 @@ class DynamicQuantModule: return torch.ao.quantization.quantize_dynamic(self.module, dtype=torch.qint8) class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super(DynamicQuantModule.M, self).__init__() self.rnn = nn.RNN(4, 8, 2) self.rnncell = nn.RNNCell(4, 8) @@ -122,7 +122,7 @@ class StaticQuantModule: return model_int8 class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super(StaticQuantModule.M, self).__init__() self.quant = torch.ao.quantization.QuantStub() self.input1d = torch.randn(4, 2, 2) @@ -182,7 +182,7 @@ class FusedQuantModule: return model_int8 class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super(FusedQuantModule.M, self).__init__() self.quant = torch.ao.quantization.QuantStub() self.input1d = torch.randn(4, 2, 2) diff --git a/test/mobile/test_lite_script_module.py b/test/mobile/test_lite_script_module.py index 01ef72ebde02..05b9b30ea12a 100644 --- a/test/mobile/test_lite_script_module.py +++ b/test/mobile/test_lite_script_module.py @@ -72,7 +72,7 @@ class TestLiteScriptModule(TestCase): return x * y class B(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.A0 = A() self.A1 = A() @@ -177,7 +177,7 @@ class TestLiteScriptModule(TestCase): def test_method_calls_with_optional_arg(self): class A(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() # opt arg in script-to-script invocation @@ -185,7 +185,7 @@ class TestLiteScriptModule(TestCase): return x + two class B(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.A0 = A() @@ -218,7 +218,7 @@ class TestLiteScriptModule(TestCase): def test_unsupported_classtype(self): class Foo: - def __init__(self): + def __init__(self) -> None: return def func(self, x: int, y: int): @@ -243,7 +243,7 @@ class TestLiteScriptModule(TestCase): pass class MyTestModuleForListWithModuleClass(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.foo = Foo() @@ -267,7 +267,7 @@ class TestLiteScriptModule(TestCase): pass class MyTestModuleForDictWithModuleClass(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.foo = Foo() @@ -288,7 +288,7 @@ class TestLiteScriptModule(TestCase): def test_module_export_operator_list(self): class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.weight = torch.ones((20, 1, 5, 5)) self.bias = torch.ones(20) @@ -464,7 +464,7 @@ class TestLiteScriptModule(TestCase): class A(torch.nn.Module): b: Forward - def __init__(self): + def __init__(self) -> None: 
super().__init__() self.b = B() @@ -523,7 +523,7 @@ class TestLiteScriptQuantizedModule(QuantizationLiteTestCase): def test_quantization_example(self): # From the example in Static Quantization section of https://pytorch.org/docs/stable/quantization.html class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.quant = torch.ao.quantization.QuantStub() self.conv = torch.nn.Conv2d(1, 1, 1) diff --git a/test/mobile/test_lite_script_type.py b/test/mobile/test_lite_script_type.py index d77708be7a9b..e0c948712bba 100644 --- a/test/mobile/test_lite_script_type.py +++ b/test/mobile/test_lite_script_type.py @@ -42,7 +42,7 @@ class TestLiteScriptModule(TestCase): id: torch.Tensor class Bar(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.foo = Foo(torch.tensor(1)) @@ -101,7 +101,7 @@ class TestLiteScriptModule(TestCase): id: torch.Tensor class Bar(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.foo = Foo(torch.tensor(1)) @@ -144,7 +144,7 @@ class TestLiteScriptModule(TestCase): baz: Baz class Bar(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.foo = Foo(torch.tensor(1), Baz(torch.tensor(1))) diff --git a/test/mobile/test_quantize_fx_lite_script_module.py b/test/mobile/test_quantize_fx_lite_script_module.py index 966217a1490e..2e5f5dd0046f 100644 --- a/test/mobile/test_quantize_fx_lite_script_module.py +++ b/test/mobile/test_quantize_fx_lite_script_module.py @@ -21,7 +21,7 @@ class TestLiteFuseFx(QuantizationLiteTestCase): def test_embedding(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.emb = torch.nn.Embedding(num_embeddings=10, embedding_dim=12) @@ -50,7 +50,7 @@ class TestLiteFuseFx(QuantizationLiteTestCase): def test_conv2d(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = nn.Conv2d(1, 1, 1) self.conv2 = nn.Conv2d(1, 1, 1) diff --git a/test/nn/test_lazy_modules.py b/test/nn/test_lazy_modules.py index a6a63e9bf1ca..163063c6f085 100644 --- a/test/nn/test_lazy_modules.py +++ b/test/nn/test_lazy_modules.py @@ -753,7 +753,7 @@ class TestLazyModules(TestCase): @suppress_warnings def test_chained_initialization(self): class MyNetwork(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear_1 = torch.nn.LazyLinear(15) self.linear_2 = torch.nn.LazyLinear(10) diff --git a/test/nn/test_load_state_dict.py b/test/nn/test_load_state_dict.py index 0f8535a75bbf..8004252a37db 100644 --- a/test/nn/test_load_state_dict.py +++ b/test/nn/test_load_state_dict.py @@ -223,7 +223,7 @@ class TestLoadStateDict(NNTestCase): @swap([True, False]) def test_load_state_dict_custom(self): class CustomState(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.param = torch.nn.Parameter(torch.ones(1)) self.sub = torch.nn.Linear(5, 5) @@ -264,7 +264,7 @@ class TestLoadStateDict(NNTestCase): @parametrize("keep_vars", [True, False]) def test_load_state_dict_assign_meta(self, keep_vars): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc1 = nn.Linear(3, 5) self.bn = nn.BatchNorm1d(5) @@ -340,7 +340,7 @@ class TestLoadStateDict(NNTestCase): @swap([True, False]) def test_load_state_dict_assign_with_optimizer(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> 
None: super().__init__() self.fc1 = nn.Linear(3, 5) self.bn = nn.BatchNorm1d(5) @@ -390,7 +390,7 @@ class TestLoadStateDict(NNTestCase): # Assigned tensor is allowed to have different properties than initial # tensor except for shape class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc1 = nn.Linear(3, 5) self.bn = nn.BatchNorm1d(5) @@ -426,7 +426,7 @@ class TestLoadStateDict(NNTestCase): @swap([True, False]) def test_load_state_dict_with_unexpected_key(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc1 = torch.nn.Linear(5, 10) diff --git a/test/nn/test_module_hooks.py b/test/nn/test_module_hooks.py index 283a83859e9c..6806bed1f3d4 100644 --- a/test/nn/test_module_hooks.py +++ b/test/nn/test_module_hooks.py @@ -627,7 +627,7 @@ class TestStateDictHooks(TestCase): # Test with module instance method as hook class MyModule(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.foo = torch.nn.Parameter(torch.rand(10)) @@ -699,7 +699,7 @@ class TestStateDictHooks(TestCase): hook_called = 0 class MyModule(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.foo = torch.nn.Parameter(torch.rand(10)) @@ -813,7 +813,7 @@ class TestStateDictHooks(TestCase): def test_register_state_dict_pre_hook(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.a = nn.Sequential( nn.Linear(3, 3), nn.Linear(3, 3), nn.Linear(3, 3) @@ -827,7 +827,7 @@ class TestStateDictHooks(TestCase): def test_register_state_dict_pre_hook_lazy_module(self): class MyLazyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.layer1 = nn.LazyLinear(8) self.layer2 = nn.LazyLinear(5) diff --git a/test/nn/test_parametrization.py b/test/nn/test_parametrization.py index 3e322bb65df3..f61d8aecc864 100644 --- a/test/nn/test_parametrization.py +++ b/test/nn/test_parametrization.py @@ -941,7 +941,7 @@ class TestNNParametrization(NNTestCase): return x + 1.0 class ModelWithoutDeepcopy(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.weight = nn.Parameter( torch.tensor([1.0, 1.0, 1.0, 1.0]), requires_grad=True diff --git a/test/onnx/dynamo/test_dynamo_with_onnxruntime_backend.py b/test/onnx/dynamo/test_dynamo_with_onnxruntime_backend.py index dc31f9413ca3..fb410ce5323a 100644 --- a/test/onnx/dynamo/test_dynamo_with_onnxruntime_backend.py +++ b/test/onnx/dynamo/test_dynamo_with_onnxruntime_backend.py @@ -353,7 +353,7 @@ class TestDynamoWithONNXRuntime(onnx_test_common._TestONNXRuntime): ) class MLP(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc1 = nn.Linear(2, 4, bias=True) self.fc2 = nn.Linear(4, 2, bias=True) @@ -690,7 +690,7 @@ class TestDynamoWithONNXRuntime(onnx_test_common._TestONNXRuntime): ) class MLP(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc1 = nn.Linear(2, 4, bias=True) self.fc2 = nn.Linear(4, 2, bias=True) diff --git a/test/onnx/dynamo/test_exporter_api.py b/test/onnx/dynamo/test_exporter_api.py index fd06c95fb0cb..ba57d7dba62a 100644 --- a/test/onnx/dynamo/test_exporter_api.py +++ b/test/onnx/dynamo/test_exporter_api.py @@ -36,7 +36,7 @@ class SampleModelForDynamicShapes(torch.nn.Module): class _LargeModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.param = 
torch.nn.Parameter(torch.randn(2**28)) # 1GB self.param2 = torch.nn.Parameter(torch.randn(2**28)) # 1GB diff --git a/test/onnx/model_defs/mnist.py b/test/onnx/model_defs/mnist.py index cc66c0303106..12f037da75bf 100644 --- a/test/onnx/model_defs/mnist.py +++ b/test/onnx/model_defs/mnist.py @@ -3,7 +3,7 @@ import torch.nn.functional as F class MNIST(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = nn.Conv2d(1, 10, kernel_size=5) self.conv2 = nn.Conv2d(10, 20, kernel_size=5) diff --git a/test/onnx/model_defs/op_test.py b/test/onnx/model_defs/op_test.py index 195e3c8dc849..f7ac8c6bab12 100644 --- a/test/onnx/model_defs/op_test.py +++ b/test/onnx/model_defs/op_test.py @@ -29,7 +29,7 @@ class PermuteNet(nn.Module): class PReluNet(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.features = nn.Sequential( nn.PReLU(3), @@ -41,7 +41,7 @@ class PReluNet(nn.Module): class FakeQuantNet(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fake_quant = torch.ao.quantization.FakeQuantize() self.fake_quant.disable_observer() diff --git a/test/onnx/test_custom_ops.py b/test/onnx/test_custom_ops.py index 96694a0ea935..d926375ee356 100644 --- a/test/onnx/test_custom_ops.py +++ b/test/onnx/test_custom_ops.py @@ -26,7 +26,7 @@ class TestCustomAutogradFunction(pytorch_test_common.ExportTestCase): return g.op("Clip", input, min_f=scalar) class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.clip = MyClip.apply @@ -52,7 +52,7 @@ class TestCustomAutogradFunction(pytorch_test_common.ExportTestCase): return input.clamp(min=0) class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.clip = MyClip.apply self.relu = MyRelu.apply @@ -89,7 +89,7 @@ class TestExportAsContribOps(pytorch_test_common.ExportTestCase): def test_contrib_op_with_loop(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.gelu = torch.nn.GELU(approximate="none") diff --git a/test/onnx/test_export_modes.py b/test/onnx/test_export_modes.py index 6ea3530e3acb..0b5b34161fb7 100644 --- a/test/onnx/test_export_modes.py +++ b/test/onnx/test_export_modes.py @@ -23,7 +23,7 @@ from torch.testing._internal import common_utils # Smoke tests for export methods class TestExportModes(pytorch_test_common.ExportTestCase): class MyModel(nn.Module): - def __init__(self): + def __init__(self) -> None: super(TestExportModes.MyModel, self).__init__() def forward(self, x): diff --git a/test/onnx/test_fx_passes.py b/test/onnx/test_fx_passes.py index b5e5127e9277..e49b21dc7089 100644 --- a/test/onnx/test_fx_passes.py +++ b/test/onnx/test_fx_passes.py @@ -126,7 +126,7 @@ class TestModularizePass(common_utils.TestCase): # # Minified repro from Background_Matting. https://github.com/pytorch/benchmark/issues/1768 class TestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.unused_relu = torch.nn.ReLU() self.used_gelu = torch.nn.GELU() @@ -172,7 +172,7 @@ class TestModularizePass(common_utils.TestCase): self, is_exported_program ): class TestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.relu = torch.nn.ReLU() @@ -218,7 +218,7 @@ class TestModularizePass(common_utils.TestCase): ): # Minified repro from basic_gnn_edgecnn. 
class InnerModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.relu = torch.nn.ReLU() @@ -226,7 +226,7 @@ class TestModularizePass(common_utils.TestCase): return self.relu(x) class TestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.inner_module = InnerModule() diff --git a/test/onnx/test_fx_to_onnx.py b/test/onnx/test_fx_to_onnx.py index 2ff9f4741a60..4aaf8df34b9c 100644 --- a/test/onnx/test_fx_to_onnx.py +++ b/test/onnx/test_fx_to_onnx.py @@ -104,7 +104,7 @@ class TestFxToOnnx(pytorch_test_common.ExportTestCase): ) def test_mnist_exported_with_no_warnings(self, diagnostic_rule): class MNISTModel(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = nn.Conv2d(1, 32, 3, 1, bias=False) self.conv2 = nn.Conv2d(32, 64, 3, 1, bias=False) @@ -227,7 +227,7 @@ class TestFxToOnnx(pytorch_test_common.ExportTestCase): self, ): class TraceModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv2 = torch.nn.Conv2d( 16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1) @@ -340,7 +340,7 @@ class TestFxToOnnx(pytorch_test_common.ExportTestCase): return output + bias class Module(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.submodule = SubModule() @@ -402,7 +402,7 @@ class TestFxToOnnx(pytorch_test_common.ExportTestCase): def test_dynamo_export_retains_readable_parameter_and_buffer_names(self): class SubModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv2 = nn.Conv2d(32, 64, 3, 1, bias=False) self.fc1 = nn.Linear(9216, 128, bias=False) @@ -419,7 +419,7 @@ class TestFxToOnnx(pytorch_test_common.ExportTestCase): return tensor_x class MNISTModel(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = nn.Conv2d(1, 32, 3, 1, bias=False) self.submodule = SubModule() @@ -649,7 +649,7 @@ class TestFxToOnnx(pytorch_test_common.ExportTestCase): def test_exported_program_torch_distributions_normal_Normal(self): class Model(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: self.normal = torch.distributions.normal.Normal(0, 1) super().__init__() @@ -825,7 +825,7 @@ class TestFxToOnnx(pytorch_test_common.ExportTestCase): self, include_initializer, use_fake_mode, use_exported_program ): class MNISTModel(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = nn.Conv2d(1, 32, 3, 1, bias=False) self.conv2 = nn.Conv2d(32, 64, 3, 1, bias=False) diff --git a/test/onnx/test_fx_to_onnx_decomp_skip.py b/test/onnx/test_fx_to_onnx_decomp_skip.py index d3f21fa054b5..db8edce14259 100644 --- a/test/onnx/test_fx_to_onnx_decomp_skip.py +++ b/test/onnx/test_fx_to_onnx_decomp_skip.py @@ -27,7 +27,7 @@ class TestDynamoExportDecompSkip(pytorch_test_common.ExportTestCase): def test_upsample_bilinear2d(self): class TestModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.upsample = torch.nn.Upsample(scale_factor=2, mode="bilinear") @@ -51,7 +51,7 @@ class TestDynamoExportDecompSkip(pytorch_test_common.ExportTestCase): def test_upsample_trilinear3d(self): class TestModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.upsample = torch.nn.Upsample(scale_factor=2, mode="trilinear") diff --git a/test/onnx/test_fx_to_onnx_with_onnxruntime.py 
b/test/onnx/test_fx_to_onnx_with_onnxruntime.py index 8a093d4f3a7c..398b4813aa64 100644 --- a/test/onnx/test_fx_to_onnx_with_onnxruntime.py +++ b/test/onnx/test_fx_to_onnx_with_onnxruntime.py @@ -273,7 +273,7 @@ class TestFxToOnnxWithOnnxRuntime(onnx_test_common._TestONNXRuntime): def test_mnist(self): class MNISTModel(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = nn.Conv2d(1, 32, 3, 1, bias=True) self.conv2 = nn.Conv2d(32, 64, 3, 1, bias=True) @@ -302,7 +302,7 @@ class TestFxToOnnxWithOnnxRuntime(onnx_test_common._TestONNXRuntime): # This produces op as `torch.ops.aten.log_sigmoid_forward`, instead of the more # conventional `torch.ops.aten.log_sigmoid`. class Model(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.m = torch.nn.LogSigmoid() @@ -419,7 +419,7 @@ class TestFxToOnnxWithOnnxRuntime(onnx_test_common._TestONNXRuntime): def test_transpose_infer_shape(self): class TransposeModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 1, 3, stride=2) @@ -845,7 +845,7 @@ class TestFxToOnnxWithOnnxRuntime(onnx_test_common._TestONNXRuntime): ) def test_fx_symbolic_tracer_large_scale_exporter_with_toy_mlp(self): class MLPModel(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc0 = nn.Linear(8, 8, bias=True) self.fc1 = nn.Linear(8, 4, bias=True) @@ -1178,7 +1178,7 @@ class TestFxToOnnxFakeTensorWithOnnxRuntime(onnx_test_common._TestONNXRuntime): ) def test_large_scale_exporter_with_toy_mlp(self): class MLPModel(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc0 = nn.Linear(8, 8, bias=True) self.fc1 = nn.Linear(8, 4, bias=True) diff --git a/test/onnx/test_models_onnxruntime.py b/test/onnx/test_models_onnxruntime.py index 7c51f45b19e4..3012c91d22b0 100644 --- a/test/onnx/test_models_onnxruntime.py +++ b/test/onnx/test_models_onnxruntime.py @@ -347,7 +347,7 @@ class TestModelsONNXRuntime(onnx_test_common._TestONNXRuntime): @skipScriptTest() def test_roi_heads(self): class RoIHeadsModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.transform = _init_test_generalized_rcnn_transform() self.rpn = _init_test_rpn() diff --git a/test/onnx/test_onnx_opset.py b/test/onnx/test_onnx_opset.py index 0ff8e85715c3..29ac8f108c2d 100644 --- a/test/onnx/test_onnx_opset.py +++ b/test/onnx/test_onnx_opset.py @@ -300,7 +300,7 @@ class TestONNXOpset(pytorch_test_common.ExportTestCase): def test_dropout(self): class MyModule(Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.dropout = torch.nn.Dropout(0.5) diff --git a/test/onnx/test_onnxscript_no_runtime.py b/test/onnx/test_onnxscript_no_runtime.py index cb13f044de15..3d5cbf2e89b3 100644 --- a/test/onnx/test_onnxscript_no_runtime.py +++ b/test/onnx/test_onnxscript_no_runtime.py @@ -109,7 +109,7 @@ class TestONNXScriptExport(common_utils.TestCase): # Control flow is tested for _find_onnxscript_op function in torch/onnx/utils.py, # which has recursive logic to go through every nodes with subgraph in model proto class NestedLoopsModel(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.selu = torch.nn.SELU() diff --git a/test/onnx/test_operators.py b/test/onnx/test_operators.py index 995b5e2bd85d..ba5f47b82117 100644 --- a/test/onnx/test_operators.py +++ b/test/onnx/test_operators.py @@ -1061,7 
+1061,7 @@ class TestOperators(common_utils.TestCase): c0 = torch.randn(1, BATCH_SIZE, RNN_HIDDEN_SIZE) class LSTMModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.rnn = torch.nn.LSTM( RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, 1, bidirectional=False @@ -1157,7 +1157,7 @@ class TestOperators(common_utils.TestCase): ) class Model(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.emb = torch.nn.Embedding(4, 8) @@ -1207,7 +1207,7 @@ class TestOperators(common_utils.TestCase): ) class Model(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.emb = torch.nn.Embedding(4, 8) diff --git a/test/onnx/test_pytorch_onnx_no_runtime.py b/test/onnx/test_pytorch_onnx_no_runtime.py index f713ec4c3c46..64cbba6fc15f 100644 --- a/test/onnx/test_pytorch_onnx_no_runtime.py +++ b/test/onnx/test_pytorch_onnx_no_runtime.py @@ -97,7 +97,7 @@ class TestONNXExport(pytorch_test_common.ExportTestCase): return x.contiguous().transpose(0, 1).sum() class TraceMe(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.foo = Foo() @@ -149,7 +149,7 @@ class TestONNXExport(pytorch_test_common.ExportTestCase): return torch.neg(x) class ModuleToExport(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.mod = PythonModule() @@ -169,7 +169,7 @@ class TestONNXExport(pytorch_test_common.ExportTestCase): return torch.neg(x) class ModuleToExport(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.mod = torch.jit.trace(ModuleToInline(), torch.zeros(1, 2, 3)) @@ -188,7 +188,7 @@ class TestONNXExport(pytorch_test_common.ExportTestCase): return torch.neg(x) class ModuleToExport(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.mod = ModuleToInline() @@ -251,7 +251,7 @@ class TestONNXExport(pytorch_test_common.ExportTestCase): def test_onnx_export_script_inline_params(self): class ModuleToInline(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.m = torch.nn.Parameter(torch.ones(3, 3)) self.unused = torch.nn.Parameter(torch.ones(1, 2, 3)) @@ -261,7 +261,7 @@ class TestONNXExport(pytorch_test_common.ExportTestCase): return torch.mm(x, self.m) class ModuleToExport(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.mod = ModuleToInline() self.param = torch.nn.Parameter(torch.ones(3, 4)) @@ -375,7 +375,7 @@ class TestONNXExport(pytorch_test_common.ExportTestCase): def test_source_range_propagation(self): class ExpandingModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() # Will be expanded during ONNX export self.ln = torch.nn.LayerNorm([1]) @@ -485,7 +485,7 @@ class TestONNXExport(pytorch_test_common.ExportTestCase): "box_coder": BoxCoder, } - def __init__(self): + def __init__(self) -> None: super().__init__() self.box_coder = BoxCoder(1.4) @@ -888,7 +888,7 @@ class TestONNXExport(pytorch_test_common.ExportTestCase): mask_start_point = 0 class LSTMTraceWrapper(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.rnn = torch.nn.LSTM( @@ -1003,7 +1003,7 @@ class TestONNXExport(pytorch_test_common.ExportTestCase): def test_onnx_aten_fallback_must_not_fallback(self): # For BUILD_CAFFE2=0, aten fallback only when not exportable class 
ONNXExportable(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.quant = torch.ao.quantization.QuantStub() self.fc1 = torch.nn.Linear(12, 8) diff --git a/test/onnx/test_pytorch_onnx_onnxruntime.py b/test/onnx/test_pytorch_onnx_onnxruntime.py index c6ea90407c09..3cc3673b89de 100644 --- a/test/onnx/test_pytorch_onnx_onnxruntime.py +++ b/test/onnx/test_pytorch_onnx_onnxruntime.py @@ -166,7 +166,7 @@ def _parametrize_rnn_args(arg_name): class TestONNXRuntime(onnx_test_common._TestONNXRuntime): def test_fuse_conv_bn1d(self): class Fuse(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv1d(16, 33, 3, stride=2) self.bn = torch.nn.BatchNorm1d(33) @@ -181,7 +181,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): def test_fuse_conv_bn2d(self): class Fuse(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d( 3, 2, kernel_size=1, stride=2, padding=3, bias=False @@ -198,7 +198,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): def test_fuse_conv_bn3d(self): class Fuse(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv3d( 3, 2, (3, 5, 2), stride=(2, 1, 1), padding=(3, 2, 0), bias=False @@ -215,7 +215,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): def test_fuse_conv_in_block(self): class Fuse(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv1d( in_channels=5, @@ -1201,7 +1201,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): def test_conv(self): class TraceModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv1d(16, 33, 3, stride=2) self.conv2 = torch.nn.Conv2d( @@ -1222,7 +1222,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): def test_conv_str_padding(self): class TraceModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv1d(16, 33, 3, padding="valid") self.conv2 = torch.nn.Conv2d( @@ -1243,7 +1243,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): def test_conv_shape_inference(self): class Model(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv2 = torch.nn.Conv2d( 16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1) @@ -1259,7 +1259,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): def test_conv_transpose(self): class TraceModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.ConvTranspose1d(16, 33, 3, stride=2) self.conv2 = torch.nn.ConvTranspose2d( @@ -1289,7 +1289,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): # The following test only works when onnx shape inference is enabled. 
def test_transpose_infer_shape(self): class TransposeModule(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 1, 3, stride=2) @@ -2610,7 +2610,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): return bias class ScriptModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.ngram = 2 self.max_target_positions = 512 @@ -3064,7 +3064,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): ) class ScriptModule(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.submodule = ScriptModel() @@ -4262,7 +4262,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(16) def test_scatter_reduce(self): class Model(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x, index, input): @@ -4284,7 +4284,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(16) def test_scatter_reduce_self_rank_zero(self): class Model(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x, index, input): @@ -4349,7 +4349,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipScriptTest() # Scripting error: Cannot instantiate nn module def test_gather_constant_fold(self): class GatherModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.weight = torch.nn.Buffer(torch.ones(5)) # torch.nn.Embedding is converted to ONNX::Gather. @@ -4368,7 +4368,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): self.run_test(GatherModule(), (x,)) class GatherModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.weight = torch.nn.Buffer(torch.ones(2)) @@ -4383,7 +4383,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): self.run_test(GatherModule(), (x,)) class GatherModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.rb = torch.nn.Buffer(torch.randn(1, 1, 3, 1, 1)) @@ -4652,7 +4652,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(9) def test_lstm_no_hidden(self): class LSTMModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.rnn = torch.nn.LSTM(input_size=16, hidden_size=16) @@ -4665,7 +4665,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(9) def test_lstm_proj_no_hidden(self): class LSTMModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.rnn = torch.nn.LSTM(input_size=16, hidden_size=16, proj_size=8) @@ -4679,7 +4679,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(9) def test_lstm(self): class LSTMModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.rnn = torch.nn.LSTM( RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, 1, bidirectional=False @@ -4714,7 +4714,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(9) def test_lstm_default_init_state(self): class LSTMModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.rnn = torch.nn.LSTM( RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, 1, bidirectional=False @@ -4729,7 +4729,7 @@ class 
TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(9) def test_lstm_fixed_batch_size(self): class LSTMModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.lstm = torch.nn.LSTM( RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, 1, bidirectional=False @@ -4752,7 +4752,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(9) def test_lstm_post_fix_init_state(self): class LSTMModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.lstm = torch.nn.LSTM( RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, 1, bidirectional=False @@ -4842,7 +4842,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(9) def test_lstm_sequence(self): class LstmNet(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.rnn1 = torch.nn.LSTM(8, 8, bidirectional=True, batch_first=True) self.linear1 = torch.nn.Linear(8 * 2, 8) @@ -5288,7 +5288,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): def test_gt_primitive(self): class GreaterModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.y: int = 2 @@ -5629,7 +5629,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): def test_linear(self): class LinearModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc = torch.nn.Linear(16, 16) @@ -6580,7 +6580,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(9) def test_new_zeros_with_dtype(self): class MyModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.emb = torch.nn.Embedding(50, 64) @@ -6834,7 +6834,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(11) def test_inplace_attr_with_loop(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self._bias = torch.arange( 12, @@ -6861,7 +6861,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(11) def test_inplace_attr_copy_with_loop(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self._bias = torch.arange( 12, @@ -7282,7 +7282,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): def test_unfold_infer_shape(self): class UnfoldModule(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv1d(3, 1, 3, stride=2) @@ -7347,7 +7347,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): def test_prelu(self): class PReluModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.prelu = torch.nn.PReLU() @@ -7370,7 +7370,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): def test_relu6(self): class Relu6Model(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.relu6 = torch.nn.ReLU6() @@ -7389,7 +7389,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): def test_silu(self): class SiLUModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.silu = torch.nn.SiLU() @@ -7461,7 +7461,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): def test_mish(self): class MishModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: 
super().__init__() self.mish = torch.nn.Mish() @@ -9047,7 +9047,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(9) def test_MSELoss(self): class MSELoss(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.loss1 = torch.nn.MSELoss(reduction="none") self.loss2 = torch.nn.MSELoss(reduction="sum") @@ -9080,7 +9080,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): def _kldiv_loss(self, x, y): class KLDivLossNone(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.loss = torch.nn.KLDivLoss(reduction="none", log_target=True) @@ -9090,7 +9090,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): self.run_test(KLDivLossNone(), input_args=(x, y)) class KLDivLossMean(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.loss = torch.nn.KLDivLoss(reduction="mean", log_target=False) @@ -9100,7 +9100,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): self.run_test(KLDivLossMean(), input_args=(x, y)) class KLDivLossSum(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.loss = torch.nn.KLDivLoss(reduction="sum", log_target=True) @@ -9110,7 +9110,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): self.run_test(KLDivLossSum(), input_args=(x, y)) class KLDivLossBatchMean(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.loss = torch.nn.KLDivLoss(reduction="batchmean", log_target=False) @@ -9120,7 +9120,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): self.run_test(KLDivLossBatchMean(), input_args=(x, y)) class KLDivLossMiniBatchMean(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.loss = torch.nn.KLDivLoss( reduction="batchmean", size_average=False, log_target=True @@ -9134,7 +9134,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(12) def test_nllloss(self): class NLLModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.loss = torch.nn.NLLLoss(reduction="none") self.m = torch.nn.LogSoftmax(dim=1) @@ -9154,7 +9154,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(12) def test_nllloss_2d_none(self): class NLLModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.loss = torch.nn.NLLLoss(reduction="none") self.conv = torch.nn.Conv2d(16, C, (3, 3)) @@ -9175,7 +9175,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(12) def test_nllloss_2d_mean(self): class NLLModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.loss = torch.nn.NLLLoss(reduction="mean") self.conv = torch.nn.Conv2d(16, C, (3, 3)) @@ -9196,7 +9196,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(12) def test_nllloss_2d_sum(self): class NLLModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.loss = torch.nn.NLLLoss(reduction="sum") self.conv = torch.nn.Conv2d(16, C, (3, 3)) @@ -9217,7 +9217,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(12) def test_nllloss_2d_mean_weights(self): class NLLModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: 
super().__init__() self.loss = torch.nn.NLLLoss(reduction="mean", weight=torch.randn(C)) self.conv = torch.nn.Conv2d(16, C, (3, 3)) @@ -9238,7 +9238,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(12) def test_nllloss_2d_mean_ignore_index(self): class NLLModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.loss = torch.nn.NLLLoss(reduction="mean", ignore_index=1) self.conv = torch.nn.Conv2d(16, C, (3, 3)) @@ -9296,7 +9296,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(12) def test_nllloss_2d_mean_ignore_index_weights(self): class NLLModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.loss = torch.nn.NLLLoss( reduction="mean", weight=torch.randn(C), ignore_index=1 @@ -9640,7 +9640,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): def test_dropout(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.dropout = torch.nn.Dropout(0.3) @@ -9657,7 +9657,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): def test_shape_constant_fold(self): class ShapeModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.weight = torch.nn.Buffer(torch.ones(5)) @@ -9671,7 +9671,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(12) def test_celu(self): class Celu(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.celu = torch.nn.CELU(alpha=1.0) @@ -9684,7 +9684,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(12) def test_celu_default(self): class Celu(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.celu = torch.nn.CELU() @@ -9697,7 +9697,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(12) def test_celu_alpha(self): class Celu(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.celu = torch.nn.CELU(alpha=2.0) @@ -9710,7 +9710,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(12) def test_celu_cast(self): class Celu(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.celu = torch.nn.CELU() @@ -10046,7 +10046,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(9) def test_embedding_module(self): class EmbedModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.emb = torch.nn.Embedding(4, 3, padding_idx=1) self.emb2 = torch.nn.Embedding(4, 3, padding_idx=1) @@ -10067,7 +10067,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): self.run_test(model, (x,)) class EmbedModelWithoutPaddingIdx(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.emb = torch.nn.Embedding(4, 3) @@ -10512,7 +10512,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): def test_batchnorm_training(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.bn1 = torch.nn.BatchNorm2d(3, affine=False) self.cv1 = torch.nn.Conv2d(3, 3, 10) @@ -10548,7 +10548,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): def test_batchnorm_training_mode_fix_layer(self): class 
MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.bn1 = torch.nn.BatchNorm2d(3, affine=True) self.cv1 = torch.nn.Conv2d(3, 3, 10) @@ -10585,7 +10585,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): def test_batchnorm_eval_mode_train_layer(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.bn1 = torch.nn.BatchNorm2d(3, affine=True) self.cv1 = torch.nn.Conv2d(3, 3, 10) @@ -10622,7 +10622,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): def test_instancenorm_training(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.in1 = torch.nn.InstanceNorm2d(3, affine=True) self.cv1 = torch.nn.Conv2d(3, 3, 10) @@ -10658,7 +10658,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): def test_instancenorm_training_mode_fix_layer(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.in1 = torch.nn.InstanceNorm2d(3, affine=True) self.cv1 = torch.nn.Conv2d(3, 3, 10) @@ -10695,7 +10695,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): def test_instancenorm_eval_mode_train_layer(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.in1 = torch.nn.InstanceNorm2d(8, affine=True) self.cv1 = torch.nn.Conv2d(8, 8, 10) @@ -10733,7 +10733,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(12) def test_dropout_training(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.dropout = torch.nn.Dropout(0.4) @@ -10775,7 +10775,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(12) def test_dropout_training_zero(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.dropout = torch.nn.Dropout(0.5) @@ -10839,7 +10839,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): def test_conv_bn(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d( 3, 16, kernel_size=1, stride=2, padding=3, bias=True @@ -10864,7 +10864,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): def test_multiple_conv_bn(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d( 3, 64, kernel_size=7, stride=2, padding=3, bias=False @@ -11003,7 +11003,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(11) def test_resize_images(self): class TransformModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.transform = _init_test_generalized_rcnn_transform() @@ -11024,7 +11024,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipScriptTest() def test_transform_images(self): class TransformModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.transform = _init_test_generalized_rcnn_transform() @@ -11055,7 +11055,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipScriptTest() def test_rpn(self): class RPNModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.rpn = _init_test_rpn() @@ -11094,7 +11094,7 @@ 
class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipScriptTest() def test_multi_scale_roi_align(self): class TransformModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.model = torchvision.ops.MultiScaleRoIAlign( ["feat1", "feat2"], 3, 2 @@ -11203,7 +11203,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): return self.module(x) + self.weights class Module(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.module = InnerModule(embedding_dim=8) @@ -11243,7 +11243,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): ) * self.const class Module(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.module = InnerModule(embedding_dim=8) @@ -11256,7 +11256,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): def test_set_attr(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv1d(3, 10, 2) self.b = False @@ -11279,7 +11279,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(11) def test_set_attr_2(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv1d(10, 3, 3) self.conv.bias = torch.nn.Parameter(torch.zeros(3, 10, 3)) @@ -11304,7 +11304,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(11) def test_set_attr_3(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv1d(10, 3, 3) self.conv.weight = torch.nn.Parameter(torch.zeros(3, 10)) @@ -11331,7 +11331,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(11) def test_set_attr_4(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv1d(10, 3, 3) self.conv.bias = torch.nn.Parameter(torch.zeros(3, 10, 3)) @@ -11363,7 +11363,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(11) def test_set_attr_5(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv1d(10, 3, 3) self.conv.bias = torch.nn.Parameter(torch.zeros(3, 10, 3)) @@ -11394,7 +11394,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(11) def test_set_attr_in_loop(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv1d(10, 3, 3) self.conv.weight = torch.nn.Parameter(torch.zeros(3, 10)) @@ -11422,7 +11422,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(13) def test_set_attr_in_loop_with_list(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv1d(10, 3, 3) self.conv.weight = torch.nn.Parameter(torch.zeros(3, 10)) @@ -12196,7 +12196,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(9) def test_hann_window_periodic(self): class HannWindowModule_Periodic(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.window_length = 0 @@ -12218,7 +12218,7 @@ class 
TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(9) def test_hann_window_not_periodic(self): class HannWindowModule_NotPeriodic(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.window_length = 0 @@ -12241,7 +12241,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipScriptTest() def test_hann_window_default_values(self): class HannWindowModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.window_length = 0 @@ -13098,7 +13098,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(13) def test_qat_linear_per_channel(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.quant = torch.ao.quantization.QuantStub() self.linear = torch.nn.Linear(4, 3) @@ -13130,7 +13130,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(13) def test_quantized_list_of_inputs_with_cat(self): class TestModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.quant = torch.ao.quantization.QuantStub() self.dequant = torch.ao.quantization.DeQuantStub() @@ -13151,7 +13151,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(13) def test_qat_relu(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.quant = torch.ao.quantization.QuantStub() self.relu = torch.nn.ReLU() @@ -13173,7 +13173,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(13) def test_qat_conv2d(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.quant = torch.ao.quantization.QuantStub() self.conv = torch.nn.Conv2d(4, 2, 3, stride=2) @@ -13204,7 +13204,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(13) def test_qat_conv2d_relu(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.quant = torch.ao.quantization.QuantStub() self.conv = torch.nn.Conv2d(4, 2, 3, stride=2) @@ -13237,7 +13237,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(13) def test_qat_conv2d_relu_fused(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.quant = torch.ao.quantization.QuantStub() self.conv = torch.nn.Conv2d(4, 2, 3, stride=2) @@ -13271,7 +13271,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(13) def test_qat_linear_relu_fused(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.quant = torch.ao.quantization.QuantStub() self.linear = torch.nn.Linear(4, 2) @@ -13303,7 +13303,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): @skipIfUnsupportedMinOpsetVersion(10) def test_qat_maxpool2d(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.quant = torch.ao.quantization.QuantStub() self.pool = torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1) @@ -13776,7 +13776,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): inputs = (coords0, coords1, edge_from, edge_to) class MySAGEConv(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() 
self.SAGEConvBlock1 = torch_geometric_nn.SAGEConv( 2, 512, normalize=True diff --git a/test/onnx/test_pytorch_onnx_onnxruntime_cuda.py b/test/onnx/test_pytorch_onnx_onnxruntime_cuda.py index 5d36a5bb8a34..85aeafceafb4 100644 --- a/test/onnx/test_pytorch_onnx_onnxruntime_cuda.py +++ b/test/onnx/test_pytorch_onnx_onnxruntime_cuda.py @@ -49,7 +49,7 @@ class TestONNXRuntime_cuda(onnx_test_common._TestONNXRuntime): @skipScriptTest() def test_layer_norm_fp16(self): class LayerNormModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.layer_norm = torch.nn.LayerNorm([10, 10]) @@ -73,7 +73,7 @@ class TestONNXRuntime_cuda(onnx_test_common._TestONNXRuntime): @skipScriptTest() def test_softmaxCrossEntropy_fusion_fp16(self): class FusionModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.loss = torch.nn.NLLLoss(reduction="none") self.m = torch.nn.LogSoftmax(dim=1) @@ -97,7 +97,7 @@ class TestONNXRuntime_cuda(onnx_test_common._TestONNXRuntime): @skipScriptTest() def test_apex_o2(self): class LinearModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(3, 5) @@ -133,7 +133,7 @@ class TestONNXRuntime_cuda(onnx_test_common._TestONNXRuntime): @skipIfNoCuda def test_deduplicate_initializers_diff_devices(self): class Model(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.w = torch.nn.Parameter( torch.ones(2, 3, device=torch.device("cpu")) diff --git a/test/onnx/test_utility_funs.py b/test/onnx/test_utility_funs.py index a6ad2ced84cb..81c5ffe1bd46 100644 --- a/test/onnx/test_utility_funs.py +++ b/test/onnx/test_utility_funs.py @@ -397,7 +397,7 @@ class TestUtilityFuns(_BaseTestCase): def test_constant_fold_unsqueeze_multi_axies(self): class PReluModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.prelu = torch.nn.PReLU() @@ -490,7 +490,7 @@ class TestUtilityFuns(_BaseTestCase): def test_constant_fold_lstm(self): class GruNet(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.mygru = torch.nn.GRU(7, 3, 1, bidirectional=False) @@ -521,7 +521,7 @@ class TestUtilityFuns(_BaseTestCase): def test_constant_fold_transpose_matmul(self): class MatMulNet(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.B = torch.nn.Parameter(torch.ones(5, 3)) @@ -694,7 +694,7 @@ class TestUtilityFuns(_BaseTestCase): def test_constant_fold_shape(self): class ShapeModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.weight = torch.nn.Buffer(torch.ones(5)) @@ -845,7 +845,7 @@ class TestUtilityFuns(_BaseTestCase): return x * x class Outer(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.inner = torch.jit.script(Inner()) @@ -1137,7 +1137,7 @@ class TestUtilityFuns(_BaseTestCase): def test_node_scope(self): class N(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.relu = torch.nn.ReLU() @@ -1566,7 +1566,7 @@ class TestUtilityFuns(_BaseTestCase): def test_unused_initializers(self): class Model(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv2 = torch.nn.ConvTranspose2d( 16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(1, 1) @@ -1593,7 +1593,7 @@ class TestUtilityFuns(_BaseTestCase): def test_scripting_param(self): 
class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d( 3, 16, kernel_size=1, stride=2, padding=3, bias=True @@ -1629,7 +1629,7 @@ class TestUtilityFuns(_BaseTestCase): def test_fuse_conv_bn(self): class Fuse(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d( 3, 2, kernel_size=1, stride=2, padding=3, bias=True @@ -1701,7 +1701,7 @@ class TestUtilityFuns(_BaseTestCase): def test_onnx_value_name(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.in_weight = torch.nn.Parameter(torch.Tensor(3, 3)) self.in_bias = torch.nn.Parameter(torch.Tensor(3)) @@ -1734,7 +1734,7 @@ class TestUtilityFuns(_BaseTestCase): def test_onnx_node_naming(self): class MainModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self._module_1 = torch.nn.Linear(10, 10) self._module_2 = torch.nn.Linear(10, 10) @@ -1773,7 +1773,7 @@ class TestUtilityFuns(_BaseTestCase): def _test_deduplicate_initializers(self, torchscript=False): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.layer1 = torch.nn.Linear(3, 3) self.layer2 = torch.nn.Linear(3, 3) @@ -1841,7 +1841,7 @@ class TestUtilityFuns(_BaseTestCase): @skipIfNoCuda def test_deduplicate_initializers_diff_devices(self): class Model(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.w_cpu = torch.nn.Parameter( torch.ones(3, device=torch.device("cpu")) @@ -1914,7 +1914,7 @@ class TestUtilityFuns(_BaseTestCase): # upsample scale is a constant, not a model parameter, # therefore should be ignored by shared weight deduplication. 
class Model(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.upsample_1 = torch.nn.Upsample(scale_factor=2) self.upsample_2 = torch.nn.Upsample(scale_factor=2) diff --git a/test/onnx/test_verification.py b/test/onnx/test_verification.py index 547aa40698d2..3489788accb1 100644 --- a/test/onnx/test_verification.py +++ b/test/onnx/test_verification.py @@ -206,7 +206,7 @@ class TestFindMismatch(pytorch_test_common.ExportTestCase): ) class Model(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.layers = torch.nn.Sequential( torch.nn.Linear(3, 4), diff --git a/test/onnx/torch_export/test_torch_export_with_onnxruntime.py b/test/onnx/torch_export/test_torch_export_with_onnxruntime.py index 71a1ba7d7c54..0615e6852c86 100644 --- a/test/onnx/torch_export/test_torch_export_with_onnxruntime.py +++ b/test/onnx/torch_export/test_torch_export_with_onnxruntime.py @@ -110,7 +110,7 @@ class TestFxToOnnxWithOnnxRuntime(onnx_test_common._TestONNXRuntime): def test_onnx_program_supports_retraced_graph(self): class Bar(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.buf = torch.nn.Buffer(torch.ones(1)) @@ -119,7 +119,7 @@ class TestFxToOnnxWithOnnxRuntime(onnx_test_common._TestONNXRuntime): return x.sum() + self.buf.sum() class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.buf = torch.nn.Buffer(torch.zeros(1)) self.bar = Bar() @@ -209,7 +209,7 @@ class TestFxToOnnxWithOnnxRuntime(onnx_test_common._TestONNXRuntime): for persistent in (True, False): class CustomModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.register_buffer( "my_buffer", torch.tensor(4.0), persistent=persistent diff --git a/test/optim/test_lrscheduler.py b/test/optim/test_lrscheduler.py index 115fe4b15d27..6b8dcbe05795 100644 --- a/test/optim/test_lrscheduler.py +++ b/test/optim/test_lrscheduler.py @@ -47,7 +47,7 @@ load_tests = load_tests class TestLRScheduler(TestCase): class SchedulerTestNet(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(1, 1, 1) self.conv2 = torch.nn.Conv2d(1, 1, 1) @@ -1572,7 +1572,7 @@ class TestLRScheduler(TestCase): # Case 3: Custom `scale_fn`, a callable class class ScaleFn: - def __init__(self): + def __init__(self) -> None: self.x = 0.5 def __call__(self, _): diff --git a/test/package/package_a/fake_interface.py b/test/package/package_a/fake_interface.py index 02d343af4e1b..8131ee5fd374 100644 --- a/test/package/package_a/fake_interface.py +++ b/test/package/package_a/fake_interface.py @@ -34,7 +34,7 @@ class NewModule(torch.nn.Module): class UsesInterface(torch.nn.Module): proxy_mod: ModuleInterface - def __init__(self): + def __init__(self) -> None: super().__init__() self.proxy_mod = OrigModule() diff --git a/test/package/package_a/fake_script_class.py b/test/package/package_a/fake_script_class.py index 988a726b3ed3..e7e54f843485 100644 --- a/test/package/package_a/fake_script_class.py +++ b/test/package/package_a/fake_script_class.py @@ -22,7 +22,7 @@ def uses_script_class(x): class IdListFeature: - def __init__(self): + def __init__(self) -> None: self.id_list = torch.ones(1, 1) def returns_self(self) -> "IdListFeature": diff --git a/test/package/package_c/test_module.py b/test/package/package_c/test_module.py index 7981fc4a71d1..8b8ccd4abcfe 100644 --- a/test/package/package_c/test_module.py +++ 
b/test/package/package_c/test_module.py @@ -7,7 +7,7 @@ try: from torchvision.models import resnet18 class TorchVisionTest(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.tvmod = resnet18() diff --git a/test/package/test_dependency_api.py b/test/package/test_dependency_api.py index a2f12887b1aa..3f8ce1391eb6 100644 --- a/test/package/test_dependency_api.py +++ b/test/package/test_dependency_api.py @@ -273,7 +273,7 @@ class TestDependencyAPI(PackageTestCase): return module class BrokenImporter(Importer): - def __init__(self): + def __init__(self) -> None: self.modules = { "foo": create_module("foo"), "bar": create_module("bar"), diff --git a/test/package/test_package_fx.py b/test/package/test_package_fx.py index 6ea4262d9170..9976766f47f3 100644 --- a/test/package/test_package_fx.py +++ b/test/package/test_package_fx.py @@ -169,7 +169,7 @@ class TestPackageFX(PackageTestCase): def test_package_fx_wrap(self): class TestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, a): diff --git a/test/package/test_package_script.py b/test/package/test_package_script.py index 43f3a66b6b5a..d5e7d218bd34 100644 --- a/test/package/test_package_script.py +++ b/test/package/test_package_script.py @@ -83,7 +83,7 @@ class TestPackageScript(PackageTestCase): class UsesInterface(torch.nn.Module): proxy_mod: ModuleInterface - def __init__(self): + def __init__(self) -> None: super().__init__() self.proxy_mod = ImplementsInterface() @@ -246,7 +246,7 @@ class TestPackageScript(PackageTestCase): return input class TopMod(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.modB = Submod() @@ -710,7 +710,7 @@ class TestPackageScript(PackageTestCase): """ class TorchVisionTestInline(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.tvmod = resnet18() @@ -749,7 +749,7 @@ class TestPackageScript(PackageTestCase): """ class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.foo = torch.ones(2, 3) diff --git a/test/profiler/test_memory_profiler.py b/test/profiler/test_memory_profiler.py index 6bc1f5d0af7a..a074a29b60c5 100644 --- a/test/profiler/test_memory_profiler.py +++ b/test/profiler/test_memory_profiler.py @@ -66,7 +66,7 @@ class LazyLinear(torch.nn.Module): class RecordInputOutputDispatchMode(torch.utils._python_dispatch.TorchDispatchMode): - def __init__(self): + def __init__(self) -> None: self.results = [] def mark_region(self, name: str): diff --git a/test/profiler/test_profiler.py b/test/profiler/test_profiler.py index 61905f559ed4..c5c83a28817f 100644 --- a/test/profiler/test_profiler.py +++ b/test/profiler/test_profiler.py @@ -244,7 +244,7 @@ class TestProfiler(TestCase): return w.sum() class DummyModule(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d( 3, 2, kernel_size=1, stride=2, padding=3, bias=False @@ -351,7 +351,7 @@ class TestProfiler(TestCase): end_barrier = threading.Barrier(num_threads, timeout=timeout) class Task(threading.Thread): - def __init__(self): + def __init__(self) -> None: self._end_gate = threading.Event() super().__init__(daemon=True) self.start() @@ -763,7 +763,7 @@ class TestProfiler(TestCase): return x + 2 class C(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.A0 = A() self.B0 = B() @@ -1423,7 +1423,7 @@ from torch.profiler import 
supported_activities, profile from torch.autograd.profiler import KinetoStepTracker class SimpleNet(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc1 = nn.Linear(10, 5) self.fc2 = nn.Linear(5, 2) @@ -1914,7 +1914,7 @@ assert KinetoStepTracker.current_step() == initial_step + 2 * niters class SimpleNet(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc1 = nn.Linear(10, 5) self.fc2 = nn.Linear(5, 2) diff --git a/test/profiler/test_profiler_tree.py b/test/profiler/test_profiler_tree.py index abd76def9875..7de38519feca 100644 --- a/test/profiler/test_profiler_tree.py +++ b/test/profiler/test_profiler_tree.py @@ -556,7 +556,7 @@ class TestProfilerTree(TestCase): @ProfilerTree.test def test_profiler_experimental_tree_with_stack_and_modules(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.layers = [ torch.nn.ReLU(), diff --git a/test/profiler/test_record_function.py b/test/profiler/test_record_function.py index bd4c02806213..e024c7d48856 100644 --- a/test/profiler/test_record_function.py +++ b/test/profiler/test_record_function.py @@ -96,7 +96,7 @@ class TestRecordFunction(TestCase): def test_datapipe_delegation_with_profiler(self): class IDPIterator(torch.utils.data.IterDataPipe): - def __init__(self): + def __init__(self) -> None: self.data = list(range(10)) self._idx = 0 diff --git a/test/profiler/test_torch_tidy.py b/test/profiler/test_torch_tidy.py index 93d909f8cee8..9a6f40c9a8c5 100644 --- a/test/profiler/test_torch_tidy.py +++ b/test/profiler/test_torch_tidy.py @@ -47,7 +47,7 @@ def find_node_with_regex(nodes, pattern): class SimpleNet(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc1 = nn.Linear(10, 5) self.fc2 = nn.Linear(5, 2) diff --git a/test/quantization/bc/test_backward_compatibility.py b/test/quantization/bc/test_backward_compatibility.py index e3ec8e684d41..e249ae3977e4 100644 --- a/test/quantization/bc/test_backward_compatibility.py +++ b/test/quantization/bc/test_backward_compatibility.py @@ -514,7 +514,7 @@ class TestSerialization(TestCase): ) def test_lstm(self): class LSTMModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.lstm = nnqd.LSTM(input_size=3, hidden_size=7, num_layers=1).to( dtype=torch.float @@ -544,7 +544,7 @@ class TestSerialization(TestCase): def test_default_qat_qconfig(self): class Model(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = nn.Linear(5, 5) self.relu = nn.ReLU() diff --git a/test/quantization/core/experimental/test_adaround_eager.py b/test/quantization/core/experimental/test_adaround_eager.py index 537deea25971..53f943398c4e 100644 --- a/test/quantization/core/experimental/test_adaround_eager.py +++ b/test/quantization/core/experimental/test_adaround_eager.py @@ -70,7 +70,7 @@ class TestAdaround(QuantizationTestCase): def get_feed_forward_wrapper(self): class FeedForwardWrapper(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, model, sample): @@ -81,7 +81,7 @@ class TestAdaround(QuantizationTestCase): def test_linear_chain(self): class LinearChain(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear1 = nn.Linear(3, 4) self.linear2 = nn.Linear(4, 5) @@ -110,7 +110,7 @@ class TestAdaround(QuantizationTestCase): def test_conv_chain(self): class ConvChain(nn.Module): - def 
__init__(self): + def __init__(self) -> None: super().__init__() self.conv2d1 = nn.Conv2d(3, 4, 5, 5) self.conv2d2 = nn.Conv2d(4, 5, 5, 5) diff --git a/test/quantization/core/test_quantized_tensor.py b/test/quantization/core/test_quantized_tensor.py index c99378d4155f..658987fba017 100644 --- a/test/quantization/core/test_quantized_tensor.py +++ b/test/quantization/core/test_quantized_tensor.py @@ -21,7 +21,7 @@ import itertools import tempfile class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.qscheme = torch.per_tensor_symmetric @@ -1414,7 +1414,7 @@ class TestQuantizedTensor(TestCase): class M(torch.jit.ScriptModule): __constants__ = ['fname'] - def __init__(self): + def __init__(self) -> None: super().__init__() self.fname = fname diff --git a/test/quantization/core/test_utils.py b/test/quantization/core/test_utils.py index d30cc0d919a5..6024fe29eaef 100644 --- a/test/quantization/core/test_utils.py +++ b/test/quantization/core/test_utils.py @@ -19,7 +19,7 @@ class TestUtils(TestCase): def test_get_fqn_to_example_inputs_simple(self): class Sub(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear1 = torch.nn.Linear(5, 5) self.linear2 = torch.nn.Linear(5, 5) @@ -30,7 +30,7 @@ class TestUtils(TestCase): return x class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear1 = torch.nn.Linear(5, 5) self.linear2 = torch.nn.Linear(5, 5) @@ -57,7 +57,7 @@ class TestUtils(TestCase): """ Test that we can get example inputs for functions with default keyword arguments """ class Sub(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear1 = torch.nn.Linear(5, 5) self.linear2 = torch.nn.Linear(5, 5) @@ -68,7 +68,7 @@ class TestUtils(TestCase): return x class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear1 = torch.nn.Linear(5, 5) self.linear2 = torch.nn.Linear(5, 5) @@ -98,7 +98,7 @@ class TestUtils(TestCase): """ Test that we can record complex example inputs such as lists and dicts """ class Sub(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear1 = torch.nn.Linear(5, 5) self.linear2 = torch.nn.Linear(5, 5) @@ -109,7 +109,7 @@ class TestUtils(TestCase): return x class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear1 = torch.nn.Linear(5, 5) self.linear2 = torch.nn.Linear(5, 5) diff --git a/test/quantization/core/test_workflow_module.py b/test/quantization/core/test_workflow_module.py index c4f36b4dd79a..168c00920f62 100644 --- a/test/quantization/core/test_workflow_module.py +++ b/test/quantization/core/test_workflow_module.py @@ -996,7 +996,7 @@ class TestDistributed(QuantizationTestCase): with override_quantized_engine('fbgemm'): # create conv-bn class Model(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = nn.Conv2d(4, 1, 3, padding=1) self.bn = nn.BatchNorm2d(1) @@ -1045,7 +1045,7 @@ class TestDistributed(QuantizationTestCase): """ class Model(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = nn.Conv2d(1, 1, 1) self.bn = nn.BatchNorm2d(1) @@ -1276,7 +1276,7 @@ class TestFusedObsFakeQuantModule(TestCase): def test_embedding_bag_qat_config(self): class Model(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.emb1 = 
torch.nn.EmbeddingBag(num_embeddings=10, embedding_dim=12, include_last_offset=True, scale_grad_by_freq=False, mode='sum') @@ -1356,7 +1356,7 @@ class TestFusedObsFakeQuantModule(TestCase): def test_default_fused_qat_config(self): class Model(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = nn.Linear(2, 2) self.relu = nn.ReLU() diff --git a/test/quantization/core/test_workflow_ops.py b/test/quantization/core/test_workflow_ops.py index 99d60b78aa54..4a0c25776f7c 100644 --- a/test/quantization/core/test_workflow_ops.py +++ b/test/quantization/core/test_workflow_ops.py @@ -629,7 +629,7 @@ class TestFakeQuantizeOps(TestCase): def test_fake_quant_preserves_qparam_shapes_for_activations(self): class Model(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = nn.Linear(4, 4) diff --git a/test/quantization/eager/test_bias_correction_eager.py b/test/quantization/eager/test_bias_correction_eager.py index d29d39bb3028..39c8ad872bdd 100644 --- a/test/quantization/eager/test_bias_correction_eager.py +++ b/test/quantization/eager/test_bias_correction_eager.py @@ -67,7 +67,7 @@ class TestBiasCorrectionEager(QuantizationTestCase): @skipIfNoFBGEMM def test_linear_chain(self): class LinearChain(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear1 = nn.Linear(3, 4) self.linear2 = nn.Linear(4, 5) @@ -86,7 +86,7 @@ class TestBiasCorrectionEager(QuantizationTestCase): @skipIfNoFBGEMM def test_conv_chain(self): class ConvChain(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv2d1 = nn.Conv2d(3, 4, 5, 5) self.conv2d2 = nn.Conv2d(4, 5, 5, 5) diff --git a/test/quantization/eager/test_equalize_eager.py b/test/quantization/eager/test_equalize_eager.py index f08ff2b8d023..8cf506e8478a 100644 --- a/test/quantization/eager/test_equalize_eager.py +++ b/test/quantization/eager/test_equalize_eager.py @@ -72,7 +72,7 @@ class TestEqualizeEager(QuantizationTestCase): given the same input ''' class ChainModule(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear1 = nn.Linear(3, 4) self.linear2 = nn.Linear(4, 5) @@ -108,7 +108,7 @@ class TestEqualizeEager(QuantizationTestCase): yield the same output given the same input ''' class M(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = nn.Conv2d(3, 3, 1).to(dtype=torch.float) self.relu1 = nn.ReLU(inplace=False).to(dtype=torch.float) @@ -154,7 +154,7 @@ class TestEqualizeEager(QuantizationTestCase): yield the same output given the same input ''' class M(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear1 = nn.Linear(3, 4) self.relu1 = nn.ReLU(inplace=False).to(dtype=torch.float) diff --git a/test/quantization/eager/test_numeric_suite_eager.py b/test/quantization/eager/test_numeric_suite_eager.py index a798745d6537..37e642fde753 100644 --- a/test/quantization/eager/test_numeric_suite_eager.py +++ b/test/quantization/eager/test_numeric_suite_eager.py @@ -39,7 +39,7 @@ from torch.testing._internal.common_quantized import override_qengines from torch.testing._internal.common_utils import IS_ARM64 class SubModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.qconfig = default_qconfig self.mod1 = torch.nn.Conv2d(3, 3, 3, bias=False).to(dtype=torch.float) @@ -56,7 +56,7 @@ class SubModule(torch.nn.Module): class 
ModelWithSubModules(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.mod1 = SubModule() self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float) @@ -68,7 +68,7 @@ class ModelWithSubModules(torch.nn.Module): class ModelWithFunctionals(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.mycat = nnq.FloatFunctional() self.myadd = nnq.FloatFunctional() diff --git a/test/quantization/eager/test_quantize_eager_ptq.py b/test/quantization/eager/test_quantize_eager_ptq.py index e6e3327b7cdf..c50ece71a3a3 100644 --- a/test/quantization/eager/test_quantize_eager_ptq.py +++ b/test/quantization/eager/test_quantize_eager_ptq.py @@ -81,7 +81,7 @@ class TestQuantizeEagerOps(QuantizationTestCase): extra_module_kwargs, input_size): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = float_module_class(**extra_module_kwargs) self.quant = QuantStub() @@ -94,7 +94,7 @@ class TestQuantizeEagerOps(QuantizationTestCase): return x class RefM(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = float_module_class(**extra_module_kwargs) self.quant1 = QuantStub() @@ -203,7 +203,7 @@ class TestQuantizeEagerOps(QuantizationTestCase): def test_int16_reference_module(self): class RefM(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = nn.ConvTranspose2d(1, 1, 1) self.quant1 = QuantStub() @@ -277,7 +277,7 @@ class TestQuantizeEagerOps(QuantizationTestCase): extra_module_kwargs: keyword args to instantiate the float module """ class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.activation_op = float_module_class(**extra_module_kwargs) self.quant = QuantStub() @@ -839,7 +839,7 @@ class TestQuantizeEagerPTQStatic(QuantizationTestCase): self.checkScriptable(quantized_model, [[indices, offsets, per_sample_weights]], check_save_load=True) class EmbeddingBagWithLinear(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.emb = torch.nn.EmbeddingBag(num_embeddings=10, embedding_dim=12, include_last_offset=True, scale_grad_by_freq=False, mode='sum') @@ -861,7 +861,7 @@ class TestQuantizeEagerPTQStatic(QuantizationTestCase): @skipIfNoFBGEMM def test_custom_module_class(self): class CustomModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(1, 1, 1) @@ -901,7 +901,7 @@ class TestQuantizeEagerPTQStatic(QuantizationTestCase): return quantized class Sub(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.custom = CustomModule() @@ -909,7 +909,7 @@ class TestQuantizeEagerPTQStatic(QuantizationTestCase): return self.custom(x) class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.quant = QuantStub() self.conv = torch.nn.Conv2d(1, 1, 1) @@ -924,7 +924,7 @@ class TestQuantizeEagerPTQStatic(QuantizationTestCase): return x class RefM(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.quant = QuantStub() self.conv1 = torch.nn.Conv2d(1, 1, 1) @@ -1031,7 +1031,7 @@ class TestQuantizeEagerPTQStatic(QuantizationTestCase): `non_leaf_module_list`. 
""" class MyModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.quant = QuantStub() self.sigmoid = torch.nn.Sigmoid() @@ -1477,7 +1477,7 @@ class TestQuantizeEagerPTQDynamic(QuantizationTestCase): @skipIfNoFBGEMM def test_embedding_bag_dynamic(self): class EmbeddingBagWithLinear(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.emb = torch.nn.EmbeddingBag(num_embeddings=10, embedding_dim=12, include_last_offset=True, scale_grad_by_freq=False, mode='sum') @@ -1502,7 +1502,7 @@ class TestQuantizeEagerPTQDynamic(QuantizationTestCase): @skipIfNoFBGEMM def test_embedding_ops_dynamic(self): class EmbeddingWithLinear(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.emb = torch.nn.Embedding( num_embeddings=10, embedding_dim=12, scale_grad_by_freq=False) diff --git a/test/quantization/eager/test_quantize_eager_qat.py b/test/quantization/eager/test_quantize_eager_qat.py index 7340500f1362..e4ea127f99fe 100644 --- a/test/quantization/eager/test_quantize_eager_qat.py +++ b/test/quantization/eager/test_quantize_eager_qat.py @@ -555,7 +555,7 @@ class TestQuantizeEagerQAT(QuantizationTestCase): def test_add_scalar_uses_input_qparams(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.quant = torch.ao.quantization.QuantStub() self.ff = torch.ao.nn.quantized.FloatFunctional() @@ -576,7 +576,7 @@ class TestQuantizeEagerQAT(QuantizationTestCase): def test_mul_scalar_uses_input_qparams(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.quant = torch.ao.quantization.QuantStub() self.ff = torch.ao.nn.quantized.FloatFunctional() @@ -642,7 +642,7 @@ class TestQuantizeEagerQAT(QuantizationTestCase): class TestQuantizeEagerQATNumerics(QuantizationTestCase): def _test_activation_convert_numerics_impl(self, Act, data): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.act = Act() self.quant = QuantStub() @@ -664,7 +664,7 @@ class TestQuantizeEagerQATNumerics(QuantizationTestCase): def test_fixed_qparam_ops(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.sigmoid = torch.nn.Sigmoid() self.hardsigmoid = torch.nn.Hardsigmoid() @@ -717,7 +717,7 @@ class TestQuantizeEagerQATNumerics(QuantizationTestCase): def test_relu(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.relu = nn.ReLU() diff --git a/test/quantization/fx/test_equalize_fx.py b/test/quantization/fx/test_equalize_fx.py index 059c5bb68b9d..648afa81b5ae 100644 --- a/test/quantization/fx/test_equalize_fx.py +++ b/test/quantization/fx/test_equalize_fx.py @@ -835,7 +835,7 @@ class TestEqualizeFx(QuantizationTestCase): torch.manual_seed(1) class M(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.bot = torch.nn.Sequential(torch.nn.Linear(5, 5)) self.top = torch.nn.Sequential(torch.nn.Linear(5, 5)) diff --git a/test/quantization/fx/test_model_report_fx.py b/test/quantization/fx/test_model_report_fx.py index ee64bc3c7915..69fec404de68 100644 --- a/test/quantization/fx/test_model_report_fx.py +++ b/test/quantization/fx/test_model_report_fx.py @@ -84,7 +84,7 @@ FUSION_CONV_LINEAR_EXAMPLE = torch.nn.Sequential( # Test class # example model to use for tests class ThreeOps(nn.Module): - def __init__(self): + def 
__init__(self) -> None: super().__init__() self.linear = nn.Linear(3, 3) self.bn = nn.BatchNorm2d(3) @@ -100,7 +100,7 @@ class ThreeOps(nn.Module): return (torch.randn(1, 3, 3, 3),) class TwoThreeOps(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.block1 = ThreeOps() self.block2 = ThreeOps() @@ -233,7 +233,7 @@ class TestFxModelReportDetector(QuantizationTestCase): # we need to design the model class ConvLinearModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(3, 3, 2, 1) self.fc1 = torch.nn.Linear(9, 27) @@ -433,7 +433,7 @@ class TestFxModelReportDetector(QuantizationTestCase): # first we want a QAT model class QATConvLinearReluModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() # QuantStub converts tensors from floating point to quantized self.quant = torch.ao.quantization.QuantStub() @@ -505,7 +505,7 @@ Partition on Output class TestFxModelReportObserver(QuantizationTestCase): class NestedModifiedSingleLayerLinear(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.obs1 = ModelReportObserver() self.mod1 = SingleLayerLinearModel() @@ -636,7 +636,7 @@ class TestFxModelReportObserver(QuantizationTestCase): # model specific to this test class NestedModifiedObserverAfterRelu(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.obs1 = ModelReportObserver() self.mod1 = SingleLayerLinearModel() @@ -673,7 +673,7 @@ class TestFxModelReportObserver(QuantizationTestCase): # set up a basic model class TinyNestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.obs1 = ModelReportObserver() self.fc1 = torch.nn.Linear(5, 5).to(dtype=torch.float) @@ -688,7 +688,7 @@ class TestFxModelReportObserver(QuantizationTestCase): return x class LargerIncludeNestModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.obs1 = ModelReportObserver() self.nested = TinyNestModule() @@ -727,7 +727,7 @@ class TestFxModelReportObserver(QuantizationTestCase): return x class HighDimensionNet(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.obs1 = ModelReportObserver() self.fc1 = torch.nn.Linear(3, 7) @@ -786,7 +786,7 @@ class TestFxModelReportDetectDynamicStatic(QuantizationTestCase): @skipIfNoFBGEMM def test_nested_detection_case(self): class SingleLinear(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(3, 3) @@ -795,7 +795,7 @@ class TestFxModelReportDetectDynamicStatic(QuantizationTestCase): return x class TwoBlockNet(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.block1 = SingleLinear() self.block2 = SingleLinear() @@ -1266,7 +1266,7 @@ class TestFxDetectInputWeightEqualization(QuantizationTestCase): return x class TwoBlockComplexNet(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.block1 = TestFxDetectInputWeightEqualization.SimpleConv((3, 32)) self.block2 = TestFxDetectInputWeightEqualization.SimpleConv((3, 3)) @@ -1292,7 +1292,7 @@ class TestFxDetectInputWeightEqualization(QuantizationTestCase): return (torch.randn((1, 3, 28, 28)),) class ReluOnly(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.relu = torch.nn.ReLU() diff --git 
a/test/quantization/fx/test_numeric_suite_fx.py b/test/quantization/fx/test_numeric_suite_fx.py index f90a3751f9be..f88485b961b1 100644 --- a/test/quantization/fx/test_numeric_suite_fx.py +++ b/test/quantization/fx/test_numeric_suite_fx.py @@ -100,7 +100,7 @@ from torch.ao.quantization.fx.quantize_handler import _get_pattern_to_quantize_h # across various different files, speed of debugging on individual test cases # decreases. class LinearReluFunctional(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.w1 = nn.Parameter(torch.empty(4, 4)) self.b1 = nn.Parameter(torch.zeros(4)) @@ -113,7 +113,7 @@ class LinearReluFunctional(nn.Module): class LinearFunctional(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.w1 = nn.Parameter(torch.empty(4, 4)) self.b1 = nn.Parameter(torch.zeros(4)) @@ -125,7 +125,7 @@ class LinearFunctional(nn.Module): class LinearReluLinearFunctional(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.w = nn.Parameter(torch.Tensor(4, 4)) self.b = nn.Parameter(torch.zeros(4)) @@ -150,7 +150,7 @@ class AddMulFunctional(nn.Module): class AllConvAndLinearFusionModules(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() # conv1d self.conv1d_0 = nn.Conv1d(1, 1, 1) @@ -331,7 +331,7 @@ class TestFXGraphMatcher(QuantizationTestCase): @skipIfNoFBGEMM def test_simple_fun(self): class M(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.w = nn.Parameter(torch.empty(1, 4)) self.b = nn.Parameter(torch.zeros(1)) @@ -495,7 +495,7 @@ class TestFXGraphMatcher(QuantizationTestCase): @skipIfNoFBGEMM def test_nodes_with_equal_types_get_matched(self): class M(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = nn.Conv2d(1, 1, 1) self.conv2 = nn.Conv2d(1, 1, 1) @@ -1241,7 +1241,7 @@ class TestFXNumericSuiteCoreAPIs(FXNumericSuiteQuantizationTestCase): Verifies that logging inputs works correctly """ class M(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = nn.Conv2d(1, 1, 1) @@ -1263,7 +1263,7 @@ class TestFXNumericSuiteCoreAPIs(FXNumericSuiteQuantizationTestCase): signature for fp32 and int8 tensors. """ class M(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.max_pool_2d = nn.MaxPool2d(2) @@ -1347,7 +1347,7 @@ class TestFXNumericSuiteCoreAPIs(FXNumericSuiteQuantizationTestCase): int8 inputs. 
""" class M(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.relu = nn.ReLU() @@ -1401,7 +1401,7 @@ class TestFXNumericSuiteCoreAPIs(FXNumericSuiteQuantizationTestCase): return (x1, x2) class M2(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.m1 = M1() @@ -1446,7 +1446,7 @@ class TestFXNumericSuiteCoreAPIs(FXNumericSuiteQuantizationTestCase): return x class M(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = nn.Linear(1, 1) self.user_module = UserModule() @@ -1682,7 +1682,7 @@ class TestFXNumericSuiteCoreAPIs(FXNumericSuiteQuantizationTestCase): Verify that NS APIs work on user defined functions """ class M1(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.w1 = nn.Parameter(torch.empty(1, 1)) self.b1 = nn.Parameter(torch.zeros(1)) @@ -1695,7 +1695,7 @@ class TestFXNumericSuiteCoreAPIs(FXNumericSuiteQuantizationTestCase): return x class M2(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.w1 = nn.Parameter(torch.empty(1, 1)) self.b1 = nn.Parameter(torch.zeros(1)) @@ -1881,7 +1881,7 @@ class TestFXNumericSuiteCoreAPIs(FXNumericSuiteQuantizationTestCase): @skipIfNoFBGEMM def test_int8_shadows_fp32_coverage(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.adaptive_avg_pool = nn.AdaptiveAvgPool2d(1) self.conv = nn.Conv2d(1, 1, 1) @@ -2048,7 +2048,7 @@ class TestFXNumericSuiteCoreAPIs(FXNumericSuiteQuantizationTestCase): def test_linear_kwargs_shadow(self): class M(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.w1 = nn.Parameter(torch.empty(4, 4)) self.b1 = nn.Parameter(torch.zeros(4)) @@ -2104,7 +2104,7 @@ class TestFXNumericSuiteNShadows(FXNumericSuiteQuantizationTestCase): @withQNNPACKBackend def test_linear_mod(self): class M(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc1 = nn.Linear(2, 2) @@ -2122,7 +2122,7 @@ class TestFXNumericSuiteNShadows(FXNumericSuiteQuantizationTestCase): @withQNNPACKBackend def test_linear_relu_mod(self): class M(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc1 = nn.Linear(2, 2) self.fc2 = nn.Linear(2, 2) @@ -2148,7 +2148,7 @@ class TestFXNumericSuiteNShadows(FXNumericSuiteQuantizationTestCase): @withQNNPACKBackend def test_conv_bn_relu_mod(self): class M(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = nn.Conv2d(1, 1, 1) self.bn = nn.BatchNorm2d(1) @@ -2173,7 +2173,7 @@ class TestFXNumericSuiteNShadows(FXNumericSuiteQuantizationTestCase): @withQNNPACKBackend def test_functions(self): class M(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.w1 = nn.Parameter(torch.randn(2, 2)) self.b1 = nn.Parameter(torch.zeros(2)) @@ -2212,7 +2212,7 @@ class TestFXNumericSuiteNShadows(FXNumericSuiteQuantizationTestCase): @withQNNPACKBackend def test_partial_qconfig_mapping(self): class M(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc = nn.Linear(2, 2) self.w1 = nn.Parameter(torch.randn(2, 2)) @@ -2504,7 +2504,7 @@ class TestFXNumericSuiteNShadows(FXNumericSuiteQuantizationTestCase): @withQNNPACKBackend def test_custom_functions_and_tracer(self): class M(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc1 = nn.Linear(2, 2) self.fc2 
= nn.Linear(2, 2) @@ -2571,7 +2571,7 @@ class TestFXNumericSuiteNShadows(FXNumericSuiteQuantizationTestCase): @withQNNPACKBackend def test_extract_weights_linear(self): class M(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.w1 = nn.Parameter(torch.randn(2, 2)) self.b1 = nn.Parameter(torch.randn(2)) @@ -2710,7 +2710,7 @@ class TestFXNumericSuiteNShadows(FXNumericSuiteQuantizationTestCase): @withQNNPACKBackend def test_add_loggers_functions(self): class M(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.w1 = nn.Parameter(torch.randn(2, 2)) self.b1 = nn.Parameter(torch.randn(2)) diff --git a/test/quantization/fx/test_quantize_fx.py b/test/quantization/fx/test_quantize_fx.py index 6aeea970e650..bb3fac5c95e9 100644 --- a/test/quantization/fx/test_quantize_fx.py +++ b/test/quantization/fx/test_quantize_fx.py @@ -269,7 +269,7 @@ def _user_func_with_complex_return_type(x): class TestFuseFx(QuantizationTestCase): def test_fuse_conv_bn_relu(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1d = nn.Conv1d(1, 1, 1) self.conv2d = nn.Conv2d(1, 1, 1) @@ -349,7 +349,7 @@ class TestFuseFx(QuantizationTestCase): def test_fuse_linear_bn_eval(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = nn.Linear(1, 1) self.bn1d = nn.BatchNorm1d(1) @@ -587,7 +587,7 @@ class TestFuseFx(QuantizationTestCase): def test_fuse_module_relu(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1d = nn.Conv1d(1, 1, 1) self.conv2d = nn.Conv2d(1, 1, 1) @@ -662,14 +662,14 @@ class TestFuseFx(QuantizationTestCase): def test_problematic_fuse_example(self): class LinearRelu(nn.Sequential): - def __init__(self): + def __init__(self) -> None: super().__init__( nn.Linear(5, 5), nn.ReLU(), ) class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.lin_relu = LinearRelu() self.linear = nn.Linear(5, 5) @@ -702,7 +702,7 @@ class TestFuseFx(QuantizationTestCase): return MyConvReLU() class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 3, 3) self.relu = torch.nn.ReLU() @@ -772,7 +772,7 @@ class TestFuseFx(QuantizationTestCase): fused node as extra inputs. 
""" class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 3, 3) self.bn = torch.nn.BatchNorm2d(3) @@ -841,7 +841,7 @@ class TestFuseFx(QuantizationTestCase): """ class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(3, 3, 3) self.relu1 = torch.nn.ReLU() @@ -899,7 +899,7 @@ class TestQuantizeFx(QuantizationTestCase): conv - bn - add - relu pattern """ class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = nn.Conv2d(1, 1, 1) self.bn = nn.BatchNorm2d(1) @@ -937,7 +937,7 @@ class TestQuantizeFx(QuantizationTestCase): def test_fused_module_qat_swap(self): class Tmp(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.tmp = torch.nn.Linear(5, 5) self.relu = torch.nn.ReLU() @@ -948,7 +948,7 @@ class TestQuantizeFx(QuantizationTestCase): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.mods1 = torch.nn.Sequential(Tmp(), torch.nn.Linear(5, 5)) self.mods2 = torch.nn.Linear(5, 5) @@ -1070,7 +1070,7 @@ class TestQuantizeFx(QuantizationTestCase): linear_weight = torch.rand(10, 5) class LinearModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(5, 10) @@ -1704,7 +1704,7 @@ class TestQuantizeFx(QuantizationTestCase): def test_linear_bn(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = nn.Linear(4, 4) self.bn = nn.BatchNorm1d(4) @@ -1761,7 +1761,7 @@ class TestQuantizeFx(QuantizationTestCase): linear_weight = torch.rand(10, 5) class LinearModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(5, 10) @@ -1802,7 +1802,7 @@ class TestQuantizeFx(QuantizationTestCase): """ class Model(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = nn.Conv2d(1, 1, 1) self.bn = nn.BatchNorm2d(1) @@ -1839,7 +1839,7 @@ class TestQuantizeFx(QuantizationTestCase): """ Make sure quantization runs for models with dictionary output """ class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(1, 1, 1) @@ -1859,7 +1859,7 @@ class TestQuantizeFx(QuantizationTestCase): """ Make sure quantization runs for a corner case in attention module """ class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(1, 1, 1) @@ -1900,7 +1900,7 @@ class TestQuantizeFx(QuantizationTestCase): configurations """ class StandaloneModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(1, 1, 1) @@ -1908,7 +1908,7 @@ class TestQuantizeFx(QuantizationTestCase): return self.conv(x) class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(1, 1, 1) self.standalone = StandaloneModule() @@ -1919,7 +1919,7 @@ class TestQuantizeFx(QuantizationTestCase): return x class RefM(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(1, 1, 1) self.conv2 = torch.nn.Conv2d(1, 1, 1) @@ -2054,7 +2054,7 @@ class TestQuantizeFx(QuantizationTestCase): @skipIfNoFBGEMM def test_qconfig_none(self): class 
M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = nn.Conv2d(1, 1, 1) self.conv2 = nn.Conv2d(1, 1, 1) @@ -2083,7 +2083,7 @@ class TestQuantizeFx(QuantizationTestCase): def test_qconfig_module_type(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = nn.Conv2d(1, 1, 1) self.linear = nn.Linear(9, 3) @@ -2112,14 +2112,14 @@ class TestQuantizeFx(QuantizationTestCase): def test_qconfig_qat_module_type(self): class LinearRelu(nn.Sequential): - def __init__(self): + def __init__(self) -> None: super().__init__( nn.Linear(5, 5), nn.ReLU(), ) class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.lin_relu = LinearRelu() self.linear = nn.Linear(5, 5) @@ -2174,7 +2174,7 @@ class TestQuantizeFx(QuantizationTestCase): def test_qconfig_module_name_regex(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = nn.Conv2d(1, 1, 1) self.conv2 = nn.Conv2d(1, 1, 1) @@ -2203,7 +2203,7 @@ class TestQuantizeFx(QuantizationTestCase): def test_qconfig_precedence(self): for device in get_supported_device_types(): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = nn.Linear(1, 1) self.conv = nn.Conv2d(1, 1, 1) @@ -2244,7 +2244,7 @@ class TestQuantizeFx(QuantizationTestCase): def test_qconfig_module_name_object_type_order(self): class M1(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc1 = nn.Linear(1, 1) self.fc2 = nn.Linear(1, 1) @@ -2257,7 +2257,7 @@ class TestQuantizeFx(QuantizationTestCase): return x class M2(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc1 = nn.Linear(1, 1) self.fc2 = nn.Linear(1, 1) @@ -2272,7 +2272,7 @@ class TestQuantizeFx(QuantizationTestCase): return x class M3(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc1 = nn.Linear(1, 1) self.fc2 = nn.Linear(1, 1) @@ -2335,7 +2335,7 @@ class TestQuantizeFx(QuantizationTestCase): # test that function order overrides global qconfig class M4(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc1 = nn.Linear(1, 1) self.fc2 = nn.Linear(1, 1) @@ -2613,7 +2613,7 @@ class TestQuantizeFx(QuantizationTestCase): def test_default_qconfig_mapping_override_global(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(1, 1, 1) @@ -2873,7 +2873,7 @@ class TestQuantizeFx(QuantizationTestCase): def test_remove_qconfig(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.avg_pool = torch.nn.AvgPool2d(1) @@ -2904,7 +2904,7 @@ class TestQuantizeFx(QuantizationTestCase): def test_default_quant_after_none_qconfig(self): """ Make sure default quant is inserted properly""" class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(1, 1, 1) self.conv2 = torch.nn.Conv2d(1, 1, 1) @@ -2926,7 +2926,7 @@ class TestQuantizeFx(QuantizationTestCase): def test_qconfig_for_call_method(self): class Sub(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(1, 1, 1) @@ -2936,7 +2936,7 @@ class TestQuantizeFx(QuantizationTestCase): return x.transpose(2, 3) class 
M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.sub = Sub() self.conv1 = torch.nn.Conv2d(1, 1, 1) @@ -2996,7 +2996,7 @@ class TestQuantizeFx(QuantizationTestCase): def test_qconfig_for_call_func(self): class Linear(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.w = torch.ones(5, 5) self.b = torch.zeros(5) @@ -3005,7 +3005,7 @@ class TestQuantizeFx(QuantizationTestCase): return torch.nn.functional.linear(x, self.w, self.b) class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.mods1 = torch.nn.Sequential( Linear(), @@ -3037,7 +3037,7 @@ class TestQuantizeFx(QuantizationTestCase): def test_preserve_attributes(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(1, 1, 1) @@ -3139,7 +3139,7 @@ class TestQuantizeFx(QuantizationTestCase): @skipIfNoFBGEMM def test_custom_module_class(self): class CustomModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(3, 3) @@ -3194,7 +3194,7 @@ class TestQuantizeFx(QuantizationTestCase): return quantized class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(3, 3) self.custom = CustomModule() @@ -3205,7 +3205,7 @@ class TestQuantizeFx(QuantizationTestCase): return x class RefM(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear1 = torch.nn.Linear(3, 3) self.linear2 = torch.nn.Linear(3, 3) @@ -3309,7 +3309,7 @@ class TestQuantizeFx(QuantizationTestCase): has multiple users """ class CustomModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(3, 3) @@ -3349,7 +3349,7 @@ class TestQuantizeFx(QuantizationTestCase): return quantized class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(3, 3) self.custom = CustomModule() @@ -3393,7 +3393,7 @@ class TestQuantizeFx(QuantizationTestCase): multiple nodes with the same custom module target. """ class CustomModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(3, 3) @@ -3433,7 +3433,7 @@ class TestQuantizeFx(QuantizationTestCase): return quantized class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.custom = CustomModule() @@ -3486,7 +3486,7 @@ class TestQuantizeFx(QuantizationTestCase): return x class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.m1 = NonTraceable() self.m2 = NonTraceable2() @@ -3522,7 +3522,7 @@ class TestQuantizeFx(QuantizationTestCase): """Ensures that copy.deepcopy works correctly on a prepared model. 
""" class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(1, 1, 1) self._foobar = 'foobar' @@ -3548,7 +3548,7 @@ class TestQuantizeFx(QuantizationTestCase): """ Test state_dict and deepcopy works properly in the quantized model """ class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(5, 5) @@ -3576,7 +3576,7 @@ class TestQuantizeFx(QuantizationTestCase): non-quantizable node """ class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(1, 1, 1) self.act = torch.nn.GELU() @@ -3597,7 +3597,7 @@ class TestQuantizeFx(QuantizationTestCase): def test_sequential(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.convs = torch.nn.Sequential( torch.nn.Conv2d(1, 1, 1), @@ -3624,7 +3624,7 @@ class TestQuantizeFx(QuantizationTestCase): Test the option to have inputs and outputs of the graph quantized """ class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(1, 1, 1) self.conv2 = torch.nn.Conv2d(1, 1, 1) @@ -3717,7 +3717,7 @@ class TestQuantizeFx(QuantizationTestCase): @skipIfNoFBGEMM def test_qparams_buffers(self): class Linear(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.w = torch.ones(5, 5) self.b = torch.zeros(5) @@ -3726,7 +3726,7 @@ class TestQuantizeFx(QuantizationTestCase): return torch.nn.functional.linear(x, self.w, self.b) class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.mods1 = torch.nn.Sequential( Linear(), @@ -3780,7 +3780,7 @@ class TestQuantizeFx(QuantizationTestCase): @skipIfNoFBGEMM def test_packed_weight_fused_op(self): class Linear(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.w = torch.ones(5, 5) self.b = torch.zeros(5) @@ -3789,7 +3789,7 @@ class TestQuantizeFx(QuantizationTestCase): return F.linear(x, self.w, self.b) class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.mods1 = torch.nn.Sequential( Linear(), @@ -3818,7 +3818,7 @@ class TestQuantizeFx(QuantizationTestCase): def test_mul_add_fp16_config(self): with override_quantized_engine('fbgemm'): class Linear(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.w = torch.ones(5, 5) self.b = torch.zeros(5) @@ -3827,7 +3827,7 @@ class TestQuantizeFx(QuantizationTestCase): return torch.nn.functional.linear(x, self.w, self.b) class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.mods1 = torch.nn.Sequential( Linear(), @@ -4116,7 +4116,7 @@ class TestQuantizeFx(QuantizationTestCase): correctly in quantization passes. """ class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = nn.Conv2d(1, 1, 1) @@ -4140,7 +4140,7 @@ class TestQuantizeFx(QuantizationTestCase): quantized layers. 
""" class M1(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = nn.Conv2d(1, 1, 1) @@ -4151,7 +4151,7 @@ class TestQuantizeFx(QuantizationTestCase): return x class M2(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = nn.Conv2d(1, 1, 1) self.conv2 = nn.Conv2d(1, 1, 1) @@ -4179,7 +4179,7 @@ class TestQuantizeFx(QuantizationTestCase): unquantized runs through the APIs without errors. """ class Child(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.relu = nn.ReLU() @@ -4189,7 +4189,7 @@ class TestQuantizeFx(QuantizationTestCase): return x class Parent(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.child = Child() self.conv = nn.Conv2d(1, 1, 1) @@ -4217,7 +4217,7 @@ class TestQuantizeFx(QuantizationTestCase): # test linear packed weight class M1(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.w = torch.rand(4, 30) self.b = torch.rand(4) @@ -4234,7 +4234,7 @@ class TestQuantizeFx(QuantizationTestCase): # test conv packed weight class M2(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.w = torch.rand(3, 3, 3, 3) self.b = torch.rand(3) @@ -4298,7 +4298,7 @@ class TestQuantizeFx(QuantizationTestCase): """ with override_quantized_engine('fbgemm'): class Linear(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.w = torch.ones(5, 5) self.b = torch.zeros(5) @@ -4307,7 +4307,7 @@ class TestQuantizeFx(QuantizationTestCase): return torch.nn.functional.linear(x, self.w, self.b) class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.mods1 = torch.nn.Sequential( Linear(), @@ -4352,7 +4352,7 @@ class TestQuantizeFx(QuantizationTestCase): """ Test that the FQN of input_scale/zero_point is set to that of first linear use. """ class Linear(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.w = torch.ones(5, 5) self.b = torch.zeros(5) @@ -4361,7 +4361,7 @@ class TestQuantizeFx(QuantizationTestCase): return torch.nn.functional.linear(x, self.w, self.b) class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.mods1 = torch.nn.Sequential( Linear(), @@ -4405,7 +4405,7 @@ class TestQuantizeFx(QuantizationTestCase): """ class M(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.relu = nn.ReLU() @@ -4430,7 +4430,7 @@ class TestQuantizeFx(QuantizationTestCase): graph, get folded and we erase the extra dequant nodes. 
""" class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.w = torch.ones(5, 5) self.b = torch.zeros(5) @@ -4477,7 +4477,7 @@ class TestQuantizeFx(QuantizationTestCase): # non-quantizeable node, quantized output class M1(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.identity = torch.nn.Identity() @@ -4498,7 +4498,7 @@ class TestQuantizeFx(QuantizationTestCase): # quantizeable node, quantized output class M2(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(1, 1, 1) @@ -4520,7 +4520,7 @@ class TestQuantizeFx(QuantizationTestCase): # quantizeable node, quantized dictionary output class M3(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(1, 1, 1) @@ -4542,7 +4542,7 @@ class TestQuantizeFx(QuantizationTestCase): def test_deepcopy_preserve_attributes(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.attr = 3 @@ -4573,7 +4573,7 @@ class TestQuantizeFx(QuantizationTestCase): """Verify that specifying complicated output types does not crash. """ class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = nn.Conv2d(1, 1, 1) @@ -4591,7 +4591,7 @@ class TestQuantizeFx(QuantizationTestCase): the Tensor before the next operator """ class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(2, 2, 2) self.conv2 = torch.nn.Conv2d(2, 2, 2) @@ -4617,7 +4617,7 @@ class TestQuantizeFx(QuantizationTestCase): def test_trace_quantize_per_tensor(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(1, 1, 1) @@ -4637,7 +4637,7 @@ class TestQuantizeFx(QuantizationTestCase): """ class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.avgpool2d = torch.nn.AvgPool2d(kernel_size=3) @@ -4718,7 +4718,7 @@ class TestQuantizeFx(QuantizationTestCase): """Test support for qint8 activation in reference pattern """ class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(1, 2, 2, 2) self.linear = torch.nn.Linear(8, 5) @@ -4746,7 +4746,7 @@ class TestQuantizeFx(QuantizationTestCase): """ class LSTM(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.lstm = nn.LSTM(50, 50, 1) @@ -4788,7 +4788,7 @@ class TestQuantizeFx(QuantizationTestCase): tensors of the output tuple. """ class MyModel(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.lstm = nn.LSTM(50, 50, 1) self.linear1 = nn.Linear(50, 10) @@ -4826,7 +4826,7 @@ class TestQuantizeFx(QuantizationTestCase): output tuple, either as a whole or part of it. """ class ModuleAfterLSTM(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.identity = torch.nn.Identity() @@ -4834,7 +4834,7 @@ class TestQuantizeFx(QuantizationTestCase): return self.identity(x) class ConsumeWholeTuple(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.lstm = nn.LSTM(50, 50, 1) self.module_after_lstm = ModuleAfterLSTM() @@ -4891,7 +4891,7 @@ class TestQuantizeFx(QuantizationTestCase): and use the child class in the custom module mapping. 
""" class MyModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.my_lstm = torch.nn.LSTM(50, 50, 1) @@ -5101,7 +5101,7 @@ class TestQuantizeFx(QuantizationTestCase): """ with override_quantized_engine('fbgemm'): class LinearRelu(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(5, 5) self.relu = torch.nn.ReLU() @@ -5111,7 +5111,7 @@ class TestQuantizeFx(QuantizationTestCase): return self.relu(x) class Linear(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.w = torch.ones(5, 5) self.b = torch.zeros(5) @@ -5120,7 +5120,7 @@ class TestQuantizeFx(QuantizationTestCase): return torch.nn.functional.linear(x, self.w, self.b) class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.mods1 = torch.nn.Sequential(LinearRelu(), LinearRelu()) self.mods2 = Linear() @@ -5159,7 +5159,7 @@ class TestQuantizeFx(QuantizationTestCase): """ with override_quantized_engine('fbgemm'): class LinearRelu(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(5, 5) self.relu = torch.nn.ReLU() @@ -5169,7 +5169,7 @@ class TestQuantizeFx(QuantizationTestCase): return self.relu(x) class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear_relu = LinearRelu() @@ -5200,7 +5200,7 @@ class TestQuantizeFx(QuantizationTestCase): """ with override_quantized_engine('fbgemm'): class LinearRelu(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(5, 5) self.relu = torch.nn.ReLU() @@ -5210,7 +5210,7 @@ class TestQuantizeFx(QuantizationTestCase): return self.relu(x) class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.mod1 = LinearRelu() self.mod2 = LinearRelu() @@ -5240,7 +5240,7 @@ class TestQuantizeFx(QuantizationTestCase): matches models with fbgemm/qnnpack module """ class M1(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(10, 5) @@ -5248,7 +5248,7 @@ class TestQuantizeFx(QuantizationTestCase): return self.linear(x) class M2(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(10, 5) self.relu = torch.nn.ReLU() @@ -5329,7 +5329,7 @@ class TestQuantizeFx(QuantizationTestCase): and uses the modules FQN to determine the observer name. 
""" class Linear(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.w = torch.ones(5, 5) self.b = torch.zeros(5) @@ -5340,7 +5340,7 @@ class TestQuantizeFx(QuantizationTestCase): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.mods1 = torch.nn.Sequential( Linear(), @@ -5408,7 +5408,7 @@ class TestQuantizeFx(QuantizationTestCase): def test_convert_qconfig_mapping(self): class Linear(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.w = torch.ones(5, 5) self.b = torch.zeros(5) @@ -5418,7 +5418,7 @@ class TestQuantizeFx(QuantizationTestCase): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.mods1 = torch.nn.Sequential( Linear(), @@ -5555,7 +5555,7 @@ class TestQuantizeFx(QuantizationTestCase): def test_reuse_input_qconfig(self): class M1(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 3, 3) @@ -5602,7 +5602,7 @@ class TestQuantizeFx(QuantizationTestCase): def test_stack_trace_preserved_linear(self): class M(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = nn.Linear(1, 1) @@ -5640,7 +5640,7 @@ class TestQuantizeFx(QuantizationTestCase): def test_qat_skip_untraced(self): class UnTraceableModuleClass(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = nn.Linear(2, 2) @@ -5648,7 +5648,7 @@ class TestQuantizeFx(QuantizationTestCase): return self.linear(x) class UnTraceableModuleName(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = nn.Linear(2, 2) @@ -5656,7 +5656,7 @@ class TestQuantizeFx(QuantizationTestCase): return self.linear(x) class M(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.untraceable_module_class = UnTraceableModuleClass() self.untraceable_module_name = UnTraceableModuleClass() @@ -5701,7 +5701,7 @@ class TestQuantizeFx(QuantizationTestCase): def test_qconfig_dict_setup(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.Conv1d = torch.nn.Conv1d(1, 1, 1) self.Conv2d = torch.nn.Conv2d(1, 1, 1) @@ -5748,7 +5748,7 @@ class TestQuantizeFx(QuantizationTestCase): def test_prepare_mode(self): class LinearModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(5, 10) @@ -5807,7 +5807,7 @@ class TestQuantizeFx(QuantizationTestCase): the observers inserted into the model. """ class MyModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(30, 4).float() @@ -5870,7 +5870,7 @@ class TestQuantizeFx(QuantizationTestCase): Test QConfig eps validation against the BackendConfig's min scale value. """ class MyModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(30, 4).float() @@ -5930,7 +5930,7 @@ class TestQuantizeFx(QuantizationTestCase): Test whether default QNNPACK QConfigs are compatible with the QNNPACK BackendConfig. 
""" class MyModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(30, 4).float() @@ -5961,7 +5961,7 @@ class TestQuantizeFx(QuantizationTestCase): return class MyModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(30, 4).float() @@ -5992,7 +5992,7 @@ class TestQuantizeFx(QuantizationTestCase): return class MyModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(30, 4).float() @@ -6028,7 +6028,7 @@ class TestQuantizeFx(QuantizationTestCase): """ class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.weight = torch.tensor((5, 5)) self.bias = torch.tensor((5,)) @@ -6072,7 +6072,7 @@ class TestQuantizeFx(QuantizationTestCase): def test__convert_to_reference_decomposed_fx(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(5, 10) @@ -6101,7 +6101,7 @@ class TestQuantizeFx(QuantizationTestCase): @skipIfNoQNNPACK def test__convert_to_reference_decomposed_fx_dynamic_quant(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(5, 10) @@ -6167,7 +6167,7 @@ class TestQuantizeFx(QuantizationTestCase): on BackendConfig """ class M(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.tanh = torch.nn.Tanh() @@ -6193,7 +6193,7 @@ class TestQuantizeFx(QuantizationTestCase): def test_channel_shuffle_lowering(self): # Three versions of channel shuffle class M1(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.op = torch.nn.ChannelShuffle(2) @@ -6427,7 +6427,7 @@ class TestQuantizeFx(QuantizationTestCase): and the dtypes will be converted correctly between the layers. 
""" class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear1 = torch.nn.Linear(5, 5) self.linear2 = torch.nn.Linear(5, 5) @@ -6681,7 +6681,7 @@ class TestQuantizeFxOps(QuantizationTestCase): def test_linear_module(self): with override_quantized_engine('fbgemm'): class LinearModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(30, 4).float() @@ -6703,7 +6703,7 @@ class TestQuantizeFxOps(QuantizationTestCase): return x class LinearBnModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(4, 4).float() self.bn = torch.nn.BatchNorm1d(4) @@ -7321,12 +7321,12 @@ class TestQuantizeFxOps(QuantizationTestCase): @skipIfNoFBGEMM def test_add_relu_multiple_uses_of_relu(self): class Sub(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.relu = torch.nn.ReLU(inplace=True) class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.sub = Sub() @@ -7369,7 +7369,7 @@ class TestQuantizeFxOps(QuantizationTestCase): @skipIfNoFBGEMM def test_quantized_add_qat(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(1, 1, 1) self.conv2 = torch.nn.Conv2d(1, 1, 1) @@ -7392,7 +7392,7 @@ class TestQuantizeFxOps(QuantizationTestCase): @skipIfNoFBGEMM def test_quantized_mul_qat(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(1, 1, 1) self.conv2 = torch.nn.Conv2d(1, 1, 1) @@ -7444,7 +7444,7 @@ class TestQuantizeFxOps(QuantizationTestCase): input of cat. we only quantize the output of cat when its inputs are quantized. 
""" class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(2, 2, 2).float() self.conv2 = torch.nn.Conv2d(2, 2, 2).float() @@ -7757,7 +7757,7 @@ class TestQuantizeFxOps(QuantizationTestCase): def test_norm_weight_bias(self): class Linear(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.w = torch.ones(5, 5) self.b = torch.zeros(5) @@ -7766,7 +7766,7 @@ class TestQuantizeFxOps(QuantizationTestCase): return torch.nn.functional.linear(x, self.w, self.b) class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.mods1 = Linear() self.scale = torch.randn(5, 5) @@ -7970,7 +7970,7 @@ class TestQuantizeFxOps(QuantizationTestCase): pattern for it """ class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.bmm = torch.bmm @@ -7999,7 +7999,7 @@ class TestQuantizeFxOps(QuantizationTestCase): @skipIfNoFBGEMM def test_clamp(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(2, 2, 2).float() self.relu6 = torch.nn.ReLU6() @@ -8033,7 +8033,7 @@ class TestQuantizeFxOps(QuantizationTestCase): def test_fixed_qparams_ops_fp16(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.sigmoid = torch.nn.Sigmoid() self.tanh = torch.nn.Tanh() @@ -8062,7 +8062,7 @@ class TestQuantizeFxOps(QuantizationTestCase): def test_fixed_qparams_ops_qint8(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.sigmoid = torch.nn.Sigmoid() self.tanh = torch.nn.Tanh() @@ -8094,7 +8094,7 @@ class TestQuantizeFxOps(QuantizationTestCase): """ Test that wrong qconfigs for fixed qparams ops results in the ops not being quantized. 
""" class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.sigmoid = torch.nn.Sigmoid() self.tanh = torch.nn.Tanh() @@ -8128,7 +8128,7 @@ class TestQuantizeFxOps(QuantizationTestCase): without actually checking for execution of these ops """ class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.maxpool1d = torch.nn.MaxPool1d(kernel_size=3) self.maxpool2d = torch.nn.MaxPool2d(kernel_size=3) @@ -8254,7 +8254,7 @@ class TestQuantizeFxOps(QuantizationTestCase): avg_pool2d with customized config """ class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.avg_pool2d = torch.nn.AvgPool2d(3) @@ -8299,7 +8299,7 @@ class TestQuantizeFxOps(QuantizationTestCase): without actually checking for execution of these ops """ class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 3, 3) self.avg_pool1d = torch.nn.AvgPool1d(3) @@ -8441,7 +8441,7 @@ class TestQuantizeFxOps(QuantizationTestCase): @skipIfNoFBGEMM def test_fixed_qparams_ops(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 3, 3) self.sigmoid = torch.nn.Sigmoid() @@ -8549,7 +8549,7 @@ class TestQuantizeFxOps(QuantizationTestCase): def test_float_functional(self): class TorchAdd(nn.Module): """Wrapper around torch.add so that all ops can be found at build""" - def __init__(self): + def __init__(self) -> None: super().__init__() self.add_func = nnq.FloatFunctional() @@ -8557,7 +8557,7 @@ class TestQuantizeFxOps(QuantizationTestCase): return self.add_func.add(x, y) class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.ff1 = TorchAdd() self.ff2 = nnq.FloatFunctional() @@ -8627,7 +8627,7 @@ class TestQuantizeFxOps(QuantizationTestCase): def test_embedding(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.emb = torch.nn.Embedding(num_embeddings=10, embedding_dim=12) @@ -8670,7 +8670,7 @@ class TestQuantizeFxOps(QuantizationTestCase): def test_embedding_bag(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.emb = torch.nn.EmbeddingBag(num_embeddings=10, embedding_dim=12, include_last_offset=True) @@ -8933,7 +8933,7 @@ class TestQuantizeFxOps(QuantizationTestCase): def test_ref_pattern_multi_use(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(5, 5) self.linear1 = torch.nn.Linear(5, 5) @@ -8991,12 +8991,12 @@ class TestQuantizeFxOps(QuantizationTestCase): def test_pixel_shuffle(self): class MyBias(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.bias = nn.Parameter(torch.randn(8)) class MyModel(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = nn.Conv2d(8, 8, 1, bias=False) self.bias = MyBias() @@ -9026,12 +9026,12 @@ class TestQuantizeFxOps(QuantizationTestCase): def test_pixel_shuffle_module(self) -> None: class MyBias(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.bias = nn.Parameter(torch.randn(8)) class MyModel(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = nn.Conv2d(8, 8, 1, bias=False) self.ps = nn.PixelShuffle(upscale_factor=2) @@ -9063,12 
+9063,12 @@ class TestQuantizeFxOps(QuantizationTestCase): def test_pixel_unshuffle(self): class MyBias(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.bias = nn.Parameter(torch.randn(64)) class MyModel(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = nn.Conv2d(8, 8, 1, bias=False) self.bias = MyBias() @@ -9101,12 +9101,12 @@ class TestQuantizeFxOps(QuantizationTestCase): def test_pixel_unshuffle_module(self) -> None: class MyBias(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.bias = nn.Parameter(torch.randn(64)) class MyModel(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = nn.Conv2d(8, 8, 1, bias=False) self.unshuffle = nn.PixelUnshuffle(downscale_factor=2) @@ -9143,12 +9143,12 @@ class TestQuantizeFxOps(QuantizationTestCase): def test_narrow(self): class MyBias(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.bias = nn.Parameter(torch.randn(4)) class MyModel(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = nn.Conv2d(8, 8, 1, bias=False) self.bias = MyBias() @@ -9185,7 +9185,7 @@ class TestQuantizeFxModels(QuantizationTestCase): def test_static_gpu_convert_basic(self): class Net(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.relu1 = nn.ReLU() self.conv1 = nn.Conv2d(1, 6, 5) @@ -9211,7 +9211,7 @@ class TestQuantizeFxModels(QuantizationTestCase): def test_switch_device_prepare_convert(self): class Net(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.relu1 = nn.ReLU() self.conv1 = nn.Conv2d(1, 6, 5) @@ -9238,7 +9238,7 @@ class TestQuantizeFxModels(QuantizationTestCase): @unittest.skipIf(not TEST_CUDA, "gpu is not available.") def test_prepare_serialize_switch_device_convert(self): class Net(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = nn.Conv2d(1, 6, 5) self.linear1 = nn.Linear(120, 1) @@ -9539,7 +9539,7 @@ class TestQuantizeFxModels(QuantizationTestCase): def test_qat_embeddingbag_linear(self): for device in get_supported_device_types(): class EmbeddingBagLinear(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.emb = torch.nn.EmbeddingBag(num_embeddings=10, embedding_dim=12, mode='sum') self.linear = torch.nn.Linear(12, 1).to(dtype=torch.float) @@ -9580,7 +9580,7 @@ class TestQuantizeFxModels(QuantizationTestCase): def test_qat_embedding_linear(self): for device in get_supported_device_types(): class EmbeddingLinear(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.emb = torch.nn.Embedding(num_embeddings=10, embedding_dim=12) self.linear = torch.nn.Linear(12, 1).to(dtype=torch.float) @@ -9627,7 +9627,7 @@ class TestQuantizeFxModels(QuantizationTestCase): return class Linear(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.w = torch.ones(5, 5) self.b = torch.zeros(5) @@ -9636,7 +9636,7 @@ class TestQuantizeFxModels(QuantizationTestCase): return torch.nn.functional.linear(x, self.w, self.b) class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.mods1 = torch.nn.Sequential(Linear(), Linear()) self.mods2 = Linear() diff --git a/test/quantization/fx/test_subgraph_rewriter.py b/test/quantization/fx/test_subgraph_rewriter.py index 
eb4346a0f241..dc07ed2676a9 100644 --- a/test/quantization/fx/test_subgraph_rewriter.py +++ b/test/quantization/fx/test_subgraph_rewriter.py @@ -339,7 +339,7 @@ class TestSubgraphRewriter(JitTestCase): Credit to Jerry Zhang (GitHub: jerryzh168) for this test case """ class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.dtype = torch.float16 @@ -378,7 +378,7 @@ class TestSubgraphRewriter(JitTestCase): def test_subgraph_rewriter_replaces_referenced_submodules(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.sigmoid = torch.nn.Sigmoid() self.submod = torch.nn.ReLU() @@ -388,7 +388,7 @@ class TestSubgraphRewriter(JitTestCase): return self.submod(self.sigmoid(x)) class Pattern(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.sigmoid = torch.nn.Sigmoid() self.submod = torch.nn.ReLU() @@ -397,7 +397,7 @@ class TestSubgraphRewriter(JitTestCase): return self.submod(self.sigmoid(x)) class Replacement(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.id = torch.nn.Identity() self.submod = torch.nn.ReLU() @@ -406,7 +406,7 @@ class TestSubgraphRewriter(JitTestCase): return self.submod(self.id(x)) class Comparison(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.id = torch.nn.Identity() self.submod = torch.nn.ReLU() diff --git a/test/quantization/jit/test_deprecated_jit_quant.py b/test/quantization/jit/test_deprecated_jit_quant.py index 2e5be93647c6..491f0e928ccb 100644 --- a/test/quantization/jit/test_deprecated_jit_quant.py +++ b/test/quantization/jit/test_deprecated_jit_quant.py @@ -118,7 +118,7 @@ class TestDeprecatedJitQuantized(JitTestCase): K1, N1 = 2, 2 class FooBar(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear1 = torch.nn.Linear(K1, N1).float() diff --git a/test/quantization/jit/test_ondevice_quantization.py b/test/quantization/jit/test_ondevice_quantization.py index afa2931e8773..1d85c3f6e52b 100644 --- a/test/quantization/jit/test_ondevice_quantization.py +++ b/test/quantization/jit/test_ondevice_quantization.py @@ -34,7 +34,7 @@ class myMod(torch.nn.Module): class MyConvLinearModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 5, 3) weight = torch.nn.Parameter(torch.ones(5, 5)) diff --git a/test/quantization/jit/test_quantize_jit.py b/test/quantization/jit/test_quantize_jit.py index 8033670e2951..64d975cc93f3 100644 --- a/test/quantization/jit/test_quantize_jit.py +++ b/test/quantization/jit/test_quantize_jit.py @@ -86,7 +86,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): def test_skip_dequant_constant_prop(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 5, 3).float() @@ -306,7 +306,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): return x class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = CustomConv() self.bn = CustomBn() @@ -442,7 +442,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): def test_insert_observers(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 5, 3) @@ -464,7 +464,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): pass class Sub(torch.nn.Module): - def 
__init__(self): + def __init__(self) -> None: super().__init__() self.fc = torch.nn.Linear(5, 5) @@ -475,7 +475,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): return self.addOne(x) class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 5, 3) self.sub = Sub() @@ -512,7 +512,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): return self.op(inp) class Outer(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.inner_a = Inner(Operator(1)) self.inner_b = Inner(Operator(3.0)) @@ -531,7 +531,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): def test_insert_observers_child_qconfig(self): class Sub(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc = torch.nn.Linear(5, 5) @@ -539,7 +539,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): return self.fc(x) class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 5, 3) self.sub = Sub() @@ -566,7 +566,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): ) def test_insert_observers_skip_values(self): class ConvFunctionalReLU(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 5, 3) @@ -574,7 +574,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): return F.relu(self.conv(x)) class ConvReLUModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 5, 3) self.relu = torch.nn.ReLU() @@ -583,7 +583,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): return self.relu(self.conv(x)) class AddReLUModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.relu = torch.nn.ReLU() self.conv = torch.nn.Conv2d(3, 3, 3).float() @@ -594,7 +594,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): return self.relu(out) class AddFunctionalReLU(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 3, 3).float() @@ -644,7 +644,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): def test_insert_observers_weight_dtype(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 5, 3) @@ -685,7 +685,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): def test_insert_observers_shared_class_type(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(3, 5, 3).float() self.conv2 = torch.nn.Conv2d(3, 5, 3).float() @@ -712,7 +712,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): """ class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 3, 3).float() @@ -744,7 +744,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): """Make sure we propagate observed property through general ops""" class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(3, 3, 3).float() self.conv2 = torch.nn.Conv2d(3, 3, 3).float() @@ -782,7 +782,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): """Make sure we propagate observed property through general ops""" class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(3, 3, 3).float() 
self.conv2 = torch.nn.Conv2d(3, 3, 3).float() @@ -829,7 +829,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): return x class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(3, 3, 1).float() self.conv2 = torch.nn.Conv2d(3, 3, 1).float() @@ -890,7 +890,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): return self.conv(x) class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.quant_prop = QuantProp(True) self.res = Res(False) @@ -954,7 +954,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): return self.conv(x) class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.res1 = Res(True) self.res2 = Res(False) @@ -1031,7 +1031,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): def test_insert_quant_dequant(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 5, 3).float() @@ -1065,7 +1065,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): def test_insert_quant_dequant_shared_class_type(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(3, 3, 3).float() self.conv2 = torch.nn.Conv2d(3, 3, 3).float() @@ -1131,7 +1131,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): def test_dedup_module_uses(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.relu = torch.nn.ReLU() @@ -1156,7 +1156,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): def test_replicate_dequantize(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 3, 1).float() @@ -1246,7 +1246,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): """ class Res(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 3, 1).float() self.conv2 = torch.nn.Conv2d(3, 3, 1).float() @@ -1261,7 +1261,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): return self.conv2(x) class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.res1 = Res() self.res2 = Res() @@ -1280,7 +1280,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): def test_finalize_for_linear(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc = torch.nn.Linear(5, 5).float() @@ -1312,7 +1312,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): def test_finalize_debug(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 3, 3).float() self.avgpool = torch.nn.AvgPool2d(3) @@ -1340,7 +1340,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): def test_module_list(self): class SimpleLinearLayer(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc = torch.nn.Linear(5, 5).float() @@ -1348,7 +1348,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): return self.fc(x) class ComplexModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.layers = torch.nn.ModuleList( [SimpleLinearLayer() for i in range(2)] @@ -1374,7 +1374,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): def test_conv_trace(self): class M(torch.nn.Module): - def __init__(self): + def 
__init__(self) -> None: super().__init__() self.conv1d = torch.nn.Conv1d(3, 3, 3).float() self.conv2d = torch.nn.Conv2d(3, 3, 3).float() @@ -1406,7 +1406,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): def test_convtranspose_trace(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.convtranspose1d = torch.nn.ConvTranspose1d(3, 3, 3).float() self.convtranspose2d = torch.nn.ConvTranspose2d(3, 3, 3).float() @@ -1443,7 +1443,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): ) def test_replicate_dequant_same_value(self): class Mul(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 3, 3).float() @@ -1459,7 +1459,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): def test_interface_with_fork(self): class SubModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.embedding1 = torch.nn.EmbeddingBag( num_embeddings=10, @@ -1473,7 +1473,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): return self.embedding1(x, y) class OrigMod(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.embedding1 = torch.nn.EmbeddingBag( num_embeddings=10, @@ -1494,7 +1494,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): class TestModule(torch.nn.Module): proxy_mod: ModInterface - def __init__(self): + def __init__(self) -> None: super().__init__() self.proxy_mod = OrigMod() self.sub = SubModule() @@ -1505,7 +1505,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): return b class MainModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.test = TestModule() @@ -1573,7 +1573,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): """ class MainModule(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fork_ops = ForkModule() @@ -1592,7 +1592,7 @@ class TestQuantizeJitPasses(QuantizationTestCase): return torch.nn.functional.linear(x, w, b) class ForkModule(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.test = TestModule() @@ -1764,7 +1764,7 @@ class TestQuantizeJitOps(QuantizationTestCase): """ class QuantizedAdd(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(2, 2, 2).float() self.conv2 = torch.nn.Conv2d(2, 2, 2).float() @@ -1824,7 +1824,7 @@ class TestQuantizeJitOps(QuantizationTestCase): return self.relu(x) class AddFunctionalRelu(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(2, 2, 2).float() self.conv2 = torch.nn.Conv2d(2, 2, 2).float() @@ -1838,7 +1838,7 @@ class TestQuantizeJitOps(QuantizationTestCase): return F.relu(x) class InplaceAddFunctionalRelu(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(2, 2, 2).float() self.conv2 = torch.nn.Conv2d(2, 2, 2).float() @@ -1852,7 +1852,7 @@ class TestQuantizeJitOps(QuantizationTestCase): return F.relu(x) class AddInplaceFunctionalRelu(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(2, 2, 2).float() self.conv2 = torch.nn.Conv2d(2, 2, 2).float() @@ -1866,7 +1866,7 @@ class TestQuantizeJitOps(QuantizationTestCase): return F.relu(x, True) class InplaceAddInplaceFunctionalRelu(torch.nn.Module): - def __init__(self): + def __init__(self) -> 
None: super().__init__() self.conv1 = torch.nn.Conv2d(2, 2, 2).float() self.conv2 = torch.nn.Conv2d(2, 2, 2).float() @@ -1913,7 +1913,7 @@ class TestQuantizeJitOps(QuantizationTestCase): @skipIfNoFBGEMM def test_quantized_add(self): class QuantizedAdd(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(2, 2, 2).float() self.conv2 = torch.nn.Conv2d(2, 2, 2).float() @@ -1924,7 +1924,7 @@ class TestQuantizeJitOps(QuantizationTestCase): return x + y class QuantizedInplaceAdd(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(2, 2, 2).float() self.conv2 = torch.nn.Conv2d(2, 2, 2).float() @@ -1970,7 +1970,7 @@ class TestQuantizeJitOps(QuantizationTestCase): @skipIfNoFBGEMM def test_quantized_add_scalar(self): class QuantizedAddScalar(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(2, 2, 2).float() @@ -1979,7 +1979,7 @@ class TestQuantizeJitOps(QuantizationTestCase): return x + 3 class QuantizedInplaceAddScalar(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(2, 2, 2).float() @@ -2046,7 +2046,7 @@ class TestQuantizeJitOps(QuantizationTestCase): return self.relu(x) class AddFunctionalRelu(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(2, 2, 2).float() self.conv2 = torch.nn.Conv2d(2, 2, 2).float() @@ -2058,7 +2058,7 @@ class TestQuantizeJitOps(QuantizationTestCase): return F.relu(x) class InplaceAddFunctionalRelu(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(2, 2, 2).float() self.conv2 = torch.nn.Conv2d(2, 2, 2).float() @@ -2070,7 +2070,7 @@ class TestQuantizeJitOps(QuantizationTestCase): return F.relu(x) class AddInplaceFunctionalRelu(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(2, 2, 2).float() self.conv2 = torch.nn.Conv2d(2, 2, 2).float() @@ -2082,7 +2082,7 @@ class TestQuantizeJitOps(QuantizationTestCase): return F.relu(x, True) class InplaceAddInplaceFunctionalRelu(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(2, 2, 2).float() self.conv2 = torch.nn.Conv2d(2, 2, 2).float() @@ -2143,7 +2143,7 @@ class TestQuantizeJitOps(QuantizationTestCase): return self.relu(x) class AddScalarFunctionalRelu(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(2, 2, 2).float() @@ -2152,7 +2152,7 @@ class TestQuantizeJitOps(QuantizationTestCase): return F.relu(x + 3) class InplaceAddScalarFunctionalRelu(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(2, 2, 2).float() @@ -2162,7 +2162,7 @@ class TestQuantizeJitOps(QuantizationTestCase): return F.relu(x) class AddScalarInplaceFunctionalRelu(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(2, 2, 2).float() @@ -2171,7 +2171,7 @@ class TestQuantizeJitOps(QuantizationTestCase): return F.relu(x + 3, True) class InplaceAddScalarInplaceFunctionalRelu(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(2, 2, 2).float() @@ -2214,7 +2214,7 @@ class 
TestQuantizeJitOps(QuantizationTestCase): """ class QuantizedCat(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(2, 2, 2).float() self.conv2 = torch.nn.Conv2d(2, 2, 2).float() @@ -2338,7 +2338,7 @@ class TestQuantizeJitOps(QuantizationTestCase): @skipIfNoFBGEMM def test_quantized_mul(self): class QuantizedMul(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(2, 2, 2).float() self.conv2 = torch.nn.Conv2d(2, 2, 2).float() @@ -2349,7 +2349,7 @@ class TestQuantizeJitOps(QuantizationTestCase): return x * y class QuantizedInplaceMul(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(2, 2, 2).float() self.conv2 = torch.nn.Conv2d(2, 2, 2).float() @@ -2395,7 +2395,7 @@ class TestQuantizeJitOps(QuantizationTestCase): @skipIfNoFBGEMM def test_quantized_mul_scalar(self): class QuantizedMulScalar(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(2, 2, 2).float() @@ -2404,7 +2404,7 @@ class TestQuantizeJitOps(QuantizationTestCase): return x * 3 class QuantizedInplaceMulScalar(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(2, 2, 2).float() @@ -2471,7 +2471,7 @@ class TestQuantizeJitOps(QuantizationTestCase): return self.relu(x) class MulFunctionalRelu(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(2, 2, 2).float() self.conv2 = torch.nn.Conv2d(2, 2, 2).float() @@ -2483,7 +2483,7 @@ class TestQuantizeJitOps(QuantizationTestCase): return F.relu(x) class InplaceMulFunctionalRelu(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(2, 2, 2).float() self.conv2 = torch.nn.Conv2d(2, 2, 2).float() @@ -2495,7 +2495,7 @@ class TestQuantizeJitOps(QuantizationTestCase): return F.relu(x) class MulInplaceFunctionalRelu(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(2, 2, 2).float() self.conv2 = torch.nn.Conv2d(2, 2, 2).float() @@ -2507,7 +2507,7 @@ class TestQuantizeJitOps(QuantizationTestCase): return F.relu(x, True) class InplaceMulInplaceFunctionalRelu(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(2, 2, 2).float() self.conv2 = torch.nn.Conv2d(2, 2, 2).float() @@ -2568,7 +2568,7 @@ class TestQuantizeJitOps(QuantizationTestCase): return self.relu(x) class MulScalarFunctionalRelu(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(2, 2, 2).float() @@ -2577,7 +2577,7 @@ class TestQuantizeJitOps(QuantizationTestCase): return F.relu(x * 3) class InplaceMulScalarFunctionalRelu(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(2, 2, 2).float() @@ -2587,7 +2587,7 @@ class TestQuantizeJitOps(QuantizationTestCase): return F.relu(x) class MulScalarInplaceFunctionalRelu(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(2, 2, 2).float() @@ -2596,7 +2596,7 @@ class TestQuantizeJitOps(QuantizationTestCase): return F.relu(x * 3, True) class InplaceMulScalarInplaceFunctionalRelu(torch.nn.Module): - def __init__(self): + def 
__init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(2, 2, 2).float() @@ -2718,7 +2718,7 @@ class TestQuantizeJitOps(QuantizationTestCase): """Make sure dequantize can support Tuple of tensor""" class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(3, 3, 3).float() self.conv2 = torch.nn.Conv2d(3, 3, 3).float() @@ -2734,7 +2734,7 @@ class TestQuantizeJitOps(QuantizationTestCase): @skipIfNoFBGEMM def test_clamp(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(2, 2, 2).float() self.relu6 = torch.nn.ReLU6() @@ -2775,7 +2775,7 @@ class TestQuantizeJitOps(QuantizationTestCase): """ class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.maxpool1d = torch.nn.MaxPool1d(kernel_size=3) self.maxpool2d = torch.nn.MaxPool2d(kernel_size=3) @@ -2891,7 +2891,7 @@ class TestQuantizeJitOps(QuantizationTestCase): """ class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 3, 3) self.avg_pool1d = torch.nn.AvgPool1d(3) @@ -3016,7 +3016,7 @@ class TestQuantizeJitOps(QuantizationTestCase): @skipIfNoFBGEMM def test_cat_linear(self): class LinearModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.weight = torch.randn(5, 5) @@ -3040,7 +3040,7 @@ class TestQuantizeJitOps(QuantizationTestCase): class TestQuantizeDynamicJitPasses(QuantizationTestCase): def test_prepare_dynamic(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc = torch.nn.Linear(5, 5) @@ -3067,7 +3067,7 @@ class TestQuantizeDynamicJitPasses(QuantizationTestCase): def test_prepare_dynamic_child_qconfig(self): class Sub(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc = torch.nn.Linear(5, 5) @@ -3075,7 +3075,7 @@ class TestQuantizeDynamicJitPasses(QuantizationTestCase): return self.fc(x) class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 5, 3) self.sub = Sub() @@ -3105,7 +3105,7 @@ class TestQuantizeDynamicJitPasses(QuantizationTestCase): def test_insert_quant_dequant_linear_dynamic(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc1 = torch.nn.Linear(5, 5).float() self.fc2 = torch.nn.Linear(5, 5).float() @@ -3156,7 +3156,7 @@ class TestQuantizeDynamicJitPasses(QuantizationTestCase): @override_qengines def test_dynamic_multi_op(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc1 = torch.nn.Linear(5, 5).to(dtype=torch.float) @@ -3175,7 +3175,7 @@ class TestQuantizeDynamicJitPasses(QuantizationTestCase): @override_qengines def test_dynamic_quant_multi_uses(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc = torch.nn.Linear(5, 5).float() @@ -3203,7 +3203,7 @@ class TestQuantizeDynamicJitPasses(QuantizationTestCase): return self.linear(x) class DynamicModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.weight = torch.nn.Parameter(torch.ones(5, 5)) self.mod1 = myMod(self.weight) @@ -3236,7 +3236,7 @@ class TestQuantizeDynamicJitPasses(QuantizationTestCase): @override_qengines def test_dynamic_with_if(self): class 
Res(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.weight = torch.nn.Parameter(torch.ones(5, 5)) @@ -3247,7 +3247,7 @@ class TestQuantizeDynamicJitPasses(QuantizationTestCase): return torch.nn.functional.linear(x, self.weight) class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.res1 = Res() self.res2 = Res() @@ -3298,7 +3298,7 @@ class TestQuantizeDynamicJitPasses(QuantizationTestCase): def test_dynamic_weight_observer(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc = torch.nn.Linear(5, 5).float() self.fc2 = torch.nn.Linear(5, 5).float() @@ -3332,7 +3332,7 @@ class TestQuantizeDynamicJitPasses(QuantizationTestCase): def test_convert_dynamic_fp16(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc = torch.nn.Linear(5, 5) @@ -3347,7 +3347,7 @@ class TestQuantizeDynamicJitPasses(QuantizationTestCase): def test_quantize_dynamic_fp16(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc = torch.nn.Linear(5, 5) diff --git a/test/quantization/pt2e/test_duplicate_dq.py b/test/quantization/pt2e/test_duplicate_dq.py index ac866afc17c1..905098c3e6ac 100644 --- a/test/quantization/pt2e/test_duplicate_dq.py +++ b/test/quantization/pt2e/test_duplicate_dq.py @@ -30,7 +30,7 @@ from torch.testing._internal.common_utils import IS_WINDOWS class TestHelperModules: class Conv2dWithObsSharingOps(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 3, 3) self.hardtanh = torch.nn.Hardtanh() @@ -46,7 +46,7 @@ class TestHelperModules: return x class Conv2dWithSharedDQ(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(3, 3, 3) self.conv2 = torch.nn.Conv2d(3, 3, 1) @@ -64,7 +64,7 @@ class TestHelperModules: return w, add_output, extra_output class ModuleForDifferentQconfig(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(3, 3, 3) self.conv2 = torch.nn.Conv2d(3, 3, 1) diff --git a/test/quantization/pt2e/test_graph_utils.py b/test/quantization/pt2e/test_graph_utils.py index 5d25e542e705..09a39c5b0cec 100644 --- a/test/quantization/pt2e/test_graph_utils.py +++ b/test/quantization/pt2e/test_graph_utils.py @@ -16,7 +16,7 @@ class TestGraphUtils(TestCase): @unittest.skipIf(IS_WINDOWS, "torch.compile is not supported on Windows") def test_conv_bn_conv_relu(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(3, 3, 3) self.bn1 = torch.nn.BatchNorm2d(3) @@ -62,7 +62,7 @@ class TestGraphUtils(TestCase): @unittest.skipIf(IS_WINDOWS, "torch.compile is not supported on Windows") def test_conv_bn_relu(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.bn1 = torch.nn.BatchNorm2d(3) self.conv2 = torch.nn.Conv2d(3, 3, 3) @@ -97,7 +97,7 @@ class TestGraphUtils(TestCase): @unittest.skipIf(IS_WINDOWS, "torch.compile is not supported on Windows") def test_customized_equivalet_types_dict(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 3, 3) diff --git a/test/quantization/pt2e/test_metadata_porting.py 
b/test/quantization/pt2e/test_metadata_porting.py index d06cb7e00cf2..5f2d1e2d3cf5 100644 --- a/test/quantization/pt2e/test_metadata_porting.py +++ b/test/quantization/pt2e/test_metadata_porting.py @@ -18,7 +18,7 @@ from torch.testing._internal.common_utils import IS_WINDOWS class TestHelperModules: class Conv2dWithObsSharingOps(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 3, 3) self.hardtanh = torch.nn.Hardtanh() @@ -463,7 +463,7 @@ class TestMetaDataPorting(QuantizationTestCase): """ class MatmulWithConstInput(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.register_parameter("w", torch.nn.Parameter(torch.rand(8, 16))) diff --git a/test/quantization/pt2e/test_quantize_pt2e.py b/test/quantization/pt2e/test_quantize_pt2e.py index 2f06908568fd..5da2a2c60791 100644 --- a/test/quantization/pt2e/test_quantize_pt2e.py +++ b/test/quantization/pt2e/test_quantize_pt2e.py @@ -1350,7 +1350,7 @@ class TestQuantizePT2E(PT2EQuantizationTestCase): """ class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(2, 2) self.dont_fold_me = torch.nn.Parameter(torch.randn(2, 2)) @@ -1389,7 +1389,7 @@ class TestQuantizePT2E(PT2EQuantizationTestCase): """ class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.weight = torch.randn(2, 2) @@ -1420,7 +1420,7 @@ class TestQuantizePT2E(PT2EQuantizationTestCase): """ class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(2, 2) @@ -1815,7 +1815,7 @@ class TestQuantizePT2E(PT2EQuantizationTestCase): """ class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.dropout = torch.nn.Dropout(0.5, inplace=inplace) @@ -1881,7 +1881,7 @@ class TestQuantizePT2E(PT2EQuantizationTestCase): """ class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.bn = torch.nn.BatchNorm2d(3) @@ -1949,7 +1949,7 @@ class TestQuantizePT2E(PT2EQuantizationTestCase): def test_allow_exported_model_train_eval(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.bn = torch.nn.BatchNorm2d(3) self.dropout = torch.nn.Dropout(0.5) @@ -2250,7 +2250,7 @@ class TestQuantizePT2E(PT2EQuantizationTestCase): return model class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(5, 5) @@ -2314,7 +2314,7 @@ class TestQuantizePT2E(PT2EQuantizationTestCase): """ class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 3, 3) diff --git a/test/quantization/pt2e/test_quantize_pt2e_qat.py b/test/quantization/pt2e/test_quantize_pt2e_qat.py index d0398652d842..bf68ba512d15 100644 --- a/test/quantization/pt2e/test_quantize_pt2e_qat.py +++ b/test/quantization/pt2e/test_quantize_pt2e_qat.py @@ -1100,7 +1100,7 @@ class TestQuantizePT2EQATModels(PT2EQATTestCase): class TestQuantizeMixQATAndPTQ(QuantizationTestCase): class TwoLinear(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear1 = torch.nn.Linear(16, 8, bias=False) self.linear2 = torch.nn.Linear(8, 8) @@ -1109,7 +1109,7 @@ class TestQuantizeMixQATAndPTQ(QuantizationTestCase): return self.linear2(self.linear1(x)) class 
QATPTQTestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 16, 3) self.linears = TestQuantizeMixQATAndPTQ.TwoLinear() diff --git a/test/quantization/pt2e/test_representation.py b/test/quantization/pt2e/test_representation.py index b4273686c18f..d34027ff3444 100644 --- a/test/quantization/pt2e/test_representation.py +++ b/test/quantization/pt2e/test_representation.py @@ -81,7 +81,7 @@ class TestPT2ERepresentation(QuantizationTestCase): def test_static_linear(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(5, 5) @@ -103,7 +103,7 @@ class TestPT2ERepresentation(QuantizationTestCase): def test_dynamic_linear(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(5, 5) @@ -128,7 +128,7 @@ class TestPT2ERepresentation(QuantizationTestCase): def test_conv2d(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv2d = torch.nn.Conv2d(3, 3, 3) @@ -150,7 +150,7 @@ class TestPT2ERepresentation(QuantizationTestCase): def test_add(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x, y): @@ -176,7 +176,7 @@ class TestPT2ERepresentation(QuantizationTestCase): def test_add_relu(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x, y): @@ -224,7 +224,7 @@ class TestPT2ERepresentation(QuantizationTestCase): """Test representation for quantize_per_channel and dequantize_per_channel op""" class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(5, 5) @@ -275,7 +275,7 @@ class TestPT2ERepresentation(QuantizationTestCase): """Test representation for quantize and dequantize op""" class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x, y): diff --git a/test/quantization/pt2e/test_x86inductor_quantizer.py b/test/quantization/pt2e/test_x86inductor_quantizer.py index d38c03182d52..aa254762db11 100644 --- a/test/quantization/pt2e/test_x86inductor_quantizer.py +++ b/test/quantization/pt2e/test_x86inductor_quantizer.py @@ -2032,7 +2032,7 @@ class TestQuantizePT2EX86Inductor(X86InductorQuantTestCase): """ class Sub(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear1 = torch.nn.Linear(5, 10) self.relu1 = torch.nn.ReLU(inplace=False) @@ -2045,7 +2045,7 @@ class TestQuantizePT2EX86Inductor(X86InductorQuantTestCase): return x class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(5, 5) self.sub = Sub() @@ -2094,7 +2094,7 @@ class TestQuantizePT2EX86Inductor(X86InductorQuantTestCase): """Test that if a module name has an underscore, we can still quantize it.""" class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() # This module name has underscores, which can be part of a mangled name. 
self.foo_bar = torch.nn.Linear(2, 2) @@ -2147,7 +2147,7 @@ class TestQuantizePT2EX86Inductor(X86InductorQuantTestCase): """ class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear1 = torch.nn.Linear(5, 10) self.linear2 = torch.nn.Linear(10, 5) @@ -2201,7 +2201,7 @@ class TestQuantizePT2EX86Inductor(X86InductorQuantTestCase): """ class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear1 = torch.nn.Linear(5, 10) self.linear2 = torch.nn.Linear(10, 5) @@ -2380,7 +2380,7 @@ class TestQuantizePT2EX86Inductor(X86InductorQuantTestCase): """ class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear1 = torch.nn.Linear(5, 10) self.linear2 = torch.nn.Linear(10, 5) diff --git a/test/quantization/pt2e/test_xnnpack_quantizer.py b/test/quantization/pt2e/test_xnnpack_quantizer.py index 61b3868120e5..5e850a684828 100644 --- a/test/quantization/pt2e/test_xnnpack_quantizer.py +++ b/test/quantization/pt2e/test_xnnpack_quantizer.py @@ -299,7 +299,7 @@ class TestXNNPACKQuantizer(PT2EQuantizationTestCase): def test_set_module_name(self): class Sub(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(5, 5) @@ -307,7 +307,7 @@ class TestXNNPACKQuantizer(PT2EQuantizationTestCase): return self.linear(x) class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(5, 5) self.sub = Sub() @@ -344,7 +344,7 @@ class TestXNNPACKQuantizer(PT2EQuantizationTestCase): """Test that if a module name has an underscore, we can still quantize it""" class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() # This module name has underscores, which can be part of a mangled # name. 
@@ -385,7 +385,7 @@ class TestXNNPACKQuantizer(PT2EQuantizationTestCase): def test_set_module_type(self): class Sub(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(5, 5) @@ -393,7 +393,7 @@ class TestXNNPACKQuantizer(PT2EQuantizationTestCase): return self.linear(x) class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(5, 5) self.sub = Sub() @@ -428,7 +428,7 @@ class TestXNNPACKQuantizer(PT2EQuantizationTestCase): def test_set_module_type_case_2(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d( in_channels=3, @@ -952,7 +952,7 @@ class TestXNNPACKQuantizer(PT2EQuantizationTestCase): def test_add_mul_long(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.t = torch.tensor([100]) diff --git a/test/test_autograd.py b/test/test_autograd.py index 4045bd7864be..f3cf20490b24 100644 --- a/test/test_autograd.py +++ b/test/test_autograd.py @@ -164,7 +164,7 @@ class TestAutograd(TestCase): @torch.no_grad() class Foo: - def __init__(self): + def __init__(self) -> None: assert not torch.is_grad_enabled() def foo(self): @@ -186,7 +186,7 @@ class TestAutograd(TestCase): class Foo2: @torch.no_grad() - def __init__(self): + def __init__(self) -> None: assert not torch.is_grad_enabled() @torch.no_grad() @@ -6816,7 +6816,7 @@ for shape in [(1,), ()]: def test_checkpointing_without_reentrant_with_context_fn(self): class VerboseTorchDispatchMode(TorchDispatchMode): - def __init__(self): + def __init__(self) -> None: self.operators = [] def __torch_dispatch__(self, func, types, args=(), kwargs=None): @@ -7074,7 +7074,7 @@ for shape in [(1,), ()]: @parametrize("use_reentrant", [True, False]) def test_checkpointing_without_reentrant_detached_tensor(self, use_reentrant): class NoGradModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = nn.Linear(2, 2, bias=False) self.lin2 = nn.Linear(2, 2, bias=False) @@ -7160,7 +7160,7 @@ for shape in [(1,), ()]: """ class LinearModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = nn.Linear(2, 2, bias=False) @@ -7219,7 +7219,7 @@ for shape in [(1,), ()]: """ class MyModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.layer = torch.nn.Linear(5, 5, bias=False) @@ -9443,14 +9443,14 @@ for shape in [(1,), ()]: pass class error_on_pack_hook(torch.autograd.graph.saved_tensors_hooks): - def __init__(self): + def __init__(self) -> None: def pack_hook(x): raise CustomError("pack") super().__init__(pack_hook, lambda x: x) class error_on_unpack_hook(torch.autograd.graph.saved_tensors_hooks): - def __init__(self): + def __init__(self) -> None: def unpack_hook(x): raise CustomError("unpack") @@ -9847,7 +9847,7 @@ TORCH_LIBRARY(test_autograd_cpp_node, m) { any_hook_handles: List[RemovableHandle] = [] class MultiOutputModule(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.lin = nn.Linear(3, 3) @@ -9866,7 +9866,7 @@ TORCH_LIBRARY(test_autograd_cpp_node, m) { return out class Model(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.mod1 = MultiOutputModule() self.mod2 = MultiOutputModule() @@ -13341,7 +13341,7 @@ class TestNestedCheckpoint(TestCase): counter = [0] class 
SinCounterMode(TorchDispatchMode): - def __init__(self): + def __init__(self) -> None: self.count = 0 def __torch_dispatch__(self, func, types, args=(), kwargs=None): @@ -13615,7 +13615,7 @@ class TestSelectiveActivationCheckpoint(TestCase): counters = [] class Policy: - def __init__(self): + def __init__(self) -> None: self.counter = [0] self.recompute_counter = [0] diff --git a/test/test_compile_benchmark_util.py b/test/test_compile_benchmark_util.py index 05c42184eda1..3e7af5679edb 100644 --- a/test/test_compile_benchmark_util.py +++ b/test/test_compile_benchmark_util.py @@ -22,7 +22,7 @@ except ImportError: class TestCompileBenchmarkUtil(TestCase): def test_training_and_inference(self): class ToyModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.weight = torch.nn.Parameter(torch.Tensor(2, 2)) diff --git a/test/test_cpp_extensions_jit.py b/test/test_cpp_extensions_jit.py index 3db94346a0d5..9b190b29f3ad 100644 --- a/test/test_cpp_extensions_jit.py +++ b/test/test_cpp_extensions_jit.py @@ -580,7 +580,7 @@ class TestCppExtensionJIT(common.TestCase): # Create a torch.nn.Module which uses the C++ module as a submodule. class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.x = torch.nn.Parameter(torch.tensor(1.0)) self.net = extension.Net(3, 5) diff --git a/test/test_cuda.py b/test/test_cuda.py index f0765f5ea512..8e4e9ab5b6fa 100644 --- a/test/test_cuda.py +++ b/test/test_cuda.py @@ -1176,7 +1176,7 @@ except RuntimeError as e: MultiplyInStream = self._make_multiply_in_stream() class StreamModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.event = torch.cuda.Event() self.stream0 = torch.cuda.Stream() @@ -3901,7 +3901,7 @@ exit(2) return grad_output * ctx.constant, None class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.a = torch.nn.Parameter(torch.randn(())) diff --git a/test/test_custom_ops.py b/test/test_custom_ops.py index 6ce675e7b6f4..b259579566c0 100644 --- a/test/test_custom_ops.py +++ b/test/test_custom_ops.py @@ -888,7 +888,7 @@ class TestCustomOp(CustomOpTestCaseBase): # Sequence[int] gets automagically turned into int[] in the schema. # This test checks that we actually do support arbitrary sequence types. 
class MySequence(collections.abc.Sequence): - def __init__(self): + def __init__(self) -> None: self._container = [1, 2, 3] def __getitem__(self, idx): diff --git a/test/test_dataloader.py b/test/test_dataloader.py index 4198bb6fe0d8..21d35c282e26 100644 --- a/test/test_dataloader.py +++ b/test/test_dataloader.py @@ -481,7 +481,7 @@ class TestStackDataset(TestCase): def test_getitems(self): class GetItemsDataset(Dataset): - def __init__(self): + def __init__(self) -> None: self.data = torch.randn(4) def __getitem__(self, item): @@ -510,7 +510,7 @@ class TestStackDataset(TestCase): def test_getitems_raises_index_error(self): class GetItemsDataset(Dataset): - def __init__(self): + def __init__(self) -> None: self.data = torch.randn(4) def __getitem__(self, item): @@ -532,7 +532,7 @@ class TestStackDataset(TestCase): def test_getitems_value_error(self): class GetItemsDataset(Dataset): - def __init__(self): + def __init__(self) -> None: self.data = torch.randn(4) def __getitem__(self, item): @@ -2995,7 +2995,7 @@ class IntegrationTestDataLoaderDataPipe(TestCase): class StringDataset(Dataset): - def __init__(self): + def __init__(self) -> None: self.s = "12345" def __len__(self): @@ -3108,7 +3108,7 @@ class TestDictDataLoader(TestCase): class DummyDataset(torch.utils.data.Dataset): - def __init__(self): + def __init__(self) -> None: self.data = list(range(10)) def __len__(self): @@ -3488,7 +3488,7 @@ class TestSetAffinity(TestCase): class ConvDataset(Dataset): - def __init__(self): + def __init__(self) -> None: self.x = torch.ones(1, 1, 24000) # Call convolution on parent process self[0] diff --git a/test/test_datapipe.py b/test/test_datapipe.py index 37cf896eda24..30ae3e62040b 100644 --- a/test/test_datapipe.py +++ b/test/test_datapipe.py @@ -3856,7 +3856,7 @@ class TestIterDataPipeCountSampleYielded(TestCase): def test_iterdatapipe_sample_yielded_return_self(self): class _CustomGeneratorDataPipe(IterDataPipe): # This class's `__iter__` is not a generator function - def __init__(self): + def __init__(self) -> None: self.source = iter(range(10)) def __iter__(self): @@ -3871,7 +3871,7 @@ class TestIterDataPipeCountSampleYielded(TestCase): def test_iterdatapipe_sample_yielded_next(self): class _CustomNextDataPipe(IterDataPipe): # This class's `__iter__` returns `self` and has a `__next__` - def __init__(self): + def __init__(self) -> None: self.source = iter(range(10)) def __iter__(self): @@ -3889,7 +3889,7 @@ class TestIterDataPipeCountSampleYielded(TestCase): def test_iterdatapipe_sample_yielded_next_exception(self): class _CustomNextDataPipe(IterDataPipe): # This class's `__iter__` returns `self` and has a `__next__` - def __init__(self): + def __init__(self) -> None: self.source = iter(range(10)) self.count = 0 @@ -3924,7 +3924,7 @@ class TestIterDataPipeCountSampleYielded(TestCase): class _CustomNonGeneratorTestDataPipe(IterDataPipe): - def __init__(self): + def __init__(self) -> None: self.n = 10 self.source = list(range(self.n)) @@ -3937,7 +3937,7 @@ class _CustomNonGeneratorTestDataPipe(IterDataPipe): class _CustomSelfNextTestDataPipe(IterDataPipe): - def __init__(self): + def __init__(self) -> None: self.n = 10 self.iter = iter(range(self.n)) diff --git a/test/test_decomp.py b/test/test_decomp.py index f22796ac663a..d8790fb5e8c4 100644 --- a/test/test_decomp.py +++ b/test/test_decomp.py @@ -1185,7 +1185,7 @@ class DecompOneOffTests(TestCase): # add support for float16 over there we should update this test as well. 
class ScaledDotProductAttention(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward( diff --git a/test/test_deploy.py b/test/test_deploy.py index a3b0ee6c33fb..b852802c0c20 100644 --- a/test/test_deploy.py +++ b/test/test_deploy.py @@ -15,7 +15,7 @@ class TestFreezer(TestCase): code_str = textwrap.dedent( """ class MyCls: - def __init__(self): + def __init__(self) -> None: pass """ ) diff --git a/test/test_expanded_weights.py b/test/test_expanded_weights.py index 02cfca058c74..fbeb8f77cb80 100644 --- a/test/test_expanded_weights.py +++ b/test/test_expanded_weights.py @@ -893,7 +893,7 @@ class TestExpandedWeightModule(TestCase): def test_per_sample_api_compute_batch_size(self): class CustomModule(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = nn.Linear(5, 5) @@ -926,7 +926,7 @@ class TestExpandedWeightModule(TestCase): elem2: torch.Tensor class CustomModule(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = nn.Linear(5, 5) diff --git a/test/test_fake_tensor.py b/test/test_fake_tensor.py index 0780601a9bb6..7e7a2cf70fa7 100644 --- a/test/test_fake_tensor.py +++ b/test/test_fake_tensor.py @@ -709,7 +709,7 @@ class FakeTensorTest(TestCase): check_copy(mod, mod_copied) class ModuleNew(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.a = torch.rand([10, 2]) self.b = self.a @@ -781,7 +781,7 @@ class FakeTensorTest(TestCase): ) def test_mixed_real_and_fake_inputs(self): class _TestPattern(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(1, 1, 1) self.bn = torch.nn.BatchNorm2d(1) @@ -1323,7 +1323,7 @@ class FakeTensorOperatorInvariants(TestCase): ) def test_flash_attention(self): class Repro(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, arg1, arg2, arg3): @@ -1378,7 +1378,7 @@ class FakeTensorOperatorInvariants(TestCase): @unittest.skipIf(not RUN_CUDA, "requires cuda") def test_conv_c1_backward(self): class Repro(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, arg1, arg2, arg3): @@ -1408,7 +1408,7 @@ class FakeTensorOperatorInvariants(TestCase): def test_no_dispatch_with_like_function(self): class CountingMode(TorchDispatchMode): - def __init__(self): + def __init__(self) -> None: self.count = 0 def __torch_dispatch__(self, func, types, args=(), kwargs=None): @@ -1430,7 +1430,7 @@ make_propagate_real_tensors_cls(FakeTensorOperatorInvariants) class FakeTensorPropTest(TestCase): def test_fake_tensor_prop_on_nn_module(self): class ToyNnModuleWithParameters(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.layer1 = torch.nn.Linear(4, 3) self.layer2 = torch.nn.Linear(3, 2) @@ -1486,7 +1486,7 @@ class FakeTensorPropTest(TestCase): def test_fake_tensor_prop_on_nn_module_with_optional_args(self): class OptionalArgumentInBetween(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.layer1 = torch.nn.Linear(4, 3) self.layer2 = torch.nn.Linear(3, 2) @@ -1546,7 +1546,7 @@ class FakeTensorPropTest(TestCase): def test_torch_load_with_fake_mode(self): class TheModelClass(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc1 = torch.nn.Linear(5, 10) diff --git a/test/test_flop_counter.py b/test/test_flop_counter.py 
index 43428a40be52..f9d66bef003f 100644 --- a/test/test_flop_counter.py +++ b/test/test_flop_counter.py @@ -704,7 +704,7 @@ class TestFlopCounter(TestCase): return {"a": torch.mm(x, x)} class Mod(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.a = Foo() self.b = Foo() diff --git a/test/test_functional_optim.py b/test/test_functional_optim.py index 5e2a1e67e015..1d8a6fe84087 100644 --- a/test/test_functional_optim.py +++ b/test/test_functional_optim.py @@ -13,7 +13,7 @@ from torch.testing._internal.common_utils import run_tests, TestCase class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() torch.manual_seed(0) self.lin1 = nn.Linear(3, 3, bias=False) diff --git a/test/test_fx.py b/test/test_fx.py index 80e46c9cfb2e..e5e263f3a0c4 100644 --- a/test/test_fx.py +++ b/test/test_fx.py @@ -193,7 +193,7 @@ class TestFX(JitTestCase): def test_graph_module(self): class MySub(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.w = torch.nn.Parameter(torch.rand(4, 3)) @@ -201,7 +201,7 @@ class TestFX(JitTestCase): return self.w + x class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.lin = torch.nn.Linear(4, 3) self.sub_mod = MySub() @@ -409,7 +409,7 @@ class TestFX(JitTestCase): return False class MyReluMod(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.relu = torch.nn.ReLU() @@ -477,7 +477,7 @@ class TestFX(JitTestCase): def test_wrap_with_submodule(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False) @@ -849,7 +849,7 @@ class TestFX(JitTestCase): bs, d_hid = 10, 23 class ExampleCode(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.mm_param = torch.nn.Parameter(torch.randn(d_hid, d_hid)) self.mm_param2 = torch.nn.Parameter(torch.randn(d_hid, d_hid)) @@ -897,7 +897,7 @@ class TestFX(JitTestCase): def test_tensor_attribute(self): class TensorAttribute(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.tensor = torch.rand(3, 4) @@ -909,7 +909,7 @@ class TestFX(JitTestCase): traced(torch.rand(4, 4)) class WrapperForQualname(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.ta = TensorAttribute() @@ -975,7 +975,7 @@ class TestFX(JitTestCase): def test_pickle_graphmodule(self): class Nested(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.st = torch.nn.Linear(4, 4) @@ -1042,7 +1042,7 @@ class TestFX(JitTestCase): def test_deepcopy_with_submods_params(self): class Bar(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 4)) @@ -1050,7 +1050,7 @@ class TestFX(JitTestCase): return torch.relu(x) + self.param class Baz(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 4)) self.bar = Bar() @@ -1083,7 +1083,7 @@ class TestFX(JitTestCase): return torch.rand(3, 4) class UnpacksList(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.sa = SomeArgs() @@ -1100,7 +1100,7 @@ class TestFX(JitTestCase): return torch.rand(3, 4) class UnpacksDict(torch.nn.Module): - def __init__(self): + def 
__init__(self) -> None: super().__init__() self.sk = SomeKwargs() @@ -1127,7 +1127,7 @@ class TestFX(JitTestCase): def test_pretty_print_node(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.param: torch.nn.Parameter = torch.nn.Parameter( torch.rand(3, 4)) @@ -1217,7 +1217,7 @@ class TestFX(JitTestCase): # Test non-proxy len class FXLenTest2(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.l = [3, 4, 5] @@ -1570,7 +1570,7 @@ class TestFX(JitTestCase): return o.y class Root(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.inner = HasCustomArgObjectWhenLeaf() @@ -1627,7 +1627,7 @@ class TestFX(JitTestCase): def test_example_shape_prop(self): class TestCase(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.attr = torch.randn(3, 4) self.submod = torch.nn.Linear(4, 4) @@ -1657,7 +1657,7 @@ class TestFX(JitTestCase): def test_shape_prop_layout(self): class ConvTest(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv_mod = torch.nn.Conv2d(5, 5, 3) @@ -1689,7 +1689,7 @@ class TestFX(JitTestCase): return (3, torch.sum(x)) class UnderTest(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.rt = ReturnTwo() @@ -1716,7 +1716,7 @@ class TestFX(JitTestCase): def test_shape_prop_layout_3d(self): class ConvTest3d(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv_mod = torch.nn.Conv3d(5, 5, 3) @@ -1742,7 +1742,7 @@ class TestFX(JitTestCase): def test_nn_module_stack(self): class SubModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv_mod = torch.nn.Conv2d(64, 64, (3, 3), padding=1, bias=False) @@ -1750,7 +1750,7 @@ class TestFX(JitTestCase): return self.conv_mod(x) class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.sub_mod = SubModule() @@ -1772,7 +1772,7 @@ class TestFX(JitTestCase): def test_transformer_preserves_nn_module_stack_for_get_attr(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.weight = torch.nn.Parameter(torch.ones(1, 1)) @@ -1796,7 +1796,7 @@ class TestFX(JitTestCase): def test_interpreter(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 4)) self.linear = torch.nn.Linear(4, 5) @@ -1814,7 +1814,7 @@ class TestFX(JitTestCase): def test_interpreter_other_graph(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 4)) self.linear = torch.nn.Linear(4, 5) @@ -1832,7 +1832,7 @@ class TestFX(JitTestCase): def test_interpreter_run_node_override(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 4)) self.linear = torch.nn.Linear(4, 5) @@ -1882,7 +1882,7 @@ class TestFX(JitTestCase): def test_interpreter_partial_eval(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 4)) self.linear = torch.nn.Linear(4, 5) @@ -1956,7 +1956,7 @@ class TestFX(JitTestCase): def test_transformer_noop(self): class 
MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 4)) self.linear = torch.nn.Linear(4, 5) @@ -1997,7 +1997,7 @@ class TestFX(JitTestCase): def test_transformer_multi_outputs(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 4)) self.linear = torch.nn.Linear(4, 5) @@ -2420,7 +2420,7 @@ class TestFX(JitTestCase): return MyOutput(foo=d + d, bar=d * 3) class CallsModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.m = ModuleReturnDataclass() @@ -2465,7 +2465,7 @@ class TestFX(JitTestCase): return d[42] class CallsModWithDict(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.m = ModWithDictArg() @@ -2484,7 +2484,7 @@ class TestFX(JitTestCase): return d[42] class CallsModWithDict(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.m = ModWithDictArg() @@ -2519,7 +2519,7 @@ class TestFX(JitTestCase): def test_direct_param_use(self): class TransposeTest(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.b = torch.nn.Parameter(torch.rand(4, 3)) @@ -2527,7 +2527,7 @@ class TestFX(JitTestCase): return self.b class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.a = TransposeTest() @@ -2593,7 +2593,7 @@ class TestFX(JitTestCase): self.skipTest("torch.classes._TorchScriptTesting._StackString is registered, skipping") class FooBar1234(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.f = torch.classes._TorchScriptTesting._StackString(["3", "4"]) @@ -2608,7 +2608,7 @@ class TestFX(JitTestCase): self.skipTest("torch.classes._TorchScriptTesting._ReLUClass is registered, skipping") class FooBar2341(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.f = torch.classes._TorchScriptTesting._ReLUClass() @@ -2629,7 +2629,7 @@ class TestFX(JitTestCase): return torch.relu(x) class Holder(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.s = torch.jit.script(Scripted()) @@ -2701,7 +2701,7 @@ class TestFX(JitTestCase): def getitem_inner(self): class GetItemBase(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.pe = torch.nn.Buffer(torch.randn(8, 8)) @@ -2759,7 +2759,7 @@ class TestFX(JitTestCase): def test_snake_case(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.activations = torch.nn.ModuleDict([ ["snake_case", torch.nn.ReLU()], @@ -2825,7 +2825,7 @@ class TestFX(JitTestCase): def test_custom_traceback_raised_when_exception_source_is_graphmodule(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.W = torch.nn.Parameter(torch.randn(5)) @@ -2852,7 +2852,7 @@ class TestFX(JitTestCase): def test_custom_traceback_not_raised_when_exception_source_is_submodule(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(3, 4) @@ -2926,7 +2926,7 @@ class TestFX(JitTestCase): def test_ast_rewriter_reassigns_submodules(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.bn = 
torch.nn.BatchNorm2d(100) @@ -3037,7 +3037,7 @@ class TestFX(JitTestCase): def test_ast_rewriter_wrap_with_submodule(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False) @@ -3056,7 +3056,7 @@ class TestFX(JitTestCase): def test_submodule_manipulation_API(self): class C(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(16, 33, 3, stride=2) self.param = torch.nn.Parameter(torch.rand(2, 3)) @@ -3065,7 +3065,7 @@ class TestFX(JitTestCase): return self.conv(torch.cat([self.param, x])) class B(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(100, 200) self.buf = torch.nn.Buffer(torch.randn(2, 3)) @@ -3075,7 +3075,7 @@ class TestFX(JitTestCase): return self.linear(torch.cat([self.buf, self.net_c(x)])) class A(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.net_b = B() self.param = torch.nn.Parameter(torch.rand(2, 3)) @@ -3202,7 +3202,7 @@ class TestFX(JitTestCase): def test_delete_unused_submodules_leaf(self): class SubModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(10, 10) self.relu = torch.nn.ReLU() @@ -3213,7 +3213,7 @@ class TestFX(JitTestCase): return x class Model(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.submod = SubModule() @@ -3235,7 +3235,7 @@ class TestFX(JitTestCase): def test_fx_stateless(self): class MockModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.l1 = torch.nn.Linear(1, 1) self.buffer = torch.nn.Buffer(torch.ones(1)) @@ -3268,7 +3268,7 @@ class TestFX(JitTestCase): return t + t class B(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super(type(self), self).__init__() self.calling = False self.called = False @@ -3383,7 +3383,7 @@ class TestFX(JitTestCase): def _test_graph_module_init_buffer_param_copied(self, use_dict_init: bool): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.my_buff = torch.nn.Buffer(torch.rand(3, 4)) self.register_parameter( diff --git a/test/test_fx_experimental.py b/test/test_fx_experimental.py index 5d3efbf8a781..634b10c42164 100644 --- a/test/test_fx_experimental.py +++ b/test/test_fx_experimental.py @@ -121,7 +121,7 @@ class TestFXExperimental(JitTestCase): def test_large_node_error(self): class TestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(4, 4) @@ -178,7 +178,7 @@ class TestFXExperimental(JitTestCase): def test_size_based_partition(self): class TestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(4, 4) self.c = torch.rand(4) @@ -210,7 +210,7 @@ class TestFXExperimental(JitTestCase): def test_partition_device_mapping(self): class TestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(4, 4) @@ -249,7 +249,7 @@ class TestFXExperimental(JitTestCase): layers.append(torch.nn.ReLU()) return layers - def __init__(self): + def __init__(self) -> None: super().__init__() layers = self.create_mlp(4, 4, 4) self.bottom_layers = torch.nn.Sequential(*layers) @@ -303,7 +303,7 @@ class 
TestFXExperimental(JitTestCase): def test_partition_latency(self): class TestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(4, 4) @@ -360,7 +360,7 @@ class TestFXExperimental(JitTestCase): def test_cost_aware_partition(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(4, 4) @@ -422,7 +422,7 @@ class TestFXExperimental(JitTestCase): def test_aot_based_partition(self): class TestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.b = torch.rand(4) self.c = torch.rand(4) @@ -481,7 +481,7 @@ class TestFXExperimental(JitTestCase): def test_saturate_host(self): class TestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(4, 4) @@ -537,7 +537,7 @@ class TestFXExperimental(JitTestCase): def test_conv_bn_fusion_not_running_state(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(32, 64, 3, stride=2) self.bn = torch.nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False) @@ -561,7 +561,7 @@ class TestFXExperimental(JitTestCase): def test_conv_bn_fusion_mixed_dtype(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False, dtype=torch.bfloat16) self.bn = torch.nn.BatchNorm2d(16, eps=0.001, momentum=0.1, affine=True, track_running_stats=True) @@ -612,7 +612,7 @@ class TestFXExperimental(JitTestCase): def test_meta_tracer(self): class MetaTracerTestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.emb = torch.nn.Embedding(num_embeddings=42, embedding_dim=16) self.layernorm = torch.nn.LayerNorm(16) @@ -735,7 +735,7 @@ terrible spacing def test_subgraph_creation(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 4)) self.linear = torch.nn.Linear(4, 5) @@ -855,7 +855,7 @@ terrible spacing def test_split_module_default_arg(self): class ModelToTrace(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.lin = torch.nn.Linear(512, 512) @@ -1169,7 +1169,7 @@ class {test_classname}(torch.nn.Module): def test_subgraph_uniquename(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(4, 4) @@ -1199,7 +1199,7 @@ class {test_classname}(torch.nn.Module): d_hid = 4 class ExampleCode(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.mm_param = torch.nn.Parameter(torch.randn(d_hid, d_hid)) self.mm_param2 = torch.nn.Parameter(torch.randn(d_hid, d_hid)) @@ -1244,7 +1244,7 @@ class {test_classname}(torch.nn.Module): def test_to_folder(self): class Test(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.W = torch.nn.Parameter(torch.randn(2)) self.seq = torch.nn.Sequential(torch.nn.BatchNorm1d(2, 2)) @@ -1299,7 +1299,7 @@ class {test_classname}(torch.nn.Module): } class TestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 3, 2) self.bn = torch.nn.BatchNorm2d(3) @@ 
-1493,7 +1493,7 @@ class {test_classname}(torch.nn.Module): import torch.nn as nn class Foo(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() layers = [] layers2 = [] diff --git a/test/test_fx_passes.py b/test/test_fx_passes.py index 491633f0e4b9..e5ed6e078c9e 100644 --- a/test/test_fx_passes.py +++ b/test/test_fx_passes.py @@ -20,7 +20,7 @@ logging.basicConfig(level=logging.WARNING) logger = logging.getLogger(__name__) class TestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(4, 4) self.linear2 = torch.nn.Linear(4, 4) @@ -45,7 +45,7 @@ class TestModule(torch.nn.Module): return add_4, add_6, relu class TestDeepModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(4, 4) diff --git a/test/test_jit.py b/test/test_jit.py index c222c9b9598b..adc3c71b4b9e 100644 --- a/test/test_jit.py +++ b/test/test_jit.py @@ -337,7 +337,7 @@ def _sum_of_list(tensorlist): # has to be at top level or Pickle complains class FooToPickle(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.bar = torch.jit.ScriptModule() @@ -477,7 +477,7 @@ class TestJit(JitTestCase): @unittest.skipIf(not RUN_CUDA, "restore device requires CUDA") def test_restore_device_cuda(self): class MyModule(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.b0 = nn.Buffer(torch.randn(1, 3)) self.p0 = nn.Parameter(torch.randn(2, 3)) @@ -531,7 +531,7 @@ class TestJit(JitTestCase): @unittest.skipIf(not RUN_CUDA, "restore device requires CUDA") def test_restore_shared_storage_on_cuda(self): class Foo(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() whole_tensor = torch.randn(4, 5, dtype=torch.float, device='cpu') self.p0 = nn.Parameter(whole_tensor.narrow(0, 0, 1)) @@ -948,7 +948,7 @@ class TestJit(JitTestCase): def test_Sequential(self): class Seq(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.seq = nn.Sequential(nn.Linear(10, 20), nn.Linear(20, 30)) @@ -963,7 +963,7 @@ class TestJit(JitTestCase): def test_ModuleList(self): class Mod(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.model = nn.ModuleList([nn.Linear(10, 10) for _ in range(10)]) self.model += (nn.Linear(10, 20),) @@ -1041,7 +1041,7 @@ class TestJit(JitTestCase): def test_nn_lp_pool2d(self): class Mod(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.l = torch.nn.LPPool2d(2, 3) self.n = torch.nn.LPPool2d(2, (7, 1)) @@ -1057,7 +1057,7 @@ class TestJit(JitTestCase): def test_nn_lp_pool1d(self): class Mod(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.l = torch.nn.LPPool1d(2, 3) self.n = torch.nn.LPPool1d(2, 7) @@ -1464,7 +1464,7 @@ graph(%Ra, %Rb): def test_pattern_based_module_rewrite(self): # Check match::module behavior class Test(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(1, 20, 5, 1) self.bn = torch.nn.BatchNorm2d(num_features=20) @@ -2450,7 +2450,7 @@ graph(%Ra, %Rb): @unittest.skipIf(not RUN_CUDA, "requires CUDA") def test_cuda_export_restore(self): class Sub(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.weight = nn.Parameter(torch.randn(3, 4)) @@ -2459,7 +2459,7 @@ 
graph(%Ra, %Rb): return self.weight + thing class M(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.mod = Sub() @@ -2493,7 +2493,7 @@ graph(%Ra, %Rb): def test_export_rnn(self): for clazz in [nn.RNN(10, 20, 2), nn.GRU(10, 20, 2)]: class RNNTest(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.rnn = clazz @@ -2515,7 +2515,7 @@ graph(%Ra, %Rb): def test_export_lstm(self): class LSTMTest(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.rnn = nn.LSTM(10, 20, 2) @@ -2538,7 +2538,7 @@ graph(%Ra, %Rb): def test_unique_state_dict(self): class MyModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() shared_param = torch.nn.Parameter(torch.ones(1)) self.register_parameter('w1', shared_param) @@ -2665,7 +2665,7 @@ graph(%Ra, %Rb): @unittest.skip("temporarily disable the test for fwd compatibility") def test_non_ascii_string(self): class Foo(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.a = "Over \u0e55\u0e57 57" @@ -2892,7 +2892,7 @@ graph(%Ra, %Rb): return self.one(self.two(x), x) class Bar(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.sub = Foo() @@ -3036,7 +3036,7 @@ class TestScript(JitTestCase): class MyMod(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.nested = MyNestedMod() @@ -3309,7 +3309,7 @@ class TestScript(JitTestCase): def test_ignored_method_binding(self): class Bar(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.x : int = 0 @@ -3339,7 +3339,7 @@ class TestScript(JitTestCase): class A(torch.nn.Module): __annotations__ = {"x": Optional[torch.Tensor]} - def __init__(self): + def __init__(self) -> None: super().__init__() self.x = None @@ -3376,7 +3376,7 @@ class TestScript(JitTestCase): class M(torch.jit.ScriptModule): FOO = 0 - def __init__(self): + def __init__(self) -> None: super().__init__() self.foo = self.FOO m = M() @@ -3394,14 +3394,14 @@ class TestScript(JitTestCase): def test_not_initialized_err(self): class M(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: self.foo = torch.rand(2, 3) with self.assertRaises(RuntimeError): M() def test_attribute_in_init(self): class M(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.foo = torch.jit.Attribute(0.1, float) # we should be able to use self.foo as a float here @@ -3692,7 +3692,7 @@ def foo(x): return x class D(C, B): - def __init__(self): + def __init__(self) -> None: super().__init__() self.assertEqual(D()(v), v + v) @@ -3720,7 +3720,7 @@ def foo(x): def test_first_class_module(self): class Foo(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.foo = nn.Parameter(torch.rand(3, 4)) @@ -3799,7 +3799,7 @@ def foo(x): def test_builtin_function_attributes(self): class Add(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.add = torch.add @@ -4048,11 +4048,11 @@ def foo(x): def test_class_as_attribute(self): @torch.jit.script class Foo321: - def __init__(self): + def __init__(self) -> None: self.x = 3 class FooBar1234(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.f = Foo321() @@ -4171,7 +4171,7 @@ def foo(x): def test_nested_aug_assign(self): 
@torch.jit.script class SomeClass: - def __init__(self): + def __init__(self) -> None: self.num = 99 def __iadd__(self, x): @@ -4185,7 +4185,7 @@ def foo(x): @torch.jit.script class SomeOutOfPlaceClass: - def __init__(self): + def __init__(self) -> None: self.num = 99 def __add__(self, x): @@ -4198,7 +4198,7 @@ def foo(x): return self.num == other.num class Child(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.x = 2 self.o = SomeClass() @@ -4206,7 +4206,7 @@ def foo(x): self.list = [1, 2, 3] class A(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.child = Child() @@ -4230,7 +4230,7 @@ def foo(x): @torch.jit.script class SomeNonAddableClass: - def __init__(self): + def __init__(self) -> None: self.num = 99 def __eq__(self, other): @@ -4239,7 +4239,7 @@ def foo(x): # with self.assertRaisesRegex(RuntimeError, "") class A(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.x = SomeNonAddableClass() @@ -4253,7 +4253,7 @@ def foo(x): def test_var_aug_assign(self): @torch.jit.script class SomeNonAddableClass: - def __init__(self): + def __init__(self) -> None: self.num = 99 def __eq__(self, other): @@ -4269,7 +4269,7 @@ def foo(x): @torch.jit.script class SomeClass: - def __init__(self): + def __init__(self) -> None: self.num = 99 def __iadd__(self, x): @@ -4283,7 +4283,7 @@ def foo(x): @torch.jit.script class SomeOutOfPlaceClass: - def __init__(self): + def __init__(self) -> None: self.num = 99 def __add__(self, x): @@ -4541,7 +4541,7 @@ def foo(xyz): return x class B(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.foo = torch.nn.ModuleList([A()]) @@ -4552,7 +4552,7 @@ def foo(xyz): return x class C(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.foo = torch.nn.Sequential(B()) @@ -6647,7 +6647,7 @@ a") class M1(torch.nn.Module): flag: torch.jit.Final[bool] - def __init__(self): + def __init__(self) -> None: super().__init__() self.flag = True @@ -6659,7 +6659,7 @@ a") class M2(torch.nn.Module): flag: torch.jit.Final[bool] - def __init__(self): + def __init__(self) -> None: super().__init__() self.flag = False @@ -7100,7 +7100,7 @@ a") def test_nested_select_assign(self): class SubSubModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.abc = 11 @@ -7108,7 +7108,7 @@ a") return self.abc class SubModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.a = 11 self.nested = SubSubModule() @@ -7117,7 +7117,7 @@ a") return self.a class TestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.sub = SubModule() self.hi = 1 @@ -7697,7 +7697,7 @@ dedent """ return F.relu(x, inplace=True) class ScriptMod(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.Conv2d_1a_3x3 = ScriptedConv2d(3, 32, kernel_size=3, stride=2) @@ -7718,7 +7718,7 @@ dedent """ return F.relu(x, inplace=True) class EagerMod(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.Conv2d_1a_3x3 = EagerConv2d(3, 32, kernel_size=3, stride=2) @@ -8144,7 +8144,7 @@ dedent """ def test_script_module(self): class M1(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.weight = nn.Parameter(torch.randn(2)) @@ -8153,7 +8153,7 @@ dedent """ return self.weight 
+ thing class PModule(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.a = nn.Parameter(torch.randn(2, 3)) @@ -8161,7 +8161,7 @@ dedent """ return self.a.mm(a) class M2(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() # test submodule self.sub = M1() @@ -8713,7 +8713,7 @@ dedent """ def test_script_module_call_noscript(self): class M(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.value = 1 @@ -8738,7 +8738,7 @@ dedent """ def test_script_module_nochange_submodule(self): class M(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.sub = nn.Linear(5, 5) @@ -8763,7 +8763,7 @@ dedent """ return thing * 2 class MyMod(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.mod = (Sub()) self.mod2 = (Sub()) @@ -8802,7 +8802,7 @@ dedent """ __constants__ = ['b', 'i', 'c', 's'] - def __init__(self): + def __init__(self) -> None: super().__init__() self.b = False self.i = 1 @@ -8849,7 +8849,7 @@ dedent """ class Foo(torch.jit.ScriptModule): __constants__ = ['invalid'] - def __init__(self): + def __init__(self) -> None: super().__init__() self.invalid = [nn.Linear(3, 4)] @@ -8861,7 +8861,7 @@ dedent """ class Foo2(torch.jit.ScriptModule): __constants__ = ['invalid'] - def __init__(self): + def __init__(self) -> None: super().__init__() self.invalid = int @@ -8871,7 +8871,7 @@ dedent """ class Foo3(torch.jit.ScriptModule): __constants__ = ['invalid'] - def __init__(self): + def __init__(self) -> None: super().__init__() self.invalid = (3, 4, {}) @@ -8881,7 +8881,7 @@ dedent """ class Foo4(torch.jit.ScriptModule): __constants__ = ['invalid'] - def __init__(self): + def __init__(self) -> None: super().__init__() self.invalid = np.int64(5) @@ -8892,7 +8892,7 @@ dedent """ def test_script_module_param_buffer_mutation(self): # TODO: add param mutation test case after JIT support it class ModuleBufferMutate(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.running_var = nn.Buffer(torch.tensor(0, dtype=torch.long)) @@ -8912,7 +8912,7 @@ dedent """ class M(torch.jit.ScriptModule): __constants__ = ['b'] - def __init__(self): + def __init__(self) -> None: super().__init__() self.b = [1, 2, 3, 4] @@ -8947,7 +8947,7 @@ dedent """ def test_script_module_for2(self): class Sub(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.weight = nn.Parameter(torch.randn(2)) @@ -8956,7 +8956,7 @@ dedent """ return self.weight + thing class M(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.mods = nn.ModuleList([Sub() for i in range(10)]) @@ -8979,7 +8979,7 @@ dedent """ def test_attr_qscheme_script(self): class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.qscheme = torch.per_tensor_affine @@ -8995,7 +8995,7 @@ dedent """ def test_script_module_const_submodule_fail(self): class Sub(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.weight = nn.Parameter(torch.randn(2)) @@ -9004,7 +9004,7 @@ dedent """ return self.weight + thing class M(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.mods = [Sub() for _ in range(10)] @@ -9018,7 +9018,7 @@ dedent """ M() class DerivedStateModule(torch.jit.ScriptModule): - 
def __init__(self): + def __init__(self) -> None: super(TestScript.DerivedStateModule, self).__init__() self.param = torch.nn.Parameter(torch.ones(3, 4, dtype=torch.float)) self.derived = nn.Buffer(torch.neg(self.param).detach().clone()) @@ -9174,7 +9174,7 @@ dedent """ def test_missing_getstate(self): class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.x = 1 @@ -9204,7 +9204,7 @@ dedent """ @skipIfTorchDynamo("TorchDynamo fails with unknown reason") def test_pack_unpack_nested(self): class SubSubMod(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.buf = nn.Buffer(torch.ones(3, 4) * 3) @@ -9221,7 +9221,7 @@ dedent """ return x + self.buf class SubMod(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.buf = nn.Buffer(torch.ones(3, 4) * 2) self.ssm = SubSubMod() @@ -9239,7 +9239,7 @@ dedent """ return self.ssm(x + self.buf) class Mod(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.submod = SubMod() self.buf = nn.Buffer(torch.ones(3, 4) * 1) @@ -9312,7 +9312,7 @@ dedent """ class M(torch.jit.ScriptModule): __constants__ = ['mods'] - def __init__(self): + def __init__(self) -> None: super().__init__() self.mods = 1 @@ -9340,7 +9340,7 @@ dedent """ def test_script_sequential_for(self): class Sub(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.weight = nn.Parameter(torch.randn(2)) @@ -9349,7 +9349,7 @@ dedent """ return self.weight + thing class M(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.mods = nn.Sequential(Sub(), Sub(), Sub()) @@ -9377,7 +9377,7 @@ dedent """ def test_script_sequential_sliced_iteration(self): class seq_mod(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.layers = [nn.ReLU(), nn.ReLU(), nn.ReLU()] self.layers = nn.Sequential(*self.layers) @@ -9395,7 +9395,7 @@ dedent """ def test_script_sequential_orderdict(self): class M(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.mods = nn.Sequential(OrderedDict([ ("conv", nn.Conv2d(1, 20, 5)), @@ -9411,7 +9411,7 @@ dedent """ def test_script_sequential_multi_output_fail(self): class Sub(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.weight = nn.Parameter(torch.randn(2)) @@ -9425,7 +9425,7 @@ dedent """ return x, x, x class HaveSequential(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.someseq = nn.Sequential( Sub(), @@ -9446,7 +9446,7 @@ dedent """ @_tmp_donotuse_dont_inline_everything def test_script_sequential_in_mod_list(self): class Sub(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.weight = nn.Parameter(torch.randn(2)) @@ -9455,7 +9455,7 @@ dedent """ return self.weight + thing class M(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.mods = nn.ModuleList([Sub(), nn.Sequential(Sub(), nn.Sequential(Sub(), Sub()), Sub())]) @@ -9473,7 +9473,7 @@ dedent """ @_tmp_donotuse_dont_inline_everything def test_script_nested_mod_list(self): class Sub(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.weight = nn.Parameter(torch.randn(2)) @@ -9482,7 +9482,7 @@ dedent """ return 
self.weight + thing class M(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.mods = nn.ModuleList([nn.ModuleList([Sub()]), nn.Sequential(Sub()), nn.ModuleList([Sub(), Sub()])]) @@ -9502,7 +9502,7 @@ dedent """ class M(torch.jit.ScriptModule): __constants__ = ['dim'] - def __init__(self): + def __init__(self) -> None: super().__init__() self.dim = 1 @@ -9514,7 +9514,7 @@ dedent """ self.assertEqual(torch.cat([v, v, v], dim=1), M()(v)) class StarTestSumStarred(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super(TestScript.StarTestSumStarred, self).__init__() def forward(self, *inputs): @@ -9524,7 +9524,7 @@ dedent """ return output class StarTestReturnThree(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super(TestScript.StarTestReturnThree, self).__init__() def forward(self, rep): @@ -9533,7 +9533,7 @@ dedent """ def test_script_star_expr(self): class M2(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.m = torch.jit.trace(TestScript.StarTestSumStarred(), (torch.ones(4, 3), torch.ones(4, 3), torch.ones(4, 3))) @@ -9549,7 +9549,7 @@ dedent """ def test_script_star_expr_string(self): class M2(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.m = torch.jit.trace(TestScript.StarTestSumStarred(), (torch.ones(4, 3), torch.ones(4, 3), torch.ones(4, 3))) @@ -9565,7 +9565,7 @@ dedent """ self.assertEqual(m(torch.zeros(4, 3)), 3 * torch.zeros(4, 3)) class StarTestSumAndReturnThree(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super(TestScript.StarTestSumAndReturnThree, self).__init__() def forward(self, *inputs): @@ -9576,7 +9576,7 @@ dedent """ def test_script_star_assign(self): class M2(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.g = torch.jit.trace(TestScript.StarTestSumAndReturnThree(), torch.ones(4, 3)) self.define(''' @@ -9590,7 +9590,7 @@ dedent """ def test_script_module_star_assign2(self): class M2(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.g = torch.jit.trace( TestScript.StarTestSumAndReturnThree(), @@ -9607,7 +9607,7 @@ dedent """ def test_script_module_star_assign2_inplace(self): class M2(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.g = torch.jit.trace( TestScript.StarTestSumAndReturnThree(), @@ -9629,7 +9629,7 @@ dedent """ with self.assertRaisesRegex(RuntimeError, "cannot be used as a tuple"): class M2(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() @torch.jit.ignore @@ -9648,7 +9648,7 @@ dedent """ def test_script_module_star_assign_fail_builtin(self): with self.assertRaisesRegex(RuntimeError, "cannot be used as a tuple"): class M2(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.define(''' @@ -9762,7 +9762,7 @@ dedent """ def test_comment_ignore_indent(self): class Model(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: # useless comment that is not indented correctly # noqa: E115 super().__init__() @@ -9813,7 +9813,7 @@ dedent """ "val": Optional[torch.Tensor] } - def __init__(self): + def __init__(self) -> None: super().__init__() self.val = None @@ -9838,7 +9838,7 @@ dedent """ return self.forward_pytorch(input) * 2 class TestModule(LowLevelModule): - def __init__(self): + def 
__init__(self) -> None: super().__init__() # Replace the forward method self.forward = types.MethodType(LowLevelModule.forward, self) @@ -10343,7 +10343,7 @@ dedent """ def test_script_module_export_submodule(self): class M1(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.weight = nn.Parameter(torch.randn(2)) @@ -10352,7 +10352,7 @@ dedent """ return self.weight + thing class M2(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() # test submodule self.sub = M1() @@ -10448,7 +10448,7 @@ dedent """ def test_script_module_export_tensor_cuda(self): class M(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.param = torch.nn.Parameter(torch.zeros((5, 5), device='cuda:0').random_()) @@ -10487,7 +10487,7 @@ dedent """ def test_script_module_export_shared_storage(self): class M(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.param1 = torch.nn.Parameter(torch.rand(5, 5)) self.param2 = torch.nn.Parameter(self.param1[3]) @@ -10517,7 +10517,7 @@ dedent """ return {"1": x} class C(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.foo = torch.nn.Sequential(A(), B()) @@ -10908,7 +10908,7 @@ dedent """ class M(torch.jit.ScriptModule): __constants__ = ['d'] - def __init__(self): + def __init__(self) -> None: super().__init__() self.d = torch.device('cpu') @@ -11097,7 +11097,7 @@ dedent """ input_shape = (10, 5) class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.weight_0 = torch.nn.Parameter(torch.rand(weight_0_shape)) self.weight_1 = torch.nn.Parameter(torch.rand(weight_1_shape)) @@ -11544,7 +11544,7 @@ dedent """ return thing * 2 class Mod(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.mods = nn.ModuleList([Double(), Double()]) @@ -12190,7 +12190,7 @@ dedent """ def test_call_python_mod_from_tracing_fn(self): class PythonMod(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.param = torch.nn.Parameter(torch.rand(4, 3), requires_grad=False) @@ -12224,7 +12224,7 @@ dedent """ @unittest.skip("error in first class mode") def test_call_traced_mod_from_tracing_fn(self): class TracedModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.param = torch.nn.Parameter(torch.rand(4, 3), requires_grad=False) @@ -12254,7 +12254,7 @@ dedent """ def test_call_script_mod_from_tracing_fn(self): with self.assertRaisesRegex(RuntimeError, "must be registered as submodules"): class ScriptMod(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 4), requires_grad=False) @@ -12276,7 +12276,7 @@ dedent """ return torch.neg(x) class TracedModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.param = torch.nn.Parameter(torch.rand(4, 3)) @@ -12293,7 +12293,7 @@ dedent """ def test_call_python_mod_from_traced_module(self): class PythonModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.param = torch.nn.Parameter(torch.rand(5, 7)) @@ -12301,7 +12301,7 @@ dedent """ return torch.mm(x, self.param) class TracedModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.param = 
torch.nn.Parameter(torch.rand(4, 5)) self.mod = PythonModule() @@ -12384,7 +12384,7 @@ dedent """ def test_call_python_mod_from_script_fn(self): class PythonModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.param = torch.nn.Parameter(torch.rand(5, 7)) @@ -12432,7 +12432,7 @@ dedent """ return torch.neg(x) class ScriptMod(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.param = torch.nn.Parameter(torch.rand(4, 3)) @@ -12446,7 +12446,7 @@ dedent """ def test_call_python_mod_from_script_module(self): class PythonMod(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 5)) @@ -12455,7 +12455,7 @@ dedent """ return torch.mm(x, self.param) class ScriptMod(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.param = torch.nn.Parameter(torch.rand(4, 3)) self.pm = PythonMod() @@ -12476,7 +12476,7 @@ dedent """ return torch.neg(x) class ScriptMod(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.param = torch.nn.Parameter(torch.rand(4, 3)) @@ -12491,7 +12491,7 @@ dedent """ @_tmp_donotuse_dont_inline_everything def test_call_script_mod_from_script_module(self): class ScriptMod1(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 5)) @@ -12500,7 +12500,7 @@ dedent """ return torch.mm(x, self.param) class ScriptMod(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.param = torch.nn.Parameter(torch.rand(4, 3)) self.tm = ScriptMod1() @@ -12519,7 +12519,7 @@ dedent """ def test_module_with_params_called_fails(self): with self.assertRaisesRegex(RuntimeError, "Cannot call a ScriptModule that is not a submodule of the caller"): class ScriptMod(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 3)) @@ -12840,7 +12840,7 @@ dedent """ # String frontend , Python 3-style type annotations , Script method def test_annot_string_py3_method(self): class TestModule(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() code = ''' @@ -12872,7 +12872,7 @@ dedent """ # String frontend , MyPy-style type comments , Script method def test_annot_string_mypy_method(self): class TestModule(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() code = ''' @@ -13090,7 +13090,7 @@ dedent """ # Initialize a ScriptModule that uses the weak module above multiple times class Strong(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc1 = TestLinear(10, 10) self.fc1.weight = torch.nn.Parameter(weights) @@ -13637,7 +13637,7 @@ dedent """ # Check simpler module class NoArgState(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.buffer1 = nn.Buffer(torch.ones(2, 2)) self.buffer2 = nn.Buffer(torch.ones(2, 2)) @@ -14692,7 +14692,7 @@ dedent """ return x[0] + 5 class S(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.weak = Over() @@ -14822,7 +14822,7 @@ dedent """ def test_nn_LSTM_with_layers(self): class M(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.rnn = 
nn.LSTM(2, 3, 2, dropout=0) @@ -14831,7 +14831,7 @@ dedent """ return self.rnn(x, (h0, c0))[0] class Eager(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.rnn = nn.LSTM(2, 3, 2, dropout=0) @@ -14848,7 +14848,7 @@ dedent """ input = torch.nn.utils.rnn.pack_sequence([torch.randn(5, 5)]) class S(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.x = torch.nn.LSTM(5, 5) @@ -14866,7 +14866,7 @@ dedent """ tensor_input = torch.randn(5, 5, 5) class SeqLengthGRU(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.x = torch.nn.GRU(5, 5) @@ -14875,7 +14875,7 @@ dedent """ return self.x(input) class TensorGRU(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.x = torch.nn.GRU(5, 5) @@ -15071,7 +15071,7 @@ dedent """ @unittest.skipIf(not RUN_CUDA, "no CUDA") def test_weak_cuda(self): class M(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.lstm = torch.nn.LSTM(5, 5) self.lstm.cuda() @@ -15088,7 +15088,7 @@ dedent """ def test_ignore_decorator(self): with warnings.catch_warnings(record=True) as warns: class M(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() tensor = torch.zeros(1, requires_grad=False) self.some_state = nn.Buffer(torch.nn.Parameter(tensor)) @@ -15185,7 +15185,7 @@ dedent """ def test_module_none_attrs(self): class MyMod(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.optional_value = None @@ -15231,7 +15231,7 @@ dedent """ tester = self class M(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() for name, value, the_type in tester.get_pickle_values(): setattr(self, name, torch.jit.Attribute(value, the_type)) @@ -15271,7 +15271,7 @@ dedent """ tester = self class M(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() for name, value, the_type in tester.get_pickle_values(): setattr(self, "_" + name, torch.jit.Attribute(value, the_type)) @@ -15310,7 +15310,7 @@ dedent """ return (self.table, self.list) class M(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.table = torch.jit.Attribute({"this": "is", "a different": "dict"}, Dict[str, str]) self.tensor = torch.jit.Attribute(torch.randn(2, 2), torch.Tensor) @@ -15327,7 +15327,7 @@ dedent """ def test_serialization_big_ints(self): class M(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.int32_max = torch.jit.Attribute(2**31 - 1, int) self.int32_min = torch.jit.Attribute(-2**31, int) @@ -15359,7 +15359,7 @@ dedent """ @unittest.skipIf(IS_WINDOWS, "NYI: TemporaryFileName on Windows") def test_serialization_sharing(self): class M(torch.jit.ScriptModule): - def __init__(self): + def __init__(self) -> None: super().__init__() self.list = torch.jit.Attribute([], List[str]) @@ -15398,7 +15398,7 @@ dedent """ print('foo') class Redirect: - def __init__(self): + def __init__(self) -> None: self.s = '' def write(self, s): @@ -15416,7 +15416,7 @@ dedent """ def test_dtype_attr(self): class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.dtype = torch.zeros([]).dtype @@ -15429,7 +15429,7 @@ dedent """ def test_named_buffers_are_iterable(self): class MyMod(torch.nn.Module): - 
def __init__(self): + def __init__(self) -> None: super().__init__() self.mod = (torch.nn.ReLU()) self.mod2 = (torch.nn.ReLU()) @@ -15512,7 +15512,7 @@ dedent """ class HasAttrMod(torch.nn.Module): __constants__ = ["fee"] - def __init__(self): + def __init__(self) -> None: super().__init__() self.fee = 3 @@ -15536,7 +15536,7 @@ dedent """ @torch.jit.script class FooTest: - def __init__(self): + def __init__(self) -> None: self.x = 1 def foo(self, y): @@ -15637,7 +15637,7 @@ dedent """ def test_get_set_state_with_tensors(self): class M(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.tensor = torch.randn(2, 2) @@ -15831,7 +15831,7 @@ dedent """ class InnerSubmod(nn.Module): __constants__ = ['my_constant'] - def __init__(self): + def __init__(self) -> None: super().__init__() self.foo = torch.nn.Buffer(torch.ones(1)) self.register_parameter("bar", torch.nn.Parameter(torch.ones(1))) @@ -15842,7 +15842,7 @@ dedent """ return x + x class Inner(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.submod = InnerSubmod() @@ -16099,7 +16099,7 @@ def add_nn_module_test(*args, **kwargs): class TheModule(torch.jit.ScriptModule): __constants__ = submodule_constants - def __init__(self): + def __init__(self) -> None: super().__init__() self.submodule = nn_module(*constructor_args) diff --git a/test/test_jit_autocast.py b/test/test_jit_autocast.py index ee0f1fa3eb89..b78127614d8e 100644 --- a/test/test_jit_autocast.py +++ b/test/test_jit_autocast.py @@ -705,7 +705,7 @@ class TestAutocast(JitTestCase): @unittest.skipIf(not TEST_CUDA, "No cuda") def test_jit_freeze_autocast_constants(self): class TestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.x = torch.rand((3, 4), dtype=torch.float).cuda() diff --git a/test/test_jit_fuser.py b/test/test_jit_fuser.py index 9d59dcce08be..98cef4031c50 100644 --- a/test/test_jit_fuser.py +++ b/test/test_jit_fuser.py @@ -821,7 +821,7 @@ class TestFuser(JitTestCase): class M(torch.jit.ScriptModule): __constants__ = ['d'] - def __init__(self): + def __init__(self) -> None: super().__init__() self.d = torch.device('cuda') diff --git a/test/test_jit_fuser_te.py b/test/test_jit_fuser_te.py index 9c344051ca0c..7698796e3ef0 100644 --- a/test/test_jit_fuser_te.py +++ b/test/test_jit_fuser_te.py @@ -1088,7 +1088,7 @@ class TestTEFuser(JitTestCase): class M(torch.jit.ScriptModule): __constants__ = ["d"] - def __init__(self): + def __init__(self) -> None: super().__init__() self.d = torch.device("cuda") diff --git a/test/test_jit_llga_fuser.py b/test/test_jit_llga_fuser.py index 08753846ffa4..45a86096ae22 100644 --- a/test/test_jit_llga_fuser.py +++ b/test/test_jit_llga_fuser.py @@ -486,7 +486,7 @@ class TestFusionPattern(JitLlgaTestCase): @dtypes(torch.float32, torch.bfloat16) def test_conv2d_clamp(self, dtype): class M(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True) self.conv2 = nn.Conv2d(32, 32, 3, padding=1, bias=True) @@ -519,7 +519,7 @@ class TestFusionPattern(JitLlgaTestCase): @dtypes(torch.float32, torch.bfloat16) def test_conv2d_bn(self, dtype): class M(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True) self.bn1 = nn.BatchNorm2d(32) @@ -541,7 +541,7 @@ class TestFusionPattern(JitLlgaTestCase): @dtypes(torch.float32, torch.bfloat16) def test_conv2d_bn_relu(self, 
dtype): class M(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True) self.bn1 = nn.BatchNorm2d(32) @@ -645,7 +645,7 @@ class TestFusionPattern(JitLlgaTestCase): @dtypes(torch.float32, torch.bfloat16) def test_wildcard(self, dtype): class M(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True) self.eltwise = nn.ReLU() @@ -773,7 +773,7 @@ class TestEnableDisableLlgaFuser(JitTestCase): class TestDynamoAOT(JitTestCase): def test_dynamo_aot_ts_onednn(self): class Seq(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.layers = nn.Sequential( nn.Linear(10, 10), diff --git a/test/test_metal.py b/test/test_metal.py index 6b9b29ea5492..050816bff5d1 100644 --- a/test/test_metal.py +++ b/test/test_metal.py @@ -63,7 +63,7 @@ class TestMetalRewritePass(TestCase): conv_bias_shape = (output_channels) class Conv2D(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False) self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False) @@ -83,7 +83,7 @@ class TestMetalRewritePass(TestCase): TestMetalRewritePass.validate_transformed_module(Conv2D(), pattern_count_map, data_shape) class Conv2DRelu(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False) self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False) @@ -122,7 +122,7 @@ class TestMetalRewritePass(TestCase): class Conv2DHardtanh(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False) self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False) diff --git a/test/test_mobile_optimizer.py b/test/test_mobile_optimizer.py index 28113d0bdf08..6029f06ba4e6 100644 --- a/test/test_mobile_optimizer.py +++ b/test/test_mobile_optimizer.py @@ -53,7 +53,7 @@ class TestOptimizer(TestCase): linear_weight_shape = (weight_output_dim, linear_input_shape) class MyTestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape)) self.conv_bias = torch.nn.Parameter(torch.rand(conv_bias_shape)) @@ -85,7 +85,7 @@ class TestOptimizer(TestCase): class BNTestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(1, 20, 5, 1) self.bn = torch.nn.BatchNorm2d(num_features=20) @@ -166,7 +166,7 @@ class TestOptimizer(TestCase): torch.testing.assert_close(bn_scripted_module(bn_input), no_bn_fold_scripted_module(bn_input), rtol=1e-2, atol=1e-3) class MyMobileOptimizedTagTest(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape)) self.linear_bias = torch.nn.Parameter(torch.rand(weight_output_dim)) @@ -183,7 +183,7 @@ class TestOptimizer(TestCase): self.assertTrue(tag) class MyPreserveMethodsTest(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape)) self.linear_bias = 
torch.nn.Parameter(torch.rand(weight_output_dim)) @@ -207,7 +207,7 @@ class TestOptimizer(TestCase): self.assertNotEqual(preserveThis, None) class OptimizeNoForwardTest(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.l = nn.Linear(10, 100) self.l2 = nn.Linear(100, 1) @@ -233,7 +233,7 @@ class TestOptimizer(TestCase): torch.testing.assert_close(initial_result, optimized_result, rtol=1e-2, atol=1e-3) class BNTestNoForwardModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(1, 20, 5, 1) self.bn = torch.nn.BatchNorm2d(num_features=20) @@ -272,7 +272,7 @@ class TestOptimizer(TestCase): return class Child(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv2 = nn.Conv2d(1, 1, 1) @@ -281,7 +281,7 @@ class TestOptimizer(TestCase): return x class Parent(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.quant = torch.ao.quantization.QuantStub() self.conv1 = nn.Conv2d(1, 1, 1) @@ -307,7 +307,7 @@ class TestOptimizer(TestCase): def test_generate_mobile_module_lints(self): class MyTestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc = torch.nn.Linear(4, 4) self.dropout = torch.nn.Dropout(p=0.5) @@ -318,7 +318,7 @@ class TestOptimizer(TestCase): return out class MyBNModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.bn = torch.nn.BatchNorm2d(4, affine=True) @@ -409,7 +409,7 @@ class TestOptimizer(TestCase): return class Standalone(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.quant = torch.ao.quantization.QuantStub() self.conv1 = nn.Conv2d(1, 1, 1) @@ -430,7 +430,7 @@ class TestOptimizer(TestCase): pass class Child(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = nn.Conv2d(1, 1, 1) @@ -439,7 +439,7 @@ class TestOptimizer(TestCase): return x class Parent(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.quant = torch.ao.quantization.QuantStub() self.conv1 = nn.Conv2d(1, 1, 1) @@ -511,7 +511,7 @@ class TestOptimizer(TestCase): def test_clone_module_with_class(self): class MyInnerTestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.pqr = torch.Tensor([10., 20., 30.]) @@ -523,7 +523,7 @@ class TestOptimizer(TestCase): return 20 class MyTestModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.abc = 23 self.pqr = torch.Tensor([1., 2., 3.]) diff --git a/test/test_model_dump.py b/test/test_model_dump.py index f7ae07131a99..01a03d43ab7d 100644 --- a/test/test_model_dump.py +++ b/test/test_model_dump.py @@ -17,7 +17,7 @@ from torch.testing._internal.common_quantized import supported_qengines class SimpleModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.layer1 = torch.nn.Linear(16, 64) self.relu1 = torch.nn.ReLU() @@ -34,7 +34,7 @@ class SimpleModel(torch.nn.Module): class QuantModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.quant = torch.ao.quantization.QuantStub() self.dequant = torch.ao.quantization.DeQuantStub() @@ -48,7 +48,7 @@ class QuantModel(torch.nn.Module): class ModelWithLists(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.rt = 
[torch.zeros(1)] self.ot = [torch.zeros(1), None] @@ -223,7 +223,7 @@ class TestModelDump(TestCase): # Make sure we can handle a model with both constants and data tensors. class ComposedModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.w1 = torch.zeros(1, 2) self.w2 = torch.ones(2, 2) diff --git a/test/test_module_tracker.py b/test/test_module_tracker.py index 450a78792277..abbaaed4491a 100644 --- a/test/test_module_tracker.py +++ b/test/test_module_tracker.py @@ -24,7 +24,7 @@ class TestModuleTracker(TestCase): return {"a": torch.mm(x, x)} class Mod(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.a = Foo() self.b = torch.nn.ModuleDict({"nest": Foo()}) diff --git a/test/test_mps.py b/test/test_mps.py index f2280278299f..ec67eb841eae 100644 --- a/test/test_mps.py +++ b/test/test_mps.py @@ -8598,13 +8598,13 @@ class TestNNMPS(NNTestCase): def _create_basic_net(self): class Layer(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.layer_dummy_param = Parameter(torch.empty(3, 5)) self.layer_dummy_buf = Buffer(torch.zeros(1, 3, 3, 7)) class Net(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.l1 = Layer() self.dummy_param = Parameter(torch.empty(3, 5)) diff --git a/test/test_nestedtensor.py b/test/test_nestedtensor.py index 8238fb718cb7..b50d73e760f8 100644 --- a/test/test_nestedtensor.py +++ b/test/test_nestedtensor.py @@ -6559,7 +6559,7 @@ class TestNestedTensorSubclass(NestedTensorTestCase): torch.manual_seed(0) class mha(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() torch.manual_seed(0) self.linear = torch.nn.Linear(d2, d3, device=device) diff --git a/test/test_nn.py b/test/test_nn.py index ff2d840b1670..19fff4317e09 100644 --- a/test/test_nn.py +++ b/test/test_nn.py @@ -189,7 +189,7 @@ class TestNN(NNTestCase): def test_share_memory(self): class Net(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.p = nn.Parameter(torch.eye(5)) self.par = nn.ParameterList() @@ -307,7 +307,7 @@ class TestNN(NNTestCase): return [k for k, _ in named_parameters] class M1(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.param1 = nn.Parameter(torch.empty(3, 3)) self.param2 = self.param1 @@ -319,7 +319,7 @@ class TestNN(NNTestCase): ["param1", "param2"]) class M2(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.mod1 = nn.Linear(3, 4, bias=False) self.mod2 = self.mod1 @@ -358,7 +358,7 @@ class TestNN(NNTestCase): # test remove_duplicate class M(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.buffer1 = Buffer(torch.empty(3, 5)) self.buffer2 = self.buffer1 @@ -371,7 +371,7 @@ class TestNN(NNTestCase): def test_buffer_bad_module_subclass(self): class MyBadModule(nn.Linear): - def __init__(self): + def __init__(self) -> None: super().__init__(2, 2) self.bar = Buffer(torch.rand(2, 2)) @@ -384,7 +384,7 @@ class TestNN(NNTestCase): def test_call_supports_python_dict_output(self): class Net(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.l1 = nn.Linear(10, 20) self.register_backward_hook(self.hook) @@ -412,7 +412,7 @@ class TestNN(NNTestCase): def test_train_errors_for_invalid_mode(self): class SubclassNet(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.l1 = nn.Linear(2, 2) @@ 
-485,7 +485,7 @@ class TestNN(NNTestCase): def test_modules(self): class Net(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.l1 = l self.l2 = l @@ -498,7 +498,7 @@ class TestNN(NNTestCase): def test_named_modules(self): class Net(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.l1 = l self.l2 = l @@ -2506,7 +2506,7 @@ tensor(..., device='meta', size=(1,), requires_grad=True)""") def test_container_copy(self): class Model(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = nn.Linear(4, 5) @@ -12018,7 +12018,7 @@ if __name__ == '__main__': @parametrize_test('foreach', (False, True)) def test_clip_grad_norm_multi_device(self, devices, foreach): class TestModel(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.layer1 = nn.Linear(10, 10) self.layer2 = nn.Linear(10, 10) @@ -12890,13 +12890,13 @@ class TestFusionUtils(TestCase): class TestUtils(TestCase): def test_consume_prefix_in_state_dict_if_present(self): class Block(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.conv1 = nn.Conv2d(3, 3, 3, bias=True) self.conv2 = nn.Conv2d(3, 3, 3, bias=False) class Net(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear1 = nn.Linear(5, 5) self.linear2 = nn.Linear(5, 5) diff --git a/test/test_nnapi.py b/test/test_nnapi.py index 33e3ab3f53f3..ef9fe7bb6dab 100644 --- a/test/test_nnapi.py +++ b/test/test_nnapi.py @@ -367,7 +367,7 @@ class TestNNAPI(TestCase): def test_to(self): class ToCPU(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.prelu = torch.nn.PReLU() diff --git a/test/test_proxy_tensor.py b/test/test_proxy_tensor.py index f9fe889d1d40..2ab6503897fe 100644 --- a/test/test_proxy_tensor.py +++ b/test/test_proxy_tensor.py @@ -617,7 +617,7 @@ def forward(self, x_1): def test_make_fx_model_fwd_bwd(self): class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(5, 5) @@ -670,7 +670,7 @@ def forward(self, x_1): def test_make_fx_model_fwd_bwd_wgtupdate(self): class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(5, 5) @@ -1628,14 +1628,14 @@ def forward(self, a_1): def test_make_fx_with_custom_tracer_preserving_nn_module_stack(self): class Bar(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() def forward(self, x): return x + 1 class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.bar = Bar() diff --git a/test/test_python_dispatch.py b/test/test_python_dispatch.py index 0b3d9d487f29..fbaa779668ab 100644 --- a/test/test_python_dispatch.py +++ b/test/test_python_dispatch.py @@ -1644,7 +1644,7 @@ $3: f32[] = torch._ops.aten.add.Tensor($1, $2)""", sub_count = 0 class PoliteMode(TorchDispatchMode): - def __init__(self): + def __init__(self) -> None: self.pre_count = 0 self.post_count = 0 diff --git a/test/test_serialization.py b/test/test_serialization.py index d9703a4b8d6b..1de84947131e 100644 --- a/test/test_serialization.py +++ b/test/test_serialization.py @@ -4045,7 +4045,7 @@ class TestSerialization(TestCase, SerializationMixin): @unittest.skipIf(IS_WINDOWS, "NamedTemporaryFile on windows") def test_serialization_mmap_loading(self, weights_only, path_type): class DummyModel(torch.nn.Module): 
- def __init__(self): + def __init__(self) -> None: super().__init__() self.fc1 = torch.nn.Linear(3, 1024) self.fc2 = torch.nn.Linear(1024, 5) @@ -4071,7 +4071,7 @@ class TestSerialization(TestCase, SerializationMixin): "CUDA is unavailable or NamedTemporaryFile on Windows") def test_serialization_mmap_loading_with_map_location(self): class DummyModel(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.fc1 = torch.nn.Linear(3, 1024) self.fc2 = torch.nn.Linear(1024, 5) diff --git a/test/test_sparse.py b/test/test_sparse.py index 18f794f71279..b284cb471f8c 100644 --- a/test/test_sparse.py +++ b/test/test_sparse.py @@ -89,7 +89,7 @@ def gradcheck_semantics(test_name='gradcheck'): class CrossRefSparseFakeMode(torch._subclasses.CrossRefFakeMode): - def __init__(self): + def __init__(self) -> None: super().__init__( self.ignore_op, check_strides=False, check_aliasing=False, diff --git a/test/test_sparse_semi_structured.py b/test/test_sparse_semi_structured.py index 70aefb2349ce..cc697e47ff54 100644 --- a/test/test_sparse_semi_structured.py +++ b/test/test_sparse_semi_structured.py @@ -190,7 +190,7 @@ class SparseSemiStructuredTensorCompileTest(torch._dynamo.test_case.TestCase): """ class Model(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.linear = nn.Linear(128, 128) diff --git a/test/test_spectral_ops.py b/test/test_spectral_ops.py index 35cbff467ff6..e5b2c32e38ab 100644 --- a/test/test_spectral_ops.py +++ b/test/test_spectral_ops.py @@ -1592,7 +1592,7 @@ class FFTDocTestFinder: '''The default doctest finder doesn't like that function.__module__ doesn't match torch.fft. It assumes the functions are leaked imports. ''' - def __init__(self): + def __init__(self) -> None: self.parser = doctest.DocTestParser() def find(self, obj, name=None, module=None, globs=None, extraglobs=None): diff --git a/test/test_stateless.py b/test/test_stateless.py index 35c2f1af64e6..a68d8ad9b5f7 100644 --- a/test/test_stateless.py +++ b/test/test_stateless.py @@ -15,7 +15,7 @@ from torch.testing._internal.common_utils import run_tests, TestCase, parametriz class MockModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.l1 = torch.nn.Linear(1, 1) self.buffer = torch.nn.Buffer(torch.ones(1)) @@ -26,7 +26,7 @@ class MockModule(torch.nn.Module): class MockTiedModule(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.l1 = torch.nn.Linear(1, 1) self.tied_bias = self.l1.bias @@ -630,7 +630,7 @@ class TestStatelessFunctionalAPI(TestCase): ]) def test_setattr(self, functional_call): class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.foo = torch.nn.Buffer(torch.tensor([0.0])) @@ -654,7 +654,7 @@ class TestStatelessFunctionalAPI(TestCase): ]) def test_in_place_operator(self, functional_call): class Foo(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.foo = torch.nn.Buffer(torch.tensor([0.0])) @@ -678,7 +678,7 @@ class TestStatelessFunctionalAPI(TestCase): ]) def test_setattr_strict(self, functional_call): class Bar(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() assert not hasattr(self, 'extra') @@ -775,7 +775,7 @@ class TestStatelessFunctionalAPI(TestCase): ]) def test_functional_call_member_reference(self, functional_call): class Module(torch.nn.Module): - def __init__(self): + def __init__(self) -> None: 
                 super().__init__()
                 self.l1 = torch.nn.Linear(1, 1)
                 self.buffer = torch.nn.Buffer(torch.ones(1))
diff --git a/test/test_static_runtime.py b/test/test_static_runtime.py
index 863f3c37c217..5665687446b1 100644
--- a/test/test_static_runtime.py
+++ b/test/test_static_runtime.py
@@ -156,7 +156,7 @@ def output_graph(a, b, c, iters: int):


 class SubModule(nn.Module):
-    def __init__(self):
+    def __init__(self) -> None:
         super().__init__()
         self.a = 11
         self.b = 2
@@ -166,7 +166,7 @@ class SubModule(nn.Module):


 class SubModule2(nn.Module):
-    def __init__(self):
+    def __init__(self) -> None:
         super().__init__()
         self.a = 12
         self.b = 2
@@ -177,7 +177,7 @@ class SubModule2(nn.Module):


 class TestModule(nn.Module):
-    def __init__(self):
+    def __init__(self) -> None:
         super().__init__()
         self.sub1 = SubModule()
         self.sub2 = SubModule2()
diff --git a/test/test_subclass.py b/test/test_subclass.py
index 996511145e07..f1474eebdea4 100644
--- a/test/test_subclass.py
+++ b/test/test_subclass.py
@@ -130,7 +130,7 @@ class TestSubclass(TestCase):
         create_fn = partial(self._create_tensor, tensor_cls)

         class MyModule(nn.Module):
-            def __init__(self):
+            def __init__(self) -> None:
                 super().__init__()
                 self.p1 = nn.Parameter(create_fn())

@@ -179,7 +179,7 @@ class TestSubclass(TestCase):
         create_fn = partial(self._create_tensor, tensor_cls)

         class MyModule(nn.Module):
-            def __init__(self):
+            def __init__(self) -> None:
                 super().__init__()
                 self.weight = nn.Parameter(create_fn())

@@ -206,7 +206,7 @@ class TestSubclass(TestCase):
             self.fail('dummy fail for base tensor until the test passes for subclasses')

         class MyLazyModule(LazyModuleMixin, nn.Module):
-            def __init__(self):
+            def __init__(self) -> None:
                 super().__init__()
                 self.param = nn.UninitializedParameter()
diff --git a/test/test_tensorboard.py b/test/test_tensorboard.py
index a6eddd66cddd..f292089b1dda 100644
--- a/test/test_tensorboard.py
+++ b/test/test_tensorboard.py
@@ -520,7 +520,7 @@ class TestTensorBoardPytorchGraph(BaseTestCase):
         dummy_input = (torch.zeros(1, 3),)

         class myLinear(torch.nn.Module):
-            def __init__(self):
+            def __init__(self) -> None:
                 super().__init__()
                 self.l = torch.nn.Linear(3, 5)
@@ -597,7 +597,7 @@ class TestTensorBoardPytorchGraph(BaseTestCase):

     def test_pytorch_graph_dict_input(self):
         class Model(torch.nn.Module):
-            def __init__(self):
+            def __init__(self) -> None:
                 super().__init__()
                 self.l = torch.nn.Linear(3, 5)
@@ -605,7 +605,7 @@ class TestTensorBoardPytorchGraph(BaseTestCase):
                 return self.l(x)

         class ModelDict(torch.nn.Module):
-            def __init__(self):
+            def __init__(self) -> None:
                 super().__init__()
                 self.l = torch.nn.Linear(3, 5)
@@ -640,7 +640,7 @@ class TestTensorBoardPytorchGraph(BaseTestCase):
         # However, it should not raise an error during
         # the add_graph call and still continue.
         class myMLP(torch.nn.Module):
-            def __init__(self):
+            def __init__(self) -> None:
                 super().__init__()
                 self.input_len = 1 * 28 * 28
                 self.fc1 = torch.nn.Linear(self.input_len, 1200)
diff --git a/test/test_tensorexpr.py b/test/test_tensorexpr.py
index a33c3afbf48d..c6e3c66f8ebf 100644
--- a/test/test_tensorexpr.py
+++ b/test/test_tensorexpr.py
@@ -1545,7 +1545,7 @@ class TestTensorExprFuser(BaseTestClass):

     def test_alias_analysis_module(self):
         class AliasModule(nn.Module):
-            def __init__(self):
+            def __init__(self) -> None:
                 super().__init__()
                 torch.manual_seed(1337)
                 self.a = torch.randn(128, 128)
@@ -1583,7 +1583,7 @@ class TestTensorExprFuser(BaseTestClass):

     def test_alias_analysis_inputs(self):
         class AliasModule(nn.Module):
-            def __init__(self):
+            def __init__(self) -> None:
                 super().__init__()
                 torch.manual_seed(1337)
                 self.a = torch.randn(128, 128)
@@ -1616,7 +1616,7 @@ class TestTensorExprFuser(BaseTestClass):

     def test_alias_analysis_input_and_module(self):
         class AliasModule(nn.Module):
-            def __init__(self):
+            def __init__(self) -> None:
                 super().__init__()
                 torch.manual_seed(1337)
                 self.a = torch.randn(128, 128)
diff --git a/test/test_testing.py b/test/test_testing.py
index f07a467cb812..da263d020ba5 100644
--- a/test/test_testing.py
+++ b/test/test_testing.py
@@ -2062,7 +2062,7 @@ class TestTestParametrizationDeviceType(TestCase):

         # Create a test module, ModuleInfo entry, and decorator to apply.
         class TestModule(torch.nn.Module):
-            def __init__(self):
+            def __init__(self) -> None:
                 super().__init__()
                 self.x = torch.nn.Parameter(torch.randn(3))
diff --git a/test/test_utils.py b/test/test_utils.py
index 7fb34b809f44..fe9d908c45a7 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -117,7 +117,7 @@ class TestCheckpoint(TestCase):
     # the number of times forward pass happens
     def test_checkpoint_trigger(self):
         class Net(nn.Module):
-            def __init__(self):
+            def __init__(self) -> None:
                 super().__init__()
                 self.counter = 0

@@ -218,7 +218,7 @@ class TestCheckpoint(TestCase):

     def test_checkpoint_module_list(self):
         class ModuleListNet(nn.Module):
-            def __init__(self):
+            def __init__(self) -> None:
                 super().__init__()
                 module_list = [
                     nn.Linear(100, 50),
diff --git a/test/test_vulkan.py b/test/test_vulkan.py
index a9093f4191f5..a93244bcc669 100644
--- a/test/test_vulkan.py
+++ b/test/test_vulkan.py
@@ -66,7 +66,7 @@ class TestVulkanRewritePass(TestCase):
         conv_bias_shape = (output_channels)

         class Conv2D(torch.nn.Module):
-            def __init__(self):
+            def __init__(self) -> None:
                 super().__init__()
                 self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
                 self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
@@ -86,7 +86,7 @@ class TestVulkanRewritePass(TestCase):
         TestVulkanRewritePass.validate_transformed_module(Conv2D(), pattern_count_map, data_shape)

         class Conv2DRelu(torch.nn.Module):
-            def __init__(self):
+            def __init__(self) -> None:
                 super().__init__()
                 self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
                 self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
@@ -125,7 +125,7 @@ class TestVulkanRewritePass(TestCase):

         class Conv2DHardtanh(torch.nn.Module):
-            def __init__(self):
+            def __init__(self) -> None:
                 super().__init__()
                 self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
                 self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
diff --git a/test/test_weak.py b/test/test_weak.py
index 11894584b13c..538fafe92f30
--- a/test/test_weak.py
+++ b/test/test_weak.py
@@ -444,7 +444,7 @@ class WeakKeyDictionaryTestCase(TestCase):
         outerself = self

         class SimpleUserDict:
-            def __init__(self):
+            def __init__(self) -> None:
                 self.d = outerself.reference

             def keys(self):
@@ -475,7 +475,7 @@ class WeakKeyDictionaryTestCase(TestCase):
         class FailingUserDict:
             def keys(self):
                 class BogonIter:
-                    def __init__(self):
+                    def __init__(self) -> None:
                         self.i = 1

                     def __iter__(self):
@@ -497,7 +497,7 @@ class WeakKeyDictionaryTestCase(TestCase):
         class FailingUserDict:
             def keys(self):
                 class BogonIter:
-                    def __init__(self):
+                    def __init__(self) -> None:
                         self.i = ord("a")

                     def __iter__(self):
@@ -785,7 +785,7 @@ class WeakKeyDictionaryScriptObjectTestCase(TestCase):
         outerself = self

         class SimpleUserDict:
-            def __init__(self):
+            def __init__(self) -> None:
                 self.d = outerself.reference

             def keys(self):
@@ -816,7 +816,7 @@ class WeakKeyDictionaryScriptObjectTestCase(TestCase):
         class FailingUserDict:
             def keys(self):
                 class BogonIter:
-                    def __init__(self):
+                    def __init__(self) -> None:
                         self.i = 1

                     def __iter__(self):
@@ -838,7 +838,7 @@ class WeakKeyDictionaryScriptObjectTestCase(TestCase):
         class FailingUserDict:
             def keys(self):
                 class BogonIter:
-                    def __init__(self):
+                    def __init__(self) -> None:
                         self.i = ord("a")

                     def __iter__(self):
diff --git a/test/test_xnnpack_integration.py b/test/test_xnnpack_integration.py
index b16de2114e0d..d7ae11177576 100644
--- a/test/test_xnnpack_integration.py
+++ b/test/test_xnnpack_integration.py
@@ -811,7 +811,7 @@ class TestXNNPACKRewritePass(TestCase):
         weight_shape = (weight_output_dim, data_shape[-1])

         class Linear(torch.nn.Module):
-            def __init__(self):
+            def __init__(self) -> None:
                 super().__init__()
                 self.weight = torch.nn.Parameter(
                     torch.rand(weight_shape), requires_grad=False
@@ -824,7 +824,7 @@ class TestXNNPACKRewritePass(TestCase):
                 return F.linear(x, self.weight, self.bias)

         class LinearNoBias(torch.nn.Module):
-            def __init__(self):
+            def __init__(self) -> None:
                 super().__init__()
                 self.weight = torch.nn.Parameter(
                     torch.rand(weight_shape), requires_grad=False
@@ -880,7 +880,7 @@ class TestXNNPACKRewritePass(TestCase):
         conv_bias_shape = output_channels

         class Conv2D(torch.nn.Module):
-            def __init__(self):
+            def __init__(self) -> None:
                 super().__init__()
                 self.weight = torch.nn.Parameter(
                     torch.rand(conv_weight_shape), requires_grad=False
@@ -905,7 +905,7 @@ class TestXNNPACKRewritePass(TestCase):
                 )

         class Conv2DT(torch.nn.Module):
-            def __init__(self):
+            def __init__(self) -> None:
                 super().__init__()
                 self.weight = torch.nn.Parameter(
                     torch.rand(conv_transpose_weight_shape), requires_grad=False
@@ -1104,7 +1104,7 @@ class TestXNNPACKRewritePass(TestCase):
         )

         class MFusionAntiPattern(torch.nn.Module):
-            def __init__(self):
+            def __init__(self) -> None:
                 super().__init__()
                 self.linear_weight = torch.nn.Parameter(
                     torch.rand(linear_weight_shape), requires_grad=False
@@ -1139,7 +1139,7 @@ class TestXNNPACKRewritePass(TestCase):
         )

         class MFusionAntiPatternParamMinMax(torch.nn.Module):
-            def __init__(self):
+            def __init__(self) -> None:
                 super().__init__()
                 self.linear_weight = torch.nn.Parameter(
                     torch.rand(linear_weight_shape), requires_grad=False
@@ -1179,7 +1179,7 @@ class TestXNNPACKRewritePass(TestCase):
         weight_shape = (weight_output_dim, data_shape[-1])

         class DecomposedLinearAddmm(torch.nn.Module):
-            def __init__(self):
+            def __init__(self) -> None:
                 super().__init__()
                 self.weight = torch.nn.Parameter(
                     torch.rand(weight_shape), requires_grad=False
@@ -1193,7 +1193,7 @@ class TestXNNPACKRewritePass(TestCase):
                 return torch.addmm(self.bias, x, weight_t)

         class DecomposedLinearMatmulAdd(torch.nn.Module):
-            def __init__(self):
+            def __init__(self) -> None:
                 super().__init__()
                 self.weight = torch.nn.Parameter(
                     torch.rand(weight_shape), requires_grad=False
@@ -1209,7 +1209,7 @@ class TestXNNPACKRewritePass(TestCase):
                 return res

         class DecomposedLinearMatmul(torch.nn.Module):
-            def __init__(self):
+            def __init__(self) -> None:
                 super().__init__()
                 self.weight = torch.nn.Parameter(
                     torch.rand(weight_shape), requires_grad=False
@@ -1349,7 +1349,7 @@ class TestXNNPACKConv1dTransformPass(TestCase):
         conv_bias_shape = output_channels

         class Conv1D(torch.nn.Module):
-            def __init__(self):
+            def __init__(self) -> None:
                 super().__init__()
                 self.weight = torch.nn.Parameter(
                     torch.rand(conv_weight_shape), requires_grad=False
@@ -1442,7 +1442,7 @@ class TestXNNPACKConv1dTransformPass(TestCase):
         fc_bias_shape = output_features

         class Net(torch.nn.Module):
-            def __init__(self):
+            def __init__(self) -> None:
                 super().__init__()
                 self.conv_weight = torch.nn.Parameter(
                     torch.rand(conv_weight_shape), requires_grad=False
diff --git a/test/torch_np/numpy_tests/core/test_multiarray.py b/test/torch_np/numpy_tests/core/test_multiarray.py
index 346bf4fdf127..085e8ee5a212 100644
--- a/test/torch_np/numpy_tests/core/test_multiarray.py
+++ b/test/torch_np/numpy_tests/core/test_multiarray.py
@@ -1152,7 +1152,7 @@ class TestCreation(TestCase):
     def test_no_len_object_type(self):
         # gh-5100, want object array from iterable object without len()
         class Point2:
-            def __init__(self):
+            def __init__(self) -> None:
                 pass

             def __getitem__(self, ind):