From 88ab3e43228b7440a33bf534cde493446a31538c Mon Sep 17 00:00:00 2001
From: Aaron Gokaslan
Date: Sun, 20 Aug 2023 01:36:14 +0000
Subject: [PATCH] [BE]: Update ruff to 0.285 (#107519)

This updates ruff to 0.285, which is faster, better, and fixes a bunch of false negatives with regard to f-strings. I also enabled RUF017, which looks for accidental quadratic list summation (e.g. sum(list_of_lists, [])). Luckily, it seems there are no instances of it in our codebase, so enabling the rule now keeps it that way. :)

Pull Request resolved: https://github.com/pytorch/pytorch/pull/107519
Approved by: https://github.com/ezyang
---
.lintrunner.toml | 2 +- .../torchvision_models.py | 2 +- .../operator_benchmark/benchmark_core.py | 6 +- benchmarks/overrides_benchmark/bench.py | 6 +- pyproject.toml | 1 + test/cpp_api_parity/functional_impl_check.py | 2 +- test/cpp_api_parity/module_impl_check.py | 14 ++-- test/nn/test_multihead_attention.py | 2 +- test/onnx/model_defs/squeezenet.py | 3 +- test/quantization/core/test_docs.py | 6 +- test/quantization/core/test_quantized_op.py | 8 +-- test/test_autocast.py | 2 +- test/test_binary_ufuncs.py | 6 +- test/test_cpp_extensions_jit.py | 11 ++-- test/test_cuda.py | 4 +- test/test_dispatch.py | 6 +- test/test_jit.py | 18 ++--- test/test_mobile_optimizer.py | 2 +- test/test_mps.py | 4 +- test/test_nn.py | 8 +-- test/test_ops.py | 6 +- test/test_reductions.py | 4 +- test/test_tensor_creation_ops.py | 10 ++- test/test_torch.py | 8 +-- test/test_transformers.py | 2 +- test/test_unary_ufuncs.py | 18 ++--- .../numpy_tests/core/test_multiarray.py | 2 +- .../numpy_tests/core/test_scalarmath.py | 4 +- .../numpy_tests/lib/test_histograms.py | 10 +-- tools/pyi/gen_pyi.py | 26 ++++---- torch/__init__.py | 4 +- torch/_lobpcg.py | 8 +-- torch/_namedtensor_internals.py | 23 +++---- torch/_numpy/testing/utils.py | 4 +- torch/_ops.py | 6 +- torch/_prims/__init__.py | 16 ++--- torch/_refs/nn/functional/__init__.py | 4 +- torch/ao/nn/quantizable/modules/rnn.py | 4 +- .../ao/nn/quantized/dynamic/modules/linear.py | 2 +- torch/ao/nn/quantized/dynamic/modules/rnn.py | 4 +- torch/ao/quantization/fx/convert.py | 4 +- torch/ao/quantization/fx/prepare.py | 2 +- torch/ao/quantization/pt2e/qat_utils.py | 8 +-- torch/ao/quantization/utils.py | 6 +- torch/autograd/functional.py | 66 ++++++++----------- torch/autograd/profiler_util.py | 18 ++--- torch/backends/cuda/__init__.py | 6 +- torch/distributed/distributed_c10d.py | 4 +- torch/distributed/nn/api/remote_module.py | 10 ++- torch/distributed/pipeline/sync/pipe.py | 4 +- torch/distributions/independent.py | 4 +- .../multipledispatch/dispatcher.py | 9 ++- torch/jit/_recursive.py | 16 ++--- torch/jit/_script.py | 8 +-- torch/jit/_trace.py | 6 +- torch/nn/functional.py | 36 +++++----- torch/nn/init.py | 5 +- torch/nn/modules/adaptive.py | 8 +-- torch/nn/modules/container.py | 6 +- torch/nn/modules/conv.py | 7 +- torch/nn/modules/module.py | 48 +++++++------- torch/nn/modules/rnn.py | 4 +- torch/nn/parallel/data_parallel.py | 8 +-- torch/nn/parallel/distributed.py | 4 +- torch/nn/parallel/parallel_apply.py | 4 +- torch/nn/parameter.py | 6 +- torch/nn/utils/parametrizations.py | 2 +- torch/nn/utils/prune.py | 10 +-- torch/nn/utils/rnn.py | 4 +- torch/nn/utils/spectral_norm.py | 2 +- torch/optim/lr_scheduler.py | 8 +-- torch/overrides.py | 6 +- torch/quasirandom.py | 6 +- torch/serialization.py | 6 +- torch/testing/_internal/common_device_type.py | 2 +- torch/testing/_internal/common_utils.py | 18 +++-- torch/testing/_internal/hypothesis_utils.py | 2 +-
torch/testing/_internal/jit_utils.py | 2 +- torch/utils/bundled_inputs.py | 13 ++-- torch/utils/cpp_extension.py | 6 +- torch/utils/data/dataloader.py | 20 +++--- torch/utils/data/datapipes/_decorator.py | 14 ++-- torch/utils/data/sampler.py | 2 +- torch/utils/hipify/hipify_python.py | 2 +- torch/utils/hooks.py | 8 +-- torch/utils/mobile_optimizer.py | 4 +- 86 files changed, 319 insertions(+), 403 deletions(-) diff --git a/.lintrunner.toml b/.lintrunner.toml index 3ddb4933cfee..cbe86939222d 100644 --- a/.lintrunner.toml +++ b/.lintrunner.toml @@ -2664,6 +2664,6 @@ init_command = [ 'python3', 'tools/linter/adapters/pip_init.py', '--dry-run={{DRYRUN}}', - 'ruff==0.0.280', + 'ruff==0.0.285', ] is_formatter = true diff --git a/benchmarks/functional_autograd_benchmark/torchvision_models.py b/benchmarks/functional_autograd_benchmark/torchvision_models.py index 3c1f2b471381..595259bd247d 100644 --- a/benchmarks/functional_autograd_benchmark/torchvision_models.py +++ b/benchmarks/functional_autograd_benchmark/torchvision_models.py @@ -168,7 +168,7 @@ class ResNet(nn.Module): if len(replace_stride_with_dilation) != 3: raise ValueError( "replace_stride_with_dilation should be None " - "or a 3-element tuple, got {}".format(replace_stride_with_dilation) + f"or a 3-element tuple, got {replace_stride_with_dilation}" ) self.groups = groups self.base_width = width_per_group diff --git a/benchmarks/operator_benchmark/benchmark_core.py b/benchmarks/operator_benchmark/benchmark_core.py index 73a43d65ff98..439cdba8b362 100644 --- a/benchmarks/operator_benchmark/benchmark_core.py +++ b/benchmarks/operator_benchmark/benchmark_core.py @@ -200,10 +200,10 @@ class BenchmarkRunner: def _print_header(self): DASH_LINE = "-" * 40 print( - "# {}\n" + f"# {DASH_LINE}\n" "# PyTorch/Caffe2 Operator Micro-benchmarks\n" - "# {}\n" - "# Tag : {}\n".format(DASH_LINE, DASH_LINE, self.args.tag_filter) + f"# {DASH_LINE}\n" + f"# Tag : {self.args.tag_filter}\n" ) if self.args.list_tests: print("# List of tests:") diff --git a/benchmarks/overrides_benchmark/bench.py b/benchmarks/overrides_benchmark/bench.py index 0811d0e0d762..93c7f1b130d4 100644 --- a/benchmarks/overrides_benchmark/bench.py +++ b/benchmarks/overrides_benchmark/bench.py @@ -57,10 +57,8 @@ def main(): bench_min, bench_std = bench(tensor_1, tensor_2) print( - "Type {} had a minimum time of {} us" - " and a standard deviation of {} us.".format( - t.__name__, (10**6 * bench_min), (10**6) * bench_std - ) + f"Type {t.__name__} had a minimum time of {10**6 * bench_min} us" + f" and a standard deviation of {(10**6) * bench_std} us." 
) diff --git a/pyproject.toml b/pyproject.toml index 220986d1160e..eb764cb895fc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -74,6 +74,7 @@ select = [ "PIE807", "PIE810", "PLE", + "RUF017", "TRY302", ] diff --git a/test/cpp_api_parity/functional_impl_check.py b/test/cpp_api_parity/functional_impl_check.py index 828f57e7e698..ad56e917ef6d 100644 --- a/test/cpp_api_parity/functional_impl_check.py +++ b/test/cpp_api_parity/functional_impl_check.py @@ -173,7 +173,7 @@ def write_test_to_test_class( assert not ('cpp_options_args' in test_params_dict and 'cpp_function_call' in test_params_dict), ( "Only one of `cpp_options_args` and `cpp_function_call` entries " - "should be present in test params dict:\n{}").format(pprint.pformat(test_params_dict)) + f"should be present in test params dict:\n{pprint.pformat(test_params_dict)}") functional_name = compute_functional_name(test_params_dict) diff --git a/test/cpp_api_parity/module_impl_check.py b/test/cpp_api_parity/module_impl_check.py index aa18798940ae..1aa6273f0d99 100644 --- a/test/cpp_api_parity/module_impl_check.py +++ b/test/cpp_api_parity/module_impl_check.py @@ -209,11 +209,11 @@ def process_test_params_for_module(test_params_dict, device, test_instance_class if 'constructor_args' in test_params_dict: assert 'cpp_constructor_args' in test_params_dict, ( "If `constructor_args` is present in test params dict, to enable C++ API parity test, " - "`cpp_constructor_args` must be present in:\n{}" + f"`cpp_constructor_args` must be present in:\n{pprint.pformat(test_params_dict)}" "If you are interested in adding the C++ API parity test, please see:\n" "NOTE [How to check NN module / functional API parity between Python and C++ frontends]. \n" "If not, please add `test_cpp_api_parity=False` to the test params dict and file an issue about this." - ).format(pprint.pformat(test_params_dict)) + ) return TorchNNModuleTestParams( module_name=module_name, @@ -233,16 +233,16 @@ def write_test_to_test_class( module_name = compute_module_name(test_params_dict) assert hasattr(torch.nn, module_name), ( - "`torch.nn` doesn't have module `{}`. " + f"`torch.nn` doesn't have module `{module_name}`. " "If you are adding a new test, please set `fullname` using format `ModuleName_desc` " - "or set `module_name` using format `ModuleName` in the module test dict:\n{}" - ).format(module_name, pprint.pformat(test_params_dict)) + f"or set `module_name` using format `ModuleName` in the module test dict:\n{pprint.pformat(test_params_dict)}" + ) module_full_name = 'torch::nn::' + module_name assert module_full_name in parity_table['torch::nn'], ( - "Please add `{}` entry to `torch::nn` section of `test/cpp_api_parity/parity-tracker.md`. " - "(Discovered while processing\n{}.)").format(module_full_name, pprint.pformat(test_params_dict)) + f"Please add `{module_full_name}` entry to `torch::nn` section of `test/cpp_api_parity/parity-tracker.md`. 
" + f"(Discovered while processing\n{pprint.pformat(test_params_dict)}.)") for device in devices: test_params = process_test_params_for_module( diff --git a/test/nn/test_multihead_attention.py b/test/nn/test_multihead_attention.py index 7c1ba2084c29..d5ae098b1d25 100644 --- a/test/nn/test_multihead_attention.py +++ b/test/nn/test_multihead_attention.py @@ -329,7 +329,7 @@ class TestMultiheadAttentionNN(NNTestCase): key = torch.rand(batch_size, src_len, embed_dim) # [N, S, D] value = key # [N, S, D] attn_mask = torch.randint(0, 2, (batch_size, tgt_len, src_len)).float() # [N, T, S] - attn_mask = attn_mask.masked_fill(attn_mask == 0, float('-inf')).masked_fill(attn_mask == 1, float(0.0)) + attn_mask = attn_mask.masked_fill(attn_mask == 0, float('-inf')).masked_fill(attn_mask == 1, 0.0) mta_model = torch.nn.MultiheadAttention(embed_dim, num_heads) diff --git a/test/onnx/model_defs/squeezenet.py b/test/onnx/model_defs/squeezenet.py index b6d83cd53059..a97b399c7dac 100644 --- a/test/onnx/model_defs/squeezenet.py +++ b/test/onnx/model_defs/squeezenet.py @@ -32,8 +32,7 @@ class SqueezeNet(nn.Module): super().__init__() if version not in [1.0, 1.1]: raise ValueError( - "Unsupported SqueezeNet version {version}:" - "1.0 or 1.1 expected".format(version=version) + f"Unsupported SqueezeNet version {version}:" "1.0 or 1.1 expected" ) self.num_classes = num_classes if version == 1.0: diff --git a/test/quantization/core/test_docs.py b/test/quantization/core/test_docs.py index 82ef4d277805..a5a49f2ff2fa 100644 --- a/test/quantization/core/test_docs.py +++ b/test/quantization/core/test_docs.py @@ -82,10 +82,8 @@ class TestQuantizationDocs(QuantizationTestCase): # want to make sure we are actually getting some code, assert last_line_num - line_num_start > 3 or short_snippet, ( - "The code in {} identified by {} seems suspiciously short:" - "\n\n###code-start####\n{}###code-end####".format( - path_to_file, unique_identifier, code - ) + f"The code in {path_to_file} identified by {unique_identifier} seems suspiciously short:" + f"\n\n###code-start####\n{code}###code-end####" ) return code diff --git a/test/quantization/core/test_quantized_op.py b/test/quantization/core/test_quantized_op.py index da7ef8347271..1d84f29cd66d 100644 --- a/test/quantization/core/test_quantized_op.py +++ b/test/quantization/core/test_quantized_op.py @@ -806,11 +806,11 @@ class TestQuantizedOps(TestCase): C_relu, C_relu_hat.q_scale(), C_relu_hat.q_zero_point(), dtype) self.assertEqual(C_ref.dequantize(), C_hat.dequantize(), - msg="{}_scalar results don't match: " - "{} vs {}".format(binary_op_name, C_ref.dequantize(), C_hat.dequantize())) + msg=f"{binary_op_name}_scalar results don't match: " + f"{C_ref.dequantize()} vs {C_hat.dequantize()}") self.assertEqual(C_relu_ref.dequantize(), C_relu_hat.dequantize(), - msg="{}_scalar_relu results don't match: " - "{} vs {}".format(binary_op_name, C_relu_ref.dequantize(), C_relu_hat.dequantize())) + msg=f"{binary_op_name}_scalar_relu results don't match: " + f"{C_relu_ref.dequantize()} vs {C_relu_hat.dequantize()}") @unittest.skipIf(IS_MACOS, "skipping macos test") @given(A=hu.tensor(shapes=hu.array_shapes(1, 4, 1, 5), diff --git a/test/test_autocast.py b/test/test_autocast.py index 7e449df11fef..79499d8cb50d 100644 --- a/test/test_autocast.py +++ b/test/test_autocast.py @@ -67,7 +67,7 @@ class TestAutocastCPU(TestCase): if (output is not None) and (output_method is not None): self.assertTrue(type(output) == type(output_method)) comparison = compare(output, output_method) - 
self.assertTrue(comparison, "torch.{0} result did not match Tensor.{0} result".format(op)) + self.assertTrue(comparison, f"torch.{op} result did not match Tensor.{op} result") # Compare numerics to Python-side "autocasting" that (we expect) does the same thing # as the C++-side autocasting, and should be bitwise accurate. diff --git a/test/test_binary_ufuncs.py b/test/test_binary_ufuncs.py index 6c044cca9afe..a61af4b4b4d8 100644 --- a/test/test_binary_ufuncs.py +++ b/test/test_binary_ufuncs.py @@ -168,9 +168,9 @@ class TestBinaryUfuncs(TestCase): if _numel(l) <= 100 and _numel(r) <= 100: msg = ( "Failed to produce expected results! Input lhs tensor was" - " {}, rhs tensor was {}, torch result is {}, and reference result is" - " {}." - ).format(l, r, actual, expected) + f" {l}, rhs tensor was {r}, torch result is {actual}, and reference result is" + f" {expected}." + ) else: msg = None diff --git a/test/test_cpp_extensions_jit.py b/test/test_cpp_extensions_jit.py index ef3dc560e207..9862ff959541 100644 --- a/test/test_cpp_extensions_jit.py +++ b/test/test_cpp_extensions_jit.py @@ -149,17 +149,14 @@ class TestCppExtensionJIT(common.TestCase): err = err.decode("ascii") if not p.returncode == 0 or not err == '': - raise AssertionError("Flags: {}\nReturncode: {}\nStderr: {}\n" - "Output: {} ".format(flags, p.returncode, - err, output)) + raise AssertionError(f"Flags: {flags}\nReturncode: {p.returncode}\nStderr: {err}\n" + f"Output: {output} ") actual_arches = sorted(re.findall(r'sm_\d\d', output)) expected_arches = sorted(['sm_' + xx for xx in expected_values]) self.assertEqual(actual_arches, expected_arches, - msg="Flags: {}, Actual: {}, Expected: {}\n" - "Stderr: {}\nOutput: {}".format( - flags, actual_arches, expected_arches, - err, output)) + msg=f"Flags: {flags}, Actual: {actual_arches}, Expected: {expected_arches}\n" + f"Stderr: {err}\nOutput: {output}") temp_dir = tempfile.mkdtemp() old_envvar = os.environ.get('TORCH_CUDA_ARCH_LIST', None) diff --git a/test/test_cuda.py b/test/test_cuda.py index 43914649992c..5f637ac9fb85 100644 --- a/test/test_cuda.py +++ b/test/test_cuda.py @@ -168,7 +168,7 @@ class TestCuda(TestCase): def test_set_per_process_memory_fraction(self): # test invalid fraction value. with self.assertRaisesRegex(TypeError, "Invalid type"): - torch.cuda.set_per_process_memory_fraction(int(1)) + torch.cuda.set_per_process_memory_fraction(1) with self.assertRaisesRegex(ValueError, "Invalid fraction value"): torch.cuda.set_per_process_memory_fraction(-0.1) with self.assertRaisesRegex(ValueError, "Invalid fraction value"): @@ -1765,7 +1765,7 @@ torch.cuda.synchronize() if (output is not None) and (output_method is not None): self.assertTrue(type(output) == type(output_method)) comparison = compare(output, output_method) - self.assertTrue(comparison, "torch.{0} result did not match Tensor.{0} result".format(op)) + self.assertTrue(comparison, f"torch.{op} result did not match Tensor.{op} result") # Compare numerics to Python-side "autocasting" that (we expect) does the same thing # as the C++-side autocasting, and should be bitwise accurate. diff --git a/test/test_dispatch.py b/test/test_dispatch.py index cb485bda7af4..5b03c49c3090 100644 --- a/test/test_dispatch.py +++ b/test/test_dispatch.py @@ -152,10 +152,10 @@ class TestDispatch(TestCase): # NB: this finally test asserts that if a registrations fails, # the dispatcher is left in the same state *that it was before*! 
check_invariants( - "running ctors {} and then failing to run ctor {} " + f"running ctors {ctor_order[:i]} and then failing to run ctor {op_ix} " "(did this failure leave the dispatcher in a wedged state? " "it shouldn't!)" - .format(ctor_order[:i], op_ix)) + ) break last_ctor = i if expect_raises and len(active_ops) == len(ops): @@ -165,7 +165,7 @@ class TestDispatch(TestCase): self.assertTrue( False, "expected exception to be raised, but nothing was raised " - "(after running ctors {})".format(ctor_order)) + f"(after running ctors {ctor_order})") # In the order specified by dtor_order, run deregistrations for i, op_ix in enumerate(dtor_order): # Trigger a destruction diff --git a/test/test_jit.py b/test/test_jit.py index 03fc3679e2c3..cb71f5c888a8 100644 --- a/test/test_jit.py +++ b/test/test_jit.py @@ -5810,19 +5810,19 @@ a") raise RuntimeError('Unknown dtype') if binary: - code = ''' + code = f''' graph(%3 : Tensor, %4 : Tensor): - %2 : {dtype}(*, *) = aten::{op}(%3, %4) - %1 : {dtype}(*, *) = aten::relu(%2) + %2 : {dtype_str}(*, *) = aten::{op}(%3, %4) + %1 : {dtype_str}(*, *) = aten::relu(%2) return (%1) - '''.format(op=op, dtype=dtype_str) + ''' else: - code = ''' + code = f''' graph(%3 : Tensor): - %2 : {dtype}(*, *) = aten::{op}(%3) - %1 : {dtype}(*, *) = aten::relu(%2) + %2 : {dtype_str}(*, *) = aten::{op}(%3) + %1 : {dtype_str}(*, *) = aten::relu(%2) return (%1) - '''.format(op=op, dtype=dtype_str) + ''' graph = parse_ir(code) inputs = (2 if binary else 1) * [torch.rand(26, 2048, dtype=dtype)] @@ -14936,7 +14936,7 @@ dedent """ value = torch.rand((src_l, bsz, embed_size)) mask = (torch.triu(torch.ones(src_l, src_l)) == 1).transpose(0, 1) - mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0)).to(torch.get_default_dtype()) + mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, 0.0).to(torch.get_default_dtype()) jit_out = jit_multihead_attn_forward(query, key, value, embed_size, nhead, diff --git a/test/test_mobile_optimizer.py b/test/test_mobile_optimizer.py index eeb62ecc4bc4..e672d69ab5dd 100644 --- a/test/test_mobile_optimizer.py +++ b/test/test_mobile_optimizer.py @@ -582,7 +582,7 @@ class TestOptimizer(TestCase): self.assertTrue( cloned.qualified_name.startswith('__torch__.'), ("Expected the cloned module's name to start with the string " - "'__torch__.', but got: {}").format(cloned.qualified_name), + f"'__torch__.', but got: {cloned.qualified_name}"), ) diff --git a/test/test_mps.py b/test/test_mps.py index b2f7d649be50..e4cdd1dbfa0a 100644 --- a/test/test_mps.py +++ b/test/test_mps.py @@ -9436,8 +9436,8 @@ class TestConvolutionMPS(TestCaseMPS): output = F.grid_sample(input, grid, mode=mode, padding_mode=padding_mode, align_corners=align_corners) self.assertEqual(output, groundtruth, atol=1e-5, rtol=0, - msg="groundtruth comparison failed for mode={}, " - "padding_mode={}".format(mode, padding_mode)) + msg=f"groundtruth comparison failed for mode={mode}, " + f"padding_mode={padding_mode}") class TestAdvancedIndexing(TestCaseMPS): supported_dtypes = [torch.float32, torch.float16, torch.int64, torch.int32, torch.int16, torch.uint8] diff --git a/test/test_nn.py b/test/test_nn.py index 85aa6034600b..556e184c9940 100644 --- a/test/test_nn.py +++ b/test/test_nn.py @@ -5959,8 +5959,8 @@ tensor(..., device='meta', size=(1,), requires_grad=True)""") output = F.grid_sample(input, grid, mode=mode, padding_mode=padding_mode, align_corners=align_corners) self.assertEqual(output, groundtruth, atol=1e-5, rtol=0, - 
msg="groundtruth comparison failed for mode={}, " - "padding_mode={}".format(mode, padding_mode)) + msg=f"groundtruth comparison failed for mode={mode}, " + f"padding_mode={padding_mode}") # See NOTE [ grid_sample CPU fallback ] output = torch._grid_sampler_2d_cpu_fallback( @@ -6047,8 +6047,8 @@ tensor(..., device='meta', size=(1,), requires_grad=True)""") F.grid_sample(input, grid, mode=mode, padding_mode=padding_mode, align_corners=align_corners).sum().backward() self.assertEqual(grid.grad, groundtruth, atol=1e-5, rtol=0, - msg="gradient groundtruth comparison failed for mode={}, " - "padding_mode={}, input_requires_grad={}".format(mode, padding_mode, input_requires_grad)) + msg=f"gradient groundtruth comparison failed for mode={mode}, " + f"padding_mode={padding_mode}, input_requires_grad={input_requires_grad}") grid.grad.zero_() # See NOTE [ grid_sample CPU fallback ] diff --git a/test/test_ops.py b/test/test_ops.py index ab2d1aa599a7..a3b14857cae0 100644 --- a/test/test_ops.py +++ b/test/test_ops.py @@ -1098,10 +1098,8 @@ class TestCommon(TestCase): RuntimeError, msg=( "inplace variant either incorrectly allowed " - "resizing or you have marked the sample {}" - " incorrectly with `broadcasts_self=True".format( - sample.summary() - ) + f"resizing or you have marked the sample {sample.summary()}" + " incorrectly with `broadcasts_self=True" ), ): variant_forward = variant( diff --git a/test/test_reductions.py b/test/test_reductions.py index 8cb0a64ce298..7ba000f47602 100644 --- a/test/test_reductions.py +++ b/test/test_reductions.py @@ -3503,8 +3503,8 @@ as the input tensor excluding its innermost dimension'): expected = np.asarray(expected) # transform numpy scalars to numpy.ndarray instances msg = ("Failed to produce expected results! Input tensor was" - " {}, torch result is {}, and reference result is" - " {}.").format(t, actual, expected) if t.numel() < 10 else None + f" {t}, torch result is {actual}, and reference result is" + f" {expected}.") if t.numel() < 10 else None self.assertEqual(actual, expected, msg, exact_dtype=exact_dtype) diff --git a/test/test_tensor_creation_ops.py b/test/test_tensor_creation_ops.py index cc419adf8d2c..815e28282c3b 100644 --- a/test/test_tensor_creation_ops.py +++ b/test/test_tensor_creation_ops.py @@ -451,9 +451,8 @@ class TestTensorCreation(TestCase): other_dtype = torch.float64 if dtype == torch.float32 else torch.float32 a = torch.tensor([1, 2], device=device, dtype=dtype) b = torch.tensor([3, 4], device=device, dtype=other_dtype) - error = "Expected object of scalar type {} but got scalar type " \ - "{} for second argument".format(dtype_name(dtype), - dtype_name(other_dtype)) + error = f"Expected object of scalar type {dtype_name(dtype)} but got scalar type " \ + f"{dtype_name(other_dtype)} for second argument" with self.assertRaisesRegex(RuntimeError, error): op(a, b) @@ -472,9 +471,8 @@ class TestTensorCreation(TestCase): b = torch.tensor([3, 4], device=device, dtype=dtype) out = torch.zeros(2, device=device, dtype=dtype) expected_dtype = torch.complex64 if dtype == torch.float32 else torch.complex128 - error = "Expected object of scalar type {} but got scalar type " \ - "{} for argument 'out'".format( - complex_dtype_name(expected_dtype), dtype_name(dtype)) + error = f"Expected object of scalar type {complex_dtype_name(expected_dtype)} but got scalar type " \ + f"{dtype_name(dtype)} for argument 'out'" with self.assertRaisesRegex(RuntimeError, error): op(a, b, out=out) diff --git a/test/test_torch.py b/test/test_torch.py index 
ab1a92bdb437..2d0e02de141c 100644 --- a/test/test_torch.py +++ b/test/test_torch.py @@ -8682,8 +8682,8 @@ tensor([[[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j], for common_args in [multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args]: for k, v in common_args.items(): - self.assertNotIn(v, desc, 'The argument description "{}" in {} can be ' - 'replaced by {{{}}}'.format(v, func, k)) + self.assertNotIn(v, desc, f'The argument description "{v}" in {func} can be ' + f'replaced by {{{k}}}') def test_doc(self): checked_types = (types.MethodType, types.FunctionType, @@ -8719,8 +8719,8 @@ tensor([[[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j], full_name = ns_name + '.' + name if any(r.match(name) for r in skip_regexes): self.assertFalse(has_doc, - 'New docs have been added for {}, please remove ' - 'it from the skipped list in TestTorch.test_doc'.format(full_name)) + f'New docs have been added for {full_name}, please remove ' + 'it from the skipped list in TestTorch.test_doc') else: self.assertTrue(has_doc, f'{full_name} is missing documentation') diff --git a/test/test_transformers.py b/test/test_transformers.py index b7ff2aadb15c..5a33300cce55 100644 --- a/test/test_transformers.py +++ b/test/test_transformers.py @@ -147,7 +147,7 @@ class TestTransformers(NNTestCase): query = torch.rand(batch_size, tgt_len, embed_dim, device=device) # [N, T, D] attn_mask = torch.randint(0, 2, (tgt_len, tgt_len)).cuda().float() # [T, T] - attn_mask = attn_mask.masked_fill(attn_mask == 0, float('-inf')).masked_fill(attn_mask == 1, float(0.0)) + attn_mask = attn_mask.masked_fill(attn_mask == 0, float('-inf')).masked_fill(attn_mask == 1, 0.0) attn_mask_4d = attn_mask.expand(batch_size, num_heads, tgt_len, tgt_len) diff --git a/test/test_unary_ufuncs.py b/test/test_unary_ufuncs.py index 6c6744b6b781..eb6e270cde57 100644 --- a/test/test_unary_ufuncs.py +++ b/test/test_unary_ufuncs.py @@ -101,9 +101,9 @@ class TestUnaryUfuncs(TestCase): result.item(), float("nan"), msg=( - "input of {} outside lower domain boundary" - " {} produced {}, not nan!" - ).format(lower_tensor.item(), low, result.item()), + f"input of {lower_tensor.item()} outside lower domain boundary" + f" {low} produced {result.item()}, not nan!" + ), ) if high is not None: @@ -120,9 +120,9 @@ class TestUnaryUfuncs(TestCase): result.item(), float("nan"), msg=( - "input of {} outside upper domain boundary" - " {} produced {}, not nan!" - ).format(higher_tensor.item(), high, result.item()), + f"input of {higher_tensor.item()} outside upper domain boundary" + f" {high} produced {result.item()}, not nan!" + ), ) # Helper for comparing torch tensors and numpy arrays @@ -245,9 +245,9 @@ class TestUnaryUfuncs(TestCase): if t.numel() < 10: msg = ( "Failed to produce expected results! Input tensor was" - " {}, torch result is {}, and reference result is" - " {}." - ).format(t, actual, expected) + f" {t}, torch result is {actual}, and reference result is" + f" {expected}." 
+ ) else: msg = None diff --git a/test/torch_np/numpy_tests/core/test_multiarray.py b/test/torch_np/numpy_tests/core/test_multiarray.py index 65bd6edb353b..5f48d1c848ed 100644 --- a/test/torch_np/numpy_tests/core/test_multiarray.py +++ b/test/torch_np/numpy_tests/core/test_multiarray.py @@ -6431,7 +6431,7 @@ class TestWhere: e = float("-Infinity") assert_equal(np.where(True, d, e).dtype, np.float32) # also check upcast - e = float(1e150) + e = 1e150 assert_equal(np.where(True, d, e).dtype, np.float64) def test_ndim(self): diff --git a/test/torch_np/numpy_tests/core/test_scalarmath.py b/test/torch_np/numpy_tests/core/test_scalarmath.py index 0e2658b68933..99f404ac4c45 100644 --- a/test/torch_np/numpy_tests/core/test_scalarmath.py +++ b/test/torch_np/numpy_tests/core/test_scalarmath.py @@ -264,9 +264,7 @@ class TestPower: a = t1(3) b = t2(2) result = a**b - msg = ("error with {!r} and {!r}:" "got {!r}, expected {!r}").format( - t1, t2, result, 9 - ) + msg = f"error with {t1!r} and {t2!r}:" f"got {result!r}, expected {9!r}" if np.issubdtype(np.dtype(result), np.integer): assert_(result == 9, msg) else: diff --git a/test/torch_np/numpy_tests/lib/test_histograms.py b/test/torch_np/numpy_tests/lib/test_histograms.py index 0482d2a6cce6..8ffd1e7d9cf6 100644 --- a/test/torch_np/numpy_tests/lib/test_histograms.py +++ b/test/torch_np/numpy_tests/lib/test_histograms.py @@ -485,8 +485,8 @@ class TestHistogramOptimBinNums: assert_equal( len(a), numbins, - err_msg="For the {} estimator " - "with datasize of {}".format(estimator, testlen), + err_msg=f"For the {estimator} estimator " + f"with datasize of {testlen}", ) def test_small(self): @@ -532,8 +532,8 @@ class TestHistogramOptimBinNums: assert_equal( len(a), expbins, - err_msg="For the {} estimator " - "with datasize of {}".format(estimator, testlen), + err_msg=f"For the {estimator} estimator " + f"with datasize of {testlen}", ) def test_incorrect_methods(self): @@ -566,7 +566,7 @@ class TestHistogramOptimBinNums: assert_equal( len(a), numbins, - err_msg="{} estimator, " "No Variance test".format(estimator), + err_msg=f"{estimator} estimator, " "No Variance test", ) def test_limited_variance(self): diff --git a/tools/pyi/gen_pyi.py b/tools/pyi/gen_pyi.py index d255852be112..1f01d96ad5cc 100644 --- a/tools/pyi/gen_pyi.py +++ b/tools/pyi/gen_pyi.py @@ -462,7 +462,7 @@ def gen_nn_functional(fm: FileManager) -> None: "pdist", "cosine_similarity", ] - imported_hints = ["from .. import {0} as {0}".format(_) for _ in torch_imports] + imported_hints = [f"from .. import {_} as {_}" for _ in torch_imports] # Functions imported into `torch.nn.functional` from `torch._C._nn` c_nn_imports = [ @@ -479,9 +479,7 @@ def gen_nn_functional(fm: FileManager) -> None: "one_hot", "scaled_dot_product_attention", ] - imported_hints += [ - "from .._C._nn import {0} as {0}".format(_) for _ in c_nn_imports - ] + imported_hints += [f"from .._C._nn import {_} as {_}" for _ in c_nn_imports] # This is from `torch._C._nn` but renamed imported_hints.append("from .._C._nn import log_sigmoid\nlogsigmoid = log_sigmoid") @@ -875,15 +873,13 @@ def gen_pyi( ) for binop in ["mul", "true_divide", "floor_divide"]: unsorted_function_hints[binop].append( - "def {}(input: Union[Tensor, Number], other: Union[Tensor, Number], " - "*, out: Optional[Tensor] = None) -> Tensor: ...".format(binop) + f"def {binop}(input: Union[Tensor, Number], other: Union[Tensor, Number], " + "*, out: Optional[Tensor] = None) -> Tensor: ..." 
) for binop in ["add", "sub"]: unsorted_function_hints[binop].append( - "def {}(input: Union[Tensor, Number], other: Union[Tensor, Number], " - "*, alpha: Optional[Number] = 1, out: Optional[Tensor] = None) -> Tensor: ...".format( - binop - ) + f"def {binop}(input: Union[Tensor, Number], other: Union[Tensor, Number], " + "*, alpha: Optional[Number] = 1, out: Optional[Tensor] = None) -> Tensor: ..." ) native_functions = parse_native_yaml( @@ -1086,8 +1082,8 @@ def gen_pyi( binop += "_" out_suffix = "" unsorted_tensor_method_hints[binop].append( - "def {}(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat]{})" - " -> Tensor: ...".format(binop, out_suffix) + f"def {binop}(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat]{out_suffix})" + " -> Tensor: ..." ) for binop in ["add", "sub"]: for inplace in [False, True]: @@ -1096,9 +1092,9 @@ def gen_pyi( binop += "_" out_suffix = "" unsorted_tensor_method_hints[binop].append( - "def {}(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat], " - "*, alpha: Optional[Number] = 1{})" - " -> Tensor: ...".format(binop, out_suffix) + f"def {binop}(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat], " + f"*, alpha: Optional[Number] = 1{out_suffix})" + " -> Tensor: ..." ) simple_conversions = [ "byte", diff --git a/torch/__init__.py b/torch/__init__.py index 1f2b5c12b562..2bcdcce0ba8c 100644 --- a/torch/__init__.py +++ b/torch/__init__.py @@ -1730,8 +1730,8 @@ def _register_device_module(device_type, module): device_type = torch.device(device_type).type m = sys.modules[__name__] if hasattr(m, device_type): - raise RuntimeError("The runtime module of '{}' has already " - "been registered with '{}'".format(device_type, getattr(m, device_type))) + raise RuntimeError(f"The runtime module of '{device_type}' has already " + f"been registered with '{getattr(m, device_type)}'") setattr(m, device_type, module) torch_module_name = '.'.join([__name__, device_type]) sys.modules[torch_module_name] = module diff --git a/torch/_lobpcg.py b/torch/_lobpcg.py index 16f37cdf6a4a..a5ed5cf8fcfd 100644 --- a/torch/_lobpcg.py +++ b/torch/_lobpcg.py @@ -614,8 +614,8 @@ def _lobpcg( if m < 3 * n: raise ValueError( - "LPBPCG algorithm is not applicable when the number of A rows (={})" - " is smaller than 3 x the number of requested eigenpairs (={})".format(m, n) + f"LPBPCG algorithm is not applicable when the number of A rows (={m})" + f" is smaller than 3 x the number of requested eigenpairs (={n})" ) method = "ortho" if method is None else method @@ -1151,9 +1151,7 @@ class LOBPCG: assert B is not None raise ValueError( "Overdetermined shape of U:" - " #B-cols(={}) >= #U-cols(={}) + #V-cols(={}) must hold".format( - B.shape[-1], U.shape[-1], V.shape[-1] - ) + f" #B-cols(={B.shape[-1]}) >= #U-cols(={U.shape[-1]}) + #V-cols(={V.shape[-1]}) must hold" ) self.ivars["ortho_i"] = i self.ivars["ortho_j"] = j diff --git a/torch/_namedtensor_internals.py b/torch/_namedtensor_internals.py index 47bdcd82d14a..cbc9de2de091 100644 --- a/torch/_namedtensor_internals.py +++ b/torch/_namedtensor_internals.py @@ -50,8 +50,8 @@ def single_ellipsis_index(names, fn_name): ellipsis_indices = [i for i, name in enumerate(names) if is_ellipsis(name)] if len(ellipsis_indices) >= 2: raise RuntimeError( - "{}: More than one Ellipsis ('...') found in names (" - "{}). This function supports up to one Ellipsis.".format(fn_name, names) + f"{fn_name}: More than one Ellipsis ('...') found in names (" + f"{names}). This function supports up to one Ellipsis." 
) if len(ellipsis_indices) == 1: return ellipsis_indices[0] @@ -97,15 +97,8 @@ def update_names_with_mapping(tensor, rename_map, inplace): dim_map[old_dim] = new_dim else: raise RuntimeError( - ( - "{api_name}: Tried to rename dim '{old_dim}' to dim " - "{new_dim} in Tensor[{dims}] but dim '{old_dim}' does not exist" - ).format( - old_dim=old_dim, - new_dim=new_dim, - dims=tensor.names, - api_name=namer_api_name(inplace), - ) + f"{namer_api_name(inplace)}: Tried to rename dim '{old_dim}' to dim " + f"{new_dim} in Tensor[{tensor.names}] but dim '{old_dim}' does not exist" ) return tensor._update_names(tuple(dim_map.values()), inplace) @@ -149,10 +142,10 @@ def update_names(tensor, names, rename_map, inplace): has_rename_pairs = bool(rename_map) if has_names and has_rename_pairs: raise RuntimeError( - "{api_name}: This function takes either positional " - "args or keyword args, but not both. Use tensor.{api_name}(*names) " - "to name dims and tensor.{api_name}(**rename_map) to rename " - "dims.".format(api_name=namer_api_name(inplace)) + f"{namer_api_name(inplace)}: This function takes either positional " + f"args or keyword args, but not both. Use tensor.{namer_api_name(inplace)}(*names) " + f"to name dims and tensor.{namer_api_name(inplace)}(**rename_map) to rename " + "dims." ) # Special case for tensor.rename(*[]), which is valid for a 0 dim tensor. diff --git a/torch/_numpy/testing/utils.py b/torch/_numpy/testing/utils.py index cfd14e0e5bd4..50791cea34b8 100644 --- a/torch/_numpy/testing/utils.py +++ b/torch/_numpy/testing/utils.py @@ -1411,8 +1411,8 @@ def assert_array_max_ulp(a, b, maxulp=1, dtype=None): ret = nulp_diff(a, b, dtype) if not np.all(ret <= maxulp): raise AssertionError( - "Arrays are not almost equal up to {:g} " - "ULP (max difference is {:g} ULP)".format(maxulp, np.max(ret)) + f"Arrays are not almost equal up to {maxulp:g} " + f"ULP (max difference is {np.max(ret):g} ULP)" ) return ret diff --git a/torch/_ops.py b/torch/_ops.py index 329d8fa534a9..19d7df3aab23 100644 --- a/torch/_ops.py +++ b/torch/_ops.py @@ -646,10 +646,8 @@ class OpOverloadPacket: # an object name different from the one the attribute # query was performed on. raise AttributeError( - "'{}' can't have an overload name beginning with '__' and the " - "underlying op {} has no attribute {} either.".format( - str(self), str(self._op), key - ) + f"'{str(self)}' can't have an overload name beginning with '__' and the " + f"underlying op {str(self._op)} has no attribute {key} either." 
) from None try: diff --git a/torch/_prims/__init__.py b/torch/_prims/__init__.py index 0e7d06a1e5bd..8b1a58eef786 100644 --- a/torch/_prims/__init__.py +++ b/torch/_prims/__init__.py @@ -1483,10 +1483,8 @@ def _slice_meta( raise ValueError(msg) if x > y: msg = ( - "Attempting to slice a tensor but a start index in {} is greater than" - " the length of its corresponding dimension in shape {}".format( - start_indices, a.shape - ) + f"Attempting to slice a tensor but a start index in {start_indices} is greater than" + f" the length of its corresponding dimension in shape {a.shape}" ) raise ValueError(msg) @@ -1496,16 +1494,14 @@ def _slice_meta( raise ValueError(msg) if x > y: msg = ( - "Attempting to slice a tensor but a stop index in {} is greater than the length of " - " its corresponding dimension in shape {}".format( - limit_indices, a.shape - ) + f"Attempting to slice a tensor but a stop index in {limit_indices} is greater than the length of " + f" its corresponding dimension in shape {a.shape}" ) raise ValueError(msg) if x < z: msg = ( - "Attempting to slice a tensor but a start index in {} is greater than " - " its corresponding stop index {}".format(x, z) + f"Attempting to slice a tensor but a start index in {x} is greater than " + f" its corresponding stop index {z}" ) for x in _strides: diff --git a/torch/_refs/nn/functional/__init__.py b/torch/_refs/nn/functional/__init__.py index a64279774740..8eafe93e6350 100644 --- a/torch/_refs/nn/functional/__init__.py +++ b/torch/_refs/nn/functional/__init__.py @@ -605,9 +605,7 @@ def margin_ranking_loss( if input1.ndim != input2.ndim or input1.ndim != target.ndim: raise RuntimeError( "margin_ranking_loss : All input tensors should have same dimension but got sizes: " - "input1: {}, input2: {}, target: {} ".format( - input1.shape, input2.shape, target.shape - ) + f"input1: {input1.shape}, input2: {input2.shape}, target: {target.shape} " ) _check_reduction_value(reduction) loss = torch.clamp_min(-target * (input1 - input2) + margin, 0) diff --git a/torch/ao/nn/quantizable/modules/rnn.py b/torch/ao/nn/quantizable/modules/rnn.py index b4de0fd1ed9c..882c63d699dc 100644 --- a/torch/ao/nn/quantizable/modules/rnn.py +++ b/torch/ao/nn/quantizable/modules/rnn.py @@ -318,8 +318,8 @@ class LSTM(torch.nn.Module): if num_layers == 1: warnings.warn("dropout option adds dropout after all but last " "recurrent layer, so non-zero dropout expects " - "num_layers greater than 1, but got dropout={} " - "and num_layers={}".format(dropout, num_layers)) + f"num_layers greater than 1, but got dropout={dropout} " + f"and num_layers={num_layers}") layers = [_LSTMLayer(self.input_size, self.hidden_size, self.bias, batch_first=False, diff --git a/torch/ao/nn/quantized/dynamic/modules/linear.py b/torch/ao/nn/quantized/dynamic/modules/linear.py index 22f483f32fd7..a8a366e57f53 100644 --- a/torch/ao/nn/quantized/dynamic/modules/linear.py +++ b/torch/ao/nn/quantized/dynamic/modules/linear.py @@ -105,7 +105,7 @@ class Linear(nnq.Linear): weight_observer = default_dynamic_qconfig.weight() dtype = weight_observer.dtype assert dtype in [torch.qint8, torch.float16], "The only supported dtypes for " \ - "dynamic quantized linear are qint8 and float16 got: {}".format(dtype) + f"dynamic quantized linear are qint8 and float16 got: {dtype}" weight_observer(mod.weight) if dtype == torch.qint8: qweight = _quantize_weight(mod.weight.float(), weight_observer) diff --git a/torch/ao/nn/quantized/dynamic/modules/rnn.py b/torch/ao/nn/quantized/dynamic/modules/rnn.py index 
af50d51bb370..f8c68c841ef2 100644 --- a/torch/ao/nn/quantized/dynamic/modules/rnn.py +++ b/torch/ao/nn/quantized/dynamic/modules/rnn.py @@ -93,8 +93,8 @@ class RNNBase(torch.nn.Module): if dropout > 0 and num_layers == 1: # type: ignore[operator] warnings.warn("dropout option adds dropout after all but last " "recurrent layer, so non-zero dropout expects " - "num_layers greater than 1, but got dropout={} and " - "num_layers={}".format(dropout, num_layers)) + f"num_layers greater than 1, but got dropout={dropout} and " + f"num_layers={num_layers}") if mode == 'LSTM': gate_size = 4 * hidden_size diff --git a/torch/ao/quantization/fx/convert.py b/torch/ao/quantization/fx/convert.py index a85036f51be1..917735529b7d 100644 --- a/torch/ao/quantization/fx/convert.py +++ b/torch/ao/quantization/fx/convert.py @@ -977,8 +977,8 @@ def convert( assert k in convert_node_name_to_qconfig, f'Expected key {k} in convert node_name_to_qconfig' if convert_node_name_to_qconfig[k] is not None: assert qconfig_equals(v, convert_node_name_to_qconfig[k]), \ - "Expected k {} to have the same value in prepare and convert QConfigMappings, " \ - "but {} was updated to {}".format(k, v, convert_node_name_to_qconfig[k]) + f"Expected k {k} to have the same value in prepare and convert QConfigMappings, " \ + f"but {v} was updated to {convert_node_name_to_qconfig[k]}" node_name_to_qconfig = convert_node_name_to_qconfig custom_module_classes = get_custom_module_class_keys(convert_custom_config.observed_to_quantized_mapping) diff --git a/torch/ao/quantization/fx/prepare.py b/torch/ao/quantization/fx/prepare.py index 12a29ebbb86e..abdfba6ba1f6 100644 --- a/torch/ao/quantization/fx/prepare.py +++ b/torch/ao/quantization/fx/prepare.py @@ -189,7 +189,7 @@ def _create_obs_or_fq_from_qspec( edge_or_node = quantization_spec.edge_or_node assert edge_or_node in obs_or_fq_map, \ "please make sure only refer to edge or node that has " \ - "observer/fake_quant inserted: '{}' not in\n{}".format(edge_or_node, obs_or_fq_map.keys()) + f"observer/fake_quant inserted: '{edge_or_node}' not in\n{obs_or_fq_map.keys()}" return obs_or_fq_map[edge_or_node] elif isinstance(quantization_spec, DerivedQuantizationSpec): # can't use asdict, so not calling get_observer_kwargs here diff --git a/torch/ao/quantization/pt2e/qat_utils.py b/torch/ao/quantization/pt2e/qat_utils.py index 837e19b16028..ee4982942d93 100644 --- a/torch/ao/quantization/pt2e/qat_utils.py +++ b/torch/ao/quantization/pt2e/qat_utils.py @@ -226,10 +226,10 @@ def _get_quantized_qat_conv2d_bn_pattern( ) else: scaled_weight = torch.ops.quantized_decomposed.quantize_per_tensor( - scaled_weight, 1.0, int(0), weight_quant_min, weight_quant_max, torch.int8, + scaled_weight, 1.0, 0, weight_quant_min, weight_quant_max, torch.int8, ) scaled_weight = torch.ops.quantized_decomposed.dequantize_per_tensor( - scaled_weight, 1.0, int(0), weight_quant_min, weight_quant_max, torch.int8, + scaled_weight, 1.0, 0, weight_quant_min, weight_quant_max, torch.int8, ) if has_bias: zero_bias = torch.zeros_like(kwargs["conv_bias"], dtype=x.dtype) @@ -283,10 +283,10 @@ def _get_folded_quantized_qat_conv2d_bn_pattern( ) else: conv_weight = torch.ops.quantized_decomposed.quantize_per_tensor( - conv_weight, 1.0, int(0), weight_quant_min, weight_quant_max, torch.int8, + conv_weight, 1.0, 0, weight_quant_min, weight_quant_max, torch.int8, ) conv_weight = torch.ops.quantized_decomposed.dequantize_per_tensor( - conv_weight, 1.0, int(0), weight_quant_min, weight_quant_max, torch.int8, + conv_weight, 1.0, 0, weight_quant_min, 
weight_quant_max, torch.int8, ) if has_bias: x = F.conv2d(x, conv_weight, kwargs["conv_bias"]) diff --git a/torch/ao/quantization/utils.py b/torch/ao/quantization/utils.py index 154372ef9a85..308fce44a40c 100644 --- a/torch/ao/quantization/utils.py +++ b/torch/ao/quantization/utils.py @@ -201,7 +201,7 @@ def get_swapped_custom_module_class(custom_module, custom_module_class_mapping, quant_type = get_quant_type(qconfig) class_mapping = custom_module_class_mapping.get(quant_type, {}) assert type(custom_module) in class_mapping, "did not find corresponding observed " \ - "module class for {} in mapping: {}".format(type(custom_module), class_mapping) + f"module class for {type(custom_module)} in mapping: {class_mapping}" return class_mapping[type(custom_module)] def activation_dtype(qconfig): @@ -298,8 +298,8 @@ def get_quant_type(qconfig): elif activation.dtype == torch.float16: return QuantType.STATIC - raise Exception("Unrecognized dtype combination in get_quant_type: activation({})," - "weight({})".format(activation.dtype, weight.dtype)) + raise Exception(f"Unrecognized dtype combination in get_quant_type: activation({activation.dtype})," + f"weight({weight.dtype})") def check_min_max_valid(min_val: torch.Tensor, max_val: torch.Tensor) -> bool: """ Checks if the given minimum and maximum values are valid, meaning that diff --git a/torch/autograd/functional.py b/torch/autograd/functional.py index 755494a88ade..30045bc8671c 100644 --- a/torch/autograd/functional.py +++ b/torch/autograd/functional.py @@ -33,17 +33,13 @@ def _as_tuple(inp, arg_name=None, fn_name=None): if not isinstance(el, torch.Tensor): if is_inp_tuple: raise TypeError( - "The {} given to {} must be either a Tensor or a tuple of Tensors but the" - " value at index {} has type {}.".format( - arg_name, fn_name, i, type(el) - ) + f"The {arg_name} given to {fn_name} must be either a Tensor or a tuple of Tensors but the" + f" value at index {i} has type {type(el)}." ) else: raise TypeError( - "The {} given to {} must be either a Tensor or a tuple of Tensors but the" - " given {} has type {}.".format( - arg_name, fn_name, arg_name, type(el) - ) + f"The {arg_name} given to {fn_name} must be either a Tensor or a tuple of Tensors but the" + f" given {arg_name} has type {type(el)}." ) return is_inp_tuple, inp @@ -134,37 +130,35 @@ def _check_requires_grad(inputs, input_type, strict): if inp is None: # This can only be reached for grad_inputs. raise RuntimeError( - "The output of the user-provided function is independent of input {}." - " This is not allowed in strict mode.".format(i) + f"The output of the user-provided function is independent of input {i}." + " This is not allowed in strict mode." ) if not inp.requires_grad: if input_type == "hessian": raise RuntimeError( - "The hessian of the user-provided function with respect to input {}" + f"The hessian of the user-provided function with respect to input {i}" " is independent of the input. This is not allowed in strict mode." " You should ensure that your function is thrice differentiable and that" - " the hessian depends on the inputs.".format(i) + " the hessian depends on the inputs." ) elif input_type == "jacobian": raise RuntimeError( "While computing the hessian, found that the jacobian of the user-provided" - " function with respect to input {} is independent of the input. This is not" + f" function with respect to input {i} is independent of the input. This is not" " allowed in strict mode. 
You should ensure that your function is twice" " differentiable and that the jacobian depends on the inputs (this would be" - " violated by a linear function for example).".format(i) + " violated by a linear function for example)." ) elif input_type == "grad_inputs": raise RuntimeError( - "The gradient with respect to input {} is independent of the inputs of the" - " user-provided function. This is not allowed in strict mode.".format( - i - ) + f"The gradient with respect to input {i} is independent of the inputs of the" + " user-provided function. This is not allowed in strict mode." ) else: raise RuntimeError( - "Output {} of the user-provided function does not require gradients." + f"Output {i} of the user-provided function does not require gradients." " The outputs must be computed in a differentiable manner from the input" - " when running in strict mode.".format(i) + " when running in strict mode." ) @@ -221,27 +215,25 @@ def _fill_in_zeros(grads, refs, strict, create_graph, stage): if stage == "back": raise RuntimeError( "The output of the user-provided function is independent of " - "input {}. This is not allowed in strict mode.".format(i) + f"input {i}. This is not allowed in strict mode." ) elif stage == "back_trick": raise RuntimeError( - "The gradient with respect to the input is independent of entry {}" + f"The gradient with respect to the input is independent of entry {i}" " in the grad_outputs when using the double backward trick to compute" - " forward mode gradients. This is not allowed in strict mode.".format( - i - ) + " forward mode gradients. This is not allowed in strict mode." ) elif stage == "double_back": raise RuntimeError( "The jacobian of the user-provided function is independent of " - "input {}. This is not allowed in strict mode.".format(i) + f"input {i}. This is not allowed in strict mode." ) else: raise RuntimeError( "The hessian of the user-provided function is independent of " - "entry {} in the grad_jacobian. This is not allowed in strict " + f"entry {i} in the grad_jacobian. This is not allowed in strict " "mode as it prevents from using the double backward trick to " - "replace forward mode AD.".format(i) + "replace forward mode AD." ) grads_i = torch.zeros_like(refs[i]) @@ -250,16 +242,12 @@ def _fill_in_zeros(grads, refs, strict, create_graph, stage): if "double" not in stage: raise RuntimeError( "The jacobian of the user-provided function is independent of " - "input {}. This is not allowed in strict mode when create_graph=True.".format( - i - ) + f"input {i}. This is not allowed in strict mode when create_graph=True." ) else: raise RuntimeError( "The hessian of the user-provided function is independent of " - "input {}. This is not allowed in strict mode when create_graph=True.".format( - i - ) + f"input {i}. This is not allowed in strict mode when create_graph=True." ) res += (grads_i,) @@ -811,17 +799,17 @@ def jacobian( if strict and create_graph and not vj_el.requires_grad: msg = ( "The jacobian of the user-provided function is " - "independent of input {}. This is not allowed in " - "strict mode when create_graph=True.".format(i) + f"independent of input {i}. This is not allowed in " + "strict mode when create_graph=True." ) raise RuntimeError(msg) jac_i_el.append(vj_el) else: if strict: msg = ( - "Output {} of the user-provided function is " - "independent of input {}. This is not allowed in " - "strict mode.".format(i, el_idx) + f"Output {i} of the user-provided function is " + f"independent of input {el_idx}. 
This is not allowed in " + "strict mode." ) raise RuntimeError(msg) jac_i_el.append(torch.zeros_like(inp_el)) diff --git a/torch/autograd/profiler_util.py b/torch/autograd/profiler_util.py index a6944d6a4840..de330f10a4fe 100644 --- a/torch/autograd/profiler_util.py +++ b/torch/autograd/profiler_util.py @@ -246,20 +246,14 @@ class EventList(list): # 's' and 'f' draw Flow arrows from # the CPU launch to the GPU kernel f.write( - '{{"name": "{}", ' + f'{{"name": "{evt.trace_name}", ' '"ph": "s", ' - '"ts": {}, ' - '"tid": {}, ' + f'"ts": {evt.time_range.start}, ' + f'"tid": {evt.thread}, ' '"pid": "CPU functions", ' - '"id": {}, ' - '"cat": "cpu_to_{}", ' - '"args": {{}}}}, '.format( - evt.trace_name, - evt.time_range.start, - evt.thread, - next_id, - device_name, - ) + f'"id": {next_id}, ' + f'"cat": "cpu_to_{device_name}", ' + '"args": {{}}}}, ' ) # Note: use torch.profiler to get device kernel trace next_id += 1 diff --git a/torch/backends/cuda/__init__.py b/torch/backends/cuda/__init__.py index ab63b15f32e0..54ecd2681387 100644 --- a/torch/backends/cuda/__init__.py +++ b/torch/backends/cuda/__init__.py @@ -94,10 +94,8 @@ class cuFFTPlanCacheManager: index = torch.cuda._utils._get_device_index(device) if index < 0 or index >= torch.cuda.device_count(): raise RuntimeError( - ( - "cufft_plan_cache: expected 0 <= device index < {}, but got " - "device with index {}" - ).format(torch.cuda.device_count(), index) + f"cufft_plan_cache: expected 0 <= device index < {torch.cuda.device_count()}, but got " + f"device with index {index}" ) if len(self.caches) == 0: self.caches.extend( diff --git a/torch/distributed/distributed_c10d.py b/torch/distributed/distributed_c10d.py index 8269caec4421..59e5a71f76c7 100644 --- a/torch/distributed/distributed_c10d.py +++ b/torch/distributed/distributed_c10d.py @@ -1123,9 +1123,9 @@ def init_process_group( if backend == Backend.MPI: if world_size != -1 or rank != -1: warnings.warn( - "For MPI backend, world_size ({}) and rank ({}) " + f"For MPI backend, world_size ({world_size}) and rank ({rank}) " "are ignored since they are assigned by the " - "MPI runtime.".format(world_size, rank) + "MPI runtime." ) default_pg, _ = _new_process_group_helper( diff --git a/torch/distributed/nn/api/remote_module.py b/torch/distributed/nn/api/remote_module.py index 75d7e6215d56..935e27436ea2 100644 --- a/torch/distributed/nn/api/remote_module.py +++ b/torch/distributed/nn/api/remote_module.py @@ -500,8 +500,8 @@ class _RemoteModule(nn.Module): and k not in _REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING ): raise AttributeError( - "Attribute {} must be either in ``_REMOTE_MODULE_PICKLED_ATTRIBUTES`` or " - "``_REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING``.".format(k) + f"Attribute {k} must be either in ``_REMOTE_MODULE_PICKLED_ATTRIBUTES`` or " + "``_REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING``." ) def _install_generated_methods(self): @@ -729,11 +729,9 @@ def _remote_module_reducer(remote_module): # Check if unpickled attributes are all in _REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING. elif k not in _REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING: print( - "The new attribute ``{}`` of RemoteModule is ignored during RPC pickling. " + f"The new attribute ``{k}`` of RemoteModule is ignored during RPC pickling. " "To pickle this attribute, please add it to ``_REMOTE_MODULE_PICKLED_ATTRIBUTES``. 
" - "Otherwise, please explicitly add it to ``_REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING``.".format( - k - ), + "Otherwise, please explicitly add it to ``_REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING``.", file=sys.stderr, ) diff --git a/torch/distributed/pipeline/sync/pipe.py b/torch/distributed/pipeline/sync/pipe.py index 707f56c9d2ee..65063e9b1c82 100644 --- a/torch/distributed/pipeline/sync/pipe.py +++ b/torch/distributed/pipeline/sync/pipe.py @@ -112,8 +112,8 @@ def _retrieve_device(module: nn.Module) -> torch.device: device = parameter.device elif device != parameter.device: raise ValueError( - 'nn.Module: {}, should have all parameters on a single device,' - ' please use .to() to place the module on a single device'.format(module)) + f'nn.Module: {module}, should have all parameters on a single device,' + ' please use .to() to place the module on a single device') return device if device is not None else torch.device("cpu") diff --git a/torch/distributions/independent.py b/torch/distributions/independent.py index a58e81b7562e..35b705fd0f29 100644 --- a/torch/distributions/independent.py +++ b/torch/distributions/independent.py @@ -45,9 +45,7 @@ class Independent(Distribution): if reinterpreted_batch_ndims > len(base_distribution.batch_shape): raise ValueError( "Expected reinterpreted_batch_ndims <= len(base_distribution.batch_shape), " - "actual {} vs {}".format( - reinterpreted_batch_ndims, len(base_distribution.batch_shape) - ) + f"actual {reinterpreted_batch_ndims} vs {len(base_distribution.batch_shape)}" ) shape = base_distribution.batch_shape + base_distribution.event_shape event_dim = reinterpreted_batch_ndims + len(base_distribution.event_shape) diff --git a/torch/fx/experimental/unification/multipledispatch/dispatcher.py b/torch/fx/experimental/unification/multipledispatch/dispatcher.py index 65ad70a3e1ab..d2a8e6bfc7ff 100644 --- a/torch/fx/experimental/unification/multipledispatch/dispatcher.py +++ b/torch/fx/experimental/unification/multipledispatch/dispatcher.py @@ -205,9 +205,9 @@ class Dispatcher: if not isinstance(typ, (type, list)): str_sig = ', '.join(c.__name__ if isinstance(c, type) else str(c) for c in signature) - raise TypeError("Tried to dispatch on non-type: {}\n" - "In signature: <{}>\n" - "In function: {}".format(typ, str_sig, self.name)) + raise TypeError(f"Tried to dispatch on non-type: {typ}\n" + f"In signature: <{str_sig}>\n" + f"In function: {self.name}") # handle variadic signatures if isinstance(typ, list): @@ -272,8 +272,7 @@ class Dispatcher: raise NotImplementedError( "Matching functions for " - "{}: <{}> found, but none completed successfully".format( - self.name, str_signature(types),),) from e + f"{self.name}: <{str_signature(types)}> found, but none completed successfully",) from e def __str__(self): return f"" diff --git a/torch/jit/_recursive.py b/torch/jit/_recursive.py index aa4e370e3b25..c7d107503771 100644 --- a/torch/jit/_recursive.py +++ b/torch/jit/_recursive.py @@ -310,17 +310,17 @@ def infer_concrete_type_builder(nn_module, share_types=True): ) warnings.warn( - "'{}' was found in ScriptModule constants, " - " but it is a non-constant {}. Consider removing it.".format(name, hint) + f"'{name}' was found in ScriptModule constants, " + f" but it is a non-constant {hint}. Consider removing it." 
) continue if not hasattr(nn_module, name): # TODO: We should really error in this case, but its bc-breaking so # we need to warn for at least one release warnings.warn( - "'{}' was found in ScriptModule constants, " + f"'{name}' was found in ScriptModule constants, " "but was not actually set in __init__. " - "Consider removing it.".format(name) + "Consider removing it." ) continue value = getattr(nn_module, name) @@ -370,8 +370,8 @@ def infer_concrete_type_builder(nn_module, share_types=True): hint = ( "(This function exists as an attribute on the Python module, " "but we failed to compile it to a TorchScript function. " - "\nThe error stack is reproduced here:\n{}" - ).format(e) + f"\nThe error stack is reproduced here:\n{e}" + ) concrete_type_builder.add_failed_attribute(name, hint) pass @@ -998,9 +998,9 @@ def try_compile_fn(fn, loc): if not inspect.isfunction(fn) and not inspect.ismethod(fn): raise RuntimeError( - "`{}` is not a function. Recursive scripting only supports " + f"`{fn}` is not a function. Recursive scripting only supports " "Python functions or methods currently.\n" - "Consider manually annotating `{}` with @torch.jit.script.".format(fn, fn) + f"Consider manually annotating `{fn}` with @torch.jit.script." ) # We don't have the actual scope where the function was defined, but we can diff --git a/torch/jit/_script.py b/torch/jit/_script.py index bcfe27b9663c..acedc52786f7 100644 --- a/torch/jit/_script.py +++ b/torch/jit/_script.py @@ -257,7 +257,7 @@ class OrderedModuleDict(OrderedDictWrapper): else: raise RuntimeError( "Cannot re-assign modules in a ScriptModule with non-scripted " - "module, tried to replace existing module '{}': {}".format(k, v) + f"module, tried to replace existing module '{k}': {v}" ) def __getitem__(self, k): @@ -1402,7 +1402,7 @@ def _check_overload_defaults(impl_defaults, overload_defaults, loc): loc, "Default parameters on overloads do not affect the runtime so they " "must equal to the default parameter on the implementation function. Found on " - "parameter {name}".format(name=name), + f"parameter {name}", ) @@ -1461,9 +1461,9 @@ def _check_directly_compile_overloaded(obj): qual_name = _qualified_name(obj) if _jit_internal._get_fn_overloads(qual_name) or _try_get_jit_cached_overloads(obj): raise RuntimeError( - "Function {} cannot be directly compiled because it" + f"Function {qual_name} cannot be directly compiled because it" " is overloaded. It must be used in a context of a function" - " where its inputs can determine which overload to call.".format(qual_name) + " where its inputs can determine which overload to call." 
) diff --git a/torch/jit/_trace.py b/torch/jit/_trace.py index 11e8de464aeb..2eaa79cfe0bc 100644 --- a/torch/jit/_trace.py +++ b/torch/jit/_trace.py @@ -254,10 +254,8 @@ def verify(model, args, loss_fn=torch.sum, devices=None): out = (out,) if loss_fn == torch.sum and len(out) != 1: raise ValueError( - ( - "Model returns {} outputs, but default loss function " - "(torch.sum) can only handle a single output" - ).format(len(out)) + f"Model returns {len(out)} outputs, but default loss function " + "(torch.sum) can only handle a single output" ) out_vars, _ = _flatten(out) saved_outs = [ diff --git a/torch/nn/functional.py b/torch/nn/functional.py index 0aa729d92124..9abf844acce3 100644 --- a/torch/nn/functional.py +++ b/torch/nn/functional.py @@ -903,9 +903,7 @@ def _unpool_output_size( if len(output_size) != len(kernel_size): raise ValueError( "output_size should be a sequence containing " - "{} or {} elements, but it has a length of '{}'".format( - len(kernel_size), len(kernel_size) + 2, len(output_size) - ) + f"{len(kernel_size)} or {len(kernel_size) + 2} elements, but it has a length of '{len(output_size)}'" ) for d in range(len(kernel_size)): min_size = default_size[d] - stride[d] @@ -2356,8 +2354,8 @@ def embedding_bag( if per_sample_weights is not None and input.size() != per_sample_weights.size(): raise ValueError( - "embedding_bag: If per_sample_weights ({}) is not None, " - "then it must have the same shape as the input ({})".format(per_sample_weights.shape, input.shape) + f"embedding_bag: If per_sample_weights ({per_sample_weights.shape}) is not None, " + f"then it must have the same shape as the input ({input.shape})" ) if not weight.dim() == 2: @@ -2375,7 +2373,7 @@ def embedding_bag( "if input is 2D, then offsets has to be None" ", as input is treated is a mini-batch of" " fixed length sequences. However, found " - "offsets of type {}".format(type_str) + f"offsets of type {type_str}" ) offsets = torch.arange(0, input.numel(), input.size(1), dtype=input.dtype, device=input.device) @@ -2416,7 +2414,7 @@ def embedding_bag( raise NotImplementedError( "embedding_bag: per_sample_weights was not None. " "per_sample_weights is only supported for mode='sum' " - "(got mode='{}'). Please open a feature request on GitHub.".format(mode) + f"(got mode='{mode}'). Please open a feature request on GitHub." ) ret, _, _, _ = torch.embedding_bag( @@ -3223,9 +3221,9 @@ def smooth_l1_loss( ) if not (target.size() == input.size()): warnings.warn( - "Using a target size ({}) that is different to the input size ({}). " + f"Using a target size ({target.size()}) that is different to the input size ({input.size()}). " "This will likely lead to incorrect results due to broadcasting. " - "Please ensure they have the same size.".format(target.size(), input.size()), + "Please ensure they have the same size.", stacklevel=2, ) if size_average is not None or reduce is not None: @@ -3260,9 +3258,9 @@ def huber_loss( delta=delta, ) if not (target.size() == input.size()): - warnings.warn("Using a target size ({}) that is different to the input size ({}). " + warnings.warn(f"Using a target size ({target.size()}) that is different to the input size ({input.size()}). " "This will likely lead to incorrect results due to broadcasting. 
" - "Please ensure they have the same size.".format(target.size(), input.size()), + "Please ensure they have the same size.", stacklevel=2) expanded_input, expanded_target = torch.broadcast_tensors(input, target) @@ -3288,9 +3286,9 @@ def l1_loss( ) if not (target.size() == input.size()): warnings.warn( - "Using a target size ({}) that is different to the input size ({}). " + f"Using a target size ({target.size()}) that is different to the input size ({input.size()}). " "This will likely lead to incorrect results due to broadcasting. " - "Please ensure they have the same size.".format(target.size(), input.size()), + "Please ensure they have the same size.", stacklevel=2, ) if size_average is not None or reduce is not None: @@ -3319,9 +3317,9 @@ def mse_loss( ) if not (target.size() == input.size()): warnings.warn( - "Using a target size ({}) that is different to the input size ({}). " + f"Using a target size ({target.size()}) that is different to the input size ({input.size()}). " "This will likely lead to incorrect results due to broadcasting. " - "Please ensure they have the same size.".format(target.size(), input.size()), + "Please ensure they have the same size.", stacklevel=2, ) if size_average is not None or reduce is not None: @@ -4044,8 +4042,8 @@ def interpolate(input: Tensor, size: Optional[int] = None, scale_factor: Optiona raise NotImplementedError( "Input Error: Only 3D, 4D and 5D input Tensors supported" - " (got {}D) for the modes: nearest | linear | bilinear | bicubic | trilinear | area | nearest-exact" - " (got {})".format(input.dim(), mode) + f" (got {input.dim()}D) for the modes: nearest | linear | bilinear | bicubic | trilinear | area | nearest-exact" + f" (got {mode})" ) @@ -4277,7 +4275,7 @@ def grid_sample( raise ValueError( "nn.functional.grid_sample(): expected padding_mode " "to be 'zeros', 'border', or 'reflection', " - "but got: '{}'".format(padding_mode) + f"but got: '{padding_mode}'" ) if mode == "bilinear": @@ -4385,7 +4383,7 @@ def affine_grid(theta: Tensor, size: List[int], align_corners: Optional[bool] = raise NotImplementedError( "affine_grid only supports 4D and 5D sizes, " "for 2D and 3D affine transforms, respectively. " - "Got size {}.".format(size) + f"Got size {size}." ) # check for empty span if align_corners and min(spatial_size) == 1: diff --git a/torch/nn/init.py b/torch/nn/init.py index 21768a65a930..31e78442796b 100644 --- a/torch/nn/init.py +++ b/torch/nn/init.py @@ -543,14 +543,13 @@ def _make_deprecate(meth): warnings.warn(f"nn.init.{old_name} is now deprecated in favor of nn.init.{new_name}.", stacklevel=2) return meth(*args, **kwargs) - deprecated_init.__doc__ = r""" + deprecated_init.__doc__ = fr""" {old_name}(...) .. warning:: This method is now deprecated in favor of :func:`torch.nn.init.{new_name}`. - See :func:`~torch.nn.init.{new_name}` for details.""".format( - old_name=old_name, new_name=new_name) + See :func:`~torch.nn.init.{new_name}` for details.""" deprecated_init.__name__ = old_name return deprecated_init diff --git a/torch/nn/modules/adaptive.py b/torch/nn/modules/adaptive.py index cf2f56efa817..dd5539bec330 100644 --- a/torch/nn/modules/adaptive.py +++ b/torch/nn/modules/adaptive.py @@ -224,11 +224,9 @@ class AdaptiveLogSoftmaxWithLoss(Module): used_rows += row_indices.numel() if used_rows != batch_size: - raise RuntimeError("Target values should be in [0, {}], " - "but values in range [{}, {}] " - "were found. 
".format(self.n_classes - 1, - target.min().item(), - target.max().item())) + raise RuntimeError(f"Target values should be in [0, {self.n_classes - 1}], " + f"but values in range [{target.min().item()}, {target.max().item()}] " + "were found. ") head_output = self.head(input) head_logprob = log_softmax(head_output, dim=1) diff --git a/torch/nn/modules/container.py b/torch/nn/modules/container.py index bbd253530e2c..2b8cd1bab3b2 100644 --- a/torch/nn/modules/container.py +++ b/torch/nn/modules/container.py @@ -148,8 +148,7 @@ class Sequential(Module): return ret else: raise ValueError('add operator supports only objects ' - 'of Sequential class, but {} is given.'.format( - str(type(other)))) + f'of Sequential class, but {str(type(other))} is given.') def pop(self, key: Union[int, slice]) -> Module: v = self[key] @@ -164,8 +163,7 @@ class Sequential(Module): return self else: raise ValueError('add operator supports only objects ' - 'of Sequential class, but {} is given.'.format( - str(type(other)))) + f'of Sequential class, but {str(type(other))} is given.') def __mul__(self, other: int) -> 'Sequential': if not isinstance(other, int): diff --git a/torch/nn/modules/conv.py b/torch/nn/modules/conv.py index 9fb1135eb9d6..cb018d98dc20 100644 --- a/torch/nn/modules/conv.py +++ b/torch/nn/modules/conv.py @@ -656,10 +656,9 @@ class _ConvTransposeNd(_ConvNd): min_size = min_sizes[i] max_size = max_sizes[i] if size < min_size or size > max_size: - raise ValueError(( - "requested an output size of {}, but valid sizes range " - "from {} to {} (for an input of {})").format( - output_size, min_sizes, max_sizes, input.size()[2:])) + raise ValueError( + f"requested an output size of {output_size}, but valid sizes range " + f"from {min_sizes} to {max_sizes} (for an input of {input.size()[2:]})") res = torch.jit.annotate(List[int], []) for d in range(num_spatial_dims): diff --git a/torch/nn/modules/module.py b/torch/nn/modules/module.py index a0255076c561..8fad74054500 100644 --- a/torch/nn/modules/module.py +++ b/torch/nn/modules/module.py @@ -461,8 +461,8 @@ class Module: "".format(type(self).__name__, next(iter(kwargs)))) if self.call_super_init is False and bool(args): - raise TypeError("{}.__init__() takes 1 positional argument but {} were" - " given".format(type(self).__name__, len(args) + 1)) + raise TypeError(f"{type(self).__name__}.__init__() takes 1 positional argument but {len(args) + 1} were" + " given") """ Calls super().__setattr__('a', a) instead of the typical self.a = a @@ -537,9 +537,9 @@ class Module: elif hasattr(self, name) and name not in self._buffers: raise KeyError(f"attribute '{name}' already exists") elif tensor is not None and not isinstance(tensor, torch.Tensor): - raise TypeError("cannot assign '{}' object to buffer '{}' " + raise TypeError(f"cannot assign '{torch.typename(tensor)}' object to buffer '{name}' " "(torch Tensor or None required)" - .format(torch.typename(tensor), name)) + ) else: for hook in _global_buffer_registration_hooks.values(): output = hook(self, name, tensor) @@ -580,15 +580,15 @@ class Module: if param is None: self._parameters[name] = None elif not isinstance(param, Parameter): - raise TypeError("cannot assign '{}' object to parameter '{}' " + raise TypeError(f"cannot assign '{torch.typename(param)}' object to parameter '{name}' " "(torch.nn.Parameter or None required)" - .format(torch.typename(param), name)) + ) elif param.grad_fn: raise ValueError( - "Cannot assign non-leaf Tensor to parameter '{0}'. Model " - "parameters must be created explicitly. 
To express '{0}' " + f"Cannot assign non-leaf Tensor to parameter '{name}'. Model " + f"parameters must be created explicitly. To express '{name}' " "as a function of another Tensor, compute the value in " - "the forward() method.".format(name)) + "the forward() method.") else: for hook in _global_parameter_registration_hooks.values(): output = hook(self, name, param) @@ -1143,7 +1143,7 @@ class Module: if dtype is not None: if not (dtype.is_floating_point or dtype.is_complex): raise TypeError('nn.Module.to only accepts floating point or complex ' - 'dtypes, but got desired dtype={}'.format(dtype)) + f'dtypes, but got desired dtype={dtype}') if dtype.is_complex: warnings.warn( "Complex modules are a new feature under active development whose design may change, " @@ -1712,9 +1712,9 @@ class Module: self.register_parameter(name, value) elif params is not None and name in params: if value is not None: - raise TypeError("cannot assign '{}' as parameter '{}' " + raise TypeError(f"cannot assign '{torch.typename(value)}' as parameter '{name}' " "(torch.nn.Parameter or None expected)" - .format(torch.typename(value), name)) + ) self.register_parameter(name, value) else: modules = self.__dict__.get('_modules') @@ -1730,9 +1730,9 @@ class Module: modules[name] = value elif modules is not None and name in modules: if value is not None: - raise TypeError("cannot assign '{}' as child module '{}' " + raise TypeError(f"cannot assign '{torch.typename(value)}' as child module '{name}' " "(torch.nn.Module or None expected)" - .format(torch.typename(value), name)) + ) for hook in _global_module_registration_hooks.values(): output = hook(self, name, value) if output is not None: @@ -1742,9 +1742,9 @@ class Module: buffers = self.__dict__.get('_buffers') if buffers is not None and name in buffers: if value is not None and not isinstance(value, torch.Tensor): - raise TypeError("cannot assign '{}' as buffer '{}' " + raise TypeError(f"cannot assign '{torch.typename(value)}' as buffer '{name}' " "(torch.Tensor or None expected)" - .format(torch.typename(value), name)) + ) for hook in _global_buffer_registration_hooks.values(): output = hook(self, name, value) if output is not None: @@ -2000,10 +2000,10 @@ class Module: if key in state_dict: input_param = state_dict[key] if not torch.overrides.is_tensor_like(input_param): - error_msgs.append('While copying the parameter named "{}", ' + error_msgs.append(f'While copying the parameter named "{key}", ' 'expected torch.Tensor or Tensor-like object from checkpoint but ' - 'received {}' - .format(key, type(input_param))) + f'received {type(input_param)}' + ) continue # This is used to avoid copying uninitialized parameters into @@ -2039,11 +2039,11 @@ class Module: else: param.copy_(input_param) except Exception as ex: - error_msgs.append('While copying the parameter named "{}", ' - 'whose dimensions in the model are {} and ' - 'whose dimensions in the checkpoint are {}, ' - 'an exception occurred : {}.' - .format(key, param.size(), input_param.size(), ex.args)) + error_msgs.append(f'While copying the parameter named "{key}", ' + f'whose dimensions in the model are {param.size()} and ' + f'whose dimensions in the checkpoint are {input_param.size()}, ' + f'an exception occurred : {ex.args}.' 
+ ) elif strict: missing_keys.append(key) diff --git a/torch/nn/modules/rnn.py b/torch/nn/modules/rnn.py index 1ea44a6e4d73..b49d804fb45a 100644 --- a/torch/nn/modules/rnn.py +++ b/torch/nn/modules/rnn.py @@ -70,8 +70,8 @@ class RNNBase(Module): if dropout > 0 and num_layers == 1: warnings.warn("dropout option adds dropout after all but last " "recurrent layer, so non-zero dropout expects " - "num_layers greater than 1, but got dropout={} and " - "num_layers={}".format(dropout, num_layers)) + f"num_layers greater than 1, but got dropout={dropout} and " + f"num_layers={num_layers}") if not isinstance(hidden_size, int): raise TypeError(f"hidden_size should be of type int, got: {type(hidden_size).__name__}") diff --git a/torch/nn/parallel/data_parallel.py b/torch/nn/parallel/data_parallel.py index 40253c4b43c2..2551d43fdc9e 100644 --- a/torch/nn/parallel/data_parallel.py +++ b/torch/nn/parallel/data_parallel.py @@ -169,8 +169,8 @@ class DataParallel(Module, Generic[T]): for t in chain(self.module.parameters(), self.module.buffers()): if t.device != self.src_device_obj: raise RuntimeError("module must have its parameters and buffers " - "on device {} (device_ids[0]) but found one of " - "them on device: {}".format(self.src_device_obj, t.device)) + f"on device {self.src_device_obj} (device_ids[0]) but found one of " + f"them on device: {t.device}") inputs, module_kwargs = self.scatter(inputs, kwargs, self.device_ids) # for forward function without any inputs, empty list and dict will be created @@ -249,8 +249,8 @@ def data_parallel( for t in chain(module.parameters(), module.buffers()): if t.device != src_device_obj: raise RuntimeError("module must have its parameters and buffers " - "on device {} (device_ids[0]) but found one of " - "them on device: {}".format(src_device_obj, t.device)) + f"on device {src_device_obj} (device_ids[0]) but found one of " + f"them on device: {t.device}") inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim) # for module without any inputs, empty list and dict will be created diff --git a/torch/nn/parallel/distributed.py b/torch/nn/parallel/distributed.py index d72346f23760..24f63e18a29e 100644 --- a/torch/nn/parallel/distributed.py +++ b/torch/nn/parallel/distributed.py @@ -697,9 +697,7 @@ class DistributedDataParallel(Module, Joinable): self._log_and_throw( ValueError, "DistributedDataParallel's input module must be on " - "the same type of devices, but input module parameters locate in {}.".format( - distinct_device_types - ), + f"the same type of devices, but input module parameters locate in {distinct_device_types}.", ) self.device_type = list(distinct_device_types)[0] diff --git a/torch/nn/parallel/parallel_apply.py b/torch/nn/parallel/parallel_apply.py index fc14a968620d..36de8845e56b 100644 --- a/torch/nn/parallel/parallel_apply.py +++ b/torch/nn/parallel/parallel_apply.py @@ -71,8 +71,8 @@ def parallel_apply( if t is None: with lock: results[i] = ExceptionWrapper( - where="in replica {}, no device was provided and no tensor input was found; " - "device cannot be resolved".format(i)) + where=f"in replica {i}, no device was provided and no tensor input was found; " + "device cannot be resolved") return device = t.get_device() if stream is None: diff --git a/torch/nn/parameter.py b/torch/nn/parameter.py index c15ad0c863c9..c8716b6c20d3 100644 --- a/torch/nn/parameter.py +++ b/torch/nn/parameter.py @@ -154,11 +154,11 @@ class UninitializedTensorMixin: kwargs = {} return super().__torch_function__(func, types, args, kwargs) raise 
ValueError( - 'Attempted to use an uninitialized parameter in {}. ' + f'Attempted to use an uninitialized parameter in {func}. ' 'This error happens when you are using a `LazyModule` or ' - 'explicitly manipulating `torch.nn.parameter.{}` ' + f'explicitly manipulating `torch.nn.parameter.{cls.__name__}` ' 'objects. When using LazyModules Call `forward` with a dummy batch ' - 'to initialize the parameters before calling torch functions'.format(func, cls.__name__)) + 'to initialize the parameters before calling torch functions') def is_lazy(param): diff --git a/torch/nn/utils/parametrizations.py b/torch/nn/utils/parametrizations.py index c451be6dd792..40dc763269be 100644 --- a/torch/nn/utils/parametrizations.py +++ b/torch/nn/utils/parametrizations.py @@ -379,7 +379,7 @@ class _SpectralNorm(Module): if n_power_iterations <= 0: raise ValueError('Expected n_power_iterations to be positive, but ' - 'got n_power_iterations={}'.format(n_power_iterations)) + f'got n_power_iterations={n_power_iterations}') self.dim = dim if dim >= 0 else dim + ndim self.eps = eps if ndim > 1: diff --git a/torch/nn/utils/prune.py b/torch/nn/utils/prune.py index 1e16f11b6b62..15e244afaf1d 100644 --- a/torch/nn/utils/prune.py +++ b/torch/nn/utils/prune.py @@ -288,9 +288,7 @@ class PruningContainer(BasePruningMethod): elif method is not None and self._tensor_name != method._tensor_name: raise ValueError( "Can only add pruning methods acting on " - "the parameter named '{}' to PruningContainer {}.".format( - self._tensor_name, self - ) + f"the parameter named '{self._tensor_name}' to PruningContainer {self}." + f" Found '{method._tensor_name}'" ) # if all checks passed, add to _pruning_methods tuple @@ -1092,9 +1090,7 @@ def global_unstructured(parameters, pruning_method, importance_scores=None, **kw if method.PRUNING_TYPE != "unstructured": raise TypeError( 'Only "unstructured" PRUNING_TYPE supported for ' - "the `pruning_method`. Found method {} of type {}".format( - pruning_method, method.PRUNING_TYPE - ) + f"the `pruning_method`. Found method {pruning_method} of type {method.PRUNING_TYPE}" ) container.add_pruning_method(method) @@ -1280,7 +1276,7 @@ def _validate_structured_pruning(t): raise ValueError( "Structured pruning can only be applied to " "multidimensional tensors. 
Found tensor of shape " - "{} with {} dims".format(shape, len(shape)) + f"{shape} with {len(shape)} dims" ) diff --git a/torch/nn/utils/rnn.py b/torch/nn/utils/rnn.py index 4f84cfb9d5a9..fd7a12d933df 100644 --- a/torch/nn/utils/rnn.py +++ b/torch/nn/utils/rnn.py @@ -327,8 +327,8 @@ def pad_packed_sequence( if total_length < max_seq_length: raise ValueError("Expected total_length to be at least the length " "of the longest sequence in input, but got " - "total_length={} and max sequence length being {}" - .format(total_length, max_seq_length)) + f"total_length={total_length} and max sequence length being {max_seq_length}" + ) max_seq_length = total_length padded_output, lengths = _VF._pad_packed_sequence( sequence.data, sequence.batch_sizes, batch_first, padding_value, max_seq_length) diff --git a/torch/nn/utils/spectral_norm.py b/torch/nn/utils/spectral_norm.py index ea29d095ea1c..0dd5973abb67 100644 --- a/torch/nn/utils/spectral_norm.py +++ b/torch/nn/utils/spectral_norm.py @@ -29,7 +29,7 @@ class SpectralNorm: self.dim = dim if n_power_iterations <= 0: raise ValueError('Expected n_power_iterations to be positive, but ' - 'got n_power_iterations={}'.format(n_power_iterations)) + f'got n_power_iterations={n_power_iterations}') self.n_power_iterations = n_power_iterations self.eps = eps diff --git a/torch/optim/lr_scheduler.py b/torch/optim/lr_scheduler.py index d78fdbf18580..337537ebe5a5 100644 --- a/torch/optim/lr_scheduler.py +++ b/torch/optim/lr_scheduler.py @@ -40,7 +40,7 @@ class LRScheduler: for i, group in enumerate(optimizer.param_groups): if 'initial_lr' not in group: raise KeyError("param 'initial_lr' is not specified " - "in param_groups[{}] when resuming an optimizer".format(i)) + f"in param_groups[{i}] when resuming an optimizer") self.base_lrs = [group['initial_lr'] for group in optimizer.param_groups] self.last_epoch = last_epoch @@ -645,8 +645,8 @@ class SequentialLR(LRScheduler): if (len(milestones) != len(schedulers) - 1): raise ValueError( "Sequential Schedulers expects number of schedulers provided to be one more " - "than the number of milestone points, but got number of schedulers {} and the " - "number of milestones to be equal to {}".format(len(schedulers), len(milestones)) + f"than the number of milestone points, but got number of schedulers {len(schedulers)} and the " + f"number of milestones to be equal to {len(milestones)}" ) self._schedulers = schedulers self._milestones = milestones @@ -862,7 +862,7 @@ class ChainedScheduler(LRScheduler): if (schedulers[scheduler_idx].optimizer != schedulers[0].optimizer): raise ValueError( "ChainedScheduler expects all schedulers to belong to the same optimizer, but " - "got schedulers at index {} and {} to be different".format(0, scheduler_idx) + f"got schedulers at index {0} and {scheduler_idx} to be different" ) self._schedulers = list(schedulers) self.optimizer = schedulers[0].optimizer diff --git a/torch/overrides.py b/torch/overrides.py index 757e9bf711ff..96ff7c0b64df 100644 --- a/torch/overrides.py +++ b/torch/overrides.py @@ -1574,9 +1574,9 @@ def handle_torch_function( func_name = f'{public_api.__module__}.{public_api.__name__}' msg = ( - "no implementation found for '{}' on types that implement " - '__torch_function__: {}' - ).format(func_name, [type(arg) for arg in overloaded_args]) + f"no implementation found for '{func_name}' on types that implement " + f'__torch_function__: {[type(arg) for arg in overloaded_args]}' + ) if _is_torch_function_mode_enabled(): msg += f" nor in mode {_get_current_function_mode()}" 
raise TypeError(msg) diff --git a/torch/quasirandom.py b/torch/quasirandom.py index c5086da283a4..1c9b949c5565 100644 --- a/torch/quasirandom.py +++ b/torch/quasirandom.py @@ -122,11 +122,11 @@ class SobolEngine: total_n = self.num_generated + n if not (total_n & (total_n - 1) == 0): raise ValueError("The balance properties of Sobol' points require " - "n to be a power of 2. {0} points have been " - "previously generated, then: n={0}+2**{1}={2}. " + f"n to be a power of 2. {self.num_generated} points have been " + f"previously generated, then: n={self.num_generated}+2**{m}={total_n}. " "If you still want to do this, please use " "'SobolEngine.draw()' instead." - .format(self.num_generated, m, total_n)) + ) return self.draw(n=n, out=out, dtype=dtype) def reset(self): diff --git a/torch/serialization.py b/torch/serialization.py index 875c6abe030a..aeb1d8444612 100644 --- a/torch/serialization.py +++ b/torch/serialization.py @@ -202,10 +202,8 @@ def check_module_version_greater_or_equal(module, req_version_tuple, error_if_ma except Exception as e: message = ( - "'{}' module version string is malformed '{}' and cannot be compared" - " with tuple {}" - ).format( - module.__name__, module.__version__, str(req_version_tuple) + f"'{module.__name__}' module version string is malformed '{module.__version__}' and cannot be compared" + f" with tuple {str(req_version_tuple)}" ) if error_if_malformed: raise RuntimeError(message) from e diff --git a/torch/testing/_internal/common_device_type.py b/torch/testing/_internal/common_device_type.py index 7f045b0e658b..c1d19c02b43b 100644 --- a/torch/testing/_internal/common_device_type.py +++ b/torch/testing/_internal/common_device_type.py @@ -1195,7 +1195,7 @@ class dtypes: assert isinstance(arg, (list, tuple)), \ "When one dtype variant is a tuple or list, " \ "all dtype variants must be. 
" \ - "Received non-list non-tuple dtype {}".format(str(arg)) + f"Received non-list non-tuple dtype {str(arg)}" assert all(isinstance(dtype, torch.dtype) for dtype in arg), f"Unknown dtype in {str(arg)}" else: assert all(isinstance(arg, torch.dtype) for arg in args), f"Unknown dtype in {str(args)}" diff --git a/torch/testing/_internal/common_utils.py b/torch/testing/_internal/common_utils.py index c0d21778f5e7..9680690c83b6 100644 --- a/torch/testing/_internal/common_utils.py +++ b/torch/testing/_internal/common_utils.py @@ -506,9 +506,8 @@ class parametrize(_TestParametrizer): values = list(values) if len(self.arg_names) > 1 else [values] if len(values) != len(self.arg_names): - raise RuntimeError('Expected # values == # arg names, but got: {} ' - 'values and {} names for test "{}"'.format( - len(values), len(self.arg_names), test.__name__)) + raise RuntimeError(f'Expected # values == # arg names, but got: {len(values)} ' + f'values and {len(self.arg_names)} names for test "{test.__name__}"') param_kwargs = dict(zip(self.arg_names, values)) @@ -3461,9 +3460,9 @@ This message can be suppressed by setting PYTORCH_PRINT_REPRO_ON_FAILURE=0""" return accept_output("output") else: raise RuntimeError( - ("I got this output for {}{}:\n\n{}\n\n" - "No expect file exists; to accept the current output, run:\n" - "python {} {} --accept").format(munged_id, subname_output, s, __main__.__file__, munged_id)) from None + f"I got this output for {munged_id}{subname_output}:\n\n{s}\n\n" + "No expect file exists; to accept the current output, run:\n" + f"python {__main__.__file__} {munged_id} --accept") from None # a hack for JIT tests if IS_WINDOWS: @@ -4066,10 +4065,9 @@ def check_test_defined_in_running_script(test_case): if running_script_path is None: return test_case_class_file = os.path.abspath(os.path.realpath(inspect.getfile(test_case.__class__))) - assert test_case_class_file == running_script_path, "Class of loaded TestCase \"{}\" " \ - "is not defined in the running script \"{}\", but in \"{}\". Did you " \ - "accidentally import a unittest.TestCase from another file?".format( - test_case.id(), running_script_path, test_case_class_file) + assert test_case_class_file == running_script_path, f"Class of loaded TestCase \"{test_case.id()}\" " \ + f"is not defined in the running script \"{running_script_path}\", but in \"{test_case_class_file}\". Did you " \ + "accidentally import a unittest.TestCase from another file?" def load_tests(loader, tests, pattern): set_running_script_path() diff --git a/torch/testing/_internal/hypothesis_utils.py b/torch/testing/_internal/hypothesis_utils.py index 4ace78f7594e..0654a64b96b2 100644 --- a/torch/testing/_internal/hypothesis_utils.py +++ b/torch/testing/_internal/hypothesis_utils.py @@ -362,7 +362,7 @@ def assert_deadline_disabled(): warning_message = ( "Your version of hypothesis is outdated. " "To avoid `DeadlineExceeded` errors, please update. 
" - "Current hypothesis version: {}".format(hypothesis.__version__) + f"Current hypothesis version: {hypothesis.__version__}" ) warnings.warn(warning_message) else: diff --git a/torch/testing/_internal/jit_utils.py b/torch/testing/_internal/jit_utils.py index f96c2fb436be..447a425b9a5b 100644 --- a/torch/testing/_internal/jit_utils.py +++ b/torch/testing/_internal/jit_utils.py @@ -66,7 +66,7 @@ def get_execution_plan(graph_executor_state): num_plans = len(execution_plans) if num_plans != 1: raise RuntimeError('This test assumes this GraphExecutor should ' - 'only have one execution plan, got: {}'.format(num_plans)) + f'only have one execution plan, got: {num_plans}') return execution_plans[0] class _AssertRaisesRegexWithHighlightContext: diff --git a/torch/utils/bundled_inputs.py b/torch/utils/bundled_inputs.py index df2d771f6f51..b18857b0e716 100644 --- a/torch/utils/bundled_inputs.py +++ b/torch/utils/bundled_inputs.py @@ -334,17 +334,14 @@ def augment_many_model_functions_with_bundled_inputs( # Add to the high level helper methods inputs_info = repr(info[function]) if info and function in info else '[]' - get_bundled_inputs_functions_and_info_template += """ + get_bundled_inputs_functions_and_info_template += f""" temp_dict : Dict[str,List[str]] = {{}} - info: List[str] = {info} + info: List[str] = {inputs_info} temp_dict['info'] = info - temp_dict['get_inputs_function_name'] = ['get_all_bundled_inputs_for_{name}'] - all_inputs['{name}'] = temp_dict - """.format( - name=function_name, - info=inputs_info, - ) + temp_dict['get_inputs_function_name'] = ['get_all_bundled_inputs_for_{function_name}'] + all_inputs['{function_name}'] = temp_dict + """ # To ensure backwards compatibility and a streamlined api for forward these wrappers are provided if function_name == 'forward': diff --git a/torch/utils/cpp_extension.py b/torch/utils/cpp_extension.py index 9baeaa6e2dc8..e596d9bf8338 100644 --- a/torch/utils/cpp_extension.py +++ b/torch/utils/cpp_extension.py @@ -1424,11 +1424,9 @@ def load_inline(name, raise ValueError(f"Expected 'functions' to be a list or dict, but was {type(functions)}") for function_name, docstring in functions.items(): if with_pytorch_error_handling: - module_def.append( - 'm.def("{0}", torch::wrap_pybind_function({0}), "{1}");' - .format(function_name, docstring)) + module_def.append(f'm.def("{function_name}", torch::wrap_pybind_function({function_name}), "{docstring}");') else: - module_def.append('m.def("{0}", {0}, "{1}");'.format(function_name, docstring)) + module_def.append(f'm.def("{function_name}", {function_name}, "{docstring}");') module_def.append('}') cpp_sources += module_def diff --git a/torch/utils/data/dataloader.py b/torch/utils/data/dataloader.py index 7f5090561795..f3cd8e3d331a 100644 --- a/torch/utils/data/dataloader.py +++ b/torch/utils/data/dataloader.py @@ -315,7 +315,7 @@ class DataLoader(Generic[T_co]): # See NOTE [ Custom Samplers and IterableDataset ] raise ValueError( "DataLoader with IterableDataset: expected unspecified " - "batch_sampler option, but got batch_sampler={}".format(batch_sampler)) + f"batch_sampler option, but got batch_sampler={batch_sampler}") else: shuffle = bool(shuffle) self._dataset_kind = _DatasetKind.Map @@ -397,19 +397,19 @@ class DataLoader(Generic[T_co]): valid_start_methods = multiprocessing.get_all_start_methods() if multiprocessing_context not in valid_start_methods: raise ValueError( - ('multiprocessing_context option ' - 'should specify a valid start method in {!r}, but got ' - 
'multiprocessing_context={!r}').format(valid_start_methods, multiprocessing_context)) + 'multiprocessing_context option ' + f'should specify a valid start method in {valid_start_methods!r}, but got ' + f'multiprocessing_context={multiprocessing_context!r}') multiprocessing_context = multiprocessing.get_context(multiprocessing_context) if not isinstance(multiprocessing_context, python_multiprocessing.context.BaseContext): - raise TypeError(('multiprocessing_context option should be a valid context ' - 'object or a string specifying the start method, but got ' - 'multiprocessing_context={}').format(multiprocessing_context)) + raise TypeError('multiprocessing_context option should be a valid context ' + 'object or a string specifying the start method, but got ' + f'multiprocessing_context={multiprocessing_context}') else: - raise ValueError(('multiprocessing_context can only be used with ' - 'multi-process loading (num_workers > 0), but got ' - 'num_workers={}').format(self.num_workers)) + raise ValueError('multiprocessing_context can only be used with ' + 'multi-process loading (num_workers > 0), but got ' + f'num_workers={self.num_workers}') self.__multiprocessing_context = multiprocessing_context diff --git a/torch/utils/data/datapipes/_decorator.py b/torch/utils/data/datapipes/_decorator.py index d2afd996f54b..b6e93776aa38 100644 --- a/torch/utils/data/datapipes/_decorator.py +++ b/torch/utils/data/datapipes/_decorator.py @@ -70,7 +70,7 @@ class non_deterministic: if isinstance(arg, Type): # type: ignore[arg-type] if not issubclass(arg, IterDataPipe): # type: ignore[arg-type] raise TypeError("Only `IterDataPipe` can be decorated with `non_deterministic`" - ", but {} is found".format(arg.__name__)) + f", but {arg.__name__} is found") self.cls = arg # type: ignore[assignment] # 2. Decorator has an argument of a function # This class should behave differently given different inputs. Use this @@ -103,13 +103,13 @@ class non_deterministic: res = self.deterministic_fn(*args, **kwargs) # type: ignore[call-arg, misc] if not isinstance(res, bool): raise TypeError("deterministic_fn of `non_deterministic` decorator is required " - "to return a boolean value, but {} is found".format(type(res))) + f"to return a boolean value, but {type(res)} is found") global _determinism if _determinism and res: - raise TypeError("{} is non-deterministic with the inputs, but you set " + raise TypeError(f"{self.cls.__name__} is non-deterministic with the inputs, but you set " # type: ignore[union-attr] "'guaranteed_datapipes_determinism'. 
You can turn off determinism " "for this DataPipe if that is acceptable for your application" - .format(self.cls.__name__)) # type: ignore[union-attr] + ) return self.cls(*args, **kwargs) # type: ignore[call-arg, misc] @@ -130,9 +130,9 @@ def argument_validation(f): if not isinstance(value, IterDataPipe): raise TypeError(f"Expected argument '{argument_name}' as a IterDataPipe, but found {type(value)}") if not value.type.issubtype(hint.type): - raise TypeError("Expected type of argument '{}' as a subtype of " - "hint {}, but found {}" - .format(argument_name, hint.type, value.type)) + raise TypeError(f"Expected type of argument '{argument_name}' as a subtype of " + f"hint {hint.type}, but found {value.type}" + ) return f(*args, **kwargs) diff --git a/torch/utils/data/sampler.py b/torch/utils/data/sampler.py index 1e56df5adc8a..1ba1dfc665ef 100644 --- a/torch/utils/data/sampler.py +++ b/torch/utils/data/sampler.py @@ -223,7 +223,7 @@ class WeightedRandomSampler(Sampler[int]): weights_tensor = torch.as_tensor(weights, dtype=torch.double) if len(weights_tensor.shape) != 1: raise ValueError("weights should be a 1d sequence but given " - "weights have shape {}".format(tuple(weights_tensor.shape))) + f"weights have shape {tuple(weights_tensor.shape)}") self.weights = weights_tensor self.num_samples = num_samples diff --git a/torch/utils/hipify/hipify_python.py b/torch/utils/hipify/hipify_python.py index 34361d09221c..4d7bf7233cfc 100755 --- a/torch/utils/hipify/hipify_python.py +++ b/torch/utils/hipify/hipify_python.py @@ -502,7 +502,7 @@ def hip_header_magic(input_string): # Check if one of the following headers is already included. headers = ["hip/hip_runtime.h", "hip/hip_runtime_api.h"] - if any(re.search(r'#include ("{0}"|<{0}>)'.format(ext), output_string) for ext in headers): + if any(re.search(fr'#include ("{ext}"|<{ext}>)', output_string) for ext in headers): return output_string # Rough logic to detect if we're inside device code diff --git a/torch/utils/hooks.py b/torch/utils/hooks.py index 1cc83911d32d..e0ab3618242a 100644 --- a/torch/utils/hooks.py +++ b/torch/utils/hooks.py @@ -83,10 +83,10 @@ def warn_if_has_hooks(tensor): for k in tensor._backward_hooks: hook = tensor._backward_hooks[k] if not hasattr(k, "__torch_unserializable__"): - warnings.warn("backward hook {} on tensor will not be " + warnings.warn(f"backward hook {repr(hook)} on tensor will not be " "serialized. 
If this is expected, you can " "decorate the function with @torch.utils.hooks.unserializable_hook " - "to suppress this warning".format(repr(hook))) + "to suppress this warning") class BackwardHook: """ @@ -140,7 +140,7 @@ class BackwardHook: if len(out) != len(res): raise RuntimeError("Backward hook returned an invalid number of grad_input, " - "got {}, but expected {}".format(len(out), len(res))) + f"got {len(out)}, but expected {len(res)}") res = out @@ -209,7 +209,7 @@ class BackwardHook: actual_len = len(hook_grad_outputs) if actual_len != expected_len: raise RuntimeError("Backward pre hook returned an invalid number of grad_output, " - "got {}, but expected {}".format(actual_len, expected_len)) + f"got {actual_len}, but expected {expected_len}") self.grad_outputs = hook_grad_outputs # Special case if no input required gradients, this hook should call the user diff --git a/torch/utils/mobile_optimizer.py b/torch/utils/mobile_optimizer.py index fae6efe265f9..b89169a80a93 100644 --- a/torch/utils/mobile_optimizer.py +++ b/torch/utils/mobile_optimizer.py @@ -95,9 +95,9 @@ def generate_mobile_module_lints(script_module: torch.jit.ScriptModule): for name, param in script_module.named_parameters(): if param.requires_grad: - lint_list.append({"name": LintCode.REQUIRES_GRAD.name, "message": "Param {} requires grad, " + lint_list.append({"name": LintCode.REQUIRES_GRAD.name, "message": f"Param {name} requires grad, " "please set torch.no_grad() to reduce memory usage and improve computation speed during " - "inference phase.".format(name)}) + "inference phase."}) op_names = torch.jit.export_opnames(script_module) for op_name in op_names: