From dcc3cf7066b4d8cab63ecb73daf1e36b01220a4e Mon Sep 17 00:00:00 2001
From: Xuehai Pan
Date: Thu, 9 Jan 2025 02:29:27 +0800
Subject: [PATCH] [BE] fix ruff rule E226: add missing whitespace around operator in f-strings (#144415)

The fixes are generated by:

```bash
ruff check --fix --preview --unsafe-fixes --select=E226 .
lintrunner -a --take "RUFF,PYFMT" --all-files
```
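For reference, E226 is pycodestyle's "missing whitespace around arithmetic operator" check, which ruff's `--preview` mode also applies inside f-string replacement fields. A minimal before/after sketch of the kind of change this patch makes (hypothetical snippet, not a line from the diff):

```python
idx, total = 4, 29

# Before: `{idx+1}` trips E226 (missing whitespace around arithmetic operator)
print(f"Skipping {idx+1} of {total} PRs")

# After `ruff check --fix --preview --select=E226`: whitespace added around `+`
print(f"Skipping {idx + 1} of {total} PRs")
```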
Pull Request resolved: https://github.com/pytorch/pytorch/pull/144415
Approved by: https://github.com/huydhn, https://github.com/Skylion007
---
 .github/scripts/trymerge.py                               | 2 +-
 benchmarks/dynamo/common.py                               | 4 ++--
 .../microbenchmarks/cache_debug_microbenchmarks.py        | 2 +-
 benchmarks/dynamo/microbenchmarks/fx_microbenchmarks.py   | 2 +-
 benchmarks/dynamo/microbenchmarks/overheads.py            | 2 +-
 benchmarks/dynamo/pr_time_benchmarks/check_results.py     | 6 +++---
 benchmarks/dynamo/runner.py                               | 2 +-
 benchmarks/dynamo/training_loss.py                        | 4 ++--
 functorch/examples/compilation/fuse_module.py             | 4 ++--
 functorch/examples/maml_omniglot/maml-omniglot-higher.py  | 2 +-
 functorch/examples/maml_omniglot/maml-omniglot-ptonly.py  | 2 +-
 .../examples/maml_omniglot/maml-omniglot-transforms.py    | 2 +-
 test/export/random_dag.py                                 | 6 +++---
 test/test_testing.py                                      | 2 +-
 third_party/generate-xnnpack-wrappers.py                  | 2 +-
 torch/_dynamo/backends/debugging.py                       | 6 +++---
 torch/_dynamo/debug_utils.py                              | 8 +++++---
 torch/_inductor/wrapper_benchmark.py                      | 2 +-
 torch/_logging/_internal.py                               | 2 +-
 torch/_numpy/testing/utils.py                             | 6 ++----
 torch/distributed/_tools/ilp_utils.py                     | 6 +++---
 torch/distributed/tensor/examples/convnext_example.py     | 9 ++++++---
 torch/distributions/utils.py                              | 2 +-
 torch/distributions/wishart.py                            | 2 +-
 torch/export/unflatten.py                                 | 2 +-
 torch/profiler/_memory_profiler.py                        | 4 ++--
 torch/profiler/_pattern_matcher.py                        | 6 +++---
 torch/profiler/_utils.py                                  | 4 ++--
 .../distributed/rpc/examples/parameter_server_test.py     | 2 +-
 29 files changed, 54 insertions(+), 51 deletions(-)

diff --git a/.github/scripts/trymerge.py b/.github/scripts/trymerge.py
index ca18ddcf4712..21af4ca195bd 100755
--- a/.github/scripts/trymerge.py
+++ b/.github/scripts/trymerge.py
@@ -669,7 +669,7 @@ def get_ghstack_prs(
         if not open_only or not candidate.is_closed():
             return False
         print(
-            f"Skipping {idx+1} of {len(rev_list)} PR (#{candidate.pr_num}) as its already been merged"
+            f"Skipping {idx + 1} of {len(rev_list)} PR (#{candidate.pr_num}) as its already been merged"
         )
         return True

diff --git a/benchmarks/dynamo/common.py b/benchmarks/dynamo/common.py
index 086cd68dc916..c1d11c596b5f 100644
--- a/benchmarks/dynamo/common.py
+++ b/benchmarks/dynamo/common.py
@@ -676,7 +676,7 @@ def print_summary_table(data, print_dataframe=False):
                 print(col.ljust(width), f"mean={data[col].mean():.3f}x")
             elif col in ("accuracy"):
                 pass_rate = (data[col] == "pass").mean()
-                print(col.ljust(width), f"pass_rate={100*pass_rate:.2f}%")
+                print(col.ljust(width), f"pass_rate={100 * pass_rate:.2f}%")
             else:
                 cdata = data[col]
                 print(
@@ -4993,7 +4993,7 @@ def run(runner, args, original_dir=None):
     for i, name in enumerate(model_names):
         current_name = name
         if args.progress:
-            print(f"Running model {i+1}/{nmodels}", flush=True)
+            print(f"Running model {i + 1}/{nmodels}", flush=True)

         try:
             timeout = args.timeout
diff --git a/benchmarks/dynamo/microbenchmarks/cache_debug_microbenchmarks.py b/benchmarks/dynamo/microbenchmarks/cache_debug_microbenchmarks.py
index f152f0c9bd10..2f76511cf0e4 100644
--- a/benchmarks/dynamo/microbenchmarks/cache_debug_microbenchmarks.py
+++ b/benchmarks/dynamo/microbenchmarks/cache_debug_microbenchmarks.py
@@ -25,7 +25,7 @@ def main():
         return details.debug_lines()

     t = min(timeit.repeat(fn, number=K, repeat=3))
-    print(f"iterating over {N*K} FX nodes took {t:.1f}s ({N*K/t:.0f} nodes/s)")
+    print(f"iterating over {N * K} FX nodes took {t:.1f}s ({N * K / t:.0f} nodes/s)")


 if __name__ == "__main__":
diff --git a/benchmarks/dynamo/microbenchmarks/fx_microbenchmarks.py b/benchmarks/dynamo/microbenchmarks/fx_microbenchmarks.py
index 0f957dc4aaf7..ecdf6b62ed6c 100644
--- a/benchmarks/dynamo/microbenchmarks/fx_microbenchmarks.py
+++ b/benchmarks/dynamo/microbenchmarks/fx_microbenchmarks.py
@@ -24,7 +24,7 @@ def main():
         pass

     t = min(timeit.repeat(fn, number=K, repeat=3))
-    print(f"iterating over {N*K} FX nodes took {t:.1f}s ({N*K/t:.0f} nodes/s)")
+    print(f"iterating over {N * K} FX nodes took {t:.1f}s ({N * K / t:.0f} nodes/s)")


 if __name__ == "__main__":
diff --git a/benchmarks/dynamo/microbenchmarks/overheads.py b/benchmarks/dynamo/microbenchmarks/overheads.py
index 687fe58cc795..47d7dd0f50e0 100644
--- a/benchmarks/dynamo/microbenchmarks/overheads.py
+++ b/benchmarks/dynamo/microbenchmarks/overheads.py
@@ -19,7 +19,7 @@ def bench(name, fn, requires_grad):
     end = time.perf_counter()

     results = timeit.repeat(lambda: fn(x), number=1000, repeat=1000)
-    print(f"{name} {np.median(results)*1000:.1f}us (warmup={end-start:.1f}s)")
+    print(f"{name} {np.median(results) * 1000:.1f}us (warmup={end - start:.1f}s)")


 def main():
diff --git a/benchmarks/dynamo/pr_time_benchmarks/check_results.py b/benchmarks/dynamo/pr_time_benchmarks/check_results.py
index afa6c376e62b..3c836c1b790a 100644
--- a/benchmarks/dynamo/pr_time_benchmarks/check_results.py
+++ b/benchmarks/dynamo/pr_time_benchmarks/check_results.py
@@ -144,7 +144,7 @@ def main():
             fail = True
             print(
                 f"REGRESSION: benchmark {key} failed, actual result {result} "
-                f"is {ratio:.2f}% higher than expected {entry.expected_value} ±{entry.noise_margin*100:+.2f}% "
+                f"is {ratio:.2f}% higher than expected {entry.expected_value} ±{entry.noise_margin * 100:+.2f}% "
                 f"if this is an expected regression, please update the expected results.\n"
             )
             print(
@@ -158,7 +158,7 @@ def main():

             print(
                 f"WIN: benchmark {key} failed, actual result {result} is {ratio:+.2f}% lower than "
-                f"expected {entry.expected_value} ±{entry.noise_margin*100:.2f}% "
+                f"expected {entry.expected_value} ±{entry.noise_margin * 100:.2f}% "
                 f"please update the expected results. \n"
             )
             print(
@@ -170,7 +170,7 @@ def main():
         else:
             print(
                 f"PASS: benchmark {key} pass, actual result {result} {ratio:+.2f}% is within "
-                f"expected {entry.expected_value} ±{entry.noise_margin*100:.2f}%\n"
+                f"expected {entry.expected_value} ±{entry.noise_margin * 100:.2f}%\n"
             )
             log("pass")

diff --git a/benchmarks/dynamo/runner.py b/benchmarks/dynamo/runner.py
index 3e1ab1a4b3b9..4d49b60ffa46 100755
--- a/benchmarks/dynamo/runner.py
+++ b/benchmarks/dynamo/runner.py
@@ -543,7 +543,7 @@ def build_summary(args):
         out_io.write(f"Number CUDA Devices: {torch.cuda.device_count()}\n")
         out_io.write(f"Device Name: {torch.cuda.get_device_name(0)}\n")
         out_io.write(
-            f"Device Memory [GB]: {torch.cuda.get_device_properties(0).total_memory/1e9}\n"
+            f"Device Memory [GB]: {torch.cuda.get_device_properties(0).total_memory / 1e9}\n"
         )

     title = "## Build Summary"
diff --git a/benchmarks/dynamo/training_loss.py b/benchmarks/dynamo/training_loss.py
index ff797f07d971..1e7e57dfdbae 100644
--- a/benchmarks/dynamo/training_loss.py
+++ b/benchmarks/dynamo/training_loss.py
@@ -193,9 +193,9 @@ def main():
     print(
         f"Train model on {args.epochs} epochs with backend {args.backend} and optimizer {args.optimizer}:"
     )
-    print(f"PyTorch spent {timedelta(seconds=native_elapsed/args.epochs)} per epoch")
+    print(f"PyTorch spent {timedelta(seconds=native_elapsed / args.epochs)} per epoch")
     print(
-        f"TorchDynamo spent {timedelta(seconds=dynamo_elapsed/args.epochs)} per epoch"
+        f"TorchDynamo spent {timedelta(seconds=dynamo_elapsed / args.epochs)} per epoch"
     )


diff --git a/functorch/examples/compilation/fuse_module.py b/functorch/examples/compilation/fuse_module.py
index 3e3c64d1da7d..176c8b39faf4 100644
--- a/functorch/examples/compilation/fuse_module.py
+++ b/functorch/examples/compilation/fuse_module.py
@@ -51,6 +51,6 @@ for a, b in zip(run(mod, input), run(compiled_mod, input)):
 for _ in range(5):
     i = 10000
     t = timeit.Timer("mod(input)", globals=globals()).timeit(10000)
-    print(f"eager {t/i*1e6}")
+    print(f"eager {t / i * 1e6}")
     t = timeit.Timer("compiled_mod(input)", globals=globals()).timeit(10000)
-    print(f"compiled {t/i*1e6}")
+    print(f"compiled {t / i * 1e6}")
diff --git a/functorch/examples/maml_omniglot/maml-omniglot-higher.py b/functorch/examples/maml_omniglot/maml-omniglot-higher.py
index 82e33581124e..c341a6cb6a47 100755
--- a/functorch/examples/maml_omniglot/maml-omniglot-higher.py
+++ b/functorch/examples/maml_omniglot/maml-omniglot-higher.py
@@ -235,7 +235,7 @@ def test(db, net, device, epoch, log):

     qry_losses = torch.cat(qry_losses).mean().item()
     qry_accs = 100.0 * torch.cat(qry_accs).float().mean().item()
-    print(f"[Epoch {epoch+1:.2f}] Test Loss: {qry_losses:.2f} | Acc: {qry_accs:.2f}")
+    print(f"[Epoch {epoch + 1:.2f}] Test Loss: {qry_losses:.2f} | Acc: {qry_accs:.2f}")
     log.append(
         {
             "epoch": epoch + 1,
diff --git a/functorch/examples/maml_omniglot/maml-omniglot-ptonly.py b/functorch/examples/maml_omniglot/maml-omniglot-ptonly.py
index 35696675305e..5132af9f2b67 100755
--- a/functorch/examples/maml_omniglot/maml-omniglot-ptonly.py
+++ b/functorch/examples/maml_omniglot/maml-omniglot-ptonly.py
@@ -225,7 +225,7 @@ def test(db, net, device, epoch, log):

     qry_losses = torch.cat(qry_losses).mean().item()
     qry_accs = 100.0 * torch.cat(qry_accs).float().mean().item()
-    print(f"[Epoch {epoch+1:.2f}] Test Loss: {qry_losses:.2f} | Acc: {qry_accs:.2f}")
+    print(f"[Epoch {epoch + 1:.2f}] Test Loss: {qry_losses:.2f} | Acc: {qry_accs:.2f}")
     log.append(
         {
             "epoch": epoch + 1,
diff --git a/functorch/examples/maml_omniglot/maml-omniglot-transforms.py b/functorch/examples/maml_omniglot/maml-omniglot-transforms.py
index 971d940f4032..182d27a57f67 100755
--- a/functorch/examples/maml_omniglot/maml-omniglot-transforms.py
+++ b/functorch/examples/maml_omniglot/maml-omniglot-transforms.py
@@ -225,7 +225,7 @@ def test(db, net, device, epoch, log):

     qry_losses = torch.cat(qry_losses).mean().item()
     qry_accs = 100.0 * torch.cat(qry_accs).float().mean().item()
-    print(f"[Epoch {epoch+1:.2f}] Test Loss: {qry_losses:.2f} | Acc: {qry_accs:.2f}")
+    print(f"[Epoch {epoch + 1:.2f}] Test Loss: {qry_losses:.2f} | Acc: {qry_accs:.2f}")
     log.append(
         {
             "epoch": epoch + 1,
diff --git a/test/export/random_dag.py b/test/export/random_dag.py
index 0aec7e24c6a0..ea3e199f6ff1 100644
--- a/test/export/random_dag.py
+++ b/test/export/random_dag.py
@@ -137,7 +137,7 @@ class Unflatten(TestGenerator):
         code = Block()
         code.new_line("super().__init__()")
         if i < self.n - 1:
-            code.new_line(f"self.n{i+1} = N{i+1}()")
+            code.new_line(f"self.n{i + 1} = N{i + 1}()")
         return code

     def gen_forward_body(self, i: int):
@@ -207,7 +207,7 @@ class ConstantUnflatten(Unflatten):
         code.new_line("super().__init__()")
         code.new_line("self.const = torch.ones(1)")
         if i < self.n - 1:
-            code.new_line(f"self.n{i+1} = N{i+1}()")
+            code.new_line(f"self.n{i + 1} = N{i + 1}()")
         return code

     def gen_forward_body(self, i: int):
@@ -249,7 +249,7 @@ class BufferUnflatten(Unflatten):
         code.new_line("super().__init__()")
         code.new_line("self.buf = torch.nn.Buffer(torch.ones(1))")
         if i < self.n - 1:
-            code.new_line(f"self.n{i+1} = N{i+1}()")
+            code.new_line(f"self.n{i + 1} = N{i + 1}()")
         return code

     def gen_forward_body(self, i: int):
diff --git a/test/test_testing.py b/test/test_testing.py
index bfb84759a383..9fd8ce8bcd2d 100644
--- a/test/test_testing.py
+++ b/test/test_testing.py
@@ -489,7 +489,7 @@ if __name__ == '__main__':
         del env[PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY]
         env[PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY] = 'cpu'
         _, stderr = TestCase.run_process_no_exception(test_filter_file_template, env=env)
-        self.assertIn(f'Ran {test_bases_count-1} test', stderr.decode('ascii'))
+        self.assertIn(f'Ran {test_bases_count - 1} test', stderr.decode('ascii'))

         # Test with setting both should throw exception
         env[PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY] = 'cpu'
diff --git a/third_party/generate-xnnpack-wrappers.py b/third_party/generate-xnnpack-wrappers.py
index c02d69374e64..087304bc4166 100755
--- a/third_party/generate-xnnpack-wrappers.py
+++ b/third_party/generate-xnnpack-wrappers.py
@@ -100,7 +100,7 @@ IGNORED_SOURCES = set((
 def handle_singleline_parse(line):
     start_index = line.find("(")
     end_index = line.find(")")
-    line = line[start_index+1:end_index]
+    line = line[start_index + 1:end_index]
     key_val = line.split(" ")
     return key_val[0], [x[4:] for x in key_val[1:]]

diff --git a/torch/_dynamo/backends/debugging.py b/torch/_dynamo/backends/debugging.py
index a9dcfe3b42c2..305e2164509a 100644
--- a/torch/_dynamo/backends/debugging.py
+++ b/torch/_dynamo/backends/debugging.py
@@ -312,7 +312,7 @@ class ExplainOutput:

         output += "Break Reasons:\n"
         for idx, break_reason in enumerate(self.break_reasons):
-            output += f"  Break Reason {idx+1}:\n"
+            output += f"  Break Reason {idx + 1}:\n"
             output += f"    Reason: {break_reason.reason}\n"
             output += "    User Stack:\n"
             for frame_summary in break_reason.user_stack:
@@ -321,14 +321,14 @@ class ExplainOutput:
         if self.ops_per_graph is not None:
             output += "Ops per Graph:\n"
             for idx, ops in enumerate(self.ops_per_graph):
-                output += f"  Ops {idx+1}:\n"
+                output += f"  Ops {idx + 1}:\n"
                 for op in ops:
                     output += f"    {op}\n"

         if self.out_guards is not None:
             output += "Out Guards:\n"
             for i, guard in enumerate(self.out_guards):
-                output += f"  Guard {i+1}:\n"
+                output += f"  Guard {i + 1}:\n"
                 output += f"    {str(guard)}"

         if self.compile_times is not None:
diff --git a/torch/_dynamo/debug_utils.py b/torch/_dynamo/debug_utils.py
index f774f48f33e5..9078f5e370d0 100644
--- a/torch/_dynamo/debug_utils.py
+++ b/torch/_dynamo/debug_utils.py
@@ -184,7 +184,7 @@ class NNModuleToString:
             example_param = next(module.parameters(), None)
             if example_param is not None and example_param.is_cuda:
                 module_str = f"{module_str}.cuda()"
-            model_str += f"{tab*2}self.{module_name} = {module_str}\n"
+            model_str += f"{tab * 2}self.{module_name} = {module_str}\n"

         for buffer_name, buffer in gm._buffers.items():
             if buffer is None:
@@ -203,7 +203,9 @@ class NNModuleToString:
                 )
             if buffer.is_cuda:
                 tensor_str = f"{tensor_str}.cuda()"
-            model_str += f"{tab*2}self.register_buffer('{buffer_name}', {tensor_str})\n"
+            model_str += (
+                f"{tab * 2}self.register_buffer('{buffer_name}', {tensor_str})\n"
+            )

         for param_name, param in gm._parameters.items():
             if param is None:
@@ -212,7 +214,7 @@ class NNModuleToString:
             if param.is_cuda:
                 maybe_device = ', device="cuda"'
             tensor_str = f"torch.nn.Parameter(torch.randn({list(param.shape)}, dtype={param.dtype}{maybe_device}))"
-            model_str += f"{tab*2}self.{param_name} = {tensor_str}\n"
+            model_str += f"{tab * 2}self.{param_name} = {tensor_str}\n"

         # TODO - Keep this code for now. But, I don't think we will need this.
         # attrs = dir(gm)
diff --git a/torch/_inductor/wrapper_benchmark.py b/torch/_inductor/wrapper_benchmark.py
index e919bb09e548..77071cbc2de2 100644
--- a/torch/_inductor/wrapper_benchmark.py
+++ b/torch/_inductor/wrapper_benchmark.py
@@ -396,7 +396,7 @@ def compiled_module_main(benchmark_name, benchmark_compiled_module_fn):

     if torch.cuda.is_available():
         peak_mem = torch.cuda.max_memory_allocated()
-        print(f"Peak GPU memory usage {peak_mem/1e6:.3f} MB")
+        print(f"Peak GPU memory usage {peak_mem / 1e6:.3f} MB")

     if torch.cuda.is_available() and args.cuda_memory_snapshot:
         collect_memory_snapshot(benchmark_compiled_module_fn)
diff --git a/torch/_logging/_internal.py b/torch/_logging/_internal.py
index 02f12cb37fb3..189438a696ed 100644
--- a/torch/_logging/_internal.py
+++ b/torch/_logging/_internal.py
@@ -844,7 +844,7 @@ class TorchLogsFormatter(logging.Formatter):
         filepath = make_module_path_relative(record.pathname)

         prefix = (
-            f"{record.rankprefix}{shortlevel}{record.asctime}.{int(record.msecs*1000):06d} {record.process} "
+            f"{record.rankprefix}{shortlevel}{record.asctime}.{int(record.msecs * 1000):06d} {record.process} "
             f"{filepath}:"
             f"{record.lineno}]{record.traceid}{record.artifactprefix}"
         )
diff --git a/torch/_numpy/testing/utils.py b/torch/_numpy/testing/utils.py
index abd0756187bd..29885b917049 100644
--- a/torch/_numpy/testing/utils.py
+++ b/torch/_numpy/testing/utils.py
@@ -2274,7 +2274,7 @@ def check_free_memory(free_bytes):
         )

         msg = (
-            f"{free_bytes/1e9} GB memory required, but environment variable "
+            f"{free_bytes / 1e9} GB memory required, but environment variable "
            f"NPY_AVAILABLE_MEM={env_value} set"
         )
     else:
@@ -2288,9 +2288,7 @@ def check_free_memory(free_bytes):
             )
             mem_free = -1
         else:
-            msg = (
-                f"{free_bytes/1e9} GB memory required, but {mem_free/1e9} GB available"
-            )
+            msg = f"{free_bytes / 1e9} GB memory required, but {mem_free / 1e9} GB available"

     return msg if mem_free < free_bytes else None

diff --git a/torch/distributed/_tools/ilp_utils.py b/torch/distributed/_tools/ilp_utils.py
index 43872339d5f3..170a8dfba814 100644
--- a/torch/distributed/_tools/ilp_utils.py
+++ b/torch/distributed/_tools/ilp_utils.py
@@ -257,11 +257,11 @@ def display_bytes(b: int, unit: str = "MiB") -> str:
     return a string that represent the number of bytes in a desired unit
     """
     if unit == "KiB":
-        return f"{b/2**10:.2f} KiB"
+        return f"{b / 2**10:.2f} KiB"
     if unit == "MiB":
-        return f"{b/2**20:.2f} MiB"
+        return f"{b / 2**20:.2f} MiB"
     if unit == "GiB":
-        return f"{b/2**30:.2f} GiB"
+        return f"{b / 2**30:.2f} GiB"
     return f"{b:.2f} bytes"


diff --git a/torch/distributed/tensor/examples/convnext_example.py b/torch/distributed/tensor/examples/convnext_example.py
index ec035644f0d5..04b04b69ee9c 100644
--- a/torch/distributed/tensor/examples/convnext_example.py
+++ b/torch/distributed/tensor/examples/convnext_example.py
@@ -243,13 +243,16 @@ def train_convnext_example():
     max_reserved = torch.cuda.max_memory_reserved()
     max_allocated = torch.cuda.max_memory_allocated()
     print(
-        f"rank {rank}, {ITER_TIME} iterations, average latency {(end - start)/ITER_TIME*1000:10.2f} ms"
+        f"rank {rank}, {ITER_TIME} iterations, "
+        f"average latency {(end - start) / ITER_TIME * 1000:10.2f} ms"
     )
     print(
-        f"rank {rank}, forward {forward_time/ITER_TIME*1000:10.2f} ms, backward {backward_time/ITER_TIME*1000:10.2f} ms"
+        f"rank {rank}, forward {forward_time / ITER_TIME * 1000:10.2f} ms, "
+        f"backward {backward_time / ITER_TIME * 1000:10.2f} ms"
     )
     print(
-        f"rank {rank}, max reserved {max_reserved/1024/1024/1024:8.2f} GiB, max allocated {max_allocated/1024/1024/1024:8.2f} GiB"
+        f"rank {rank}, max reserved {max_reserved / 1024 / 1024 / 1024:8.2f} GiB, "
+        f"max allocated {max_allocated / 1024 / 1024 / 1024:8.2f} GiB"
     )

     dist.destroy_process_group()
diff --git a/torch/distributions/utils.py b/torch/distributions/utils.py
index 198f0a314d24..ea969b25c04a 100644
--- a/torch/distributions/utils.py
+++ b/torch/distributions/utils.py
@@ -187,7 +187,7 @@ def tril_matrix_to_vec(mat: Tensor, diag: int = 0) -> Tensor:
     """
     n = mat.shape[-1]
     if not torch._C._get_tracing_state() and (diag < -n or diag >= n):
-        raise ValueError(f"diag ({diag}) provided is outside [{-n}, {n-1}].")
+        raise ValueError(f"diag ({diag}) provided is outside [{-n}, {n - 1}].")
     arange = torch.arange(n, device=mat.device)
     tril_mask = arange < arange.view(-1, 1) + (diag + 1)
     vec = mat[..., tril_mask]
diff --git a/torch/distributions/wishart.py b/torch/distributions/wishart.py
index 98856e3b822e..cb10adcc183c 100644
--- a/torch/distributions/wishart.py
+++ b/torch/distributions/wishart.py
@@ -107,7 +107,7 @@ class Wishart(ExponentialFamily):

         if self.df.le(event_shape[-1] - 1).any():
             raise ValueError(
-                f"Value of df={df} expected to be greater than ndim - 1 = {event_shape[-1]-1}."
+                f"Value of df={df} expected to be greater than ndim - 1 = {event_shape[-1] - 1}."
             )

         if scale_tril is not None:
diff --git a/torch/export/unflatten.py b/torch/export/unflatten.py
index 1474225b5f91..a1e3e31f1e5a 100644
--- a/torch/export/unflatten.py
+++ b/torch/export/unflatten.py
@@ -890,7 +890,7 @@ def _add_submodule(
 def _call_name(base: str, n: int) -> str:
     # Given n >= 0, generate call names to a submodule `base` of the form
     # `base`, `base@1`, `base@2`, etc.
-    return base if n == 1 else f"{base}@{n-1}"
+    return base if n == 1 else f"{base}@{n - 1}"


 def _is_call_name(call_name: str, base: str) -> bool:
diff --git a/torch/profiler/_memory_profiler.py b/torch/profiler/_memory_profiler.py
index d065a3b155df..8c72300ce0e4 100644
--- a/torch/profiler/_memory_profiler.py
+++ b/torch/profiler/_memory_profiler.py
@@ -1168,8 +1168,8 @@ class MemoryProfileTimeline:
         title = "\n\n".join(
             ([title] if title else [])
             + [
-                f"Max memory allocated: {max_memory_allocated/(1024**3):.2f} GiB \n"
-                f"Max memory reserved: {max_memory_reserved/(1024**3):.2f} GiB"
+                f"Max memory allocated: {max_memory_allocated / (1024**3):.2f} GiB \n"
+                f"Max memory reserved: {max_memory_reserved / (1024**3):.2f} GiB"
             ]
         )
         axes.set_title(title)
diff --git a/torch/profiler/_pattern_matcher.py b/torch/profiler/_pattern_matcher.py
index 9b2d87a6a37e..6bac511dbbfc 100644
--- a/torch/profiler/_pattern_matcher.py
+++ b/torch/profiler/_pattern_matcher.py
@@ -84,7 +84,7 @@ class Pattern:
         )
         return (
             f"{self.name}: {len(events)} events matched. "
-            f"Total Estimated Speedup: {format_time(original_time - new_time)} ({round(original_time/new_time, 2)}X)"
+            f"Total Estimated Speedup: {format_time(original_time - new_time)} ({round(original_time / new_time, 2)}X)"
         )

     def match(self, event: _ProfilerEvent):
@@ -622,7 +622,7 @@ def report_all_anti_patterns(
     ]
     reported = set()
     summaries = []
-    message_list = [f"{'-'*40}TorchTidy Report{'-'*40}"]
+    message_list = [f"{'-' * 40}TorchTidy Report{'-' * 40}"]
     message_list.append("Matched Events:")

     for anti_pattern in anti_patterns:
@@ -657,6 +657,6 @@ def report_all_anti_patterns(
         message_list.append("Summary:")
         message_list += summaries

-    message_list.append(f"{'-'*40}TorchTidy Report{'-'*40}")
+    message_list.append(f"{'-' * 40}TorchTidy Report{'-' * 40}")
     if print_enable:
         print("\n".join(message_list))
diff --git a/torch/profiler/_utils.py b/torch/profiler/_utils.py
index 20dfeb80adeb..283d31c87024 100644
--- a/torch/profiler/_utils.py
+++ b/torch/profiler/_utils.py
@@ -335,11 +335,11 @@ class BasicEvaluation:

         output += "\n".join(
             [
-                f"""{'-'*80}
+                f"""{'-' * 80}
 Event: {event}
 Source code location: {source_code_location(event.event)}
 Percentage idle time: {self.metrics[event].fraction_idle_time * 100:.2f}%
-{'-'*80}"""
+{'-' * 80}"""
                 for event in event_list
             ]
         )
diff --git a/torch/testing/_internal/distributed/rpc/examples/parameter_server_test.py b/torch/testing/_internal/distributed/rpc/examples/parameter_server_test.py
index 928f28c19211..1bad7694fdf1 100644
--- a/torch/testing/_internal/distributed/rpc/examples/parameter_server_test.py
+++ b/torch/testing/_internal/distributed/rpc/examples/parameter_server_test.py
@@ -112,7 +112,7 @@ def run_ps(trainers):
     torch.futures.wait_all(futs)
     stop = perf_counter()
     timed_log("Finish training")
-    timed_log(f"Time spent training: {stop-start}s")
+    timed_log(f"Time spent training: {stop - start}s")


 class ParameterServerTest(RpcAgentTestFixture):