[BE] fix ruff rule E226: add missing whitespace around operator in f-strings (#144415)

The fixes were generated by:

```bash
ruff check --fix --preview --unsafe-fixes --select=E226 .
lintrunner -a --take "RUFF,PYFMT" --all-files
```
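
E226 is pycodestyle's "missing whitespace around arithmetic operator" rule; the `--preview` and `--unsafe-fixes` flags in the command above are what allow ruff to apply it inside f-string replacement fields, which is where every change in this commit lives. A minimal sketch of the before/after shape (variable names are illustrative, not taken from the diff):

```python
idx, total = 3, 10

# Before: with preview mode enabled, ruff reports E226 for the
# `idx+1` expression inside the f-string replacement field.
print(f"Skipping {idx+1} of {total} PRs")

# After `ruff check --fix --preview --unsafe-fixes --select=E226`:
print(f"Skipping {idx + 1} of {total} PRs")
```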

Pull Request resolved: https://github.com/pytorch/pytorch/pull/144415
Approved by: https://github.com/huydhn, https://github.com/Skylion007
Commit: dcc3cf7066 (parent a742859fc2)
Author: Xuehai Pan
Date: 2025-01-09 02:29:27 +08:00
Committed by: PyTorch MergeBot

29 changed files with 54 additions and 51 deletions


```diff
@@ -669,7 +669,7 @@ def get_ghstack_prs(
         if not open_only or not candidate.is_closed():
             return False
         print(
-            f"Skipping {idx+1} of {len(rev_list)} PR (#{candidate.pr_num}) as its already been merged"
+            f"Skipping {idx + 1} of {len(rev_list)} PR (#{candidate.pr_num}) as its already been merged"
         )
         return True
```


```diff
@@ -676,7 +676,7 @@ def print_summary_table(data, print_dataframe=False):
             print(col.ljust(width), f"mean={data[col].mean():.3f}x")
         elif col in ("accuracy"):
             pass_rate = (data[col] == "pass").mean()
-            print(col.ljust(width), f"pass_rate={100*pass_rate:.2f}%")
+            print(col.ljust(width), f"pass_rate={100 * pass_rate:.2f}%")
         else:
             cdata = data[col]
             print(
@@ -4993,7 +4993,7 @@ def run(runner, args, original_dir=None):
     for i, name in enumerate(model_names):
         current_name = name
         if args.progress:
-            print(f"Running model {i+1}/{nmodels}", flush=True)
+            print(f"Running model {i + 1}/{nmodels}", flush=True)
         try:
             timeout = args.timeout
```


```diff
@@ -25,7 +25,7 @@ def main():
         return details.debug_lines()
     t = min(timeit.repeat(fn, number=K, repeat=3))
-    print(f"iterating over {N*K} FX nodes took {t:.1f}s ({N*K/t:.0f} nodes/s)")
+    print(f"iterating over {N * K} FX nodes took {t:.1f}s ({N * K / t:.0f} nodes/s)")

 if __name__ == "__main__":
```


```diff
@@ -24,7 +24,7 @@ def main():
         pass
     t = min(timeit.repeat(fn, number=K, repeat=3))
-    print(f"iterating over {N*K} FX nodes took {t:.1f}s ({N*K/t:.0f} nodes/s)")
+    print(f"iterating over {N * K} FX nodes took {t:.1f}s ({N * K / t:.0f} nodes/s)")

 if __name__ == "__main__":
```


```diff
@@ -19,7 +19,7 @@ def bench(name, fn, requires_grad):
     end = time.perf_counter()
     results = timeit.repeat(lambda: fn(x), number=1000, repeat=1000)
-    print(f"{name} {np.median(results)*1000:.1f}us (warmup={end-start:.1f}s)")
+    print(f"{name} {np.median(results) * 1000:.1f}us (warmup={end - start:.1f}s)")

 def main():
```


```diff
@@ -144,7 +144,7 @@ def main():
             fail = True
             print(
                 f"REGRESSION: benchmark {key} failed, actual result {result} "
-                f"is {ratio:.2f}% higher than expected {entry.expected_value} ±{entry.noise_margin*100:+.2f}% "
+                f"is {ratio:.2f}% higher than expected {entry.expected_value} ±{entry.noise_margin * 100:+.2f}% "
                 f"if this is an expected regression, please update the expected results.\n"
             )
             print(
@@ -158,7 +158,7 @@ def main():
             print(
                 f"WIN: benchmark {key} failed, actual result {result} is {ratio:+.2f}% lower than "
-                f"expected {entry.expected_value} ±{entry.noise_margin*100:.2f}% "
+                f"expected {entry.expected_value} ±{entry.noise_margin * 100:.2f}% "
                 f"please update the expected results. \n"
             )
             print(
@@ -170,7 +170,7 @@ def main():
         else:
             print(
                 f"PASS: benchmark {key} pass, actual result {result} {ratio:+.2f}% is within "
-                f"expected {entry.expected_value} ±{entry.noise_margin*100:.2f}%\n"
+                f"expected {entry.expected_value} ±{entry.noise_margin * 100:.2f}%\n"
             )
             log("pass")
```


```diff
@@ -543,7 +543,7 @@ def build_summary(args):
         out_io.write(f"Number CUDA Devices: {torch.cuda.device_count()}\n")
         out_io.write(f"Device Name: {torch.cuda.get_device_name(0)}\n")
         out_io.write(
-            f"Device Memory [GB]: {torch.cuda.get_device_properties(0).total_memory/1e9}\n"
+            f"Device Memory [GB]: {torch.cuda.get_device_properties(0).total_memory / 1e9}\n"
         )
     title = "## Build Summary"
```


```diff
@@ -193,9 +193,9 @@ def main():
     print(
         f"Train model on {args.epochs} epochs with backend {args.backend} and optimizer {args.optimizer}:"
     )
-    print(f"PyTorch spent {timedelta(seconds=native_elapsed/args.epochs)} per epoch")
+    print(f"PyTorch spent {timedelta(seconds=native_elapsed / args.epochs)} per epoch")
     print(
-        f"TorchDynamo spent {timedelta(seconds=dynamo_elapsed/args.epochs)} per epoch"
+        f"TorchDynamo spent {timedelta(seconds=dynamo_elapsed / args.epochs)} per epoch"
     )
```


```diff
@@ -51,6 +51,6 @@ for a, b in zip(run(mod, input), run(compiled_mod, input)):
 for _ in range(5):
     i = 10000
     t = timeit.Timer("mod(input)", globals=globals()).timeit(10000)
-    print(f"eager {t/i*1e6}")
+    print(f"eager {t / i * 1e6}")
     t = timeit.Timer("compiled_mod(input)", globals=globals()).timeit(10000)
-    print(f"compiled {t/i*1e6}")
+    print(f"compiled {t / i * 1e6}")
```


```diff
@@ -235,7 +235,7 @@ def test(db, net, device, epoch, log):
     qry_losses = torch.cat(qry_losses).mean().item()
     qry_accs = 100.0 * torch.cat(qry_accs).float().mean().item()
-    print(f"[Epoch {epoch+1:.2f}] Test Loss: {qry_losses:.2f} | Acc: {qry_accs:.2f}")
+    print(f"[Epoch {epoch + 1:.2f}] Test Loss: {qry_losses:.2f} | Acc: {qry_accs:.2f}")
     log.append(
         {
             "epoch": epoch + 1,
```


```diff
@@ -225,7 +225,7 @@ def test(db, net, device, epoch, log):
     qry_losses = torch.cat(qry_losses).mean().item()
     qry_accs = 100.0 * torch.cat(qry_accs).float().mean().item()
-    print(f"[Epoch {epoch+1:.2f}] Test Loss: {qry_losses:.2f} | Acc: {qry_accs:.2f}")
+    print(f"[Epoch {epoch + 1:.2f}] Test Loss: {qry_losses:.2f} | Acc: {qry_accs:.2f}")
     log.append(
         {
             "epoch": epoch + 1,
```


```diff
@@ -225,7 +225,7 @@ def test(db, net, device, epoch, log):
     qry_losses = torch.cat(qry_losses).mean().item()
     qry_accs = 100.0 * torch.cat(qry_accs).float().mean().item()
-    print(f"[Epoch {epoch+1:.2f}] Test Loss: {qry_losses:.2f} | Acc: {qry_accs:.2f}")
+    print(f"[Epoch {epoch + 1:.2f}] Test Loss: {qry_losses:.2f} | Acc: {qry_accs:.2f}")
     log.append(
         {
             "epoch": epoch + 1,
```


```diff
@@ -137,7 +137,7 @@ class Unflatten(TestGenerator):
         code = Block()
         code.new_line("super().__init__()")
         if i < self.n - 1:
-            code.new_line(f"self.n{i+1} = N{i+1}()")
+            code.new_line(f"self.n{i + 1} = N{i + 1}()")
         return code

     def gen_forward_body(self, i: int):
@@ -207,7 +207,7 @@ class ConstantUnflatten(Unflatten):
         code.new_line("super().__init__()")
         code.new_line("self.const = torch.ones(1)")
         if i < self.n - 1:
-            code.new_line(f"self.n{i+1} = N{i+1}()")
+            code.new_line(f"self.n{i + 1} = N{i + 1}()")
         return code

     def gen_forward_body(self, i: int):
@@ -249,7 +249,7 @@ class BufferUnflatten(Unflatten):
         code.new_line("super().__init__()")
         code.new_line("self.buf = torch.nn.Buffer(torch.ones(1))")
         if i < self.n - 1:
-            code.new_line(f"self.n{i+1} = N{i+1}()")
+            code.new_line(f"self.n{i + 1} = N{i + 1}()")
         return code

     def gen_forward_body(self, i: int):
```


```diff
@@ -489,7 +489,7 @@ if __name__ == '__main__':
         del env[PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY]
         env[PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY] = 'cpu'
         _, stderr = TestCase.run_process_no_exception(test_filter_file_template, env=env)
-        self.assertIn(f'Ran {test_bases_count-1} test', stderr.decode('ascii'))
+        self.assertIn(f'Ran {test_bases_count - 1} test', stderr.decode('ascii'))

         # Test with setting both should throw exception
         env[PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY] = 'cpu'
```


```diff
@@ -100,7 +100,7 @@ IGNORED_SOURCES = set((
 def handle_singleline_parse(line):
     start_index = line.find("(")
     end_index = line.find(")")
-    line = line[start_index+1:end_index]
+    line = line[start_index + 1:end_index]
     key_val = line.split(" ")
     return key_val[0], [x[4:] for x in key_val[1:]]
```


```diff
@@ -312,7 +312,7 @@ class ExplainOutput:
             output += "Break Reasons:\n"
             for idx, break_reason in enumerate(self.break_reasons):
-                output += f"  Break Reason {idx+1}:\n"
+                output += f"  Break Reason {idx + 1}:\n"
                 output += f"    Reason: {break_reason.reason}\n"
                 output += "    User Stack:\n"
                 for frame_summary in break_reason.user_stack:
@@ -321,14 +321,14 @@ class ExplainOutput:
         if self.ops_per_graph is not None:
             output += "Ops per Graph:\n"
             for idx, ops in enumerate(self.ops_per_graph):
-                output += f"  Ops {idx+1}:\n"
+                output += f"  Ops {idx + 1}:\n"
                 for op in ops:
                     output += f"    {op}\n"

         if self.out_guards is not None:
             output += "Out Guards:\n"
             for i, guard in enumerate(self.out_guards):
-                output += f"  Guard {i+1}:\n"
+                output += f"  Guard {i + 1}:\n"
                 output += f"    {str(guard)}"

         if self.compile_times is not None:
```


```diff
@@ -184,7 +184,7 @@ class NNModuleToString:
             example_param = next(module.parameters(), None)
             if example_param is not None and example_param.is_cuda:
                 module_str = f"{module_str}.cuda()"
-            model_str += f"{tab*2}self.{module_name} = {module_str}\n"
+            model_str += f"{tab * 2}self.{module_name} = {module_str}\n"

         for buffer_name, buffer in gm._buffers.items():
             if buffer is None:
@@ -203,7 +203,9 @@ class NNModuleToString:
             )
             if buffer.is_cuda:
                 tensor_str = f"{tensor_str}.cuda()"
-            model_str += f"{tab*2}self.register_buffer('{buffer_name}', {tensor_str})\n"
+            model_str += (
+                f"{tab * 2}self.register_buffer('{buffer_name}', {tensor_str})\n"
+            )

         for param_name, param in gm._parameters.items():
             if param is None:
@@ -212,7 +214,7 @@ class NNModuleToString:
             if param.is_cuda:
                 maybe_device = ', device="cuda"'
             tensor_str = f"torch.nn.Parameter(torch.randn({list(param.shape)}, dtype={param.dtype}{maybe_device}))"
-            model_str += f"{tab*2}self.{param_name} = {tensor_str}\n"
+            model_str += f"{tab * 2}self.{param_name} = {tensor_str}\n"

         # TODO - Keep this code for now. But, I don't think we will need this.
         # attrs = dir(gm)
```


```diff
@@ -396,7 +396,7 @@ def compiled_module_main(benchmark_name, benchmark_compiled_module_fn):
     if torch.cuda.is_available():
         peak_mem = torch.cuda.max_memory_allocated()
-        print(f"Peak GPU memory usage {peak_mem/1e6:.3f} MB")
+        print(f"Peak GPU memory usage {peak_mem / 1e6:.3f} MB")

     if torch.cuda.is_available() and args.cuda_memory_snapshot:
         collect_memory_snapshot(benchmark_compiled_module_fn)
```


```diff
@@ -844,7 +844,7 @@ class TorchLogsFormatter(logging.Formatter):
         filepath = make_module_path_relative(record.pathname)
         prefix = (
-            f"{record.rankprefix}{shortlevel}{record.asctime}.{int(record.msecs*1000):06d} {record.process} "
+            f"{record.rankprefix}{shortlevel}{record.asctime}.{int(record.msecs * 1000):06d} {record.process} "
             f"{filepath}:"
             f"{record.lineno}]{record.traceid}{record.artifactprefix}"
         )
```


```diff
@@ -2274,7 +2274,7 @@ def check_free_memory(free_bytes):
         )
         msg = (
-            f"{free_bytes/1e9} GB memory required, but environment variable "
+            f"{free_bytes / 1e9} GB memory required, but environment variable "
             f"NPY_AVAILABLE_MEM={env_value} set"
         )
     else:
@@ -2288,9 +2288,7 @@ def check_free_memory(free_bytes):
             )
             mem_free = -1
         else:
-            msg = (
-                f"{free_bytes/1e9} GB memory required, but {mem_free/1e9} GB available"
-            )
+            msg = f"{free_bytes / 1e9} GB memory required, but {mem_free / 1e9} GB available"

     return msg if mem_free < free_bytes else None
```


```diff
@@ -257,11 +257,11 @@ def display_bytes(b: int, unit: str = "MiB") -> str:
     return a string that represent the number of bytes in a desired unit
     """
     if unit == "KiB":
-        return f"{b/2**10:.2f} KiB"
+        return f"{b / 2**10:.2f} KiB"
     if unit == "MiB":
-        return f"{b/2**20:.2f} MiB"
+        return f"{b / 2**20:.2f} MiB"
     if unit == "GiB":
-        return f"{b/2**30:.2f} GiB"
+        return f"{b / 2**30:.2f} GiB"
     return f"{b:.2f} bytes"
```


```diff
@@ -243,13 +243,16 @@ def train_convnext_example():
     max_reserved = torch.cuda.max_memory_reserved()
     max_allocated = torch.cuda.max_memory_allocated()
     print(
-        f"rank {rank}, {ITER_TIME} iterations, average latency {(end - start)/ITER_TIME*1000:10.2f} ms"
+        f"rank {rank}, {ITER_TIME} iterations, "
+        f"average latency {(end - start) / ITER_TIME * 1000:10.2f} ms"
     )
     print(
-        f"rank {rank}, forward {forward_time/ITER_TIME*1000:10.2f} ms, backward {backward_time/ITER_TIME*1000:10.2f} ms"
+        f"rank {rank}, forward {forward_time / ITER_TIME * 1000:10.2f} ms, "
+        f"backward {backward_time / ITER_TIME * 1000:10.2f} ms"
     )
     print(
-        f"rank {rank}, max reserved {max_reserved/1024/1024/1024:8.2f} GiB, max allocated {max_allocated/1024/1024/1024:8.2f} GiB"
+        f"rank {rank}, max reserved {max_reserved / 1024 / 1024 / 1024:8.2f} GiB, "
+        f"max allocated {max_allocated / 1024 / 1024 / 1024:8.2f} GiB"
     )
     dist.destroy_process_group()
```


```diff
@@ -187,7 +187,7 @@ def tril_matrix_to_vec(mat: Tensor, diag: int = 0) -> Tensor:
     """
     n = mat.shape[-1]
     if not torch._C._get_tracing_state() and (diag < -n or diag >= n):
-        raise ValueError(f"diag ({diag}) provided is outside [{-n}, {n-1}].")
+        raise ValueError(f"diag ({diag}) provided is outside [{-n}, {n - 1}].")
     arange = torch.arange(n, device=mat.device)
     tril_mask = arange < arange.view(-1, 1) + (diag + 1)
     vec = mat[..., tril_mask]
```


```diff
@@ -107,7 +107,7 @@ class Wishart(ExponentialFamily):
         if self.df.le(event_shape[-1] - 1).any():
             raise ValueError(
-                f"Value of df={df} expected to be greater than ndim - 1 = {event_shape[-1]-1}."
+                f"Value of df={df} expected to be greater than ndim - 1 = {event_shape[-1] - 1}."
             )

         if scale_tril is not None:
```


```diff
@@ -890,7 +890,7 @@ def _add_submodule(
 def _call_name(base: str, n: int) -> str:
     # Given n >= 0, generate call names to a submodule `base` of the form
     # `base`, `base@1`, `base@2`, etc.
-    return base if n == 1 else f"{base}@{n-1}"
+    return base if n == 1 else f"{base}@{n - 1}"

 def _is_call_name(call_name: str, base: str) -> bool:
```


```diff
@@ -1168,8 +1168,8 @@ class MemoryProfileTimeline:
         title = "\n\n".join(
             ([title] if title else [])
             + [
-                f"Max memory allocated: {max_memory_allocated/(1024**3):.2f} GiB \n"
-                f"Max memory reserved: {max_memory_reserved/(1024**3):.2f} GiB"
+                f"Max memory allocated: {max_memory_allocated / (1024**3):.2f} GiB \n"
+                f"Max memory reserved: {max_memory_reserved / (1024**3):.2f} GiB"
             ]
         )
         axes.set_title(title)
```


```diff
@@ -84,7 +84,7 @@ class Pattern:
         )
         return (
             f"{self.name}: {len(events)} events matched. "
-            f"Total Estimated Speedup: {format_time(original_time - new_time)} ({round(original_time/new_time, 2)}X)"
+            f"Total Estimated Speedup: {format_time(original_time - new_time)} ({round(original_time / new_time, 2)}X)"
         )

     def match(self, event: _ProfilerEvent):
@@ -622,7 +622,7 @@ def report_all_anti_patterns(
     ]
     reported = set()
     summaries = []
-    message_list = [f"{'-'*40}TorchTidy Report{'-'*40}"]
+    message_list = [f"{'-' * 40}TorchTidy Report{'-' * 40}"]
     message_list.append("Matched Events:")

     for anti_pattern in anti_patterns:
@@ -657,6 +657,6 @@ def report_all_anti_patterns(
     message_list.append("Summary:")
     message_list += summaries
-    message_list.append(f"{'-'*40}TorchTidy Report{'-'*40}")
+    message_list.append(f"{'-' * 40}TorchTidy Report{'-' * 40}")

     if print_enable:
         print("\n".join(message_list))
```


```diff
@@ -335,11 +335,11 @@ class BasicEvaluation:
         output += "\n".join(
             [
-                f"""{'-'*80}
+                f"""{'-' * 80}
 Event: {event}
 Source code location: {source_code_location(event.event)}
 Percentage idle time: {self.metrics[event].fraction_idle_time * 100:.2f}%
-{'-'*80}"""
+{'-' * 80}"""
                 for event in event_list
             ]
         )
```


```diff
@@ -112,7 +112,7 @@ def run_ps(trainers):
     torch.futures.wait_all(futs)
     stop = perf_counter()
     timed_log("Finish training")
-    timed_log(f"Time spent training: {stop-start}s")
+    timed_log(f"Time spent training: {stop - start}s")

 class ParameterServerTest(RpcAgentTestFixture):
```