Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 21:14:14 +08:00)
[BE] fix ruff rule E226: add missing whitespace around operator in f-strings (#144415)
The fixes are generated by:

```bash
ruff check --fix --preview --unsafe-fixes --select=E226 .
lintrunner -a --take "RUFF,PYFMT" --all-files
```

Pull Request resolved: https://github.com/pytorch/pytorch/pull/144415

Approved by: https://github.com/huydhn, https://github.com/Skylion007
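For context, here is a minimal sketch of the pattern E226 ("missing whitespace around arithmetic operator") flags inside f-string replacement fields; the names `idx` and `total` below are hypothetical, not taken from the PR:

```python
# Hypothetical snippet illustrating Ruff rule E226 inside f-strings.
idx, total = 0, 5

# Flagged by `ruff check --preview --select=E226`: no spaces around `+`
# inside the replacement field.
print(f"Skipping {idx+1} of {total}")

# The spaced form that `ruff check --fix` rewrites it to.
print(f"Skipping {idx + 1} of {total}")
```

E226 is a preview rule in Ruff, which is why the command above passes `--preview`; `--unsafe-fixes` opts in to fixes Ruff does not mark as always safe, and the `lintrunner` invocation then re-runs the repository's RUFF and PYFMT linters over all files to apply and normalize the changes.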
Committed by: PyTorch MergeBot
Parent: a742859fc2
Commit: dcc3cf7066
.github/scripts/trymerge.py (vendored): 2 changes
@@ -669,7 +669,7 @@ def get_ghstack_prs(
         if not open_only or not candidate.is_closed():
             return False
         print(
-            f"Skipping {idx+1} of {len(rev_list)} PR (#{candidate.pr_num}) as its already been merged"
+            f"Skipping {idx + 1} of {len(rev_list)} PR (#{candidate.pr_num}) as its already been merged"
         )
         return True
 
@@ -676,7 +676,7 @@ def print_summary_table(data, print_dataframe=False):
             print(col.ljust(width), f"mean={data[col].mean():.3f}x")
         elif col in ("accuracy"):
             pass_rate = (data[col] == "pass").mean()
-            print(col.ljust(width), f"pass_rate={100*pass_rate:.2f}%")
+            print(col.ljust(width), f"pass_rate={100 * pass_rate:.2f}%")
         else:
             cdata = data[col]
             print(
@@ -4993,7 +4993,7 @@ def run(runner, args, original_dir=None):
     for i, name in enumerate(model_names):
         current_name = name
         if args.progress:
-            print(f"Running model {i+1}/{nmodels}", flush=True)
+            print(f"Running model {i + 1}/{nmodels}", flush=True)
 
         try:
             timeout = args.timeout
@@ -25,7 +25,7 @@ def main():
         return details.debug_lines()
 
     t = min(timeit.repeat(fn, number=K, repeat=3))
-    print(f"iterating over {N*K} FX nodes took {t:.1f}s ({N*K/t:.0f} nodes/s)")
+    print(f"iterating over {N * K} FX nodes took {t:.1f}s ({N * K / t:.0f} nodes/s)")
 
 
 if __name__ == "__main__":
@@ -24,7 +24,7 @@ def main():
         pass
 
     t = min(timeit.repeat(fn, number=K, repeat=3))
-    print(f"iterating over {N*K} FX nodes took {t:.1f}s ({N*K/t:.0f} nodes/s)")
+    print(f"iterating over {N * K} FX nodes took {t:.1f}s ({N * K / t:.0f} nodes/s)")
 
 
 if __name__ == "__main__":
@@ -19,7 +19,7 @@ def bench(name, fn, requires_grad):
     end = time.perf_counter()
 
     results = timeit.repeat(lambda: fn(x), number=1000, repeat=1000)
-    print(f"{name} {np.median(results)*1000:.1f}us (warmup={end-start:.1f}s)")
+    print(f"{name} {np.median(results) * 1000:.1f}us (warmup={end - start:.1f}s)")
 
 
 def main():
@@ -144,7 +144,7 @@ def main():
             fail = True
             print(
                 f"REGRESSION: benchmark {key} failed, actual result {result} "
-                f"is {ratio:.2f}% higher than expected {entry.expected_value} ±{entry.noise_margin*100:+.2f}% "
+                f"is {ratio:.2f}% higher than expected {entry.expected_value} ±{entry.noise_margin * 100:+.2f}% "
                 f"if this is an expected regression, please update the expected results.\n"
             )
             print(
@@ -158,7 +158,7 @@ def main():
 
             print(
                 f"WIN: benchmark {key} failed, actual result {result} is {ratio:+.2f}% lower than "
-                f"expected {entry.expected_value} ±{entry.noise_margin*100:.2f}% "
+                f"expected {entry.expected_value} ±{entry.noise_margin * 100:.2f}% "
                 f"please update the expected results. \n"
             )
             print(
@@ -170,7 +170,7 @@ def main():
         else:
             print(
                 f"PASS: benchmark {key} pass, actual result {result} {ratio:+.2f}% is within "
-                f"expected {entry.expected_value} ±{entry.noise_margin*100:.2f}%\n"
+                f"expected {entry.expected_value} ±{entry.noise_margin * 100:.2f}%\n"
             )
 
     log("pass")
@@ -543,7 +543,7 @@ def build_summary(args):
         out_io.write(f"Number CUDA Devices: {torch.cuda.device_count()}\n")
         out_io.write(f"Device Name: {torch.cuda.get_device_name(0)}\n")
         out_io.write(
-            f"Device Memory [GB]: {torch.cuda.get_device_properties(0).total_memory/1e9}\n"
+            f"Device Memory [GB]: {torch.cuda.get_device_properties(0).total_memory / 1e9}\n"
         )
 
     title = "## Build Summary"
@@ -193,9 +193,9 @@ def main():
     print(
         f"Train model on {args.epochs} epochs with backend {args.backend} and optimizer {args.optimizer}:"
     )
-    print(f"PyTorch spent {timedelta(seconds=native_elapsed/args.epochs)} per epoch")
+    print(f"PyTorch spent {timedelta(seconds=native_elapsed / args.epochs)} per epoch")
     print(
-        f"TorchDynamo spent {timedelta(seconds=dynamo_elapsed/args.epochs)} per epoch"
+        f"TorchDynamo spent {timedelta(seconds=dynamo_elapsed / args.epochs)} per epoch"
     )
 
 
@@ -51,6 +51,6 @@ for a, b in zip(run(mod, input), run(compiled_mod, input)):
 for _ in range(5):
     i = 10000
     t = timeit.Timer("mod(input)", globals=globals()).timeit(10000)
-    print(f"eager {t/i*1e6}")
+    print(f"eager {t / i * 1e6}")
     t = timeit.Timer("compiled_mod(input)", globals=globals()).timeit(10000)
-    print(f"compiled {t/i*1e6}")
+    print(f"compiled {t / i * 1e6}")
@@ -235,7 +235,7 @@ def test(db, net, device, epoch, log):
 
     qry_losses = torch.cat(qry_losses).mean().item()
     qry_accs = 100.0 * torch.cat(qry_accs).float().mean().item()
-    print(f"[Epoch {epoch+1:.2f}] Test Loss: {qry_losses:.2f} | Acc: {qry_accs:.2f}")
+    print(f"[Epoch {epoch + 1:.2f}] Test Loss: {qry_losses:.2f} | Acc: {qry_accs:.2f}")
     log.append(
         {
             "epoch": epoch + 1,
@@ -225,7 +225,7 @@ def test(db, net, device, epoch, log):
 
     qry_losses = torch.cat(qry_losses).mean().item()
     qry_accs = 100.0 * torch.cat(qry_accs).float().mean().item()
-    print(f"[Epoch {epoch+1:.2f}] Test Loss: {qry_losses:.2f} | Acc: {qry_accs:.2f}")
+    print(f"[Epoch {epoch + 1:.2f}] Test Loss: {qry_losses:.2f} | Acc: {qry_accs:.2f}")
     log.append(
         {
             "epoch": epoch + 1,
@@ -225,7 +225,7 @@ def test(db, net, device, epoch, log):
 
    qry_losses = torch.cat(qry_losses).mean().item()
     qry_accs = 100.0 * torch.cat(qry_accs).float().mean().item()
-    print(f"[Epoch {epoch+1:.2f}] Test Loss: {qry_losses:.2f} | Acc: {qry_accs:.2f}")
+    print(f"[Epoch {epoch + 1:.2f}] Test Loss: {qry_losses:.2f} | Acc: {qry_accs:.2f}")
     log.append(
         {
             "epoch": epoch + 1,
@@ -137,7 +137,7 @@ class Unflatten(TestGenerator):
         code = Block()
         code.new_line("super().__init__()")
         if i < self.n - 1:
-            code.new_line(f"self.n{i+1} = N{i+1}()")
+            code.new_line(f"self.n{i + 1} = N{i + 1}()")
         return code
 
     def gen_forward_body(self, i: int):
@@ -207,7 +207,7 @@ class ConstantUnflatten(Unflatten):
         code.new_line("super().__init__()")
         code.new_line("self.const = torch.ones(1)")
         if i < self.n - 1:
-            code.new_line(f"self.n{i+1} = N{i+1}()")
+            code.new_line(f"self.n{i + 1} = N{i + 1}()")
         return code
 
     def gen_forward_body(self, i: int):
@@ -249,7 +249,7 @@ class BufferUnflatten(Unflatten):
         code.new_line("super().__init__()")
         code.new_line("self.buf = torch.nn.Buffer(torch.ones(1))")
         if i < self.n - 1:
-            code.new_line(f"self.n{i+1} = N{i+1}()")
+            code.new_line(f"self.n{i + 1} = N{i + 1}()")
         return code
 
     def gen_forward_body(self, i: int):
@@ -489,7 +489,7 @@ if __name__ == '__main__':
         del env[PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY]
         env[PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY] = 'cpu'
         _, stderr = TestCase.run_process_no_exception(test_filter_file_template, env=env)
-        self.assertIn(f'Ran {test_bases_count-1} test', stderr.decode('ascii'))
+        self.assertIn(f'Ran {test_bases_count - 1} test', stderr.decode('ascii'))
 
         # Test with setting both should throw exception
         env[PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY] = 'cpu'
third_party/generate-xnnpack-wrappers.py (vendored): 2 changes
@@ -100,7 +100,7 @@ IGNORED_SOURCES = set((
 def handle_singleline_parse(line):
     start_index = line.find("(")
     end_index = line.find(")")
-    line = line[start_index+1:end_index]
+    line = line[start_index + 1:end_index]
     key_val = line.split(" ")
     return key_val[0], [x[4:] for x in key_val[1:]]
 
@@ -312,7 +312,7 @@ class ExplainOutput:
 
         output += "Break Reasons:\n"
         for idx, break_reason in enumerate(self.break_reasons):
-            output += f"  Break Reason {idx+1}:\n"
+            output += f"  Break Reason {idx + 1}:\n"
             output += f"    Reason: {break_reason.reason}\n"
             output += "    User Stack:\n"
             for frame_summary in break_reason.user_stack:
@@ -321,14 +321,14 @@ class ExplainOutput:
         if self.ops_per_graph is not None:
             output += "Ops per Graph:\n"
             for idx, ops in enumerate(self.ops_per_graph):
-                output += f"  Ops {idx+1}:\n"
+                output += f"  Ops {idx + 1}:\n"
                 for op in ops:
                     output += f"    {op}\n"
 
         if self.out_guards is not None:
             output += "Out Guards:\n"
             for i, guard in enumerate(self.out_guards):
-                output += f"  Guard {i+1}:\n"
+                output += f"  Guard {i + 1}:\n"
                 output += f"    {str(guard)}"
 
         if self.compile_times is not None:
@@ -184,7 +184,7 @@ class NNModuleToString:
             example_param = next(module.parameters(), None)
             if example_param is not None and example_param.is_cuda:
                 module_str = f"{module_str}.cuda()"
-            model_str += f"{tab*2}self.{module_name} = {module_str}\n"
+            model_str += f"{tab * 2}self.{module_name} = {module_str}\n"
 
         for buffer_name, buffer in gm._buffers.items():
             if buffer is None:
@@ -203,7 +203,9 @@ class NNModuleToString:
             )
             if buffer.is_cuda:
                 tensor_str = f"{tensor_str}.cuda()"
-            model_str += f"{tab*2}self.register_buffer('{buffer_name}', {tensor_str})\n"
+            model_str += (
+                f"{tab * 2}self.register_buffer('{buffer_name}', {tensor_str})\n"
+            )
 
         for param_name, param in gm._parameters.items():
             if param is None:
@@ -212,7 +214,7 @@ class NNModuleToString:
             if param.is_cuda:
                 maybe_device = ', device="cuda"'
             tensor_str = f"torch.nn.Parameter(torch.randn({list(param.shape)}, dtype={param.dtype}{maybe_device}))"
-            model_str += f"{tab*2}self.{param_name} = {tensor_str}\n"
+            model_str += f"{tab * 2}self.{param_name} = {tensor_str}\n"
 
         # TODO - Keep this code for now. But, I don't think we will need this.
         # attrs = dir(gm)
@@ -396,7 +396,7 @@ def compiled_module_main(benchmark_name, benchmark_compiled_module_fn):
 
     if torch.cuda.is_available():
         peak_mem = torch.cuda.max_memory_allocated()
-        print(f"Peak GPU memory usage {peak_mem/1e6:.3f} MB")
+        print(f"Peak GPU memory usage {peak_mem / 1e6:.3f} MB")
 
     if torch.cuda.is_available() and args.cuda_memory_snapshot:
         collect_memory_snapshot(benchmark_compiled_module_fn)
@@ -844,7 +844,7 @@ class TorchLogsFormatter(logging.Formatter):
         filepath = make_module_path_relative(record.pathname)
 
         prefix = (
-            f"{record.rankprefix}{shortlevel}{record.asctime}.{int(record.msecs*1000):06d} {record.process} "
+            f"{record.rankprefix}{shortlevel}{record.asctime}.{int(record.msecs * 1000):06d} {record.process} "
             f"{filepath}:"
             f"{record.lineno}]{record.traceid}{record.artifactprefix}"
         )
@@ -2274,7 +2274,7 @@ def check_free_memory(free_bytes):
         )
 
         msg = (
-            f"{free_bytes/1e9} GB memory required, but environment variable "
+            f"{free_bytes / 1e9} GB memory required, but environment variable "
            f"NPY_AVAILABLE_MEM={env_value} set"
         )
     else:
@@ -2288,9 +2288,7 @@ def check_free_memory(free_bytes):
         )
         mem_free = -1
     else:
-        msg = (
-            f"{free_bytes/1e9} GB memory required, but {mem_free/1e9} GB available"
-        )
+        msg = f"{free_bytes / 1e9} GB memory required, but {mem_free / 1e9} GB available"
 
     return msg if mem_free < free_bytes else None
 
@@ -257,11 +257,11 @@ def display_bytes(b: int, unit: str = "MiB") -> str:
     return a string that represent the number of bytes in a desired unit
     """
     if unit == "KiB":
-        return f"{b/2**10:.2f} KiB"
+        return f"{b / 2**10:.2f} KiB"
     if unit == "MiB":
-        return f"{b/2**20:.2f} MiB"
+        return f"{b / 2**20:.2f} MiB"
     if unit == "GiB":
-        return f"{b/2**30:.2f} GiB"
+        return f"{b / 2**30:.2f} GiB"
     return f"{b:.2f} bytes"
 
 
@@ -243,13 +243,16 @@ def train_convnext_example():
     max_reserved = torch.cuda.max_memory_reserved()
     max_allocated = torch.cuda.max_memory_allocated()
     print(
-        f"rank {rank}, {ITER_TIME} iterations, average latency {(end - start)/ITER_TIME*1000:10.2f} ms"
+        f"rank {rank}, {ITER_TIME} iterations, "
+        f"average latency {(end - start) / ITER_TIME * 1000:10.2f} ms"
     )
     print(
-        f"rank {rank}, forward {forward_time/ITER_TIME*1000:10.2f} ms, backward {backward_time/ITER_TIME*1000:10.2f} ms"
+        f"rank {rank}, forward {forward_time / ITER_TIME * 1000:10.2f} ms, "
+        f"backward {backward_time / ITER_TIME * 1000:10.2f} ms"
     )
     print(
-        f"rank {rank}, max reserved {max_reserved/1024/1024/1024:8.2f} GiB, max allocated {max_allocated/1024/1024/1024:8.2f} GiB"
+        f"rank {rank}, max reserved {max_reserved / 1024 / 1024 / 1024:8.2f} GiB, "
+        f"max allocated {max_allocated / 1024 / 1024 / 1024:8.2f} GiB"
     )
     dist.destroy_process_group()
 
 
@@ -187,7 +187,7 @@ def tril_matrix_to_vec(mat: Tensor, diag: int = 0) -> Tensor:
     """
     n = mat.shape[-1]
     if not torch._C._get_tracing_state() and (diag < -n or diag >= n):
-        raise ValueError(f"diag ({diag}) provided is outside [{-n}, {n-1}].")
+        raise ValueError(f"diag ({diag}) provided is outside [{-n}, {n - 1}].")
     arange = torch.arange(n, device=mat.device)
     tril_mask = arange < arange.view(-1, 1) + (diag + 1)
     vec = mat[..., tril_mask]
@@ -107,7 +107,7 @@ class Wishart(ExponentialFamily):
 
         if self.df.le(event_shape[-1] - 1).any():
             raise ValueError(
-                f"Value of df={df} expected to be greater than ndim - 1 = {event_shape[-1]-1}."
+                f"Value of df={df} expected to be greater than ndim - 1 = {event_shape[-1] - 1}."
             )
 
         if scale_tril is not None:
@@ -890,7 +890,7 @@ def _add_submodule(
 def _call_name(base: str, n: int) -> str:
     # Given n >= 0, generate call names to a submodule `base` of the form
     # `base`, `base@1`, `base@2`, etc.
-    return base if n == 1 else f"{base}@{n-1}"
+    return base if n == 1 else f"{base}@{n - 1}"
 
 
 def _is_call_name(call_name: str, base: str) -> bool:
@@ -1168,8 +1168,8 @@ class MemoryProfileTimeline:
         title = "\n\n".join(
             ([title] if title else [])
             + [
-                f"Max memory allocated: {max_memory_allocated/(1024**3):.2f} GiB \n"
-                f"Max memory reserved: {max_memory_reserved/(1024**3):.2f} GiB"
+                f"Max memory allocated: {max_memory_allocated / (1024**3):.2f} GiB \n"
+                f"Max memory reserved: {max_memory_reserved / (1024**3):.2f} GiB"
             ]
         )
         axes.set_title(title)
@@ -84,7 +84,7 @@ class Pattern:
         )
         return (
             f"{self.name}: {len(events)} events matched. "
-            f"Total Estimated Speedup: {format_time(original_time - new_time)} ({round(original_time/new_time, 2)}X)"
+            f"Total Estimated Speedup: {format_time(original_time - new_time)} ({round(original_time / new_time, 2)}X)"
         )
 
     def match(self, event: _ProfilerEvent):
@@ -622,7 +622,7 @@ def report_all_anti_patterns(
     ]
     reported = set()
     summaries = []
-    message_list = [f"{'-'*40}TorchTidy Report{'-'*40}"]
+    message_list = [f"{'-' * 40}TorchTidy Report{'-' * 40}"]
     message_list.append("Matched Events:")
 
     for anti_pattern in anti_patterns:
@@ -657,6 +657,6 @@ def report_all_anti_patterns(
 
     message_list.append("Summary:")
     message_list += summaries
-    message_list.append(f"{'-'*40}TorchTidy Report{'-'*40}")
+    message_list.append(f"{'-' * 40}TorchTidy Report{'-' * 40}")
     if print_enable:
         print("\n".join(message_list))
@@ -335,11 +335,11 @@ class BasicEvaluation:
 
         output += "\n".join(
             [
-                f"""{'-'*80}
+                f"""{'-' * 80}
 Event: {event}
 Source code location: {source_code_location(event.event)}
 Percentage idle time: {self.metrics[event].fraction_idle_time * 100:.2f}%
-{'-'*80}"""
+{'-' * 80}"""
                 for event in event_list
             ]
         )
@@ -112,7 +112,7 @@ def run_ps(trainers):
     torch.futures.wait_all(futs)
     stop = perf_counter()
     timed_log("Finish training")
-    timed_log(f"Time spent training: {stop-start}s")
+    timed_log(f"Time spent training: {stop - start}s")
 
 
 class ParameterServerTest(RpcAgentTestFixture):