[BE] fix ruff rule E226: add missing whitespace around operator in f-strings (#144415)

The fixes were generated by:

```bash
ruff check --fix --preview --unsafe-fixes --select=E226 .
lintrunner -a --take "RUFF,PYFMT" --all-files
```

Pull Request resolved: https://github.com/pytorch/pytorch/pull/144415
Approved by: https://github.com/huydhn, https://github.com/Skylion007
This commit is contained in:
Xuehai Pan
2025-01-09 02:29:27 +08:00
committed by PyTorch MergeBot
parent a742859fc2
commit dcc3cf7066
29 changed files with 54 additions and 51 deletions

View File

@@ -203,7 +203,9 @@ class NNModuleToString:
)
if buffer.is_cuda:
tensor_str = f"{tensor_str}.cuda()"
model_str += f"{tab*2}self.register_buffer('{buffer_name}', {tensor_str})\n"
model_str += (
f"{tab * 2}self.register_buffer('{buffer_name}', {tensor_str})\n"
)
for param_name, param in gm._parameters.items():
if param is None:

View File

@@ -2288,9 +2288,7 @@ def check_free_memory(free_bytes):
)
mem_free = -1
else:
msg = (
f"{free_bytes/1e9} GB memory required, but {mem_free/1e9} GB available"
)
msg = f"{free_bytes / 1e9} GB memory required, but {mem_free / 1e9} GB available"
return msg if mem_free < free_bytes else None

View File

@@ -243,13 +243,16 @@ def train_convnext_example():
max_reserved = torch.cuda.max_memory_reserved()
max_allocated = torch.cuda.max_memory_allocated()
print(
f"rank {rank}, {ITER_TIME} iterations, average latency {(end - start)/ITER_TIME*1000:10.2f} ms"
f"rank {rank}, {ITER_TIME} iterations, "
f"average latency {(end - start) / ITER_TIME * 1000:10.2f} ms"
)
print(
f"rank {rank}, forward {forward_time/ITER_TIME*1000:10.2f} ms, backward {backward_time/ITER_TIME*1000:10.2f} ms"
f"rank {rank}, forward {forward_time / ITER_TIME * 1000:10.2f} ms, "
f"backward {backward_time / ITER_TIME * 1000:10.2f} ms"
)
print(
f"rank {rank}, max reserved {max_reserved/1024/1024/1024:8.2f} GiB, max allocated {max_allocated/1024/1024/1024:8.2f} GiB"
f"rank {rank}, max reserved {max_reserved / 1024 / 1024 / 1024:8.2f} GiB, "
f"max allocated {max_allocated / 1024 / 1024 / 1024:8.2f} GiB"
)
dist.destroy_process_group()