[BE][CI] bump ruff to 0.8.4 (#143753)

Changes:

1. Bump `ruff` from 0.7.4 to 0.8.4
2. Change `%`-formatted strings to f-string
3. Change arguments with the `__`-prefix to positional-only arguments with the `/` separator in function signature.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/143753
Approved by: https://github.com/Skylion007
This commit is contained in:
Xuehai Pan
2024-12-24 16:15:15 +08:00
committed by PyTorch MergeBot
parent dbbc81cb34
commit b77406a9ec
46 changed files with 313 additions and 288 deletions

View File

@ -109,17 +109,17 @@ def sweep(benchmark):
def print_header():
local_print("\n")
local_print("%22s" % "")
for p in [50, 75, 90, 95]:
local_print("%14s%10s" % ("sec/iter", "ex/sec"))
local_print(" " * 22)
for _ in [50, 75, 90, 95]:
local_print(f"{'sec/iter':>14s}{'ex/sec':>10s}")
local_print("\n")
def print_measurements(prefix, nelem, measurements):
measurements = sorted(measurements)
local_print("%8s:" % prefix)
local_print(f"{prefix:>8s}:")
for p in [50, 75, 90, 95]:
v = np.percentile(measurements, p)
local_print(" p%02d: %1.3fs %6d/s" % (p, v, nelem / v))
local_print(f" p{p:02d}: {v:1.3f}s {int(nelem / v):6d}/s")
local_print("\n")
# Every process runs once by themselves to warm up (CUDA init, etc).
@ -133,7 +133,7 @@ def sweep(benchmark):
# Multi-machine benchmarks
for i in range(1, (dist.get_world_size() // 8) + 1):
append_benchmark(" %dM/8G" % i, range(i * 8))
append_benchmark(f" {i:d}M/8G", range(i * 8))
# Run benchmarks in order of increasing number of GPUs
print_header()
@ -239,7 +239,7 @@ def main():
print()
torch.cuda.set_device(dist.get_rank() % 8)
device = torch.device("cuda:%d" % (dist.get_rank() % 8))
device = torch.device(f"cuda:{dist.get_rank() % 8:d}")
benchmarks = []
if args.model: