[BE]: Apply PERF401 autofixes from ruff (#140980)
* Automatically applies ruff rule PERF401, which turns accumulator loops into equivalent list comprehensions. These are faster and do not leak the loop variable into the enclosing scope.
* List comprehensions often type-check better and are 50+% faster than equivalent for loops in terms of loop overhead; they also preserve length information and are easier for the interpreter to optimize.
* Manually went back and made mypy happy after the change.
* Also fixed style lints in files covered by flake8 but not by pyfmt.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/140980
Approved by: https://github.com/justinchuby, https://github.com/malfet
Committed by: PyTorch MergeBot
Parent: 8d708090c0
Commit: 12e95aa4ee
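For context, PERF401 flags loops whose body only appends to a list and rewrites them as a comprehension (or an extend with a generator expression). A minimal, illustrative sketch of the before/after shape, using made-up names rather than code from this PR:

    # Before: the accumulator-loop pattern PERF401 flags.
    squares = []
    for x in range(10):
        squares.append(x * x)
    # Note: x is still bound here, leaked from the loop.

    # After the autofix: equivalent output, lower per-iteration overhead,
    # and x does not leak into the enclosing scope.
    squares = [x * x for x in range(10)]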
@@ -4669,8 +4669,7 @@ def _generate_indices_prefer_all_rows(rows: int, cols: int, num_indices: int) ->
     for r in range(rows):
         # Note that this can yield overlapping indices
-        for c in random.choices(col_indices, k=n_per_row):
-            indices.append((r, c))
+        indices.extend((r, c) for c in random.choices(col_indices, k=n_per_row))
 
     return torch.tensor(indices[:num_indices])
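When the loop body nests another iterable, as in the hunk above, the autofix uses list.extend with a generator expression rather than a comprehension. A self-contained sketch of that equivalence, with illustrative values standing in for the test helper's real inputs:

    import random

    rows, cols, n_per_row = 3, 5, 2
    col_indices = list(range(cols))

    indices: list[tuple[int, int]] = []
    for r in range(rows):
        # Equivalent to the removed inner loop:
        #   for c in random.choices(col_indices, k=n_per_row):
        #       indices.append((r, c))
        indices.extend((r, c) for c in random.choices(col_indices, k=n_per_row))

    print(indices)  # e.g. [(0, 3), (0, 1), (1, 4), ...]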
@@ -5165,9 +5164,7 @@ def get_cycles_per_ms() -> float:
     # and seems to return stable values. Therefore, we enable caching
     # using lru_cache decorator above.
     num = 10
-    vals = []
-    for _ in range(num):
-        vals.append(measure())
+    vals = [measure() for _ in range(num)]
     vals = sorted(vals)
     return mean(vals[2 : num - 2])
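The second hunk keeps the surrounding trimmed-mean logic intact: ten samples are collected, sorted, and the two smallest and two largest are discarded before averaging. A standalone sketch of that pattern, using a hypothetical measure() in place of the real timing helper inside get_cycles_per_ms:

    import time
    from statistics import mean

    def measure() -> float:
        # Hypothetical stand-in workload; not the helper used in PyTorch.
        start = time.perf_counter()
        sum(range(100_000))
        return time.perf_counter() - start

    num = 10
    vals = [measure() for _ in range(num)]  # the PERF401-style comprehension
    vals = sorted(vals)
    # Trimmed mean: drop the two fastest and two slowest samples to reduce noise.
    print(mean(vals[2 : num - 2]))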