Enable all PIE rules on ruff (#165814)

This PR enables all PIE rules in ruff. Some rules from this family are already enabled; the newly added rules are:
```
PIE796  Enum contains duplicate value: {value}
PIE808  Unnecessary start argument in range
```
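
For context, PIE796 flags an `Enum` whose members repeat a value, and PIE808 flags a redundant explicit `0` start argument to `range`. A minimal, hypothetical sketch of what each rule catches (illustrative only, not code from this PR):
```
import enum


class Color(enum.Enum):
    RED = 1
    GREEN = 2
    BLUE = 2  # PIE796: duplicate value; BLUE silently becomes an alias of GREEN


# PIE808: the explicit 0 start is redundant because range already starts at 0
for i in range(0, 10):  # flagged by PIE808
    pass

for i in range(10):  # equivalent, preferred form
    pass
```
The diff below is essentially the mechanical PIE808 cleanup: each `range(0, n)` call is rewritten as `range(n)`, with no change in behavior.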

Pull Request resolved: https://github.com/pytorch/pytorch/pull/165814
Approved by: https://github.com/ezyang
Yuanyuan Chen
2025-10-18 06:40:12 +00:00
committed by PyTorch MergeBot
parent e595136187
commit c79dfdc655
91 changed files with 195 additions and 200 deletions


@@ -11615,7 +11615,7 @@ def reference_searchsorted(sorted_sequence, boundary, out_int32=False, right=Fal
# numpy searchsorted only supports 1D inputs so we split up ND inputs
orig_shape = boundary.shape
num_splits = np.prod(sorted_sequence.shape[:-1])
-splits = range(0, num_splits)
+splits = range(num_splits)
sorted_sequence, boundary = sorted_sequence.reshape(num_splits, -1), boundary.reshape(num_splits, -1)
if sorter is not None:
sorter = sorter.reshape(num_splits, -1)
@@ -16258,7 +16258,7 @@ op_db: list[OpInfo] = [
aten_backward_name='_prelu_kernel_backward',
ref=lambda x, weight:
np.maximum(0., x) + np.minimum(0., x) *
-(weight if x.ndim == 1 else weight.reshape([weight.size if i == 1 else 1 for i in range(0, x.ndim)])),
+(weight if x.ndim == 1 else weight.reshape([weight.size if i == 1 else 1 for i in range(x.ndim)])),
dtypes=floating_types_and(torch.bfloat16, torch.float16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,


@@ -2896,7 +2896,7 @@ def _multilabelmarginloss_reference(input, target):
sum = 0
for target_index in targets:
-for i in range(0, len(input)):
+for i in range(len(input)):
if i not in targets:
sum += max(0, 1 - input[target_index] + input[i])
@@ -2914,7 +2914,7 @@ def multilabelmarginloss_reference(input, target, reduction='mean'):
n = input.size(0)
dim = input.size(1)
output = input.new(n).zero_()
-for i in range(0, n):
+for i in range(n):
output[i] = _multilabelmarginloss_reference(input[i], target[i])
if reduction == 'mean':
@@ -2955,7 +2955,7 @@ def _multimarginloss_reference(input, target_idx, p, margin, weight):
weight = input.new(len(input)).fill_(1)
output = 0
-for i in range(0, len(input)):
+for i in range(len(input)):
if i != target_idx:
output += weight[target_idx] * (max(0, (margin - input[target_idx] + input[i])) ** p)
return output
@@ -2972,7 +2972,7 @@ def multimarginloss_reference(input, target, p=1, margin=1, weight=None, reducti
n = input.size(0)
dim = input.size(1)
output = input.new(n)
-for x in range(0, n):
+for x in range(n):
output[x] = _multimarginloss_reference(input[x], target[x], p, margin, weight)
if reduction == 'mean':
@@ -2987,7 +2987,7 @@ def multimarginloss_reference(input, target, p=1, margin=1, weight=None, reducti
def cosineembeddingloss_reference(input1, input2, target, margin=0, reduction='mean'):
def _cos(a, b):
cos = a.new(a.size(0))
-for i in range(0, a.size(0)):
+for i in range(a.size(0)):
cos[i] = (a[i] * b[i]).sum() / ((((a[i] * a[i]).sum() + 1e-12) * ((b[i] * b[i]).sum() + 1e-12)) ** 0.5)
return cos


@@ -705,7 +705,7 @@ class LocalDTensorTestBase(DTensorTestBase):
self.skipTest(msg)
def _get_local_tensor_mode(self):
-return LocalTensorMode(frozenset(range(0, self.world_size)))
+return LocalTensorMode(frozenset(range(self.world_size)))
def setUp(self) -> None:
super().setUp()


@@ -658,13 +658,13 @@ class DistributedTest:
return (group, group_id, rank)
def _init_full_group_test(self, **kwargs):
-group = list(range(0, dist.get_world_size()))
+group = list(range(dist.get_world_size()))
group_id = dist.new_group(**kwargs)
rank = dist.get_rank()
return (group, group_id, rank)
def _init_global_test(self):
-group = list(range(0, dist.get_world_size()))
+group = list(range(dist.get_world_size()))
group_id = dist.group.WORLD
rank = dist.get_rank()
return (group, group_id, rank)
@@ -1114,7 +1114,7 @@ class DistributedTest:
averager = averagers.PeriodicModelAverager(
period=period, warmup_steps=warmup_steps
)
-for step in range(0, 20):
+for step in range(20):
# Reset the parameters at every step.
param.data = copy.deepcopy(tensor)
for params in model.parameters():
@@ -1143,7 +1143,7 @@ class DistributedTest:
averager = averagers.PeriodicModelAverager(
period=period, warmup_steps=warmup_steps
)
-for step in range(0, 20):
+for step in range(20):
# Reset the parameters at every step.
for param_group in opt.param_groups:
for params in param_group["params"]:
@@ -1203,7 +1203,7 @@ class DistributedTest:
averager = averagers.PeriodicModelAverager(
period=period, warmup_steps=warmup_steps
)
-for step in range(0, 20):
+for step in range(20):
# Reset the parameters at every step.
param.data = copy.deepcopy(tensor)
for params in model.parameters():
@@ -1284,7 +1284,7 @@ class DistributedTest:
expected_global_avg_tensor = (
torch.ones_like(param.data) * sum(range(world_size)) / world_size
)
-for step in range(0, 25):
+for step in range(25):
# Reset the parameters at every step.
param.data = copy.deepcopy(tensor)
for params in model.parameters():
@@ -1390,7 +1390,7 @@ class DistributedTest:
for val in ["1", "0"]:
os.environ["TORCH_NCCL_BLOCKING_WAIT"] = val
-for src in range(0, world_size):
+for src in range(world_size):
send_tensor = _build_tensor(rank + 1, device_id=device_id).fill_(
src
)
@@ -1409,7 +1409,7 @@ class DistributedTest:
for req in reqs:
req.wait()
-for src in range(0, world_size):
+for src in range(world_size):
self.assertEqual(recv_tensors[src], expected_tensors[src])
self._barrier()
@@ -1505,7 +1505,7 @@ class DistributedTest:
rank = dist.get_rank()
p2p_op_list = []
-for src in range(0, dist.get_world_size()):
+for src in range(dist.get_world_size()):
if src == rank:
continue
send_tensor = _build_tensor(rank + 1)
@@ -1528,7 +1528,7 @@ class DistributedTest:
rank = dist.get_rank()
p2p_op_list = []
-for src in range(0, dist.get_world_size()):
+for src in range(dist.get_world_size()):
if src == rank:
continue
send_tensor = _build_tensor(rank + 1)
@@ -1602,10 +1602,10 @@ class DistributedTest:
tensor = _build_tensor(rank + 1, device_id=device_id)
profiler_cls = profiler_ctx if profiler_ctx is not None else nullcontext()
with profiler_cls as prof:
-for src in range(0, world_size):
+for src in range(world_size):
if src == rank:
# Send mode
-for dst in range(0, world_size):
+for dst in range(world_size):
if dst == rank:
continue
dist.send(tensor, dst)
@@ -1674,10 +1674,10 @@ class DistributedTest:
tensor = _build_tensor(send_size)
ctx = profiler_ctx if profiler_ctx is not None else nullcontext()
with ctx as prof:
-for src in range(0, dist.get_world_size()):
+for src in range(dist.get_world_size()):
if src == rank:
# Send mode
-for dst in range(0, dist.get_world_size()):
+for dst in range(dist.get_world_size()):
if dst == rank:
continue
dist.send(tensor, dst)
@@ -1742,10 +1742,10 @@ class DistributedTest:
ctx = profiler_ctx if profiler_ctx is not None else nullcontext()
with ctx as prof:
-for dst in range(0, dist.get_world_size()):
+for dst in range(dist.get_world_size()):
if dst == rank:
# Recv mode
-for dst in range(0, dist.get_world_size()):
+for dst in range(dist.get_world_size()):
if dst == rank:
continue
@@ -1846,10 +1846,10 @@ class DistributedTest:
tensor = _build_tensor(send_recv_size, value=rank)
ctx = profiler_ctx if profiler_ctx is not None else nullcontext()
with ctx as prof:
-for dst in range(0, world_size):
+for dst in range(world_size):
if dst == rank:
# Recv mode
-for src in range(0, world_size):
+for src in range(world_size):
if src == rank:
continue
output_tensor = _build_tensor(send_recv_size, value=-1)
@@ -7480,7 +7480,7 @@ class DistributedTest:
for baseline_iter in baseline_num_iters:
for offset in iteration_offsets:
mapping = dict.fromkeys(
-range(0, num_early_join_ranks), baseline_iter
+range(num_early_join_ranks), baseline_iter
)
# if num_early_join_ranks > 1, ranks > 0 that will join early
# iterate offset//2 more times than rank 0, to test nodes


@@ -166,7 +166,7 @@ class AllReduce:
# collect all data to the list and make them
# all on rank 0 device
tensors = [
-data[src_rank][i].to(rank_0_device) for src_rank in range(0, len(data))
+data[src_rank][i].to(rank_0_device) for src_rank in range(len(data))
]
# now mimic reduce across all ranks


@@ -266,7 +266,7 @@ class CommonDistAutogradTest(RpcAgentTestFixture):
grads = dist_autograd.get_gradients(context_id)
nargs = len(args)
ngrads = 0
-for i in range(0, nargs):
+for i in range(nargs):
if local_grads[i] is not None:
self.assertIn(args[i], grads)
self.assertEqual(local_grads[i], grads[args[i]])
@@ -1973,7 +1973,7 @@ class DistAutogradTest(CommonDistAutogradTest):
DistAutogradTest._test_clean_context_backward_context_id = context_id
# Send the context id to all nodes.
-for i in range(0, self.world_size):
+for i in range(self.world_size):
if i != self.rank:
rank_distance = (i - self.rank + self.world_size) % self.world_size
rpc.rpc_sync(
@@ -1988,7 +1988,7 @@ class DistAutogradTest(CommonDistAutogradTest):
self.assertEqual(self.world_size - 1, len(known_context_ids))
t1 = torch.rand((3, 3), requires_grad=True)
-for i in range(0, 100):
+for i in range(100):
dst = self._next_rank()
t1 = rpc.rpc_sync(worker_name(dst), torch.add, args=(t1, t1))


@@ -1818,7 +1818,7 @@ class RpcTest(RpcAgentTestFixture, RpcTestCommon):
# Spawn multiple threads that send RPCs to ensure keys are correctly
# prefixed when there are multiple RPCs being created/in flight at the
# same time.
-dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank]
+dst_ranks = [rank for rank in range(self.world_size) if rank != self.rank]
def rpc_with_profiling(dst_worker):
with _profile() as prof:
@@ -1884,7 +1884,7 @@ class RpcTest(RpcAgentTestFixture, RpcTestCommon):
if self.rank != 1:
return
-dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank]
+dst_ranks = [rank for rank in range(self.world_size) if rank != self.rank]
for dst in dst_ranks:
dst_worker = worker_name(dst)
with _profile() as prof:


@@ -439,7 +439,7 @@ class JitTestCase(JitCommonTestCase):
state = model.get_debug_state()
plan = get_execution_plan(state)
num_bailouts = plan.code.num_bailouts()
-for i in range(0, num_bailouts):
+for i in range(num_bailouts):
plan.code.request_bailout(i)
bailout_outputs = model(*inputs)
self.assertEqual(bailout_outputs, expected)


@@ -912,7 +912,7 @@ if has_triton():
b_ptrs = b_ptr + (offs_k[:, None] + offs_bn[None, :])
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
-for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
+for k in range(tl.cdiv(K, BLOCK_SIZE_K)):
a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0)
b = tl.load(b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0)
accumulator = tl.dot(a, b, accumulator)