[BE]: Enable ruff rule SIM113 (#147290)
Lint rule that tells the user to avoid keeping track of their own counter and to use the builtin enumerate when possible.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/147290
Approved by: https://github.com/jansel
committed by PyTorch MergeBot
parent a8fa4bcfd2
commit e738f7ba23
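For context, SIM113 flags loops that maintain their own integer counter when enumerate could supply it. A minimal sketch of the pattern and the fix (illustrative names, not taken from this diff):

    # Flagged by SIM113: the counter is tracked by hand.
    idx = 0
    for item in ["a", "b", "c"]:
        print(idx, item)
        idx += 1

    # Preferred: enumerate supplies the counter.
    for idx, item in enumerate(["a", "b", "c"]):
        print(idx, item)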
@@ -192,7 +192,7 @@ for name, name2, log in chunker(entries, 3):
                 "unique_graph_breaks": unique_graph_breaks,
             }
         )
-        i += 1
+        i += 1  # noqa: SIM113

 if c:
     print(f"failed to classify {c} entries", file=sys.stderr)
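Most sites in this PR keep their counter and silence the rule inline rather than refactor. A plausible reason at sites like the one above is that the for target already unpacks several names, so folding in enumerate was deferred; the suppression pattern looks like this (hypothetical names, not from this diff):

    i = 0
    for name, count, city in [("ann", 3, "oslo"), ("bob", 5, "turku")]:
        print(i, name, count, city)
        i += 1  # noqa: SIM113  # counter kept; rule silenced inline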
@@ -277,7 +277,7 @@ class BidirLSTMLayer(jit.ScriptModule):
             out, out_state = direction(input, state)
             outputs += [out]
             output_states += [out_state]
-            i += 1
+            i += 1  # noqa: SIM113
         return torch.cat(outputs, -1), output_states
@@ -310,7 +310,7 @@ class StackedLSTM(jit.ScriptModule):
             state = states[i]
             output, out_state = rnn_layer(output, state)
             output_states += [out_state]
-            i += 1
+            i += 1  # noqa: SIM113
         return output, output_states
@@ -341,7 +341,7 @@ class StackedLSTM2(jit.ScriptModule):
             state = states[i]
             output, out_state = rnn_layer(output, state)
             output_states += [out_state]
-            i += 1
+            i += 1  # noqa: SIM113
         return output, output_states
@@ -383,7 +383,7 @@ class StackedLSTMWithDropout(jit.ScriptModule):
             if i < self.num_layers - 1:
                 output = self.dropout_layer(output)
             output_states += [out_state]
-            i += 1
+            i += 1  # noqa: SIM113
         return output, output_states
@@ -94,7 +94,7 @@ def load_spmv_dataset(dataset_path, hidden_size, sparsity, device, n_limit=math.
             x_files.append(f.as_posix())
         if size[0] == hidden_size:
             y_files.append(f.as_posix())
-        index += 1
+        index += 1  # noqa: SIM113
     print()

     for fx, fy in zip(x_files, y_files):
@@ -136,7 +136,7 @@ def load_spmm_dataset(
             x_files.append(f.as_posix())
         if size[0] == hidden_size:
             y_files.append(f.as_posix())
-        index += 1
+        index += 1  # noqa: SIM113
     print()

     for fx, fy in zip(x_files, y_files):
@@ -77,7 +77,6 @@ ignore = [
     "PYI041",
     "PYI056",
     "SIM102", "SIM103", "SIM112", # flake8-simplify code styles
-    "SIM113", # please fix
     "SIM105", # these ignores are from flake8-simplify. please fix or ignore with commented reason
     "SIM108", # SIM108 ignored because we prefer if-else-block instead of ternary expression
     "SIM110",
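This hunk is what actually enables the rule: ruff only reports SIM113 once it is no longer listed under ignore, assuming the flake8-simplify (SIM) family is selected elsewhere in this config, which the surrounding SIM entries suggest. The remaining SIM suppressions stay opted out with a commented reason.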
@@ -597,7 +597,7 @@ class AOTAutogradCacheTests(InductorTestCase):
         # see a recompilation (along with a cache miss).
         res1 = compiled_fn(a, b)
         # A first call should miss in the cache.
-        expected_misses += 1
+        expected_misses += 1  # noqa: SIM113
         self.assertEqual(
             counters["aot_autograd"]["autograd_cache_miss"], expected_misses
         )
@@ -611,7 +611,7 @@ class AOTAutogradCacheTests(InductorTestCase):
         )
         # Because dynamic shapes are enabled, we expect backwards to be compiled ahead of time
         # So we should see a cache save here
-        expected_saves += 1
+        expected_saves += 1  # noqa: SIM113
         self.assertEqual(
             counters["aot_autograd"]["autograd_cache_saved"], expected_saves
         )
@@ -632,7 +632,7 @@ class AOTAutogradCacheTests(InductorTestCase):
         # shape will still trigger a second call to autograd_cache.
         self._clear_dynamo_and_codecache()
         res2 = compiled_fn(a2, b2)
-        expected_hits += 1
+        expected_hits += 1  # noqa: SIM113
         self.assertEqual(
             counters["aot_autograd"]["autograd_cache_miss"], expected_misses
         )
@@ -641,7 +641,7 @@ class AOTAutogradCacheTests(InductorTestCase):
             expected_guard_misses,
         )
         # First compile is a regular cache miss, subsequent are guard misses
-        expected_guard_misses += 1
+        expected_guard_misses += 1  # noqa: SIM113
         self.assertEqual(
             counters["aot_autograd"]["autograd_cache_hit"], expected_hits
         )
@@ -969,7 +969,7 @@ class TestGradTransform(TestCase):
         expected = f"{repr(x)}"
         level = 0
         for op in op_list:
-            level += 1
+            level += 1  # noqa: SIM113
             if op == grad:
                 expected = f"GradTrackingTensor(lvl={level}, value={expected})"
             elif op == vmap:
@@ -6914,7 +6914,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime):
         beam_idx = 0
         for _, token in enumerate(x[i]):
             beam_hyps.append(token)
-            beam_idx += 1
+            beam_idx += 1  # noqa: SIM113

             if beam_idx == 6:
                 break
@@ -30,6 +30,6 @@ if __name__ == "__main__":
     def work2():
         sum = 0
         for _ in range(100000000):
-            sum += 1
+            sum += 1  # noqa: SIM113

     work2()
@@ -445,10 +445,8 @@ def get_base_name_to_sets_of_related_ops() -> dict[str, set[NSNodeTargetType]]:

     base_name_to_sets_of_related_ops: dict[str, set[NSNodeTargetType]] = {}

-    counter = 0
-    for set_of_related_ops in sets_of_related_ops:
+    for counter, set_of_related_ops in enumerate(sets_of_related_ops):
         base_name = str(counter)
-        counter += 1
         base_name_to_sets_of_related_ops[base_name] = set_of_related_ops

     return base_name_to_sets_of_related_ops
@@ -128,10 +128,8 @@ def bias_correction(
         quantized_submodule = get_module(quantized_model, uncorrected_module)
         bias = get_param(quantized_submodule, "bias")
         if bias is not None:
-            count = 0
-            for data in img_data:
+            for count, data in enumerate(img_data, start=1):
                 quantized_model(data[0])
-                count += 1
                 if count == neval_batches:
                     break
             ob_dict = ns.get_logger_dict(quantized_model)
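The start=1 argument in the hunk above is load-bearing: the old code incremented count before comparing it to neval_batches, so the first iteration already saw count == 1, and enumerate must begin at 1 to preserve that. A standalone sketch of the equivalence (illustrative data, not from this diff):

    # Old style: counter bumped inside the body, so values are 1-based.
    count = 0
    for batch in ["b0", "b1", "b2"]:
        count += 1
        print(count, batch)  # 1 b0, 2 b1, 3 b2

    # enumerate(..., start=1) yields the same 1-based counter.
    for count, batch in enumerate(["b0", "b1", "b2"], start=1):
        print(count, batch)  # 1 b0, 2 b1, 3 b2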
@@ -1227,10 +1227,8 @@ class _ModuleFrame:
         self.seen_nodes[node.name] = node

     def run_outer(self):
-        i = 0
-        for node in self.flat_graph.nodes:
+        for i, node in enumerate(self.flat_graph.nodes):
             self.print(i, node.meta.get("nn_module_stack"), node.format_node())
-            i += 1

         # Copy all graph inputs
         node_idx: int = 0
@@ -195,10 +195,8 @@ def accuracy(output, target, topk=(1,)):

 def train_one_epoch(model, criterion, optimizer, data_loader, device, ntrain_batches):
     model.train()
-    cnt = 0
-    for image, target in data_loader:
+    for cnt, (image, target) in enumerate(data_loader, start=1):
         print('.', end='')
-        cnt += 1
         image, target = image.to(device), target.to(device)
         output = model(image)
         loss = criterion(output, target)
@@ -2573,12 +2573,10 @@ class RpcTest(RpcAgentTestFixture, RpcTestCommon):
         )
         futs.append(fut)

-        j = 0
-        for val in torch.futures.wait_all(futs):
+        for j, val in enumerate(torch.futures.wait_all(futs)):
             self.assertEqual(
                 val, my_tensor_function(torch.ones(j, j), torch.ones(j, j))
             )
-            j += 1

     @dist_init
     def test_py_tensors_in_container(self):
@@ -30,6 +30,6 @@ if __name__ == "__main__":
     def work2():
         sum = 0
         for _ in range(100000000):
-            sum += 1
+            sum += 1  # noqa: SIM113

     work2()
@@ -861,9 +861,8 @@ class DecisionEvaluator:
         """

         y_true = self.df["actual_winner"] if self.ranking else self.df["winner"]
-        i = 0
-        for pred, true, prob, leaf_id in zip(
-            self.predictions, y_true, self.probas, self.leaf_ids
+        for i, (pred, true, prob, leaf_id) in enumerate(
+            zip(self.predictions, y_true, self.probas, self.leaf_ids)
         ):
             avail_choices = self.df["avail_choices"].iloc[i]
             top_k_choices = self.top_k_classes(

@@ -884,7 +883,6 @@ class DecisionEvaluator:
                 i,
             )
             self.eval_ranking_prediction(true, top_k_choices, i)
-            i += 1

         total = len(self.predictions)
         if return_safe_proba:
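The refactor above composes enumerate with zip: zip pairs the parallel sequences and enumerate indexes the pairs, which the loop target then unpacks in one step. A minimal standalone sketch of the pattern (illustrative data):

    preds = ["a", "b", "c"]
    labels = ["a", "x", "c"]
    # i indexes the zipped pairs; the tuple target unpacks each pair.
    for i, (pred, label) in enumerate(zip(preds, labels)):
        print(i, pred == label)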