diff --git a/.flake8 b/.flake8
index dfc371b386a4..5da4f8e575ea 100644
--- a/.flake8
+++ b/.flake8
@@ -14,7 +14,7 @@ ignore =
     # to line this up with executable bit
     EXE001,
     # these ignores are from flake8-bugbear; please fix!
-    B007,B008,B017,B019,B020,B023,B026,B028,B903,B904,B905,B906,B907
+    B007,B008,B017,B019,B023,B026,B028,B903,B904,B905,B906,B907
     # these ignores are from flake8-comprehensions; please fix!
     C407,
     # these ignores are from flake8-logging-format; please fix!
diff --git a/caffe2/python/net_builder_test.py b/caffe2/python/net_builder_test.py
index 5320c2b04588..ed0d0fb01d04 100644
--- a/caffe2/python/net_builder_test.py
+++ b/caffe2/python/net_builder_test.py
@@ -90,7 +90,7 @@ class TestNetBuilder(unittest.TestCase):
         plan.AddStep(to_execution_step(nb))
         ws = workspace.C.Workspace()
         ws.run(plan)
-        expected = [
+        expected_results = [
             (y, 5),
             (z, False),
             (w, True),
@@ -99,7 +99,7 @@ class TestNetBuilder(unittest.TestCase):
             (p, 2),
             (q, 3),
         ]
-        for b, expected in expected:
+        for b, expected in expected_results:
             actual = ws.blobs[str(b)].fetch()
             self.assertEqual(actual, expected)
diff --git a/caffe2/python/schema.py b/caffe2/python/schema.py
index 6e401bb89532..924afed41bd5 100644
--- a/caffe2/python/schema.py
+++ b/caffe2/python/schema.py
@@ -1081,7 +1081,7 @@ def from_column_list(
         'col_names and col_blobs must have the same length.'
     )
     root = _SchemaNode('root', 'Struct')
-    for col_name, col_type, col_blob, col_metadata in zip(
+    for col_name, col_type, col_blob, col_md in zip(
        col_names, col_types, col_blobs, col_metadata
    ):
        columns = col_name.split(FIELD_SEPARATOR)
@@ -1095,7 +1095,7 @@ def from_column_list(
                field = Scalar(
                    dtype=col_type,
                    blob=col_blob,
-                   metadata=col_metadata
+                   metadata=col_md
                )
            next = current.add_child(name, type_str)
            if field is not None:
diff --git a/pyproject.toml b/pyproject.toml
index d0ad34eb3790..77663a9d68d8 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -28,7 +28,7 @@ ignore = [
     # these ignores are from flake8-bugbear; please fix!
     "B007", "B008", "B017",
     "B018", # Useless expression
-    "B019", "B020",
+    "B019",
     "B023", "B026",
     "B028", # No explicit `stacklevel` keyword argument found
     "B904",
diff --git a/scripts/release_notes/test_release_notes.py b/scripts/release_notes/test_release_notes.py
index 97df4561157a..adef14e20f97 100644
--- a/scripts/release_notes/test_release_notes.py
+++ b/scripts/release_notes/test_release_notes.py
@@ -34,8 +34,8 @@ class TestCommitList(unittest.TestCase):
             expected.write_to_disk()

             commit_list = CommitList.from_existing(commit_list_path)
-            for commit, expected in zip(commit_list.commits, expected.commits):
-                self.assertEqual(commit, expected)
+            for commit, expected_commit in zip(commit_list.commits, expected.commits):
+                self.assertEqual(commit, expected_commit)

     def test_update_to(self):
         with tempfile.TemporaryDirectory() as tempdir:
diff --git a/test/distributed/elastic/agent/server/test/local_elastic_agent_test.py b/test/distributed/elastic/agent/server/test/local_elastic_agent_test.py
index 27ea0cc75b1e..59c20e46b232 100644
--- a/test/distributed/elastic/agent/server/test/local_elastic_agent_test.py
+++ b/test/distributed/elastic/agent/server/test/local_elastic_agent_test.py
@@ -880,8 +880,8 @@ class LocalElasticAgentTest(unittest.TestCase):

         # global world size == sum of all the role world sizes
         expected_world_size = sum(expected_role_world_sizes.values())
-        for role, run_results in run_results.items():
-            for result in run_results:
+        for role, results in run_results.items():
+            for result in results:
                 res = result.return_values
                 for role_info in res.values():
                     rank = role_info.rank
diff --git a/test/distributed/fsdp/test_fsdp_dtensor_state_dict.py b/test/distributed/fsdp/test_fsdp_dtensor_state_dict.py
index a47fbe12dae4..5e7d84bc790e 100644
--- a/test/distributed/fsdp/test_fsdp_dtensor_state_dict.py
+++ b/test/distributed/fsdp/test_fsdp_dtensor_state_dict.py
@@ -143,11 +143,11 @@ class TestFSDPWithDeviceMeshAndDTensor(DTensorTestBase):
         sharded_tensor_osd = FSDP.optim_state_dict(ref_model, ref_optim)

         # Check dtensor and sharded_tensor model state dict values are identical
-        for dtensor_sd, sharded_tensor_sd in zip(
+        for dtensor_sd_item, sharded_tensor_sd_item in zip(
             dtensor_sd.items(), sharded_tensor_sd.items()
         ):
-            k1, v1 = dtensor_sd
-            k2, v2 = sharded_tensor_sd
+            k1, v1 = dtensor_sd_item
+            k2, v2 = sharded_tensor_sd_item
             self.assertEqual(k1, k2)

             # if the ShardedTensor is an empty shard,
@@ -227,15 +227,15 @@
         new_optim_state_dict = FSDP.optim_state_dict(model, optim)

         # Check whether new_optim_state_dict is the same as ref_optim_state_dict.
-        for new_optim_state_dict, ref_optim_state_dict in zip(
+        for new_optim_state_dict_item, ref_optim_state_dict_item in zip(
             new_optim_state_dict["state"].items(),
             ref_optim_state_dict["state"].items(),
         ):
             # check FQN are the same
-            self.assertEqual(new_optim_state_dict[0], ref_optim_state_dict[0])
+            self.assertEqual(new_optim_state_dict_item[0], ref_optim_state_dict_item[0])
             for new_optim_hyper_param, ref_optim_hyper_param in zip(
-                new_optim_state_dict[1].items(),
-                ref_optim_state_dict[1].items(),
+                new_optim_state_dict_item[1].items(),
+                ref_optim_state_dict_item[1].items(),
             ):
                 k1, v1 = new_optim_hyper_param
                 k2, v2 = ref_optim_hyper_param
diff --git a/test/distributed/fsdp/test_hsdp_dtensor_state_dict.py b/test/distributed/fsdp/test_hsdp_dtensor_state_dict.py
index bf461f133161..23d97d6598bf 100644
--- a/test/distributed/fsdp/test_hsdp_dtensor_state_dict.py
+++ b/test/distributed/fsdp/test_hsdp_dtensor_state_dict.py
@@ -154,11 +154,11 @@ class TestHSDPWithDeviceMeshAndDTensor(DTensorTestBase):
         sharded_tensor_osd = FSDP.optim_state_dict(ref_model, ref_optim)

         # Check dtensor and sharded_tensor model state dict values are identical
-        for dtensor_sd, sharded_tensor_sd in zip(
+        for dtensor_sd_item, sharded_tensor_sd_item in zip(
             dtensor_sd.items(), sharded_tensor_sd.items()
         ):
-            k1, v1 = dtensor_sd
-            k2, v2 = sharded_tensor_sd
+            k1, v1 = dtensor_sd_item
+            k2, v2 = sharded_tensor_sd_item
             self.assertEqual(k1, k2)

             self.assertEqual(type(v1), DTensor)
@@ -225,15 +225,15 @@
         new_optim_state_dict = FSDP.optim_state_dict(model, optim)

         # Check whether new_optim_state_dict is the same as ref_optim_state_dict.
-        for new_optim_state_dict, ref_optim_state_dict in zip(
+        for new_optim_state_dict_item, ref_optim_state_dict_item in zip(
             new_optim_state_dict["state"].items(),
             ref_optim_state_dict["state"].items(),
         ):
             # check FQN are the same
-            self.assertEqual(new_optim_state_dict[0], ref_optim_state_dict[0])
+            self.assertEqual(new_optim_state_dict_item[0], ref_optim_state_dict_item[0])
             for new_optim_hyper_param, ref_optim_hyper_param in zip(
-                new_optim_state_dict[1].items(),
-                ref_optim_state_dict[1].items(),
+                new_optim_state_dict_item[1].items(),
+                ref_optim_state_dict_item[1].items(),
             ):
                 k1, v1 = new_optim_hyper_param
                 k2, v2 = ref_optim_hyper_param
diff --git a/test/functorch/test_vmap.py b/test/functorch/test_vmap.py
index 5b1f6c0e7634..95c0a896e484 100644
--- a/test/functorch/test_vmap.py
+++ b/test/functorch/test_vmap.py
@@ -3423,9 +3423,9 @@ class TestVmapOperatorsOpInfo(TestCase):
             sample_input = error_input.sample_input
             args = (sample_input.input,) + tuple(sample_input.args)
             kwargs = sample_input.kwargs
-            for args, in_dims, _ in generate_vmap_inputs(args, {}):
+            for batched_args, in_dims, _ in generate_vmap_inputs(args, {}):
                 with self.assertRaises(Exception):
-                    vmap(op, in_dims)(*args, **kwargs)
+                    vmap(op, in_dims)(*batched_args, **kwargs)

         # Sample inputs check
         sample_inputs_op = {
@@ -3455,16 +3455,16 @@ class TestVmapOperatorsOpInfo(TestCase):
                 continue
             kwargs = sample_input.kwargs
             is_batch_norm_and_training = is_batch_norm_training(op.name, kwargs)
-            for args, in_dims, _ in generate_vmap_inputs(
+            for batched_args, in_dims, _ in generate_vmap_inputs(
                     args, {}, is_batch_norm_and_training=is_batch_norm_and_training):
                 for func in aliases:
-                    self.vmap_outplace_test(func, args, kwargs, in_dims, check_shape_only, postprocess_fn)
+                    self.vmap_outplace_test(func, batched_args, kwargs, in_dims, check_shape_only, postprocess_fn)
                 if op.name in skip_inplace:
                     continue
                 if not is_valid_inplace_sample_input(sample_input, op, op.inplace_variant):
                     continue
                 for func in inplace_aliases:
-                    self.vmap_inplace_test(func, args, kwargs, in_dims, postprocess_fn)
+                    self.vmap_inplace_test(func, batched_args, kwargs, in_dims, postprocess_fn)

         if check_has_batch_rule:
             check_vmap_fallback(self, test, op)
@@ -4195,11 +4195,11 @@ class TestVmapOperatorsOpInfo(TestCase):
             gout = torch.randn(2, 2, device=device)
             args = (leaf, gout)

-            for args, in_dims, _, in generate_vmap_inputs(args, {}):
+            for batched_args, in_dims, _, in generate_vmap_inputs(args, {}):
                 if in_dims[1] is None:
                     # triggers some composite compliance problem
                     continue
-                self.vmap_outplace_test(push_vjp, args, {}, in_dims)
+                self.vmap_outplace_test(push_vjp, batched_args, {}, in_dims)

     def test_advanced_indexing(self, device):
         def test(f, args):
diff --git a/test/jit/test_freezing.py b/test/jit/test_freezing.py
index b207860dd0de..d5d25082bdca 100644
--- a/test/jit/test_freezing.py
+++ b/test/jit/test_freezing.py
@@ -2691,9 +2691,9 @@ class TestFrozenOptimizations(JitTestCase):
         with set_default_dtype(torch.float):
             conv_bias = [True, False]
             conv_ops = [nn.Conv2d, nn.Conv3d]
-            add_z = [True, False]
+            use_add_z = [True, False]
             use_tracing = [True, False]
-            for use_bias, conv, add_z, tracing in product(conv_bias, conv_ops, add_z, use_tracing):
+            for use_bias, conv, add_z, tracing in product(conv_bias, conv_ops, use_add_z, use_tracing):
                 class Net(nn.Module):
                     def __init__(self, in_channels, out_channels, **kwargs):
                         super().__init__()
diff --git a/test/nn/test_embedding.py b/test/nn/test_embedding.py
index d9ec6753f0ee..5d876a155699 100644
--- a/test/nn/test_embedding.py
+++ b/test/nn/test_embedding.py
@@ -944,10 +944,10 @@ class TestEmbeddingNNDeviceType(NNTestCase):
                              atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)

         trainable_scale = (True, False)
-        include_last_offset = (True, False)
+        include_last_offset_list = (True, False)
         modes = (('sum', False), ('sum', True), ('max', False), ('mean', False))
         for (mode, has_weight), trainable, include_last_offset in itertools.product(
-            modes, trainable_scale, include_last_offset
+            modes, trainable_scale, include_last_offset_list
         ):
             test_per_sample_weights_new_offsets(
                 mode, trainable, include_last_offset, has_weight
diff --git a/test/quantization/core/test_workflow_ops.py b/test/quantization/core/test_workflow_ops.py
index a3528098b256..2e22d49af4ca 100644
--- a/test/quantization/core/test_workflow_ops.py
+++ b/test/quantization/core/test_workflow_ops.py
@@ -364,8 +364,8 @@ class TestFakeQuantizeOps(TestCase):
     def _test_backward_per_tensor_cachemask_impl(self, device):
         float_types = (torch.float32, torch.float16, torch.float64)
         torch_types = (torch.qint8, torch.quint8)
-        tensor_qparam = (True, False)
-        for float_type, torch_type, tensor_qparam in itertools.product(float_types, torch_types, tensor_qparam):
+        tensor_qparams = (True, False)
+        for float_type, torch_type, tensor_qparam in itertools.product(float_types, torch_types, tensor_qparams):
            X = torch.randn(4, 8).to(device).to(float_type)
            X.requires_grad_()
            # pick the scale + zp so that some values get clipped
diff --git a/test/test_linalg.py b/test/test_linalg.py
index c7b816ee5c1e..86a5aece3ff8 100644
--- a/test/test_linalg.py
+++ b/test/test_linalg.py
@@ -2413,7 +2413,7 @@ class TestLinalg(TestCase):
             self.assertEqual(v.mT.matmul(V).det().abs(), torch.ones(batches, device=device, dtype=dtype))

         all_batches = [(), (1,), (3,), (2, 3)]
-        for actual_rank, size, all_batches in [
+        for actual_rank, size, all_batches in [  # noqa: B020
             (2, (17, 4), all_batches),
             (4, (17, 4), all_batches),
             (4, (17, 17), all_batches),
@@ -7415,7 +7415,7 @@ scipy_lobpcg | {eq_err_scipy:10.2e} | {eq_err_general_scipy:10.2e} | {iters2:
             self.assertEqual(s[..., :actual_rank], S[..., :actual_rank])

         all_batches = [(), (1,), (3,), (2, 3)]
-        for actual_rank, size, all_batches in [
+        for actual_rank, size, all_batches in [  # noqa: B020
             (2, (17, 4), all_batches),
             (2, (100, 4), all_batches),
             (6, (100, 40), all_batches),
diff --git a/test/test_nn.py b/test/test_nn.py
index d12469ee91fd..2815ddcf2d4d 100644
--- a/test/test_nn.py
+++ b/test/test_nn.py
@@ -11760,7 +11760,7 @@ class TestNNDeviceType(NNTestCase):
         reductions = ['none', 'sum', 'mean']
         label_smoothings = [0.05, 0.15]

-        weight = torch.tensor([0.3, 0.6], device=device)
+        wgt = torch.tensor([0.3, 0.6], device=device)
         inp1 = torch.tensor([[0.3, 0.4], [1, 2]], device=device)
         inp2 = torch.tensor([[0.3, 0.6], [1, 2]], device=device)

@@ -11768,7 +11768,7 @@
         targ_negative_ignore_index = torch.tensor([-2, 1], device=device)
         targ_positive_ignore_index = torch.tensor([2, 1], device=device)

-        for reduction, label_smoothing, weight in product(reductions, label_smoothings, (None, weight)):
+        for reduction, label_smoothing, weight in product(reductions, label_smoothings, (None, wgt)):
             def check_equal(loss, inp_targ_1, inp_targ_2):
                 inp1, targ1 = inp_targ_1
                 inp2, targ2 = inp_targ_2
diff --git a/test/torch_np/numpy_tests/lib/test_function_base.py b/test/torch_np/numpy_tests/lib/test_function_base.py
index 0ada4214e1e2..3934613a64fc 100644
--- a/test/torch_np/numpy_tests/lib/test_function_base.py
+++ b/test/torch_np/numpy_tests/lib/test_function_base.py
@@ -734,9 +734,9 @@ class TestDiff(TestCase):
         x = list(range(3))
         assert_raises(ValueError, diff, x, n=-1)
         output = [diff(x, n=n) for n in range(1, 5)]
-        expected = [[1, 1], [0], [], []]
+        expected_output = [[1, 1], [0], [], []]
         # assert_(diff(x, n=0) is x)
-        for n, (expected, out) in enumerate(zip(expected, output), start=1):
+        for n, (expected, out) in enumerate(zip(expected_output, output), start=1):
             assert_(type(out) is np.ndarray)
             assert_array_equal(out, expected)
             assert_equal(out.dtype, np.int_)
diff --git a/torch/_inductor/lowering.py b/torch/_inductor/lowering.py
index 621710df1b7e..a45e3e3e7953 100644
--- a/torch/_inductor/lowering.py
+++ b/torch/_inductor/lowering.py
@@ -323,19 +323,19 @@ def broadcast_symbolic_shapes(a, b):
     are symbolic sympy formulas.
     """
     output = []
-    for a, b in itertools.zip_longest(
+    for x, y in itertools.zip_longest(
         reversed(a), reversed(b), fillvalue=sympy.Integer(1)
     ):
-        if b == 1:
-            output.append(a)
-        elif a == 1:
-            output.append(b)
+        if y == 1:
+            output.append(x)
+        elif x == 1:
+            output.append(y)
         else:
-            V.graph.sizevars.guard_equals(a, b)
-            if len(sympy.expand(b).free_symbols) < len(sympy.expand(a).free_symbols):
-                output.append(b)  # prefer shorter formula
+            V.graph.sizevars.guard_equals(x, y)
+            if len(sympy.expand(y).free_symbols) < len(sympy.expand(x).free_symbols):
+                output.append(y)  # prefer shorter formula
             else:
-                output.append(a)
+                output.append(x)
     return tuple(reversed(output))


diff --git a/torch/_inductor/triton_heuristics.py b/torch/_inductor/triton_heuristics.py
index 7fecff7cdb1d..be062e8eb3cb 100644
--- a/torch/_inductor/triton_heuristics.py
+++ b/torch/_inductor/triton_heuristics.py
@@ -540,8 +540,9 @@ def _find_names(obj):
     import inspect

     frame = inspect.currentframe()
-    for frame in iter(lambda: frame.f_back, None):  # type: ignore[union-attr]
+    while frame is not None:
         frame.f_locals
+        frame = frame.f_back
     obj_names = []
     for referrer in gc.get_referrers(obj):
         if isinstance(referrer, dict):
diff --git a/torch/autograd/functional.py b/torch/autograd/functional.py
index 30045bc8671c..bab85498bce8 100644
--- a/torch/autograd/functional.py
+++ b/torch/autograd/functional.py
@@ -550,9 +550,9 @@ def _jacfwd(func, inputs, strict=False, vectorize=False):
     is_outputs_tuple, outputs = output_info
     # Step 3: for each of the output tangents, split along dim 0
     jacobian_input_output = []
-    for jac, output_i in zip(outputs_before_split, outputs):
+    for jac_output_i, output_i in zip(outputs_before_split, outputs):
         jacobian_output_i_output = []
-        for jac, input_j in zip(jac.split(input_numels, dim=0), inputs):
+        for jac, input_j in zip(jac_output_i.split(input_numels, dim=0), inputs):
             # We need to transpose the Jacobian because in forward AD, the
             # batch dimension represents that of the inputs
             jacobian_input_i_output_j = jac.permute(*range(1, jac.ndim), 0).reshape(
@@ -758,9 +758,11 @@ def jacobian(
         # Step 3: The returned jacobian is one big tensor per input. In this step,
         # we split each Tensor by output.
         jacobian_input_output = []
-        for jac, input_i in zip(jacobians_of_flat_output, inputs):
+        for jac_input_i, input_i in zip(jacobians_of_flat_output, inputs):
             jacobian_input_i_output = []
-            for jac, output_j in zip(jac.split(output_numels, dim=0), outputs):
+            for jac, output_j in zip(
+                jac_input_i.split(output_numels, dim=0), outputs
+            ):
                 jacobian_input_i_output_j = jac.view(output_j.shape + input_i.shape)
                 jacobian_input_i_output.append(jacobian_input_i_output_j)
             jacobian_input_output.append(jacobian_input_i_output)
diff --git a/torch/fx/experimental/migrate_gradual_types/constraint_generator.py b/torch/fx/experimental/migrate_gradual_types/constraint_generator.py
index 7f83a639ff48..0b5307521533 100644
--- a/torch/fx/experimental/migrate_gradual_types/constraint_generator.py
+++ b/torch/fx/experimental/migrate_gradual_types/constraint_generator.py
@@ -1262,8 +1262,8 @@ class ConstraintGenerator:
         if isinstance(t, torch.Tensor):
             if len(t.shape) > 0:
                 res = []
-                for t in t.shape:
-                    res.append(t)
+                for d in t.shape:
+                    res.append(d)
                 attr_type = TensorType(res)
                 output, counter = gen_tvar(counter)
                 self.symbol_dict[n] = output
diff --git a/torch/nn/parallel/distributed.py b/torch/nn/parallel/distributed.py
index 7b2005bdd593..dd674864d688 100644
--- a/torch/nn/parallel/distributed.py
+++ b/torch/nn/parallel/distributed.py
@@ -1286,8 +1286,8 @@ class DistributedDataParallel(Module, Joinable):
                 )
                 yield from ps

-            for m in m.modules() if recurse else [m]:
-                yield from model_parameters(m)
+            for mod in m.modules() if recurse else [m]:
+                yield from model_parameters(mod)

     def _check_default_group(self):
         pickle_not_supported = False
diff --git a/torch/testing/_internal/common_device_type.py b/torch/testing/_internal/common_device_type.py
index 934218bfff88..96b7817b5c4a 100644
--- a/torch/testing/_internal/common_device_type.py
+++ b/torch/testing/_internal/common_device_type.py
@@ -458,7 +458,7 @@ class DeviceTypeTestBase(TestCase):
                 parametrize_fn = compose_parametrize_fns(dtype_parametrize_fn, parametrize_fn)

             # Instantiate the parametrized tests.
-            for (test, test_suffix, param_kwargs, decorator_fn) in parametrize_fn(test, generic_cls, cls):
+            for (test, test_suffix, param_kwargs, decorator_fn) in parametrize_fn(test, generic_cls, cls):  # noqa: B020
                 test_suffix = '' if test_suffix == '' else '_' + test_suffix
                 device_suffix = '_' + cls.device_type

diff --git a/torch/testing/_internal/common_methods_invocations.py b/torch/testing/_internal/common_methods_invocations.py
index 9683c02753fd..e24145a865da 100644
--- a/torch/testing/_internal/common_methods_invocations.py
+++ b/torch/testing/_internal/common_methods_invocations.py
@@ -4926,7 +4926,7 @@ def sample_inputs_put(op_info, device, dtype, requires_grad, **kwargs):
     tgt_gen = (make_arg(size) for size in tgt_sizes)
     idx = make_idx((0,), high=1)
     src = make_arg((0,))
-    for tgt, acc in product(tgt, (True, False)):
+    for tgt, acc in product(tgt_gen, (True, False)):
         yield SampleInput(input=tgt.clone().requires_grad_(requires_grad),
                           args=(idx.clone(),
                                 src.clone().requires_grad_(requires_grad),
@@ -8190,9 +8190,9 @@ def sample_inputs_scaled_dot_product_attention(op_info, device, dtype, requires_

     qkv_shapes = [(dim_3_q_shape, dim_3_kv_shape), (dim_4_q_shape, dim_4_kv_shape), broadcast_tuple]
     samples = []
-    for qkv_shapes, is_causal, dropout_p in product(
+    for qkv_shape, is_causal, dropout_p in product(
             qkv_shapes, [True, False], [0.0, 0.5]):
-        shape_q, shape_kv = qkv_shapes
+        shape_q, shape_kv = qkv_shape
         samples.append(SampleInput(
             make(shape_q),
             make(shape_kv),
diff --git a/torch/utils/data/datapipes/_hook_iterator.py b/torch/utils/data/datapipes/_hook_iterator.py
index dae358fd0143..7319f9e96657 100644
--- a/torch/utils/data/datapipes/_hook_iterator.py
+++ b/torch/utils/data/datapipes/_hook_iterator.py
@@ -40,9 +40,9 @@ def _generate_input_args_string(obj):
     for param_name in signature.parameters.keys():
         input_param_names.add(param_name)
     result = []
-    for name, obj in inspect.getmembers(obj):
+    for name, value in inspect.getmembers(obj):
         if name in input_param_names:
-            result.append((name, _simplify_obj_name(obj)))
+            result.append((name, _simplify_obj_name(value)))
     return ', '.join([f'{name}={value}' for name, value in result])