diff --git a/.circleci/validate-docker-version.py b/.circleci/validate-docker-version.py
index cba4a8ef66dc..9410b8856800 100755
--- a/.circleci/validate-docker-version.py
+++ b/.circleci/validate-docker-version.py
@@ -23,7 +23,7 @@ def load_tags_for_projects(workflow_config):
 
 
 def check_version(job, tags, expected_version):
-    valid_versions = [v for v in tags[job].split(",")]
+    valid_versions = tags[job].split(",")
     if expected_version not in valid_versions:
         raise RuntimeError(
             "We configured {} to use Docker version {}; but this "
diff --git a/.flake8 b/.flake8
index 5cdb11a8fd35..098ce96604aa 100644
--- a/.flake8
+++ b/.flake8
@@ -8,6 +8,6 @@ ignore =
     # these ignores are from flake8-bugbear; please fix!
     B007,B008,
     # these ignores are from flake8-comprehensions; please fix!
-    C400,C401,C402,C403,C404,C405,C407,C411,C413,C414,C415,C416
+    C400,C401,C402,C403,C404,C405,C407,C411,C413,C414,C415
 per-file-ignores = __init__.py: F401
 exclude = docs/src,venv,third_party,caffe2,scripts,docs/caffe2,torch/lib/include,torch/lib/tmp_install,build,torch/include,*.pyi,.git
diff --git a/aten/src/ATen/preprocess_declarations.py b/aten/src/ATen/preprocess_declarations.py
index d0f35850326e..b7d56125f4e5 100644
--- a/aten/src/ATen/preprocess_declarations.py
+++ b/aten/src/ATen/preprocess_declarations.py
@@ -91,7 +91,7 @@ def process_types_and_backends(option):
 
     # sort the result for easy reading
     for backend in backend_types.keys():
-        backend_types[backend] = sorted([type for type in backend_types[backend]])
+        backend_types[backend] = sorted(backend_types[backend])
 
     option['backend_types'] = backend_types
 
diff --git a/test/distributed/test_distributed.py b/test/distributed/test_distributed.py
index f6fd7343c4a9..c72f179b2205 100644
--- a/test/distributed/test_distributed.py
+++ b/test/distributed/test_distributed.py
@@ -289,13 +289,13 @@ class _DistTestBase(object):
         return (group, group_id, rank)
 
     def _init_full_group_test(self, **kwargs):
-        group = [i for i in range(0, dist.get_world_size())]
+        group = list(range(0, dist.get_world_size()))
         group_id = dist.new_group(**kwargs)
         rank = dist.get_rank()
         return (group, group_id, rank)
 
     def _init_global_test(self):
-        group = [i for i in range(0, dist.get_world_size())]
+        group = list(range(0, dist.get_world_size()))
         group_id = dist.group.WORLD
         rank = dist.get_rank()
         return (group, group_id, rank)
diff --git a/test/jit/test_list_dict.py b/test/jit/test_list_dict.py
index 4a7a8e34c0af..e90780ed0482 100644
--- a/test/jit/test_list_dict.py
+++ b/test/jit/test_list_dict.py
@@ -459,7 +459,7 @@ class TestList(JitTestCase):
 
         def fn(x):
             # type: (int)
-            return [i for i in range(x)]
+            return [i for i in range(x)]  # noqa: C416
 
         test_func(fn, (9,))
         test_func(fn, (0,))
diff --git a/test/test_dataloader.py b/test/test_dataloader.py
index ffab63f41af6..94f278ff0214 100644
--- a/test/test_dataloader.py
+++ b/test/test_dataloader.py
@@ -976,7 +976,7 @@ class TestDataLoader(TestCase):
         dataloader = DataLoader(dataset, num_workers=num_workers, batch_size=None,
                                 worker_init_fn=set_faulthander_if_available)
         dataloader_iter = iter(dataloader)
-        fetched = sorted([d for d in dataloader_iter])
+        fetched = sorted(dataloader_iter)
         for a, b in zip(fetched, expected):
             # non-batched should not convert ints into tensors
             self.assertIsInstance(a, torch._six.int_classes)
@@ -1808,7 +1808,7 @@ class TestWorkerQueueDataset(Dataset):
 class TestIndividualWorkerQueue(TestCase):
     def setUp(self):
         super(TestIndividualWorkerQueue, self).setUp()
-        self.dataset = TestWorkerQueueDataset([i for i in range(128)])
+        self.dataset = TestWorkerQueueDataset(list(range(128)))
 
     def _run_ind_worker_queue_test(self, batch_size, num_workers):
         loader = DataLoader(
@@ -1818,7 +1818,7 @@ class TestIndividualWorkerQueue(TestCase):
         current_worker_idx = 0
         for i, (worker_ids, sample) in enumerate(loader):
             self.assertEqual(worker_ids.tolist(), [current_worker_idx] * batch_size)
-            self.assertEqual(sample.tolist(), [j for j in range(i * batch_size, (i + 1) * batch_size)])
+            self.assertEqual(sample.tolist(), list(range(i * batch_size, (i + 1) * batch_size)))
             current_worker_idx += 1
             if current_worker_idx == num_workers:
                 current_worker_idx = 0
diff --git a/test/test_jit.py b/test/test_jit.py
index eee92fb4476e..1c6144c96052 100644
--- a/test/test_jit.py
+++ b/test/test_jit.py
@@ -1973,7 +1973,7 @@ graph(%Ra, %Rb):
 
         trace_graph, _ = torch.jit._get_trace_graph(fn, (x,), _force_outplace=True)
         self.run_pass('dce', trace_graph)
-        ops = [n for n in trace_graph.nodes()]
+        ops = list(trace_graph.nodes())
         for op in ops:
             self.assertTrue(op.hasAttribute('inplace'))
         inplace_flags = [False, True, True, False]
@@ -3889,23 +3889,23 @@ graph(%Ra, %Rb):
         slstm(*inputs).sum().backward()
         global fw_graph
         fw_graph = slstm.graph_for(*inputs)
-        nodes = [n for n in fw_graph.nodes()]
+        nodes = list(fw_graph.nodes())
         tested_blocks = False
         for node in nodes:
-            for output in [o for o in node.outputs()]:
+            for output in node.outputs():
                 self.assertTrue(hasattr(output, 'type'))
                 self.assertTrue(output.type() is not None)
-            for input in [i for i in node.inputs()]:
+            for input in node.inputs():
                 self.assertTrue(hasattr(input, 'type'))
                 self.assertTrue(input.type() is not None)
-            for block in [b for b in node.blocks()]:
+            for block in node.blocks():
                 tested_blocks = True
                 self.assertTrue(hasattr(block, 'inputs'))
                 self.assertTrue(hasattr(block, 'outputs'))
-                for output in [o for o in block.outputs()]:
+                for output in block.outputs():
                     self.assertTrue(hasattr(output, 'type'))
                     self.assertTrue(output.type() is not None)
-                for input in [i for i in block.inputs()]:
+                for input in block.inputs():
                     self.assertTrue(hasattr(input, 'type'))
                     self.assertTrue(input.type() is not None)
                 self.assertTrue(hasattr(block, 'returnNode'))
@@ -10013,7 +10013,7 @@ a")
             self.assertEqual(o, v)
 
         with self.assertRaisesRegex(Exception, "object is not iterable"):
-            print([val for val in m])
+            print(list(m))
 
     def test_attr_qscheme_script(self):
         class Foo(torch.nn.Module):
@@ -12757,7 +12757,7 @@ a")
         self.assertEqual(torch.jit.script(mod)(torch.tensor(.5)), [])
 
         def bad_type_annotation():
-            out = torch.jit.annotate(int, [x for x in [1, 2, 3]])
+            out = torch.jit.annotate(int, [x for x in [1, 2, 3]])  # noqa: C416
             return out
 
         with self.assertRaisesRegex(Exception, "Expected list type annotation"):
diff --git a/test/test_sparse.py b/test/test_sparse.py
index acf0acc5bfa6..eafce9ef6531 100644
--- a/test/test_sparse.py
+++ b/test/test_sparse.py
@@ -1884,14 +1884,14 @@ class TestSparse(TestCase):
 
     @cpu_only  # not really, but we only really want to run this once
     def test_dtypes(self):
-        all_sparse_dtypes = [dtype for dtype in torch.testing.get_all_dtypes()]
+        all_sparse_dtypes = torch.testing.get_all_dtypes()
         do_test_dtypes(self, all_sparse_dtypes, torch.sparse_coo, torch.device('cpu'))
         if torch.cuda.is_available():
             do_test_dtypes(self, all_sparse_dtypes, torch.sparse_coo, torch.device('cuda:0'))
 
     @cpu_only  # not really, but we only really want to run this once
     def test_empty_full(self):
-        all_sparse_dtypes = [dtype for dtype in torch.testing.get_all_dtypes()]
+        all_sparse_dtypes = torch.testing.get_all_dtypes()
         do_test_empty_full(self, all_sparse_dtypes, torch.sparse_coo, torch.device('cpu'))
         if torch.cuda.device_count() > 0:
             do_test_empty_full(self, all_sparse_dtypes, torch.sparse_coo, None)
diff --git a/torch/autograd/gradcheck.py b/torch/autograd/gradcheck.py
index 690b11a99271..eeb139eaab05 100644
--- a/torch/autograd/gradcheck.py
+++ b/torch/autograd/gradcheck.py
@@ -57,8 +57,8 @@ def get_numerical_jacobian(fn, input, target=None, eps=1e-3):
     # It's much easier to iterate over flattened lists of tensors.
    # These are reference to the same objects in jacobian, so any changes
     # will be reflected in it as well.
-    x_tensors = [t for t in iter_tensors(target, True)]
-    j_tensors = [t for t in iter_tensors(jacobian)]
+    x_tensors = iter_tensors(target, True)
+    j_tensors = iter_tensors(jacobian)
 
     # TODO: compare structure
     for x_tensor, d_tensor in zip(x_tensors, j_tensors):
diff --git a/torch/onnx/symbolic_opset9.py b/torch/onnx/symbolic_opset9.py
index 84be4619d268..fb94611b12e5 100644
--- a/torch/onnx/symbolic_opset9.py
+++ b/torch/onnx/symbolic_opset9.py
@@ -2182,7 +2182,7 @@ def group_norm(g, input, num_groups, weight, bias, eps, cudnn_enabled):
         bias = g.op("Constant", value_t=bias_value)
 
     # Norm has shape [N, C, *] so we reshape weight and bias to [C, *]
-    axes = [i for i in range(1, len(input_sizes) - 1)]
+    axes = list(range(1, len(input_sizes) - 1))
 
     return add(g, mul(g, norm, g.op("Unsqueeze", weight, axes_i=axes)),
                g.op("Unsqueeze", bias, axes_i=axes))
diff --git a/torch/optim/lr_scheduler.py b/torch/optim/lr_scheduler.py
index b61573dc5c99..4743d22a4659 100644
--- a/torch/optim/lr_scheduler.py
+++ b/torch/optim/lr_scheduler.py
@@ -312,7 +312,7 @@ class MultiplicativeLR(_LRScheduler):
             return [group['lr'] * lmbda(self.last_epoch)
                     for lmbda, group in zip(self.lr_lambdas, self.optimizer.param_groups)]
         else:
-            return [base_lr for base_lr in self.base_lrs]
+            return list(self.base_lrs)
 
 
 class StepLR(_LRScheduler):
diff --git a/torch/storage.py b/torch/storage.py
index b5aa4a27967a..eed2610ff251 100644
--- a/torch/storage.py
+++ b/torch/storage.py
@@ -47,7 +47,7 @@ class _StorageBase(object):
 
     def tolist(self):
         """Returns a list containing the elements of this storage"""
-        return [v for v in self]
+        return list(self)
 
     def cpu(self):
         """Returns a CPU copy of this storage if it's not already on the CPU"""
diff --git a/torch/testing/_internal/distributed/rpc/rpc_test.py b/torch/testing/_internal/distributed/rpc/rpc_test.py
index c7c567eff15c..28e3fb72814e 100644
--- a/torch/testing/_internal/distributed/rpc/rpc_test.py
+++ b/torch/testing/_internal/distributed/rpc/rpc_test.py
@@ -310,7 +310,7 @@ class RpcTest(RpcAgentTestFixture):
         self.assertEqual(worker_names, expected_worker_names)
 
         worker_ids = {worker_info.id for worker_info in worker_infos}
-        expected_worker_ids = {rank for rank in range(self.world_size)}
+        expected_worker_ids = set(range(self.world_size))
         self.assertEqual(worker_ids, expected_worker_ids)
 
     @dist_init
diff --git a/torch/utils/hipify/hipify_python.py b/torch/utils/hipify/hipify_python.py
index 63ee0c78bb72..6c97bcebf28a 100755
--- a/torch/utils/hipify/hipify_python.py
+++ b/torch/utils/hipify/hipify_python.py
@@ -281,8 +281,8 @@ def processKernelLaunches(string, stats):
 
         return kernel_positions
 
-    # Grab positional ranges of all kernel launchces
-    get_kernel_positions = [k for k in find_kernel_bounds(string)]
+    # Grab positional ranges of all kernel launches
+    get_kernel_positions = list(find_kernel_bounds(string))
     output_string = string
 
     # Replace each CUDA kernel with a HIP kernel.
diff --git a/torch/utils/tensorboard/_caffe2_graph.py b/torch/utils/tensorboard/_caffe2_graph.py
index efc3df0ed13c..1185f05bcca3 100644
--- a/torch/utils/tensorboard/_caffe2_graph.py
+++ b/torch/utils/tensorboard/_caffe2_graph.py
@@ -446,7 +446,7 @@ def _operator_to_node_simp(op, inter_blobs, seen):
     if op.name:
         name = op.name
     else:
-        name_list = [name for name in outputs]
+        name_list = list(outputs)
         scope = os.path.commonprefix(name_list)
         name = os.path.join(scope, op.type)
     assert(name)