Fix all occurrences of C416. (#33429)

Summary:
C416: Unnecessary (list/set) comprehension - rewrite using list/set().

See https://pypi.org/project/flake8-comprehensions/
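
As a minimal sketch of the pattern (illustrative only, not taken from the diff below):

    ns = [n for n in range(5)]   # flagged by C416: the comprehension is a plain copy
    ns = list(range(5))          # equivalent and clearer

    ids = {i for i in range(5)}  # the set variant is flagged as well
    ids = set(range(5))

    # Not flagged: the comprehension filters or transforms its elements.
    evens = [n for n in range(5) if n % 2 == 0]
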
Pull Request resolved: https://github.com/pytorch/pytorch/pull/33429

Differential Revision: D19972858

Pulled By: ezyang

fbshipit-source-id: faac042a94c59d737bd5ae983121a0a029346e23
Author: Hong Xu
Date: 2020-02-21 08:29:32 -08:00
Committed by: Facebook Github Bot
Parent: 4588f49f68
Commit: a6a72ac68f
15 changed files with 29 additions and 29 deletions

View File

@@ -23,7 +23,7 @@ def load_tags_for_projects(workflow_config):
def check_version(job, tags, expected_version):
-    valid_versions = [v for v in tags[job].split(",")]
+    valid_versions = tags[job].split(",")
    if expected_version not in valid_versions:
        raise RuntimeError(
            "We configured {} to use Docker version {}; but this "

View File

@@ -8,6 +8,6 @@ ignore =
    # these ignores are from flake8-bugbear; please fix!
    B007,B008,
    # these ignores are from flake8-comprehensions; please fix!
-    C400,C401,C402,C403,C404,C405,C407,C411,C413,C414,C415,C416
+    C400,C401,C402,C403,C404,C405,C407,C411,C413,C414,C415
per-file-ignores = __init__.py: F401
exclude = docs/src,venv,third_party,caffe2,scripts,docs/caffe2,torch/lib/include,torch/lib/tmp_install,build,torch/include,*.pyi,.git

View File

@@ -91,7 +91,7 @@ def process_types_and_backends(option):
    # sort the result for easy reading
    for backend in backend_types.keys():
-        backend_types[backend] = sorted([type for type in backend_types[backend]])
+        backend_types[backend] = sorted(backend_types[backend])
    option['backend_types'] = backend_types

View File

@@ -289,13 +289,13 @@ class _DistTestBase(object):
        return (group, group_id, rank)

    def _init_full_group_test(self, **kwargs):
-        group = [i for i in range(0, dist.get_world_size())]
+        group = list(range(0, dist.get_world_size()))
        group_id = dist.new_group(**kwargs)
        rank = dist.get_rank()
        return (group, group_id, rank)

    def _init_global_test(self):
-        group = [i for i in range(0, dist.get_world_size())]
+        group = list(range(0, dist.get_world_size()))
        group_id = dist.group.WORLD
        rank = dist.get_rank()
        return (group, group_id, rank)

View File

@@ -459,7 +459,7 @@ class TestList(JitTestCase):
        def fn(x):
            # type: (int)
-            return [i for i in range(x)]
+            return [i for i in range(x)] # noqa: C416

        test_func(fn, (9,))
        test_func(fn, (0,))

View File

@@ -976,7 +976,7 @@ class TestDataLoader(TestCase):
        dataloader = DataLoader(dataset, num_workers=num_workers, batch_size=None,
                                worker_init_fn=set_faulthander_if_available)
        dataloader_iter = iter(dataloader)
-        fetched = sorted([d for d in dataloader_iter])
+        fetched = sorted(dataloader_iter)
        for a, b in zip(fetched, expected):
            # non-batched should not convert ints into tensors
            self.assertIsInstance(a, torch._six.int_classes)
@@ -1808,7 +1808,7 @@ class TestWorkerQueueDataset(Dataset):

class TestIndividualWorkerQueue(TestCase):
    def setUp(self):
        super(TestIndividualWorkerQueue, self).setUp()
-        self.dataset = TestWorkerQueueDataset([i for i in range(128)])
+        self.dataset = TestWorkerQueueDataset(list(range(128)))

    def _run_ind_worker_queue_test(self, batch_size, num_workers):
        loader = DataLoader(
@@ -1818,7 +1818,7 @@ class TestIndividualWorkerQueue(TestCase):
        current_worker_idx = 0
        for i, (worker_ids, sample) in enumerate(loader):
            self.assertEqual(worker_ids.tolist(), [current_worker_idx] * batch_size)
-            self.assertEqual(sample.tolist(), [j for j in range(i * batch_size, (i + 1) * batch_size)])
+            self.assertEqual(sample.tolist(), list(range(i * batch_size, (i + 1) * batch_size)))
            current_worker_idx += 1
            if current_worker_idx == num_workers:
                current_worker_idx = 0

View File

@@ -1973,7 +1973,7 @@ graph(%Ra, %Rb):
        trace_graph, _ = torch.jit._get_trace_graph(fn, (x,), _force_outplace=True)
        self.run_pass('dce', trace_graph)
-        ops = [n for n in trace_graph.nodes()]
+        ops = list(trace_graph.nodes())
        for op in ops:
            self.assertTrue(op.hasAttribute('inplace'))
        inplace_flags = [False, True, True, False]
@@ -3889,23 +3889,23 @@ graph(%Ra, %Rb):
        slstm(*inputs).sum().backward()
        global fw_graph
        fw_graph = slstm.graph_for(*inputs)
-        nodes = [n for n in fw_graph.nodes()]
+        nodes = list(fw_graph.nodes())
        tested_blocks = False
        for node in nodes:
-            for output in [o for o in node.outputs()]:
+            for output in node.outputs():
                self.assertTrue(hasattr(output, 'type'))
                self.assertTrue(output.type() is not None)
-            for input in [i for i in node.inputs()]:
+            for input in node.inputs():
                self.assertTrue(hasattr(input, 'type'))
                self.assertTrue(input.type() is not None)
-            for block in [b for b in node.blocks()]:
+            for block in node.blocks():
                tested_blocks = True
                self.assertTrue(hasattr(block, 'inputs'))
                self.assertTrue(hasattr(block, 'outputs'))
-                for output in [o for o in block.outputs()]:
+                for output in block.outputs():
                    self.assertTrue(hasattr(output, 'type'))
                    self.assertTrue(output.type() is not None)
-                for input in [i for i in block.inputs()]:
+                for input in block.inputs():
                    self.assertTrue(hasattr(input, 'type'))
                    self.assertTrue(input.type() is not None)
                self.assertTrue(hasattr(block, 'returnNode'))
@@ -10013,7 +10013,7 @@ a")
            self.assertEqual(o, v)

        with self.assertRaisesRegex(Exception, "object is not iterable"):
-            print([val for val in m])
+            print(list(m))

    def test_attr_qscheme_script(self):
        class Foo(torch.nn.Module):
@@ -12757,7 +12757,7 @@ a")
        self.assertEqual(torch.jit.script(mod)(torch.tensor(.5)), [])

        def bad_type_annotation():
-            out = torch.jit.annotate(int, [x for x in [1, 2, 3]])
+            out = torch.jit.annotate(int, [x for x in [1, 2, 3]]) # noqa: C416
            return out

        with self.assertRaisesRegex(Exception, "Expected list type annotation"):

View File

@@ -1884,14 +1884,14 @@ class TestSparse(TestCase):
    @cpu_only # not really, but we only really want to run this once
    def test_dtypes(self):
-        all_sparse_dtypes = [dtype for dtype in torch.testing.get_all_dtypes()]
+        all_sparse_dtypes = torch.testing.get_all_dtypes()
        do_test_dtypes(self, all_sparse_dtypes, torch.sparse_coo, torch.device('cpu'))
        if torch.cuda.is_available():
            do_test_dtypes(self, all_sparse_dtypes, torch.sparse_coo, torch.device('cuda:0'))

    @cpu_only # not really, but we only really want to run this once
    def test_empty_full(self):
-        all_sparse_dtypes = [dtype for dtype in torch.testing.get_all_dtypes()]
+        all_sparse_dtypes = torch.testing.get_all_dtypes()
        do_test_empty_full(self, all_sparse_dtypes, torch.sparse_coo, torch.device('cpu'))
        if torch.cuda.device_count() > 0:
            do_test_empty_full(self, all_sparse_dtypes, torch.sparse_coo, None)

View File

@@ -57,8 +57,8 @@ def get_numerical_jacobian(fn, input, target=None, eps=1e-3):
    # It's much easier to iterate over flattened lists of tensors.
    # These are reference to the same objects in jacobian, so any changes
    # will be reflected in it as well.
-    x_tensors = [t for t in iter_tensors(target, True)]
-    j_tensors = [t for t in iter_tensors(jacobian)]
+    x_tensors = iter_tensors(target, True)
+    j_tensors = iter_tensors(jacobian)

    # TODO: compare structure
    for x_tensor, d_tensor in zip(x_tensors, j_tensors):
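
One nuance in this hunk: without a list() call, x_tensors and j_tensors become whatever iter_tensors returns; if that is a generator, both are now consumed lazily by the zip above, which iterates each exactly once, so the behavior is preserved. A standalone sketch of that equivalence (gen_a/gen_b are illustrative stand-ins, not from the changed file):

    def gen_a():
        yield from (1, 2, 3)

    def gen_b():
        yield from (10, 20, 30)

    # zip consumes each iterator a single time, just as the loop over
    # (x_tensor, d_tensor) pairs does; materializing lists first changes nothing.
    assert list(zip(gen_a(), gen_b())) == list(zip([1, 2, 3], [10, 20, 30]))
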

View File

@@ -2182,7 +2182,7 @@ def group_norm(g, input, num_groups, weight, bias, eps, cudnn_enabled):
        bias = g.op("Constant", value_t=bias_value)

    # Norm has shape [N, C, *] so we reshape weight and bias to [C, *]
-    axes = [i for i in range(1, len(input_sizes) - 1)]
+    axes = list(range(1, len(input_sizes) - 1))
    return add(g, mul(g, norm, g.op("Unsqueeze", weight, axes_i=axes)), g.op("Unsqueeze", bias, axes_i=axes))

View File

@@ -312,7 +312,7 @@ class MultiplicativeLR(_LRScheduler):
            return [group['lr'] * lmbda(self.last_epoch)
                    for lmbda, group in zip(self.lr_lambdas, self.optimizer.param_groups)]
        else:
-            return [base_lr for base_lr in self.base_lrs]
+            return list(self.base_lrs)


class StepLR(_LRScheduler):

View File

@@ -47,7 +47,7 @@ class _StorageBase(object):
    def tolist(self):
        """Returns a list containing the elements of this storage"""
-        return [v for v in self]
+        return list(self)

    def cpu(self):
        """Returns a CPU copy of this storage if it's not already on the CPU"""

View File

@@ -310,7 +310,7 @@ class RpcTest(RpcAgentTestFixture):
        self.assertEqual(worker_names, expected_worker_names)

        worker_ids = {worker_info.id for worker_info in worker_infos}
-        expected_worker_ids = {rank for rank in range(self.world_size)}
+        expected_worker_ids = set(range(self.world_size))
        self.assertEqual(worker_ids, expected_worker_ids)

    @dist_init

View File

@@ -281,8 +281,8 @@ def processKernelLaunches(string, stats):
        return kernel_positions

-    # Grab positional ranges of all kernel launchces
-    get_kernel_positions = [k for k in find_kernel_bounds(string)]
+    # Grab positional ranges of all kernel launches
+    get_kernel_positions = list(find_kernel_bounds(string))
    output_string = string

    # Replace each CUDA kernel with a HIP kernel.

View File

@@ -446,7 +446,7 @@ def _operator_to_node_simp(op, inter_blobs, seen):
    if op.name:
        name = op.name
    else:
-        name_list = [name for name in outputs]
+        name_list = list(outputs)
        scope = os.path.commonprefix(name_list)
        name = os.path.join(scope, op.type)
    assert(name)