Enable PLC1802 on ruff (#165813)
This PR enables the ruff check `PLC1802`, which flags `len()` calls on sequences used as a boolean test without an explicit comparison (e.g. `if len(x):` instead of `if x:`).
Pull Request resolved: https://github.com/pytorch/pytorch/pull/165813
Approved by: https://github.com/ezyang
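As a minimal illustration (this snippet is not part of the PR's diff; `items` is a made-up name), PLC1802 flags `len()` used purely as a truth test, and the suggested fix is to rely on the sequence's own truthiness:

    items = ["a", "b", "c"]

    # Flagged by PLC1802: len() is called only to test whether the list is empty
    if len(items):
        print("non-empty")

    # Preferred, equivalent form for sequences
    if items:
        print("non-empty")

Once `PLC1802` is in the `select` list, it runs as part of the normal lint pass; it can also be exercised on its own with `ruff check --select PLC1802 <path>`.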
committed by: PyTorch MergeBot
parent: aaac8cb0f5
commit: e595136187
@@ -124,7 +124,7 @@ with open(MODELS_FILENAME) as fh:
             continue
         batch_size = int(batch_size)
         BATCH_SIZE_KNOWN_MODELS[model_name] = batch_size
-assert len(BATCH_SIZE_KNOWN_MODELS)
+assert BATCH_SIZE_KNOWN_MODELS


 try:
@@ -212,6 +212,7 @@ select = [
     "PIE810",
     "PLC0131", # type bivariance
     "PLC0132", # type param mismatch
+    "PLC1802", # len({expression}) used as condition without comparison
     "PLC0205", # string as __slots__
     "PLC3002", # unnecessary-direct-lambda-call
     "PLE",
@@ -100,7 +100,7 @@ def param_search_greedy(x, bit_rate, n_bins=200, ratio=0.16):
             cur_min, cur_max, cur_loss = cur_min + stepsize, cur_max, loss1
         else:
             cur_min, cur_max, cur_loss = cur_min, cur_max - stepsize, loss2
-    if len(solutions):
+    if solutions:
         best = solutions[0]
         for solution in solutions:
             if solution[-1] < best[-1]:
@@ -98,14 +98,14 @@ def pretty_print_buckets(buckets: list[Bucket], bucket_bytes_cap: int) -> None:
                 )
             )

-    if len(rows):
+    if rows:
         log.info(
             "\nDDPOptimizer used bucket cap %s and created %d buckets. Enable debug logs for detailed bucket info.",
             bucket_bytes_cap,
             len(buckets),
         )

-        if len(extended_buckets):
+        if extended_buckets:
             log.warning(
                 "Some buckets were extended beyond their requested parameter capacities"
                 " in order to ensure each subgraph has an output node, required for fx graph partitioning."
@@ -122,7 +122,7 @@ def pretty_print_buckets(buckets: list[Bucket], bucket_bytes_cap: int) -> None:
             tabulate(rows, headers=headers, tablefmt="simple_grid"),
         )

-        if len(extended_buckets):
+        if extended_buckets:
             log.warning(
                 "DDPOptimizer extended these buckets to ensure per-subgraph output nodes:\n%s",
                 tabulate(
@@ -1867,7 +1867,7 @@ class OutputGraph(OutputGraphCommon):
                 _get_source_debug_name(var.source) for var in potential_side_effects
             ]

-            if len(side_effect_refs):
+            if side_effect_refs:
                 warnings.warn(
                     f"While exporting, we found certain side effects happened in the model.forward. "
                     f"Here are the list of potential sources you can double check: {side_effect_refs}"
@@ -3736,7 +3736,7 @@ class SubgraphTracer(fx.Tracer):
                 if v1 != v2
             ]

-            if len(mutated_inputs):
+            if mutated_inputs:
                 mutated_nodes = [input_nodes[i] for i in mutated_inputs]
                 msg = f"Input mutation detected at {mutated_nodes}"
                 return MutationInfo(True, msg)
@@ -1847,7 +1847,7 @@ class BuiltinVariable(VariableTracker):
                 polyfills.builtins.iter_
             ).call_function(tx, [obj, *args], {})

-        if len(args):
+        if args:
             # iter(obj, sentinel) returns an object that implements
             # __iter__ and __next__ methods (UserDefinedObjectVariable)
             # Wrap the return value in a IteratorVariable subclass (LazyObjectIteratorVariable)
@@ -834,7 +834,7 @@ def _schedule_for_comm(
                 collective_cost -= snode_to_cost[candidate.snode]
             heapq.heapify(ready)

-    while len(ready):
+    while ready:
         snode = heapq.heappop(ready).snode
         if reorder_for_overlap and contains_collective(snode):
             schedule_collective_for_overlap(snode)
@@ -2895,7 +2895,7 @@ def match_target_block_product(
         relative_scores[dim] = score / total_score

     # Scale up dimensions by their relative scores until we reach the target
-    while curr_block_product < target_block_product and len(relative_scores):
+    while curr_block_product < target_block_product and relative_scores:
         dim, score = max(relative_scores.items(), key=lambda item: item[1])

         # Check if we've hit the max for this dimension
@@ -792,7 +792,7 @@ def get_kernel_metadata(
     # where `inductor_nodes` contains nodes from multiple graph instances
     # is not supported. An example of this is conditional statements.
     single_graph = None
-    if len(inductor_nodes):
+    if inductor_nodes:
         unique_graphs = OrderedSet(n.graph for n in inductor_nodes)
         if len(unique_graphs) == 1:
             single_graph = inductor_nodes[0].graph
@@ -237,7 +237,7 @@ class FSDPParamGroup:
             raise AssertionError(
                 f"FSDP expects uniform original parameter dtype but got {orig_dtypes}"
             )
-        self._orig_dtype = next(iter(orig_dtypes)) if len(trainable_params) else None
+        self._orig_dtype = next(iter(orig_dtypes)) if trainable_params else None
         if len(trainable_params) > 0 and len(reduce_dtypes) != 1:
             # This can be relaxed if we issue one reduce-scatter per reduce
             # dtype (but we would need a way for users to specify multiple
@@ -245,9 +245,7 @@ class FSDPParamGroup:
             raise AssertionError(
                 f"FSDP expects uniform reduce dtype but got {reduce_dtypes}"
             )
-        self._reduce_dtype = (
-            next(iter(reduce_dtypes)) if len(trainable_params) else None
-        )
+        self._reduce_dtype = next(iter(reduce_dtypes)) if trainable_params else None

     def lazy_init(self):
         # Lazy init should be idempotent
@@ -2178,7 +2178,7 @@ BACKWARD_INPUT, BACKWARD_WEIGHT, and OVERLAP_F_B are supported."
                     raise e

         # Mostly these operations should have finished long ago, but there isn't an obvious time when to wait for them
-        while len(send_ops):
+        while send_ops:
            _wait_batch_p2p(send_ops.pop())

         assert len(self.unshard_ops) == 0, "Unused unshard operations"
@@ -372,7 +372,7 @@ def _check_dependencies(m):

     if dependencies is not None:
         missing_deps = [pkg for pkg in dependencies if not _check_module_exists(pkg)]
-        if len(missing_deps):
+        if missing_deps:
             raise RuntimeError(f"Missing dependencies: {', '.join(missing_deps)}")


@@ -166,7 +166,7 @@ class SampleInput:
            A SampleInput can be constructed "naturally" with *args and **kwargs or by
            explicitly setting the "args" and "kwargs" parameters, but the two
            methods of construction cannot be mixed!"""
-        elif len(var_args) or len(var_kwargs):
+        elif var_args or var_kwargs:
             assert (
                 output_process_fn_grad is None
                 and broadcasts_input is None
@@ -53,7 +53,7 @@ class ConcatDataFramesPipe(DFIterDataPipe):
             if len(buffer) == self.n_batch:
                 yield df_wrapper.concat(buffer)
                 buffer = []
-        if len(buffer):
+        if buffer:
             yield df_wrapper.concat(buffer)


@@ -78,7 +78,7 @@ class ShuffleDataFramesPipe(DFIterDataPipe):
             if len(buffer) == size:
                 yield df_wrapper.concat(buffer)
                 buffer = []
-        if len(buffer):
+        if buffer:
             yield df_wrapper.concat(buffer)


@@ -107,7 +107,7 @@ class FilterDataFramesPipe(DFIterDataPipe):
             if len(buffer) == size:
                 yield df_wrapper.concat(buffer)
                 buffer = []
-        if len(buffer):
+        if buffer:
             yield df_wrapper.concat(buffer)


@@ -626,7 +626,7 @@ class MultiplexerIterDataPipe(IterDataPipe):

     def __iter__(self):
         iterators = [iter(x) for x in self.datapipes]
-        while len(iterators):
+        while iterators:
             for it in iterators:
                 try:
                     value = next(it)
@@ -88,7 +88,7 @@ class FilterIterDataPipe(IterDataPipe[_T_co]):
         for idx, mask in enumerate(df_wrapper.iterate(condition)):
             if mask:
                 result.append(df_wrapper.get_item(data, idx))
-        if len(result):
+        if result:
             return True, df_wrapper.concat(result)
         else:
             return False, None  # type: ignore[return-value]
@@ -309,7 +309,7 @@ class WeakIdKeyDictionary(MutableMapping):
                 dict = type({})(dict)
             for key, value in dict.items():
                 d[self.ref_type(key, self._remove)] = value  # CHANGED
-        if len(kwargs):
+        if kwargs:
             self.update(kwargs)

     def __ior__(self, other):