Enable PLC1802 on ruff (#165813)

This PR enables the ruff check `PLC1802`, which detects `len()` calls on sequences used as conditions in a boolean test context (e.g. `if len(x):` where `if x:` suffices).
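
As an illustration (a minimal sketch, not taken from this PR's diff; `items` is a hypothetical name), the pattern being removed is redundant because for any object that defines `__len__` and no `__bool__`, Python's truth testing already falls back to `__len__`:

    items: list[int] = []

    if len(items):  # flagged by PLC1802: len() used as a condition without comparison
        print("non-empty")

    if items:       # preferred: truth testing falls back to __len__, same result
        print("non-empty")

Explicit comparisons such as `if len(items) == 1:` are not flagged, which is why lines like `if len(unique_graphs) == 1:` in this diff are left unchanged.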

Pull Request resolved: https://github.com/pytorch/pytorch/pull/165813
Approved by: https://github.com/ezyang
Yuanyuan Chen
2025-10-18 05:44:10 +00:00
committed by PyTorch MergeBot
parent aaac8cb0f5
commit e595136187
17 changed files with 23 additions and 24 deletions

View File

@@ -124,7 +124,7 @@ with open(MODELS_FILENAME) as fh:
continue
batch_size = int(batch_size)
BATCH_SIZE_KNOWN_MODELS[model_name] = batch_size
-assert len(BATCH_SIZE_KNOWN_MODELS)
+assert BATCH_SIZE_KNOWN_MODELS
try:

View File

@@ -212,6 +212,7 @@ select = [
"PIE810",
"PLC0131", # type bivariance
"PLC0132", # type param mismatch
+"PLC1802", # len({expression}) used as condition without comparison
"PLC0205", # string as __slots__
"PLC3002", # unnecessary-direct-lambda-call
"PLE",

View File

@@ -100,7 +100,7 @@ def param_search_greedy(x, bit_rate, n_bins=200, ratio=0.16):
cur_min, cur_max, cur_loss = cur_min + stepsize, cur_max, loss1
else:
cur_min, cur_max, cur_loss = cur_min, cur_max - stepsize, loss2
-if len(solutions):
+if solutions:
best = solutions[0]
for solution in solutions:
if solution[-1] < best[-1]:

View File

@@ -98,14 +98,14 @@ def pretty_print_buckets(buckets: list[Bucket], bucket_bytes_cap: int) -> None:
)
)
-if len(rows):
+if rows:
log.info(
"\nDDPOptimizer used bucket cap %s and created %d buckets. Enable debug logs for detailed bucket info.",
bucket_bytes_cap,
len(buckets),
)
-if len(extended_buckets):
+if extended_buckets:
log.warning(
"Some buckets were extended beyond their requested parameter capacities"
" in order to ensure each subgraph has an output node, required for fx graph partitioning."
@@ -122,7 +122,7 @@ def pretty_print_buckets(buckets: list[Bucket], bucket_bytes_cap: int) -> None:
tabulate(rows, headers=headers, tablefmt="simple_grid"),
)
-if len(extended_buckets):
+if extended_buckets:
log.warning(
"DDPOptimizer extended these buckets to ensure per-subgraph output nodes:\n%s",
tabulate(

View File

@@ -1867,7 +1867,7 @@ class OutputGraph(OutputGraphCommon):
_get_source_debug_name(var.source) for var in potential_side_effects
]
-if len(side_effect_refs):
+if side_effect_refs:
warnings.warn(
f"While exporting, we found certain side effects happened in the model.forward. "
f"Here are the list of potential sources you can double check: {side_effect_refs}"
@@ -3736,7 +3736,7 @@ class SubgraphTracer(fx.Tracer):
if v1 != v2
]
-if len(mutated_inputs):
+if mutated_inputs:
mutated_nodes = [input_nodes[i] for i in mutated_inputs]
msg = f"Input mutation detected at {mutated_nodes}"
return MutationInfo(True, msg)

View File

@@ -1847,7 +1847,7 @@ class BuiltinVariable(VariableTracker):
polyfills.builtins.iter_
).call_function(tx, [obj, *args], {})
-if len(args):
+if args:
# iter(obj, sentinel) returns an object that implements
# __iter__ and __next__ methods (UserDefinedObjectVariable)
# Wrap the return value in a IteratorVariable subclass (LazyObjectIteratorVariable)

View File

@@ -834,7 +834,7 @@ def _schedule_for_comm(
collective_cost -= snode_to_cost[candidate.snode]
heapq.heapify(ready)
-while len(ready):
+while ready:
snode = heapq.heappop(ready).snode
if reorder_for_overlap and contains_collective(snode):
schedule_collective_for_overlap(snode)

View File

@@ -2895,7 +2895,7 @@ def match_target_block_product(
relative_scores[dim] = score / total_score
# Scale up dimensions by their relative scores until we reach the target
-while curr_block_product < target_block_product and len(relative_scores):
+while curr_block_product < target_block_product and relative_scores:
dim, score = max(relative_scores.items(), key=lambda item: item[1])
# Check if we've hit the max for this dimension

View File

@@ -792,7 +792,7 @@ def get_kernel_metadata(
# where `inductor_nodes` contains nodes from multiple graph instances
# is not supported. An example of this is conditional statements.
single_graph = None
-if len(inductor_nodes):
+if inductor_nodes:
unique_graphs = OrderedSet(n.graph for n in inductor_nodes)
if len(unique_graphs) == 1:
single_graph = inductor_nodes[0].graph

View File

@@ -237,7 +237,7 @@ class FSDPParamGroup:
raise AssertionError(
f"FSDP expects uniform original parameter dtype but got {orig_dtypes}"
)
-self._orig_dtype = next(iter(orig_dtypes)) if len(trainable_params) else None
+self._orig_dtype = next(iter(orig_dtypes)) if trainable_params else None
if len(trainable_params) > 0 and len(reduce_dtypes) != 1:
# This can be relaxed if we issue one reduce-scatter per reduce
# dtype (but we would need a way for users to specify multiple
@@ -245,9 +245,7 @@ class FSDPParamGroup:
raise AssertionError(
f"FSDP expects uniform reduce dtype but got {reduce_dtypes}"
)
-self._reduce_dtype = (
-    next(iter(reduce_dtypes)) if len(trainable_params) else None
-)
+self._reduce_dtype = next(iter(reduce_dtypes)) if trainable_params else None
def lazy_init(self):
# Lazy init should be idempotent

View File

@@ -2178,7 +2178,7 @@ BACKWARD_INPUT, BACKWARD_WEIGHT, and OVERLAP_F_B are supported."
raise e
# Mostly these operations should have finished long ago, but there isn't an obvious time when to wait for them
-while len(send_ops):
+while send_ops:
_wait_batch_p2p(send_ops.pop())
assert len(self.unshard_ops) == 0, "Unused unshard operations"

View File

@@ -372,7 +372,7 @@ def _check_dependencies(m):
if dependencies is not None:
missing_deps = [pkg for pkg in dependencies if not _check_module_exists(pkg)]
-if len(missing_deps):
+if missing_deps:
raise RuntimeError(f"Missing dependencies: {', '.join(missing_deps)}")

View File

@@ -166,7 +166,7 @@ class SampleInput:
A SampleInput can be constructed "naturally" with *args and **kwargs or by
explicitly setting the "args" and "kwargs" parameters, but the two
methods of construction cannot be mixed!"""
-elif len(var_args) or len(var_kwargs):
+elif var_args or var_kwargs:
assert (
output_process_fn_grad is None
and broadcasts_input is None

View File

@@ -53,7 +53,7 @@ class ConcatDataFramesPipe(DFIterDataPipe):
if len(buffer) == self.n_batch:
yield df_wrapper.concat(buffer)
buffer = []
-if len(buffer):
+if buffer:
yield df_wrapper.concat(buffer)
@@ -78,7 +78,7 @@ class ShuffleDataFramesPipe(DFIterDataPipe):
if len(buffer) == size:
yield df_wrapper.concat(buffer)
buffer = []
-if len(buffer):
+if buffer:
yield df_wrapper.concat(buffer)
@@ -107,7 +107,7 @@ class FilterDataFramesPipe(DFIterDataPipe):
if len(buffer) == size:
yield df_wrapper.concat(buffer)
buffer = []
-if len(buffer):
+if buffer:
yield df_wrapper.concat(buffer)

View File

@@ -626,7 +626,7 @@ class MultiplexerIterDataPipe(IterDataPipe):
def __iter__(self):
iterators = [iter(x) for x in self.datapipes]
-while len(iterators):
+while iterators:
for it in iterators:
try:
value = next(it)

View File

@@ -88,7 +88,7 @@ class FilterIterDataPipe(IterDataPipe[_T_co]):
for idx, mask in enumerate(df_wrapper.iterate(condition)):
if mask:
result.append(df_wrapper.get_item(data, idx))
-if len(result):
+if result:
return True, df_wrapper.concat(result)
else:
return False, None # type: ignore[return-value]

View File

@@ -309,7 +309,7 @@ class WeakIdKeyDictionary(MutableMapping):
dict = type({})(dict)
for key, value in dict.items():
d[self.ref_type(key, self._remove)] = value # CHANGED
-if len(kwargs):
+if kwargs:
self.update(kwargs)
def __ior__(self, other):