From e59513618727068a949b670312b09634b90fae5e Mon Sep 17 00:00:00 2001
From: Yuanyuan Chen
Date: Sat, 18 Oct 2025 05:44:10 +0000
Subject: [PATCH] Enable PLC1802 on ruff (#165813)

This PR enables the ruff check `PLC1802`, which flags `len` calls on
sequences used directly as a boolean condition, without an explicit
comparison. Truth-testing the sequence itself is equivalent and more
idiomatic, since empty containers are falsy.
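
For illustration, the pattern and the suggested rewrite (a minimal sketch
with a hypothetical `items` list, mirroring the changes in this diff):

    items = [1, 2, 3]

    # Flagged by PLC1802: truth-testing the length of a sized container
    if len(items):
        print("non-empty")

    # Suggested rewrite: an empty container is already falsy, so test it directly
    if items:
        print("non-empty")

The check can be run locally with `ruff check --select PLC1802 .`.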
Pull Request resolved: https://github.com/pytorch/pytorch/pull/165813
Approved by: https://github.com/ezyang
---
 benchmarks/dynamo/huggingface.py                          | 2 +-
 pyproject.toml                                            | 1 +
 test/quantization/core/test_quantized_tensor.py           | 2 +-
 torch/_dynamo/backends/distributed.py                     | 6 +++---
 torch/_dynamo/output_graph.py                             | 4 ++--
 torch/_dynamo/variables/builtin.py                        | 2 +-
 torch/_inductor/comms.py                                  | 2 +-
 torch/_inductor/runtime/triton_heuristics.py              | 2 +-
 torch/_inductor/utils.py                                  | 2 +-
 torch/distributed/fsdp/_fully_shard/_fsdp_param_group.py  | 6 ++----
 torch/distributed/pipelining/schedules.py                 | 2 +-
 torch/hub.py                                              | 2 +-
 torch/testing/_internal/opinfo/core.py                    | 2 +-
 torch/utils/data/datapipes/dataframe/datapipes.py         | 6 +++---
 torch/utils/data/datapipes/iter/combining.py              | 2 +-
 torch/utils/data/datapipes/iter/selecting.py              | 2 +-
 torch/utils/weak.py                                       | 2 +-
 17 files changed, 23 insertions(+), 24 deletions(-)

diff --git a/benchmarks/dynamo/huggingface.py b/benchmarks/dynamo/huggingface.py
index 2c774bbb1d2e..d856a241ccac 100755
--- a/benchmarks/dynamo/huggingface.py
+++ b/benchmarks/dynamo/huggingface.py
@@ -124,7 +124,7 @@ with open(MODELS_FILENAME) as fh:
             continue
         batch_size = int(batch_size)
         BATCH_SIZE_KNOWN_MODELS[model_name] = batch_size
-assert len(BATCH_SIZE_KNOWN_MODELS)
+assert BATCH_SIZE_KNOWN_MODELS
 
 
 try:
diff --git a/pyproject.toml b/pyproject.toml
index 8e29c1c81d56..e42f08d296f3 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -212,6 +212,7 @@ select = [
     "PIE810",
     "PLC0131", # type bivariance
     "PLC0132", # type param mismatch
     "PLC0205", # string as __slots__
+    "PLC1802", # len({expression}) used as condition without comparison
     "PLC3002", # unnecessary-direct-lambda-call
     "PLE",
diff --git a/test/quantization/core/test_quantized_tensor.py b/test/quantization/core/test_quantized_tensor.py
index f241cc438757..b46e2df1d9ee 100644
--- a/test/quantization/core/test_quantized_tensor.py
+++ b/test/quantization/core/test_quantized_tensor.py
@@ -100,7 +100,7 @@ def param_search_greedy(x, bit_rate, n_bins=200, ratio=0.16):
             cur_min, cur_max, cur_loss = cur_min + stepsize, cur_max, loss1
         else:
             cur_min, cur_max, cur_loss = cur_min, cur_max - stepsize, loss2
-    if len(solutions):
+    if solutions:
         best = solutions[0]
         for solution in solutions:
             if solution[-1] < best[-1]:
diff --git a/torch/_dynamo/backends/distributed.py b/torch/_dynamo/backends/distributed.py
index b282a6218816..6be9690c6a1c 100644
--- a/torch/_dynamo/backends/distributed.py
+++ b/torch/_dynamo/backends/distributed.py
@@ -98,14 +98,14 @@ def pretty_print_buckets(buckets: list[Bucket], bucket_bytes_cap: int) -> None:
             )
         )
 
-    if len(rows):
+    if rows:
         log.info(
             "\nDDPOptimizer used bucket cap %s and created %d buckets. Enable debug logs for detailed bucket info.",
             bucket_bytes_cap,
             len(buckets),
         )
 
-    if len(extended_buckets):
+    if extended_buckets:
         log.warning(
             "Some buckets were extended beyond their requested parameter capacities"
             " in order to ensure each subgraph has an output node, required for fx graph partitioning."
@@ -122,7 +122,7 @@ def pretty_print_buckets(buckets: list[Bucket], bucket_bytes_cap: int) -> None:
             tabulate(rows, headers=headers, tablefmt="simple_grid"),
         )
 
-    if len(extended_buckets):
+    if extended_buckets:
         log.warning(
             "DDPOptimizer extended these buckets to ensure per-subgraph output nodes:\n%s",
             tabulate(
diff --git a/torch/_dynamo/output_graph.py b/torch/_dynamo/output_graph.py
index 9bce964c3f1a..f39d80f89b45 100644
--- a/torch/_dynamo/output_graph.py
+++ b/torch/_dynamo/output_graph.py
@@ -1867,7 +1867,7 @@ class OutputGraph(OutputGraphCommon):
                 _get_source_debug_name(var.source) for var in potential_side_effects
             ]
 
-            if len(side_effect_refs):
+            if side_effect_refs:
                 warnings.warn(
                     f"While exporting, we found certain side effects happened in the model.forward. "
                     f"Here are the list of potential sources you can double check: {side_effect_refs}"
@@ -3736,7 +3736,7 @@ class SubgraphTracer(fx.Tracer):
                 if v1 != v2
             ]
 
-        if len(mutated_inputs):
+        if mutated_inputs:
             mutated_nodes = [input_nodes[i] for i in mutated_inputs]
             msg = f"Input mutation detected at {mutated_nodes}"
             return MutationInfo(True, msg)
diff --git a/torch/_dynamo/variables/builtin.py b/torch/_dynamo/variables/builtin.py
index a03f7d0f4d74..09bdb81150e6 100644
--- a/torch/_dynamo/variables/builtin.py
+++ b/torch/_dynamo/variables/builtin.py
@@ -1847,7 +1847,7 @@ class BuiltinVariable(VariableTracker):
                 polyfills.builtins.iter_
             ).call_function(tx, [obj, *args], {})
 
-        if len(args):
+        if args:
             # iter(obj, sentinel) returns an object that implements
             # __iter__ and __next__ methods (UserDefinedObjectVariable)
             # Wrap the return value in a IteratorVariable subclass (LazyObjectIteratorVariable)
diff --git a/torch/_inductor/comms.py b/torch/_inductor/comms.py
index 86f272c8b24e..3cf0156e043a 100644
--- a/torch/_inductor/comms.py
+++ b/torch/_inductor/comms.py
@@ -834,7 +834,7 @@ def _schedule_for_comm(
             collective_cost -= snode_to_cost[candidate.snode]
 
     heapq.heapify(ready)
-    while len(ready):
+    while ready:
         snode = heapq.heappop(ready).snode
         if reorder_for_overlap and contains_collective(snode):
             schedule_collective_for_overlap(snode)
diff --git a/torch/_inductor/runtime/triton_heuristics.py b/torch/_inductor/runtime/triton_heuristics.py
index 44b567bf5ecd..2ae2880fb018 100644
--- a/torch/_inductor/runtime/triton_heuristics.py
+++ b/torch/_inductor/runtime/triton_heuristics.py
@@ -2895,7 +2895,7 @@ def match_target_block_product(
         relative_scores[dim] = score / total_score
 
     # Scale up dimensions by their relative scores until we reach the target
-    while curr_block_product < target_block_product and len(relative_scores):
+    while curr_block_product < target_block_product and relative_scores:
         dim, score = max(relative_scores.items(), key=lambda item: item[1])
 
         # Check if we've hit the max for this dimension
diff --git a/torch/_inductor/utils.py b/torch/_inductor/utils.py
index 233a294aaed6..f1c7f23cf719 100644
--- a/torch/_inductor/utils.py
+++ b/torch/_inductor/utils.py
@@ -792,7 +792,7 @@ def get_kernel_metadata(
     # where `inductor_nodes` contains nodes from multiple graph instances
     # is not supported. An example of this is conditional statements.
     single_graph = None
-    if len(inductor_nodes):
+    if inductor_nodes:
         unique_graphs = OrderedSet(n.graph for n in inductor_nodes)
         if len(unique_graphs) == 1:
             single_graph = inductor_nodes[0].graph
diff --git a/torch/distributed/fsdp/_fully_shard/_fsdp_param_group.py b/torch/distributed/fsdp/_fully_shard/_fsdp_param_group.py
index 39d5711ef33b..32939a554503 100644
--- a/torch/distributed/fsdp/_fully_shard/_fsdp_param_group.py
+++ b/torch/distributed/fsdp/_fully_shard/_fsdp_param_group.py
@@ -237,7 +237,7 @@ class FSDPParamGroup:
             raise AssertionError(
                 f"FSDP expects uniform original parameter dtype but got {orig_dtypes}"
             )
-        self._orig_dtype = next(iter(orig_dtypes)) if len(trainable_params) else None
+        self._orig_dtype = next(iter(orig_dtypes)) if trainable_params else None
         if len(trainable_params) > 0 and len(reduce_dtypes) != 1:
             # This can be relaxed if we issue one reduce-scatter per reduce
             # dtype (but we would need a way for users to specify multiple
@@ -245,9 +245,7 @@ class FSDPParamGroup:
             raise AssertionError(
                 f"FSDP expects uniform reduce dtype but got {reduce_dtypes}"
             )
-        self._reduce_dtype = (
-            next(iter(reduce_dtypes)) if len(trainable_params) else None
-        )
+        self._reduce_dtype = next(iter(reduce_dtypes)) if trainable_params else None
 
     def lazy_init(self):
         # Lazy init should be idempotent
diff --git a/torch/distributed/pipelining/schedules.py b/torch/distributed/pipelining/schedules.py
index 589505de4e4a..067a9351d823 100644
--- a/torch/distributed/pipelining/schedules.py
+++ b/torch/distributed/pipelining/schedules.py
@@ -2178,7 +2178,7 @@ BACKWARD_INPUT, BACKWARD_WEIGHT, and OVERLAP_F_B are supported."
                 raise e
 
         # Mostly these operations should have finished long ago, but there isn't an obvious time when to wait for them
-        while len(send_ops):
+        while send_ops:
             _wait_batch_p2p(send_ops.pop())
 
         assert len(self.unshard_ops) == 0, "Unused unshard operations"
diff --git a/torch/hub.py b/torch/hub.py
index 4b68e997162a..d3328d1abe6e 100644
--- a/torch/hub.py
+++ b/torch/hub.py
@@ -372,7 +372,7 @@ def _check_dependencies(m):
 
     if dependencies is not None:
         missing_deps = [pkg for pkg in dependencies if not _check_module_exists(pkg)]
-        if len(missing_deps):
+        if missing_deps:
             raise RuntimeError(f"Missing dependencies: {', '.join(missing_deps)}")
 
 
diff --git a/torch/testing/_internal/opinfo/core.py b/torch/testing/_internal/opinfo/core.py
index 4a31fb454b5a..685fa2fd2efd 100644
--- a/torch/testing/_internal/opinfo/core.py
+++ b/torch/testing/_internal/opinfo/core.py
@@ -166,7 +166,7 @@ class SampleInput:
 A SampleInput can be constructed "naturally" with *args and **kwargs or by
 explicitly setting the "args" and "kwargs" parameters, but the two
 methods of construction cannot be mixed!"""
-        elif len(var_args) or len(var_kwargs):
+        elif var_args or var_kwargs:
             assert (
                 output_process_fn_grad is None
                 and broadcasts_input is None
diff --git a/torch/utils/data/datapipes/dataframe/datapipes.py b/torch/utils/data/datapipes/dataframe/datapipes.py
index 2bf0dda77752..0c1b416e99c2 100644
--- a/torch/utils/data/datapipes/dataframe/datapipes.py
+++ b/torch/utils/data/datapipes/dataframe/datapipes.py
@@ -53,7 +53,7 @@ class ConcatDataFramesPipe(DFIterDataPipe):
             if len(buffer) == self.n_batch:
                 yield df_wrapper.concat(buffer)
                 buffer = []
-        if len(buffer):
+        if buffer:
             yield df_wrapper.concat(buffer)
 
 
@@ -78,7 +78,7 @@ class ShuffleDataFramesPipe(DFIterDataPipe):
             if len(buffer) == size:
                 yield df_wrapper.concat(buffer)
                 buffer = []
-        if len(buffer):
+        if buffer:
             yield df_wrapper.concat(buffer)
 
 
@@ -107,7 +107,7 @@ class FilterDataFramesPipe(DFIterDataPipe):
             if len(buffer) == size:
                 yield df_wrapper.concat(buffer)
                 buffer = []
-        if len(buffer):
+        if buffer:
             yield df_wrapper.concat(buffer)
 
 
diff --git a/torch/utils/data/datapipes/iter/combining.py b/torch/utils/data/datapipes/iter/combining.py
index 36afe6769eb1..22f27327b2ee 100644
--- a/torch/utils/data/datapipes/iter/combining.py
+++ b/torch/utils/data/datapipes/iter/combining.py
@@ -626,7 +626,7 @@ class MultiplexerIterDataPipe(IterDataPipe):
 
     def __iter__(self):
         iterators = [iter(x) for x in self.datapipes]
-        while len(iterators):
+        while iterators:
             for it in iterators:
                 try:
                     value = next(it)
diff --git a/torch/utils/data/datapipes/iter/selecting.py b/torch/utils/data/datapipes/iter/selecting.py
index 78d1820cb6aa..afb0e91d8557 100644
--- a/torch/utils/data/datapipes/iter/selecting.py
+++ b/torch/utils/data/datapipes/iter/selecting.py
@@ -88,7 +88,7 @@ class FilterIterDataPipe(IterDataPipe[_T_co]):
         for idx, mask in enumerate(df_wrapper.iterate(condition)):
             if mask:
                 result.append(df_wrapper.get_item(data, idx))
-        if len(result):
+        if result:
             return True, df_wrapper.concat(result)
         else:
             return False, None  # type: ignore[return-value]
diff --git a/torch/utils/weak.py b/torch/utils/weak.py
index cb8862e64531..ed311cd05956 100644
--- a/torch/utils/weak.py
+++ b/torch/utils/weak.py
@@ -309,7 +309,7 @@ class WeakIdKeyDictionary(MutableMapping):
                 dict = type({})(dict)
             for key, value in dict.items():
                 d[self.ref_type(key, self._remove)] = value  # CHANGED
-        if len(kwargs):
+        if kwargs:
             self.update(kwargs)
 
     def __ior__(self, other):