Enable all SIM rules except disabled ones (#164645)

`SIM` rules are useful for simplifying boolean expressions and enhancing code readability.
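
For orientation, here is a minimal, hypothetical sketch of the kinds of rewrites these rules suggest (variable names are illustrative, not taken from the diff; the codes follow flake8-simplify/Ruff's SIM naming):

```python
rank = 0
counts = {"broadcast": 3}

# SIM210: "True if cond else False" is just the condition (or bool(cond)).
requires_grad = True if rank == 0 else False   # before
requires_grad = rank == 0                      # after

# SIM211: "False if cond else True" is "not cond".
is_empty = False if rank > 0 else True         # before
is_empty = not rank > 0                        # after

# SIM401: "d[k] if k in d else default" is "d.get(k, default)".
fired = counts["allreduce"] if "allreduce" in counts else 0   # before
fired = counts.get("allreduce", 0)                            # after
```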

Pull Request resolved: https://github.com/pytorch/pytorch/pull/164645
Approved by: https://github.com/ezyang, https://github.com/mlazos
This commit is contained in:
Yuanyuan Chen
2025-10-17 07:27:06 +00:00
committed by PyTorch MergeBot
parent f1d882212a
commit e925dfcc6b
54 changed files with 98 additions and 134 deletions

View File

@@ -3074,7 +3074,7 @@ class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
wrong_dtype_shards, [10, 10], init_rrefs=True
)
-tensor_requires_grad = True if self.rank == 0 else False
+tensor_requires_grad = self.rank == 0
wrong_requires_grad_shards = [
sharded_tensor.Shard(
torch.randn(
@@ -3121,7 +3121,7 @@ class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
wrong_pin_memory_local_shards, [10, 10], init_rrefs=True
)
-tensor_pin_memory = True if self.rank == 0 else False
+tensor_pin_memory = self.rank == 0
wrong_pin_memory_shards_cross_ranks = [
sharded_tensor.Shard(
torch.randn(5, 5, pin_memory=tensor_pin_memory), local_shard_metadata
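
Both hunks above apply the same rewrite: when the ternary's branches are the literals `True` and `False`, the comparison already yields the boolean, so the ternary is redundant. A small standalone sketch (`rank` is a stand-in for `self.rank`):

```python
rank = 0

# Before: re-encodes a value that is already a bool.
tensor_requires_grad = True if rank == 0 else False
tensor_pin_memory = True if rank == 0 else False

# After: the comparison itself is the boolean.
assert tensor_requires_grad == (rank == 0)
assert tensor_pin_memory == (rank == 0)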

View File

@@ -152,7 +152,7 @@ class TestStorageBase:
self.rank = 0 if not dist.is_initialized() else dist.get_rank()
def _get_ranks(self, name):
-return self.fail_conf[name] if name in self.fail_conf else None
+return self.fail_conf.get(name, None)
def _fail_rank(self, name):
ranks = self._get_ranks(name)
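
The lookup in `_get_ranks` is the classic dict-get pattern: a membership test followed by indexing collapses into a single `.get()` call. A self-contained sketch with a hypothetical `fail_conf` mapping:

```python
fail_conf = {"fail_read": [0, 1]}

def get_ranks_before(name):
    # Two lookups: membership test, then indexing.
    return fail_conf[name] if name in fail_conf else None

def get_ranks_after(name):
    # One lookup; the explicit None default is optional since
    # dict.get already returns None for a missing key.
    return fail_conf.get(name, None)

assert get_ranks_before("fail_read") == get_ranks_after("fail_read") == [0, 1]
assert get_ranks_before("missing") is None and get_ranks_after("missing") is None
```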

View File

@@ -155,7 +155,7 @@ class TestFreezingWeights(FSDPTest):
ddp_kwargs = {
"device_ids": [self.rank],
"find_unused_parameters": True if disable_autograd else False,
"find_unused_parameters": bool(disable_autograd),
}
model = self._create_model(
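
Here the flag may be an arbitrary truthy value rather than a literal bool, so the rewrite keeps an explicit `bool(...)` coercion instead of dropping the conversion entirely. A sketch (the helper name is made up for illustration):

```python
def make_ddp_kwargs(disable_autograd, rank=0):
    return {
        "device_ids": [rank],
        # bool(...) preserves the coercion for truthy-but-not-True inputs.
        "find_unused_parameters": bool(disable_autograd),
    }

assert make_ddp_kwargs(True)["find_unused_parameters"] is True
assert make_ddp_kwargs(1)["find_unused_parameters"] is True
assert make_ddp_kwargs(None)["find_unused_parameters"] is False
```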

View File

@@ -66,7 +66,7 @@ class MockPipelineStage(_PipelineStageBase):
self.num_stages = kwargs.get("num_stages", 1)
self.group_size = kwargs.get("group_size", 1)
self.group_rank = kwargs.get("group_rank", 0)
-self.group = kwargs.get("group", None)
+self.group = kwargs.get("group")
def _create_grad_recv_info(self, *args, **kwargs):
return None
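
`dict.get` already returns `None` when the key is missing, so passing `None` as an explicit default is redundant (this is what Ruff's SIM910 flags). A quick check:

```python
kwargs = {"num_stages": 2}

# Equivalent: dict.get defaults to None for a missing key.
assert kwargs.get("group", None) is None
assert kwargs.get("group") is None

# Non-None defaults still have to be spelled out.
assert kwargs.get("num_stages", 1) == 2
assert kwargs.get("group_size", 1) == 1
```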

View File

@@ -1066,7 +1066,7 @@ class TestDTensorPlacementTypes(DTensorTestBase):
assert_array_equal(expected_pad_sizes, pad_sizes)
is_tensor_empty = [
-False if splitted_tensor.numel() > 0 else True
+not splitted_tensor.numel() > 0
for splitted_tensor in splitted_tensor_list
]
expected_is_tensor_empty = [True] * self.world_size
@@ -1089,12 +1089,10 @@ class TestDTensorPlacementTypes(DTensorTestBase):
for i, tensor in enumerate(splitted_tensor_list)
]
expected_is_tensor_empty = [
-False if idx < size else True
-for idx, _ in enumerate(range(self.world_size))
+not idx < size for idx, _ in enumerate(range(self.world_size))
]
is_tensor_empty = [
-False if unpadded_tensor.numel() > 0 else True
-for unpadded_tensor in unpadded_list
+not unpadded_tensor.numel() > 0 for unpadded_tensor in unpadded_list
]
assert_array_equal(expected_is_tensor_empty, is_tensor_empty)
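
All three rewrites in this file replace `False if cond else True` with `not cond` inside list comprehensions. A self-contained sketch of the first one, using a tiny tensor list in place of the test's split results:

```python
import torch

splitted_tensor_list = [torch.empty(0), torch.randn(3)]

# Before: the ternary merely negates the condition.
is_tensor_empty = [
    False if t.numel() > 0 else True for t in splitted_tensor_list
]

# After: "not" expresses the negation directly.
assert is_tensor_empty == [not t.numel() > 0 for t in splitted_tensor_list]
assert is_tensor_empty == [True, False]
```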

View File

@@ -2770,11 +2770,7 @@ class WorkHookTest(MultiProcessTestCase):
# from rank0 to other ranks. However, this is DDP's internal implementation,
# which is subject to change in future versions.
self.assertTrue(num_hook_fired[OpType.BROADCAST] > 0)
-ctor_allreduce = (
-num_hook_fired[OpType.ALLREDUCE]
-if OpType.ALLREDUCE in num_hook_fired
-else 0
-)
+ctor_allreduce = num_hook_fired.get(OpType.ALLREDUCE, 0)
x = torch.zeros(2, 1000).cuda(self.rank)
ddp(x).sum().backward()
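
The multi-line conditional over `num_hook_fired` is the same dict-get pattern as above, this time with a non-None default. A sketch with a hypothetical stand-in for the `OpType` enum:

```python
from enum import Enum

class OpType(Enum):          # hypothetical stand-in for the c10d OpType
    BROADCAST = 0
    ALLREDUCE = 1

num_hook_fired = {OpType.BROADCAST: 2}

# Before: a four-line conditional expression.
ctor_allreduce = (
    num_hook_fired[OpType.ALLREDUCE]
    if OpType.ALLREDUCE in num_hook_fired
    else 0
)

# After: one lookup with an explicit fallback.
assert num_hook_fired.get(OpType.ALLREDUCE, 0) == ctor_allreduce == 0
```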