Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 21:14:14 +08:00)
More ruff SIM fixes (#164695)
This PR applies ruff `SIM` rules to more files. Most changes simplify `dict.get(key, None)` to `dict.get(key)`, since `None` is already the default return value. Pull Request resolved: https://github.com/pytorch/pytorch/pull/164695 Approved by: https://github.com/ezyang
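
For context, `dict.get` already returns `None` when a key is absent, so passing `None` as an explicit default is redundant; this is the pattern ruff flags as SIM910. A minimal standalone sketch (the dictionary and key names are illustrative, not taken from the PR):

    # dict.get returns None by default, so both forms behave identically.
    kwargs = {'precision': 2e-4}

    before = kwargs.get('default_dtype', None)  # flagged by ruff SIM910
    after = kwargs.get('default_dtype')         # simplified, equivalent

    assert before is None and after is None
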
Committed by: PyTorch MergeBot
Parent: 54ae61c573
Commit: a029675f6f
@@ -3421,7 +3421,7 @@ class ModuleTest(TestBase):
             kwargs.get('FIXME_no_cuda_gradgrad_comparison', False)
         self.precision = kwargs.get('precision', 2e-4)
         self.check_forward_only = kwargs.get('check_forward_only', False)
-        self.default_dtype = kwargs.get('default_dtype', None)
+        self.default_dtype = kwargs.get('default_dtype')
         if self.default_dtype is None:
             self.default_dtype = torch.get_default_dtype()

@@ -3632,7 +3632,7 @@ class NewModuleTest(InputVariableMixin, ModuleTest):  # type: ignore[misc]
         self.test_cpu = kwargs.get('test_cpu', True)
         self.has_sparse_gradients = kwargs.get('has_sparse_gradients', False)
         self.check_batched_grad = kwargs.get('check_batched_grad', True)
-        self.gradcheck_fast_mode = kwargs.get('gradcheck_fast_mode', None)
+        self.gradcheck_fast_mode = kwargs.get('gradcheck_fast_mode')
         self.supports_forward_ad = kwargs.get('supports_forward_ad', False)
         self.supports_fwgrad_bwgrad = kwargs.get('supports_fwgrad_bwgrad', False)

@@ -3836,7 +3836,7 @@ class CriterionTest(InputVariableMixin, TestBase):  # type: ignore[misc]
         self.with_tf32 = kwargs.get('with_tf32', True)
         self.tf32_precision = kwargs.get('tf32_precision', 0.001)
         self.check_batched_grad = kwargs.get('check_batched_grad', True)
-        self.default_dtype = kwargs.get('default_dtype', None)
+        self.default_dtype = kwargs.get('default_dtype')
         if self.default_dtype is None:
             self.default_dtype = torch.get_default_dtype()

@@ -5124,7 +5124,7 @@ def gradcheck(fn, inputs, **kwargs):

     for key, value in default_values.items():
         # default value override values explicitly set to None
-        k = kwargs.get(key, None)
+        k = kwargs.get(key)
         kwargs[key] = k if k is not None else value

     return torch.autograd.gradcheck(fn, inputs, **kwargs)
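
The loop above (repeated in `gradgradcheck` below) treats an explicit `None` the same as a missing key: `kwargs.get(key)` yields `None` in both cases, and the default then wins. That is why dropping the redundant `None` argument is behavior-preserving here. A minimal standalone sketch of the merge pattern (dictionary contents are illustrative, not the PR's):

    # Defaults override kwargs that are absent *or* explicitly None.
    default_values = {'check_batched_grad': True, 'fast_mode': False}
    kwargs = {'check_batched_grad': None}  # caller passed None explicitly

    for key, value in default_values.items():
        k = kwargs.get(key)  # None if absent or explicitly None
        kwargs[key] = k if k is not None else value

    assert kwargs == {'check_batched_grad': True, 'fast_mode': False}
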
@@ -5144,7 +5144,7 @@ def gradgradcheck(fn, inputs, grad_outputs=None, **kwargs):

     for key, value in default_values.items():
         # default value override values explicitly set to None
-        k = kwargs.get(key, None)
+        k = kwargs.get(key)
         kwargs[key] = k if k is not None else value

     return torch.autograd.gradgradcheck(fn, inputs, grad_outputs, **kwargs)
@@ -40,7 +40,7 @@ class VerifyStateDictMixin:
         if not options.ignore_frozen_params:
             self.assertEqual(len(msd), len(dist_msd))
         for fqn, param in msd.items():
-            dist_param = dist_msd.get(fqn, None)
+            dist_param = dist_msd.get(fqn)
             if not options.ignore_frozen_params:
                 self.assertIsNotNone(dist_param, f"{fqn=}")
             try:
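
Here `dist_msd.get(fqn)` rather than `dist_msd[fqn]` lets a missing key flow through as `None`, which the subsequent `assertIsNotNone` (when frozen params are not ignored) reports along with the offending `fqn`, instead of the lookup raising a bare `KeyError`. A small sketch of the difference (names are illustrative):

    # .get turns a missing key into None instead of a KeyError.
    dist_msd = {'layer.weight': 0.0}
    assert dist_msd.get('layer.bias') is None  # what assertIsNotNone inspects
    # dist_msd['layer.bias'] would raise KeyError here.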