Fix flake8 lint errors reported by ruff - take 2 (#99798)

Replaces #99784. This PR is a pure autofix.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/99798
Approved by: https://github.com/Skylion007, https://github.com/kit1980
Author:    Justin Chu
Date:      2023-04-22 09:41:27 -07:00
Committer: PyTorch MergeBot
Commit:    79c9e82e27
Parent:    dc1c0924ec

16 changed files with 21 additions and 23 deletions
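
For context, most of the diff rewrites negated membership and identity tests: "not (x in y)" becomes "x not in y" and "not (x is None)" becomes "x is not None", the idioms flagged as E713 and E714 by pycodestyle/flake8. The removed dictionary entries ('segment_reduce' and '_segment_reduce_lengths') are presumably duplicate-key fixes. A minimal sketch of the rewritten idioms follows; the variable names are hypothetical and not taken from the diff:

    # Illustrative only; names are hypothetical, not from the diff.
    noncomposite_ops = {"aten::add"}
    name = "aten::mm"
    lhs = None

    # E713: membership tests should use "not in" instead of negating "in".
    assert (not (name in noncomposite_ops)) == (name not in noncomposite_ops)

    # E714: identity tests should use "is not" instead of negating "is".
    assert (not (lhs is None)) == (lhs is not None)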

@@ -103,7 +103,7 @@ def gen_data(special_op_lists, analysis_name):
     with open(f"{analysis_name}", 'w') as f:
         for op in ops:
             info = [
-                op['full_name'], op['meta'], not (op['full_name'] in noncomposite_ops)
+                op['full_name'], op['meta'], op['full_name'] not in noncomposite_ops
             ] + [check(op) for check in special_op_lists]
             f.write(','.join([str(i) for i in info]) + '\n')

@@ -142,7 +142,7 @@ class TestNonUniformObserver(unittest.TestCase):
         quantlevels_test_list = quantization_levels.tolist()
         negatives_contained = True
         for ele in quantlevels_test_list:
-            if not (-ele) in quantlevels_test_list:
+            if -ele not in quantlevels_test_list:
                 negatives_contained = False
         self.assertTrue(negatives_contained)

@@ -81,7 +81,7 @@ for line in lines:
 # if 'cpu' or 'CPU' is in the name and 'cuda' or 'CUDA' is not in the name, then skip it
 def is_cpu_only(name):
     name = name.lower()
-    return ('cpu' in name) and not ('cuda' in name)
+    return ('cpu' in name) and "cuda" not in name
 ALL_TESTS = [x for x in ALL_TESTS if not is_cpu_only(x)]

@@ -6125,7 +6125,7 @@ a")
         @torch.jit.script
         def test_bool_arith_not(lhs):
-            if not (lhs is None):
+            if lhs is not None:
                 return 1
             else:
                 return 2

@@ -1595,7 +1595,7 @@ class TestTEFuser(JitTestCase):
                 t = torch.jit.trace(fn, (x, y))
                 t(x, y)
                 self.assertEqual(ref, t(x, y))
-                if not str(size) in skip_is_fused_check_sizes:
+                if str(size) not in skip_is_fused_check_sizes:
                     self.assertAllFused(t.graph_for(x, y))
             except Exception as e:
                 raise RuntimeError(

@@ -576,9 +576,7 @@ def mps_ops_modifier(ops):
         'unique': None,
         'vdot': None,
         'view_as_complex': None,
-        'segment_reduce': None,
         'segment_reduce_': None,
-        '_segment_reduce_lengths': None,
         '_upsample_bilinear2d_aa': None,
         'geometric' : None,
         'geometric_': None,

@@ -853,12 +853,12 @@ def gen_variable_type_func(
     if (
         fn.info is None
-        and not str(f.func.name.name) in RESET_GRAD_ACCUMULATOR
-        and not get_base_name(f) in DONT_REQUIRE_DERIVATIVE
+        and str(f.func.name.name) not in RESET_GRAD_ACCUMULATOR
+        and get_base_name(f) not in DONT_REQUIRE_DERIVATIVE
         and len(gen_differentiable_outputs(fn)) > 0
-        and not cpp.name(f.func) in DONT_ENFORCE_SAME_TENSOR_IMPL_OR_STORAGE
-        and not type_wrapper_name(f) in DONT_ENFORCE_STORAGE_IMPL_USE_COUNT
-        and not type_wrapper_name(f) in DONT_ENFORCE_TENSOR_IMPL_USE_COUNT
+        and cpp.name(f.func) not in DONT_ENFORCE_SAME_TENSOR_IMPL_OR_STORAGE
+        and type_wrapper_name(f) not in DONT_ENFORCE_STORAGE_IMPL_USE_COUNT
+        and type_wrapper_name(f) not in DONT_ENFORCE_TENSOR_IMPL_USE_COUNT
     ):
         # NOTE: [ Registering AutogradNotImplemented boxed kernel ]
         #
@@ -1340,7 +1340,7 @@ def emit_body(
         )
         # Check properties of outputs (enforce (2), (3))
-        if not f.func.kind() in (SchemaKind.inplace, SchemaKind.out):
+        if f.func.kind() not in (SchemaKind.inplace, SchemaKind.out):
             base_name = f.func.name.name.base  # TODO: should be str(f.func.name.name)?
             aliased_arg_name = ALL_VIEW_FUNCTIONS.get(base_name, None)
             if aliased_arg_name is not None:

@@ -1801,7 +1801,7 @@ class CppVecKernelChecker(CppVecKernel):
             if name in VecCheckerProxy.bin_cmp_ops:
                 return VecCheckerProxy._bin_cmp_op(args, kwargs)
-            if not (name in self.fast_vec_list):
+            if name not in self.fast_vec_list:
                 self.disable_vec(f"op: {name}")
             return self.simd_vec

@@ -187,7 +187,7 @@ class Tensor(torch._C._TensorBase):
                 if self.grad is not None:
                     new_tensor.grad = self.grad.__deepcopy__(memo)
-                if not type(self) is Tensor:
+                if type(self) is not Tensor:
                     if type(new_tensor) is not type(self):
                         raise RuntimeError(
                             "Type of deepcopy result does not match the type of the source tensor. "

@@ -868,7 +868,7 @@ def _prepare_n_shadows_add_loggers_model(
     qconfig_mapping: QConfigMapping,
     backend_config: BackendConfig,
 ) -> torch.nn.Module:
-    """
+    r"""
     Note: this API is not recommended for wide usage, it is only
     provided for customers who need to migrate from the `add_loggers`
     API.
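
A note on the docstring change above: switching """ to r""" is consistent with fixing invalid escape sequence warnings (W605) when a docstring contains backslashes; in a raw string, sequences such as \d stay literal. A tiny sketch (the function name is hypothetical):

    def example():
        r"""Raw docstring, so \d is not treated as an escape sequence."""
        return None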

@@ -139,7 +139,7 @@ class _NSGraphMatchableSubgraphsIterator:
     def _is_matchable(self, node: Node) -> bool:
         if node.op == 'call_function':
-            return not (node.target in self.non_matchable_functions)
+            return node.target not in self.non_matchable_functions
         elif node.op == 'call_module':
             assert isinstance(node.target, str)
             target_mod = getattr_from_fqn(self.gm, node.target)
@@ -147,7 +147,7 @@ class _NSGraphMatchableSubgraphsIterator:
                 any(isinstance(target_mod, t)  # type: ignore[arg-type]
                     for t in self.non_matchable_modules)
         elif node.op == 'call_method':
-            return not (node.target in self.non_matchable_methods)
+            return node.target not in self.non_matchable_methods
         else:
             return False

@@ -669,7 +669,7 @@ def create_add_loggers_graph(
     qconfig_mapping: QConfigMapping,
     node_name_to_qconfig: Dict[str, QConfigAny],
 ) -> None:
-    """
+    r"""
     Given a model, a model graph partition (currently a set of matched
     subgraphs) and instructions how to transform each subgraph
     (currently quantizing it according to qconfig_mapping), modifies

@@ -29,7 +29,7 @@ def _get_valid_name(name):
 def _log_sparsified_level(model, data_sparsifier) -> None:
     # Show the level of sparsity AFTER step:
     for name, parameter in model.named_parameters():
-        if not (type(parameter) in SUPPORTED_TYPES):
+        if type(parameter) not in SUPPORTED_TYPES:
             continue
         valid_name = _get_valid_name(name)
         mask = data_sparsifier.get_mask(name=valid_name)

@@ -1672,7 +1672,7 @@ class DimConstraints:
         for s, congruences in reduced_congruences.items():
             for congruence in congruences:
                 # any congruence that cannot be checked becomes a dynamic constraint as well
-                if not (s in self._substitutions) or not sympy.checksol(congruence, {s: self._substitutions[s]}):
+                if s not in self._substitutions or not sympy.checksol(congruence, {s: self._substitutions[s]}):
                     self._dynamic_results.add(self._dcp.doprint(sympy.Eq(congruence, 0)))
     def prettify_results(self):

@@ -222,7 +222,7 @@ def type_matches(signature_type : Any, argument_type : Any):
         return issubclass(argument_type.__args__[0], sig_el_type)
     def is_homogeneous_tuple(t):
-        if not getattr(t, '__origin__', None) in {tuple, Tuple}:
+        if getattr(t, "__origin__", None) not in {tuple, Tuple}:
             return False
         contained = t.__args__
         if t.__args__ == ((),):  # Tuple[()].__args__ == ((),) for some reason

@@ -71,7 +71,7 @@ def _window_function_checks(function_name: str, M: int, dtype: torch.dtype, layo
         raise ValueError(f'{function_name} requires non-negative window length, got M={M}')
     if layout is not torch.strided:
         raise ValueError(f'{function_name} is implemented for strided tensors only, got: {layout}')
-    if not (dtype in [torch.float32, torch.float64]):
+    if dtype not in [torch.float32, torch.float64]:
         raise ValueError(f'{function_name} expects float32 or float64 dtypes, got: {dtype}')