[BE] Enable ruff's UP rules in pyproject.toml (#105437)
Signed-off-by: Justin Chu <justinchu@microsoft.com>
Pull Request resolved: https://github.com/pytorch/pytorch/pull/105437
Approved by: https://github.com/huydhn, https://github.com/malfet, https://github.com/Skylion007
Committed by: PyTorch MergeBot
Parent: 6b2d48e78c
Commit: de8bd108b4
@@ -55,6 +55,8 @@ ignore = [
     "SIM116", # Disable Use a dictionary instead of consecutive `if` statements
     "SIM117",
     "SIM118",
+    "UP006", # keep-runtime-typing
+    "UP007", # keep-runtime-typing
 ]
 line-length = 120
 select = [
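The two new ignores are ruff's "keep-runtime-typing" pair: UP006 rewrites typing.List/typing.Dict-style annotations to the built-in generics, and UP007 rewrites Optional/Union to the PEP 604 | syntax, rewrites that can matter for code that inspects annotations at runtime. A minimal sketch of the kind of rewrite being opted out of (illustrative function names, not from this PR):

from typing import Dict, List, Optional

# Spelling that UP006/UP007 would flag; ignoring the rules keeps it untouched.
def old_style(xs: List[int], table: Dict[str, int], name: Optional[str]) -> None:
    pass

# The modernized spelling the autofix would produce (needs Python 3.9+/3.10+ at runtime).
def new_style(xs: list[int], table: dict[str, int], name: str | None) -> None:
    pass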
@@ -66,6 +68,7 @@ select = [
     "SIM1",
     "W",
     # Not included in flake8
+    "UP",
     "PERF",
     "PLE",
     "TRY302",
@@ -73,6 +76,13 @@ select = [
 
 [tool.ruff.per-file-ignores]
 "__init__.py" = ["F401"]
+"test/jit/**" = [
+    "UP", # We don't want to modify the jit test as they test specify syntax
+]
+"torch/onnx/**" = [
+    "UP037", # ONNX does runtime type checking
+]
+
 "torchgen/api/types/__init__.py" = [
     "F401",
     "F403",
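The torch/onnx exclusion is narrower: UP037 strips quotes from annotations, and the comment notes that ONNX does runtime type checking, where a quoted forward reference and a bare name are not interchangeable. A standalone sketch of the rewrite the exclusion avoids (GraphNode is a made-up placeholder type):

class GraphNode:
    """Placeholder type used only for this illustration."""

# Quoted annotation: resolved lazily, which runtime annotation inspection can rely on.
def export_node(node: "GraphNode") -> "GraphNode":
    return node

# What UP037 would rewrite it to: quotes removed, so the name must be defined/importable here.
def export_node_unquoted(node: GraphNode) -> GraphNode:
    return node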
@@ -81,3 +91,6 @@ select = [
     "F401",
     "F403",
 ]
+"torch/utils/collect_env.py" = [
+    "UP", # collect_env.py needs to work with older versions of Python
+]
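collect_env.py opts out of the whole UP family because it is expected to run under old interpreters, and pyupgrade-style fixes assume modern syntax (f-strings, bare super(), PEP 604 unions). A rough, hypothetical example of the kind of line the exclusion leaves alone:

# Old-interpreter-friendly formatting; UP032 would rewrite it to an f-string.
def format_version(major, minor):
    return "Python {}.{}".format(major, minor)

print(format_version(3, 6))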
@@ -518,7 +518,7 @@ class CPUReproTests(TestCase):
 
         numerical_testsuit = [4.4, 4.5, 4.6, 5.5]
         for numerical_number in numerical_testsuit:
-            x = torch.ones((17)) * numerical_number
+            x = torch.ones(17) * numerical_number
             with config.patch({"cpp.simdlen": None}):
                 torch._dynamo.reset()
                 metrics.reset()
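The torch.ones((17)) -> torch.ones(17) change removes redundant parentheses (likely ruff's UP034): (17) is just the integer 17, not a tuple, so the two calls build the same length-17 tensor. A quick standalone check of the equivalence:

import torch

a = torch.ones((17)) * 4.4  # (17) is plain 17 in parentheses, not a 1-tuple
b = torch.ones(17) * 4.4    # equivalent call after the cleanup
assert torch.equal(a, b) and a.shape == (17,)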
@@ -14,7 +14,7 @@ from collections import namedtuple
 class TestLiteScriptModule(TestCase):
 
     def test_typing_namedtuple(self):
-        myNamedTuple = NamedTuple('myNamedTuple', [('a', List[torch.Tensor])])
+        myNamedTuple = NamedTuple('myNamedTuple', [('a', List[torch.Tensor])])  # noqa: UP014
 
         class MyTestModule(torch.nn.Module):
             def forward(self, a: torch.Tensor):
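The noqa: UP014 suppresses ruff's rule that converts the functional NamedTuple(...) call into the class-based syntax; the test intentionally exercises the functional spelling. For comparison, a standalone sketch of the two equivalent forms (MyPoint is an illustrative name):

from typing import List, NamedTuple

import torch

# Functional form: what the test keeps (and what UP014 would normally rewrite).
MyPoint = NamedTuple('MyPoint', [('a', List[torch.Tensor])])

# Class-based form: what UP014's autofix would produce.
class MyPointClass(NamedTuple):
    a: List[torch.Tensor]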
@@ -1209,9 +1209,10 @@ torch.cuda.synchronize()
             return torch.stack([col, col + 2], 1).view(2, 2, 2, 2)
 
         if adaptive:
-            cls_name = 'AdaptiveMaxPool{}d'.format(num_dim)
+            cls_name = 'AdaptiveMaxPool{}d'.format(num_dim)  # noqa: UP032
         else:
-            cls_name = 'MaxPool{}d'.format(num_dim)
+            # FIXME(#105716): Test fails when using f-string
+            cls_name = 'MaxPool{}d'.format(num_dim)  # noqa: UP032
         module_cls = getattr(nn, cls_name)
         module = module_cls(2, return_indices=True).to(device, dtype=dtype)
         numel = 4 ** (num_dim + 1)
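UP032 rewrites str.format calls into f-strings; the new FIXME records that the f-string version currently breaks this test (#105716), so both branches keep .format with a noqa. For reference, the rewrite the rule would otherwise apply, as a standalone sketch:

num_dim = 2

# Spelling kept in the test, suppressed with noqa: UP032.
cls_name = 'MaxPool{}d'.format(num_dim)

# The f-string form UP032 would produce.
cls_name_fstring = f'MaxPool{num_dim}d'

assert cls_name == cls_name_fstring == 'MaxPool2d'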
@@ -14257,7 +14257,7 @@ dedent """
         self.assertEqual(out, torch.tensor(6.0))
 
     def test_namedtuple_type_inference(self):
-        _AnnotatedNamedTuple = NamedTuple('_NamedTupleAnnotated', [('value', int)])
+        _AnnotatedNamedTuple = NamedTuple('_NamedTupleAnnotated', [('value', int)])  # noqa: UP014
         _UnannotatedNamedTuple = namedtuple('_NamedTupleUnAnnotated', ['value'])
 
         def test_check_named_tuple_value():