[BE]: Enable some basic pytest style rules (#110362)
Adds some basic flake8-pytest-style rules from ruff, together with their autofixes. I just picked a couple of uncontroversial rules for a consistent pytest style that the codebase was already following. We should consider enabling more of them in the future, but this is a good start. I also upgraded ruff to the latest version.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/110362
Approved by: https://github.com/ezyang, https://github.com/albanD, https://github.com/kit1980
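As a minimal illustration (not part of this commit's diff), this is the kind of rewrite the PT006 autofix performs: argument names passed to `pytest.mark.parametrize` as one comma-separated string become a tuple of strings. The test function and its values below are made up for the example.

```python
import pytest

# Before the autofix (flagged by PT006):
# @pytest.mark.parametrize('base, exponent, expected', [(2, 3, 8), (5, 0, 1)])

# After the autofix: argnames given as a tuple of strings.
@pytest.mark.parametrize(('base', 'exponent', 'expected'), [(2, 3, 8), (5, 0, 1)])
def test_pow(base, exponent, expected):
    assert base ** exponent == expected
```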
commit 668eb55488
parent c95cf4b4c9
committed by PyTorch MergeBot
@@ -2661,6 +2661,6 @@ init_command = [
     'python3',
     'tools/linter/adapters/pip_init.py',
     '--dry-run={{DRYRUN}}',
-    'ruff==0.0.290',
+    'ruff==0.0.291',
 ]
 is_formatter = true
@@ -76,6 +76,12 @@ select = [
     "PLE",
     "PLR1722", # use sys exit
     "PLW3301", # nested min max
+    "PT006", # TODO: enable more PT rules
+    "PT022",
+    "PT023",
+    "PT024",
+    "PT025",
+    "PT026",
     "RUF017",
     "TRY302",
 ]
@@ -64,7 +64,7 @@ def build_constraint(constraint_fn, args, is_cuda=False):
     t = torch.cuda.DoubleTensor if is_cuda else torch.DoubleTensor
     return constraint_fn(*(t(x) if isinstance(x, list) else x for x in args))

-@pytest.mark.parametrize('constraint_fn, result, value', EXAMPLES)
+@pytest.mark.parametrize(('constraint_fn', 'result', 'value'), EXAMPLES)
 @pytest.mark.parametrize('is_cuda', [False,
                          pytest.param(True, marks=pytest.mark.skipif(not TEST_CUDA,
                                                                      reason='CUDA not found.'))])
@@ -73,7 +73,7 @@ def test_constraint(constraint_fn, result, value, is_cuda):
     assert constraint_fn.check(t(value)).all() == result


-@pytest.mark.parametrize('constraint_fn, args', [(c[0], c[1:]) for c in CONSTRAINTS])
+@pytest.mark.parametrize(('constraint_fn', 'args'), [(c[0], c[1:]) for c in CONSTRAINTS])
 @pytest.mark.parametrize('is_cuda', [False,
                          pytest.param(True, marks=pytest.mark.skipif(not TEST_CUDA,
                                                                      reason='CUDA not found.'))])
@@ -104,7 +104,7 @@ def test_biject_to(constraint_fn, args, is_cuda):
     assert j.shape == x.shape[:x.dim() - t.domain.event_dim]


-@pytest.mark.parametrize('constraint_fn, args', [(c[0], c[1:]) for c in CONSTRAINTS])
+@pytest.mark.parametrize(('constraint_fn', 'args'), [(c[0], c[1:]) for c in CONSTRAINTS])
 @pytest.mark.parametrize('is_cuda', [False,
                          pytest.param(True, marks=pytest.mark.skipif(not TEST_CUDA,
                                                                      reason='CUDA not found.'))])
@@ -252,7 +252,7 @@ base_dist1 = Dirichlet(torch.ones(4, 4))
 base_dist2 = Normal(torch.zeros(3, 4, 4), torch.ones(3, 4, 4))


-@pytest.mark.parametrize('batch_shape, event_shape, dist', [
+@pytest.mark.parametrize(('batch_shape', 'event_shape', 'dist'), [
     ((4, 4), (), base_dist0),
     ((4,), (4,), base_dist1),
     ((4, 4), (), TransformedDistribution(base_dist0, [transform0])),
@@ -3774,7 +3774,7 @@ class TestIO(TestCase):
         filename = tmp_path / "file"
         if request.param == "string":
             filename = str(filename)
-        yield filename
+        return filename

     def test_nofile(self):
         # this should probably be supported as a file
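For the last hunk, a minimal sketch of the pattern PT022 targets, with made-up fixture names: a fixture whose trailing `yield` has no teardown code after it can simply `return` the value, while a fixture that does need cleanup keeps the `yield`.

```python
import pytest

@pytest.fixture()
def plain_filename(tmp_path):
    # Nothing runs after the value is handed out, so PT022 prefers a plain return.
    return tmp_path / "file"

@pytest.fixture()
def open_file(tmp_path):
    # Teardown is needed here, so yield is appropriate and PT022 does not apply.
    f = open(tmp_path / "file", "w")
    yield f
    f.close()
```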