[BE]: Apply FURB118 (prev): replaces unnecessary lambdas with operator. (#116027)
This replaces a bunch of unnecessary lambdas with the operator package. This is semantically equivalent, but the operator package is faster and arguably more readable. When the FURB rules are taken out of preview, I will enable it as a ruff check.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/116027
Approved by: https://github.com/malfet
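For illustration only (not part of the commit), a minimal sketch of the kind of rewrite FURB118 flags, using the same reduce call this diff touches; the shape tuple is a made-up example value:

```python
import functools
import operator

shape = (2, 3, 4)  # hypothetical example shape, not taken from the PR

# Before: an ad-hoc lambda passed to reduce.
numel_before = functools.reduce(lambda a, b: a * b, shape, 1)

# After: operator.mul is the equivalent built-in callable; it avoids the
# extra Python-level lambda call per element and reads more directly.
numel_after = functools.reduce(operator.mul, shape, 1)

assert numel_before == numel_after == 24
```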
committed by PyTorch MergeBot
parent 2d2016fdf8
commit 6de28e92d2
@@ -29,6 +29,7 @@ from torch.testing._internal import opinfo
 from torch.testing._internal.common_dtype import all_types_and_complex_and, floating_types
 from torch.testing._internal.common_modules import modules, module_db, ModuleInfo
 from torch.testing._internal.opinfo.core import SampleInput, DecorateInfo, OpInfo
+import operator
 
 # For testing TestCase methods and torch.testing functions
 class TestTesting(TestCase):
@@ -1427,7 +1428,7 @@ class TestMakeTensor(TestCase):
     @parametrize("noncontiguous", [False, True])
     @parametrize("shape", [tuple(), (0,), (1,), (1, 1), (2,), (2, 3), (8, 16, 32)])
     def test_noncontiguous(self, dtype, device, noncontiguous, shape):
-        numel = functools.reduce(lambda a, b: a * b, shape, 1)
+        numel = functools.reduce(operator.mul, shape, 1)
 
         t = torch.testing.make_tensor(shape, dtype=dtype, device=device, noncontiguous=noncontiguous)
         self.assertEqual(t.is_contiguous(), not noncontiguous or numel < 2)