[BE]: Apply FURB118 (prev): replaces unnecessary lambdas with operator. (#116027)
This replaces a bunch of unnecessary lambdas with the operator package. This is semantically equivalent, but the operator package is faster, and arguably more readable. When the FURB rules are taken out of preview, I will enable it as a ruff check.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/116027
Approved by: https://github.com/malfet
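As a hedged illustration (not code from this commit): the sketch below shows the substitution pattern the PR applies, using only the standard-library functools and operator modules. The variable names and the timeit check are additions for demonstration, not part of the change.

import operator
import timeit
from functools import reduce

size = (4, 5, 6)

# Before: an ad-hoc lambda multiplies the dimensions together.
numel_before = reduce(lambda x, y: x * y, size, 1)

# After: operator.mul, a C-implemented callable with identical semantics.
numel_after = reduce(operator.mul, size, 1)

assert numel_before == numel_after == 120

# operator.mul avoids a Python-level function frame per element, which is
# where the speedup claimed in the commit message comes from; measure on
# your own interpreter to confirm.
print(timeit.timeit(lambda: reduce(lambda x, y: x * y, size, 1)))
print(timeit.timeit(lambda: reduce(operator.mul, size, 1)))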
committed by PyTorch MergeBot
parent 2d2016fdf8
commit 6de28e92d2
@@ -16,6 +16,7 @@ from torch.testing._internal.common_utils import (
 from torch.testing._internal.common_device_type import (
     instantiate_device_type_tests, onlyCUDA, dtypes, dtypesIfCPU, dtypesIfCUDA,
     onlyNativeDeviceTypes, skipXLA)
+import operator


 class TestIndexing(TestCase):
@@ -138,7 +139,7 @@ class TestIndexing(TestCase):
         def consec(size, start=1):
             # Creates the sequence in float since CPU half doesn't support the
             # needed operations. Converts to dtype before returning.
-            numel = reduce(lambda x, y: x * y, size, 1)
+            numel = reduce(operator.mul, size, 1)
             sequence = torch.ones(numel, dtype=torch.float, device=device).cumsum(0)
             sequence.add_(start - 1)
             return sequence.view(*size).to(dtype=dtype)
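An aside that is not part of the commit: on Python 3.8+, math.prod computes the same product as the reduce call in the diff. A minimal check, with a hypothetical shape chosen for illustration:

import math
import operator
from functools import reduce

size = (2, 3, 4)
# Both expressions compute the number of elements for a tensor of this shape.
assert reduce(operator.mul, size, 1) == math.prod(size) == 24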