Fix type promotion for ldexp (#133519)

According to the documentation, `ldexp` of a half tensor and an int tensor should return a half tensor, and `ldexp` of a double tensor should not overflow for a 64-bit exponent.
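Concretely, mirroring the regression tests added below:

```python
import torch

# A half mantissa with an int exponent should stay half, not promote to float32
mantissas = torch.randn(64, dtype=torch.half)
exponents = torch.randint(-5, 5, (64,))
assert torch.ldexp(mantissas, exponents).dtype == torch.half

# 2**128 overflows float32 but is well within float64 range, so a double
# mantissa with an int64 exponent must not come back as inf
out = torch.ldexp(
    torch.tensor([1.0], dtype=torch.float64),
    torch.tensor([128], dtype=torch.int64),
)
assert torch.isfinite(out).all()
```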

Introduce a `_pow2` helper function that does not follow the scalar-to-float32 promotion pattern when `self` is a reduced-precision float or a double, so that the power-of-two factor is computed in `self`'s dtype.
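The helper itself lives in ATen and is not part of the diff below. As a rough illustration only, here is a minimal Python sketch of the promotion rule, assuming `ldexp` decomposes into `self * 2**exp` (inferred from the `_pow2` name and the promotion pattern mentioned above); the function names and exact dtype dispatch here are illustrative, not the ATen code:

```python
import torch

def _pow2_sketch(self: torch.Tensor, exp: torch.Tensor) -> torch.Tensor:
    # Default scalar promotion for pow(2.0, int_tensor) yields float32, which
    # discards a half/bfloat16 result dtype and overflows double at 2**128.
    if self.dtype in (torch.half, torch.bfloat16, torch.double):
        two = torch.tensor(2.0, dtype=self.dtype, device=exp.device)
    else:
        two = torch.tensor(2.0, device=exp.device)  # default float32 path
    return torch.pow(two, exp)

def _ldexp_sketch(self: torch.Tensor, exp: torch.Tensor) -> torch.Tensor:
    # Assumed decomposition: mantissa times a power of two
    return self * _pow2_sketch(self, exp)
```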

Add regression tests to `test_ldexp` and enable it to run on both CPU and GPU
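(With PyTorch's device-generic test machinery, removing the CPU-only restriction means the test is instantiated once per device, e.g. as `TestBinaryUfuncsCPU.test_ldexp_cpu` and `TestBinaryUfuncsCUDA.test_ldexp_cuda`.)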

Fixes https://github.com/pytorch/pytorch/issues/133267

Pull Request resolved: https://github.com/pytorch/pytorch/pull/133519
Approved by: https://github.com/janeyx99, https://github.com/Skylion007
Author: Nikita Shulga
Date: 2024-08-16 01:26:26 +00:00
Committed by: PyTorch MergeBot
Parent: 3a904d1163
Commit: 1653f7786d
2 changed files with 33 additions and 11 deletions

test/test_binary_ufuncs.py

@@ -156,7 +156,6 @@ class TestBinaryUfuncs(TestCase):
             numpy_sample = sample.numpy()
             l_numpy = numpy_sample.input
             r_numpy = numpy_sample.args[0]
-
             actual = op(l, r)
             expected = op.ref(l_numpy, r_numpy)
 
@@ -3407,29 +3406,41 @@ class TestBinaryUfuncs(TestCase):
         assert m.dim() == 0, "m is intentionally a scalar"
         self.assertEqual(torch.pow(2, m), 2**m)
 
-    @onlyCPU
     def test_ldexp(self, device):
         # random values
         mantissas = torch.randn(64, device=device)
         exponents = torch.randint(-31, 31, (64,), device=device, dtype=torch.int32)
 
         # basic test
-        np_outcome = np.ldexp(mantissas.numpy(), exponents.numpy())
+        np_outcome = np.ldexp(mantissas.cpu().numpy(), exponents.cpu().numpy())
         pt_outcome_1 = torch.ldexp(mantissas, exponents)
         pt_outcome_2 = mantissas.ldexp(exponents)
-        self.assertEqual(np_outcome, pt_outcome_1)
-        self.assertEqual(np_outcome, pt_outcome_2)
+        self.assertEqual(np_outcome, pt_outcome_1.cpu())
+        self.assertEqual(np_outcome, pt_outcome_2.cpu())
         mantissas.ldexp_(exponents)
-        self.assertEqual(np_outcome, mantissas)
+        self.assertEqual(np_outcome, mantissas.cpu())
 
         # test bounds
         mantissas = torch.tensor(
             [float("inf"), float("-inf"), float("inf"), float("nan")], device=device
         )
         exponents = torch.randint(0, 31, (4,), device=device, dtype=torch.int32)
-        np_outcome = np.ldexp(mantissas.numpy(), exponents.numpy())
+        np_outcome = np.ldexp(mantissas.cpu().numpy(), exponents.cpu().numpy())
         pt_outcome = torch.ldexp(mantissas, exponents)
-        self.assertEqual(np_outcome, pt_outcome)
+        self.assertEqual(np_outcome, pt_outcome.cpu())
 
+        # test half dtype behavior
+        mantissas = torch.randn(64, device=device, dtype=torch.half)
+        exponents = torch.randint(-5, 5, (64,), device=device)
+        self.assertEqual(torch.ldexp(mantissas, exponents).dtype, torch.half)
+
+        # test float64 computation
+        mantissas = torch.tensor([1], dtype=torch.float64, device=device)
+        exponents = torch.tensor([128], dtype=torch.int64, device=device)
+        expected = torch.pow(
+            torch.full((1,), 2, device=device, dtype=torch.float64), 128
+        )
+        self.assertEqual(torch.ldexp(mantissas, exponents), expected)
+
     @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
     def test_lerp(self, device, dtype):