Fix numerical instability for norm (#129352)
Fixes #123645

When the reduce size is large, reducing directly may exceed the range that FP32 can accurately represent, producing incorrect results. Reducing in groups and using double as the intermediate accumulation type avoids exceeding that range.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/129352
Approved by: https://github.com/jgong5, https://github.com/peterbell10
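The failure mode and the grouped-reduction idea are easy to sketch outside the kernel. The following NumPy snippet is illustrative only — it is not the ATen code changed by this PR, and grouped_sum_sq and group_size are hypothetical names: sequential float32 accumulation stalls once the running total passes 2**24, while reducing in groups and combining the partials in double stays exact.

    import numpy as np

    # Once a float32 accumulator reaches 2**24, adding 1.0 is rounded away,
    # so a long sequential reduction silently stops growing.
    acc = np.float32(2**24)
    print(acc + np.float32(1.0) == acc)  # True: the increment is lost

    # Sketch of the grouped strategy: reduce each group in float32, then
    # combine the group partials in a float64 accumulator.
    def grouped_sum_sq(x, group_size=4096):
        total = np.float64(0.0)
        for start in range(0, x.size, group_size):
            part = x[start:start + group_size]
            total += np.float64(np.dot(part, part))  # group-local sum of squares
        return total

    x = np.ones(1_000_000, dtype=np.float32)
    print(np.sqrt(grouped_sum_sq(x)))  # 1000.0, the exact L2 norm of all-ones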
@@ -486,6 +486,17 @@ class TestReductions(TestCase):
             y2 = op(x2, dim=-1)
             self.assertEqual(y, y2)

+    @onlyCPU
+    @dtypes(torch.float, torch.bfloat16)
+    def test_reduction_lastdim_overflow(self, device, dtype):
+        x1 = torch.ones((1, 32, 224, 224, 160), device=device, dtype=torch.double)
+        x2 = torch.ones((1, 32, 224, 224, 160), device=device, dtype=dtype)
+        ops = [torch.norm, torch.linalg.vector_norm]
+        for op in ops:
+            y1 = op(x1)
+            y2 = op(x2)
+            self.assertEqual(y1.to(dtype), y2)
+
     @skipIfNoSciPy
     @dtypes(torch.float32, torch.double, torch.complex64, torch.complex128)
     def test_logsumexp(self, device, dtype):
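For context, the exact value the new test protects is easy to compute by hand: the L2 norm of an all-ones tensor with N elements is sqrt(N). A quick standalone check (not part of the PR; the tensor below allocates roughly 1 GB as float32):

    import torch

    x = torch.ones((1, 32, 224, 224, 160))  # same shape as the test: 256,901,120 elements
    exact = x.numel() ** 0.5                 # L2 norm of all-ones is sqrt(N) ~= 16028.14
    print(torch.norm(x).item(), exact)       # on a build with this fix, the two agree closely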