[TEST] Modernize test_sort_large (#155546)

Since its introduction ~4 years ago, the test `test_sort_large` has always been deselected because it requires 200GB of CUDA memory. Now that GPUs this large are available, the test gets selected, but it fails because `var_mean` is not a member of `torch.Tensor` and `torch.var_mean` accepts only floating-point tensors.
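
A rough illustration of the failure mode (a hypothetical snippet, not taken from the test itself): the indices returned by `sort()` are integer tensors, `var_mean` is not a method on `torch.Tensor`, and `torch.var_mean` only accepts floating-point inputs, so the indices have to be cast before the reduction:

```python
import torch

# Illustrative sketch (not from the test): sort() indices are int64.
i = torch.randperm(8192).view(1, 8192).expand(4, -1).contiguous().sort().indices

# i.var_mean(dim=0)           # fails: var_mean is not a Tensor method
# torch.var_mean(i, dim=0)    # fails: var_mean requires a floating-point tensor

# The approach taken in this PR: cast to the test's floating dtype first.
iv, im = torch.var_mean(i.to(torch.float32), dim=0)
```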
Pull Request resolved: https://github.com/pytorch/pytorch/pull/155546
Approved by: https://github.com/eqy
Author: Aidyn-A
Date: 2025-06-10 19:59:06 +00:00
Committer: PyTorch MergeBot
Parent: ea23eb4b98
Commit: 0ca2a79f5b


@@ -222,14 +222,14 @@ class TestSortAndSelect(TestCase):
         t = t0.view(1, 8192).expand(2**18 + 1, -1).contiguous()
         v, i = t.sort()
         del t
-        iv, im = i.var_mean(dim=0)
+        iv, im = torch.var_mean(i.to(dtype), dim=0)
         del i
-        vv, vm = v.var_mean(dim=0)
+        vv, vm = torch.var_mean(v.to(dtype), dim=0)
         del v
         self.assertEqual(vv, torch.zeros_like(vv))
         self.assertEqual(iv, torch.zeros_like(iv))
-        self.assertEqual(vm, torch.arange(255, dtype=dtype, device=device))
-        self.assertEqual(im, t0.sort().indices)
+        self.assertEqual(vm, torch.arange(8192, dtype=dtype, device=device))
+        self.assertEqual(im, t0.sort().indices, exact_dtype=False)

     @dtypes(torch.float32)
     def test_sort_restride(self, device, dtype):