[BE][2/6] fix typos in test/ (test/test_*.py) (#157636)

Pull Request resolved: https://github.com/pytorch/pytorch/pull/157636
Approved by: https://github.com/yewentao256, https://github.com/mlazos
ghstack dependencies: #156311, #156609
commit fc0376e8b1
parent ffe11b2bf2
Author: Xuehai Pan
Date:   2025-07-09 13:23:55 +08:00
Committed by: PyTorch MergeBot

57 changed files with 194 additions and 195 deletions


@@ -735,7 +735,7 @@ class TestReductions(TestCase):
         res2 = x1.sum(axis=(0, 2), keepdims=True)
         self.assertEqual(res1, res2)
 
-    # TODO: kill this ane replace with common creation ops
+    # TODO: kill this and replace with common creation ops
     def _make_tensors(self, shape, val_range=(-100, 100), use_floating=True, use_integral=True,
                       use_complex=False) -> dict[str, list[torch.Tensor]]:
         float_types = [torch.double,
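For context on the hunk above: the surrounding test asserts that a multi-axis reduction with `keepdims=True` keeps the reduced axes as size-1 dims. A minimal sketch of that behavior, with illustrative shapes not taken from the test file:

```python
import torch

x1 = torch.randn(2, 3, 4)

# Reducing over a tuple of axes with keepdims=True keeps those axes as
# size-1 dims, so the result still broadcasts against the input.
res = x1.sum(axis=(0, 2), keepdims=True)
print(res.shape)  # torch.Size([1, 3, 1])

# axis/keepdims are NumPy-compat aliases for torch's dim/keepdim.
print(torch.allclose(res, x1.sum(dim=(0, 2), keepdim=True)))  # True
```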
@@ -1629,7 +1629,7 @@ class TestReductions(TestCase):
                 RuntimeError, "only when boundaries tensor dimension is 1"):
             torch.searchsorted(boundaries, 1)
 
-        # incompatiable output tensor's dtype
+        # incompatible output tensor's dtype
         def test_output_dtype(dtype, is_int32):
             output = values_1d.to(dtype)
             with self.assertRaisesRegex(
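The hunk above sits in a `torch.searchsorted` error-path test: a scalar query is only legal when the boundaries tensor is 1-D. A minimal sketch of the two cases, with made-up values:

```python
import torch

# 1-D sorted boundaries: a scalar query is allowed.
boundaries = torch.tensor([1, 3, 5, 7, 9])
print(torch.searchsorted(boundaries, 4))  # tensor(2)

# N-D boundaries: each row of values is searched within the matching row
# of boundaries, so a bare scalar query raises a RuntimeError instead.
boundaries_2d = torch.tensor([[1, 3, 5], [2, 4, 6]])
values_2d = torch.tensor([[4], [5]])
print(torch.searchsorted(boundaries_2d, values_2d))  # tensor([[2], [2]])
```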
@@ -2018,7 +2018,7 @@ class TestReductions(TestCase):
         with self.assertRaisesRegex(RuntimeError, error_msg):
             op(x, dim=dim)
 
-    # TODO: update this test to comapre against NumPy
+    # TODO: update this test to compare against NumPy
     @onlyCUDA
     def test_var(self, device):
         cpu_tensor = torch.randn(2, 3, 3)
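`test_var` above compares CPU against CUDA results rather than NumPy, hence the TODO. If it were updated as the TODO suggests, the main wrinkle is the default estimator: `torch.var` applies Bessel's correction by default while `np.var` does not. A hedged sketch of an aligned comparison:

```python
import torch
import numpy as np

cpu_tensor = torch.randn(2, 3, 3)

# torch.var defaults to the unbiased (ddof=1) estimator; np.var defaults
# to the biased (ddof=0) one, so ddof=1 is needed for them to match.
assert abs(cpu_tensor.var().item() - np.var(cpu_tensor.numpy(), ddof=1)) < 1e-5
```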
@@ -2513,7 +2513,7 @@ class TestReductions(TestCase):
             k = int((t.numel() - 1) / 2)
             self.assertEqual(res, t.view(-1).sort()[0][k])
             if t.numel() % 2 == 1:
-                # We can only test agains numpy for odd reductions because numpy
+                # We can only test against numpy for odd reductions because numpy
                 # returns the mean of the two medians and torch returns the lower
                 self.assertEqual(res.cpu().numpy(), np.median(t_numpy))
             for dim in range(t.ndim):
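The comment being fixed above (and in the next several hunks) encodes a real semantics gap: for an even number of elements, NumPy's median averages the two middle values while `torch.median` returns the lower one, so the tests only compare against NumPy for odd counts. A minimal illustration:

```python
import torch
import numpy as np

t = torch.tensor([1.0, 2.0, 3.0, 4.0])

# Even element count: NumPy averages the two middle values,
# while torch returns the lower of the two.
print(np.median(t.numpy()))  # 2.5
print(torch.median(t))       # tensor(2.)
```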
@@ -2524,7 +2524,7 @@ class TestReductions(TestCase):
                 self.assertEqual(res[0], (t.sort(dim)[0]).select(dim, k).unsqueeze_(dim))
                 self.assertEqual(res[0], t.gather(dim, res[1]))
                 if size % 2 == 1:
-                    # We can only test agains numpy for odd reductions because numpy
+                    # We can only test against numpy for odd reductions because numpy
                     # returns the mean of the two medians and torch returns the lower
                     self.assertEqual(res[0].cpu().numpy(), np.median(t_numpy, dim, keepdims=True), exact_dtype=False)
@@ -2548,7 +2548,7 @@ class TestReductions(TestCase):
             k = int((t.numel() - num_nan - 1) / 2)
             self.assertEqual(res, t.view(-1).sort()[0][k])
             if (t.numel() - num_nan) % 2 == 1:
-                # We can only test agains numpy for odd reductions because numpy
+                # We can only test against numpy for odd reductions because numpy
                 # returns the mean of the two medians and torch returns the lower
                 self.assertEqual(res.item(), numpy_op(t.cpu().numpy()))
             for dim in range(t.ndim):
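This hunk is the NaN-aware variant of the same comparison: `torch.nanmedian` skips NaNs, so the odd/even parity is computed on the non-NaN count (`t.numel() - num_nan`). A short sketch of the difference from plain `median`:

```python
import torch
import numpy as np

t = torch.tensor([float('nan'), 1.0, 2.0, 3.0])

# nanmedian ignores the NaN, leaving 3 values (odd), so torch and NumPy
# agree; plain median propagates the NaN instead.
print(torch.nanmedian(t))       # tensor(2.)
print(np.nanmedian(t.numpy()))  # 2.0
print(torch.median(t))          # tensor(nan)
```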
@@ -2561,7 +2561,7 @@ class TestReductions(TestCase):
                 k = ((size - num_nan - 1) / 2).type(torch.long)
                 self.assertEqual(res[0], (t.sort(dim)[0]).gather(dim, k))
                 self.assertEqual(res[0], t.gather(dim, res[1]))
-                # We can only test agains numpy for odd reductions because numpy
+                # We can only test against numpy for odd reductions because numpy
                 # returns the mean of the two medians and torch returns the lower
                 mask = (size - num_nan) % 2 == 1
                 res = res[0].masked_select(mask).cpu()
@@ -3526,7 +3526,7 @@ as the input tensor excluding its innermost dimension'):
     # raises an error if no `dim` parameter is specified. This exists separately from tests in
     # test_tensot_compare_ops_empty because not specifying a `dim` parameter in the former tests does
     # not throw errors. Also, checking the return type of argmax requires supplying a different dtype
-    # argument than that for the input tensor. There is also variantion in numpy testing.
+    # argument than that for the input tensor. There is also variation in numpy testing.
     def test_tensor_compare_ops_argmax_argmix_kthvalue_dim_empty(self, device):
         shape = (2, 0, 4)
         master_input = torch.randn(shape, device=device)
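As the fixed comment explains, with a zero-size dimension these reduction ops error out when no `dim` is given (there is nothing to reduce over) but succeed when `dim` selects a non-empty axis. A rough sketch of that contrast; the exact exception type and message are not guaranteed here:

```python
import torch

t = torch.randn(2, 0, 4)

# Reducing over a non-empty dim of an empty tensor yields an empty result.
print(t.argmax(dim=0).shape)  # torch.Size([0, 4])

# With no dim, argmax must reduce over all (zero) elements and raises.
try:
    t.argmax()
except (RuntimeError, IndexError) as e:
    print("raised:", type(e).__name__)
```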