mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-20 21:14:14 +08:00
Fix torch.{unique, unique_consecutive} out of bound (#71540)
Summary: This PR ensures that the input iterator is always in front of the output iterator. Thus, we won't have an out-of-bounds issue, since the input iterator will reach the end before the output iterator does. Fixes https://github.com/pytorch/pytorch/issues/71089 Pull Request resolved: https://github.com/pytorch/pytorch/pull/71540 Reviewed By: mruberry Differential Revision: D33688123 Pulled By: ngimel fbshipit-source-id: f57718931d09a0fbea76ac1bd6cc8c7150af0978 (cherry picked from commit dc6e0e219a9e9b9ccea9ff5406458b56f556b2e4)
This commit is contained in:
committed by
PyTorch MergeBot
parent
9f0227a0eb
commit
7ee0712642
@ -398,6 +398,8 @@ class TestSortAndSelect(TestCase):
|
||||
x_empty = torch.empty(5, 0, dtype=dtype, device=device)
|
||||
x_ill_formed_empty = torch.empty(5, 0, 0, dtype=dtype, device=device)
|
||||
x_ill_formed_empty_another = torch.empty(5, 0, 5, dtype=dtype, device=device)
|
||||
if dtype in floating_types_and(torch.float16, torch.bfloat16):
|
||||
x_nan = torch.tensor([float("nan"), 0, 0, float("nan"), float("nan"), 1], dtype=dtype, device=device)
|
||||
expected_unique_dim0 = torch.tensor([[[1., 1.],
|
||||
[0., 1.],
|
||||
[2., 1.],
|
||||
@ -437,6 +439,10 @@ class TestSortAndSelect(TestCase):
|
||||
expected_unique_empty = torch.tensor([], dtype=dtype, device=device)
|
||||
expected_inverse_empty = torch.tensor([], dtype=torch.long, device=device)
|
||||
expected_counts_empty = torch.tensor([], dtype=torch.long, device=device)
|
||||
if dtype in floating_types_and(torch.float16, torch.bfloat16):
|
||||
expected_unique_nan = torch.tensor([float("nan"), 0, float("nan"), float("nan"), 1], dtype=dtype, device=device)
|
||||
expected_inverse_nan = torch.tensor([0, 1, 1, 2, 3, 4], dtype=torch.long, device=device)
|
||||
expected_counts_nan = torch.tensor([1, 2, 1, 1, 1], dtype=torch.long, device=device)
|
||||
# dim0
|
||||
x_unique = torch.unique(x, dim=0)
|
||||
self.assertEqual(expected_unique_dim0, x_unique)
|
||||
@ -547,6 +553,17 @@ class TestSortAndSelect(TestCase):
|
||||
self.assertEqual(expected_inverse_empty, x_inverse)
|
||||
self.assertEqual(expected_counts_empty, x_counts)
|
||||
|
||||
# test tensor with nan
|
||||
if dtype in floating_types_and(torch.float16, torch.bfloat16):
|
||||
x_unique, x_inverse, x_counts = torch.unique(
|
||||
x_nan,
|
||||
return_inverse=True,
|
||||
return_counts=True,
|
||||
dim=0)
|
||||
self.assertEqual(expected_unique_nan, x_unique)
|
||||
self.assertEqual(expected_inverse_nan, x_inverse)
|
||||
self.assertEqual(expected_counts_nan, x_counts)
|
||||
|
||||
# test not a well formed tensor
|
||||
# Checking for runtime error, as this is the expected behaviour
|
||||
with self.assertRaises(RuntimeError):
|
||||
@ -579,6 +596,9 @@ class TestSortAndSelect(TestCase):
|
||||
dtype=dtype,
|
||||
device=device
|
||||
)
|
||||
# test tensor with nan
|
||||
if dtype in floating_types_and(torch.float16, torch.bfloat16):
|
||||
y_nan = torch.tensor([float("nan"), 0, 0, float("nan"), float("nan"), 1], dtype=dtype, device=device)
|
||||
expected_y_unique = torch.tensor(
|
||||
[[0, 1],
|
||||
[1, 2],
|
||||
@ -593,6 +613,11 @@ class TestSortAndSelect(TestCase):
|
||||
expected_y_counts = torch.tensor([3, 2, 1, 2, 1, 1], dtype=torch.int64, device=device)
|
||||
expected_y_inverse_bool = torch.tensor([0, 0, 0, 1, 1, 1, 2, 2, 3, 3], dtype=torch.int64, device=device)
|
||||
expected_y_counts_bool = torch.tensor([3, 3, 2, 2], dtype=torch.int64, device=device)
|
||||
if dtype in floating_types_and(torch.float16, torch.bfloat16):
|
||||
expected_y_unique_nan = torch.tensor([float("nan"), 0, float("nan"), float("nan"), 1], dtype=dtype, device=device)
|
||||
expected_y_inverse_nan = torch.tensor([0, 1, 1, 2, 3, 4], dtype=torch.long, device=device)
|
||||
expected_y_counts_nan = torch.tensor([1, 2, 1, 1, 1], dtype=torch.long, device=device)
|
||||
|
||||
y_unique, y_inverse, y_counts = torch.unique_consecutive(y, return_inverse=True, return_counts=True, dim=0)
|
||||
if x.dtype == torch.bool:
|
||||
self.assertEqual(expected_y_inverse_bool, y_inverse)
|
||||
@ -601,6 +626,17 @@ class TestSortAndSelect(TestCase):
|
||||
self.assertEqual(expected_y_inverse, y_inverse)
|
||||
self.assertEqual(expected_y_counts, y_counts)
|
||||
|
||||
# test tensor with nan
|
||||
if dtype in floating_types_and(torch.float16, torch.bfloat16):
|
||||
y_unique, y_inverse, y_counts = torch.unique_consecutive(
|
||||
y_nan,
|
||||
return_inverse=True,
|
||||
return_counts=True,
|
||||
dim=0)
|
||||
self.assertEqual(expected_y_unique_nan, y_unique)
|
||||
self.assertEqual(expected_y_inverse_nan, y_inverse)
|
||||
self.assertEqual(expected_y_counts_nan, y_counts)
|
||||
|
||||
run_test(device, torch.float)
|
||||
run_test(device, torch.double)
|
||||
run_test(device, torch.long)
|
||||
|
Reference in New Issue
Block a user