Fix torch.histc not checking min > max on cuda for int8 tensors (#139372)
Fixes #139360

86e6513c86/aten/src/ATen/native/cuda/SummaryOps.cu (L323-L324)

Assigning `min` and `max` to the low-precision `input_t` variables `minvalue` and `maxvalue` causes a wrong comparison result in the subsequent check here: 86e6513c86/aten/src/ATen/native/cuda/SummaryOps.cu (L353)

Change the type of `minvalue` and `maxvalue` to fix it, similar to: 86e6513c86/aten/src/ATen/native/cuda/SummaryOps.cu (L280-L282)
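For illustration, a minimal repro of the reported behavior, based on the regression test added below (tensor values and bounds are taken from that test; the `device="cuda"` string assumes a CUDA build):

```python
import torch

# min=5 > max=1 must be rejected; before this fix the CUDA int8 path could
# miss the check because the bounds were narrowed to the input dtype first.
t = torch.tensor([1., 2., 3.], dtype=torch.int8, device="cuda")
try:
    torch.histc(t, bins=4, min=5, max=1)
except RuntimeError as e:
    print(e)  # "max must be larger than min"
```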
**Test Result**

```bash
$ pytest test/test_reductions.py -vv
```

```bash
$ lintrunner
```

Pull Request resolved: https://github.com/pytorch/pytorch/pull/139372
Approved by: https://github.com/eqy
committed by PyTorch MergeBot
parent 356fc41ae0
commit ffb7a08921
```diff
--- a/aten/src/ATen/native/cuda/SummaryOps.cu
+++ b/aten/src/ATen/native/cuda/SummaryOps.cu
@@ -320,8 +320,10 @@ Tensor _histc_cuda_template(
       std::nullopt /* layout */,
       DeviceType::CUDA,
       std::nullopt /* pin_memory */);
-  input_t minvalue = min;
-  input_t maxvalue = max;
+  using bounds_t = at::acc_type<input_t, /*is_cuda=*/true>;
+  bounds_t minvalue = min;
+  bounds_t maxvalue = max;
+
   if (min == max && self.numel() > 0) {
     minvalue = *self.min().cpu().const_data_ptr<input_t>();
     maxvalue = *self.max().cpu().const_data_ptr<input_t>();
```
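The trailing context of this hunk also shows the `min == max` fallback: when the caller passes identical bounds, `histc` derives them from the data itself. A quick sketch of that behavior from the Python side (values and bounds mirror the corner-case test below):

```python
import torch

# min == max == 5 is not an error: histc falls back to the tensor's own
# min (1) and max (2) as the histogram range, giving 4 bins over [1, 2].
t = torch.tensor([1., 2., 1.])
print(torch.histc(t, bins=4, min=5, max=5))  # tensor([2., 0., 0., 1.])
```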
```diff
--- a/test/test_reductions.py
+++ b/test/test_reductions.py
@@ -3116,6 +3116,30 @@ class TestReductions(TestCase):
             actual)
         self.assertEqual(actual.dtype, dtype)
 
+    @dtypes(torch.uint8, torch.int8, torch.int, torch.long, torch.float, torch.double)
+    def test_histc_min_max_errors(self, device, dtype):
+        with self.assertRaisesRegex(RuntimeError, "max must be larger than min"):
+            torch.histc(torch.tensor([1., 2., 3.], dtype=dtype, device=device), bins=4, min=5, max=1)
+
+    @dtypes(torch.float, torch.double)
+    def test_histc_min_max_corner_cases(self, device, dtype):
+        actual = torch.histc(
+            torch.tensor([1., 2, 1], dtype=dtype, device=device),
+            bins=4, min=5, max=5)
+        self.assertEqual(
+            torch.tensor([2, 0, 0, 1], dtype=dtype, device=device),
+            actual)
+
+    @onlyCUDA
+    @dtypes(torch.uint8, torch.int8, torch.int, torch.long)
+    def test_histc_min_max_corner_cases_cuda(self, device, dtype):
+        actual = torch.histc(
+            torch.tensor([1., 2, 1], dtype=dtype, device=device),
+            bins=4, min=5, max=5)
+        self.assertEqual(
+            torch.tensor([2, 0, 0, 1], dtype=dtype, device=device),
+            actual)
+
     """
     Runs torch.histogram and numpy.histogram on the specified input parameters
     and asserts that their output is equal.
```
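The expected counts in the corner-case tests follow from the fallback bounds. A hedged walk-through, assuming histc's usual binning rule (equal-width bins over [min, max], with values equal to max placed in the last bin); `bin_index` is a hypothetical helper for illustration, not a PyTorch function:

```python
import math

def bin_index(x, lo, hi, nbins):
    # Equal-width binning; clamp so x == hi lands in the last bin.
    return min(math.floor((x - lo) * nbins / (hi - lo)), nbins - 1)

# min == max == 5 falls back to the data's own bounds: lo=1, hi=2.
counts = [0] * 4
for x in [1.0, 2.0, 1.0]:
    counts[bin_index(x, 1.0, 2.0, 4)] += 1
print(counts)  # [2, 0, 0, 1] -- matches the expected tensor in the tests
```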
```diff
--- a/torch/testing/_internal/common_methods_invocations.py
+++ b/torch/testing/_internal/common_methods_invocations.py
@@ -19375,7 +19375,7 @@ op_db: List[OpInfo] = [
            )),
     OpInfo('histc',
            dtypes=floating_types_and(torch.bfloat16, torch.float16),
-           dtypesIfCUDA=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64),
+           dtypesIfCUDA=floating_types_and(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64),
            sample_inputs_func=sample_inputs_histc,
            supports_out=True,
            supports_autograd=False,
```