Enable UBSAN tests (#141672)

Fixes #ISSUE_NUMBER

Pull Request resolved: https://github.com/pytorch/pytorch/pull/141672
Approved by: https://github.com/ezyang
cyy
2024-11-28 01:55:12 +00:00
committed by PyTorch MergeBot
parent ca9bfa1a38
commit 5ca75ac1df
2 changed files with 3 additions and 7 deletions
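For context, the UBSAN gate removed below follows a common pattern in torch.testing._internal.common_utils: a module-level boolean derived from an environment variable, checked at the top of a test body. A minimal sketch of that pattern (the exact flag definition in common_utils may differ; the environment-variable name here is an assumption):

import os

# Assumed definition; the real TEST_WITH_UBSAN flag lives in
# torch.testing._internal.common_utils and may be derived differently.
TEST_WITH_UBSAN = os.getenv("PYTORCH_TEST_WITH_UBSAN", "0") == "1"

def some_quantized_test():
    # The bail-out pattern this PR deletes: silently skip the body when the
    # suite runs under UndefinedBehaviorSanitizer.
    if TEST_WITH_UBSAN:
        return
    ...  # actual test logic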

View File

@@ -16,10 +16,7 @@ from torch.testing._internal.common_quantization import (
     _make_conv_test_input,
 )
 from torch.testing._internal.common_quantized import override_quantized_engine
-from torch.testing._internal.common_utils import (
-    IS_PPC,
-    TEST_WITH_UBSAN,
-)
+from torch.testing._internal.common_utils import IS_PPC
 
 class TestQuantizedFunctionalOps(QuantizationTestCase):
     def test_relu_api(self):
@@ -94,7 +91,7 @@ class TestQuantizedFunctionalOps(QuantizationTestCase):
         if qengine not in torch.backends.quantized.supported_engines:
             return
         if qengine == 'qnnpack':
-            if IS_PPC or TEST_WITH_UBSAN:
+            if IS_PPC:
                 return
 
         use_channelwise = False
@@ -147,7 +144,7 @@ class TestQuantizedFunctionalOps(QuantizationTestCase):
         if qengine not in torch.backends.quantized.supported_engines:
             return
         if qengine == 'qnnpack':
-            if IS_PPC or TEST_WITH_UBSAN:
+            if IS_PPC:
                 return
 
         input_feature_map_size = (H, W)
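The per-engine guard that survives the change is usually paired with the override_quantized_engine context manager imported at the top of this file; a hedged sketch of that combination (illustrative only, not the file's actual test body):

import torch
from torch.testing._internal.common_quantized import override_quantized_engine

def run_with_qengine(qengine, test_body):
    # Engines missing from this build (e.g. qnnpack on some platforms) are
    # still skipped; only the UBSAN bail-out is removed by this PR.
    if qengine not in torch.backends.quantized.supported_engines:
        return
    with override_quantized_engine(qengine):
        test_body()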

View File

@@ -140,7 +140,6 @@ def _compress_uniform_simplified(X, bit_rate, xmin, xmax, fp16_scale_bias=True):
 
 class TestQuantizedTensor(TestCase):
     def test_qtensor_equal(self):
-        # ASAN regression test reported in https://github.com/pytorch/pytorch/issues/116087
         x = torch.rand(5)
         x_q = torch.quantize_per_tensor(x, 0.1, 10, torch.quint4x2)
         y_q = torch.quantize_per_tensor(x, 0.1, 10, torch.quint4x2)
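The deleted comment pointed at an ASAN regression (issue 116087) around equality of quint4x2 tensors; the surviving lines build two identically quantized tensors. A self-contained sketch of that check (the final assertion is an assumption, since the rest of the test body falls outside this hunk):

import torch

x = torch.rand(5)
# Quantize the same float tensor twice with identical scale and zero_point.
x_q = torch.quantize_per_tensor(x, 0.1, 10, torch.quint4x2)
y_q = torch.quantize_per_tensor(x, 0.1, 10, torch.quint4x2)
# Identical inputs and quantization parameters should compare equal.
assert torch.equal(x_q, y_q)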