From f51040928167fe6fbc08cdac9b20d27a5454b1df Mon Sep 17 00:00:00 2001
From: Daya Khudia
Date: Mon, 12 Aug 2019 17:56:09 -0700
Subject: [PATCH] Enable FBGEMM tests under UBSAN as well (#23570)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Summary:
Enabling tests under UBSAN as well

Pull Request resolved: https://github.com/pytorch/pytorch/pull/23570

Test Plan:
buck test mode/dev caffe2/test:quantized

```
Running 29 tests
Started new test run: https://our.intern.facebook.com/intern/testinfra/testrun/3940649677415136
✓ caffe2/test:quantized - test_qtensor (test_quantized_tensor.TestQuantizedTensor) 0.536 1/29 (passed)
✓ caffe2/test:quantized - test_qtensor_per_channel_affine (test_quantized_tensor.TestQuantizedTensor) 0.453 2/29 (passed)
✓ caffe2/test:quantized - test_qtensor_reshape (test_quantized_tensor.TestQuantizedTensor) 0.302 3/29 (passed)
✓ caffe2/test:quantized - test_qadd_relu_same_qparams (test_quantized.TestQuantizedOps) 0.332 4/29 (passed)
✓ caffe2/test:quantized - test_qtensor_view (test_quantized_tensor.TestQuantizedTensor) 0.351 5/29 (passed)
✓ caffe2/test:quantized - test_qadd_relu_different_qparams (test_quantized.TestQuantizedOps) 0.348 6/29 (passed)
✓ caffe2/test:quantized - test_qtensor_dequantize_linear (test_quantized_tensor.TestQuantizedTensor) 0.338 7/29 (passed)
✓ caffe2/test:quantized - test_qtensor_copy (test_quantized_tensor.TestQuantizedTensor) 0.267 8/29 (passed)
✓ caffe2/test:quantized - test_qtensor_clone (test_quantized_tensor.TestQuantizedTensor) 0.330 9/29 (passed)
✓ caffe2/test:quantized - test_qrelu (test_quantized.TestQuantizedOps) 1.774 10/29 (passed)
✓ caffe2/test:quantized - test_pool_api (test_nn_quantized.ModuleAPITest) 0.418 11/29 (passed)
✓ caffe2/test:quantized - test_qtensor_load_save (test_quantized_tensor.TestQuantizedTensor) 0.724 12/29 (passed)
✓ caffe2/test:quantized - test_relu_api (test_nn_quantized.FunctionalAPITest) 1.013 13/29 (passed)
✓ caffe2/test:quantized - test_qtensor_quant_dequant (test_quantized_tensor.TestQuantizedTensor) 1.055 14/29 (passed)
✓ caffe2/test:quantized - test_qtensor_permute (test_quantized_tensor.TestQuantizedTensor) 0.696 15/29 (passed)
✓ caffe2/test:quantized - test_qtensor_dtypes (test_quantized_tensor.TestQuantizedTensor) 0.841 16/29 (passed)
✓ caffe2/test:quantized - test_quant_dequant_api (test_nn_quantized.ModuleAPITest) 0.616 17/29 (passed)
✓ caffe2/test:quantized - test_qtensor_creation (test_quantized_tensor.TestQuantizedTensor) 0.698 18/29 (passed)
✓ caffe2/test:quantized - test_qconv (test_quantized.TestQuantizedConv) 4.743 19/29 (passed)
✓ caffe2/test:quantized - test_cat (test_quantized.TestQuantizedOps) 6.992 20/29 (passed)
✓ caffe2/test:quantized - test_linear_api (test_nn_quantized.ModuleAPITest) 8.970 21/29 (passed)
✓ caffe2/test:quantized - test_conv_api (test_quantized_conv.QuantizedConvTest) 9.403 22/29 (passed)
↷ caffe2/test:quantized - test_qnnpack_linear (test_quantized.TestQNNPackOps) 0.000 23/29 (skipped)
Test output:
> Skipped: QNNPACK does not play well with UBSAN at the moment, so we skip the test if we are in a UBSAN environment.
> test_qnnpack_linear (test_quantized.TestQNNPackOps) ... skipped 'QNNPACK does not play well with UBSAN at the moment, so we skip the test if we are in a UBSAN environment.'
>
> ----------------------------------------------------------------------
> Ran 1 test in 0.000s
>
> OK (skipped=1)
↷ caffe2/test:quantized - test_qnnpack_relu (test_quantized.TestQNNPackOps) 0.000 24/29 (skipped)
Test output:
> Skipped: QNNPACK does not play well with UBSAN at the moment, so we skip the test if we are in a UBSAN environment.
> test_qnnpack_relu (test_quantized.TestQNNPackOps) ... skipped 'QNNPACK does not play well with UBSAN at the moment, so we skip the test if we are in a UBSAN environment.'
>
> ----------------------------------------------------------------------
> Ran 1 test in 0.000s
>
> OK (skipped=1)
✓ caffe2/test:quantized - test_max_pool2d (test_quantized.TestQuantizedOps) 8.453 25/29 (passed)
✓ caffe2/test:quantized - test_qlinear_unpack (test_quantized.TestQuantizedLinear) 0.664 26/29 (passed)
✓ caffe2/test:quantized - test_qconv_unpack (test_quantized.TestQuantizedConv) 2.965 27/29 (passed)
✓ caffe2/test:quantized - test_qlinear (test_quantized.TestQuantizedLinear) 1.915 28/29 (passed)
✓ caffe2/test:quantized - test_conv_api (test_nn_quantized.ModuleAPITest) 60.804 29/29 (passed)
✓ caffe2/test:quantized - main 0.000 (passed)
Finished test run: https://our.intern.facebook.com/intern/testinfra/testrun/3940649677415136
Summary (total time 68.66s):
  PASS: 28
  FAIL: 0
  SKIP: 2
    caffe2/test:quantized - test_qnnpack_linear (test_quantized.TestQNNPackOps)
    caffe2/test:quantized - test_qnnpack_relu (test_quantized.TestQNNPackOps)
  FATAL: 0
  TIMEOUT: 0
  OMIT: 0
```
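For reference, the skip pattern this change converges on looks like this in isolation. This is a minimal sketch, not code from the patch: the `ExampleQuantizedTest` class and its test body are hypothetical, while `torch.fbgemm_is_cpu_supported()` and the skip message are taken verbatim from the diff below.

```
import unittest

import torch

# Tests are gated only on FBGEMM CPU support; the additional
# TEST_WITH_UBSAN guard is what this patch removes.
@unittest.skipIf(
    not torch.fbgemm_is_cpu_supported(),
    "Quantized operations require FBGEMM. FBGEMM is only optimized for CPUs"
    " with instruction set support avx2 or newer.",
)
class ExampleQuantizedTest(unittest.TestCase):  # hypothetical test class
    def test_gate(self):
        # If the decorator let this test run, FBGEMM is usable on this CPU.
        self.assertTrue(torch.fbgemm_is_cpu_supported())


if __name__ == "__main__":
    unittest.main()
```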
Reviewed By: jianyuh

Differential Revision: D16569166

Pulled By: dskhudia

fbshipit-source-id: 53522b4162eb1ebb35b408a1503d9664305c85b0
---
 test/test_jit.py            | 22 +++++++++-------------
 test/test_nn.py             |  8 +++-----
 test/test_quantization.py   | 20 +++++++++++---------
 test/test_quantized.py      | 14 ++++++--------
 test/test_quantized_conv.py | 11 ++++++-----
 ubsan.supp                  |  1 -
 6 files changed, 35 insertions(+), 41 deletions(-)

diff --git a/test/test_jit.py b/test/test_jit.py
index 7e660d214b95..f38bfbb5a76e 100644
--- a/test/test_jit.py
+++ b/test/test_jit.py
@@ -6586,10 +6586,9 @@ a")
         a = A()
         self.assertEqual(a.with_docstring.__doc__, 'test str')
 
-    @unittest.skipIf(TEST_WITH_UBSAN or not torch.fbgemm_is_cpu_supported(),
-                     'Quantized RNN requires FBGEMM. FBGEMM does not play'
-                     ' well with UBSAN at the moment, so we skip the test if'
-                     ' we are in a UBSAN environment.')
+    @unittest.skipIf(not torch.fbgemm_is_cpu_supported(),
+                     'Quantized RNN requires FBGEMM. FBGEMM is only optimized for CPUs'
+                     ' with instruction set support avx2 or newer.')
     def test_rnn_cell_quantized(self):
         d_in, d_hid = 2, 2
 
@@ -6681,10 +6680,9 @@ a")
         for out, ref_out in zip(outs, ref_outs):
             torch.testing.assert_allclose(out, ref_out)
 
-    @unittest.skipIf(TEST_WITH_UBSAN or not torch.fbgemm_is_cpu_supported(),
-                     'Quantized RNN requires FBGEMM. FBGEMM does not play'
-                     ' well with UBSAN at the moment, so we skip the test if'
-                     ' we are in a UBSAN environment.')
+    @unittest.skipIf(not torch.fbgemm_is_cpu_supported(),
+                     'Quantized RNN requires FBGEMM. FBGEMM is only optimized for CPUs'
+                     ' with instruction set support avx2 or newer.')
     def test_rnn_quantized(self):
         d_in, d_hid = 2, 2
 
@@ -11450,9 +11448,7 @@ a")
         traced = torch.jit.trace(foo, torch.rand(3, 4),
                                  check_inputs=[(torch.rand(3, 4),)])
 
-    # These tests don't work because UBSAN has a false positive about accessing
-    # out of bounds on a dynamically sized struct internal to asmjit
-    if not TEST_WITH_UBSAN and torch.fbgemm_is_cpu_supported():
+    if torch.fbgemm_is_cpu_supported():
         def test_quantization_modules(self):
             K1, N1 = 2, 2
 
@@ -14872,7 +14868,7 @@ class TestEndToEndHybridFrontendModels(JitTestCase):
     def test_snli(self):
         self._test_snli(self, device='cpu')
 
-    if not TEST_WITH_UBSAN and torch.fbgemm_is_cpu_supported():
+    if torch.fbgemm_is_cpu_supported():
         def test_snli_quantized(self):
             self._test_snli(self, device='cpu', quantized=True)
 
@@ -15014,7 +15010,7 @@ class TestEndToEndHybridFrontendModels(JitTestCase):
     def test_vae(self):
         self._test_vae(self, device='cpu')
 
-    if not TEST_WITH_UBSAN and torch.fbgemm_is_cpu_supported():
+    if torch.fbgemm_is_cpu_supported():
         def test_vae_quantized(self):
             self._test_vae(self, device='cpu', quantized=True)
 
diff --git a/test/test_nn.py b/test/test_nn.py
index db4560cfbaf6..c541fe1fb4bb 100644
--- a/test/test_nn.py
+++ b/test/test_nn.py
@@ -36,7 +36,6 @@ from common_nn import NNTestCase, ModuleTest, CriterionTest, TestBase, \
     module_tests, criterion_tests, loss_reference_fns, get_reduction, \
     get_weight, smoothl1loss_reference, kldivloss_reference, \
     ctcloss_reference, new_module_tests
-from common_utils import TEST_WITH_UBSAN
 
 from torch.nn import MultiheadAttention
 
@@ -2594,10 +2593,9 @@ class TestNN(NNTestCase):
     def test_softmax_backward_cuda(self):
         self._test_softmax_backward(torch.device('cuda'))
 
-    @unittest.skipIf(TEST_WITH_UBSAN or not torch.fbgemm_is_cpu_supported(),
-                     'Linear_FP16_weight requires FBGEMM. FBGEMM does not play'
-                     ' well with UBSAN at the moment, so we skip the test if'
-                     ' we are in a UBSAN environment.')
+    @unittest.skipIf(not torch.fbgemm_is_cpu_supported(),
+                     'Linear_FP16_weight requires FBGEMM. FBGEMM is only optimized for CPUs'
+                     ' with instruction set support avx2 or newer.')
     def test_fb_fc_packed(self):
         X = np.random.rand(16, 16).astype(np.float32) - 0.5
         W = np.random.rand(16, 16).astype(np.float32) - 0.5
diff --git a/test/test_quantization.py b/test/test_quantization.py
index b53c34a927e2..fff7d90ad927 100644
--- a/test/test_quantization.py
+++ b/test/test_quantization.py
@@ -6,7 +6,7 @@ import torch.nn.quantized as nnq
 from torch.quantization import \
     quantize, prepare, convert, prepare_qat, quantize_qat, fuse_modules
 
-from common_utils import run_tests, TEST_WITH_UBSAN
+from common_utils import run_tests
 from common_quantization import QuantizationTestCase, SingleLayerLinearModel, \
     SkipQuantModel, QuantStubModel, \
     ModForFusion, ManualLinearQATModel, ManualConvLinearQATModel, test_only_eval_fn, test_only_train_fn
@@ -14,10 +14,11 @@ from common_quantization import QuantizationTestCase, SingleLayerLinearModel, \
 from common_quantization import AnnotatedTwoLayerLinearModel, AnnotatedNestedModel, \
     AnnotatedSubNestedModel, AnnotatedCustomConfigNestedModel
 
-@unittest.skipIf(TEST_WITH_UBSAN or not torch.fbgemm_is_cpu_supported(),
-                 'Quantization requires FBGEMM. FBGEMM does not play'
-                 ' well with UBSAN at the moment, so we skip the test if'
-                 ' we are in a UBSAN environment.')
+@unittest.skipIf(
+    not torch.fbgemm_is_cpu_supported(),
+    " Quantized operations require FBGEMM. FBGEMM is only optimized for CPUs"
+    " with instruction set support avx2 or newer.",
+)
 class PostTrainingQuantTest(QuantizationTestCase):
     def test_single_layer(self):
         r"""Quantize SingleLayerLinearModel which has one Linear module, make sure it is swapped
@@ -240,10 +241,11 @@ class PostTrainingQuantTest(QuantizationTestCase):
         model = quantize(QuantStubModel(), test_only_eval_fn, self.calib_data)
         checkQuantized(model)
 
-@unittest.skipIf(TEST_WITH_UBSAN or not torch.fbgemm_is_cpu_supported(),
-                 'Quantization requires FBGEMM. FBGEMM does not play'
-                 ' well with UBSAN at the moment, so we skip the test if'
-                 ' we are in a UBSAN environment.')
+@unittest.skipIf(
+    not torch.fbgemm_is_cpu_supported(),
+    " Quantized operations require FBGEMM. FBGEMM is only optimized for CPUs"
+    " with instruction set support avx2 or newer.",
+)
 class QuantizationAwareTrainingTest(QuantizationTestCase):
     def test_manual(self):
         model = ManualLinearQATModel()
diff --git a/test/test_quantized.py b/test/test_quantized.py
index e13dbcc9db73..8f4c82d0ad52 100644
--- a/test/test_quantized.py
+++ b/test/test_quantized.py
@@ -312,10 +312,9 @@ class TestQuantizedOps(TestCase):
                                            zero_point=zero_point)
 
 @unittest.skipIf(
-    TEST_WITH_UBSAN or not torch.fbgemm_is_cpu_supported(),
-    " Quantized Linear requires FBGEMM. FBGEMM does not play"
-    " well with UBSAN at the moment, so we skip the test if"
-    " we are in a UBSAN environment.",
+    not torch.fbgemm_is_cpu_supported(),
+    " Quantized operations require FBGEMM. FBGEMM is only optimized for CPUs"
+    " with instruction set support avx2 or newer.",
 )
 class TestQuantizedLinear(unittest.TestCase):
     """Tests the correctness of the quantized linear and linear_relu op."""
@@ -432,10 +431,9 @@ class TestQuantizedLinear(unittest.TestCase):
         np.testing.assert_equal(W_q.q_zero_point(), W_q_origin.q_zero_point())
 
 @unittest.skipIf(
-    TEST_WITH_UBSAN or not torch.fbgemm_is_cpu_supported(),
-    " Quantized convolution requires FBGEMM. FBGEMM does not play"
-    " well with UBSAN at the moment, so we skip the test if"
-    " we are in a UBSAN environment.",
+    not torch.fbgemm_is_cpu_supported(),
+    " Quantized operations require FBGEMM. FBGEMM is only optimized for CPUs"
+    " with instruction set support avx2 or newer.",
 )
 class TestQuantizedConv(unittest.TestCase):
     """Tests the correctness of quantized convolution op."""
diff --git a/test/test_quantized_conv.py b/test/test_quantized_conv.py
index ea94dce0c25e..832b8c9ebc08 100644
--- a/test/test_quantized_conv.py
+++ b/test/test_quantized_conv.py
@@ -13,12 +13,13 @@ from hypothesis import strategies as st
 import hypothesis_utils as hu
 
 from common_quantized import _conv_output_shape
-from common_utils import TestCase, run_tests, TEST_WITH_UBSAN
+from common_utils import TestCase, run_tests
 
-@unittest.skipIf(TEST_WITH_UBSAN or not torch.fbgemm_is_cpu_supported(),
-                 'Quantization requires FBGEMM. FBGEMM does not play'
-                 ' well with UBSAN at the moment, so we skip the test if'
-                 ' we are in a UBSAN environment.')
+@unittest.skipIf(
+    not torch.fbgemm_is_cpu_supported(),
+    " Quantized operations require FBGEMM. FBGEMM is only optimized for CPUs"
+    " with instruction set support avx2 or newer.",
+)
 class QuantizedConvTest(TestCase):
     @given(X=hu.tensor_conv2d(min_batch=1, max_batch=3,
                               min_in_channels=1, max_in_channels=7,
diff --git a/ubsan.supp b/ubsan.supp
index 233429525665..5654515fc3fc 100644
--- a/ubsan.supp
+++ b/ubsan.supp
@@ -1,3 +1,2 @@
 vptr:libtorch.so
 vptr:libcaffe2.so
-bounds:asmjit::Zone::_alloc
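The final hunk drops the `bounds:asmjit::Zone::_alloc` entry from `ubsan.supp`: with the FBGEMM tests now enabled under UBSAN, the suppression that papered over the asmjit bounds false positive (see the comment removed from test_jit.py above) is no longer needed. The remaining `vptr` entries are still consumed through UBSan's standard runtime suppression mechanism, along the lines of (illustrative invocation, not from this patch):

UBSAN_OPTIONS=suppressions=ubsan.supp python test/test_quantized.py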