mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-21 13:44:15 +08:00
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/52323 Using the default CPU allocator for ops executed on the qnnpack backend will result in ASAN failures with heap overflow, since qnnpack (and xnnpack) can access input beyond its end and/or beginning. Here we are enabling this feature specifically to enable the dynamic sparse linear op test using the qnnpack engine. In the dynamic linear op, the fp32 bias is not packed and hence can result in out-of-bound access. Test Plan: test_set_default_mobile_cpu_allocator.py Reviewed By: z-a-f Differential Revision: D26263481 fbshipit-source-id: a49227cac7e6781b0db4a156ca734d7671972d9f
28 lines
944 B
Python
28 lines
944 B
Python
import torch
|
|
from torch.testing._internal.common_utils import TestCase, run_tests
|
|
|
|
class TestSetDefaultMobileCPUAllocator(TestCase):
    """Tests for toggling the default mobile CPU allocator via torch._C.

    Per the PR summary for this file, the mobile CPU allocator exists because
    QNNPACK/XNNPACK kernels may read slightly past a buffer's bounds, which
    trips ASAN under the regular CPU allocator. These tests only exercise the
    set/unset toggle itself, not any allocation behavior.

    NOTE(review): set/unset appear to maintain a single global "is set" flag —
    confirm against the C++ binding; the assertions below assume exactly that.
    """

    def test_no_exception(self):
        # The supported usage: one set followed by one unset must not raise.
        torch._C._set_default_mobile_cpu_allocator()
        torch._C._unset_default_mobile_cpu_allocator()

    def test_exception(self):
        # Unset without a prior set must raise.
        with self.assertRaises(Exception):
            torch._C._unset_default_mobile_cpu_allocator()

        # Double set must raise. Statement order matters: the first set
        # succeeds (leaving the allocator set), the second raises and exits
        # the context early.
        with self.assertRaises(Exception):
            torch._C._set_default_mobile_cpu_allocator()
            torch._C._set_default_mobile_cpu_allocator()

        # Must reset to good state
        # For next test.
        # (The first set in the block above succeeded, so the allocator is
        # still set at this point and needs one matching unset.)
        torch._C._unset_default_mobile_cpu_allocator()

        # Extra unset must raise: set and first unset succeed as a balanced
        # pair, then the second unset raises — leaving the allocator cleanly
        # unset after the block.
        with self.assertRaises(Exception):
            torch._C._set_default_mobile_cpu_allocator()
            torch._C._unset_default_mobile_cpu_allocator()
            torch._C._unset_default_mobile_cpu_allocator()
|
|
|
|
# Entry point: delegate to the shared PyTorch test runner when this file is
# executed directly as a script.
if __name__ == "__main__":
    run_tests()