Add dequantize_linear for JIT pass (#20107)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/20107

att

Reviewed By: nishantpdce

Differential Revision: D15202187

fbshipit-source-id: 7d6274a67fcca695c0425587f35046fecbc2ccdc
Committed by: Facebook Github Bot
Parent: cc02a1af61
Commit: cca923c481
@@ -2804,6 +2804,12 @@ class _TestTorchMixin(object):
         rqr = qr.dequantize()
         self.assertTrue(np.allclose(r.numpy(), rqr.numpy(), atol=2 / scale))
 
+    def test_qtensor_dequantize_linear(self):
+        t = torch.arange(-10, 10, dtype=torch.int8)
+        scale = 3
+        zero_point = 2
+        qt = torch.dequantize_linear(t, scale, zero_point, torch.float)
+
 
     @unittest.skipIf(torch.cuda.device_count() < 2, 'fewer than 2 GPUs detected')
     def test_device_guard(self):
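For context, a minimal sketch of what the added test exercises, assuming torch.dequantize_linear follows the standard affine dequantization relationship float = (q - zero_point) * scale; the NumPy cross-check below is illustrative only and is not part of the original diff.

import numpy as np
import torch

# Values taken from the test added above.
t = torch.arange(-10, 10, dtype=torch.int8)
scale = 3
zero_point = 2

# New op under test: dequantize an int8 tensor into float using scale/zero_point.
qt = torch.dequantize_linear(t, scale, zero_point, torch.float)

# Assumed reference semantics: (q - zero_point) * scale, computed with NumPy.
expected = (t.numpy().astype(np.float32) - np.float32(zero_point)) * np.float32(scale)
assert np.allclose(qt.numpy(), expected)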