Add dequantize_linear for JIT pass (#20107)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/20107

Adds a `dequantize_linear` operator so it can be used by the JIT quantization pass, with a corresponding qtensor test.

Reviewed By: nishantpdce

Differential Revision: D15202187

fbshipit-source-id: 7d6274a67fcca695c0425587f35046fecbc2ccdc
This commit is contained in:
Jerry Zhang
2019-05-21 12:15:44 -07:00
committed by Facebook Github Bot
parent cc02a1af61
commit cca923c481
10 changed files with 86 additions and 7 deletions

View File

@ -2804,6 +2804,12 @@ class _TestTorchMixin(object):
rqr = qr.dequantize()
self.assertTrue(np.allclose(r.numpy(), rqr.numpy(), atol=2 / scale))
def test_qtensor_dequantize_linear(self):
    """Check that torch.dequantize_linear applies the affine mapping
    (t - zero_point) * scale and produces a tensor of the requested dtype.

    The original version only called the op and discarded the result, so it
    verified nothing beyond "does not raise"; we now compare against a
    reference computed directly from the quantization parameters.
    """
    t = torch.arange(-10, 10, dtype=torch.int8)
    scale = 3
    zero_point = 2
    qt = torch.dequantize_linear(t, scale, zero_point, torch.float)
    # Reference: dequantization is value-wise (q - zero_point) * scale.
    ref = (t.to(torch.float) - zero_point) * scale
    self.assertEqual(qt.dtype, torch.float)
    self.assertTrue(np.allclose(qt.numpy(), ref.numpy()))
@unittest.skipIf(torch.cuda.device_count() < 2, 'fewer than 2 GPUs detected')
def test_device_guard(self):