Fix default instantiation of dynamic quantized LSTM
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/31433

Test Plan: Imported from OSS

Differential Revision: D19164539

Pulled By: jamesr66a

fbshipit-source-id: 7045817ab3dfb530c4480a10523c4c6bcdbfc7eb
committed by Facebook Github Bot
parent 1e80ff7a67
commit a3cdb7eca3
@@ -695,6 +695,33 @@ class PostTrainingDynamicQuantTest(QuantizationTestCase):
                 else:
                     self.assertEqual(packed_val, ref_val)
 
+        # Test default instantiation
+        seq_len = 128
+        batch = 16
+        input_size = 3
+        hidden_size = 7
+        num_layers = 2
+        bias = True
+        bidirectional = False
+
+        x = torch.rand(seq_len, batch, input_size)
+        h = torch.rand(num_layers * (bidirectional + 1), batch, hidden_size)
+        c = torch.rand(num_layers * (bidirectional + 1), batch, hidden_size)
+
+        dtype = torch.qint8
+
+        cell_dq = torch.nn.quantized.dynamic.LSTM(input_size=input_size,
+                                                   hidden_size=hidden_size,
+                                                   num_layers=num_layers,
+                                                   bias=bias,
+                                                   batch_first=False,
+                                                   dropout=0.0,
+                                                   bidirectional=bidirectional,
+                                                   dtype=dtype)
+
+        y, (h, c) = cell_dq(x, (h, c))
+
+
     @unittest.skipUnless('fbgemm' in torch.backends.quantized.supported_engines,
                          " Quantized operations require FBGEMM. FBGEMM is only optimized for CPUs"
                          " with instruction set support avx2 or newer.")
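The test above builds torch.nn.quantized.dynamic.LSTM by calling its constructor directly, which is the "default instantiation" path this fix exercises. For comparison, here is a minimal sketch of the usual conversion path from a float module, assuming a PyTorch build with a dynamic-quantization backend such as FBGEMM available; the wrapper module, tensor shapes, and variable names are illustrative, not part of this commit:

import torch
import torch.nn as nn

class Wrapper(nn.Module):
    """Small float model holding the LSTM to be dynamically quantized."""
    def __init__(self):
        super().__init__()
        self.lstm = nn.LSTM(input_size=3, hidden_size=7, num_layers=2)

    def forward(self, x, hc):
        return self.lstm(x, hc)

model = Wrapper()

# Convert: LSTM weights are quantized to qint8 up front, while activations
# are quantized dynamically at run time.
qmodel = torch.quantization.quantize_dynamic(model, {nn.LSTM}, dtype=torch.qint8)

# The converted submodule is the same class the test above constructs directly.
assert isinstance(qmodel.lstm, torch.nn.quantized.dynamic.LSTM)

# Forward pass with random inputs: (seq_len, batch, input_size) = (128, 16, 3).
x = torch.rand(128, 16, 3)
h0 = torch.rand(2, 16, 7)
c0 = torch.rand(2, 16, 7)
y, (hn, cn) = qmodel(x, (h0, c0))

Constructing the module directly, as the new test does, skips this float-to-quantized round trip, which is why the constructor's default arguments need their own coverage.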