Revert D15445092: Some minor fix to unblock the Bert model quantization

Differential Revision: D15445092

Original commit changeset: 22da41a56ecb

fbshipit-source-id: eca9a85900bf48fe6a9da5cfff61606a10f0c3de
Author: Edward Yang
Date: 2019-05-22 14:22:13 -07:00
Committed by: Facebook Github Bot
Parent: cfc98ae714
Commit: fdb923996d


@@ -20,7 +20,7 @@ class QuantizedLinear(torch.jit.ScriptModule):
         self.weight = torch.nn.Parameter(self.weight, requires_grad=False)
         self.col_offsets = torch.nn.Parameter(self.col_offsets, requires_grad=False)
         assert other.bias is not None, 'QuantizedLinear requires a bias'
-        self.bias = torch.nn.Parameter(other.bias.clone().float(), requires_grad=False)
+        self.bias = torch.nn.Parameter(other.bias.clone().float())
         self.register_buffer(
             'packed_tensor_ptr',
@@ -42,7 +42,7 @@ class QuantizedLinear(torch.jit.ScriptModule):
         out = torch.fbgemm_linear_int8_weight(
             input.float(), self.weight, self.packed_tensor_ptr, self.col_offsets,
             self.scale, self.zero_point, self.bias)
-        return out.float()
+        return out.type_as(input)

     def extra_repr(self):
         repr = 'in_features={in_features}, out_features={out_features}, ' \
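The two hunks toggle only two details: whether the bias Parameter is marked requires_grad=False, and whether forward returns float32 unconditionally or follows the activation's dtype. A minimal standalone sketch of both behaviours, using placeholder tensors rather than the real fbgemm path:

import torch

# Bias construction: requires_grad=False freezes the parameter;
# the default (requires_grad=True) leaves it trainable.
bias = torch.randn(8)
frozen = torch.nn.Parameter(bias.clone().float(), requires_grad=False)
trainable = torch.nn.Parameter(bias.clone().float())
print(frozen.requires_grad, trainable.requires_grad)  # False True

# Output dtype: .float() always yields float32, while .type_as(input)
# follows the activation's dtype (float16 input -> float16 output).
input = torch.randn(2, 4).half()  # stand-in for a half-precision activation
out = torch.randn(2, 8)           # stand-in for the kernel's float32 output
print(out.float().dtype)          # torch.float32
print(out.type_as(input).dtype)   # torch.float16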