Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 21:14:14 +08:00)
fbgemm precision argument (#20790)
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/20790
att
Reviewed By: jianyuh
Differential Revision: D15445903
fbshipit-source-id: fd338aea55e40eecc780be881e67417679e2ea35
Committed by: Facebook Github Bot
Parent: c4a3b4d528
Commit: 77651615c8
@@ -60,7 +60,7 @@ T quantize_val(float scale, int32_t zero_point, float value) {
   // _MM_FROUND_CUR_DIRECTION option that also follow the current rounding mode.
   int32_t qvalue;
   qvalue = fbgemm::Quantize<typename T::underlying>(value, zero_point, scale,
-                                                    /*result_precision=*/std::numeric_limits<typename T::underlying>::digits);
+                                                    /*result_precision=*/CHAR_BIT * sizeof(typename T::underlying));
   return static_cast<T>(qvalue);
 }
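The summary does not spell out the motivation, but the likely reason for switching the precision argument is that std::numeric_limits<T>::digits excludes the sign bit for signed types, so for int8_t it reports 7 rather than the full 8-bit storage width, whereas CHAR_BIT * sizeof(T) always yields the storage width. A minimal standalone check of that difference, independent of fbgemm:

#include <climits>
#include <cstdint>
#include <limits>

// For signed types, numeric_limits<>::digits counts only value bits and
// excludes the sign bit; CHAR_BIT * sizeof() counts the full storage width.
static_assert(std::numeric_limits<int8_t>::digits == 7, "sign bit excluded");
static_assert(std::numeric_limits<uint8_t>::digits == 8, "no sign bit to exclude");
static_assert(CHAR_BIT * sizeof(int8_t) == 8, "full 8-bit storage width");
static_assert(CHAR_BIT * sizeof(uint8_t) == 8, "full 8-bit storage width");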
@@ -93,7 +93,7 @@ Tensor dequantize_tensor(Tensor qtensor, Tensor rtensor, float scale, int32_t ze
   fbgemm::TensorQuantizationParams qparams;
   qparams.scale = scale;
   qparams.zero_point = zero_point;
-  qparams.precision = std::numeric_limits<typename T::underlying>::digits;
+  qparams.precision = CHAR_BIT * sizeof(typename T::underlying);
   float* rd = rtensor.data<float>();
   fbgemm::Dequantize<typename T::underlying>(/*src=*/qd,
                                              /*dst=*/rd,
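As in the first hunk, qparams.precision now carries the full bit width of the underlying integer type. For orientation, here is a minimal, self-contained sketch of the affine quantize/dequantize arithmetic that such scale/zero_point/precision parameters describe. It is an illustration only, not fbgemm's actual implementation, and quantize_u8/dequantize_u8 are hypothetical helper names.

#include <algorithm>
#include <climits>
#include <cmath>
#include <cstdint>
#include <iostream>

// Illustrative affine quantization (not fbgemm's code). 'precision' plays the
// role of qparams.precision above: for an unsigned 8-bit type it bounds the
// quantized range to [0, 2^precision - 1].
uint8_t quantize_u8(float value, float scale, int32_t zero_point,
                    int precision = CHAR_BIT * sizeof(uint8_t)) {
  const int32_t qmax = (1 << precision) - 1;
  // Round in the current rounding mode (cf. the _MM_FROUND_CUR_DIRECTION
  // comment in the first hunk), shift by the zero point, then clamp.
  int32_t q = static_cast<int32_t>(std::nearbyint(value / scale)) + zero_point;
  return static_cast<uint8_t>(std::min(std::max(q, int32_t{0}), qmax));
}

float dequantize_u8(uint8_t q, float scale, int32_t zero_point) {
  return scale * (static_cast<int32_t>(q) - zero_point);
}

int main() {
  const float scale = 0.5f;
  const int32_t zero_point = 128;
  const uint8_t q = quantize_u8(3.0f, scale, zero_point);  // 3.0 / 0.5 + 128 = 134
  std::cout << +q << " -> " << dequantize_u8(q, scale, zero_point) << "\n";  // 134 -> 3
}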