Add qint8 type (int8_t) (#19984)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/19984

Add qint8 for QTensor, with underlying type of int8_t

Reviewed By: jianyuh

Differential Revision: D15150715

fbshipit-source-id: 57580f599d46f9323af5ce462dbbc464b25e40d7
This commit is contained in:
Jerry Zhang
2019-05-17 20:29:33 -07:00
committed by Facebook Github Bot
parent 986c9eb537
commit 85fad0597c
13 changed files with 50 additions and 15 deletions

View File

@ -29,8 +29,9 @@ namespace c10 {
_(std::complex<float>, ComplexFloat, z) /* 9 */ \
_(std::complex<double>, ComplexDouble, z) /* 10 */ \
_(bool, Bool, i) /* 11 */ \
_(c10::quint8, QUInt8, i) /* 12 */ \
_(c10::qint32, QInt32, i) /* 13 */
_(c10::qint8, QInt8, i) /* 12 */ \
_(c10::quint8, QUInt8, i) /* 13 */ \
_(c10::qint32, QInt32, i) /* 14 */
// If you want to support ComplexHalf for real, replace occurrences
// of this macro with AT_FORALL_SCALAR_TYPES_WITH_COMPLEX. But
@ -47,6 +48,7 @@ namespace c10 {
_(std::complex<float>, ComplexFloat, z) \
_(std::complex<double>, ComplexDouble, z) \
_(bool, Bool, i) \
_(c10::qint8, QInt8, i) \
_(c10::quint8, QUInt8, i) \
_(c10::qint32, QInt32, i)
@ -72,6 +74,7 @@ namespace c10 {
_(at::Half, Half, d) \
_(float, Float, d) \
_(double, Double, d) \
_(c10::qint8, QInt8, i) \
_(c10::quint8, QUInt8, i) \
_(c10::qint32, QInt32, i)
@ -104,6 +107,7 @@ namespace c10 {
_(int64_t, Long, i) \
_(float, Float, d) \
_(double, Double, d) \
_(c10::qint8, QInt8, i) \
_(c10::quint8, QUInt8, i) \
_(c10::qint32, QInt32, i)
@ -227,7 +231,7 @@ static inline bool isComplexType(ScalarType t) {
static inline bool isQIntType(ScalarType t) {
// Don't forget to extend this when adding new QInt types
return t == ScalarType::QUInt8 || t == ScalarType::QInt32;
return t == ScalarType::QInt8 || t == ScalarType::QUInt8 || t == ScalarType::QInt32;
}
static inline ScalarType promoteTypes(ScalarType a, ScalarType b) {

17
c10/util/qint8.h Normal file
View File

@ -0,0 +1,17 @@
#pragma once
#include <cstdint>
namespace c10 {
/**
 * qint8 is the data type for signed 8-bit quantized Tensors (underlying
 * int8_t). It sits alongside quint8 (unsigned 8-bit) and qint32 (32-bit);
 * we might have 4 bit, 2 bit or 1 bit data types in the future.
 */
struct alignas(1) qint8 {
  // Exposed so generic code can recover the raw storage type.
  using underlying = int8_t;
  // Raw quantized value; interpretation requires an external scale/zero_point.
  int8_t val_;
  // Default construction leaves val_ uninitialized, matching plain int8_t
  // semantics; required so the type works with resize()/std::array etc.
  qint8() = default;
  // explicit: a quantized value is not interchangeable with a plain int8_t.
  explicit qint8(int8_t val) : val_(val) {}
};
} // namespace c10

View File

@ -82,8 +82,9 @@ CAFFE_DEFINE_PREALLOCATED_KNOWN_TYPE(
CAFFE_DEFINE_PREALLOCATED_KNOWN_TYPE(27, float*)
CAFFE_DEFINE_PREALLOCATED_KNOWN_TYPE(28, at::Half*)
CAFFE_DEFINE_PREALLOCATED_KNOWN_TYPE(29, c10::quint8)
CAFFE_DEFINE_PREALLOCATED_KNOWN_TYPE(30, c10::qint32)
CAFFE_DEFINE_PREALLOCATED_KNOWN_TYPE(31, _CaffeHighestPreallocatedTypeId)
CAFFE_DEFINE_PREALLOCATED_KNOWN_TYPE(29, c10::qint8)
CAFFE_DEFINE_PREALLOCATED_KNOWN_TYPE(30, c10::quint8)
CAFFE_DEFINE_PREALLOCATED_KNOWN_TYPE(31, c10::qint32)
CAFFE_DEFINE_PREALLOCATED_KNOWN_TYPE(32, _CaffeHighestPreallocatedTypeId)
} // namespace caffe2

View File

@ -23,6 +23,7 @@
#include <c10/util/Exception.h>
#include <c10/util/Half.h>
#include <c10/util/IdWrapper.h>
#include <c10/util/qint8.h>
#include <c10/util/quint8.h>
#include <c10/util/qint32.h>
#include <c10/util/Type.h>
@ -625,8 +626,9 @@ CAFFE_DECLARE_PREALLOCATED_KNOWN_TYPE(
CAFFE_DECLARE_PREALLOCATED_KNOWN_TYPE(27, float*)
CAFFE_DECLARE_PREALLOCATED_KNOWN_TYPE(28, at::Half*)
CAFFE_DECLARE_PREALLOCATED_KNOWN_TYPE(29, c10::quint8)
CAFFE_DECLARE_PREALLOCATED_KNOWN_TYPE(30, c10::qint32)
CAFFE_DECLARE_PREALLOCATED_KNOWN_TYPE(31, _CaffeHighestPreallocatedTypeId)
CAFFE_DECLARE_PREALLOCATED_KNOWN_TYPE(29, c10::qint8)
CAFFE_DECLARE_PREALLOCATED_KNOWN_TYPE(30, c10::quint8)
CAFFE_DECLARE_PREALLOCATED_KNOWN_TYPE(31, c10::qint32)
CAFFE_DECLARE_PREALLOCATED_KNOWN_TYPE(32, _CaffeHighestPreallocatedTypeId)
} // namespace caffe2