mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-21 13:44:15 +08:00
Compare commits
100 Commits
v2.4.1-rc1
...
cslpull81
Author | SHA1 | Date | |
---|---|---|---|
3ddec713b8 | |||
85eeb90d2c | |||
7f6daf289b | |||
3d55d84ec2 | |||
bb2a995529 | |||
9538bf4e7c | |||
219da29dfd | |||
fb013ecb24 | |||
6af4c6acad | |||
786c24a4cd | |||
5d8c7f39d4 | |||
c9c1fed065 | |||
94fea82d66 | |||
447173198b | |||
b79d056e76 | |||
eb567b1f40 | |||
1dd2431f86 | |||
5fcb5f0c8b | |||
a55d0d9718 | |||
8c1247cffb | |||
70a1e85718 | |||
adb699189b | |||
45dccfddcd | |||
3e09123797 | |||
61f922c2ca | |||
984b1a8c35 | |||
205410cb44 | |||
cac7a22b92 | |||
8a09940a54 | |||
1d233b8f50 | |||
491c4a5dcb | |||
4345d98663 | |||
a838e90964 | |||
29081059b6 | |||
f8c45996d5 | |||
c13e03c874 | |||
053930e194 | |||
9a38cae299 | |||
55901fb3da | |||
fc77fdca6f | |||
648625b230 | |||
207c2248a8 | |||
a206dcc79e | |||
f2d7f235a6 | |||
402b289f3b | |||
a32157c67c | |||
24e7f29099 | |||
5b5d269d34 | |||
fa88f390a0 | |||
fe39c07826 | |||
cba195c8ed | |||
16e67be7f1 | |||
7afffdf48b | |||
ca45649eb5 | |||
665e568381 | |||
4077cdd589 | |||
e4bd0adca5 | |||
793df7b7cb | |||
d1d9bc7aa6 | |||
841d87177a | |||
3b555ba477 | |||
734e8f6ad7 | |||
99f5a85a09 | |||
f843ccbb1a | |||
30875953a4 | |||
2126ae186e | |||
739aa224ec | |||
b2d602306a | |||
05711eece9 | |||
a287ff75d0 | |||
4bbadeee8a | |||
2176ef7dfa | |||
583a56d5a8 | |||
c38b3381a1 | |||
a2d4fea872 | |||
58083ffb10 | |||
6630dcd53c | |||
3a2d0755a4 | |||
b459713ca7 | |||
4460e481bc | |||
90bb510ece | |||
38e0a0440c | |||
946f554c8f | |||
55646554b7 | |||
9cab5987bd | |||
db2fa7b827 | |||
093a4ff5f8 | |||
fa8ec8e718 | |||
136bdb96cb | |||
83941482f7 | |||
08d038f8a8 | |||
46948300a2 | |||
ab3a0b192a | |||
8e482e909b | |||
7b9c5e0e3f | |||
ca561d639b | |||
d22287d1ad | |||
3b73f5de3a | |||
c993f1b37f | |||
04da6aeb61 |
@ -62,4 +62,6 @@ readability-string-compare,
|
||||
'
|
||||
HeaderFilterRegex: '^(aten/|c10/|torch/).*$'
|
||||
WarningsAsErrors: '*'
|
||||
CheckOptions:
|
||||
misc-header-include-cycle.IgnoredFilesList: 'format.h;ivalue.h;custom_class.h;Dict.h;List.h'
|
||||
...
|
||||
|
@ -1099,7 +1099,6 @@ exclude_patterns = [
|
||||
'test/test_namedtuple_return_api.py',
|
||||
'test/test_native_functions.py',
|
||||
'test/test_native_mha.py',
|
||||
'test/test_nestedtensor.py',
|
||||
'test/test_nn.py',
|
||||
'test/test_out_dtype_op.py',
|
||||
'test/test_overrides.py',
|
||||
|
@ -461,15 +461,8 @@ filegroup(
|
||||
filegroup(
|
||||
name = "caffe2_perfkernels_srcs",
|
||||
srcs = [
|
||||
"caffe2/perfkernels/adagrad.cc",
|
||||
"caffe2/perfkernels/embedding_lookup.cc",
|
||||
"caffe2/perfkernels/embedding_lookup_idx.cc",
|
||||
"caffe2/perfkernels/fused_8bit_rowwise_embedding_lookup.cc",
|
||||
"caffe2/perfkernels/fused_8bit_rowwise_embedding_lookup_idx.cc",
|
||||
"caffe2/perfkernels/fused_nbit_rowwise_conversion.cc",
|
||||
"caffe2/perfkernels/lstm_unit_cpu_common.cc",
|
||||
"caffe2/perfkernels/math_cpu_base.cc",
|
||||
"caffe2/perfkernels/typed_axpy.cc",
|
||||
],
|
||||
)
|
||||
|
||||
|
@ -40,7 +40,7 @@ Important Note: The trustworthiness of a model is not binary. You must always de
|
||||
|
||||
### Untrusted inputs during training and prediction
|
||||
|
||||
If you plan to open your model to untrusted inputs, be aware that inputs can also be used as vectors by malicious agents. To minimize risks, make sure to give your model only the permisisons strictly required, and keep your libraries updated with the lates security patches.
|
||||
If you plan to open your model to untrusted inputs, be aware that inputs can also be used as vectors by malicious agents. To minimize risks, make sure to give your model only the permissions strictly required, and keep your libraries updated with the latest security patches.
|
||||
|
||||
If applicable, prepare your model against bad inputs and prompt injections. Some recommendations:
|
||||
- Pre-analysis: check how the model performs by default when exposed to prompt injection (e.g. using fuzzing for prompt injection).
|
||||
|
@ -385,8 +385,11 @@ class TORCH_API Context {
|
||||
? at::LinalgBackend::Cusolver
|
||||
: at::LinalgBackend::Default;
|
||||
at::BlasBackend blas_preferred_backend =
|
||||
(c10::utils::check_env("TORCH_BLAS_PREFER_CUBLASLT") == true ||
|
||||
c10::utils::check_env("TORCH_BLAS_PREFER_HIPBLASLT") == true)
|
||||
#ifdef USE_ROCM
|
||||
(c10::utils::check_env("TORCH_BLAS_PREFER_HIPBLASLT") != false)
|
||||
#else
|
||||
(c10::utils::check_env("TORCH_BLAS_PREFER_CUBLASLT") == true)
|
||||
#endif
|
||||
? at::BlasBackend::Cublaslt
|
||||
: at::BlasBackend::Cublas;
|
||||
#ifdef C10_MOBILE
|
||||
|
@ -143,7 +143,7 @@ static Device getATenDevice(const DLDevice& ctx, void* data) {
|
||||
return at::detail::getXPUHooks().getDeviceFromPtr(data);
|
||||
default:
|
||||
TORCH_CHECK(
|
||||
false, "Unsupported device_type: " + c10::to_string(ctx.device_type));
|
||||
false, "Unsupported device_type: ", std::to_string(ctx.device_type));
|
||||
}
|
||||
}
|
||||
|
||||
@ -167,7 +167,7 @@ ScalarType toScalarType(const DLDataType& dtype) {
|
||||
break;
|
||||
default:
|
||||
TORCH_CHECK(
|
||||
false, "Unsupported kUInt bits " + c10::to_string(dtype.bits));
|
||||
false, "Unsupported kUInt bits ", std::to_string(dtype.bits));
|
||||
}
|
||||
break;
|
||||
case DLDataTypeCode::kDLInt:
|
||||
@ -186,7 +186,7 @@ ScalarType toScalarType(const DLDataType& dtype) {
|
||||
break;
|
||||
default:
|
||||
TORCH_CHECK(
|
||||
false, "Unsupported kInt bits " + c10::to_string(dtype.bits));
|
||||
false, "Unsupported kInt bits ", std::to_string(dtype.bits));
|
||||
}
|
||||
break;
|
||||
case DLDataTypeCode::kDLFloat:
|
||||
@ -202,7 +202,7 @@ ScalarType toScalarType(const DLDataType& dtype) {
|
||||
break;
|
||||
default:
|
||||
TORCH_CHECK(
|
||||
false, "Unsupported kFloat bits " + c10::to_string(dtype.bits));
|
||||
false, "Unsupported kFloat bits ", std::to_string(dtype.bits));
|
||||
}
|
||||
break;
|
||||
case DLDataTypeCode::kDLBfloat:
|
||||
@ -212,7 +212,7 @@ ScalarType toScalarType(const DLDataType& dtype) {
|
||||
break;
|
||||
default:
|
||||
TORCH_CHECK(
|
||||
false, "Unsupported kFloat bits " + c10::to_string(dtype.bits));
|
||||
false, "Unsupported kFloat bits ", std::to_string(dtype.bits));
|
||||
}
|
||||
break;
|
||||
case DLDataTypeCode::kDLComplex:
|
||||
@ -228,7 +228,7 @@ ScalarType toScalarType(const DLDataType& dtype) {
|
||||
break;
|
||||
default:
|
||||
TORCH_CHECK(
|
||||
false, "Unsupported kFloat bits " + c10::to_string(dtype.bits));
|
||||
false, "Unsupported kFloat bits ", std::to_string(dtype.bits));
|
||||
}
|
||||
break;
|
||||
case DLDataTypeCode::kDLBool:
|
||||
@ -238,11 +238,11 @@ ScalarType toScalarType(const DLDataType& dtype) {
|
||||
break;
|
||||
default:
|
||||
TORCH_CHECK(
|
||||
false, "Unsupported kDLBool bits " + c10::to_string(dtype.bits));
|
||||
false, "Unsupported kDLBool bits ", std::to_string(dtype.bits));
|
||||
}
|
||||
break;
|
||||
default:
|
||||
TORCH_CHECK(false, "Unsupported code " + c10::to_string(dtype.code));
|
||||
TORCH_CHECK(false, "Unsupported code ", std::to_string(dtype.code));
|
||||
}
|
||||
return stype;
|
||||
}
|
||||
@ -298,9 +298,7 @@ Tensor fromDLPack(DLManagedTensor* src) {
|
||||
return fromDLPack(src, std::move(deleter));
|
||||
}
|
||||
|
||||
Tensor fromDLPack(
|
||||
DLManagedTensor* src,
|
||||
std::function<void(void*)> deleter) {
|
||||
Tensor fromDLPack(DLManagedTensor* src, std::function<void(void*)> deleter) {
|
||||
Device device = getATenDevice(src->dl_tensor.device, src->dl_tensor.data);
|
||||
ScalarType stype = toScalarType(src->dl_tensor.dtype);
|
||||
if (!src->dl_tensor.strides) {
|
||||
|
@ -462,7 +462,7 @@ inline Tensor _sum_to(
|
||||
reduce_dims.push_back(i);
|
||||
}
|
||||
for (int64_t i = leading_dims; i < static_cast<int64_t>(sizes.size()); ++i) {
|
||||
if (shape[i - leading_dims] == 1 &&
|
||||
if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_eq(shape[i - leading_dims], 1)) &&
|
||||
TORCH_GUARD_SIZE_OBLIVIOUS(sym_ne(sizes[i], 1))) {
|
||||
reduce_dims.push_back(i);
|
||||
}
|
||||
|
@ -19,7 +19,13 @@ MemOverlap has_internal_overlap(TensorImpl* t) {
|
||||
auto strides = t->sym_strides();
|
||||
auto sizes = t->sym_sizes();
|
||||
for (const auto i : c10::irange(strides.size())) {
|
||||
if (strides[i] == 0 && sizes[i] > 1) {
|
||||
// NB: The size oblivious test is written very carefully here. When
|
||||
// unbacked SymInts are involved, we should try to conservatively report
|
||||
// if memory overlap /could/ happen under some setting of unbacked
|
||||
// SymInts. Thus, if I have u0 size, we should assume that this has > 1
|
||||
// elements (first expression), but if I have a u0 stride, I should NOT
|
||||
// assume that it is not zero (second expression)
|
||||
if (TORCH_GUARD_SIZE_OBLIVIOUS(sizes[i].sym_gt(1)) && strides[i] == 0) {
|
||||
return MemOverlap::Yes;
|
||||
}
|
||||
}
|
||||
|
@ -22,7 +22,6 @@
|
||||
#endif
|
||||
|
||||
#include <c10/util/irange.h>
|
||||
#include <c10/util/string_utils.h>
|
||||
#include <c10/util/SmallBuffer.h>
|
||||
|
||||
#include <array>
|
||||
@ -1398,7 +1397,7 @@ bool TensorIteratorBase::fast_set_up(const TensorIteratorConfig& config) {
|
||||
break;
|
||||
}
|
||||
default:
|
||||
TORCH_INTERNAL_ASSERT(false, "Unsupported fast setup type", c10::to_string((int)setup_type));
|
||||
TORCH_INTERNAL_ASSERT(false, "Unsupported fast setup type", std::to_string((int)setup_type));
|
||||
}
|
||||
//coalescing dimensions consists of collapsing dimensions to 1 (we are limited to contiguous no-broadcast cases here)
|
||||
if (ndim() > 1){
|
||||
|
@ -31,7 +31,7 @@ struct TemplateEnv {
|
||||
// Add a number 'v' to the map at key 'k'
|
||||
template <typename T>
|
||||
void d(const std::string& k, const T& v) {
|
||||
strings_[k] = c10::to_string(v);
|
||||
strings_[k] = std::to_string(v);
|
||||
lists_.erase(k);
|
||||
}
|
||||
|
||||
|
@ -478,8 +478,6 @@ namespace impl {
|
||||
// (maybe except for some internal prim ops).
|
||||
using GenericList = List<IValue>;
|
||||
|
||||
const IValue* ptr_to_first_element(const GenericList& list);
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -350,11 +350,4 @@ void List<T>::unsafeSetElementType(TypePtr t) {
|
||||
impl_->elementType = std::move(t);
|
||||
}
|
||||
|
||||
namespace impl {
|
||||
|
||||
inline const IValue* ptr_to_first_element(const GenericList& list) {
|
||||
return &list.impl_->list[0];
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
@ -440,15 +440,6 @@ TORCH_IMPL_FUNC(log_softmax_backward_cpu_out) (
|
||||
}
|
||||
}
|
||||
|
||||
static Tensor softmax(const Tensor& input_, const int64_t dim_) {
|
||||
auto result = [&]() {
|
||||
NoNamesGuard guard;
|
||||
return at::_softmax(input_, dim_, false);
|
||||
}();
|
||||
namedinference::propagate_names(result, input_);
|
||||
return result;
|
||||
}
|
||||
|
||||
Tensor softmax(const Tensor& input_, const int64_t dim_, std::optional<ScalarType> dtype) {
|
||||
auto result = [&]() {
|
||||
NoNamesGuard guard;
|
||||
@ -505,15 +496,6 @@ Tensor special_softmax(const Tensor& input_, const int64_t dim_, std::optional<S
|
||||
return at::softmax(input_, dim_, dtype);
|
||||
}
|
||||
|
||||
static Tensor log_softmax(const Tensor& input_, const int64_t dim_) {
|
||||
auto result = [&]() {
|
||||
NoNamesGuard guard;
|
||||
return at::_log_softmax(input_, dim_, false);
|
||||
}();
|
||||
namedinference::propagate_names(result, input_);
|
||||
return result;
|
||||
}
|
||||
|
||||
Tensor log_softmax(const Tensor& input_, const int64_t dim_, std::optional<ScalarType> dtype) {
|
||||
auto result = [&]() {
|
||||
NoNamesGuard guard;
|
||||
|
@ -1195,15 +1195,6 @@ Tensor istft(const Tensor& self, const int64_t n_fft, const optional<int64_t> ho
|
||||
#undef REPR
|
||||
}
|
||||
|
||||
static Tensor istft(const Tensor& self, const int64_t n_fft, const optional<int64_t> hop_lengthOpt,
|
||||
const optional<int64_t> win_lengthOpt, const Tensor& window,
|
||||
const bool center, const bool normalized, const optional<bool> onesidedOpt,
|
||||
const optional<int64_t> lengthOpt) {
|
||||
return at::native::istft(
|
||||
self, n_fft, hop_lengthOpt, win_lengthOpt, window, center, normalized,
|
||||
onesidedOpt, lengthOpt, /*return_complex=*/false);
|
||||
}
|
||||
|
||||
void _fft_fill_with_conjugate_symmetry_(const Tensor& input, IntArrayRef dim_) {
|
||||
const auto input_sizes = input.sizes();
|
||||
const auto input_strides = input.strides();
|
||||
|
@ -172,18 +172,10 @@ Tensor arange(
|
||||
return at::arange_out(result, start, end, step);
|
||||
}
|
||||
|
||||
static Tensor& arange_start_out(const Scalar& start, const Scalar& end, Tensor& result) {
|
||||
return at::arange_out(result, start, end, /*step=*/1);
|
||||
}
|
||||
|
||||
Tensor& arange_out(const Scalar& end, Tensor& result) {
|
||||
return at::arange_out(result, /*start=*/0, end, /*step=*/1);
|
||||
}
|
||||
|
||||
static Tensor& arange_out(Tensor& result, const Scalar& start, const Scalar& end) {
|
||||
return at::arange_out(result, start, end, /*step=*/1);
|
||||
}
|
||||
|
||||
Tensor _dim_arange(const Tensor& like, int64_t dim) {
|
||||
return at::arange(like.size(dim), like.options().dtype(at::kLong));
|
||||
}
|
||||
|
@ -105,10 +105,6 @@ Tensor & detach_(Tensor & self) {
|
||||
return self;
|
||||
}
|
||||
|
||||
static Tensor contiguous(const Tensor & self) {
|
||||
return contiguous(self, MemoryFormat::Contiguous);
|
||||
}
|
||||
|
||||
Tensor contiguous(const Tensor& self, MemoryFormat memory_format) {
|
||||
if (self.is_contiguous(memory_format)) {
|
||||
return self;
|
||||
|
@ -210,7 +210,6 @@
|
||||
#include <ATen/ops/zeros_native.h>
|
||||
#endif
|
||||
|
||||
#include <c10/util/StringUtil.h>
|
||||
#include <algorithm>
|
||||
#include <cstdint>
|
||||
#include <utility>
|
||||
@ -1181,14 +1180,6 @@ Tensor as_strided_tensorimpl(const Tensor& self, IntArrayRef size, IntArrayRef s
|
||||
return result;
|
||||
}
|
||||
|
||||
static Tensor as_strided_tensorimpl_meta(const Tensor& self, IntArrayRef size, IntArrayRef stride, optional<int64_t> storage_offset_) {
|
||||
auto storage_offset = storage_offset_.value_or(self.storage_offset());
|
||||
auto result = at::detail::make_tensor<TensorImpl>(
|
||||
c10::TensorImpl::VIEW, Storage(self.storage()), self.key_set(), self.dtype());
|
||||
setStrided(result, size, stride, storage_offset);
|
||||
return result;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
inline void setStridedUnchecked(
|
||||
const Tensor& self,
|
||||
@ -1249,10 +1240,6 @@ const Tensor &as_strided__symint(const Tensor& self, SymIntArrayRef size, SymInt
|
||||
return self;
|
||||
}
|
||||
|
||||
static Tensor narrow_copy_dense(const Tensor& self, int64_t dim, int64_t start, int64_t length) {
|
||||
return self.narrow(dim, start, length).clone(at::MemoryFormat::Contiguous);
|
||||
}
|
||||
|
||||
// Should just use narrow_copy_out, but this API is used internally at Meta:
|
||||
// https://github.com/pytorch/pytorch/pull/87045#issuecomment-1309353561
|
||||
Tensor narrow_copy_dense_cpu(const Tensor& self, int64_t dim, int64_t start, int64_t length){
|
||||
@ -3587,10 +3574,6 @@ Tensor view_as(const Tensor& self, const Tensor& other) {
|
||||
return self.view_symint(other.sym_sizes());
|
||||
}
|
||||
|
||||
static int64_t numel(const Tensor& self) {
|
||||
return self.unsafeGetTensorImpl()->numel();
|
||||
}
|
||||
|
||||
std::vector<Tensor> unbind(const Tensor &self, int64_t dim) {
|
||||
dim = maybe_wrap_dim(dim, self.dim());
|
||||
int64_t size = self.size(dim);
|
||||
|
@ -1002,7 +1002,7 @@ std::string generate_code(
|
||||
std::string extra_args = "";
|
||||
for (size_t i = 0; i < extra_args_typenames.size(); i++) {
|
||||
auto type = std::string(extra_args_typenames[i]);
|
||||
auto name = "extra_arg_" + std::string(to_string(i));
|
||||
auto name = "extra_arg_" + std::to_string(i);
|
||||
extra_params += "," + type + " " + name;
|
||||
extra_args += ", " + name;
|
||||
}
|
||||
|
@ -13,7 +13,8 @@ void run_cudnn_SDP_fprop(
|
||||
int64_t h,
|
||||
int64_t s_q,
|
||||
int64_t s_kv,
|
||||
int64_t d,
|
||||
int64_t d_qk,
|
||||
int64_t d_v,
|
||||
float scaling_factor,
|
||||
bool isTraining,
|
||||
bool is_causal,
|
||||
@ -34,7 +35,8 @@ void run_cudnn_SDP_bprop(
|
||||
int64_t h,
|
||||
int64_t s_q,
|
||||
int64_t s_kv,
|
||||
int64_t d,
|
||||
int64_t d_qk,
|
||||
int64_t d_v,
|
||||
float scaling_factor,
|
||||
bool is_causal,
|
||||
float dropout_probability,
|
||||
@ -128,7 +130,8 @@ struct MHAParams {
|
||||
int64_t h;
|
||||
int64_t s_q;
|
||||
int64_t s_kv;
|
||||
int64_t d;
|
||||
int64_t d_qk;
|
||||
int64_t d_v;
|
||||
double dropout_probability;
|
||||
bool is_causal;
|
||||
bool return_softmaxstats;
|
||||
@ -140,7 +143,8 @@ void setMHAParams(
|
||||
int64_t h,
|
||||
int64_t s_q,
|
||||
int64_t s_kv,
|
||||
int64_t d,
|
||||
int64_t d_qk,
|
||||
int64_t d_v,
|
||||
const Tensor& q,
|
||||
const Tensor& k,
|
||||
const Tensor& v,
|
||||
@ -155,7 +159,8 @@ void setMHAParams(
|
||||
}
|
||||
params.b = b;
|
||||
params.h = h;
|
||||
params.d = d;
|
||||
params.d_qk = d_qk;
|
||||
params.d_v = d_v;
|
||||
params.s_q = s_q;
|
||||
params.s_kv = s_kv;
|
||||
params.dropout_probability = dropout_probability;
|
||||
@ -193,7 +198,8 @@ struct MHACacheKeyWrapper : ParamsWrapper<MHAParams> {
|
||||
int64_t h,
|
||||
int64_t s_q,
|
||||
int64_t s_kv,
|
||||
int64_t d,
|
||||
int64_t d_qk,
|
||||
int64_t d_v,
|
||||
const Tensor& q,
|
||||
const Tensor& k,
|
||||
const Tensor& v,
|
||||
@ -206,7 +212,8 @@ struct MHACacheKeyWrapper : ParamsWrapper<MHAParams> {
|
||||
h,
|
||||
s_q,
|
||||
s_kv,
|
||||
d,
|
||||
d_qk,
|
||||
d_v,
|
||||
q,
|
||||
k,
|
||||
v,
|
||||
@ -249,7 +256,8 @@ auto build_graph_and_tensors(
|
||||
int64_t h,
|
||||
int64_t s_q,
|
||||
int64_t s_kv,
|
||||
int64_t d,
|
||||
int64_t d_qk,
|
||||
int64_t d_v,
|
||||
float scaling_factor,
|
||||
bool return_softmaxstats,
|
||||
bool is_causal,
|
||||
@ -383,7 +391,8 @@ auto build_graph_and_tensors_backward(
|
||||
int64_t h,
|
||||
int64_t s_q,
|
||||
int64_t s_kv,
|
||||
int64_t d,
|
||||
int64_t d_qk,
|
||||
int64_t d_v,
|
||||
float scaling_factor,
|
||||
bool is_causal,
|
||||
float dropout_probability,
|
||||
@ -514,7 +523,8 @@ void run_cudnn_SDP_fprop(
|
||||
int64_t h,
|
||||
int64_t s_q,
|
||||
int64_t s_kv,
|
||||
int64_t d,
|
||||
int64_t d_qk,
|
||||
int64_t d_v,
|
||||
float scaling_factor,
|
||||
bool return_softmaxstats,
|
||||
bool is_causal,
|
||||
@ -528,7 +538,7 @@ void run_cudnn_SDP_fprop(
|
||||
Tensor& dropoutoffset) {
|
||||
cudnnHandle_t handle = getCudnnHandle();
|
||||
o = at::empty_strided(
|
||||
{b, h, s_q, d}, {s_q * h * d, d, h * d, 1}, q.options());
|
||||
{b, h, s_q, d_v}, {s_q * h * d_v, d_v, h * d_v, 1}, q.options());
|
||||
if (return_softmaxstats) {
|
||||
// TODO(eqy): verify that this is correct
|
||||
softmaxstats = at::empty({b, h, s_q}, q.options().dtype(kFloat));
|
||||
@ -539,7 +549,8 @@ void run_cudnn_SDP_fprop(
|
||||
h,
|
||||
s_q,
|
||||
s_kv,
|
||||
d,
|
||||
d_qk,
|
||||
d_v,
|
||||
q,
|
||||
k,
|
||||
v,
|
||||
@ -556,7 +567,8 @@ void run_cudnn_SDP_fprop(
|
||||
h,
|
||||
s_q,
|
||||
s_kv,
|
||||
d,
|
||||
d_qk,
|
||||
d_v,
|
||||
scaling_factor,
|
||||
return_softmaxstats,
|
||||
is_causal,
|
||||
@ -599,7 +611,8 @@ void run_cudnn_SDP_bprop(
|
||||
int64_t h,
|
||||
int64_t s_q,
|
||||
int64_t s_kv,
|
||||
int64_t d,
|
||||
int64_t d_qk,
|
||||
int64_t d_v,
|
||||
float scaling_factor,
|
||||
bool is_causal,
|
||||
float dropout_probability,
|
||||
@ -623,7 +636,18 @@ void run_cudnn_SDP_bprop(
|
||||
}
|
||||
cudnnHandle_t handle = getCudnnHandle();
|
||||
auto key = MHACacheKeyWrapper(
|
||||
b, h, s_q, s_kv, d, q, k, v, dropout_probability, is_causal, true);
|
||||
b,
|
||||
h,
|
||||
s_q,
|
||||
s_kv,
|
||||
d_qk,
|
||||
d_v,
|
||||
q,
|
||||
k,
|
||||
v,
|
||||
dropout_probability,
|
||||
is_causal,
|
||||
true);
|
||||
auto graph_and_tensors_backward_ptr = mhagraphbackwardcache.find(key);
|
||||
graph_and_tensors_backward graph_and_tensors_backward_values;
|
||||
if (graph_and_tensors_backward_ptr) {
|
||||
@ -634,7 +658,8 @@ void run_cudnn_SDP_bprop(
|
||||
h,
|
||||
s_q,
|
||||
s_kv,
|
||||
d,
|
||||
d_qk,
|
||||
d_v,
|
||||
scaling_factor,
|
||||
is_causal,
|
||||
dropout_probability,
|
||||
@ -684,5 +709,4 @@ void run_cudnn_SDP_bprop(
|
||||
|
||||
} // namespace native
|
||||
} // namespace at
|
||||
|
||||
#endif
|
||||
|
@ -9,7 +9,8 @@ void run_cudnn_SDP_fprop(
|
||||
int64_t h,
|
||||
int64_t s_q,
|
||||
int64_t s_kv,
|
||||
int64_t d,
|
||||
int64_t d_k,
|
||||
int64_t d_v,
|
||||
float scaling_factor,
|
||||
bool isTraining,
|
||||
bool is_causal,
|
||||
@ -27,7 +28,8 @@ void run_cudnn_SDP_bprop(
|
||||
int64_t h,
|
||||
int64_t s_q,
|
||||
int64_t s_kv,
|
||||
int64_t d,
|
||||
int64_t d_k,
|
||||
int64_t d_v,
|
||||
float scaling_factor,
|
||||
bool is_causal,
|
||||
float dropout_probability,
|
||||
|
@ -27,53 +27,7 @@ Tensor mkldnn_convolution(
|
||||
TORCH_CHECK(false, "mkldnn_convolution_forward: ATen not compiled with MKLDNN support");
|
||||
}
|
||||
|
||||
static Tensor mkldnn_convolution_backward_input(
|
||||
IntArrayRef input_size, const Tensor& grad_output, const Tensor& weight,
|
||||
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool bias_defined) {
|
||||
TORCH_CHECK(false, "mkldnn_convolution_backward_input: ATen not compiled with MKLDNN support");
|
||||
}
|
||||
|
||||
static std::tuple<Tensor, Tensor> mkldnn_convolution_backward_weights(
|
||||
IntArrayRef weight_size, const Tensor& grad_output, const Tensor& input,
|
||||
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool bias_defined) {
|
||||
TORCH_CHECK(false, "mkldnn_convolution_backward_weights: ATen not compiled with MKLDNN support");
|
||||
}
|
||||
|
||||
static std::tuple<Tensor, Tensor, Tensor> mkldnn_convolution_backward(
|
||||
const Tensor& input, const Tensor& grad_output_t, const Tensor& weight,
|
||||
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, std::array<bool,3> output_mask) {
|
||||
TORCH_CHECK(false, "mkldnn_convolution_backward: ATen not compiled with MKLDNN support");
|
||||
}
|
||||
|
||||
REGISTER_NO_CPU_DISPATCH(mkldnn_convolution_backward_stub);
|
||||
|
||||
static Tensor mkldnn_convolution_transpose(
|
||||
const Tensor& input, const Tensor& weight, const std::optional<Tensor>& bias_opt,
|
||||
IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups) {
|
||||
TORCH_CHECK(false, "mkldnn_convolution_transpose: ATen not compiled with MKLDNN support");
|
||||
}
|
||||
|
||||
static Tensor mkldnn_convolution_transpose_backward_input(
|
||||
IntArrayRef input_size, const Tensor& grad_output, const Tensor& weight,
|
||||
IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation,
|
||||
int64_t groups, bool bias_defined) {
|
||||
TORCH_CHECK(false, "mkldnn_convolution_transpose_backward_input: ATen not compiled with MKLDNN support");
|
||||
}
|
||||
|
||||
static std::tuple<Tensor, Tensor> mkldnn_convolution_transpose_backward_weights(
|
||||
IntArrayRef weight_size, const Tensor& grad_output, const Tensor& input,
|
||||
IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation,
|
||||
int64_t groups, bool bias_defined) {
|
||||
TORCH_CHECK(false, "mkldnn_convolution_transpose_backward_weights: ATen not compiled with MKLDNN support");
|
||||
}
|
||||
|
||||
static std::tuple<Tensor, Tensor, Tensor> mkldnn_convolution_transpose_backward(
|
||||
const Tensor& input, const Tensor& grad_output_t, const Tensor& weight,
|
||||
IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation,
|
||||
int64_t groups, std::array<bool,3> output_mask) {
|
||||
TORCH_CHECK(false, "mkldnn_convolution_transpose_backward: ATen not compiled with MKLDNN support");
|
||||
}
|
||||
|
||||
REGISTER_NO_CPU_DISPATCH(mkldnn_convolution_transpose_stub);
|
||||
REGISTER_NO_CPU_DISPATCH(mkldnn_convolution_transpose_backward_stub);
|
||||
|
||||
|
@ -18,26 +18,21 @@ kernel void erfinv_mps_kernel( device {0} *output [[buffer(0)]],
|
||||
/* coefficients in rational expansion */
|
||||
|
||||
float y_abs = abs(y);
|
||||
if(y_abs > 1.0f){{
|
||||
output[index] = NAN;
|
||||
if (y_abs >= 1.0f) {{
|
||||
output[index] = {0}( y_abs > 1.0f ? NAN : copysign(INFINITY, y));
|
||||
return;
|
||||
}}
|
||||
if(y_abs == 1.0f){{
|
||||
output[index] = copysign(INFINITY, y);
|
||||
return;
|
||||
}}
|
||||
if(y_abs <= 0.7f) {{
|
||||
if (y_abs <= 0.7f) {{
|
||||
z = y * y;
|
||||
num = (((a[3]*z + a[2])*z + a[1])*z + a[0]);
|
||||
dem = ((((b[3]*z + b[2])*z + b[1])*z +b[0]) * z + 1.0f);
|
||||
num = ((a[3] * z + a[2]) * z + a[1])*z + a[0];
|
||||
dem = (((b[3] * z + b[2]) * z + b[1]) * z +b[0]) * z + 1.0f;
|
||||
x = y * num / dem;
|
||||
}}
|
||||
else{{
|
||||
}} else {{
|
||||
z = sqrt(-1.0f*log((1.0-y_abs)/2.0));
|
||||
num = ((c[3]*z + c[2])*z + c[1]) * z + c[0];
|
||||
dem = (d[1]*z + d[0])*z + 1.0f;
|
||||
num = ((c[3] * z + c[2]) * z + c[1]) * z + c[0];
|
||||
dem = (d[1] * z + d[0]) * z + 1.0f;
|
||||
x = copysign(num, y) / dem;
|
||||
}}
|
||||
|
||||
output[index] = x;
|
||||
}})METAL";
|
||||
output[index] = {0}(x);
|
||||
}})METAL";
|
||||
|
@ -143,7 +143,7 @@ TORCH_IMPL_FUNC(leaky_relu_out_mps)(const Tensor& self, const Scalar& negative_s
|
||||
Tensor output_ = at::empty_like(self, executeGatherOp ? MemoryFormat::Contiguous : MemoryFormat::Preserve);
|
||||
|
||||
@autoreleasepool {
|
||||
string key = "leaky_relu" + getTensorsStringKey({self}) + ":" + to_string(negative_slope.to<double>());
|
||||
string key = "leaky_relu" + getTensorsStringKey({self}) + ":" + std::to_string(negative_slope.to<double>());
|
||||
auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
|
||||
MPSGraphTensor* inputTensor = mpsGraphRankedPlaceHolder(mpsGraph, self);
|
||||
|
||||
@ -193,8 +193,8 @@ TORCH_IMPL_FUNC(leaky_relu_backward_out_mps)
|
||||
Tensor output_ = at::empty_like(self, self.suggest_memory_format());
|
||||
|
||||
@autoreleasepool {
|
||||
string key =
|
||||
"leaky_relu_backward" + getTensorsStringKey({self, grad_output}) + ":" + to_string(negative_slope.to<double>());
|
||||
string key = "leaky_relu_backward" + getTensorsStringKey({self, grad_output}) + ":" +
|
||||
std::to_string(negative_slope.to<double>());
|
||||
auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
|
||||
MPSGraphTensor* inputTensor = mpsGraphRankedPlaceHolder(mpsGraph, self);
|
||||
MPSGraphTensor* gradOutputTensor = mpsGraphRankedPlaceHolder(mpsGraph, grad_output);
|
||||
@ -242,7 +242,7 @@ TORCH_IMPL_FUNC(log_softmax_mps_out)
|
||||
MPSStream* stream = at::mps::getCurrentMPSStream();
|
||||
|
||||
@autoreleasepool {
|
||||
string key = "log_softmax_mps_out" + getTensorsStringKey({self}) + ":" + to_string(dim);
|
||||
string key = "log_softmax_mps_out" + getTensorsStringKey({self}) + ":" + std::to_string(dim);
|
||||
auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
|
||||
MPSGraphTensor* inputTensor = mpsGraphRankedPlaceHolder(mpsGraph, self);
|
||||
|
||||
@ -285,7 +285,7 @@ TORCH_IMPL_FUNC(log_softmax_backward_mps_out)
|
||||
MPSStream* stream = at::mps::getCurrentMPSStream();
|
||||
|
||||
@autoreleasepool {
|
||||
string key = "log_softmax_backward_mps_out:" + getMPSTypeString(grad_output) + ":" + to_string(dim);
|
||||
string key = "log_softmax_backward_mps_out:" + getMPSTypeString(grad_output) + ":" + std::to_string(dim);
|
||||
auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
|
||||
MPSGraphTensor* gradOutputTensor = mpsGraphUnrankedPlaceHolder(mpsGraph, getMPSDataType(grad_output));
|
||||
MPSGraphTensor* outputTensor = mpsGraphUnrankedPlaceHolder(mpsGraph, getMPSDataType(output));
|
||||
@ -539,8 +539,8 @@ TORCH_IMPL_FUNC(threshold_out_mps)
|
||||
MPSStream* stream = getCurrentMPSStream();
|
||||
|
||||
@autoreleasepool {
|
||||
string key = "threshold_out_mps" + getTensorsStringKey({self}) + ":" + to_string(threshold.to<double>()) + ":" +
|
||||
to_string(value.to<double>());
|
||||
string key = "threshold_out_mps" + getTensorsStringKey({self}) + ":" + std::to_string(threshold.to<double>()) +
|
||||
":" + std::to_string(value.to<double>());
|
||||
|
||||
auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
|
||||
MPSGraphTensor* inputTensor = mpsGraphRankedPlaceHolder(mpsGraph, self);
|
||||
@ -587,7 +587,7 @@ TORCH_IMPL_FUNC(threshold_backward_out_mps)
|
||||
|
||||
@autoreleasepool {
|
||||
string key =
|
||||
"threshold_backward_out_mps" + getTensorsStringKey({self, grad}) + ":" + to_string(threshold.to<double>());
|
||||
"threshold_backward_out_mps" + getTensorsStringKey({self, grad}) + ":" + std::to_string(threshold.to<double>());
|
||||
|
||||
auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
|
||||
MPSGraphTensor* inputTensor = mpsGraphRankedPlaceHolder(mpsGraph, self);
|
||||
@ -826,8 +826,8 @@ static void elu_variants_out_mps(const Tensor& self,
|
||||
MPSStream* stream = getCurrentMPSStream();
|
||||
|
||||
@autoreleasepool {
|
||||
string key = func_name + ":" + getTensorsStringKey({self}) + ":" + to_string(alpha.to<double>()) + ":" +
|
||||
to_string(scale.to<double>()) + ":" + to_string(input_scale.to<double>());
|
||||
string key = func_name + ":" + getTensorsStringKey({self}) + ":" + std::to_string(alpha.to<double>()) + ":" +
|
||||
std::to_string(scale.to<double>()) + ":" + std::to_string(input_scale.to<double>());
|
||||
|
||||
auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
|
||||
MPSGraphTensor* inputTensor = mpsGraphRankedPlaceHolder(mpsGraph, self);
|
||||
@ -916,8 +916,8 @@ TORCH_IMPL_FUNC(elu_backward_out_mps)
|
||||
|
||||
@autoreleasepool {
|
||||
string key = "elu_backward_out_mps:" + getTensorsStringKey({grad_output, self_or_result}) + ":" +
|
||||
to_string(alpha.to<double>()) + ":" + to_string(scale.to<double>()) + ":" +
|
||||
to_string(input_scale.to<double>()) + ":" + to_string(is_result);
|
||||
std::to_string(alpha.to<double>()) + ":" + std::to_string(scale.to<double>()) + ":" +
|
||||
std::to_string(input_scale.to<double>()) + ":" + std::to_string(is_result);
|
||||
|
||||
auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
|
||||
MPSGraphTensor* gradOutputTensor = mpsGraphRankedPlaceHolder(mpsGraph, grad_output);
|
||||
@ -1010,7 +1010,7 @@ TORCH_IMPL_FUNC(glu_out_mps)(const Tensor& self, const int64_t dim, const Tensor
|
||||
MPSStream* stream = getCurrentMPSStream();
|
||||
|
||||
@autoreleasepool {
|
||||
string key = "glu_out_mps" + getTensorsStringKey({self}) + ":" + to_string(dim);
|
||||
string key = "glu_out_mps" + getTensorsStringKey({self}) + ":" + std::to_string(dim);
|
||||
auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
|
||||
MPSGraphTensor* inputTensor = mpsGraphRankedPlaceHolder(mpsGraph, getMPSDataType(self), getMPSShape(self));
|
||||
NSArray<MPSGraphTensor*>* outputTensorsArray = [mpsGraph splitTensor:inputTensor
|
||||
@ -1052,7 +1052,7 @@ Tensor& glu_backward_mps_out(const Tensor& grad_output, const Tensor& self, cons
|
||||
MPSStream* stream = getCurrentMPSStream();
|
||||
|
||||
@autoreleasepool {
|
||||
string key = "glu_backward_mps_out" + getTensorsStringKey({grad_output, self}) + ":" + to_string(dim);
|
||||
string key = "glu_backward_mps_out" + getTensorsStringKey({grad_output, self}) + ":" + std::to_string(dim);
|
||||
auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
|
||||
MPSGraphTensor* inputTensor = mpsGraphRankedPlaceHolder(mpsGraph, getMPSDataType(self), getMPSShape(self));
|
||||
MPSGraphTensor* gradOutputTensor =
|
||||
@ -1855,8 +1855,8 @@ Tensor& hardtanh_backward_out_mps(const Tensor& grad_output,
|
||||
MPSStream* stream = getCurrentMPSStream();
|
||||
|
||||
@autoreleasepool {
|
||||
string key = "hardtanh_backward_out_mps:" + getTensorsStringKey({grad_output}) + ":" + to_string(min.to<double>()) +
|
||||
":" + to_string(max.to<double>());
|
||||
string key = "hardtanh_backward_out_mps:" + getTensorsStringKey({grad_output}) + ":" +
|
||||
std::to_string(min.to<double>()) + ":" + std::to_string(max.to<double>());
|
||||
|
||||
auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
|
||||
MPSGraphTensor* gradOutputTensor = mpsGraphRankedPlaceHolder(mpsGraph, grad_output);
|
||||
|
@ -136,8 +136,8 @@ static Tensor& addmv_out_mps_impl(const Tensor& self,
|
||||
Tensor matMulVec = at::mm(mat, vec.unsqueeze(1)).squeeze(1);
|
||||
|
||||
@autoreleasepool {
|
||||
string key = "addmv_out_mps_impl" + getTensorsStringKey({self, matMulVec}) + ":" + to_string(beta_.toDouble()) +
|
||||
":" + to_string(alpha_.toDouble());
|
||||
string key = "addmv_out_mps_impl" + getTensorsStringKey({self, matMulVec}) + ":" +
|
||||
std::to_string(beta_.toDouble()) + ":" + std::to_string(alpha_.toDouble());
|
||||
auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
|
||||
MPSGraphTensor* matMulVecTensor = mpsGraphRankedPlaceHolder(mpsGraph, matMulVec);
|
||||
MPSGraphTensor* selfTensor = mpsGraphRankedPlaceHolder(mpsGraph, self);
|
||||
|
@ -33,7 +33,7 @@ static Tensor& fill_scalar_mps_impl(Tensor& self, const Scalar& value) {
|
||||
};
|
||||
|
||||
@autoreleasepool {
|
||||
string key = "fill_scalar_mps_impl" + getTensorsStringKey(self) + ":" + to_string(value.toDouble());
|
||||
string key = "fill_scalar_mps_impl" + getTensorsStringKey(self) + ":" + std::to_string(value.toDouble());
|
||||
|
||||
auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
|
||||
MPSGraphTensor* inputTensor = mpsGraphScalarPlaceHolder(mpsGraph, getMPSDataType(self.scalar_type()));
|
||||
|
@ -193,24 +193,24 @@ static Tensor _mps_convolution_impl(const Tensor& input_t,
|
||||
|
||||
string bias_shape_key;
|
||||
if (bias_defined) {
|
||||
bias_shape_key = to_string(bias_shape[0]);
|
||||
bias_shape_key = std::to_string(bias_shape[0]);
|
||||
} else {
|
||||
bias_shape_key = "nobias";
|
||||
}
|
||||
|
||||
string key;
|
||||
if (is3DConv) {
|
||||
key = "mps_3d_convolution:" + to_string(stride[0]) + ":" + to_string(stride[1]) + ":" + to_string(stride[2]) +
|
||||
":" + to_string(dilation[0]) + ":" + to_string(dilation[1]) + ":" + to_string(dilation[2]) + ":" +
|
||||
to_string(padding[0]) + ":" + to_string(padding[1]) + ":" + to_string(padding[2]) + ":" + to_string(groups) +
|
||||
":" + mem_format_key + mps::getTensorsStringKey({input_t, weight_t}) + ":" + to_string(bias_defined) + ":" +
|
||||
bias_shape_key;
|
||||
key = "mps_3d_convolution:" + std::to_string(stride[0]) + ":" + std::to_string(stride[1]) + ":" +
|
||||
std::to_string(stride[2]) + ":" + std::to_string(dilation[0]) + ":" + std::to_string(dilation[1]) + ":" +
|
||||
std::to_string(dilation[2]) + ":" + std::to_string(padding[0]) + ":" + std::to_string(padding[1]) + ":" +
|
||||
std::to_string(padding[2]) + ":" + std::to_string(groups) + ":" + mem_format_key +
|
||||
mps::getTensorsStringKey({input_t, weight_t}) + ":" + std::to_string(bias_defined) + ":" + bias_shape_key;
|
||||
|
||||
} else {
|
||||
key = "mps_convolution:" + to_string(stride[0]) + ":" + to_string(stride[1]) + ":" + to_string(dilation[0]) +
|
||||
":" + to_string(dilation[1]) + ":" + to_string(padding[0]) + ":" + to_string(padding[1]) + ":" +
|
||||
to_string(groups) + ":" + mem_format_key + mps::getTensorsStringKey({input_t, weight_t}) + ":" +
|
||||
to_string(bias_defined) + ":" + bias_shape_key;
|
||||
key = "mps_convolution:" + std::to_string(stride[0]) + ":" + std::to_string(stride[1]) + ":" +
|
||||
std::to_string(dilation[0]) + ":" + std::to_string(dilation[1]) + ":" + std::to_string(padding[0]) + ":" +
|
||||
std::to_string(padding[1]) + ":" + std::to_string(groups) + ":" + mem_format_key +
|
||||
mps::getTensorsStringKey({input_t, weight_t}) + ":" + std::to_string(bias_defined) + ":" + bias_shape_key;
|
||||
}
|
||||
|
||||
MPSShape* inputShape = mps::getMPSShape(input_t, memory_format);
|
||||
@ -388,16 +388,16 @@ static Tensor mps_convolution_backward_input(IntArrayRef input_size,
|
||||
NSString* ns_shape_key = [[gradOutputShape valueForKey:@"description"] componentsJoinedByString:@","];
|
||||
string key;
|
||||
if (is3DConv) {
|
||||
key = "mps_3d_convolution_backward_input:" + to_string(stride[0]) + ":" + to_string(stride[1]) + ":" + ":" +
|
||||
to_string(stride[2]) + to_string(dilation[0]) + ":" + to_string(dilation[1]) + ":" + to_string(dilation[2]) +
|
||||
":" + to_string(padding[0]) + ":" + to_string(padding[1]) + ":" + to_string(padding[2]) + ":" +
|
||||
to_string(groups) + ":" + mem_format_key + getTensorsStringKey({grad_output_t, weight_t}) + ":" +
|
||||
string([ns_shape_key UTF8String]);
|
||||
key = "mps_3d_convolution_backward_input:" + std::to_string(stride[0]) + ":" + std::to_string(stride[1]) + ":" +
|
||||
":" + std::to_string(stride[2]) + std::to_string(dilation[0]) + ":" + std::to_string(dilation[1]) + ":" +
|
||||
std::to_string(dilation[2]) + ":" + std::to_string(padding[0]) + ":" + std::to_string(padding[1]) + ":" +
|
||||
std::to_string(padding[2]) + ":" + std::to_string(groups) + ":" + mem_format_key +
|
||||
getTensorsStringKey({grad_output_t, weight_t}) + ":" + string([ns_shape_key UTF8String]);
|
||||
|
||||
} else {
|
||||
key = "mps_convolution_backward_input:" + to_string(stride[0]) + ":" + to_string(stride[1]) + ":" +
|
||||
to_string(dilation[0]) + ":" + to_string(dilation[1]) + ":" + to_string(padding[0]) + ":" +
|
||||
to_string(padding[1]) + ":" + to_string(groups) + ":" + mem_format_key +
|
||||
key = "mps_convolution_backward_input:" + std::to_string(stride[0]) + ":" + std::to_string(stride[1]) + ":" +
|
||||
std::to_string(dilation[0]) + ":" + std::to_string(dilation[1]) + ":" + std::to_string(padding[0]) + ":" +
|
||||
std::to_string(padding[1]) + ":" + std::to_string(groups) + ":" + mem_format_key +
|
||||
getTensorsStringKey({grad_output_t, weight_t}) + ":" + string([ns_shape_key UTF8String]);
|
||||
}
|
||||
auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
|
||||
@ -547,15 +547,15 @@ static Tensor mps_convolution_backward_weights(IntArrayRef weight_size,
|
||||
NSString* ns_shape_key = [[gradOutputShape valueForKey:@"description"] componentsJoinedByString:@","];
|
||||
string key;
|
||||
if (is3DConv) {
|
||||
key = "mps_3d_convolution_backward_weights:" + to_string(stride[0]) + ":" + to_string(stride[1]) + ":" +
|
||||
to_string(stride[2]) + ":" + to_string(dilation[0]) + ":" + to_string(dilation[1]) + ":" +
|
||||
to_string(dilation[2]) + ":" + to_string(padding[0]) + ":" + to_string(padding[1]) + ":" +
|
||||
to_string(padding[2]) + ":" + to_string(groups) + ":" + mem_format_key +
|
||||
key = "mps_3d_convolution_backward_weights:" + std::to_string(stride[0]) + ":" + std::to_string(stride[1]) + ":" +
|
||||
std::to_string(stride[2]) + ":" + std::to_string(dilation[0]) + ":" + std::to_string(dilation[1]) + ":" +
|
||||
std::to_string(dilation[2]) + ":" + std::to_string(padding[0]) + ":" + std::to_string(padding[1]) + ":" +
|
||||
std::to_string(padding[2]) + ":" + std::to_string(groups) + ":" + mem_format_key +
|
||||
getTensorsStringKey({grad_output_t, input_t, grad_weight_t}) + ":" + string([ns_shape_key UTF8String]);
|
||||
} else {
|
||||
key = "mps_convolution_backward_weights:" + to_string(stride[0]) + ":" + to_string(stride[1]) + ":" +
|
||||
to_string(dilation[0]) + ":" + to_string(dilation[1]) + ":" + to_string(padding[0]) + ":" +
|
||||
to_string(padding[1]) + ":" + to_string(groups) + ":" + mem_format_key +
|
||||
key = "mps_convolution_backward_weights:" + std::to_string(stride[0]) + ":" + std::to_string(stride[1]) + ":" +
|
||||
std::to_string(dilation[0]) + ":" + std::to_string(dilation[1]) + ":" + std::to_string(padding[0]) + ":" +
|
||||
std::to_string(padding[1]) + ":" + std::to_string(groups) + ":" + mem_format_key +
|
||||
getTensorsStringKey({grad_output_t, input_t, grad_weight_t}) + ":" + string([ns_shape_key UTF8String]);
|
||||
}
|
||||
auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
|
||||
|
@ -63,7 +63,7 @@ Tensor& random_mps_impl(Tensor& self,
|
||||
|
||||
@autoreleasepool {
|
||||
string key = op_name + getTensorsStringKey({self, mean_opt.value_or(Tensor()), std_opt.value_or(Tensor())}) + ":" +
|
||||
to_string(val1) + ":" + to_string(val2);
|
||||
std::to_string(val1) + ":" + std::to_string(val2);
|
||||
auto cachedGraph = LookUpOrCreateCachedGraph<RandomCachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
|
||||
newCachedGraph->stateTensor =
|
||||
mpsGraphRankedPlaceHolder(mpsGraph, MPSDataTypeInt32, @[ @(at::mps::detail::PHILOX_STATE_N) ]);
|
||||
@ -469,7 +469,7 @@ static Tensor& multinomial_with_replacement_mps_kernel(const Tensor& self,
|
||||
MPSStream* stream = getCurrentMPSStream();
|
||||
|
||||
@autoreleasepool {
|
||||
string key = "multinomial_with_replacement:" + getTensorsStringKey({self}) + ":" + to_string(n_sample);
|
||||
string key = "multinomial_with_replacement:" + getTensorsStringKey({self}) + ":" + std::to_string(n_sample);
|
||||
auto cachedGraph = LookUpOrCreateCachedGraph<RandomCachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
|
||||
MPSShape* prob_shape = getMPSShape(self_v);
|
||||
newCachedGraph->stateTensor = mpsGraphRankedPlaceHolder(mpsGraph, MPSDataTypeInt32, @[ @7 ]);
|
||||
|
@ -236,7 +236,7 @@ static std::tuple<Tensor, Tensor> _mps_linear_backward_weights(const Tensor& gra
|
||||
MPSStream* stream = getCurrentMPSStream();
|
||||
|
||||
@autoreleasepool {
|
||||
string key = "mps_linear_backward_weights:" + to_string(bias_defined) + ":" +
|
||||
string key = "mps_linear_backward_weights:" + std::to_string(bias_defined) + ":" +
|
||||
getTensorsStringKey({input_reshaped, weight, grad_output_reshaped});
|
||||
auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
|
||||
MPSGraphTensor* inputTensor = mpsGraphRankedPlaceHolder(mpsGraph, input_reshaped);
|
||||
|
@ -229,8 +229,8 @@ static Tensor& addbmm_or_baddbmm_out_mps_impl(const Tensor& input,
|
||||
|
||||
@autoreleasepool {
|
||||
string key = (opType == ADDBMM_OP_TYPE) ? ("addbmm_out_mps_impl") : ("baddbmm_out_mps_impl");
|
||||
key += getTensorsStringKey({batch1, batch2, input}) + ":" + to_string(beta.toDouble()) + ":" +
|
||||
to_string(alpha.toDouble());
|
||||
key += getTensorsStringKey({batch1, batch2, input}) + ":" + std::to_string(beta.toDouble()) + ":" +
|
||||
std::to_string(alpha.toDouble());
|
||||
|
||||
auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
|
||||
MPSGraphTensor* inputTensor = mps::mpsGraphRankedPlaceHolder(mpsGraph, input);
|
||||
@ -331,8 +331,8 @@ static Tensor& addmm_out_mps_impl(const Tensor& bias,
|
||||
};
|
||||
|
||||
@autoreleasepool {
|
||||
string key = "addmm_out_mps_impl" + getTensorsStringKey({self, other, *bias_}) + ":" + to_string(beta.toDouble()) +
|
||||
":" + to_string(alpha.toDouble());
|
||||
string key = "addmm_out_mps_impl" + getTensorsStringKey({self, other, *bias_}) + ":" +
|
||||
std::to_string(beta.toDouble()) + ":" + std::to_string(alpha.toDouble());
|
||||
auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
|
||||
MPSGraphTensor* selfTensor = nil;
|
||||
MPSGraphTensor* otherTensor = nil;
|
||||
@ -615,8 +615,8 @@ Tensor& addr_out_mps(const Tensor& self,
|
||||
};
|
||||
|
||||
@autoreleasepool {
|
||||
string key = "addr_out_mps_impl" + getTensorsStringKey({vec1, vec2, *self_}) + ":" + to_string(beta.toDouble()) +
|
||||
":" + to_string(alpha.toDouble());
|
||||
string key = "addr_out_mps_impl" + getTensorsStringKey({vec1, vec2, *self_}) + ":" +
|
||||
std::to_string(beta.toDouble()) + ":" + std::to_string(alpha.toDouble());
|
||||
auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
|
||||
MPSGraphTensor* t1 = mps::mpsGraphRankedPlaceHolder(mpsGraph, getMPSDataType(vec1), inputShape);
|
||||
MPSGraphTensor* t2 = mps::mpsGraphRankedPlaceHolder(mpsGraph, getMPSDataType(vec2), otherShape);
|
||||
|
@ -69,7 +69,7 @@ static Tensor& mse_loss_backward_out_impl(const Tensor& grad_output,
|
||||
};
|
||||
|
||||
@autoreleasepool {
|
||||
string key = op_name + reductionToString(reduction) + ":" + to_string(grad_input.sizes()[1]) +
|
||||
string key = op_name + reductionToString(reduction) + ":" + std::to_string(grad_input.sizes()[1]) +
|
||||
getTensorsStringKey({input, target, grad_output});
|
||||
auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
|
||||
newCachedGraph->inputTensor = mpsGraphRankedPlaceHolder(mpsGraph, input);
|
||||
@ -327,8 +327,8 @@ static void nllnd_loss_backward_impl(Tensor& grad_input_arg,
|
||||
}
|
||||
@autoreleasepool {
|
||||
string key = "nllnd_loss_backward" + getTensorsStringKey({input, grad_output, target, weight, total_weight}) +
|
||||
to_string(numClasses) + ":" + to_string(ignore_index) + ":" + to_string(isWeightsArrayValid) + ":" +
|
||||
to_string(isTargetCasted) + ":" + reductionToString(reduction);
|
||||
std::to_string(numClasses) + ":" + std::to_string(ignore_index) + ":" + std::to_string(isWeightsArrayValid) +
|
||||
":" + std::to_string(isTargetCasted) + ":" + reductionToString(reduction);
|
||||
|
||||
auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
|
||||
MPSGraphTensor* inputTensor = mpsGraphRankedPlaceHolder(mpsGraph, input);
|
||||
@ -463,9 +463,9 @@ static void nllnd_loss_forward_impl(Tensor& output,
|
||||
NSString* ns_shape_key = [[input_shape valueForKey:@"description"] componentsJoinedByString:@","];
|
||||
|
||||
// TODO: Make the key
|
||||
string key = "nllnd_loss_forward_impl:" + to_string(ignore_index) + ":" + to_string(isWeightsArrayValid) + ":" +
|
||||
reductionToString(reduction) + ":" + [ns_shape_key UTF8String] + ":" + getMPSTypeString(input) + ":" +
|
||||
getMPSTypeString(target) + ":" + to_string(isTargetCasted) + ":" + getMPSTypeString(weight);
|
||||
string key = "nllnd_loss_forward_impl:" + std::to_string(ignore_index) + ":" + std::to_string(isWeightsArrayValid) +
|
||||
":" + reductionToString(reduction) + ":" + [ns_shape_key UTF8String] + ":" + getMPSTypeString(input) + ":" +
|
||||
getMPSTypeString(target) + ":" + std::to_string(isTargetCasted) + ":" + getMPSTypeString(weight);
|
||||
auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
|
||||
MPSGraphTensor* inputTensor = mpsGraphRankedPlaceHolder(mpsGraph, getMPSDataType(input), input_shape);
|
||||
MPSGraphTensor* targetTensor = mpsGraphRankedPlaceHolder(mpsGraph, getMPSDataType(target), target_shape);
|
||||
@ -598,7 +598,7 @@ static void smooth_l1_loss_impl(const Tensor& input,
|
||||
NSString* ns_shape_key = [[input_shape valueForKey:@"description"] componentsJoinedByString:@","];
|
||||
|
||||
string key = "smooth_l1_loss_impl:" + reductionToString(reduction) + ":" + [ns_shape_key UTF8String] + ":" +
|
||||
to_string(beta) + ":" + getMPSTypeString(input) + ":" + getMPSTypeString(target);
|
||||
std::to_string(beta) + ":" + getMPSTypeString(input) + ":" + getMPSTypeString(target);
|
||||
auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
|
||||
// smooth_l1_loss_mps:
|
||||
// ln = 0.5 * ( xn - yn ) ^ 2 / beta, if |xn - yn| < beta
|
||||
@ -734,7 +734,7 @@ static void smooth_l1_loss_backward_impl(const Tensor& grad_output,
|
||||
|
||||
@autoreleasepool {
|
||||
string key = "smooth_l1_loss_backward" + getTensorsStringKey({input, grad_output, grad_input, target}) + ":" +
|
||||
reductionToString(reduction) + ":" + to_string(beta);
|
||||
reductionToString(reduction) + ":" + std::to_string(beta);
|
||||
|
||||
auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
|
||||
MPSGraphTensor* inputTensor = mpsGraphRankedPlaceHolder(mpsGraph, input);
|
||||
|
@ -106,7 +106,7 @@ Tensor& arange_mps_out(const Scalar& start, const Scalar& end, const Scalar& ste
|
||||
auto stream = getCurrentMPSStream();
|
||||
auto mpsDataType = getMPSDataType(result);
|
||||
@autoreleasepool {
|
||||
string key = "arange_mps_out" + getTensorsStringKey({result}) + ":" + to_string(size);
|
||||
string key = "arange_mps_out" + getTensorsStringKey({result}) + ":" + std::to_string(size);
|
||||
auto cachedGraph = cache_->LookUpAs<RangeCachedGraph>(key);
|
||||
if (!cachedGraph) {
|
||||
cachedGraph = cache_->CreateCachedGraphAs<RangeCachedGraph>(key, ^MPSCachedGraph*() {
|
||||
@ -173,7 +173,7 @@ Tensor& range_mps_out(const Scalar& start, const Scalar& end, const Scalar& step
|
||||
auto stream = getCurrentMPSStream();
|
||||
auto mpsDataType = getMPSDataType(result);
|
||||
@autoreleasepool {
|
||||
string key = "arange_mps_out" + getTensorsStringKey({result}) + ":" + to_string(size);
|
||||
string key = "arange_mps_out" + getTensorsStringKey({result}) + ":" + std::to_string(size);
|
||||
auto cachedGraph = cache_->LookUpAs<RangeCachedGraph>(key);
|
||||
if (!cachedGraph) {
|
||||
cachedGraph = cache_->CreateCachedGraphAs<RangeCachedGraph>(key, ^MPSCachedGraph*() {
|
||||
@ -221,8 +221,8 @@ Tensor& linspace_out_mps(const Scalar& start, const Scalar& end, int64_t steps,
|
||||
bool start_less_end = (start.to<double>() <= end.to<double>());
|
||||
|
||||
@autoreleasepool {
|
||||
string key =
|
||||
"linspace_out_mps:" + getTensorsStringKey({result}) + ":" + to_string(steps) + to_string(start_less_end);
|
||||
string key = "linspace_out_mps:" + getTensorsStringKey({result}) + ":" + std::to_string(steps) +
|
||||
std::to_string(start_less_end);
|
||||
auto cachedGraph = cache_->LookUpAs<RangeCachedGraph>(key);
|
||||
|
||||
if (!cachedGraph) {
|
||||
|
@ -359,8 +359,8 @@ static void impl_func_norm_mps(const Tensor& input_tensor,
|
||||
NSString* ns_key = [[wrappedAxes valueForKey:@"description"] componentsJoinedByString:@","];
|
||||
string keepdim_info = (keepdim) ? "keepdim=1" : "keepdim=0";
|
||||
string tensor_key = cdist ? getTensorsStringKey({input_tensor, other_tensor}) : getTensorsStringKey({input_t});
|
||||
string key = string("norm_out_mps:") + [ns_key UTF8String] + ":" + tensor_key + ":p" + to_string(p) + ":" +
|
||||
keepdim_info + ":" + toString(in_dtype) + ":" + to_string(castInputData);
|
||||
string key = string("norm_out_mps:") + [ns_key UTF8String] + ":" + tensor_key + ":p" + std::to_string(p) + ":" +
|
||||
keepdim_info + ":" + toString(in_dtype) + ":" + std::to_string(castInputData);
|
||||
|
||||
auto cachedGraph = LookUpOrCreateCachedGraph<MPSBinaryCachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
|
||||
newCachedGraph->inputTensor_ = mpsGraphRankedPlaceHolder(mpsGraph, input_tensor);
|
||||
@ -572,7 +572,7 @@ static Tensor std_var_common_impl_mps(const Tensor& input_t,
|
||||
string op_key = (stdVarType == STANDARD_DEVIATION) ? "std_mps" : "var_mps";
|
||||
NSString* ns_key = [[wrappedAxes valueForKey:@"description"] componentsJoinedByString:@","];
|
||||
string bessel_corrected = (use_correction && correction_value) ? "unbiased " : "biased ";
|
||||
string use_dim_info = (use_dim) ? "use_dim=1:" + to_string(dim_value.size()) : "use_dim=0";
|
||||
string use_dim_info = (use_dim) ? "use_dim=1:" + std::to_string(dim_value.size()) : "use_dim=0";
|
||||
string keepdim_info = (keepdim) ? "keepdim=1" : "keepdim=0";
|
||||
string key = op_key + ":" + getTensorsStringKey(input_t) + ":" + use_dim_info + ":" + keepdim_info + ":" +
|
||||
string([ns_key UTF8String]) + ":" + bessel_corrected + ":" + std::to_string(correction_value);
|
||||
@ -700,7 +700,7 @@ static void min_max_out_mps(const Tensor& input_t,
|
||||
auto stream = at::mps::getCurrentMPSStream();
|
||||
|
||||
@autoreleasepool {
|
||||
string key = func_name + getTensorsStringKey({input_t, indices_t}) + ":" + to_string(dim_);
|
||||
string key = func_name + getTensorsStringKey({input_t, indices_t}) + ":" + std::to_string(dim_);
|
||||
auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
|
||||
MPSGraphTensor* inputTensor = mpsGraphRankedPlaceHolder(mpsGraph, input_t);
|
||||
MPSGraphTensor* outputTensor = nil;
|
||||
@ -860,7 +860,7 @@ static void argmax_argmin_out_mps(const Tensor& input_t,
|
||||
@autoreleasepool {
|
||||
NSString* ns_key = [[apparent_in_shape valueForKey:@"description"] componentsJoinedByString:@","];
|
||||
string key =
|
||||
func_name + ":" + to_string(dim_) + ":" + getTensorsStringKey(input_t) + ":" + string([ns_key UTF8String]);
|
||||
func_name + ":" + std::to_string(dim_) + ":" + getTensorsStringKey(input_t) + ":" + string([ns_key UTF8String]);
|
||||
auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
|
||||
auto inputScalarType = input_t.scalar_type();
|
||||
MPSGraphTensor* inputTensor =
|
||||
@ -1217,7 +1217,7 @@ TORCH_IMPL_FUNC(any_out_mps)
|
||||
|
||||
@autoreleasepool {
|
||||
MPSShape* input_t_shape = getMPSShape(input_t);
|
||||
string key = string("any_out_mps:") + getMPSShapeString(input_t_shape) + ":" + to_string(dim_) + ":" +
|
||||
string key = string("any_out_mps:") + getMPSShapeString(input_t_shape) + ":" + std::to_string(dim_) + ":" +
|
||||
getMPSTypeString(input_t);
|
||||
auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
|
||||
MPSDataType input_type = getMPSDataType(input_t);
|
||||
@ -1313,7 +1313,7 @@ TORCH_IMPL_FUNC(all_out_mps)
|
||||
|
||||
@autoreleasepool {
|
||||
MPSShape* input_t_shape = getMPSShape(input_t);
|
||||
string key = string("all_out_mps:") + getMPSShapeString(input_t_shape) + ":" + to_string(dim_) + ":" +
|
||||
string key = string("all_out_mps:") + getMPSShapeString(input_t_shape) + ":" + std::to_string(dim_) + ":" +
|
||||
getMPSTypeString(input_t);
|
||||
auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
|
||||
MPSDataType input_type = getMPSDataType(input_t);
|
||||
@ -1531,8 +1531,8 @@ static void median_out_mps(const Tensor& input_t,
|
||||
auto stream = at::mps::getCurrentMPSStream();
|
||||
|
||||
@autoreleasepool {
|
||||
string key =
|
||||
func_name + ":" + to_string(dim_) + ":" + getTensorsStringKey(input_t) + ":" + getTensorsStringKey(indices_t);
|
||||
string key = func_name + ":" + std::to_string(dim_) + ":" + getTensorsStringKey(input_t) + ":" +
|
||||
getTensorsStringKey(indices_t);
|
||||
auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
|
||||
MPSGraphTensor* inputTensor = mpsGraphRankedPlaceHolder(mpsGraph, input_t);
|
||||
MPSGraphTensor* castInputTensor =
|
||||
|
@ -108,8 +108,8 @@ TORCH_IMPL_FUNC(topk_out_mps)
|
||||
// Input as placeholders
|
||||
MPSShape* input_shape = getMPSShape(self);
|
||||
NSString* ns_shape_key = [[input_shape valueForKey:@"description"] componentsJoinedByString:@","];
|
||||
string key = string("topk:") + [ns_shape_key UTF8String] + ":" + getMPSTypeString(self) + ":k" + to_string(k) +
|
||||
":dim" + to_string(dim_) + ":largest" + to_string(largest);
|
||||
string key = string("topk:") + [ns_shape_key UTF8String] + ":" + getMPSTypeString(self) + ":k" + std::to_string(k) +
|
||||
":dim" + std::to_string(dim_) + ":largest" + std::to_string(largest);
|
||||
auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
|
||||
newCachedGraph->selfTensor = mpsGraphRankedPlaceHolder(mpsGraph, getMPSDataType(self), input_shape);
|
||||
|
||||
@ -320,12 +320,12 @@ TORCH_IMPL_FUNC(cat_out_mps)
|
||||
};
|
||||
|
||||
@autoreleasepool {
|
||||
string key =
|
||||
"cat_out_mps:" + to_string(dimension) + ":" + (memory_format == MemoryFormat::ChannelsLast ? "NHWC" : "NCHW");
|
||||
string key = "cat_out_mps:" + std::to_string(dimension) + ":" +
|
||||
(memory_format == MemoryFormat::ChannelsLast ? "NHWC" : "NCHW");
|
||||
if (!all_same_dtype) {
|
||||
key += getTensorsStringKey(input_tensors, true, all_same_sizes_and_stride);
|
||||
} else {
|
||||
key += ":" + getMPSTypeString(input_tensors[0].scalar_type(), true) + ":" + to_string(inputs.size());
|
||||
key += ":" + getMPSTypeString(input_tensors[0].scalar_type(), true) + ":" + std::to_string(inputs.size());
|
||||
}
|
||||
for (auto idx : skipped_tensor_indices) {
|
||||
key += "," + std::to_string(idx);
|
||||
|
@ -60,8 +60,8 @@ TORCH_IMPL_FUNC(sort_stable_out_mps)
|
||||
// Input as placeholders
|
||||
MPSShape* input_shape = getMPSShape(self);
|
||||
NSString* ns_shape_key = [[input_shape valueForKey:@"description"] componentsJoinedByString:@","];
|
||||
string key = string("sort:") + [ns_shape_key UTF8String] + ":" + getMPSTypeString(self) + ":dim" + to_string(dim) +
|
||||
":descending" + to_string(descending);
|
||||
string key = string("sort:") + [ns_shape_key UTF8String] + ":" + getMPSTypeString(self) + ":dim" +
|
||||
std::to_string(dim) + ":descending" + std::to_string(descending);
|
||||
auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
|
||||
newCachedGraph->selfTensor = mpsGraphRankedPlaceHolder(mpsGraph, getMPSDataType(self), input_shape);
|
||||
|
||||
|
@ -240,8 +240,8 @@ static void clamp_scalar_out_mps(const Tensor& input_t,
|
||||
|
||||
@autoreleasepool {
|
||||
// the optional min/max refs could affect how we build the cached graph
|
||||
string key = op_name + (has_min ? ("_min:" + to_string(min_scalar)) : "") +
|
||||
(has_max ? ("_max:" + to_string(max_scalar)) : "") + "_scalar:" + getTensorsStringKey({input_t});
|
||||
string key = op_name + (has_min ? ("_min:" + std::to_string(min_scalar)) : "") +
|
||||
(has_max ? ("_max:" + std::to_string(max_scalar)) : "") + "_scalar:" + getTensorsStringKey({input_t});
|
||||
auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
|
||||
if (has_min)
|
||||
newCachedGraph->minTensor = [mpsGraph
|
||||
|
@ -13,32 +13,6 @@
|
||||
#include <fmt/format.h>
|
||||
|
||||
namespace at::native {
|
||||
static const std::string& getMetalType(const c10::ScalarType& t) {
|
||||
// Mapping from c10::ScalarType to integral type that can be used for unary ops
|
||||
static std::unordered_map<c10::ScalarType, std::string> scalar_to_metal_type = {
|
||||
{c10::ScalarType::Half, "half"},
|
||||
{c10::ScalarType::Float, "float"},
|
||||
{c10::ScalarType::Long, "long"},
|
||||
{c10::ScalarType::Int, "int"},
|
||||
{c10::ScalarType::Short, "short"},
|
||||
{c10::ScalarType::Bool, "bool"},
|
||||
{c10::ScalarType::Char, "int8_t"},
|
||||
{c10::ScalarType::Byte, "uint8_t"},
|
||||
};
|
||||
|
||||
auto it = scalar_to_metal_type.find(t);
|
||||
TORCH_CHECK(it != scalar_to_metal_type.end(), "Unsupported type ", t);
|
||||
return it->second;
|
||||
}
|
||||
|
||||
static const std::string& getMetalType(const c10::Scalar& s) {
|
||||
return getMetalType(s.type());
|
||||
}
|
||||
|
||||
static const std::string& getMetalType(const Tensor& t) {
|
||||
return getMetalType(t.scalar_type());
|
||||
}
|
||||
|
||||
static mps::MetalShaderLibrary lib(UNARY_KERNEL_TEMPLATE, 2);
|
||||
|
||||
TORCH_IMPL_FUNC(erfinv_out_mps)(const Tensor& self, const Tensor& output_) {
|
||||
@ -57,7 +31,8 @@ TORCH_IMPL_FUNC(erfinv_out_mps)(const Tensor& self, const Tensor& output_) {
|
||||
}
|
||||
using namespace mps;
|
||||
@autoreleasepool {
|
||||
auto cplState = lib.getPipelineStateForFunc("erfinv_mps_kernel", {getMetalType(outputTensor), getMetalType(self)});
|
||||
auto cplState = lib.getPipelineStateForFunc("erfinv_mps_kernel",
|
||||
{scalarToMetalTypeString(outputTensor), scalarToMetalTypeString(self)});
|
||||
|
||||
if (!self.is_contiguous()) {
|
||||
inputTensor = inputTensor.contiguous();
|
||||
|
@ -36,8 +36,8 @@ static std::string getUniqueKey(const ScalarType& dtype,
|
||||
const bool consecutive,
|
||||
c10::optional<int64_t> dimOpt) {
|
||||
return "_unique2_mps:" + getMPSTypeString(dtype) + "[" + getArrayRefString(base_shape) + "]:[" +
|
||||
(dimOpt.has_value() ? to_string(dimOpt.value()) : "None") + "]:[" + to_string(return_inverse) + "]:[" +
|
||||
to_string(return_counts) + "]:[" + to_string(consecutive) + "]";
|
||||
(dimOpt.has_value() ? std::to_string(dimOpt.value()) : "None") + "]:[" + std::to_string(return_inverse) + "]:[" +
|
||||
std::to_string(return_counts) + "]:[" + std::to_string(consecutive) + "]";
|
||||
}
|
||||
|
||||
// dim arg not supported when non consecutive, ie sorted
|
||||
|
@ -99,7 +99,7 @@ static void upsample_out_template(const Tensor& input,
|
||||
|
||||
@autoreleasepool {
|
||||
string key = "upsample_" + std::string(resize_mode_str) + (align_corners ? "_aligned_corners" : "") +
|
||||
getTensorsStringKey({input}) + ":[" + to_string(scale_h) + "," + to_string(scale_w) + "]:[" +
|
||||
getTensorsStringKey({input}) + ":[" + std::to_string(scale_h) + "," + std::to_string(scale_w) + "]:[" +
|
||||
(is_backward_pass ? getArrayRefString(input_size) : "Undefined") + "]";
|
||||
|
||||
auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
|
||||
|
@ -42,7 +42,7 @@ static std::string getStridedKey(const ScalarType& self_dtype,
|
||||
}
|
||||
|
||||
return (is_scatter ? "scatter:" : "gather:") + dtype_key + "[" + getArrayRefString(base_shape) + "]:[" +
|
||||
getArrayRefString(new_shape) + "]:[" + getArrayRefString(stride) + "]:[" + to_string(storage_offset) + "]";
|
||||
getArrayRefString(new_shape) + "]:[" + getArrayRefString(stride) + "]:[" + std::to_string(storage_offset) + "]";
|
||||
}
|
||||
|
||||
// initializes the MTLBuffers for tensor data and runs the MPSGraph for the view op
|
||||
|
@ -172,16 +172,6 @@ Tensor mean_quantized_cpu(
|
||||
return result;
|
||||
}
|
||||
|
||||
static Tensor& mean_out_quantized_cpu(
|
||||
Tensor& result,
|
||||
const Tensor& self,
|
||||
DimnameList dim,
|
||||
bool keepdim,
|
||||
std::optional<ScalarType> opt_dtype) {
|
||||
return mean_out_quantized_cpu(
|
||||
self, dimnames_to_positions(self, dim), keepdim, opt_dtype, result);
|
||||
}
|
||||
|
||||
// qstd
|
||||
inline bool is_std_inner_dim_fast_path(
|
||||
const Tensor& self,
|
||||
|
@ -216,20 +216,6 @@ Tensor upsample_bilinear2d_quantized_cpu(
|
||||
}
|
||||
}
|
||||
|
||||
using at::native::upsample::compute_output_size;
|
||||
using at::native::upsample::get_scale_value;
|
||||
|
||||
static Tensor upsample_bilinear2d_quantized_cpu(
|
||||
const Tensor& input,
|
||||
at::OptionalIntArrayRef output_size,
|
||||
bool align_corners,
|
||||
std::optional<ArrayRef<double>> scale_factors) {
|
||||
auto osize = compute_output_size(input.sizes(), output_size, scale_factors);
|
||||
auto scale_h = get_scale_value(scale_factors, 0);
|
||||
auto scale_w = get_scale_value(scale_factors, 1);
|
||||
return upsample_bilinear2d_quantized_cpu(input, osize, align_corners, scale_h, scale_w);
|
||||
}
|
||||
|
||||
DEFINE_DISPATCH(qupsample_bilinear2d_nhwc_stub);
|
||||
} // namespace native
|
||||
} // namespace at
|
||||
|
@ -1,6 +1,7 @@
|
||||
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
|
||||
#include <algorithm>
|
||||
#include <cmath>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include <ATen/core/Tensor.h>
|
||||
@ -35,7 +36,6 @@
|
||||
#endif
|
||||
|
||||
#include <c10/util/irange.h>
|
||||
#include <c10/util/string_utils.h>
|
||||
|
||||
namespace {
|
||||
// To have a sanity check for maximum matrix size.
|
||||
@ -1848,15 +1848,15 @@ class QConvInt8ForBC final {
|
||||
int64_t output_zero_point) {
|
||||
if (kReluFused) {
|
||||
TORCH_WARN_ONCE(
|
||||
"Arguments [stride, padding, dilation, groups] in ops.quantized.conv"
|
||||
+ c10::to_string(kSpatialDim) + "d_relu, " +
|
||||
"have been removed, please update your model to remove these arguments.");
|
||||
"Arguments [stride, padding, dilation, groups] in ops.quantized.conv" +
|
||||
std::to_string(kSpatialDim),
|
||||
"d_relu, have been removed, please update your model to remove these arguments.");
|
||||
return packed_weight->apply_relu(act, output_scale, output_zero_point);
|
||||
} else {
|
||||
TORCH_WARN_ONCE(
|
||||
"Arguments [stride, padding, dilation, groups] in ops.quantized.conv"
|
||||
+ c10::to_string(kSpatialDim) + "d, " +
|
||||
"have been removed, please update your model to remove these arguments.");
|
||||
"Arguments [stride, padding, dilation, groups] in ops.quantized.conv",
|
||||
std::to_string(kSpatialDim),
|
||||
"d, have been removed, please update your model to remove these arguments.");
|
||||
return packed_weight->apply(act, output_scale, output_zero_point);
|
||||
}
|
||||
}
|
||||
|
@ -342,7 +342,10 @@ Tensor qembeddingbag_byte_prepack_meta(const Tensor& weight) {
|
||||
output_shape[cols_dim] = output_columns;
|
||||
at::SymDimVector output_shape_vec(output_shape);
|
||||
|
||||
return at::empty_symint(output_shape_vec, weight.options().dtype(weight.scalar_type()), weight.suggest_memory_format());
|
||||
return at::empty_symint(
|
||||
output_shape_vec,
|
||||
weight.options().dtype(weight.scalar_type()),
|
||||
weight.suggest_memory_format());
|
||||
}
|
||||
|
||||
namespace {
|
||||
@ -373,9 +376,10 @@ Tensor _qembeddingbag_nbit_prepack_helper(
|
||||
int NUM_ELEM_PER_BYTE = 8 / bit_width;
|
||||
TORCH_CHECK(
|
||||
weight_contig.size(weight.dim() - 1) % NUM_ELEM_PER_BYTE == 0,
|
||||
"qembeddingbag_" + c10::to_string(bit_width) +
|
||||
"bit_prepack only works for the number of columns a multiple of " +
|
||||
c10::to_string(NUM_ELEM_PER_BYTE));
|
||||
"qembeddingbag_",
|
||||
std::to_string(bit_width),
|
||||
"bit_prepack only works for the number of columns a multiple of ",
|
||||
std::to_string(NUM_ELEM_PER_BYTE));
|
||||
|
||||
// The "fused" representation stores the scale and bias with the
|
||||
// row-wise quantized data in one tensor.
|
||||
@ -551,11 +555,9 @@ TORCH_LIBRARY_IMPL(quantized, QuantizedCPU, m) {
|
||||
TORCH_FN(QEmbeddingPackWeights::run));
|
||||
}
|
||||
|
||||
|
||||
TORCH_LIBRARY_IMPL(quantized, Meta, m) {
|
||||
m.impl(
|
||||
"quantized::embedding_bag_byte_prepack",
|
||||
qembeddingbag_byte_prepack_meta);
|
||||
"quantized::embedding_bag_byte_prepack", qembeddingbag_byte_prepack_meta);
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
@ -270,10 +270,6 @@ Tensor& div_sparse_(Tensor& self, const Tensor& value) {
|
||||
return div_out_sparse_zerodim(self, value, self);
|
||||
}
|
||||
|
||||
static SparseTensor& div_out_sparse_scalar(const SparseTensor& t, Scalar value, SparseTensor& r) {
|
||||
return div_out_sparse_zerodim(t, wrapped_scalar_tensor(value), r);
|
||||
}
|
||||
|
||||
Tensor div_sparse(const Tensor& self, const Tensor& value, std::optional<c10::string_view> rounding_mode) {
|
||||
auto commonDtype = at::result_type(self, value);
|
||||
if (c10::isIntegralType(commonDtype, /*includeBool=*/true) && !rounding_mode.has_value()) {
|
||||
@ -287,10 +283,6 @@ Tensor& div_sparse_(Tensor& self, const Tensor& value, std::optional<c10::string
|
||||
return div_out_sparse_zerodim(self, value, std::move(rounding_mode), self);
|
||||
}
|
||||
|
||||
static SparseTensor& div_out_sparse_scalar(const SparseTensor& t, Scalar value, std::optional<c10::string_view> rounding_mode, SparseTensor& r) {
|
||||
return div_out_sparse_zerodim(t, wrapped_scalar_tensor(value), std::move(rounding_mode), r);
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------
|
||||
// floor_divide(SparseTensor, Scalar)
|
||||
// --------------------------------------------------------------------
|
||||
@ -350,10 +342,6 @@ Tensor& floor_divide_sparse_(Tensor& self, const Tensor& value) {
|
||||
return floor_divide_out_sparse_zerodim(self, value, self);
|
||||
}
|
||||
|
||||
static SparseTensor& floor_divide_out_sparse_scalar(SparseTensor& r, const SparseTensor& t, const Scalar& value) {
|
||||
return floor_divide_out_sparse_zerodim(t, wrapped_scalar_tensor(value), r);
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------
|
||||
// norm(SparseTensor, Scalar)
|
||||
// --------------------------------------------------------------------
|
||||
|
@ -764,8 +764,8 @@ std::tuple<Tensor, Tensor, Tensor, Tensor> _scaled_dot_product_cudnn_attention_c
|
||||
const int64_t batch_size = query.size(0);
|
||||
const int64_t num_heads = query.size(1);
|
||||
const int64_t max_seqlen_batch_q = query.size(2);
|
||||
const int64_t head_dim = query.size(3);
|
||||
|
||||
const int64_t head_dim_qk = query.size(3);
|
||||
const int64_t head_dim_v = value.size(3);
|
||||
const int64_t max_seqlen_batch_k = key.size(2);
|
||||
const int64_t max_seqlen_batch_v = value.size(2);
|
||||
TORCH_CHECK(
|
||||
@ -806,7 +806,8 @@ std::tuple<Tensor, Tensor, Tensor, Tensor> _scaled_dot_product_cudnn_attention_c
|
||||
num_heads/*int64_t h*/,
|
||||
max_seqlen_batch_q/*int64_t s_q*/,
|
||||
max_seqlen_batch_k/*int64_t s_kv*/,
|
||||
head_dim/*int64_t d*/,
|
||||
head_dim_qk/*int64_t d_qk*/,
|
||||
head_dim_v/*int64_t d_v*/,
|
||||
softmax_scale/*float scaling_factor*/,
|
||||
compute_logsumexp/* bool */,
|
||||
is_causal/* bool */,
|
||||
|
@ -194,12 +194,11 @@ std::tuple<Tensor, Tensor, Tensor> _scaled_dot_product_cudnn_attention_backward_
|
||||
|
||||
const int64_t batch_size = query.size(0);
|
||||
const int64_t num_heads = query.size(1);
|
||||
const int64_t head_dim = query.size(3);
|
||||
const int64_t head_dim_qk = query.size(3);
|
||||
const int64_t head_dim_v = value.size(3);
|
||||
const int64_t max_seqlen_batch_q = query.size(1);
|
||||
const int64_t max_seqlen_batch_k = key.size(1);
|
||||
|
||||
const auto softmax_scale = sdp::calculate_scale(query, scale).as_float_unchecked();
|
||||
|
||||
auto dq = at::empty_like(query);
|
||||
auto dk = at::empty_like(key);
|
||||
auto dv = at::empty_like(value);
|
||||
@ -207,7 +206,8 @@ std::tuple<Tensor, Tensor, Tensor> _scaled_dot_product_cudnn_attention_backward_
|
||||
num_heads /*int64_t h*/,
|
||||
max_seqlen_batch_q /*int64_t s_q*/,
|
||||
max_seqlen_batch_k /*int64_t s_kv*/,
|
||||
head_dim /*int64_t d*/,
|
||||
head_dim_qk /*int64_t d_qk*/,
|
||||
head_dim_v /*int64_t d_v*/,
|
||||
softmax_scale /*float scaling_factor*/,
|
||||
is_causal /*bool is_causal*/,
|
||||
dropout_p /*float dropout_probability*/,
|
||||
|
@ -14,11 +14,11 @@ AllenaiLongformerBase,pass,9
|
||||
|
||||
|
||||
|
||||
BartForCausalLM,pass,12
|
||||
BartForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
BartForConditionalGeneration,pass,24
|
||||
BartForConditionalGeneration,pass,8
|
||||
|
||||
|
||||
|
||||
@ -34,11 +34,11 @@ BlenderbotForCausalLM,eager_fail_to_run,0
|
||||
|
||||
|
||||
|
||||
BlenderbotSmallForCausalLM,pass,12
|
||||
BlenderbotSmallForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
BlenderbotSmallForConditionalGeneration,pass,24
|
||||
BlenderbotSmallForConditionalGeneration,pass,8
|
||||
|
||||
|
||||
|
||||
@ -102,11 +102,11 @@ M2M100ForConditionalGeneration,pass,4
|
||||
|
||||
|
||||
|
||||
MBartForCausalLM,pass,12
|
||||
MBartForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
MBartForConditionalGeneration,pass,24
|
||||
MBartForConditionalGeneration,pass,8
|
||||
|
||||
|
||||
|
||||
@ -130,23 +130,23 @@ MobileBertForQuestionAnswering,pass,3
|
||||
|
||||
|
||||
|
||||
OPTForCausalLM,pass,12
|
||||
OPTForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
PLBartForCausalLM,pass,12
|
||||
PLBartForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
PLBartForConditionalGeneration,pass,29
|
||||
PLBartForConditionalGeneration,pass,8
|
||||
|
||||
|
||||
|
||||
PegasusForCausalLM,pass,12
|
||||
PegasusForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
PegasusForConditionalGeneration,pass,23
|
||||
PegasusForConditionalGeneration,pass,7
|
||||
|
||||
|
||||
|
||||
@ -158,7 +158,7 @@ RobertaForQuestionAnswering,pass,5
|
||||
|
||||
|
||||
|
||||
Speech2Text2ForCausalLM,pass,12
|
||||
Speech2Text2ForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
@ -170,11 +170,11 @@ T5Small,pass,5
|
||||
|
||||
|
||||
|
||||
TrOCRForCausalLM,pass,12
|
||||
TrOCRForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
XGLMForCausalLM,pass,12
|
||||
XGLMForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
|
|
@ -150,7 +150,7 @@ hf_Bert_large,pass,0
|
||||
|
||||
|
||||
|
||||
hf_BigBird,pass,46
|
||||
hf_BigBird,pass,43
|
||||
|
||||
|
||||
|
||||
@ -378,4 +378,4 @@ vision_maskrcnn,pass,17
|
||||
|
||||
|
||||
|
||||
yolov3,pass,2
|
||||
yolov3,pass,0
|
||||
|
|
@ -98,7 +98,7 @@ hf_Bert_large,pass,6
|
||||
|
||||
|
||||
|
||||
hf_BigBird,pass, 52
|
||||
hf_BigBird,pass,49
|
||||
|
||||
|
||||
|
||||
@ -286,4 +286,4 @@ vision_maskrcnn,pass,34
|
||||
|
||||
|
||||
|
||||
yolov3,pass,9
|
||||
yolov3,fail_accuracy,8
|
||||
|
|
@ -242,7 +242,7 @@ pyhpc_equation_of_state,pass,0
|
||||
|
||||
|
||||
|
||||
pyhpc_isoneutral_mixing,fail_to_run,0
|
||||
pyhpc_isoneutral_mixing,pass,0
|
||||
|
||||
|
||||
|
||||
@ -350,4 +350,4 @@ vision_maskrcnn,fail_to_run,0
|
||||
|
||||
|
||||
|
||||
yolov3,fail_to_run,0
|
||||
yolov3,pass,0
|
||||
|
|
@ -338,4 +338,4 @@ vision_maskrcnn,pass,28
|
||||
|
||||
|
||||
|
||||
yolov3,pass,2
|
||||
yolov3,pass,0
|
||||
|
|
@ -338,4 +338,4 @@ vision_maskrcnn,pass,28
|
||||
|
||||
|
||||
|
||||
yolov3,pass,2
|
||||
yolov3,pass,0
|
||||
|
|
@ -242,7 +242,7 @@ pyhpc_equation_of_state,pass,0
|
||||
|
||||
|
||||
|
||||
pyhpc_isoneutral_mixing,fail_to_run,0
|
||||
pyhpc_isoneutral_mixing,pass,0
|
||||
|
||||
|
||||
|
||||
@ -350,4 +350,4 @@ vision_maskrcnn,fail_to_run,0
|
||||
|
||||
|
||||
|
||||
yolov3,fail_to_run,0
|
||||
yolov3,pass,0
|
||||
|
|
@ -14,11 +14,11 @@ AllenaiLongformerBase,pass,9
|
||||
|
||||
|
||||
|
||||
BartForCausalLM,pass,12
|
||||
BartForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
BartForConditionalGeneration,pass,24
|
||||
BartForConditionalGeneration,pass,8
|
||||
|
||||
|
||||
|
||||
@ -34,11 +34,11 @@ BlenderbotForCausalLM,eager_fail_to_run,0
|
||||
|
||||
|
||||
|
||||
BlenderbotSmallForCausalLM,pass,12
|
||||
BlenderbotSmallForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
BlenderbotSmallForConditionalGeneration,pass,24
|
||||
BlenderbotSmallForConditionalGeneration,pass,8
|
||||
|
||||
|
||||
|
||||
@ -102,11 +102,11 @@ M2M100ForConditionalGeneration,pass,4
|
||||
|
||||
|
||||
|
||||
MBartForCausalLM,pass,12
|
||||
MBartForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
MBartForConditionalGeneration,pass,24
|
||||
MBartForConditionalGeneration,pass,8
|
||||
|
||||
|
||||
|
||||
@ -130,23 +130,23 @@ MobileBertForQuestionAnswering,pass,3
|
||||
|
||||
|
||||
|
||||
OPTForCausalLM,pass,12
|
||||
OPTForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
PLBartForCausalLM,pass,12
|
||||
PLBartForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
PLBartForConditionalGeneration,pass,29
|
||||
PLBartForConditionalGeneration,pass,8
|
||||
|
||||
|
||||
|
||||
PegasusForCausalLM,pass,12
|
||||
PegasusForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
PegasusForConditionalGeneration,pass,23
|
||||
PegasusForConditionalGeneration,pass,7
|
||||
|
||||
|
||||
|
||||
@ -158,7 +158,7 @@ RobertaForQuestionAnswering,pass,5
|
||||
|
||||
|
||||
|
||||
Speech2Text2ForCausalLM,pass,12
|
||||
Speech2Text2ForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
@ -170,11 +170,11 @@ T5Small,pass,5
|
||||
|
||||
|
||||
|
||||
TrOCRForCausalLM,pass,12
|
||||
TrOCRForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
XGLMForCausalLM,pass,12
|
||||
XGLMForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
|
|
@ -14,11 +14,11 @@ AllenaiLongformerBase,pass,9
|
||||
|
||||
|
||||
|
||||
BartForCausalLM,pass,12
|
||||
BartForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
BartForConditionalGeneration,pass,24
|
||||
BartForConditionalGeneration,pass,8
|
||||
|
||||
|
||||
|
||||
@ -34,11 +34,11 @@ BlenderbotForCausalLM,eager_fail_to_run,0
|
||||
|
||||
|
||||
|
||||
BlenderbotSmallForCausalLM,pass,12
|
||||
BlenderbotSmallForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
BlenderbotSmallForConditionalGeneration,pass,24
|
||||
BlenderbotSmallForConditionalGeneration,pass,8
|
||||
|
||||
|
||||
|
||||
@ -102,11 +102,11 @@ M2M100ForConditionalGeneration,pass,4
|
||||
|
||||
|
||||
|
||||
MBartForCausalLM,pass,12
|
||||
MBartForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
MBartForConditionalGeneration,pass,24
|
||||
MBartForConditionalGeneration,pass,8
|
||||
|
||||
|
||||
|
||||
@ -130,23 +130,23 @@ MobileBertForQuestionAnswering,pass,3
|
||||
|
||||
|
||||
|
||||
OPTForCausalLM,pass,12
|
||||
OPTForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
PLBartForCausalLM,pass,12
|
||||
PLBartForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
PLBartForConditionalGeneration,pass,29
|
||||
PLBartForConditionalGeneration,pass,8
|
||||
|
||||
|
||||
|
||||
PegasusForCausalLM,pass,12
|
||||
PegasusForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
PegasusForConditionalGeneration,pass,23
|
||||
PegasusForConditionalGeneration,pass,7
|
||||
|
||||
|
||||
|
||||
@ -158,7 +158,7 @@ RobertaForQuestionAnswering,pass,5
|
||||
|
||||
|
||||
|
||||
Speech2Text2ForCausalLM,pass,12
|
||||
Speech2Text2ForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
@ -170,11 +170,11 @@ T5Small,pass,5
|
||||
|
||||
|
||||
|
||||
TrOCRForCausalLM,pass,12
|
||||
TrOCRForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
XGLMForCausalLM,pass,12
|
||||
XGLMForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
|
|
@ -14,11 +14,11 @@ AllenaiLongformerBase,pass,9
|
||||
|
||||
|
||||
|
||||
BartForCausalLM,pass,12
|
||||
BartForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
BartForConditionalGeneration,pass,24
|
||||
BartForConditionalGeneration,pass,8
|
||||
|
||||
|
||||
|
||||
@ -34,11 +34,11 @@ BlenderbotForCausalLM,eager_fail_to_run,0
|
||||
|
||||
|
||||
|
||||
BlenderbotSmallForCausalLM,pass,12
|
||||
BlenderbotSmallForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
BlenderbotSmallForConditionalGeneration,pass,24
|
||||
BlenderbotSmallForConditionalGeneration,pass,8
|
||||
|
||||
|
||||
|
||||
@ -102,11 +102,11 @@ M2M100ForConditionalGeneration,pass,4
|
||||
|
||||
|
||||
|
||||
MBartForCausalLM,pass,12
|
||||
MBartForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
MBartForConditionalGeneration,pass,24
|
||||
MBartForConditionalGeneration,pass,8
|
||||
|
||||
|
||||
|
||||
@ -130,23 +130,23 @@ MobileBertForQuestionAnswering,pass,3
|
||||
|
||||
|
||||
|
||||
OPTForCausalLM,pass,12
|
||||
OPTForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
PLBartForCausalLM,pass,12
|
||||
PLBartForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
PLBartForConditionalGeneration,pass,29
|
||||
PLBartForConditionalGeneration,pass,8
|
||||
|
||||
|
||||
|
||||
PegasusForCausalLM,pass,12
|
||||
PegasusForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
PegasusForConditionalGeneration,pass,23
|
||||
PegasusForConditionalGeneration,pass,7
|
||||
|
||||
|
||||
|
||||
@ -158,7 +158,7 @@ RobertaForQuestionAnswering,pass,5
|
||||
|
||||
|
||||
|
||||
Speech2Text2ForCausalLM,pass,12
|
||||
Speech2Text2ForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
@ -170,11 +170,11 @@ T5Small,pass,5
|
||||
|
||||
|
||||
|
||||
TrOCRForCausalLM,pass,12
|
||||
TrOCRForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
XGLMForCausalLM,pass,12
|
||||
XGLMForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
|
|
@ -150,7 +150,7 @@ hf_Bert_large,pass,0
|
||||
|
||||
|
||||
|
||||
hf_BigBird,pass,46
|
||||
hf_BigBird,pass,43
|
||||
|
||||
|
||||
|
||||
@ -374,4 +374,4 @@ vision_maskrcnn,pass,17
|
||||
|
||||
|
||||
|
||||
yolov3,pass,2
|
||||
yolov3,pass,0
|
||||
|
|
@ -98,7 +98,7 @@ hf_Bert_large,pass,6
|
||||
|
||||
|
||||
|
||||
hf_BigBird,pass,52
|
||||
hf_BigBird,pass,49
|
||||
|
||||
|
||||
|
||||
@ -282,4 +282,4 @@ vision_maskrcnn,pass,34
|
||||
|
||||
|
||||
|
||||
yolov3,pass,9
|
||||
yolov3,fail_accuracy,8
|
||||
|
|
@ -298,4 +298,4 @@ vision_maskrcnn,pass,28
|
||||
|
||||
|
||||
|
||||
yolov3,pass,2
|
||||
yolov3,pass,0
|
||||
|
|
@ -14,11 +14,11 @@ AllenaiLongformerBase,pass,9
|
||||
|
||||
|
||||
|
||||
BartForCausalLM,pass,12
|
||||
BartForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
BartForConditionalGeneration,pass,24
|
||||
BartForConditionalGeneration,pass,8
|
||||
|
||||
|
||||
|
||||
@ -34,11 +34,11 @@ BlenderbotForCausalLM,eager_fail_to_run,0
|
||||
|
||||
|
||||
|
||||
BlenderbotSmallForCausalLM,pass,12
|
||||
BlenderbotSmallForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
BlenderbotSmallForConditionalGeneration,pass,24
|
||||
BlenderbotSmallForConditionalGeneration,pass,8
|
||||
|
||||
|
||||
|
||||
@ -102,11 +102,11 @@ M2M100ForConditionalGeneration,pass,4
|
||||
|
||||
|
||||
|
||||
MBartForCausalLM,pass,12
|
||||
MBartForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
MBartForConditionalGeneration,pass,24
|
||||
MBartForConditionalGeneration,pass,8
|
||||
|
||||
|
||||
|
||||
@ -130,23 +130,23 @@ MobileBertForQuestionAnswering,pass,3
|
||||
|
||||
|
||||
|
||||
OPTForCausalLM,pass,12
|
||||
OPTForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
PLBartForCausalLM,pass,12
|
||||
PLBartForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
PLBartForConditionalGeneration,pass,29
|
||||
PLBartForConditionalGeneration,pass,8
|
||||
|
||||
|
||||
|
||||
PegasusForCausalLM,pass,12
|
||||
PegasusForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
PegasusForConditionalGeneration,pass,23
|
||||
PegasusForConditionalGeneration,pass,7
|
||||
|
||||
|
||||
|
||||
@ -158,7 +158,7 @@ RobertaForQuestionAnswering,pass,5
|
||||
|
||||
|
||||
|
||||
Speech2Text2ForCausalLM,pass,12
|
||||
Speech2Text2ForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
@ -170,11 +170,11 @@ T5Small,pass,5
|
||||
|
||||
|
||||
|
||||
TrOCRForCausalLM,pass,12
|
||||
TrOCRForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
XGLMForCausalLM,pass,12
|
||||
XGLMForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
|
|
@ -150,7 +150,7 @@ hf_Bert_large,pass,0
|
||||
|
||||
|
||||
|
||||
hf_BigBird,fail_accuracy,46
|
||||
hf_BigBird,fail_accuracy,43
|
||||
|
||||
|
||||
|
||||
@ -374,4 +374,4 @@ vision_maskrcnn,pass,17
|
||||
|
||||
|
||||
|
||||
yolov3,pass,2
|
||||
yolov3,pass,0
|
||||
|
|
@ -98,7 +98,7 @@ hf_Bert_large,pass,6
|
||||
|
||||
|
||||
|
||||
hf_BigBird,pass,52
|
||||
hf_BigBird,pass,49
|
||||
|
||||
|
||||
|
||||
@ -282,4 +282,4 @@ vision_maskrcnn,pass,34
|
||||
|
||||
|
||||
|
||||
yolov3,pass,9
|
||||
yolov3,pass,8
|
||||
|
|
@ -14,11 +14,11 @@ AllenaiLongformerBase,pass,9
|
||||
|
||||
|
||||
|
||||
BartForCausalLM,pass,12
|
||||
BartForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
BartForConditionalGeneration,pass,24
|
||||
BartForConditionalGeneration,pass,8
|
||||
|
||||
|
||||
|
||||
@ -34,11 +34,11 @@ BlenderbotForCausalLM,eager_fail_to_run,0
|
||||
|
||||
|
||||
|
||||
BlenderbotSmallForCausalLM,pass,12
|
||||
BlenderbotSmallForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
BlenderbotSmallForConditionalGeneration,pass,24
|
||||
BlenderbotSmallForConditionalGeneration,pass,8
|
||||
|
||||
|
||||
|
||||
@ -102,11 +102,11 @@ M2M100ForConditionalGeneration,pass,4
|
||||
|
||||
|
||||
|
||||
MBartForCausalLM,pass,12
|
||||
MBartForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
MBartForConditionalGeneration,pass,24
|
||||
MBartForConditionalGeneration,pass,8
|
||||
|
||||
|
||||
|
||||
@ -130,23 +130,23 @@ MobileBertForQuestionAnswering,pass,3
|
||||
|
||||
|
||||
|
||||
OPTForCausalLM,pass,12
|
||||
OPTForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
PLBartForCausalLM,pass,12
|
||||
PLBartForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
PLBartForConditionalGeneration,pass,29
|
||||
PLBartForConditionalGeneration,pass,8
|
||||
|
||||
|
||||
|
||||
PegasusForCausalLM,pass,12
|
||||
PegasusForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
PegasusForConditionalGeneration,pass,23
|
||||
PegasusForConditionalGeneration,pass,7
|
||||
|
||||
|
||||
|
||||
@ -158,7 +158,7 @@ RobertaForQuestionAnswering,pass,5
|
||||
|
||||
|
||||
|
||||
Speech2Text2ForCausalLM,pass,12
|
||||
Speech2Text2ForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
@ -170,11 +170,11 @@ T5Small,pass,5
|
||||
|
||||
|
||||
|
||||
TrOCRForCausalLM,pass,12
|
||||
TrOCRForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
XGLMForCausalLM,pass,12
|
||||
XGLMForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
|
|
@ -150,7 +150,7 @@ hf_Bert_large,pass,0
|
||||
|
||||
|
||||
|
||||
hf_BigBird,pass,46
|
||||
hf_BigBird,pass,43
|
||||
|
||||
|
||||
|
||||
@ -378,4 +378,4 @@ vision_maskrcnn,pass,17
|
||||
|
||||
|
||||
|
||||
yolov3,pass,2
|
||||
yolov3,pass,0
|
||||
|
|
@ -98,7 +98,7 @@ hf_Bert_large,pass,6
|
||||
|
||||
|
||||
|
||||
hf_BigBird,pass,52
|
||||
hf_BigBird,pass,49
|
||||
|
||||
|
||||
|
||||
@ -286,4 +286,4 @@ vision_maskrcnn,pass,34
|
||||
|
||||
|
||||
|
||||
yolov3,pass,9
|
||||
yolov3,pass,8
|
||||
|
|
@ -14,11 +14,11 @@ AllenaiLongformerBase,pass,9
|
||||
|
||||
|
||||
|
||||
BartForCausalLM,pass,12
|
||||
BartForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
BartForConditionalGeneration,pass,24
|
||||
BartForConditionalGeneration,pass,8
|
||||
|
||||
|
||||
|
||||
@ -34,11 +34,11 @@ BlenderbotForCausalLM,eager_fail_to_run,0
|
||||
|
||||
|
||||
|
||||
BlenderbotSmallForCausalLM,pass,12
|
||||
BlenderbotSmallForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
BlenderbotSmallForConditionalGeneration,pass,24
|
||||
BlenderbotSmallForConditionalGeneration,pass,8
|
||||
|
||||
|
||||
|
||||
@ -102,11 +102,11 @@ M2M100ForConditionalGeneration,pass,4
|
||||
|
||||
|
||||
|
||||
MBartForCausalLM,pass,12
|
||||
MBartForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
MBartForConditionalGeneration,pass,24
|
||||
MBartForConditionalGeneration,pass,8
|
||||
|
||||
|
||||
|
||||
@ -130,23 +130,23 @@ MobileBertForQuestionAnswering,pass,3
|
||||
|
||||
|
||||
|
||||
OPTForCausalLM,pass,12
|
||||
OPTForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
PLBartForCausalLM,pass,12
|
||||
PLBartForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
PLBartForConditionalGeneration,pass,29
|
||||
PLBartForConditionalGeneration,pass,8
|
||||
|
||||
|
||||
|
||||
PegasusForCausalLM,pass,12
|
||||
PegasusForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
PegasusForConditionalGeneration,pass,23
|
||||
PegasusForConditionalGeneration,pass,7
|
||||
|
||||
|
||||
|
||||
@ -158,7 +158,7 @@ RobertaForQuestionAnswering,pass,5
|
||||
|
||||
|
||||
|
||||
Speech2Text2ForCausalLM,pass,12
|
||||
Speech2Text2ForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
@ -170,11 +170,11 @@ T5Small,pass,5
|
||||
|
||||
|
||||
|
||||
TrOCRForCausalLM,pass,12
|
||||
TrOCRForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
XGLMForCausalLM,pass,12
|
||||
XGLMForCausalLM,pass,6
|
||||
|
||||
|
||||
|
||||
|
|
@ -150,7 +150,7 @@ hf_Bert_large,pass,0
|
||||
|
||||
|
||||
|
||||
hf_BigBird,fail_accuracy,46
|
||||
hf_BigBird,fail_accuracy,43
|
||||
|
||||
|
||||
|
||||
@ -378,4 +378,4 @@ vision_maskrcnn,pass,17
|
||||
|
||||
|
||||
|
||||
yolov3,pass,2
|
||||
yolov3,pass,0
|
||||
|
|
@ -98,7 +98,7 @@ hf_Bert_large,pass,6
|
||||
|
||||
|
||||
|
||||
hf_BigBird,pass,52
|
||||
hf_BigBird,pass,49
|
||||
|
||||
|
||||
|
||||
@ -286,4 +286,4 @@ vision_maskrcnn,pass,34
|
||||
|
||||
|
||||
|
||||
yolov3,pass,9
|
||||
yolov3,pass,8
|
||||
|
|
@ -4,12 +4,11 @@ phlippe_densenet,float32,static,default,1.3988316
|
||||
basic_gnn_gcn,float32,dynamic,default,1.074576405
|
||||
llama_v2_7b_16h,float32,dynamic,default,1.211740245
|
||||
resnet50,float32,dynamic,default,1.65984261
|
||||
timm_efficientnet,float32,static,cpp,2.271561735
|
||||
#timm_efficientnet,float32,static,cpp,2.1938112
|
||||
mobilenet_v3_large,float32,static,cpp,2.63375628
|
||||
timm_resnest,float32,dynamic,cpp,1.67998548
|
||||
pyhpc_turbulent_kinetic_energy,float32,dynamic,cpp,1.59968463
|
||||
#hf_GPT2,float32,dynamic,cpp,
|
||||
hf_GPT2,float32,dynamic,cpp,1.379885175
|
||||
#hf_GPT2,float32,dynamic,cpp,1.292704418
|
||||
resnext50_32x4d,amp,static,default,1.461687045
|
||||
vgg16,amp,static,default,1.267194285
|
||||
hf_Longformer,amp,dynamic,default,0.997006035
|
||||
@ -17,6 +16,6 @@ hf_Bert_large,amp,dynamic,default,0.99391146
|
||||
llama,amp,static,default,1.32950568
|
||||
timm_regnet,amp,static,cpp,1.157188305
|
||||
lennard_jones,amp,static,cpp,2.240104485
|
||||
hf_T5_generate,amp,dynamic,cpp,1.447656135
|
||||
#hf_T5_generate,amp,dynamic,cpp,1.29339502
|
||||
timm_vovnet,amp,dynamic,cpp,1.07856471
|
||||
mobilenet_v2,amp,dynamic,cpp,2.27774577
|
||||
|
|
@ -272,6 +272,38 @@ TEST(StaticRuntime, autogen_addr) {
|
||||
/*check_resize=*/true);
|
||||
}
|
||||
|
||||
TEST(StaticRuntime, autogen__test_functorch_fallback) {
|
||||
const std::string script = R"IR(
|
||||
graph(%self: Tensor, %other: Tensor):
|
||||
%bias: None = prim::Constant()
|
||||
%ret = aten::_test_functorch_fallback(%self, %other)
|
||||
%cloned = aten::clone(%ret, %bias)
|
||||
return (%cloned)
|
||||
)IR";
|
||||
|
||||
auto self0 = at::rand({6, 6, 6});
|
||||
auto other0 = at::rand({6, 6, 6});
|
||||
std::vector<IValue> args{self0, other0};
|
||||
testStaticRuntime(
|
||||
script,
|
||||
args,
|
||||
{},
|
||||
/*use_allclose=*/false,
|
||||
/*use_equalnan=*/false,
|
||||
/*check_resize=*/true);
|
||||
|
||||
auto self1 = at::rand({22, 22, 22});
|
||||
auto other1 = at::rand({22, 22, 22});
|
||||
std::vector<IValue> args2{self1, other1};
|
||||
testStaticRuntime(
|
||||
script,
|
||||
args,
|
||||
args2,
|
||||
/*use_allclose=*/false,
|
||||
/*use_equalnan=*/false,
|
||||
/*check_resize=*/true);
|
||||
}
|
||||
|
||||
TEST(StaticRuntime, autogen_argmax) {
|
||||
const std::string script = R"IR(
|
||||
graph(%self: Tensor, %dim: int?, %keepdim: bool):
|
||||
@ -4440,6 +4472,40 @@ TEST(StaticRuntime, autogen_masked_select) {
|
||||
/*check_resize=*/true);
|
||||
}
|
||||
|
||||
TEST(StaticRuntime, autogen_nonzero_static) {
|
||||
const std::string script = R"IR(
|
||||
graph(%self: Tensor, %size: int, %fill_value: int):
|
||||
%bias: None = prim::Constant()
|
||||
%ret = aten::nonzero_static(%self, %size, %fill_value)
|
||||
%cloned = aten::clone(%ret, %bias)
|
||||
return (%cloned)
|
||||
)IR";
|
||||
|
||||
auto self0 = at::rand({6, 6, 6});
|
||||
auto size0 = 1;
|
||||
auto fill_value0 = 1;
|
||||
std::vector<IValue> args{self0, size0, fill_value0};
|
||||
testStaticRuntime(
|
||||
script,
|
||||
args,
|
||||
{},
|
||||
/*use_allclose=*/false,
|
||||
/*use_equalnan=*/false,
|
||||
/*check_resize=*/true);
|
||||
|
||||
auto self1 = at::rand({22, 22, 22});
|
||||
auto size1 = 1;
|
||||
auto fill_value1 = 1;
|
||||
std::vector<IValue> args2{self1, size1, fill_value1};
|
||||
testStaticRuntime(
|
||||
script,
|
||||
args,
|
||||
args2,
|
||||
/*use_allclose=*/false,
|
||||
/*use_equalnan=*/false,
|
||||
/*check_resize=*/true);
|
||||
}
|
||||
|
||||
TEST(StaticRuntime, autogen_gather) {
|
||||
const std::string script = R"IR(
|
||||
graph(%self: Tensor, %dim: int, %index: Tensor, %sparse_grad: bool):
|
||||
@ -7106,222 +7172,6 @@ TEST(StaticRuntime, autogen_special_multigammaln) {
|
||||
/*check_resize=*/true);
|
||||
}
|
||||
|
||||
TEST(StaticRuntime, autogen_fft_fft) {
|
||||
const std::string script = R"IR(
|
||||
graph(%self: Tensor, %n: int?, %dim: int, %norm: str?):
|
||||
%bias: None = prim::Constant()
|
||||
%ret = aten::fft_fft(%self, %n, %dim, %norm)
|
||||
%cloned = aten::clone(%ret, %bias)
|
||||
return (%cloned)
|
||||
)IR";
|
||||
|
||||
auto self0 = at::rand({6, 6, 6});
|
||||
auto n0 = 1;
|
||||
auto dim0 = 1;
|
||||
auto norm0 = "forward";
|
||||
std::vector<IValue> args{self0, n0, dim0, norm0};
|
||||
testStaticRuntime(
|
||||
script,
|
||||
args,
|
||||
{},
|
||||
/*use_allclose=*/false,
|
||||
/*use_equalnan=*/false,
|
||||
/*check_resize=*/true);
|
||||
|
||||
auto self1 = at::rand({22, 22, 22});
|
||||
auto n1 = 1;
|
||||
auto dim1 = 1;
|
||||
auto norm1 = "forward";
|
||||
std::vector<IValue> args2{self1, n1, dim1, norm1};
|
||||
testStaticRuntime(
|
||||
script,
|
||||
args,
|
||||
args2,
|
||||
/*use_allclose=*/false,
|
||||
/*use_equalnan=*/false,
|
||||
/*check_resize=*/true);
|
||||
}
|
||||
|
||||
TEST(StaticRuntime, autogen_fft_ifft) {
|
||||
const std::string script = R"IR(
|
||||
graph(%self: Tensor, %n: int?, %dim: int, %norm: str?):
|
||||
%bias: None = prim::Constant()
|
||||
%ret = aten::fft_ifft(%self, %n, %dim, %norm)
|
||||
%cloned = aten::clone(%ret, %bias)
|
||||
return (%cloned)
|
||||
)IR";
|
||||
|
||||
auto self0 = at::rand({6, 6, 6});
|
||||
auto n0 = 1;
|
||||
auto dim0 = 1;
|
||||
auto norm0 = "forward";
|
||||
std::vector<IValue> args{self0, n0, dim0, norm0};
|
||||
testStaticRuntime(
|
||||
script,
|
||||
args,
|
||||
{},
|
||||
/*use_allclose=*/false,
|
||||
/*use_equalnan=*/false,
|
||||
/*check_resize=*/true);
|
||||
|
||||
auto self1 = at::rand({22, 22, 22});
|
||||
auto n1 = 1;
|
||||
auto dim1 = 1;
|
||||
auto norm1 = "forward";
|
||||
std::vector<IValue> args2{self1, n1, dim1, norm1};
|
||||
testStaticRuntime(
|
||||
script,
|
||||
args,
|
||||
args2,
|
||||
/*use_allclose=*/false,
|
||||
/*use_equalnan=*/false,
|
||||
/*check_resize=*/true);
|
||||
}
|
||||
|
||||
TEST(StaticRuntime, autogen_fft_rfft) {
|
||||
const std::string script = R"IR(
|
||||
graph(%self: Tensor, %n: int?, %dim: int, %norm: str?):
|
||||
%bias: None = prim::Constant()
|
||||
%ret = aten::fft_rfft(%self, %n, %dim, %norm)
|
||||
%cloned = aten::clone(%ret, %bias)
|
||||
return (%cloned)
|
||||
)IR";
|
||||
|
||||
auto self0 = at::rand({6, 6, 6});
|
||||
auto n0 = 1;
|
||||
auto dim0 = 1;
|
||||
auto norm0 = "forward";
|
||||
std::vector<IValue> args{self0, n0, dim0, norm0};
|
||||
testStaticRuntime(
|
||||
script,
|
||||
args,
|
||||
{},
|
||||
/*use_allclose=*/false,
|
||||
/*use_equalnan=*/false,
|
||||
/*check_resize=*/true);
|
||||
|
||||
auto self1 = at::rand({22, 22, 22});
|
||||
auto n1 = 1;
|
||||
auto dim1 = 1;
|
||||
auto norm1 = "forward";
|
||||
std::vector<IValue> args2{self1, n1, dim1, norm1};
|
||||
testStaticRuntime(
|
||||
script,
|
||||
args,
|
||||
args2,
|
||||
/*use_allclose=*/false,
|
||||
/*use_equalnan=*/false,
|
||||
/*check_resize=*/true);
|
||||
}
|
||||
|
||||
TEST(StaticRuntime, autogen_fft_irfft) {
|
||||
const std::string script = R"IR(
|
||||
graph(%self: Tensor, %n: int?, %dim: int, %norm: str?):
|
||||
%bias: None = prim::Constant()
|
||||
%ret = aten::fft_irfft(%self, %n, %dim, %norm)
|
||||
%cloned = aten::clone(%ret, %bias)
|
||||
return (%cloned)
|
||||
)IR";
|
||||
|
||||
auto self0 = at::rand({6, 6, 6});
|
||||
auto n0 = 1;
|
||||
auto dim0 = 1;
|
||||
auto norm0 = "forward";
|
||||
std::vector<IValue> args{self0, n0, dim0, norm0};
|
||||
testStaticRuntime(
|
||||
script,
|
||||
args,
|
||||
{},
|
||||
/*use_allclose=*/false,
|
||||
/*use_equalnan=*/false,
|
||||
/*check_resize=*/true);
|
||||
|
||||
auto self1 = at::rand({22, 22, 22});
|
||||
auto n1 = 1;
|
||||
auto dim1 = 1;
|
||||
auto norm1 = "forward";
|
||||
std::vector<IValue> args2{self1, n1, dim1, norm1};
|
||||
testStaticRuntime(
|
||||
script,
|
||||
args,
|
||||
args2,
|
||||
/*use_allclose=*/false,
|
||||
/*use_equalnan=*/false,
|
||||
/*check_resize=*/true);
|
||||
}
|
||||
|
||||
TEST(StaticRuntime, autogen_fft_hfft) {
|
||||
const std::string script = R"IR(
|
||||
graph(%self: Tensor, %n: int?, %dim: int, %norm: str?):
|
||||
%bias: None = prim::Constant()
|
||||
%ret = aten::fft_hfft(%self, %n, %dim, %norm)
|
||||
%cloned = aten::clone(%ret, %bias)
|
||||
return (%cloned)
|
||||
)IR";
|
||||
|
||||
auto self0 = at::rand({6, 6, 6});
|
||||
auto n0 = 1;
|
||||
auto dim0 = 1;
|
||||
auto norm0 = "forward";
|
||||
std::vector<IValue> args{self0, n0, dim0, norm0};
|
||||
testStaticRuntime(
|
||||
script,
|
||||
args,
|
||||
{},
|
||||
/*use_allclose=*/false,
|
||||
/*use_equalnan=*/false,
|
||||
/*check_resize=*/true);
|
||||
|
||||
auto self1 = at::rand({22, 22, 22});
|
||||
auto n1 = 1;
|
||||
auto dim1 = 1;
|
||||
auto norm1 = "forward";
|
||||
std::vector<IValue> args2{self1, n1, dim1, norm1};
|
||||
testStaticRuntime(
|
||||
script,
|
||||
args,
|
||||
args2,
|
||||
/*use_allclose=*/false,
|
||||
/*use_equalnan=*/false,
|
||||
/*check_resize=*/true);
|
||||
}
|
||||
|
||||
TEST(StaticRuntime, autogen_fft_ihfft) {
|
||||
const std::string script = R"IR(
|
||||
graph(%self: Tensor, %n: int?, %dim: int, %norm: str?):
|
||||
%bias: None = prim::Constant()
|
||||
%ret = aten::fft_ihfft(%self, %n, %dim, %norm)
|
||||
%cloned = aten::clone(%ret, %bias)
|
||||
return (%cloned)
|
||||
)IR";
|
||||
|
||||
auto self0 = at::rand({6, 6, 6});
|
||||
auto n0 = 1;
|
||||
auto dim0 = 1;
|
||||
auto norm0 = "forward";
|
||||
std::vector<IValue> args{self0, n0, dim0, norm0};
|
||||
testStaticRuntime(
|
||||
script,
|
||||
args,
|
||||
{},
|
||||
/*use_allclose=*/false,
|
||||
/*use_equalnan=*/false,
|
||||
/*check_resize=*/true);
|
||||
|
||||
auto self1 = at::rand({22, 22, 22});
|
||||
auto n1 = 1;
|
||||
auto dim1 = 1;
|
||||
auto norm1 = "forward";
|
||||
std::vector<IValue> args2{self1, n1, dim1, norm1};
|
||||
testStaticRuntime(
|
||||
script,
|
||||
args,
|
||||
args2,
|
||||
/*use_allclose=*/false,
|
||||
/*use_equalnan=*/false,
|
||||
/*check_resize=*/true);
|
||||
}
|
||||
|
||||
TEST(StaticRuntime, autogen_linalg_cross) {
|
||||
const std::string script = R"IR(
|
||||
graph(%self: Tensor, %other: Tensor, %dim: int):
|
||||
|
@ -827,6 +827,7 @@ libtorch_python_core_sources = [
|
||||
"torch/csrc/dynamo/guards.cpp",
|
||||
"torch/csrc/dynamo/init.cpp",
|
||||
"torch/csrc/functorch/init.cpp",
|
||||
"torch/csrc/fx/node.cpp",
|
||||
"torch/csrc/mps/Module.cpp",
|
||||
"torch/csrc/mtia/Module.cpp",
|
||||
"torch/csrc/inductor/aoti_runner/pybind.cpp",
|
||||
|
@ -1,186 +0,0 @@
|
||||
#include "caffe2/perfkernels/adagrad.h"
|
||||
|
||||
#include <cmath>
|
||||
|
||||
#include "caffe2/perfkernels/common.h"
|
||||
|
||||
namespace caffe2 {
|
||||
|
||||
void adagrad_update__base(
|
||||
int N,
|
||||
const float* w,
|
||||
const float* g,
|
||||
const float* h,
|
||||
float* nw,
|
||||
float* nh,
|
||||
float epsilon,
|
||||
float decay,
|
||||
const float lr,
|
||||
const float weight_decay = 0.f) {
|
||||
internal::adagrad_update_base_inlined(
|
||||
N, w, g, h, nw, nh, decay, epsilon, lr, weight_decay);
|
||||
}
|
||||
|
||||
void adagrad_update_prefetch__base(
|
||||
int N,
|
||||
const float* w,
|
||||
const float* /* w_n */, // prefetch ptr
|
||||
|
||||
const float* g,
|
||||
|
||||
const float* h,
|
||||
const float* /* h_n */, // prefetch ptr
|
||||
|
||||
float* nw,
|
||||
float* /* nw_n */, // prefetch ptr
|
||||
|
||||
float* nh,
|
||||
float* /* nh_n */, // prefetch ptr
|
||||
|
||||
float epsilon,
|
||||
float lr,
|
||||
float weight_decay = 0.f) {
|
||||
adagrad_update__base(N, w, g, h, nw, nh, epsilon, 1.0f, lr, weight_decay);
|
||||
}
|
||||
|
||||
void adagrad_fp16_update_prefetch__base(
|
||||
int N,
|
||||
const at::Half* w,
|
||||
const at::Half* /* w_n */, // prefetch ptr
|
||||
const float* g,
|
||||
const at::Half* h,
|
||||
const at::Half* /* h_n */, // prefetch ptr
|
||||
at::Half* nw,
|
||||
at::Half* /* nw_n */, // prefetch ptr
|
||||
at::Half* nh,
|
||||
at::Half* /* nh_n */, // prefetch ptr
|
||||
float epsilon,
|
||||
float lr,
|
||||
float weight_decay = 0.f) {
|
||||
internal::adagrad_update_base_inlined(
|
||||
N, w, g, h, nw, nh, 1.0f, epsilon, lr, weight_decay);
|
||||
}
|
||||
|
||||
// version without prefetching
|
||||
decltype(adagrad_update__base) adagrad_update__avx2_fma;
|
||||
decltype(adagrad_update__base) adagrad_update__avx512;
|
||||
void adagrad_update(
|
||||
int N,
|
||||
const float* w,
|
||||
const float* g,
|
||||
const float* h,
|
||||
float* nw,
|
||||
float* nh,
|
||||
float epsilon,
|
||||
float decay,
|
||||
float lr,
|
||||
float weight_decay) {
|
||||
AVX512_DO(adagrad_update, N, w, g, h, nw, nh, epsilon, decay, lr, weight_decay);
|
||||
AVX2_FMA_DO(
|
||||
adagrad_update, N, w, g, h, nw, nh, epsilon, decay, lr, weight_decay);
|
||||
BASE_DO(adagrad_update, N, w, g, h, nw, nh, epsilon, decay, lr, weight_decay);
|
||||
}
|
||||
|
||||
decltype(adagrad_update_prefetch__base) adagrad_update_prefetch__avx2_fma;
|
||||
void adagrad_update_prefetch(
|
||||
int N,
|
||||
const float* w,
|
||||
const float* w_n, // prefetch ptr
|
||||
|
||||
const float* g,
|
||||
|
||||
const float* h,
|
||||
const float* h_n, // prefetch ptr
|
||||
|
||||
float* nw,
|
||||
float* nw_n, // prefetch ptr
|
||||
|
||||
float* nh,
|
||||
float* nh_n, // prefetch ptr
|
||||
|
||||
float epsilon,
|
||||
float lr,
|
||||
float weight_decay) {
|
||||
AVX2_FMA_DO(
|
||||
adagrad_update_prefetch,
|
||||
N,
|
||||
w,
|
||||
w_n,
|
||||
g,
|
||||
h,
|
||||
h_n,
|
||||
nw,
|
||||
nw_n,
|
||||
nh,
|
||||
nh_n,
|
||||
epsilon,
|
||||
lr,
|
||||
weight_decay);
|
||||
BASE_DO(
|
||||
adagrad_update_prefetch,
|
||||
N,
|
||||
w,
|
||||
w_n,
|
||||
g,
|
||||
h,
|
||||
h_n,
|
||||
nw,
|
||||
nw_n,
|
||||
nh,
|
||||
nh_n,
|
||||
epsilon,
|
||||
lr,
|
||||
weight_decay);
|
||||
}
|
||||
|
||||
// Version with prefetching for embeddings and
|
||||
// momentum using fp16
|
||||
decltype(adagrad_fp16_update_prefetch__base)
|
||||
adagrad_fp16_update_prefetch__avx2_fma;
|
||||
void adagrad_fp16_update_prefetch(
|
||||
int N,
|
||||
const at::Half* w,
|
||||
const at::Half* w_n, // prefetch ptr
|
||||
const float* g,
|
||||
const at::Half* h,
|
||||
const at::Half* h_n, // prefetch ptr
|
||||
at::Half* nw,
|
||||
at::Half* nw_n, // prefetch ptr
|
||||
at::Half* nh,
|
||||
at::Half* nh_n, // prefetch ptr
|
||||
float epsilon,
|
||||
float lr,
|
||||
float weight_decay) {
|
||||
AVX2_FMA_DO(
|
||||
adagrad_fp16_update_prefetch,
|
||||
N,
|
||||
w,
|
||||
w_n,
|
||||
g,
|
||||
h,
|
||||
h_n,
|
||||
nw,
|
||||
nw_n,
|
||||
nh,
|
||||
nh_n,
|
||||
epsilon,
|
||||
lr,
|
||||
weight_decay);
|
||||
BASE_DO(
|
||||
adagrad_fp16_update_prefetch,
|
||||
N,
|
||||
w,
|
||||
w_n,
|
||||
g,
|
||||
h,
|
||||
h_n,
|
||||
nw,
|
||||
nw_n,
|
||||
nh,
|
||||
nh_n,
|
||||
epsilon,
|
||||
lr,
|
||||
weight_decay);
|
||||
}
|
||||
|
||||
} // namespace caffe2
|
@ -1,205 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#if defined(__AVX__) && !defined(__NVCC__) && \
|
||||
(defined(__x86_64__) || defined(_M_X64) || defined(__i386__))
|
||||
#define CAFFE2_PERFKERNELS_ADAGRAD_H_USE_INTRINSIC
|
||||
#include <immintrin.h>
|
||||
#endif
|
||||
#include <c10/util/Half.h>
|
||||
#include <c10/util/irange.h>
|
||||
|
||||
namespace caffe2 {
|
||||
|
||||
namespace internal {
|
||||
|
||||
// The following functions inside internal namespace are inlined because they
|
||||
// are performance critical.
|
||||
|
||||
template <typename T>
|
||||
static inline void adagrad_update_base_inlined(
|
||||
int N,
|
||||
const T* w,
|
||||
const float* g,
|
||||
const T* h,
|
||||
T* nw,
|
||||
T* nh,
|
||||
float decay,
|
||||
float epsilon,
|
||||
float lr,
|
||||
float weight_decay = 0.f) {
|
||||
for (const auto i : c10::irange(N)) {
|
||||
float gi = std::fma(weight_decay, w[i], g[i]);
|
||||
float hi = decay * h[i] + gi * gi;
|
||||
nh[i] = hi;
|
||||
nw[i] = w[i] + lr * gi / (std::sqrt(hi) + epsilon);
|
||||
}
|
||||
}
|
||||
|
||||
// version with prefetching
|
||||
// TODO(msmelyan)
|
||||
// Crux of the computation is computing a / (sqrt(b) + epsilon),
|
||||
// where a and b are vectors and epsilon is very small (eg., 10^-5) and does not
|
||||
// change. Today it's computed using two vector sqrt and vector divide simd
|
||||
// instructions. It is slow. We can take advantage of existing fast vector
|
||||
// VRSQRTPS instruction that computes approximate reciprocals of square roots
|
||||
// of the vector. It is 6x faster than vsrt and vdiv combinations. Since the
|
||||
// addition of epsilon is just done to avoid division by zero, we approximate a
|
||||
// / (sqrt(b) + epsilon) by a / (sqrt(b + sqrt(epsilon)) If we do that, we can
|
||||
// use VRSQRTPS instead now. VRSQRTPS is not very accurate. Specifically, for
|
||||
// the test on random numbers between 0.1 and 1 the absolute error was about
|
||||
// 10^-3 compared to using slower but more accurate combination of vsqrt and
|
||||
// vdiv. Extend Marat's function with more NR iterations to get more accuracy
|
||||
// for training
|
||||
// TODO(msmelyan)
|
||||
// explore streaming stores, but need to have unique indices (deduplication)
|
||||
inline void adagrad_update_prefetch_inlined(
|
||||
int N,
|
||||
const float* w,
|
||||
#ifdef CAFFE2_PERFKERNELS_ADAGRAD_H_USE_INTRINSIC
|
||||
const float* w_n, // prefetch ptr
|
||||
#else
|
||||
const float* /* unused */,
|
||||
#endif
|
||||
|
||||
const float* g,
|
||||
|
||||
const float* h,
|
||||
#ifdef CAFFE2_PERFKERNELS_ADAGRAD_H_USE_INTRINSIC
|
||||
const float* h_n, // prefetch ptr
|
||||
#else
|
||||
const float* /* unused */,
|
||||
#endif
|
||||
|
||||
float* nw,
|
||||
#ifdef CAFFE2_PERFKERNELS_ADAGRAD_H_USE_INTRINSIC
|
||||
float* nw_n, // prefetch ptr
|
||||
#else
|
||||
float* /* unused */,
|
||||
#endif
|
||||
|
||||
float* nh,
|
||||
#ifdef CAFFE2_PERFKERNELS_ADAGRAD_H_USE_INTRINSIC
|
||||
float* nh_n, // prefetch ptr
|
||||
#else
|
||||
float* /* unused */,
|
||||
#endif
|
||||
|
||||
float epsilon,
|
||||
float lr,
|
||||
float weight_decay = 0.f) {
|
||||
auto i = 0;
|
||||
|
||||
#ifdef CAFFE2_PERFKERNELS_ADAGRAD_H_USE_INTRINSIC
|
||||
constexpr int kSize = 8;
|
||||
for (; i + kSize <= N; i += kSize) {
|
||||
_mm_prefetch(reinterpret_cast<const char*>(&w_n[i]), _MM_HINT_T0);
|
||||
_mm_prefetch(reinterpret_cast<const char*>(&h_n[i]), _MM_HINT_T0);
|
||||
_mm_prefetch(reinterpret_cast<const char*>(&nw_n[i]), _MM_HINT_T0);
|
||||
_mm_prefetch(reinterpret_cast<const char*>(&nh_n[i]), _MM_HINT_T0);
|
||||
|
||||
__m256 gi = _mm256_loadu_ps(g + i);
|
||||
__m256 hi = _mm256_loadu_ps(h + i);
|
||||
__m256 wi = _mm256_loadu_ps(w + i);
|
||||
#ifdef __FMA__
|
||||
gi = _mm256_fmadd_ps(_mm256_set1_ps(weight_decay), wi, gi);
|
||||
|
||||
#else
|
||||
gi = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(weight_decay), wi), gi);
|
||||
#endif
|
||||
|
||||
__m256 nhi = _mm256_add_ps(hi, _mm256_mul_ps(gi, gi));
|
||||
_mm256_storeu_ps(nh + i, nhi);
|
||||
__m256 vtmp = _mm256_div_ps(
|
||||
_mm256_mul_ps(_mm256_set1_ps(lr), gi),
|
||||
_mm256_add_ps(_mm256_sqrt_ps(nhi), _mm256_set1_ps(epsilon)));
|
||||
_mm256_storeu_ps(nw + i, _mm256_add_ps(wi, vtmp));
|
||||
}
|
||||
#endif
|
||||
|
||||
adagrad_update_base_inlined(
|
||||
N - i,
|
||||
w + i,
|
||||
g + i,
|
||||
h + i,
|
||||
nw + i,
|
||||
nh + i,
|
||||
1.0f,
|
||||
epsilon,
|
||||
lr,
|
||||
weight_decay);
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
|
||||
// version with prefetching
|
||||
// TODO(msmelyan)
|
||||
// Crux of the computation is computing a / (sqrt(b) + epsilon),
|
||||
// where a and b are vectors and epsilon is very small (eg., 10^-5) and does not
|
||||
// change. Today it's computed using two vector sqrt and vector divide simd
|
||||
// instructions. It is slow. We can take advantage of existing fast vector
|
||||
// VRSQRTPS instruction that computes approximate reciprocals of square roots
|
||||
// of the vector. It is 6x faster than vsrt and vdiv combinations. Since the
|
||||
// addition of epsilon is just done to avoid division by zero, we approximate a
|
||||
// / (sqrt(b) + epsilon) by a / (sqrt(b + sqrt(epsilon)) If we do that, we can
|
||||
// use VRSQRTPS instead now. VRSQRTPS is not very accurate. Specifically, for
|
||||
// the test on random numbers between 0.1 and 1 the absolute error was about
|
||||
// 10^-3 compared to using slower but more accurate combination of vsqrt and
|
||||
// vdiv. Extend Marat's function with more NR iterations to get more accuracy
|
||||
// for training
|
||||
// TODO(msmelyan)
|
||||
// explore streaming stores, but need to have inuque indices (deduplication)
|
||||
void adagrad_update_prefetch(
|
||||
int N,
|
||||
const float* w,
|
||||
const float* w_n, // prefetch ptr
|
||||
|
||||
const float* g,
|
||||
|
||||
const float* h,
|
||||
const float* h_n, // prefetch ptr
|
||||
|
||||
float* nw,
|
||||
float* nw_n, // prefetch ptr
|
||||
|
||||
float* nh,
|
||||
float* nh_n, // prefetch ptr
|
||||
|
||||
float epsilon,
|
||||
float lr,
|
||||
float weight_decay = 0.f);
|
||||
|
||||
// Version with prefetching for embeddings and
|
||||
// momentum using fp16
|
||||
void adagrad_fp16_update_prefetch(
|
||||
int N,
|
||||
const at::Half* w,
|
||||
const at::Half* w_n, // prefetch ptr
|
||||
const float* g,
|
||||
const at::Half* h,
|
||||
const at::Half* h_n, // prefetch ptr
|
||||
at::Half* nw,
|
||||
at::Half* nw_n, // prefetch ptr
|
||||
at::Half* nh,
|
||||
at::Half* nh_n, // prefetch ptr
|
||||
float epsilon,
|
||||
float lr,
|
||||
float weight_decay = 0.f);
|
||||
|
||||
// version without prefetching
|
||||
void adagrad_update(
|
||||
int N,
|
||||
const float* w,
|
||||
const float* g,
|
||||
const float* h,
|
||||
float* nw,
|
||||
float* nh,
|
||||
float epsilon,
|
||||
float decay,
|
||||
float lr,
|
||||
float weight_decay = 0.f);
|
||||
|
||||
} // namespace caffe2
|
||||
|
||||
#ifdef CAFFE2_PERFKERNELS_ADAGRAD_H_USE_INTRINSIC
|
||||
#undef CAFFE2_PERFKERNELS_ADAGRAD_H_USE_INTRINSIC
|
||||
#endif
|
@ -1,125 +0,0 @@
|
||||
#include "caffe2/perfkernels/adagrad.h"
|
||||
#include "caffe2/perfkernels/cvtsh_ss_bugfix.h"
|
||||
|
||||
#include <emmintrin.h>
|
||||
#include <immintrin.h>
|
||||
|
||||
namespace caffe2 {
|
||||
|
||||
// version without prefetching
|
||||
void adagrad_update__avx2_fma(
|
||||
int N,
|
||||
const float* w,
|
||||
const float* g,
|
||||
const float* h,
|
||||
float* nw,
|
||||
float* nh,
|
||||
float epsilon,
|
||||
float decay,
|
||||
float lr,
|
||||
float weight_decay = 0.f) {
|
||||
constexpr int kSize = 8;
|
||||
auto i = 0;
|
||||
for (; i + kSize <= N; i += kSize) {
|
||||
__m256 gi = _mm256_loadu_ps(g + i);
|
||||
__m256 hi = _mm256_loadu_ps(h + i);
|
||||
__m256 wi = _mm256_loadu_ps(w + i);
|
||||
gi = _mm256_fmadd_ps(_mm256_set1_ps(weight_decay), wi, gi);
|
||||
|
||||
__m256 nhi = _mm256_add_ps(
|
||||
_mm256_mul_ps(_mm256_set1_ps(decay), hi), _mm256_mul_ps(gi, gi));
|
||||
_mm256_storeu_ps(nh + i, nhi);
|
||||
__m256 vtmp = _mm256_div_ps(
|
||||
_mm256_mul_ps(_mm256_set1_ps(lr), gi),
|
||||
_mm256_add_ps(_mm256_sqrt_ps(nhi), _mm256_set1_ps(epsilon)));
|
||||
_mm256_storeu_ps(nw + i, _mm256_add_ps(wi, vtmp));
|
||||
}
|
||||
|
||||
for (; i < N; ++i) {
|
||||
float gi = std::fma(weight_decay, w[i], g[i]);
|
||||
float hi = nh[i] = decay * h[i] + gi * gi;
|
||||
nw[i] = w[i] + lr * gi / (std::sqrt(hi) + epsilon);
|
||||
}
|
||||
}
|
||||
|
||||
void adagrad_update_prefetch__avx2_fma(
|
||||
int N,
|
||||
const float* w,
|
||||
const float* w_n, // prefetch ptr
|
||||
|
||||
const float* g,
|
||||
|
||||
const float* h,
|
||||
const float* h_n, // prefetch ptr
|
||||
|
||||
float* nw,
|
||||
float* nw_n, // prefetch ptr
|
||||
|
||||
float* nh,
|
||||
float* nh_n, // prefetch ptr
|
||||
|
||||
float epsilon,
|
||||
float lr,
|
||||
float weight_decay = 0.f) {
|
||||
internal::adagrad_update_prefetch_inlined(
|
||||
N, w, w_n, g, h, h_n, nw, nw_n, nh, nh_n, epsilon, lr, weight_decay);
|
||||
}
|
||||
|
||||
// Compute adagrad sparse, assumes embedding and momentum are at::Half
|
||||
void adagrad_fp16_update_prefetch__avx2_fma(
|
||||
int N,
|
||||
const at::Half* w,
|
||||
const at::Half* w_n, // prefetch ptr
|
||||
const float* g,
|
||||
const at::Half* h,
|
||||
const at::Half* h_n, // prefetch ptr
|
||||
at::Half* nw,
|
||||
at::Half* nw_n, // prefetch ptr
|
||||
at::Half* nh,
|
||||
at::Half* nh_n, // prefetch ptr
|
||||
float epsilon,
|
||||
float lr,
|
||||
float weight_decay = 0.f) {
|
||||
constexpr int kSize = 8;
|
||||
auto i = 0;
|
||||
for (; i + kSize <= N; i += kSize) {
|
||||
_mm_prefetch(reinterpret_cast<const char*>(&w_n[i]), _MM_HINT_T0);
|
||||
_mm_prefetch(reinterpret_cast<const char*>(&h_n[i]), _MM_HINT_T0);
|
||||
_mm_prefetch(reinterpret_cast<const char*>(&nw_n[i]), _MM_HINT_T0);
|
||||
_mm_prefetch(reinterpret_cast<const char*>(&nh_n[i]), _MM_HINT_T0);
|
||||
|
||||
// only convert momentum and embedding, gradient is fp32
|
||||
__m256 gi = _mm256_loadu_ps(g + i);
|
||||
__m128i hhi = _mm_loadu_si128(reinterpret_cast<const __m128i*>(h + i));
|
||||
__m256 hi = _mm256_cvtph_ps(hhi);
|
||||
__m128i whi = _mm_loadu_si128(reinterpret_cast<const __m128i*>(w + i));
|
||||
__m256 wi = _mm256_cvtph_ps(whi);
|
||||
gi = _mm256_fmadd_ps(_mm256_set1_ps(weight_decay), wi, gi);
|
||||
|
||||
__m256 nhi = _mm256_add_ps(hi, _mm256_mul_ps(gi, gi));
|
||||
__m128i nhhi = _mm256_cvtps_ph(nhi, 0);
|
||||
_mm_storeu_si128(reinterpret_cast<__m128i*>(nh + i), nhhi);
|
||||
|
||||
__m256 vtmp = _mm256_div_ps(
|
||||
_mm256_mul_ps(_mm256_set1_ps(lr), gi),
|
||||
_mm256_add_ps(_mm256_sqrt_ps(nhi), _mm256_set1_ps(epsilon)));
|
||||
__m256 nwi = _mm256_add_ps(wi, vtmp);
|
||||
__m128i nhwi = _mm256_cvtps_ph(nwi, 0);
|
||||
_mm_storeu_si128(reinterpret_cast<__m128i*>(nw + i), nhwi);
|
||||
}
|
||||
|
||||
for (; i < N; ++i) {
|
||||
float gi = std::fma(
|
||||
weight_decay,
|
||||
_cvtsh_ss(reinterpret_cast<const unsigned short*>(w)[i]),
|
||||
g[i]);
|
||||
float nhi =
|
||||
_cvtsh_ss(reinterpret_cast<const unsigned short*>(h)[i]) + gi * gi;
|
||||
reinterpret_cast<unsigned short*>(nh)[i] = _cvtss_sh(nhi, 0);
|
||||
float nwi = _cvtsh_ss(reinterpret_cast<const unsigned short*>(w)[i]) +
|
||||
lr * gi / (std::sqrt(nhi) + epsilon);
|
||||
reinterpret_cast<unsigned short*>(nw)[i] = _cvtss_sh(nwi, 0);
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace caffe2
|
@ -1,45 +0,0 @@
|
||||
#include "caffe2/perfkernels/adagrad.h"
|
||||
#include "caffe2/perfkernels/cvtsh_ss_bugfix.h"
|
||||
|
||||
#include <emmintrin.h>
|
||||
#include <immintrin.h>
|
||||
|
||||
namespace caffe2 {
|
||||
|
||||
// version without prefetching
|
||||
void adagrad_update__avx512(
|
||||
int N,
|
||||
const float* w,
|
||||
const float* g,
|
||||
const float* h,
|
||||
float* nw,
|
||||
float* nh,
|
||||
float epsilon,
|
||||
float decay,
|
||||
float lr,
|
||||
float weight_decay = 0.f) {
|
||||
constexpr int kSize = 16;
|
||||
auto i = 0;
|
||||
for (; i + kSize <= N; i += kSize) {
|
||||
__m512 gi = _mm512_loadu_ps(g + i);
|
||||
__m512 hi = _mm512_loadu_ps(h + i);
|
||||
__m512 wi = _mm512_loadu_ps(w + i);
|
||||
gi = _mm512_fmadd_ps(_mm512_set1_ps(weight_decay), wi, gi);
|
||||
|
||||
__m512 nhi = _mm512_add_ps(
|
||||
_mm512_mul_ps(_mm512_set1_ps(decay), hi), _mm512_mul_ps(gi, gi));
|
||||
_mm512_storeu_ps(nh + i, nhi);
|
||||
__m512 vtmp = _mm512_div_ps(
|
||||
_mm512_mul_ps(_mm512_set1_ps(lr), gi),
|
||||
_mm512_add_ps(_mm512_sqrt_ps(nhi), _mm512_set1_ps(epsilon)));
|
||||
_mm512_storeu_ps(nw + i, _mm512_add_ps(wi, vtmp));
|
||||
}
|
||||
|
||||
for (; i < N; ++i) {
|
||||
float gi = std::fma(weight_decay, w[i], g[i]);
|
||||
float hi = nh[i] = decay * h[i] + gi * gi;
|
||||
nw[i] = w[i] + lr * gi / (std::sqrt(hi) + epsilon);
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace caffe2
|
@ -1,113 +0,0 @@
|
||||
#include "caffe2/perfkernels/common.h"
|
||||
|
||||
#include <algorithm>
|
||||
#include <cstdint>
|
||||
#include <cmath>
|
||||
|
||||
namespace caffe2 {
|
||||
|
||||
namespace {
|
||||
template <typename T>
|
||||
void BoxCoxNaive(
|
||||
std::size_t N,
|
||||
std::size_t D,
|
||||
const T* data_ptr,
|
||||
const T* __restrict lambda1_ptr,
|
||||
const T* __restrict lambda2_ptr,
|
||||
T* output_ptr) {
|
||||
constexpr T k_eps = static_cast<T>(1e-6);
|
||||
|
||||
for (std::size_t i = 0; i < N; i++) {
|
||||
for (std::size_t j = 0; j < D; j++, data_ptr++, output_ptr++) {
|
||||
T lambda1_v = lambda1_ptr[j];
|
||||
T lambda2_v = lambda2_ptr[j];
|
||||
T tmp = std::max(*data_ptr + lambda2_v, k_eps);
|
||||
if (lambda1_v == 0) {
|
||||
*output_ptr = std::log(tmp);
|
||||
} else {
|
||||
T lambda_1 = 1 / lambda1_v;
|
||||
T pow = std::pow(tmp, lambda1_v);
|
||||
*output_ptr = lambda_1 * pow - lambda_1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
#if defined(CAFFE2_PERF_WITH_AVX2) && defined(CAFFE2_PERF_USE_MKL)
|
||||
namespace details {
|
||||
template <typename T>
|
||||
void compute_batch_box_cox__avx2_fma(
|
||||
std::size_t N,
|
||||
std::size_t D,
|
||||
std::size_t block_size,
|
||||
const T* data_ptr,
|
||||
const T* __restrict lambda1_ptr,
|
||||
const T* __restrict lambda2_ptr,
|
||||
T* output_ptr);
|
||||
|
||||
extern template
|
||||
void compute_batch_box_cox__avx2_fma<float>(
|
||||
std::size_t N,
|
||||
std::size_t D,
|
||||
std::size_t block_size,
|
||||
const float* self_data,
|
||||
const float* __restrict lambda1_data,
|
||||
const float* __restrict lambda2_data,
|
||||
float* output_data);
|
||||
|
||||
extern template
|
||||
void compute_batch_box_cox__avx2_fma<double>(
|
||||
std::size_t N,
|
||||
std::size_t D,
|
||||
std::size_t block_size,
|
||||
const double* self_data,
|
||||
const double* __restrict lambda1_data,
|
||||
const double* __restrict lambda2_data,
|
||||
double* output_data);
|
||||
} // namespace detail
|
||||
#endif
|
||||
|
||||
template <typename T>
|
||||
void compute_batch_box_cox(
|
||||
std::size_t N,
|
||||
std::size_t D,
|
||||
std::size_t block_size,
|
||||
const T* data,
|
||||
const T* lambda1_data,
|
||||
const T* lambda2_data,
|
||||
T* output_data) {
|
||||
#ifdef CAFFE2_PERF_WITH_AVX2
|
||||
AVX2_FMA_DO(
|
||||
details::compute_batch_box_cox,
|
||||
N,
|
||||
D,
|
||||
block_size,
|
||||
data,
|
||||
lambda1_data,
|
||||
lambda2_data,
|
||||
output_data);
|
||||
#endif
|
||||
BoxCoxNaive<T>(N, D, data, lambda1_data, lambda2_data, output_data);
|
||||
}
|
||||
|
||||
template void compute_batch_box_cox<float>(
|
||||
std::size_t N,
|
||||
std::size_t D,
|
||||
std::size_t block_size,
|
||||
const float* data,
|
||||
const float* lambda1_data,
|
||||
const float* lambda2_data,
|
||||
float* output_data);
|
||||
|
||||
template void compute_batch_box_cox<double>(
|
||||
std::size_t N,
|
||||
std::size_t D,
|
||||
std::size_t block_size,
|
||||
const double* data,
|
||||
const double* lambda1_data,
|
||||
const double* lambda2_data,
|
||||
double* output_data);
|
||||
|
||||
} // namespace caffe2
|
@ -1,35 +0,0 @@
|
||||
// Impmenets BoxCox operator for CPU
|
||||
#pragma once
|
||||
#include <cstdint>
|
||||
|
||||
namespace caffe2 {
|
||||
|
||||
template <typename T>
|
||||
void compute_batch_box_cox(
|
||||
std::size_t N,
|
||||
std::size_t D,
|
||||
std::size_t block_size,
|
||||
const T* self_data,
|
||||
const T* lambda1_data,
|
||||
const T* lambda2_data,
|
||||
T* output_data);
|
||||
|
||||
extern template void compute_batch_box_cox<float>(
|
||||
std::size_t N,
|
||||
std::size_t D,
|
||||
std::size_t block_size,
|
||||
const float* data,
|
||||
const float* lambda1_data,
|
||||
const float* lambda2_data,
|
||||
float* output_data);
|
||||
|
||||
extern template void compute_batch_box_cox<double>(
|
||||
std::size_t N,
|
||||
std::size_t D,
|
||||
std::size_t block_size,
|
||||
const double* data,
|
||||
const double* lambda1_data,
|
||||
const double* lambda2_data,
|
||||
double* output_data);
|
||||
|
||||
} // namespace caffe2
|
@ -1,399 +0,0 @@
|
||||
#include <immintrin.h>
|
||||
#ifdef CAFFE2_PERF_USE_MKL
|
||||
#include <c10/util/irange.h>
|
||||
#include <caffe2/perfkernels/common.h>
|
||||
#include <folly/SingletonThreadLocal.h>
|
||||
|
||||
#include "vectorizer.h"
|
||||
|
||||
// Enable compiler vectorized version only if numerical consistency is not
|
||||
// required between dev and opt versions - disabled for now
|
||||
#ifndef FAST_VECTORIZED_KERNEL
|
||||
#define CPU_CAPABILITY_AVX2
|
||||
#include <ATen/cpu/vec/vec.h>
|
||||
|
||||
namespace at::vec {
|
||||
|
||||
// Implements the vectorized version of std::max() operation,
|
||||
// which DOESNOT propagates NaN for second argument
|
||||
template <typename scalar_t>
|
||||
Vectorized<scalar_t> max(const Vectorized<scalar_t>& a, const Vectorized<scalar_t>& b);
|
||||
|
||||
template <>
|
||||
Vectorized<double> max(const Vectorized<double>& a, const Vectorized<double>& b) {
|
||||
// std::max(NaN, nonNan) -> NaN
|
||||
return _mm256_max_pd(b, a);
|
||||
}
|
||||
|
||||
template <>
|
||||
Vectorized<float> max(const Vectorized<float>& a, const Vectorized<float>& b) {
|
||||
// std::max(NaN, nonNan) -> NaN
|
||||
return _mm256_max_ps(b, a);
|
||||
}
|
||||
|
||||
// Implements recieprocal method based on newton-rapson method
|
||||
// 1. user RCP approximiation
|
||||
// 2. update with RCP = RCP * (2 - X * RCP)
|
||||
template <typename scalar_t>
|
||||
Vectorized<scalar_t> fast_recieprocal(const Vectorized<scalar_t>& b);
|
||||
template <typename scalar_t>
|
||||
scalar_t fast_recieprocal(scalar_t b);
|
||||
|
||||
template<>
|
||||
Vectorized<float> fast_recieprocal(const Vectorized<float>& b) {
|
||||
auto minus2 = _mm256_set1_ps(-2.f);
|
||||
auto rcp = _mm256_rcp_ps(b);
|
||||
rcp = _mm256_mul_ps(rcp, _mm256_fnmsub_ps(rcp, b, minus2));
|
||||
rcp = _mm256_mul_ps(rcp, _mm256_fnmsub_ps(rcp, b, minus2));
|
||||
return rcp;
|
||||
}
|
||||
|
||||
template <>
|
||||
float fast_recieprocal(float b) {
|
||||
auto minus2 = _mm_set_ss(-2.f);
|
||||
auto b_reg = _mm_set_ss(b);
|
||||
auto rcp = _mm_rcp_ss(b_reg);
|
||||
rcp = _mm_mul_ss(rcp, _mm_fnmsub_ss(rcp, b_reg, minus2));
|
||||
rcp = _mm_mul_ss(rcp, _mm_fnmsub_ss(rcp, b_reg, minus2));
|
||||
return _mm_cvtss_f32(rcp);
|
||||
}
|
||||
|
||||
template<>
|
||||
Vectorized<double> fast_recieprocal(const Vectorized<double>& b) {
|
||||
return b.reciprocal();
|
||||
}
|
||||
|
||||
template <>
|
||||
double fast_recieprocal(double b) {
|
||||
return 1./b;
|
||||
}
|
||||
|
||||
}
|
||||
#endif
|
||||
|
||||
#include <cstdint>
|
||||
#include <cmath>
|
||||
#include <vector>
|
||||
|
||||
#include <mkl.h>
|
||||
|
||||
namespace caffe2::details {
|
||||
|
||||
// MKL VML function templates.
|
||||
template <typename T>
|
||||
void PackV(const int N, const T* a, const int* ia, T* y);
|
||||
template <typename T>
|
||||
void UnpackV(const int N, const T* a, T* y, const int* iy);
|
||||
|
||||
#define DELEGATE_PACKV_FUNCTION(T, OriginalFunc) \
|
||||
template <> \
|
||||
void PackV<T>(const int N, const T* a, const int* ia, T* y) { \
|
||||
OriginalFunc(N, a, ia, y); \
|
||||
}
|
||||
DELEGATE_PACKV_FUNCTION(float, vsPackV)
|
||||
DELEGATE_PACKV_FUNCTION(double, vdPackV)
|
||||
#undef DELEGATE_PACKV_FUNCTION
|
||||
|
||||
#define DELEGATE_UNPACKV_FUNCTION(T, OriginalFunc) \
|
||||
template <> \
|
||||
void UnpackV<T>(const int N, const T* a, T* y, const int* iy) { \
|
||||
OriginalFunc(N, a, y, iy); \
|
||||
}
|
||||
DELEGATE_UNPACKV_FUNCTION(float, vsUnpackV)
|
||||
DELEGATE_UNPACKV_FUNCTION(double, vdUnpackV)
|
||||
#undef DELEGATE_UNPACKV_FUNCTION
|
||||
|
||||
#ifndef FAST_VECTORIZED_KERNEL
|
||||
template <typename T>
|
||||
void box_cox_zero_lambda(
|
||||
size_t D,
|
||||
const T* const self_data,
|
||||
const T* const lambda2_data,
|
||||
T k_eps,
|
||||
T* const output_data) {
|
||||
int j = 0;
|
||||
using Vec = at::vec::Vectorized<T>;
|
||||
constexpr int64_t VLEN = Vec::size();
|
||||
auto k_eps_vec = Vec(k_eps);
|
||||
for(; j + VLEN < D; j += VLEN) {
|
||||
auto data = Vec::loadu(self_data + j);
|
||||
auto lambda2 = Vec::loadu(lambda2_data + j);
|
||||
auto sum = data + lambda2;
|
||||
auto max = at::vec::max(sum, k_eps_vec);
|
||||
auto res = max.log();
|
||||
res.store(output_data + j);
|
||||
}
|
||||
for ( ;j < D; ++j) {
|
||||
auto sum = self_data[j] + lambda2_data[j];
|
||||
auto max = std::max(sum, k_eps);
|
||||
output_data[j] = std::log(max);
|
||||
}
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void box_cox_nonzero_lambda(
|
||||
int64_t D,
|
||||
const T* data_ptr,
|
||||
const T* lambda1_ptr,
|
||||
const T* lambda2_ptr,
|
||||
T k_eps,
|
||||
T* out) {
|
||||
|
||||
int j = 0;
|
||||
using Vec = at::vec::Vectorized<T>;
|
||||
constexpr int64_t VLEN = Vec::size();
|
||||
auto k_eps_vec = Vec(k_eps);
|
||||
for(; j + VLEN < D; j += VLEN) {
|
||||
auto data = Vec::loadu(data_ptr + j);
|
||||
auto lambda2 = Vec::loadu(lambda2_ptr + j);
|
||||
auto sum = data + lambda2;
|
||||
auto max = at::vec::max(sum, k_eps_vec);
|
||||
auto lambda1 = Vec::loadu(lambda1_ptr + j);
|
||||
auto lambda_over_1 = at::vec::fast_recieprocal(lambda1);
|
||||
auto pow = max.pow(lambda1);
|
||||
auto res = at::vec::fmsub(pow, lambda_over_1, lambda_over_1);
|
||||
res.store(out + j);
|
||||
}
|
||||
for ( ;j < D; ++j) {
|
||||
auto sum = data_ptr[j] + lambda2_ptr[j];
|
||||
auto max = std::max(sum, k_eps);
|
||||
auto lambda_over_1 = at::vec::fast_recieprocal(lambda1_ptr[j]);
|
||||
auto pow = std::pow(max, lambda1_ptr[j]);
|
||||
out[j] = pow * lambda_over_1 - lambda_over_1;
|
||||
}
|
||||
}
|
||||
#else
|
||||
template <typename T>
|
||||
void box_cox_zero_lambda(
|
||||
size_t D,
|
||||
const T* const self_data,
|
||||
const T* const lambda2_data,
|
||||
T k_eps,
|
||||
T* const output_data) {
|
||||
VECTOR_LOOP for (auto j=0 ;j < D; ++j) {
|
||||
auto sum = self_data[j] + lambda2_data[j];
|
||||
auto max = std::max(sum, k_eps);
|
||||
output_data[j] = std::log(max);
|
||||
}
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void box_cox_nonzero_lambda(
|
||||
int64_t D,
|
||||
const T* data_ptr,
|
||||
const T* lambda1_ptr,
|
||||
const T* lambda2_ptr,
|
||||
T k_eps,
|
||||
T* out) {
|
||||
|
||||
VECTOR_LOOP for (auto j=0 ;j < D; ++j) {
|
||||
FAST_MATH
|
||||
auto sum = data_ptr[j] + lambda2_ptr[j];
|
||||
auto max = std::max(sum, k_eps);
|
||||
auto lamda1 = lambda1_ptr[j];
|
||||
auto lambda_over_1 = 1 / lamda1;
|
||||
if constexpr (std::is_same<T, float>::value) {
|
||||
lambda_over_1 = lambda_over_1 * (T{2} - lambda_over_1 * lamda1);
|
||||
lambda_over_1 = lambda_over_1 * (T{2} - lambda_over_1 * lamda1);
|
||||
}
|
||||
auto pow = std::pow(max, lamda1);
|
||||
out[j] = pow * lambda_over_1 - lambda_over_1;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
template <typename T>
|
||||
void box_cox_mixed_lambda(
|
||||
const T* const self_data,
|
||||
const std::vector<int>& nonzeros,
|
||||
const std::vector<int>& zeros,
|
||||
const T* const lambda1,
|
||||
const T* const lambda2,
|
||||
const T* const lambda2_z_,
|
||||
T k_eps,
|
||||
T* const buffer,
|
||||
T* const output_data) {
|
||||
PackV(nonzeros.size(), self_data, nonzeros.data(), buffer);
|
||||
box_cox_nonzero_lambda<T>(
|
||||
nonzeros.size(), buffer, lambda1, lambda2, k_eps, buffer);
|
||||
UnpackV(nonzeros.size(), buffer, output_data, nonzeros.data());
|
||||
|
||||
PackV(zeros.size(), self_data, zeros.data(), buffer);
|
||||
box_cox_zero_lambda<T>(
|
||||
zeros.size(), buffer, lambda2_z_, k_eps, buffer);
|
||||
UnpackV(zeros.size(), buffer, output_data, zeros.data());
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void TileArrayIntoVector(
|
||||
const T* const a,
|
||||
const size_t D,
|
||||
const int K,
|
||||
std::vector<T>& b) {
|
||||
b.resize(K * D);
|
||||
for (const auto k : c10::irange(K)) {
|
||||
std::copy(a, a + D, b.begin() + k * D);
|
||||
}
|
||||
}
|
||||
|
||||
void TileIndicesInPlace(std::vector<int>& v, const std::size_t D, const std::size_t K) {
|
||||
auto n = v.size();
|
||||
v.resize(K * n);
|
||||
for (const auto k : c10::irange(1, K)) {
|
||||
for (const auto j : c10::irange(n)) {
|
||||
v[k * n + j] = v[j] + k * D;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void compute_batch_box_cox__avx2_fma(
|
||||
std::size_t N,
|
||||
std::size_t D,
|
||||
std::size_t block_size,
|
||||
const T* self_data,
|
||||
const T* __restrict lambda1_data,
|
||||
const T* __restrict lambda2_data,
|
||||
T* output_data) {
|
||||
constexpr T k_eps = static_cast<T>(1e-6);
|
||||
|
||||
FOLLY_DECLARE_REUSED(zeros, std::vector<int>);
|
||||
FOLLY_DECLARE_REUSED(nonzeros, std::vector<int>);
|
||||
// Don't bother calling reserve; calls after the first will get a
|
||||
// correctly-sized allocation anyway.
|
||||
for (const auto j : c10::irange(D)) {
|
||||
if (lambda1_data[j] == 0) {
|
||||
zeros.push_back(j);
|
||||
} else {
|
||||
nonzeros.push_back(j);
|
||||
}
|
||||
}
|
||||
|
||||
// Process K rows at a time for effective vectorization with small rows.
|
||||
const auto K = std::min(N, (block_size + D - 1) / D);
|
||||
|
||||
FOLLY_DECLARE_REUSED(lambda1_, std::vector<T>);
|
||||
FOLLY_DECLARE_REUSED(lambda2_, std::vector<T>);
|
||||
FOLLY_DECLARE_REUSED(lambda2_z_, std::vector<T>);
|
||||
|
||||
if (nonzeros.size() == D) {
|
||||
// ((x + lambda2)^lambda1 - 1)/lambda1, if lambda1 != 0
|
||||
size_t i = 0;
|
||||
if (K > 1) {
|
||||
TileArrayIntoVector(lambda1_data, D, K, lambda1_);
|
||||
TileArrayIntoVector(lambda2_data, D, K, lambda2_);
|
||||
DCHECK_EQ(K * D, lambda1_.size());
|
||||
DCHECK_EQ(K * D, lambda2_.size());
|
||||
for (; i < N - K + 1; i += K, self_data += K * D, output_data += K * D) {
|
||||
box_cox_nonzero_lambda<T>(
|
||||
K * D,
|
||||
self_data,
|
||||
lambda1_.data(),
|
||||
lambda2_.data(),
|
||||
k_eps,
|
||||
output_data);
|
||||
}
|
||||
}
|
||||
for (; i < N; i++, self_data += D, output_data += D) {
|
||||
box_cox_nonzero_lambda<T>(
|
||||
D, self_data, lambda1_data, lambda2_data, k_eps, output_data);
|
||||
}
|
||||
} else if (zeros.size() == D) {
|
||||
// ln(x + lambda2), if lambda1 == 0
|
||||
size_t i = 0;
|
||||
if (K > 1) {
|
||||
TileArrayIntoVector(lambda2_data, D, K, lambda2_z_);
|
||||
DCHECK_EQ(K * D, lambda2_z_.size());
|
||||
for (; i < N - K + 1; i += K, self_data += K * D, output_data += K * D) {
|
||||
box_cox_zero_lambda<T>(
|
||||
K * D, self_data, lambda2_z_.data(), k_eps, output_data);
|
||||
}
|
||||
}
|
||||
for (; i < N; i++, self_data += D, output_data += D) {
|
||||
box_cox_zero_lambda<T>(
|
||||
D, self_data, lambda2_data, k_eps, output_data);
|
||||
}
|
||||
} else {
|
||||
// mix zeros and nonzeros
|
||||
const size_t n = nonzeros.size();
|
||||
if (K > 1) {
|
||||
TileIndicesInPlace(nonzeros, 0, K);
|
||||
TileIndicesInPlace(zeros, 0, K);
|
||||
}
|
||||
|
||||
FOLLY_DECLARE_REUSED(buffer, std::vector<T>);
|
||||
|
||||
buffer.resize(std::max(nonzeros.size(), zeros.size()));
|
||||
lambda1_.resize(nonzeros.size());
|
||||
lambda2_.resize(nonzeros.size());
|
||||
lambda2_z_.resize(zeros.size());
|
||||
PackV(nonzeros.size(), lambda1_data, nonzeros.data(), lambda1_.data());
|
||||
PackV(nonzeros.size(), lambda2_data, nonzeros.data(), lambda2_.data());
|
||||
PackV(zeros.size(), lambda2_data, zeros.data(), lambda2_z_.data());
|
||||
|
||||
size_t i = 0;
|
||||
if (K > 1) {
|
||||
// Truncate to original size, and re-tile with offsets this time.
|
||||
nonzeros.resize(n);
|
||||
DCHECK_GT(D, n);
|
||||
zeros.resize(D - n);
|
||||
TileIndicesInPlace(nonzeros, D, K);
|
||||
TileIndicesInPlace(zeros, D, K);
|
||||
DCHECK_EQ(nonzeros.size(), lambda1_.size());
|
||||
DCHECK_EQ(nonzeros.size(), lambda2_.size());
|
||||
DCHECK_EQ(zeros.size(), lambda2_z_.size());
|
||||
|
||||
for (; i < N - K + 1; i += K, self_data += K * D, output_data += K * D) {
|
||||
box_cox_mixed_lambda<T>(
|
||||
self_data,
|
||||
nonzeros,
|
||||
zeros,
|
||||
lambda1_.data(),
|
||||
lambda2_.data(),
|
||||
lambda2_z_.data(),
|
||||
k_eps,
|
||||
buffer.data(),
|
||||
output_data);
|
||||
}
|
||||
// Truncate to original size.
|
||||
nonzeros.resize(n);
|
||||
zeros.resize(D - n);
|
||||
}
|
||||
for (; i < N; i++, self_data += D, output_data += D) {
|
||||
box_cox_mixed_lambda<T>(
|
||||
self_data,
|
||||
nonzeros,
|
||||
zeros,
|
||||
lambda1_.data(),
|
||||
lambda2_.data(),
|
||||
lambda2_z_.data(),
|
||||
k_eps,
|
||||
buffer.data(),
|
||||
output_data);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
template
|
||||
void compute_batch_box_cox__avx2_fma<float>(
|
||||
std::size_t N,
|
||||
std::size_t D,
|
||||
std::size_t block_size,
|
||||
const float* self_data,
|
||||
const float* __restrict lambda1_data,
|
||||
const float* __restrict lambda2_data,
|
||||
float* output_data);
|
||||
|
||||
template
|
||||
void compute_batch_box_cox__avx2_fma<double>(
|
||||
std::size_t N,
|
||||
std::size_t D,
|
||||
std::size_t block_size,
|
||||
const double* self_data,
|
||||
const double* __restrict lambda1_data,
|
||||
const double* __restrict lambda2_data,
|
||||
double* output_data);
|
||||
|
||||
} // namespace caffe2::detail
|
||||
#endif
|
@ -1,75 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
// Apple clang was fixed in 8.1
|
||||
#if defined(__apple_build_version__) && \
|
||||
((__clang_major__ < 8) || \
|
||||
((__clang_major__ == 8) && (__clang_minor__ < 1)))
|
||||
#define CAFFE2_INTERNAL_APPLE_NEED_FIX 1
|
||||
#endif
|
||||
|
||||
// Regular clang was fixed in 3.9
|
||||
#if defined(__clang__) && (__clang_major__ < 4) && (__clang_minor__ < 9)
|
||||
#define CAFFE2_INTERNAL_CLANG_NEED_FIX 1
|
||||
#endif
|
||||
|
||||
#if defined(CAFFE2_INTERNAL_APPLE_NEED_FIX) || \
|
||||
defined(CAFFE2_INTERNAL_CLANG_NEED_FIX)
|
||||
|
||||
#include <c10/util/Half.h>
|
||||
#include <emmintrin.h>
|
||||
|
||||
// This version of clang has a bug that _cvtsh_ss is not defined, see
|
||||
// https://reviews.llvm.org/D16177
|
||||
static __inline float
|
||||
__attribute__((__always_inline__, __nodebug__, __target__("f16c")))
|
||||
_cvtsh_ss(unsigned short a) {
|
||||
__v8hi v = {(short)a, 0, 0, 0, 0, 0, 0, 0};
|
||||
__v4sf r = __builtin_ia32_vcvtph2ps(v);
|
||||
return r[0];
|
||||
}
|
||||
|
||||
static __inline unsigned short
|
||||
__attribute__((__always_inline__, __nodebug__, __target__("f16c")))
|
||||
_cvtss_sh(float a, int imm8) {
|
||||
unsigned short ret;
|
||||
*reinterpret_cast<at::Half*>(&ret) = a;
|
||||
return ret;
|
||||
}
|
||||
|
||||
#endif // __APPLE_NEED_FIX || __CLANG_NEED_FIX
|
||||
|
||||
#undef __APPLE_NEED_FIX
|
||||
#undef __CLANG_NEED_FIX
|
||||
|
||||
#if defined(_MSC_VER) && !defined(__clang__)
|
||||
|
||||
#include <c10/util/Half.h>
|
||||
#include <cstdint>
|
||||
|
||||
// It seems that microsoft msvc does not have a _cvtsh_ss implementation so
|
||||
// we will add a dummy version to it.
|
||||
|
||||
static inline float _cvtsh_ss(unsigned short x) {
|
||||
union {
|
||||
std::uint32_t intval;
|
||||
float floatval;
|
||||
} t1;
|
||||
std::uint32_t t2, t3;
|
||||
t1.intval = x & 0x7fff; // Non-sign bits
|
||||
t2 = x & 0x8000; // Sign bit
|
||||
t3 = x & 0x7c00; // Exponent
|
||||
t1.intval <<= 13; // Align mantissa on MSB
|
||||
t2 <<= 16; // Shift sign bit into position
|
||||
t1.intval += 0x38000000; // Adjust bias
|
||||
t1.intval = (t3 == 0 ? 0 : t1.intval); // Denormals-as-zero
|
||||
t1.intval |= t2; // Re-insert sign bit
|
||||
return t1.floatval;
|
||||
}
|
||||
|
||||
static inline unsigned short _cvtss_sh(float x, int imm8) {
|
||||
unsigned short ret;
|
||||
*reinterpret_cast<at::Half*>(&ret) = x;
|
||||
return ret;
|
||||
}
|
||||
|
||||
#endif // _MSC_VER
|
@ -1,211 +0,0 @@
|
||||
#include "caffe2/perfkernels/fused_8bit_rowwise_embedding_lookup.h"
|
||||
|
||||
#include "caffe2/perfkernels/common.h"
|
||||
|
||||
#include <c10/util/Logging.h>
|
||||
#include <c10/util/irange.h>
|
||||
|
||||
namespace caffe2 {
|
||||
|
||||
/**
|
||||
* Base implementation does runtime dispatch for each segment of reduction
|
||||
* @return false if there is an out-of-bound error
|
||||
*/
|
||||
template <
|
||||
typename IndexType,
|
||||
typename InType,
|
||||
typename OutType,
|
||||
bool IS_WEIGHT_POSITIONAL = false>
|
||||
static bool Fused8BitRowwiseEmbeddingLookupGenericSlow(
|
||||
const int64_t block_size,
|
||||
const int64_t output_size,
|
||||
const int64_t index_size,
|
||||
const int64_t data_size,
|
||||
const InType* input,
|
||||
const IndexType* indices,
|
||||
const int* lengths,
|
||||
const float* weights, // optional, can be null for sum reducer
|
||||
bool normalize_by_lengths,
|
||||
OutType* out) {
|
||||
// block_size is the number of elements and fused_block_size is the size of
|
||||
// an entire row, including scale and bias.
|
||||
const auto scale_bias_offset = 8 / sizeof(InType);
|
||||
const int64_t fused_block_size = block_size + scale_bias_offset;
|
||||
int64_t current = 0;
|
||||
for (const auto m : c10::irange(output_size)) {
|
||||
memset(out, 0, sizeof(OutType) * block_size);
|
||||
if (current + lengths[m] > index_size) {
|
||||
return false;
|
||||
}
|
||||
for (int i = 0; i < lengths[m]; ++i) {
|
||||
int64_t idx = indices[current];
|
||||
if (idx < 0 || idx >= data_size) {
|
||||
return false;
|
||||
}
|
||||
#ifdef __GNUC__
|
||||
if (current + 1 < index_size) {
|
||||
__builtin_prefetch(
|
||||
input + fused_block_size * indices[current + 1], 0, 1);
|
||||
}
|
||||
#endif // __GNUC__
|
||||
|
||||
const float* scale_bias = reinterpret_cast<const float*>(
|
||||
input + fused_block_size * indices[current] + block_size);
|
||||
|
||||
float weight = 1.0f;
|
||||
if (weights) {
|
||||
weight = weights[IS_WEIGHT_POSITIONAL ? i : current];
|
||||
}
|
||||
const float scale = weight * scale_bias[0];
|
||||
const float bias = weight * scale_bias[1];
|
||||
|
||||
for (const auto j : c10::irange(block_size)) {
|
||||
out[j] += scale * input[fused_block_size * indices[current] + j] + bias;
|
||||
}
|
||||
|
||||
++current;
|
||||
}
|
||||
if (normalize_by_lengths && lengths[m]) {
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
|
||||
float scale = 1.f / lengths[m];
|
||||
for (const auto j : c10::irange(block_size)) {
|
||||
out[j] *= scale;
|
||||
}
|
||||
}
|
||||
out += block_size;
|
||||
}
|
||||
return current == index_size;
|
||||
}
|
||||
|
||||
// clang-format off
|
||||
// Proxy back to generic implementation
|
||||
#define FUSED_8BIT_ROWWISE_EMBEDDING_SPECIALIZATION(IndexType, OutType) \
|
||||
bool \
|
||||
Fused8BitRowwiseEmbeddingLookup_##IndexType##_uint8_t_##OutType##_false__base( \
|
||||
const int64_t block_size, \
|
||||
const int64_t output_size, \
|
||||
const int64_t index_size, \
|
||||
const int64_t data_size, \
|
||||
const uint8_t* input, \
|
||||
const IndexType* indices, \
|
||||
const int* lengths, \
|
||||
const float* weights, \
|
||||
bool normalize_by_lengths, \
|
||||
OutType* out) { \
|
||||
return Fused8BitRowwiseEmbeddingLookupGenericSlow< \
|
||||
IndexType, \
|
||||
uint8_t, \
|
||||
OutType, \
|
||||
false>( \
|
||||
block_size, \
|
||||
output_size, \
|
||||
index_size, \
|
||||
data_size, \
|
||||
input, \
|
||||
indices, \
|
||||
lengths, \
|
||||
weights, \
|
||||
normalize_by_lengths, \
|
||||
out); \
|
||||
} \
|
||||
decltype( \
|
||||
Fused8BitRowwiseEmbeddingLookup_##IndexType##_uint8_t_##OutType##_false__base) \
|
||||
Fused8BitRowwiseEmbeddingLookup_##IndexType##_uint8_t_##OutType##_false__avx2_fma; \
|
||||
bool Fused8BitRowwiseEmbeddingLookup_##IndexType##_uint8_t_##OutType( \
|
||||
const int64_t block_size, \
|
||||
const int64_t output_size, \
|
||||
const int64_t index_size, \
|
||||
const int64_t data_size, \
|
||||
const uint8_t* input, \
|
||||
const IndexType* indices, \
|
||||
const int* lengths, \
|
||||
const float* weights, \
|
||||
bool normalize_by_lengths, \
|
||||
OutType* out) { \
|
||||
const int32_t one = 1; \
|
||||
CAFFE_ENFORCE_EQ( \
|
||||
reinterpret_cast<const uint8_t*>(&one)[0], \
|
||||
1, \
|
||||
"Fused8BitRowwiseEmbeddingLookup is not supported on this platform"); \
|
||||
AVX2_FMA_DO( \
|
||||
Fused8BitRowwiseEmbeddingLookup_##IndexType##_uint8_t_##OutType##_false, \
|
||||
block_size, \
|
||||
output_size, \
|
||||
index_size, \
|
||||
data_size, \
|
||||
input, \
|
||||
indices, \
|
||||
lengths, \
|
||||
weights, \
|
||||
normalize_by_lengths, \
|
||||
out); \
|
||||
BASE_DO( \
|
||||
Fused8BitRowwiseEmbeddingLookup_##IndexType##_uint8_t_##OutType##_false, \
|
||||
block_size, \
|
||||
output_size, \
|
||||
index_size, \
|
||||
data_size, \
|
||||
input, \
|
||||
indices, \
|
||||
lengths, \
|
||||
weights, \
|
||||
normalize_by_lengths, \
|
||||
out); \
|
||||
} \
|
||||
template <> \
|
||||
void Fused8BitRowwiseEmbeddingLookup<IndexType, uint8_t, OutType, false>( \
|
||||
const int64_t block_size, \
|
||||
const int64_t output_size, \
|
||||
const int64_t index_size, \
|
||||
const int64_t data_size, \
|
||||
const uint8_t* input, \
|
||||
const IndexType* indices, \
|
||||
const int* lengths, \
|
||||
const float* weights, \
|
||||
bool normalize_by_lengths, \
|
||||
OutType* out) { \
|
||||
bool success = \
|
||||
Fused8BitRowwiseEmbeddingLookup_##IndexType##_uint8_t_##OutType( \
|
||||
block_size, \
|
||||
output_size, \
|
||||
index_size, \
|
||||
data_size, \
|
||||
input, \
|
||||
indices, \
|
||||
lengths, \
|
||||
weights, \
|
||||
normalize_by_lengths, \
|
||||
out); \
|
||||
if (success) { \
|
||||
return; \
|
||||
} \
|
||||
int64_t current = 0; \
|
||||
for (int m = 0; m < output_size; ++m) { \
|
||||
for (int i = 0; i < lengths[m]; ++i) { \
|
||||
CAFFE_ENFORCE_LT(current, index_size); \
|
||||
IndexType idx = indices[current]; \
|
||||
CAFFE_ENFORCE( \
|
||||
0 <= idx && idx < data_size, \
|
||||
"Index ", \
|
||||
current, \
|
||||
" is out of bounds: ", \
|
||||
idx, \
|
||||
", range 0 to ", \
|
||||
data_size); \
|
||||
++current; \
|
||||
} \
|
||||
} \
|
||||
CAFFE_ENFORCE_EQ( \
|
||||
current, \
|
||||
index_size, \
|
||||
"Your input seems to be incorrect: the sum of lengths values should be " \
|
||||
"the size of the indices tensor, but it appears not."); \
|
||||
}
|
||||
// clang-format on
|
||||
|
||||
FUSED_8BIT_ROWWISE_EMBEDDING_SPECIALIZATION(int32_t, float);
|
||||
FUSED_8BIT_ROWWISE_EMBEDDING_SPECIALIZATION(int64_t, float);
|
||||
|
||||
#undef FUSED_8BIT_ROWWISE_EMBEDDING_SPECIALIZATION
|
||||
|
||||
} // namespace caffe2
|
@ -1,55 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include <cstdint>
|
||||
|
||||
namespace caffe2 {
|
||||
|
||||
/**
|
||||
* Embedding lookup with reduction.
|
||||
*
|
||||
* `input` of size data_size * (block_size + 8B)
|
||||
* `indices` of size index_size
|
||||
* `lengths` of size output_size
|
||||
* `weights` nullptr or array of size index_size
|
||||
* `out` of size output_size * block_size
|
||||
* sum(lengths[i]) == index_size
|
||||
*
|
||||
* Note that block_size should be the number of quantized values per row in the
|
||||
* data, i.e. excluding the scale and bias. The total (fused) block size is
|
||||
* assumed to be this block_size, plus 4 bytes for scale and 4 bytes for bias.
|
||||
*
|
||||
* Behavior is roughly equivalent to pseudocode:
|
||||
*
|
||||
* pos = 0
|
||||
* fused_block_size = block_size + 8B // quantized values and scale and bias
|
||||
* for (i = 0..output_size-1)
|
||||
* for (k = 0..block_size-1)
|
||||
* out[i*block_size + k] = 0
|
||||
* for (j = 0..lengths[i]-1)
|
||||
* for (k = 0..block_size-1)
|
||||
* out[i*block_size + k] += input[indices[pos]*(fused_block_size) + k] *
|
||||
* (weights ? weights[IS_WEIGHT_POSITIONAL ? j : pos] : 1.0)
|
||||
* pos += 1
|
||||
* if (normalize_weights && lengths[i] > 0)
|
||||
* for (k = 0..block_size-1)
|
||||
* out[i*block_size + k] /= lengths[i]
|
||||
*
|
||||
*/
|
||||
|
||||
template <
|
||||
typename IndexType,
|
||||
typename InType,
|
||||
typename OutType,
|
||||
bool IS_WEIGHT_POSITIONAL = false>
|
||||
void Fused8BitRowwiseEmbeddingLookup(
|
||||
const std::int64_t block_size,
|
||||
const std::int64_t output_size,
|
||||
const std::int64_t index_size,
|
||||
const std::int64_t data_size,
|
||||
const InType* input,
|
||||
const IndexType* indices,
|
||||
const int* lengths,
|
||||
const float* weights, // optional, can be null for non-weighted sum
|
||||
bool normalize_by_lengths,
|
||||
OutType* out);
|
||||
} // namespace caffe2
|
@ -1,213 +0,0 @@
|
||||
#include "caffe2/perfkernels/fused_8bit_rowwise_embedding_lookup_idx.h"
|
||||
|
||||
#include "caffe2/perfkernels/common.h"
|
||||
|
||||
#include <c10/util/Logging.h>
|
||||
#include <c10/util/irange.h>
|
||||
|
||||
namespace caffe2 {
|
||||
|
||||
/**
|
||||
* Base implementation does runtime dispatch for each segment of reduction
|
||||
* @return false if there is an out-of-bound error
|
||||
*/
|
||||
template <
|
||||
typename IndexType,
|
||||
typename InType,
|
||||
typename OutType,
|
||||
bool IS_WEIGHT_POSITIONAL = false>
|
||||
static bool Fused8BitRowwiseEmbeddingLookupGenericSlowIdx(
|
||||
const int64_t block_size,
|
||||
const int64_t output_size,
|
||||
const int64_t index_size,
|
||||
const int64_t data_size,
|
||||
const InType* input,
|
||||
const IndexType* indices,
|
||||
const IndexType* offsets,
|
||||
const float* weights, // optional, can be null for sum reducer
|
||||
bool normalize_by_lengths,
|
||||
OutType* out) {
|
||||
// block_size is the number of elements and fused_block_size is the size of
|
||||
// an entire row, including scale and bias.
|
||||
const auto scale_bias_offset = 8 / sizeof(InType);
|
||||
const int64_t fused_block_size = block_size + scale_bias_offset;
|
||||
int64_t current = 0;
|
||||
for (const auto m : c10::irange(output_size)) {
|
||||
memset(out, 0, sizeof(OutType) * block_size);
|
||||
if (current != offsets[m] - offsets[0]) {
|
||||
return false;
|
||||
}
|
||||
int64_t start_offset = offsets[m];
|
||||
int64_t end_offset = offsets[m + 1];
|
||||
int64_t length = end_offset - start_offset;
|
||||
for (const auto i : c10::irange(start_offset, end_offset)) {
|
||||
int64_t idx = indices[current];
|
||||
if (idx < 0 || idx >= data_size) {
|
||||
return false;
|
||||
}
|
||||
#ifdef __GNUC__
|
||||
if (current + 1 < index_size) {
|
||||
__builtin_prefetch(
|
||||
input + fused_block_size * indices[current + 1], 0, 1);
|
||||
}
|
||||
#endif // __GNUC__
|
||||
|
||||
const float* scale_bias = reinterpret_cast<const float*>(
|
||||
input + fused_block_size * indices[current] + block_size);
|
||||
|
||||
float weight = 1.0f;
|
||||
if (weights) {
|
||||
weight = weights[IS_WEIGHT_POSITIONAL ? i : current];
|
||||
}
|
||||
const float scale = weight * scale_bias[0];
|
||||
const float bias = weight * scale_bias[1];
|
||||
|
||||
for (const auto j : c10::irange(block_size)) {
|
||||
out[j] += scale * input[fused_block_size * indices[current] + j] + bias;
|
||||
}
|
||||
|
||||
++current;
|
||||
}
|
||||
if (normalize_by_lengths && length) {
|
||||
float scale = 1.f / length;
|
||||
for (const auto j : c10::irange(block_size)) {
|
||||
out[j] *= scale;
|
||||
}
|
||||
}
|
||||
out += block_size;
|
||||
}
|
||||
return current == index_size;
|
||||
}
|
||||
|
||||
// clang-format off
|
||||
// Proxy back to generic implementation
|
||||
#define FUSED_8BIT_ROWWISE_EMBEDDING_IDX_SPECIALIZATION(IndexType, OutType) \
|
||||
bool \
|
||||
Fused8BitRowwiseEmbeddingLookupIdx_##IndexType##_uint8_t_##OutType##_false__base( \
|
||||
const int64_t block_size, \
|
||||
const int64_t output_size, \
|
||||
const int64_t index_size, \
|
||||
const int64_t data_size, \
|
||||
const uint8_t* input, \
|
||||
const IndexType* indices, \
|
||||
const IndexType* offsets, \
|
||||
const float* weights, \
|
||||
bool normalize_by_lengths, \
|
||||
OutType* out) { \
|
||||
return Fused8BitRowwiseEmbeddingLookupGenericSlowIdx< \
|
||||
IndexType, \
|
||||
uint8_t, \
|
||||
OutType, \
|
||||
false>( \
|
||||
block_size, \
|
||||
output_size, \
|
||||
index_size, \
|
||||
data_size, \
|
||||
input, \
|
||||
indices, \
|
||||
offsets, \
|
||||
weights, \
|
||||
normalize_by_lengths, \
|
||||
out); \
|
||||
} \
|
||||
decltype( \
|
||||
Fused8BitRowwiseEmbeddingLookupIdx_##IndexType##_uint8_t_##OutType##_false__base) \
|
||||
Fused8BitRowwiseEmbeddingLookupIdx_##IndexType##_uint8_t_##OutType##_false__avx2_fma; \
|
||||
bool Fused8BitRowwiseEmbeddingLookupIdx_##IndexType##_uint8_t_##OutType( \
|
||||
const int64_t block_size, \
|
||||
const int64_t output_size, \
|
||||
const int64_t index_size, \
|
||||
const int64_t data_size, \
|
||||
const uint8_t* input, \
|
||||
const IndexType* indices, \
|
||||
const IndexType* offsets, \
|
||||
const float* weights, \
|
||||
bool normalize_by_lengths, \
|
||||
OutType* out) { \
|
||||
const int32_t one = 1; \
|
||||
CAFFE_ENFORCE_EQ( \
|
||||
reinterpret_cast<const uint8_t*>(&one)[0], \
|
||||
1, \
|
||||
"Fused8BitRowwiseEmbeddingLookup is not supported on this platform"); \
|
||||
AVX2_FMA_DO( \
|
||||
Fused8BitRowwiseEmbeddingLookupIdx_##IndexType##_uint8_t_##OutType##_false, \
|
||||
block_size, \
|
||||
output_size, \
|
||||
index_size, \
|
||||
data_size, \
|
||||
input, \
|
||||
indices, \
|
||||
offsets, \
|
||||
weights, \
|
||||
normalize_by_lengths, \
|
||||
out); \
|
||||
BASE_DO( \
|
||||
Fused8BitRowwiseEmbeddingLookupIdx_##IndexType##_uint8_t_##OutType##_false, \
|
||||
block_size, \
|
||||
output_size, \
|
||||
index_size, \
|
||||
data_size, \
|
||||
input, \
|
||||
indices, \
|
||||
offsets, \
|
||||
weights, \
|
||||
normalize_by_lengths, \
|
||||
out); \
|
||||
} \
|
||||
template <> \
|
||||
void Fused8BitRowwiseEmbeddingLookupIdx<IndexType, uint8_t, OutType, false>( \
|
||||
const int64_t block_size, \
|
||||
const int64_t output_size, \
|
||||
const int64_t index_size, \
|
||||
const int64_t data_size, \
|
||||
const uint8_t* input, \
|
||||
const IndexType* indices, \
|
||||
const IndexType* offsets, \
|
||||
const float* weights, \
|
||||
bool normalize_by_lengths, \
|
||||
OutType* out) { \
|
||||
bool success = \
|
||||
Fused8BitRowwiseEmbeddingLookupIdx_##IndexType##_uint8_t_##OutType( \
|
||||
block_size, \
|
||||
output_size, \
|
||||
index_size, \
|
||||
data_size, \
|
||||
input, \
|
||||
indices, \
|
||||
offsets, \
|
||||
weights, \
|
||||
normalize_by_lengths, \
|
||||
out); \
|
||||
if (success) { \
|
||||
return; \
|
||||
} \
|
||||
int64_t current = 0; \
|
||||
for (int m = 0; m < output_size; ++m) { \
|
||||
for (int64_t i = offsets[m]; i < offsets[m + 1]; ++i) { \
|
||||
CAFFE_ENFORCE_LT(current, index_size); \
|
||||
IndexType idx = indices[current]; \
|
||||
CAFFE_ENFORCE( \
|
||||
0 <= idx && idx < data_size, \
|
||||
"Index ", \
|
||||
current, \
|
||||
" is out of bounds: ", \
|
||||
idx, \
|
||||
", range 0 to ", \
|
||||
data_size); \
|
||||
++current; \
|
||||
} \
|
||||
} \
|
||||
CAFFE_ENFORCE_EQ( \
|
||||
current, \
|
||||
index_size, \
|
||||
"Your input seems to be incorrect: the sum of lengths values should be " \
|
||||
"the size of the indices tensor, but it appears not."); \
|
||||
}
|
||||
// clang-format on
|
||||
|
||||
FUSED_8BIT_ROWWISE_EMBEDDING_IDX_SPECIALIZATION(int32_t, float);
|
||||
FUSED_8BIT_ROWWISE_EMBEDDING_IDX_SPECIALIZATION(int64_t, float);
|
||||
|
||||
#undef FUSED_8BIT_ROWWISE_EMBEDDING_IDX_SPECIALIZATION
|
||||
|
||||
} // namespace caffe2
|
@ -1,57 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include <cstdint>
|
||||
|
||||
namespace caffe2 {
|
||||
|
||||
/**
|
||||
* Embedding lookup with reduction.
|
||||
*
|
||||
* `input` of size data_size * (block_size + 8B)
|
||||
* `indices` of size index_size
|
||||
* `offsets` of size output_size
|
||||
* `weights` nullptr or array of size index_size
|
||||
* `out` of size output_size * block_size
|
||||
*
|
||||
* Note that block_size should be the number of quantized values per row in the
|
||||
* data, i.e. excluding the scale and bias. The total (fused) block size is
|
||||
* assumed to be this block_size, plus 4 bytes for scale and 4 bytes for bias.
|
||||
*
|
||||
* Behavior is roughly equivalent to pseudocode:
|
||||
*
|
||||
* pos = 0
|
||||
* fused_block_size = block_size + 8B // quantized values and scale and bias
|
||||
* for (i = 0..output_size-1)
|
||||
* for (k = 0..block_size-1)
|
||||
* out[i*block_size + k] = 0
|
||||
* start_offset = offsets[i]
|
||||
* end_offset = i == output_size-1 ? index_size : offsets[i+1] - 1
|
||||
* length = end_offset - start_offset
|
||||
* for (j = start_offset..end_offset)
|
||||
* for (k = 0..block_size-1)
|
||||
* out[i*block_size + k] += input[indices[pos]*(fused_block_size) + k] *
|
||||
* (weights ? weights[IS_WEIGHT_POSITIONAL ? j : pos] : 1.0)
|
||||
* pos += 1
|
||||
* if (normalize_weights && length > 0)
|
||||
* for (k = 0..block_size-1)
|
||||
* out[i*block_size + k] /= length
|
||||
*
|
||||
*/
|
||||
|
||||
template <
|
||||
typename IndexType,
|
||||
typename InType,
|
||||
typename OutType,
|
||||
bool IS_WEIGHT_POSITIONAL = false>
|
||||
void Fused8BitRowwiseEmbeddingLookupIdx(
|
||||
const std::int64_t block_size,
|
||||
const std::int64_t output_size,
|
||||
const std::int64_t index_size,
|
||||
const std::int64_t data_size,
|
||||
const InType* input,
|
||||
const IndexType* indices,
|
||||
const IndexType* offsets,
|
||||
const float* weights, // optional, can be null for non-weighted sum
|
||||
bool normalize_by_lengths,
|
||||
OutType* out);
|
||||
} // namespace caffe2
|
@ -1,214 +0,0 @@
|
||||
#include "./fused_nbit_rowwise_conversion.h"
|
||||
|
||||
#include <c10/util/Half.h>
|
||||
#include <algorithm>
|
||||
#include <cmath>
|
||||
|
||||
#include "common.h"
|
||||
|
||||
#ifdef USE_FBGEMM
|
||||
#include "fbgemm/QuantUtils.h"
|
||||
#endif
|
||||
|
||||
namespace caffe2 {
|
||||
|
||||
void FloatToFused8BitRowwiseQuantized__base(
|
||||
const float* input,
|
||||
size_t input_rows,
|
||||
int input_columns,
|
||||
std::uint8_t* output) {
|
||||
constexpr float kEpsilon = 1e-8f;
|
||||
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
|
||||
int output_columns = input_columns + 2 * sizeof(float);
|
||||
for (std::size_t row = 0; row < input_rows; ++row) {
|
||||
const float* input_row = input + row * input_columns;
|
||||
std::uint8_t* output_row = output + row * output_columns;
|
||||
float* output_row_scale_bias =
|
||||
reinterpret_cast<float*>(output_row + input_columns);
|
||||
|
||||
float minimum_element =
|
||||
*std::min_element(input_row, input_row + input_columns);
|
||||
float maximum_element =
|
||||
*std::max_element(input_row, input_row + input_columns);
|
||||
float range = maximum_element - minimum_element;
|
||||
|
||||
output_row_scale_bias[0] = range / 255.0f;
|
||||
output_row_scale_bias[1] = minimum_element;
|
||||
const auto inverse_scale = 255.0f / (range + kEpsilon);
|
||||
for (std::size_t col = 0; col < static_cast<size_t>(input_columns); ++col) {
|
||||
output_row[col] =
|
||||
std::lrintf((input_row[col] - minimum_element) * inverse_scale);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void Fused8BitRowwiseQuantizedToFloat__base(
|
||||
const std::uint8_t* input,
|
||||
size_t input_rows,
|
||||
int input_columns,
|
||||
float* output) {
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
|
||||
int output_columns = input_columns - 2 * sizeof(float);
|
||||
|
||||
for (std::size_t row = 0; row < input_rows; ++row) {
|
||||
const std::uint8_t* input_row = input + row * input_columns;
|
||||
const float* input_row_scale_bias =
|
||||
reinterpret_cast<const float*>(input_row + output_columns);
|
||||
float* output_row = output + row * output_columns;
|
||||
|
||||
for (std::size_t col = 0; col < static_cast<std::size_t>(output_columns); ++col) {
|
||||
output_row[col] =
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
|
||||
input_row[col] * input_row_scale_bias[0] + input_row_scale_bias[1];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void FloatToFused8BitRowwiseQuantized(
|
||||
const float* input,
|
||||
size_t input_rows,
|
||||
int input_columns,
|
||||
std::uint8_t* output) {
|
||||
#ifdef USE_FBGEMM
|
||||
fbgemm::FloatOrHalfToFused8BitRowwiseQuantizedSBFloat<float>(
|
||||
input, input_rows, input_columns, output);
|
||||
#else
|
||||
FloatToFused8BitRowwiseQuantized__base(
|
||||
input, input_rows, input_columns, output);
|
||||
#endif
|
||||
}
|
||||
|
||||
void Fused8BitRowwiseQuantizedToFloat(
|
||||
const std::uint8_t* input,
|
||||
size_t input_rows,
|
||||
int input_columns,
|
||||
float* output) {
|
||||
#ifdef USE_FBGEMM
|
||||
fbgemm::Fused8BitRowwiseQuantizedSBFloatToFloatOrHalf<float>(
|
||||
input, input_rows, input_columns, output);
|
||||
#else
|
||||
Fused8BitRowwiseQuantizedToFloat__base(
|
||||
input, input_rows, input_columns, output);
|
||||
#endif
|
||||
}
|
||||
|
||||
void FloatToFusedNBitRowwiseQuantizedSBHalf__base(
|
||||
int bit_rate,
|
||||
const float* input,
|
||||
size_t input_rows,
|
||||
int input_columns,
|
||||
std::uint8_t* output) {
|
||||
int num_elem_per_byte = 8 / bit_rate;
|
||||
int output_columns =
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
|
||||
(input_columns + num_elem_per_byte - 1) / num_elem_per_byte +
|
||||
2 * sizeof(at::Half);
|
||||
for (std::size_t row = 0; row < input_rows; ++row) {
|
||||
const float* input_row = input + row * input_columns;
|
||||
std::uint8_t* output_row = output + row * output_columns;
|
||||
at::Half* output_row_scale_bias = reinterpret_cast<at::Half*>(
|
||||
output_row +
|
||||
(input_columns + num_elem_per_byte - 1) / num_elem_per_byte);
|
||||
|
||||
float minimum_element =
|
||||
*std::min_element(input_row, input_row + input_columns);
|
||||
float maximum_element =
|
||||
*std::max_element(input_row, input_row + input_columns);
|
||||
|
||||
minimum_element = static_cast<at::Half>(minimum_element);
|
||||
const float range = maximum_element - minimum_element;
|
||||
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
|
||||
at::Half scale = range == 0 ? 1.0f : range / ((1 << bit_rate) - 1);
|
||||
if (scale == 0) {
|
||||
// Corner case handling when maximum_element == minimum_element
|
||||
// Any scale would work because X - minimum_element will be 0 for all X
|
||||
scale = 1.0f;
|
||||
}
|
||||
float inverse_scale = 1.0f / scale;
|
||||
if (std::isinf(inverse_scale)) {
|
||||
scale = 1.0f;
|
||||
inverse_scale = 1.0f;
|
||||
}
|
||||
|
||||
output_row_scale_bias[0] = scale;
|
||||
output_row_scale_bias[1] = minimum_element;
|
||||
for (std::size_t col = 0; col < static_cast<size_t>(input_columns); ++col) {
|
||||
float X = input_row[col];
|
||||
std::uint8_t quantized = std::max(
|
||||
0,
|
||||
std::min<int>(
|
||||
std::lrintf((X - minimum_element) * inverse_scale),
|
||||
(1 << bit_rate) - 1));
|
||||
if (col % num_elem_per_byte == 0) {
|
||||
output_row[col / num_elem_per_byte] = quantized;
|
||||
} else {
|
||||
output_row[col / num_elem_per_byte] |=
|
||||
(quantized << ((col % num_elem_per_byte) * bit_rate));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void FusedNBitRowwiseQuantizedSBHalfToFloat__base(
|
||||
int bit_rate,
|
||||
const std::uint8_t* input,
|
||||
size_t input_rows,
|
||||
int input_columns,
|
||||
float* output) {
|
||||
int num_elem_per_byte = 8 / bit_rate;
|
||||
int output_columns =
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
|
||||
(input_columns - 2 * sizeof(at::Half)) * num_elem_per_byte;
|
||||
|
||||
for (std::size_t row = 0; row < static_cast<size_t>(input_rows); ++row) {
|
||||
const std::uint8_t* input_row = input + row * input_columns;
|
||||
const at::Half* input_row_scale_bias = reinterpret_cast<const at::Half*>(
|
||||
input_row +
|
||||
(output_columns + num_elem_per_byte - 1) / num_elem_per_byte);
|
||||
float scale = input_row_scale_bias[0];
|
||||
float bias = input_row_scale_bias[1];
|
||||
float* output_row = output + row * output_columns;
|
||||
|
||||
for (std::size_t col = 0; col < static_cast<std::size_t>(output_columns); ++col) {
|
||||
std::uint8_t quantized = input_row[col / num_elem_per_byte];
|
||||
quantized >>= (col % num_elem_per_byte) * bit_rate;
|
||||
quantized &= (1 << bit_rate) - 1;
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
|
||||
output_row[col] = scale * quantized + bias;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void FloatToFusedNBitRowwiseQuantizedSBHalf(
|
||||
int bit_rate,
|
||||
const float* input,
|
||||
size_t input_rows,
|
||||
int input_columns,
|
||||
std::uint8_t* output) {
|
||||
#ifdef USE_FBGEMM
|
||||
fbgemm::FloatOrHalfToFusedNBitRowwiseQuantizedSBHalf<float>(
|
||||
bit_rate, input, input_rows, input_columns, output);
|
||||
#else
|
||||
FloatToFusedNBitRowwiseQuantizedSBHalf__base(
|
||||
bit_rate, input, input_rows, input_columns, output);
|
||||
#endif
|
||||
}
|
||||
|
||||
void FusedNBitRowwiseQuantizedSBHalfToFloat(
|
||||
int bit_rate,
|
||||
const std::uint8_t* input,
|
||||
size_t input_rows,
|
||||
int input_columns,
|
||||
float* output) {
|
||||
#ifdef USE_FBGEMM
|
||||
fbgemm::FusedNBitRowwiseQuantizedSBHalfToFloatOrHalf<float>(
|
||||
bit_rate, input, input_rows, input_columns, output);
|
||||
#else
|
||||
FusedNBitRowwiseQuantizedSBHalfToFloat__base(
|
||||
bit_rate, input, input_rows, input_columns, output);
|
||||
#endif
|
||||
}
|
||||
|
||||
} // namespace caffe2
|
@ -1,39 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include <cstddef>
|
||||
#include <cstdint>
|
||||
|
||||
namespace caffe2 {
|
||||
|
||||
void FloatToFused8BitRowwiseQuantized(
|
||||
const float* input,
|
||||
size_t input_rows,
|
||||
int input_columns,
|
||||
std::uint8_t* output);
|
||||
|
||||
void Fused8BitRowwiseQuantizedToFloat(
|
||||
const std::uint8_t* input,
|
||||
size_t input_rows,
|
||||
int input_columns,
|
||||
float* output);
|
||||
|
||||
/**
|
||||
* Row-wise quantization with fp16 scale and bias
|
||||
*
|
||||
* @param bit_rate can be 2, 4, or 8
|
||||
*/
|
||||
void FloatToFusedNBitRowwiseQuantizedSBHalf(
|
||||
int bit_rate,
|
||||
const float* input,
|
||||
size_t input_rows,
|
||||
int input_columns,
|
||||
std::uint8_t* output);
|
||||
|
||||
void FusedNBitRowwiseQuantizedSBHalfToFloat(
|
||||
int bit_rate,
|
||||
const std::uint8_t* input,
|
||||
size_t input_rows,
|
||||
int input_columns,
|
||||
float* output);
|
||||
|
||||
} // namespace caffe2
|
@ -1,141 +0,0 @@
|
||||
#pragma once
|
||||
#include <string.h>
|
||||
#include <cmath>
|
||||
#include <cstdint>
|
||||
#include "c10/util/irange.h"
|
||||
#include "caffe2/utils/conversions.h"
|
||||
|
||||
#include "vectorizer.h"
|
||||
|
||||
namespace caffe2 {
|
||||
namespace perfkernels {
|
||||
namespace {
|
||||
template <typename T>
|
||||
inline T sigmoid(T x) {
|
||||
return 1 / (1 + std::exp(-x));
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
inline T host_tanh(T x) {
|
||||
return 2 * sigmoid(2 * x) - 1;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
inline void LstmUnitImpl(
|
||||
const int N,
|
||||
const int D,
|
||||
const int t,
|
||||
const T* H_prev,
|
||||
const T* C_prev,
|
||||
const T* X,
|
||||
const int32_t* seqLengths,
|
||||
const bool drop_states,
|
||||
T* C,
|
||||
T* H,
|
||||
const float forget_bias) {
|
||||
const T forgetBias = convert::To<float, T>(forget_bias);
|
||||
for (const auto n : c10::irange(N)) {
|
||||
const bool valid = seqLengths == nullptr || t < seqLengths[n];
|
||||
if (!valid) {
|
||||
if (drop_states) {
|
||||
memset(H, 0, sizeof(T) * D);
|
||||
memset(C, 0, sizeof(T) * D);
|
||||
} else {
|
||||
memcpy(H, H_prev, sizeof(T) * D);
|
||||
memcpy(C, C_prev, sizeof(T) * D);
|
||||
}
|
||||
} else {
|
||||
const T* X_D = &X[D];
|
||||
const T* X_2D = &X[2 * D];
|
||||
const T* X_3D = &X[3 * D];
|
||||
VECTOR_LOOP for (const auto d : c10::irange(D)) {
|
||||
const T i = sigmoid(X[d]);
|
||||
const T f = sigmoid(X_D[d] + forgetBias);
|
||||
const T o = sigmoid(X_2D[d]);
|
||||
const T g = host_tanh(X_3D[d]);
|
||||
const T c_prev = C_prev[d];
|
||||
const T c = f * c_prev + i * g;
|
||||
C[d] = c;
|
||||
const T host_tanh_c = host_tanh(c);
|
||||
H[d] = o * host_tanh_c;
|
||||
}
|
||||
}
|
||||
H_prev += D;
|
||||
C_prev += D;
|
||||
X += 4 * D;
|
||||
C += D;
|
||||
H += D;
|
||||
}
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
inline void LstmUnitGradientImpl(
|
||||
int N,
|
||||
int D,
|
||||
int t,
|
||||
const T* C_prev,
|
||||
const T* X,
|
||||
const int32_t* seqLengths,
|
||||
const T* C,
|
||||
const T* H,
|
||||
const T* C_diff,
|
||||
const T* H_diff,
|
||||
bool drop_states,
|
||||
T* H_prev_diff,
|
||||
T* C_prev_diff,
|
||||
T* X_diff,
|
||||
const float forget_bias) {
|
||||
const T localForgetBias = convert::To<float, T>(forget_bias);
|
||||
for (const auto n : c10::irange(N)) {
|
||||
const bool valid = seqLengths == nullptr || t < seqLengths[n];
|
||||
|
||||
if (!valid) {
|
||||
if (drop_states) {
|
||||
memset(C_prev_diff, 0, sizeof(T) * D);
|
||||
memset(H_prev_diff, 0, sizeof(T) * D);
|
||||
} else {
|
||||
memcpy(H_prev_diff, H_diff, sizeof(T) * D);
|
||||
memcpy(C_prev_diff, C_diff, sizeof(T) * D);
|
||||
}
|
||||
memset(X_diff, 0, 4 * sizeof(T) * D);
|
||||
} else {
|
||||
VECTOR_LOOP for (const auto d : c10::irange(D)) {
|
||||
T* c_prev_diff = C_prev_diff + d;
|
||||
T* h_prev_diff = H_prev_diff + d;
|
||||
T* i_diff = X_diff + d;
|
||||
T* f_diff = X_diff + 1 * D + d;
|
||||
T* o_diff = X_diff + 2 * D + d;
|
||||
T* g_diff = X_diff + 3 * D + d;
|
||||
|
||||
const T i = sigmoid(X[d]);
|
||||
const T f = sigmoid(X[1 * D + d] + localForgetBias);
|
||||
const T o = sigmoid(X[2 * D + d]);
|
||||
const T g = host_tanh(X[3 * D + d]);
|
||||
const T c_prev = C_prev[d];
|
||||
const T c = C[d];
|
||||
const T host_tanh_c = host_tanh(c);
|
||||
const T c_term_diff =
|
||||
C_diff[d] + H_diff[d] * o * (1 - host_tanh_c * host_tanh_c);
|
||||
*c_prev_diff = c_term_diff * f;
|
||||
*h_prev_diff = 0; // not used in 'valid' case
|
||||
*i_diff = c_term_diff * g * i * (1 - i);
|
||||
*f_diff = c_term_diff * c_prev * f * (1 - f);
|
||||
*o_diff = H_diff[d] * host_tanh_c * o * (1 - o);
|
||||
*g_diff = c_term_diff * i * (1 - g * g);
|
||||
}
|
||||
}
|
||||
C_prev += D;
|
||||
X += 4 * D;
|
||||
C += D;
|
||||
H += D;
|
||||
C_diff += D;
|
||||
H_diff += D;
|
||||
X_diff += 4 * D;
|
||||
H_prev_diff += D;
|
||||
C_prev_diff += D;
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace
|
||||
} // namespace perfkernels
|
||||
} // namespace caffe2
|
@ -1,73 +0,0 @@
|
||||
#pragma once
|
||||
#include <cstdint>
|
||||
|
||||
namespace caffe2 {
|
||||
namespace detail {
|
||||
|
||||
// Forward declration of the LSTMUnit templated
|
||||
// implementation
|
||||
template <typename T>
|
||||
void LstmUnitCpu(
|
||||
const int N,
|
||||
const int D,
|
||||
const int t,
|
||||
const T* H_prev,
|
||||
const T* C_prev,
|
||||
const T* X,
|
||||
const int32_t* seqLengths,
|
||||
const bool drop_states,
|
||||
T* C,
|
||||
T* H,
|
||||
const float forget_bias);
|
||||
|
||||
// Forward specialization
|
||||
extern template void LstmUnitCpu<float>(
|
||||
const int N,
|
||||
const int D,
|
||||
const int t,
|
||||
const float* H_prev,
|
||||
const float* C_prev,
|
||||
const float* X,
|
||||
const int32_t* seqLengths,
|
||||
const bool drop_states,
|
||||
float* C,
|
||||
float* H,
|
||||
const float forget_bias);
|
||||
|
||||
template <typename T>
|
||||
void LstmUnitGradientCpu(
|
||||
int N,
|
||||
int D,
|
||||
int t,
|
||||
const T* C_prev,
|
||||
const T* X,
|
||||
const int32_t* seqLengths,
|
||||
const T* C,
|
||||
const T* H,
|
||||
const T* C_diff,
|
||||
const T* H_diff,
|
||||
bool drop_states,
|
||||
T* H_prev_diff,
|
||||
T* C_prev_diff,
|
||||
T* X_diff,
|
||||
const float forget_bias);
|
||||
|
||||
extern template void LstmUnitGradientCpu<float>(
|
||||
int N,
|
||||
int D,
|
||||
int t,
|
||||
const float* C_prev,
|
||||
const float* X,
|
||||
const int32_t* seqLengths,
|
||||
const float* C,
|
||||
const float* H,
|
||||
const float* C_diff,
|
||||
const float* H_diff,
|
||||
bool drop_states,
|
||||
float* H_prev_diff,
|
||||
float* C_prev_diff,
|
||||
float* X_diff,
|
||||
const float forget_bias);
|
||||
|
||||
} // namespace detail
|
||||
} // namespace caffe2
|
@ -1,123 +0,0 @@
|
||||
#include "caffe2/perfkernels/lstm_unit_cpu-impl.h"
|
||||
|
||||
namespace caffe2 {
|
||||
namespace perfkernels {
|
||||
namespace {
|
||||
// Explicit initialize for float and AVX2 vectorization
|
||||
template void LstmUnitImpl<float>(
|
||||
const int N,
|
||||
const int D,
|
||||
const int t,
|
||||
const float* H_prev,
|
||||
const float* C_prev,
|
||||
const float* X,
|
||||
const int32_t* seqLengths,
|
||||
const bool drop_states,
|
||||
float* C,
|
||||
float* H,
|
||||
const float forget_bias);
|
||||
|
||||
template void LstmUnitGradientImpl<float>(
|
||||
int N,
|
||||
int D,
|
||||
int t,
|
||||
const float* C_prev,
|
||||
const float* X,
|
||||
const int32_t* seqLengths,
|
||||
const float* C,
|
||||
const float* H,
|
||||
const float* C_diff,
|
||||
const float* H_diff,
|
||||
bool drop_states,
|
||||
float* H_prev_diff,
|
||||
float* C_prev_diff,
|
||||
float* X_diff,
|
||||
const float forget_bias);
|
||||
} // namespace
|
||||
|
||||
// Define templated implementation fo LSTM kernels on CPU supporting AVX2
|
||||
template <typename T>
|
||||
void LstmUnitImpl__avx2_fma(
|
||||
const int N,
|
||||
const int D,
|
||||
const int t,
|
||||
const T* H_prev,
|
||||
const T* C_prev,
|
||||
const T* X,
|
||||
const int32_t* seqLengths,
|
||||
const bool drop_states,
|
||||
T* C,
|
||||
T* H,
|
||||
const float forget_bias) {
|
||||
LstmUnitImpl(
|
||||
N, D, t, H_prev, C_prev, X, seqLengths, drop_states, C, H, forget_bias);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void LstmUnitGradientImpl__avx2_fma(
|
||||
int N,
|
||||
int D,
|
||||
int t,
|
||||
const T* C_prev,
|
||||
const T* X,
|
||||
const int32_t* seqLengths,
|
||||
const T* C,
|
||||
const T* H,
|
||||
const T* C_diff,
|
||||
const T* H_diff,
|
||||
bool drop_states,
|
||||
T* H_prev_diff,
|
||||
T* C_prev_diff,
|
||||
T* X_diff,
|
||||
const float forget_bias) {
|
||||
LstmUnitGradientImpl(
|
||||
N,
|
||||
D,
|
||||
t,
|
||||
C_prev,
|
||||
X,
|
||||
seqLengths,
|
||||
C,
|
||||
H,
|
||||
C_diff,
|
||||
H_diff,
|
||||
drop_states,
|
||||
H_prev_diff,
|
||||
C_prev_diff,
|
||||
X_diff,
|
||||
forget_bias);
|
||||
}
|
||||
|
||||
// Explicit initialize for float
|
||||
template void LstmUnitImpl__avx2_fma<float>(
|
||||
const int N,
|
||||
const int D,
|
||||
const int t,
|
||||
const float* H_prev,
|
||||
const float* C_prev,
|
||||
const float* X,
|
||||
const int32_t* seqLengths,
|
||||
const bool drop_states,
|
||||
float* C,
|
||||
float* H,
|
||||
const float forget_bias);
|
||||
|
||||
template void LstmUnitGradientImpl__avx2_fma<float>(
|
||||
int N,
|
||||
int D,
|
||||
int t,
|
||||
const float* C_prev,
|
||||
const float* X,
|
||||
const int32_t* seqLengths,
|
||||
const float* C,
|
||||
const float* H,
|
||||
const float* C_diff,
|
||||
const float* H_diff,
|
||||
bool drop_states,
|
||||
float* H_prev_diff,
|
||||
float* C_prev_diff,
|
||||
float* X_diff,
|
||||
const float forget_bias);
|
||||
|
||||
} // namespace perfkernels
|
||||
} // namespace caffe2
|
@ -1,125 +0,0 @@
|
||||
#include "caffe2/perfkernels/lstm_unit_cpu_common.h"
|
||||
#include "caffe2/perfkernels/common.h"
|
||||
#include "caffe2/perfkernels/lstm_unit_cpu-impl.h"
|
||||
|
||||
namespace caffe2 {
|
||||
namespace detail {
|
||||
|
||||
// Define templated implementation fo LSTM kernels on CPU
|
||||
template <typename T>
|
||||
void LstmUnitCpu(
|
||||
const int N,
|
||||
const int D,
|
||||
const int t,
|
||||
const T* H_prev,
|
||||
const T* C_prev,
|
||||
const T* X,
|
||||
const int32_t* seqLengths,
|
||||
const bool drop_states,
|
||||
T* C,
|
||||
T* H,
|
||||
const float forget_bias) {
|
||||
// Do CPU dispatching
|
||||
AVX2_FMA_DO(
|
||||
perfkernels::LstmUnitImpl,
|
||||
N,
|
||||
D,
|
||||
t,
|
||||
H_prev,
|
||||
C_prev,
|
||||
X,
|
||||
seqLengths,
|
||||
drop_states,
|
||||
C,
|
||||
H,
|
||||
forget_bias);
|
||||
perfkernels::LstmUnitImpl(
|
||||
N, D, t, H_prev, C_prev, X, seqLengths, drop_states, C, H, forget_bias);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void LstmUnitGradientCpu(
|
||||
int N,
|
||||
int D,
|
||||
int t,
|
||||
const T* C_prev,
|
||||
const T* X,
|
||||
const int32_t* seqLengths,
|
||||
const T* C,
|
||||
const T* H,
|
||||
const T* C_diff,
|
||||
const T* H_diff,
|
||||
bool drop_states,
|
||||
T* H_prev_diff,
|
||||
T* C_prev_diff,
|
||||
T* X_diff,
|
||||
const float forget_bias) {
|
||||
// Do CPU dispatching
|
||||
AVX2_FMA_DO(
|
||||
perfkernels::LstmUnitGradientImpl,
|
||||
N,
|
||||
D,
|
||||
t,
|
||||
C_prev,
|
||||
X,
|
||||
seqLengths,
|
||||
C,
|
||||
H,
|
||||
C_diff,
|
||||
H_diff,
|
||||
drop_states,
|
||||
H_prev_diff,
|
||||
C_prev_diff,
|
||||
X_diff,
|
||||
forget_bias);
|
||||
perfkernels::LstmUnitGradientImpl(
|
||||
N,
|
||||
D,
|
||||
t,
|
||||
C_prev,
|
||||
X,
|
||||
seqLengths,
|
||||
C,
|
||||
H,
|
||||
C_diff,
|
||||
H_diff,
|
||||
drop_states,
|
||||
H_prev_diff,
|
||||
C_prev_diff,
|
||||
X_diff,
|
||||
forget_bias);
|
||||
}
|
||||
|
||||
// Explicit initialize for float
|
||||
template void LstmUnitCpu<float>(
|
||||
const int N,
|
||||
const int D,
|
||||
const int t,
|
||||
const float* H_prev,
|
||||
const float* C_prev,
|
||||
const float* X,
|
||||
const int32_t* seqLengths,
|
||||
const bool drop_states,
|
||||
float* C,
|
||||
float* H,
|
||||
const float forget_bias);
|
||||
|
||||
template void LstmUnitGradientCpu<float>(
|
||||
int N,
|
||||
int D,
|
||||
int t,
|
||||
const float* C_prev,
|
||||
const float* X,
|
||||
const int32_t* seqLengths,
|
||||
const float* C,
|
||||
const float* H,
|
||||
const float* C_diff,
|
||||
const float* H_diff,
|
||||
bool drop_states,
|
||||
float* H_prev_diff,
|
||||
float* C_prev_diff,
|
||||
float* X_diff,
|
||||
const float forget_bias);
|
||||
|
||||
} // namespace detail
|
||||
} // namespace caffe2
|
@ -1,71 +0,0 @@
|
||||
#pragma once
|
||||
#include <cstdint>
|
||||
|
||||
namespace caffe2 {
|
||||
namespace perfkernels {
|
||||
|
||||
template <typename T>
|
||||
void LstmUnitImpl__avx2_fma(
|
||||
const int N,
|
||||
const int D,
|
||||
const int t,
|
||||
const T* H_prev,
|
||||
const T* C_prev,
|
||||
const T* X,
|
||||
const int32_t* seqLengths,
|
||||
const bool drop_states,
|
||||
T* C,
|
||||
T* H,
|
||||
const float forget_bias);
|
||||
|
||||
template <typename T>
|
||||
void LstmUnitGradientImpl__avx2_fma(
|
||||
int N,
|
||||
int D,
|
||||
int t,
|
||||
const T* C_prev,
|
||||
const T* X,
|
||||
const int32_t* seqLengths,
|
||||
const T* C,
|
||||
const T* H,
|
||||
const T* C_diff,
|
||||
const T* H_diff,
|
||||
bool drop_states,
|
||||
T* H_prev_diff,
|
||||
T* C_prev_diff,
|
||||
T* X_diff,
|
||||
const float forget_bias);
|
||||
|
||||
// Forward declaration of specialized functions
|
||||
extern template void LstmUnitImpl__avx2_fma(
|
||||
const int N,
|
||||
const int D,
|
||||
const int t,
|
||||
const float* H_prev,
|
||||
const float* C_prev,
|
||||
const float* X,
|
||||
const int32_t* seqLengths,
|
||||
const bool drop_states,
|
||||
float* C,
|
||||
float* H,
|
||||
const float forget_bias);
|
||||
|
||||
extern template void LstmUnitGradientImpl__avx2_fma(
|
||||
int N,
|
||||
int D,
|
||||
int t,
|
||||
const float* C_prev,
|
||||
const float* X,
|
||||
const int32_t* seqLengths,
|
||||
const float* C,
|
||||
const float* H,
|
||||
const float* C_diff,
|
||||
const float* H_diff,
|
||||
bool drop_states,
|
||||
float* H_prev_diff,
|
||||
float* C_prev_diff,
|
||||
float* X_diff,
|
||||
const float forget_bias);
|
||||
|
||||
} // namespace perfkernels
|
||||
} // namespace caffe2
|
@ -1,35 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include <cstdint>
|
||||
|
||||
namespace caffe2 {
|
||||
|
||||
namespace math {
|
||||
|
||||
// Returns the quantized and compressed values of floating inputs
|
||||
// The "fused" representation stores the [bitwidth][tail][min][max]
|
||||
// with the quantized data in one array. Since we store 8/bitwidth
|
||||
// quantized data in one byte, the last buckets of some bytes may have
|
||||
// unused bits. There are totally tail buckets are unused.
|
||||
// We encode *bitwidth* and *tail* at the beginning,
|
||||
// following by 32-bit floating data respresenting min and max.
|
||||
// | bitwidth | tail | min | max | ... int8 data ... |
|
||||
// | 1B | 1B | 4B | 4B | ...output_data....|
|
||||
// In output_data: the b-th bucket of the i-th byte stores
|
||||
// the i-th data of the b-th segment of input row
|
||||
|
||||
void quantize_and_compress(
|
||||
const float* input_data,
|
||||
std::uint8_t* output_data,
|
||||
std::uint64_t input_size,
|
||||
std::uint64_t bitwidth,
|
||||
bool random,
|
||||
const float* random_buffer);
|
||||
|
||||
void decompress_and_dequantize(
|
||||
const std::uint8_t* input_data,
|
||||
float* output_data,
|
||||
std::uint64_t input_size);
|
||||
|
||||
} // namespace math
|
||||
} // namespace caffe2
|
@ -1,246 +0,0 @@
|
||||
// Implements the math functions for CPU.
|
||||
// The implementation in this file allows us to route the underlying numerical
|
||||
// computation library to different compiler options (-mno-avx2 or -mavx2).
|
||||
|
||||
#include <immintrin.h>
|
||||
#include <cmath>
|
||||
#include <cstdint>
|
||||
|
||||
#include <c10/util/irange.h>
|
||||
|
||||
using std::uint64_t;
|
||||
using std::uint8_t;
|
||||
|
||||
namespace caffe2 {
|
||||
|
||||
namespace math {
|
||||
|
||||
static constexpr double QEPSILON = 1e-8;
|
||||
|
||||
void quantize_and_compress__avx2(
|
||||
const float* input_data,
|
||||
uint8_t* output_data,
|
||||
uint64_t input_size,
|
||||
uint64_t bitwidth,
|
||||
bool random,
|
||||
const float* random_buffer) {
|
||||
__m256i shuffle_mask_v = _mm256_set_epi8(
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-narrowing-conversions)
|
||||
0xff,
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-narrowing-conversions)
|
||||
0xff,
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-narrowing-conversions)
|
||||
0xff,
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-narrowing-conversions)
|
||||
0xff,
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-narrowing-conversions)
|
||||
0xff,
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-narrowing-conversions)
|
||||
0xff,
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-narrowing-conversions)
|
||||
0xff,
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-narrowing-conversions)
|
||||
0xff,
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-narrowing-conversions)
|
||||
0xff,
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-narrowing-conversions)
|
||||
0xff,
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-narrowing-conversions)
|
||||
0xff,
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-narrowing-conversions)
|
||||
0xff,
|
||||
0x0c,
|
||||
0x08,
|
||||
0x04,
|
||||
0x00,
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-narrowing-conversions)
|
||||
0xff,
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-narrowing-conversions)
|
||||
0xff,
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-narrowing-conversions)
|
||||
0xff,
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-narrowing-conversions)
|
||||
0xff,
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-narrowing-conversions)
|
||||
0xff,
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-narrowing-conversions)
|
||||
0xff,
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-narrowing-conversions)
|
||||
0xff,
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-narrowing-conversions)
|
||||
0xff,
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-narrowing-conversions)
|
||||
0xff,
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-narrowing-conversions)
|
||||
0xff,
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-narrowing-conversions)
|
||||
0xff,
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-narrowing-conversions)
|
||||
0xff,
|
||||
0x0c,
|
||||
0x08,
|
||||
0x04,
|
||||
0x00);
|
||||
__m256i permute_mask_v =
|
||||
_mm256_set_epi32(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00);
|
||||
|
||||
uint64_t data_per_byte = 8 / bitwidth;
|
||||
uint64_t tail = input_size % data_per_byte;
|
||||
tail = tail ? data_per_byte - tail : 0;
|
||||
uint64_t segment_size = (input_size + data_per_byte - 1) / data_per_byte;
|
||||
|
||||
// basic info
|
||||
float minimum_element = INFINITY, maximum_element = -INFINITY;
|
||||
for (const auto i : c10::irange(input_size)) {
|
||||
minimum_element =
|
||||
(input_data[i] < minimum_element) ? input_data[i] : minimum_element;
|
||||
maximum_element =
|
||||
(input_data[i] > maximum_element) ? input_data[i] : maximum_element;
|
||||
}
|
||||
output_data[0] = bitwidth;
|
||||
output_data[1] = tail;
|
||||
reinterpret_cast<float*>(output_data + 2)[0] = minimum_element;
|
||||
reinterpret_cast<float*>(output_data + 2)[1] = maximum_element;
|
||||
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
|
||||
float gap = (maximum_element - minimum_element) / ((1 << bitwidth) - 1.0f);
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
|
||||
float gap_inverse = 1. / (gap + QEPSILON);
|
||||
uint8_t max_q = (1 << bitwidth) - 1;
|
||||
uint64_t bit_start = 0;
|
||||
if (random) {
|
||||
for (uint64_t start = 0; start < input_size; start += segment_size) {
|
||||
uint64_t stride = start + segment_size <= input_size ? segment_size
|
||||
: input_size - start;
|
||||
uint64_t i = 0;
|
||||
constexpr int VLEN = 8;
|
||||
for (; i < stride / VLEN * VLEN; i += VLEN) {
|
||||
__m256 r_v = _mm256_loadu_ps(&random_buffer[start + i]);
|
||||
__m256 fval_v = _mm256_loadu_ps(input_data + start + i);
|
||||
__m256 thetimes_v = _mm256_mul_ps(
|
||||
_mm256_sub_ps(fval_v, _mm256_set1_ps(minimum_element)),
|
||||
_mm256_set1_ps(gap_inverse));
|
||||
__m256 rounded_v = _mm256_floor_ps(_mm256_add_ps(thetimes_v, r_v));
|
||||
rounded_v = _mm256_max_ps(
|
||||
_mm256_setzero_ps(),
|
||||
_mm256_min_ps(_mm256_set1_ps(max_q), rounded_v));
|
||||
__m256i qval_v = _mm256_cvtps_epi32(rounded_v);
|
||||
__m256i orval_v = _mm256_cvtepu8_epi32(_mm_lddqu_si128(
|
||||
reinterpret_cast<const __m128i*>(output_data + 10 + i)));
|
||||
orval_v =
|
||||
_mm256_or_si256(orval_v, _mm256_slli_epi32(qval_v, bit_start));
|
||||
orval_v = _mm256_shuffle_epi8(orval_v, shuffle_mask_v);
|
||||
orval_v = _mm256_permutevar8x32_epi32(orval_v, permute_mask_v);
|
||||
*reinterpret_cast<int64_t*>(output_data + 10 + i) =
|
||||
_mm256_extract_epi64(orval_v, 0);
|
||||
}
|
||||
for (; i < stride; ++i) {
|
||||
float fval = input_data[start + i];
|
||||
float thetimes = (fval - minimum_element) * gap_inverse;
|
||||
float rounded = floor(thetimes + random_buffer[start + i]);
|
||||
rounded = rounded < static_cast<float>(max_q)
|
||||
? rounded
|
||||
: static_cast<float>(max_q);
|
||||
rounded = rounded > 0.0f ? rounded : 0.0f;
|
||||
uint8_t qval = rounded;
|
||||
|
||||
uint8_t orval = output_data[10 + i];
|
||||
output_data[10 + i] = orval | static_cast<uint8_t>(qval << bit_start);
|
||||
}
|
||||
bit_start += bitwidth;
|
||||
}
|
||||
} else {
|
||||
// !random
|
||||
for (uint64_t start = 0; start < input_size; start += segment_size) {
|
||||
uint64_t stride = start + segment_size <= input_size ? segment_size
|
||||
: input_size - start;
|
||||
uint64_t i = 0;
|
||||
constexpr int VLEN = 8;
|
||||
for (; i < stride / VLEN * VLEN; i += VLEN) {
|
||||
__m256 fval_v = _mm256_loadu_ps(input_data + start + i);
|
||||
__m256 thetimes_v = _mm256_mul_ps(
|
||||
_mm256_sub_ps(fval_v, _mm256_set1_ps(minimum_element)),
|
||||
_mm256_set1_ps(gap_inverse));
|
||||
thetimes_v = _mm256_max_ps(
|
||||
_mm256_setzero_ps(),
|
||||
_mm256_min_ps(_mm256_set1_ps(max_q), thetimes_v));
|
||||
__m256i qval_v = _mm256_cvtps_epi32(_mm256_round_ps(
|
||||
thetimes_v, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
|
||||
__m256i orval_v = _mm256_cvtepu8_epi32(_mm_lddqu_si128(
|
||||
reinterpret_cast<const __m128i*>(output_data + 10 + i)));
|
||||
orval_v =
|
||||
_mm256_or_si256(orval_v, _mm256_slli_epi32(qval_v, bit_start));
|
||||
orval_v = _mm256_shuffle_epi8(orval_v, shuffle_mask_v);
|
||||
orval_v = _mm256_permutevar8x32_epi32(orval_v, permute_mask_v);
|
||||
*reinterpret_cast<int64_t*>(output_data + 10 + i) =
|
||||
_mm256_extract_epi64(orval_v, 0);
|
||||
}
|
||||
for (; i < stride; ++i) {
|
||||
float fval = input_data[start + i];
|
||||
float thetimes = (fval - minimum_element) * gap_inverse;
|
||||
thetimes = thetimes < static_cast<float>(max_q)
|
||||
? thetimes
|
||||
: static_cast<float>(max_q);
|
||||
thetimes = thetimes > 0.0f ? thetimes : 0.0f;
|
||||
uint8_t qval = nearbyint(thetimes);
|
||||
|
||||
uint8_t orval = output_data[10 + i];
|
||||
output_data[10 + i] = orval | static_cast<uint8_t>(qval << bit_start);
|
||||
}
|
||||
bit_start += bitwidth;
|
||||
}
|
||||
} // !random
|
||||
}
|
||||
|
||||
void decompress_and_dequantize__avx2(
|
||||
const uint8_t* input_data,
|
||||
float* output_data,
|
||||
uint64_t input_size) {
|
||||
// basic info
|
||||
const float minimum_element =
|
||||
reinterpret_cast<const float*>(input_data + 2)[0];
|
||||
const float maximum_element =
|
||||
reinterpret_cast<const float*>(input_data + 2)[1];
|
||||
const uint64_t bitwidth = input_data[0];
|
||||
const float gap =
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
|
||||
(maximum_element - minimum_element) / ((1 << bitwidth) - 1.f) +
|
||||
QEPSILON; // for exact recovering
|
||||
|
||||
const uint64_t tail = input_data[1];
|
||||
|
||||
const uint64_t output_size = (input_size - 10) * (8 / bitwidth) - tail;
|
||||
// decoding
|
||||
uint64_t bit_start = 0;
|
||||
const uint64_t segment_size = input_size - 10;
|
||||
for (uint64_t start = 0; start < output_size; start += segment_size) {
|
||||
uint64_t stride = start + segment_size <= output_size ? segment_size
|
||||
: output_size - start;
|
||||
uint8_t mask = (1 << bitwidth) - 1;
|
||||
uint64_t i = 0;
|
||||
// Can process 8 elements at a time because we need to expand uint8_t
|
||||
// to int32_t to use epi32 vector instructions.
|
||||
constexpr int VLEN = 8;
|
||||
for (; i < stride / VLEN * VLEN; i += VLEN) {
|
||||
__m128i in_v = _mm_lddqu_si128(
|
||||
reinterpret_cast<const __m128i*>(input_data + 10 + i));
|
||||
__m256i out_epi32_v = _mm256_and_si256(
|
||||
_mm256_srli_epi32(_mm256_cvtepu8_epi32(in_v), bit_start),
|
||||
_mm256_set1_epi32(mask));
|
||||
__m256 out_v = _mm256_fmadd_ps(
|
||||
_mm256_cvtepi32_ps(out_epi32_v),
|
||||
_mm256_set1_ps(gap),
|
||||
_mm256_set1_ps(minimum_element));
|
||||
_mm256_storeu_ps(output_data + start + i, out_v);
|
||||
}
|
||||
for (; i < stride; ++i) {
|
||||
output_data[start + i] =
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-narrowing-conversions)
|
||||
((input_data[10 + i] >> bit_start) & mask) * gap + minimum_element;
|
||||
}
|
||||
bit_start += bitwidth;
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace math
|
||||
} // namespace caffe2
|
@ -1,168 +0,0 @@
|
||||
// Implements the math functions for CPU.
|
||||
// The implementation in this file allows us to route the underlying numerical
|
||||
// computation library to different compiler options (-mno-avx2 or -mavx2).
|
||||
|
||||
#include <cfloat>
|
||||
#include <cmath>
|
||||
#include <cstdint>
|
||||
|
||||
#include "common.h"
|
||||
// NOLINTNEXTLINE(modernize-deprecated-headers)
|
||||
#include "math.h"
|
||||
|
||||
#include <c10/util/irange.h>
|
||||
|
||||
using std::uint64_t;
|
||||
using std::uint8_t;
|
||||
|
||||
namespace caffe2 {
|
||||
|
||||
namespace math {
|
||||
|
||||
static constexpr double QEPSILON = 1e-8;
|
||||
|
||||
void quantize_and_compress__base(
|
||||
const float* input_data,
|
||||
uint8_t* output_data,
|
||||
uint64_t input_size,
|
||||
uint64_t bitwidth,
|
||||
bool random,
|
||||
const float* random_buffer) {
|
||||
uint64_t data_per_byte = 8 / bitwidth;
|
||||
uint64_t tail = input_size % data_per_byte;
|
||||
tail = tail ? data_per_byte - tail : 0;
|
||||
uint64_t segment_size = (input_size + data_per_byte - 1) / data_per_byte;
|
||||
|
||||
// basic info
|
||||
float minimum_element = INFINITY, maximum_element = -INFINITY;
|
||||
for (const auto i : c10::irange(input_size)) {
|
||||
minimum_element =
|
||||
input_data[i] < minimum_element ? input_data[i] : minimum_element;
|
||||
maximum_element =
|
||||
input_data[i] > maximum_element ? input_data[i] : maximum_element;
|
||||
}
|
||||
output_data[0] = bitwidth;
|
||||
output_data[1] = tail;
|
||||
reinterpret_cast<float*>(output_data + 2)[0] = minimum_element;
|
||||
reinterpret_cast<float*>(output_data + 2)[1] = maximum_element;
|
||||
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
|
||||
float gap = (maximum_element - minimum_element) / ((1 << bitwidth) - 1.0f);
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
|
||||
float gap_inverse = 1. / (gap + QEPSILON);
|
||||
uint8_t max_q = (1 << bitwidth) - 1;
|
||||
uint64_t bit_start = 0;
|
||||
if (random) {
|
||||
for (uint64_t start = 0; start < input_size; start += segment_size) {
|
||||
uint64_t stride = start + segment_size <= input_size ? segment_size
|
||||
: input_size - start;
|
||||
uint64_t i = 0;
|
||||
for (; i < stride; ++i) {
|
||||
float fval = input_data[start + i];
|
||||
float thetimes = (fval - minimum_element) * gap_inverse;
|
||||
float rounded = floor(thetimes + random_buffer[start + i]);
|
||||
rounded = rounded < static_cast<float>(max_q)
|
||||
? rounded
|
||||
: static_cast<float>(max_q);
|
||||
rounded = rounded > 0.0f ? rounded : 0.0f;
|
||||
uint8_t qval = rounded;
|
||||
|
||||
uint8_t orval = output_data[10 + i];
|
||||
output_data[10 + i] = orval | static_cast<uint8_t>(qval << bit_start);
|
||||
}
|
||||
bit_start += bitwidth;
|
||||
}
|
||||
} else {
|
||||
for (uint64_t start = 0; start < input_size; start += segment_size) {
|
||||
uint64_t stride = start + segment_size <= input_size ? segment_size
|
||||
: input_size - start;
|
||||
uint64_t i = 0;
|
||||
for (; i < stride; ++i) {
|
||||
float fval = input_data[start + i];
|
||||
float thetimes = (fval - minimum_element) * gap_inverse;
|
||||
thetimes = thetimes < static_cast<float>(max_q)
|
||||
? thetimes
|
||||
: static_cast<float>(max_q);
|
||||
thetimes = thetimes > 0.0f ? thetimes : 0.0f;
|
||||
uint8_t qval = nearbyint(thetimes);
|
||||
|
||||
uint8_t orval = output_data[10 + i];
|
||||
output_data[10 + i] = orval | static_cast<uint8_t>(qval << bit_start);
|
||||
}
|
||||
bit_start += bitwidth;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
decltype(quantize_and_compress__base) quantize_and_compress__avx2;
|
||||
void quantize_and_compress(
|
||||
const float* input_data,
|
||||
uint8_t* output_data,
|
||||
uint64_t input_size,
|
||||
uint64_t bitwidth,
|
||||
bool random,
|
||||
const float* random_buffer) {
|
||||
AVX2_DO(
|
||||
quantize_and_compress,
|
||||
input_data,
|
||||
output_data,
|
||||
input_size,
|
||||
bitwidth,
|
||||
random,
|
||||
random_buffer);
|
||||
BASE_DO(
|
||||
quantize_and_compress,
|
||||
input_data,
|
||||
output_data,
|
||||
input_size,
|
||||
bitwidth,
|
||||
random,
|
||||
random_buffer);
|
||||
}
|
||||
|
||||
void decompress_and_dequantize__base(
|
||||
const uint8_t* input_data,
|
||||
float* output_data,
|
||||
uint64_t input_size) {
|
||||
// basic info
|
||||
const float minimum_element =
|
||||
reinterpret_cast<const float*>(input_data + 2)[0];
|
||||
const float maximum_element =
|
||||
reinterpret_cast<const float*>(input_data + 2)[1];
|
||||
const uint64_t bitwidth = input_data[0];
|
||||
const float gap =
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
|
||||
(maximum_element - minimum_element) / ((1 << bitwidth) - 1.f) +
|
||||
QEPSILON; // for exact recovering
|
||||
|
||||
const uint64_t tail = input_data[1];
|
||||
|
||||
const uint64_t output_size = (input_size - 10) * (8 / bitwidth) - tail;
|
||||
// decoding
|
||||
uint64_t bit_start = 0;
|
||||
const uint64_t segment_size = input_size - 10;
|
||||
for (uint64_t start = 0; start < output_size; start += segment_size) {
|
||||
uint64_t stride = start + segment_size <= output_size ? segment_size
|
||||
: output_size - start;
|
||||
uint8_t mask = (1 << bitwidth) - 1;
|
||||
uint64_t i = 0;
|
||||
for (; i < stride; ++i) {
|
||||
output_data[start + i] =
|
||||
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-narrowing-conversions)
|
||||
((input_data[10 + i] >> bit_start) & mask) * gap + minimum_element;
|
||||
}
|
||||
bit_start += bitwidth;
|
||||
}
|
||||
}
|
||||
|
||||
decltype(decompress_and_dequantize__base) decompress_and_dequantize__avx2;
|
||||
void decompress_and_dequantize(
|
||||
const uint8_t* input_data,
|
||||
float* output_data,
|
||||
uint64_t input_size) {
|
||||
AVX2_DO(decompress_and_dequantize, input_data, output_data, input_size);
|
||||
BASE_DO(decompress_and_dequantize, input_data, output_data, input_size);
|
||||
}
|
||||
|
||||
} // namespace math
|
||||
} // namespace caffe2
|
@ -1,88 +0,0 @@
|
||||
#include <c10/util/Half.h>
|
||||
#include "caffe2/perfkernels/typed_axpy.h"
|
||||
#include "caffe2/perfkernels/common.h"
|
||||
|
||||
namespace caffe2 {
|
||||
|
||||
void TypedAxpy__base(int N, const float a, const float* x, float* y) {
|
||||
for (int i = 0; i < N; ++i) {
|
||||
y[i] += a * x[i];
|
||||
}
|
||||
}
|
||||
|
||||
decltype(TypedAxpy__base) TypedAxpy__avx2_fma;
|
||||
decltype(TypedAxpy__base) TypedAxpy__avx_f16c;
|
||||
template <>
|
||||
void TypedAxpy<float, float>(int N, const float a, const float* x, float* y) {
|
||||
AVX2_FMA_DO(TypedAxpy, N, a, x, y);
|
||||
AVX_F16C_DO(TypedAxpy, N, a, x, y);
|
||||
BASE_DO(TypedAxpy, N, a, x, y);
|
||||
}
|
||||
|
||||
void TypedAxpyHalffloat__base(
|
||||
int N,
|
||||
const float a,
|
||||
const at::Half* x,
|
||||
float* y) {
|
||||
for (int i = 0; i < N; ++i) {
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
|
||||
union {
|
||||
uint32_t intval;
|
||||
float floatval;
|
||||
} t1;
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
|
||||
uint32_t t2, t3;
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
|
||||
t1.intval = x[i].x & 0x7fff; // Non-sign bits
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
|
||||
t2 = x[i].x & 0x8000; // Sign bit
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
|
||||
t3 = x[i].x & 0x7c00; // Exponent
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
|
||||
t1.intval <<= 13; // Align mantissa on MSB
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
|
||||
t2 <<= 16; // Shift sign bit into position
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
|
||||
t1.intval += 0x38000000; // Adjust bias
|
||||
t1.intval = (t3 == 0 ? 0 : t1.intval); // Denormals-as-zero
|
||||
t1.intval |= t2; // Re-insert sign bit
|
||||
y[i] += t1.floatval * a;
|
||||
}
|
||||
}
|
||||
|
||||
decltype(TypedAxpyHalffloat__base) TypedAxpyHalffloat__avx2_fma;
|
||||
decltype(TypedAxpyHalffloat__base) TypedAxpyHalffloat__avx_f16c;
|
||||
template <>
|
||||
void TypedAxpy<at::Half, float>(
|
||||
int N,
|
||||
const float a,
|
||||
const at::Half* x,
|
||||
float* y) {
|
||||
AVX2_FMA_DO(TypedAxpyHalffloat, N, a, x, y);
|
||||
AVX_F16C_DO(TypedAxpyHalffloat, N, a, x, y);
|
||||
BASE_DO(TypedAxpyHalffloat, N, a, x, y);
|
||||
}
|
||||
|
||||
void TypedAxpy_uint8_float__base(
|
||||
int N,
|
||||
const float a,
|
||||
const std::uint8_t* x,
|
||||
float* y) {
|
||||
for (int i = 0; i < N; ++i) {
|
||||
y[i] += (float)(x[i]) * a;
|
||||
}
|
||||
}
|
||||
|
||||
decltype(TypedAxpy_uint8_float__base) TypedAxpy_uint8_float__avx2_fma;
|
||||
decltype(TypedAxpy_uint8_float__base) TypedAxpy_uint8_float__avx_f16c;
|
||||
template <>
|
||||
void TypedAxpy<std::uint8_t, float>(
|
||||
int N,
|
||||
const float a,
|
||||
const std::uint8_t* x,
|
||||
float* y) {
|
||||
AVX2_FMA_DO(TypedAxpy_uint8_float, N, a, x, y);
|
||||
BASE_DO(TypedAxpy_uint8_float, N, a, x, y);
|
||||
}
|
||||
|
||||
} // namespace caffe2
|
@ -1,12 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
namespace caffe2 {
|
||||
|
||||
// Similar to Axpy that calculate y = a * x + y, but allowing x and y to be
|
||||
// of different data types.
|
||||
// It also provides a performance optimization hint (use_a) to see if a is going
|
||||
// to be 1 or not.
|
||||
template <typename IN, typename OUT>
|
||||
void TypedAxpy(int N, const OUT a, const IN* x, OUT* y);
|
||||
|
||||
} // namespace caffe2
|
@ -1,68 +0,0 @@
|
||||
#include "caffe2/perfkernels/cvtsh_ss_bugfix.h"
|
||||
|
||||
#include <c10/util/Half.h>
|
||||
#include <emmintrin.h>
|
||||
#include <immintrin.h>
|
||||
|
||||
namespace caffe2 {
|
||||
|
||||
void TypedAxpy__avx_f16c(int N, const float a, const float* x, float* y) {
|
||||
int current = 0;
|
||||
const int bound = (N % 8) ? N - 8 : N;
|
||||
__m256 mma = _mm256_set1_ps(a);
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
|
||||
for (; current < bound; current += 8) {
|
||||
_mm256_storeu_ps(
|
||||
y + current,
|
||||
_mm256_add_ps(
|
||||
_mm256_mul_ps(mma, _mm256_loadu_ps(x + current)),
|
||||
_mm256_loadu_ps(y + current)));
|
||||
}
|
||||
|
||||
if (bound != N) {
|
||||
while (current < N) {
|
||||
y[current] += x[current] * a;
|
||||
++current;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void TypedAxpyHalffloat__avx_f16c(
|
||||
int N,
|
||||
const float a,
|
||||
const at::Half* x,
|
||||
float* y) {
|
||||
// if x does not start at the 16 byte boundary, we will process the first few.
|
||||
// before we get to a real one.
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
|
||||
while ((reinterpret_cast<unsigned long>(x) % 16) && N) {
|
||||
*(y++) += _cvtsh_ss((*(x++)).x) * a;
|
||||
--N;
|
||||
}
|
||||
|
||||
// From now on we can do vectorized additions using __m256, which is 8 floats,
|
||||
// so we will vectorize every 8 element and then resort to cvtsh_ss.
|
||||
__m256 mma = _mm256_set1_ps(a);
|
||||
int current = 0;
|
||||
const int bound = (N % 8) ? N - 8 : N;
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
|
||||
for (; current < bound; current += 8) {
|
||||
__m128i mmx_16 =
|
||||
_mm_loadu_si128(reinterpret_cast<const __m128i*>(x + current));
|
||||
__m256 mmx_32 = _mm256_cvtph_ps(mmx_16);
|
||||
__m256 mmy_in = _mm256_loadu_ps(y + current);
|
||||
__m256 mmmul = _mm256_mul_ps(mmx_32, mma);
|
||||
__m256 mmy_out = _mm256_add_ps(mmmul, mmy_in);
|
||||
_mm256_storeu_ps(y + current, mmy_out);
|
||||
}
|
||||
|
||||
if (bound != N) {
|
||||
while (current < N) {
|
||||
y[current] += _cvtsh_ss(x[current].x) * a;
|
||||
++current;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace caffe2
|
@ -1,104 +0,0 @@
|
||||
#include "caffe2/perfkernels/cvtsh_ss_bugfix.h"
|
||||
|
||||
#include <c10/util/Half.h>
|
||||
#include <emmintrin.h>
|
||||
#include <immintrin.h>
|
||||
|
||||
namespace caffe2 {
|
||||
|
||||
void TypedAxpy__avx2_fma(int N, const float a, const float* x, float* y) {
|
||||
int current = 0;
|
||||
const int bound = (N % 8) ? N - 8 : N;
|
||||
__m256 mma = _mm256_set1_ps(a);
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
|
||||
for (; current < bound; current += 8) {
|
||||
_mm256_storeu_ps(
|
||||
y + current,
|
||||
_mm256_fmadd_ps(
|
||||
mma, _mm256_loadu_ps(x + current), _mm256_loadu_ps(y + current)));
|
||||
}
|
||||
|
||||
if (bound != N) {
|
||||
while (current < N) {
|
||||
y[current] += x[current] * a;
|
||||
++current;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void TypedAxpyHalffloat__avx2_fma(
|
||||
int N,
|
||||
const float a,
|
||||
const at::Half* x,
|
||||
float* y) {
|
||||
// if x does not start at the 16 byte boundary, we will process the first few.
|
||||
// before we get to a real one.
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
|
||||
while ((reinterpret_cast<unsigned long>(x) % 16) && N) {
|
||||
*(y++) += _cvtsh_ss((*(x++)).x) * a;
|
||||
--N;
|
||||
}
|
||||
|
||||
// From now on we can do vectorized additions using __m256, which is 8 floats,
|
||||
// so we will vectorize every 8 element and then resort to cvtsh_ss.
|
||||
__m256 mma = _mm256_set1_ps(a);
|
||||
int current = 0;
|
||||
const int bound = (N % 8) ? N - 8 : N;
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
|
||||
for (; current < bound; current += 8) {
|
||||
__m128i mmx_16 =
|
||||
_mm_loadu_si128(reinterpret_cast<const __m128i*>(x + current));
|
||||
__m256 mmx_32 = _mm256_cvtph_ps(mmx_16);
|
||||
__m256 mmy = _mm256_loadu_ps(y + current);
|
||||
mmy = _mm256_fmadd_ps(mmx_32, mma, mmy);
|
||||
_mm256_storeu_ps(y + current, mmy);
|
||||
}
|
||||
|
||||
if (bound != N) {
|
||||
while (current < N) {
|
||||
y[current] += _cvtsh_ss(x[current].x) * a;
|
||||
++current;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void TypedAxpy_uint8_float__avx2_fma(
|
||||
int N,
|
||||
const float a,
|
||||
const std::uint8_t* x,
|
||||
float* y) {
|
||||
// if x does not start at the 16 byte boundary, we will process the first few.
|
||||
// before we get to a real one.
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
|
||||
while ((reinterpret_cast<unsigned long>(x) % 16) && N) {
|
||||
*(y++) += static_cast<float>(*(x++)) * a;
|
||||
--N;
|
||||
}
|
||||
|
||||
// From now on we can do vectorized additions using __m256, which is 8 floats,
|
||||
// so we will vectorize every 8 element and then resort to cvtsh_ss.
|
||||
__m256 mma = _mm256_set1_ps(a);
|
||||
int current = 0;
|
||||
const int bound = (N % 8) ? N - 8 : N;
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
|
||||
for (; current < bound; current += 8) {
|
||||
__m256i mmx_int32 = _mm256_cvtepi8_epi32(
|
||||
_mm_loadu_si128(reinterpret_cast<const __m128i*>(x + current)));
|
||||
__m256 mmx_fp32 = _mm256_cvtepi32_ps(mmx_int32);
|
||||
|
||||
__m256 mmy = _mm256_loadu_ps(y + current);
|
||||
mmy = _mm256_fmadd_ps(mmx_fp32, mma, mmy);
|
||||
_mm256_storeu_ps(y + current, mmy);
|
||||
}
|
||||
|
||||
if (bound != N) {
|
||||
while (current < N) {
|
||||
y[current] += (float)(x[current]) * a;
|
||||
++current;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace caffe2
|
@ -1,28 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#if (ENABLE_VECTORIZATION > 0) && !defined(_DEBUG) && !defined(DEBUG)
|
||||
#if defined(__clang__) && (__clang_major__ > 7)
|
||||
#define IS_SANITIZER \
|
||||
((__has_feature(address_sanitizer) == 1) || \
|
||||
(__has_feature(memory_sanitizer) == 1) || \
|
||||
(__has_feature(thread_sanitizer) == 1) || \
|
||||
(__has_feature(undefined_sanitizer) == 1))
|
||||
|
||||
#if IS_SANITIZER == 0
|
||||
#define VECTOR_LOOP _Pragma("clang loop vectorize(enable)")
|
||||
#define FAST_MATH _Pragma("clang fp contract(fast)")
|
||||
#define VECTORIZED_KERNEL 1
|
||||
#endif
|
||||
#elif defined(_OPENMP) && (_OPENMP >= 201511)
|
||||
// Support with OpenMP4.5 and above
|
||||
#define VECTOR_LOOP _Pragma("omp for simd")
|
||||
#define VECTORIZED_KERNEL 1
|
||||
#define FAST_MATH
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifndef VECTOR_LOOP
|
||||
// Not supported
|
||||
#define VECTOR_LOOP
|
||||
#define FAST_MATH
|
||||
#endif
|
@ -62,8 +62,8 @@ Overall, the ``pipelining`` package provides the following features:
|
||||
application on the Llama model.
|
||||
|
||||
|
||||
Step 1: build ``PipelineStage`` for execution
|
||||
*********************************************
|
||||
Step 1: build ``PipelineStage``
|
||||
*******************************
|
||||
|
||||
Before we can use a ``PipelineSchedule``, we need to create ``PipelineStage``
|
||||
objects that wrap the part of the model running in that stage. The
|
||||
@ -261,11 +261,12 @@ Let us see how the ``pipeline`` API works:
|
||||
|
||||
from torch.distributed.pipelining import pipeline, SplitPoint
|
||||
|
||||
# An example micro-batch input
|
||||
x = torch.LongTensor([1, 2, 4, 5])
|
||||
|
||||
pipe = pipeline(
|
||||
module=mod,
|
||||
num_chunks=1,
|
||||
example_args=(x,),
|
||||
mb_args=(x,),
|
||||
split_spec={
|
||||
"layers.1": SplitPoint.BEGINNING,
|
||||
}
|
||||
@ -306,7 +307,7 @@ If we ``print(pipe)``, we can see::
|
||||
|
||||
|
||||
The "model partitions" are represented by submodules (``submod_0``,
|
||||
``submod_1``), each of which is reconstructed with original model operations
|
||||
``submod_1``), each of which is reconstructed with original model operations, weights
|
||||
and hierarchies. In addition, a "root-level" ``forward`` function is
|
||||
reconstructed to capture the data flow between those partitions. Such data flow
|
||||
will be replayed by the pipeline runtime later, in a distributed fashion.
|
||||
@ -317,12 +318,29 @@ The ``Pipe`` object provides a method for retrieving the "model partitions":
|
||||
|
||||
stage_mod : nn.Module = pipe.get_stage_module(stage_idx)
|
||||
|
||||
You can also create a distributed stage runtime on a device using ``Pipe``:
|
||||
The returned ``stage_mod`` is a ``nn.Module``, with which you can create an
|
||||
optimizer, save or load checkpoints, or apply other parallelisms.
|
||||
|
||||
``Pipe`` also allows you to create a distributed stage runtime on a device given
|
||||
a ``ProcessGroup``:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
stage = pipe.build_stage(stage_idx, device, group)
|
||||
|
||||
Alternatively, if you would like to build the stage runtime later after some
|
||||
modification to the ``stage_mod``, you can use a functional version of the
|
||||
``build_stage`` API. For example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from torch.distributed.pipelining import build_stage
|
||||
from torch.nn.parallel import DistributedDataParallel
|
||||
|
||||
dp_mod = DistributedDataParallel(stage_mod)
|
||||
info = pipe.info()
|
||||
stage = build_stage(dp_mod, stage_idx, info, device, group)
|
||||
|
||||
.. note::
|
||||
The ``pipeline`` frontend uses a tracer (``torch.export``) to capture your
|
||||
model into a single graph. If your model is not full-graph'able, you can use
|
||||
|
@ -10,6 +10,8 @@ API Methods
|
||||
|
||||
.. autofunction:: torch.distributed.elastic.events.record
|
||||
|
||||
.. autofunction:: torch.distributed.elastic.events.construct_and_record_rdzv_event
|
||||
|
||||
.. autofunction:: torch.distributed.elastic.events.get_logging_handler
|
||||
|
||||
Event Objects
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user