mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 21:14:14 +08:00)
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/18635
Optimize SoftmaxOp on CPU
Reviewed By: houseroad
Differential Revision: D14689516
fbshipit-source-id: d2dcee2476d1a3a21f428e99bce9835f1d229d64
48 lines
1.1 KiB
C++
#ifndef CAFFE2_OPERATORS_SOFTMAX_OP_H_
#define CAFFE2_OPERATORS_SOFTMAX_OP_H_

#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"

namespace caffe2 {

// Computes the softmax of the input along `axis` (default 1). Dimensions
// before `axis` are treated as independent rows; dimensions from `axis`
// onward form the vector that is normalized.
template <typename T, class Context>
class SoftmaxOp final : public Operator<Context> {
 public:
  template <class... Args>
  explicit SoftmaxOp(Args&&... args)
      : Operator<Context>(std::forward<Args>(args)...),
        axis_(this->template GetSingleArgument<int>("axis", 1)) {}
  USE_OPERATOR_CONTEXT_FUNCTIONS;

  bool RunOnDevice() override;

 protected:
  // Axis along which softmax is applied.
  int axis_;
  // Scratch buffers reused across runs.
  Tensor scale_;
  Tensor rowmax_;
  Tensor sum_multiplier_;
};

// Gradient of SoftmaxOp with respect to its input.
template <typename T, class Context>
class SoftmaxGradientOp final : public Operator<Context> {
 public:
  template <class... Args>
  explicit SoftmaxGradientOp(Args&&... args)
      : Operator<Context>(std::forward<Args>(args)...),
        axis_(this->template GetSingleArgument<int>("axis", 1)) {}
  USE_OPERATOR_CONTEXT_FUNCTIONS;
  bool RunOnDevice() override;

 protected:
  int axis_;
  Tensor scale_;
  Tensor sum_multiplier_;
};

} // namespace caffe2

#endif // CAFFE2_OPERATORS_SOFTMAX_OP_H_
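For orientation, here is a minimal standalone sketch of the row-wise softmax that SoftmaxOp declares, assuming the input has already been flattened to a 2D [rows, cols] view around `axis_`. The function name `softmax_2d` and the raw-pointer interface are illustrative only and are not part of the Caffe2 API; the actual CPU kernel lives in softmax_op.cc and softmax_utils.

#include <algorithm>
#include <cmath>

// Illustrative sketch: row-wise softmax over a [rows, cols] buffer.
// Each row is shifted by its maximum before exponentiation for numerical
// stability, then normalized by its sum.
void softmax_2d(const float* x, float* y, int rows, int cols) {
  for (int i = 0; i < rows; ++i) {
    const float* xi = x + i * cols;
    float* yi = y + i * cols;
    const float row_max = *std::max_element(xi, xi + cols);
    float sum = 0.0f;
    for (int j = 0; j < cols; ++j) {
      yi[j] = std::exp(xi[j] - row_max);
      sum += yi[j];
    }
    for (int j = 0; j < cols; ++j) {
      yi[j] /= sum;
    }
  }
}

The per-row maximum and per-row sum in this sketch correspond to the `rowmax_` and `scale_` scratch tensors held by the operator, which are kept as members so their allocations can be reused across RunOnDevice() calls.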