[Lite Trainer] Add necessary registrations for MNIST model (#33717)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/33717

Because of the special treatment of operator names in the lite interpreter, all operators it uses are still prefixed with "_". This diff adds the necessary registrations for the MNIST model. All ops with autograd capability are included in torch_mobile_train. After a rebase, the selective build from D19649074 can be used to strip the unused ops.

Note that this diff is a feasibility test; training accuracy is not covered by the test.
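
For reference, a minimal sketch of the registration pattern this diff adds (the relu entry is lifted from the diff itself; the header choice is an assumption and may differ per build):

```
// Sketch only: register one "_"-prefixed op with an autograd-capable
// (VariableTensorId) kernel, as done for the MNIST ops in this diff.
#include <ATen/ATen.h>
#include <ATen/core/op_registration/op_registration.h> // assumption: op-registration API header

namespace torch { namespace autograd { namespace VariableType {
at::Tensor relu(const at::Tensor& self); // autograd-capable kernel, declared in this diff
}}}

static auto sketch_registry = torch::RegisterOperators().op(
    "_aten::relu(Tensor self) -> Tensor", // "_" prefix as expected by the lite interpreter
    torch::RegisterOperators::options()
        .kernel<at::Tensor(const at::Tensor&)>(
            c10::DispatchKey::VariableTensorId,
            &torch::autograd::VariableType::relu)
        .aliasAnalysis(c10::AliasAnalysisKind::FROM_SCHEMA));
```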
ghstack-source-id: 97780066

Test Plan:
```
buck run xplat/caffe2/fb/lite_trainer:lite_trainer -c pt.disable_gen_tracing=1 -c pt.static_dispatch=0 -- --model=/path/MnistModel.bc
```
{F227898221}

Reviewed By: dreiss

Differential Revision: D19743201

fbshipit-source-id: cacadd76f3729faa0018d147a69466bbf54312fd
Author: Martin Yuan
Date: 2020-03-06 15:40:29 -08:00
Committed by: Facebook Github Bot
Parent: 96ca06cfce
Commit: 01edb7450f
3 changed files with 126 additions and 14 deletions


@@ -22,19 +22,19 @@ bool InterpreterState::run(Stack& stack) {
  while (true) {
    Instruction inst = code_->instructions_[pc];
    // std::cout << "RUNNING " << pc << " " << code_->instructions_[pc];
    // if (inst.op == OP) {
    //   std::cout << ", " << code_->op_names_[inst.X].name << "." <<
    //       code_->op_names_[inst.X].overload_name;
    // }
    // std::cout << std::endl;
    // for (auto val : stack) {
    //   if (val.isTensor()) {
    //     std::cout << val.toTensor().sizes() << std::endl;
    //   } else {
    //     std::cout << val << std::endl;
    //   }
    // }
    switch (inst.op) {
      case OP: {
#if defined(PYTORCH_MOBILE_OPERATOR_OBSERVER)


@@ -2,21 +2,80 @@
#include <ATen/ATen.h>
#include <ATen/core/stack.h>
#include <ATen/TypeDefault.h>
#include <torch/csrc/autograd/function.h>
using Stack = std::vector<c10::IValue>;
using torch::jit::peek;
using torch::jit::drop;
using torch::jit::pack;
using torch::jit::push;
using torch::jit::pop;
using at::Tensor;
using at::Scalar;
using namespace torch::autograd;
using namespace c10;
namespace torch {
namespace autograd {
namespace VariableType {
Tensor mul_Tensor(const Tensor &self, const Tensor &other);
Tensor add_Scalar(const Tensor &self, Scalar other, Scalar alpha);
Tensor conv2d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, int64_t groups);
Tensor view(const Tensor & self, IntArrayRef size);
Tensor log_softmax_int(const Tensor & self, int64_t dim, c10::optional<ScalarType> dtype);
Tensor dropout(const Tensor & input, double p, bool train);
Tensor feature_dropout(const Tensor & input, double p, bool train);
Tensor max_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode);
Tensor relu(const Tensor & self);
Tensor t(const Tensor & self);
Tensor addmm(const Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha);
} // namespace VariableType
} // namespace autograd
} // namespace torch
namespace {
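// Helper: map an IValue that may be None onto an (undefined) at::Tensor,
// matching the optional "Tensor? bias" argument of the conv2d schema below.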
at::Tensor toOptionalTensor(const c10::IValue& v) {
  if (v.isNone()) {
    return at::Tensor();
  }
  return v.toTensor();
}
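// Boxed kernel for conv2d: the seven arguments are peeked off the interpreter
// stack in schema order, then dropped and replaced by the single result.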
void conv2d_kernel(const c10::OperatorHandle& op, Stack* stack) {
  auto input = (std::move(peek(*stack, 0, 7))).toTensor();
  auto weight = (std::move(peek(*stack, 1, 7))).toTensor();
  auto bias = toOptionalTensor(std::move(peek(*stack, 2, 7)));
  RECORD_FUNCTION(
      "conv2d",
      std::vector<c10::IValue>({input, weight, bias}),
      Node::peek_at_next_sequence_nr());
  auto result_ = VariableType::conv2d(
      input,
      weight,
      bias,
      (std::move(peek(*stack, 3, 7))).toIntVector(),
      (std::move(peek(*stack, 4, 7))).toIntVector(),
      (std::move(peek(*stack, 5, 7))).toIntVector(),
      (std::move(peek(*stack, 6, 7))).toInt());
  drop(*stack, 7);
  pack(*stack, std::move(result_));
}
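// Same boxed pattern for view: two stack arguments, one result.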
void view_kernel(const c10::OperatorHandle& op, Stack* stack) {
  auto self = (std::move(peek(*stack, 0, 2))).toTensor();
  auto size = (std::move(peek(*stack, 1, 2))).toIntVector();
  auto result_ = torch::autograd::VariableType::view(self, size);
  drop(*stack, 2);
  pack(*stack, std::move(result_));
}
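// log_softmax carries an optional dtype, read back via toOptional<ScalarType>.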
void log_softmax_kernel(const c10::OperatorHandle& op, Stack* stack) {
  auto self = (std::move(peek(*stack, 0, 3))).toTensor();
  auto dim = (std::move(peek(*stack, 1, 3))).toInt();
  auto dtype = (std::move(peek(*stack, 2, 3))).toOptional<c10::ScalarType>();
  auto result_ = torch::autograd::VariableType::log_softmax_int(self, dim, dtype);
  drop(*stack, 3);
  pack(*stack, std::move(result_));
}
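// Register the "_"-prefixed training ops with VariableTensorId kernels so the
// lite interpreter dispatches to the autograd-enabled implementations.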
static auto registry = torch::RegisterOperators().op(
    "_aten::add.Scalar",
    torch::RegisterOperators::options().kernel(c10::DispatchKey::VariableTensorId, &torch::autograd::VariableType::add_Scalar)
@@ -24,5 +83,43 @@ static auto registry = torch::RegisterOperators().op(
"_aten::mul.Tensor(Tensor self, Tensor other) -> Tensor",
torch::RegisterOperators::options().kernel(c10::DispatchKey::VariableTensorId, &torch::autograd::VariableType::mul_Tensor)
.aliasAnalysis(c10::AliasAnalysisKind::FROM_SCHEMA)
);
).op(torch::RegisterOperators::options()
.schema("_aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, int groups=1) -> Tensor")
.kernel < &conv2d_kernel>(DispatchKey::VariableTensorId)
.aliasAnalysis(c10::AliasAnalysisKind::FROM_SCHEMA)
).op(torch::RegisterOperators::options()
.schema("_aten::dropout(Tensor input, float p, bool train) -> Tensor")
.kernel<Tensor (const Tensor &, double, bool)>(DispatchKey::VariableTensorId, &VariableType::dropout)
.aliasAnalysis(c10::AliasAnalysisKind::FROM_SCHEMA)
).op(torch::RegisterOperators::options()
.schema("_aten::feature_dropout(Tensor input, float p, bool train) -> Tensor")
.kernel<Tensor (const Tensor &, double, bool)>(DispatchKey::VariableTensorId, &VariableType::feature_dropout)
.aliasAnalysis(c10::AliasAnalysisKind::FROM_SCHEMA)
).op(torch::RegisterOperators::options()
.schema("_aten::log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor")
.kernel<log_softmax_kernel>(DispatchKey::VariableTensorId)
.aliasAnalysis(c10::AliasAnalysisKind::FROM_SCHEMA))
.op(
"_aten::max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor",
torch::RegisterOperators::options().kernel(DispatchKey::VariableTensorId,
[](const Tensor & self, c10::List<int64_t> kernel_size, c10::List<int64_t> stride, c10::List<int64_t> padding, c10::List<int64_t> dilation, bool ceil_mode=false) {
return VariableType::max_pool2d(self, kernel_size.vec(), stride.vec(), padding.vec(), dilation.vec(), ceil_mode);
}))
.op(torch::RegisterOperators::options()
.schema("_aten::relu(Tensor self) -> Tensor")
.kernel<Tensor (const Tensor &)>(DispatchKey::VariableTensorId, &VariableType::relu)
.aliasAnalysis(c10::AliasAnalysisKind::FROM_SCHEMA))
.op(torch::RegisterOperators::options()
.schema("_aten::view(Tensor(a) self, int[] size) -> Tensor(a)")
.kernel<&view_kernel>(DispatchKey::VariableTensorId)
.aliasAnalysis(c10::AliasAnalysisKind::FROM_SCHEMA))
.op(torch::RegisterOperators::options()
.schema("_aten::t(Tensor(a) self) -> Tensor(a)")
.kernel<Tensor (const Tensor &), &VariableType::t>(DispatchKey::VariableTensorId)
.aliasAnalysis(c10::AliasAnalysisKind::FROM_SCHEMA))
.op(torch::RegisterOperators::options()
.schema("_aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor")
.kernel<Tensor (const Tensor &, const Tensor &, const Tensor &, Scalar, Scalar)>(DispatchKey::VariableTensorId, &VariableType::addmm)
.aliasAnalysis(c10::AliasAnalysisKind::FROM_SCHEMA))
;
}


@@ -97,6 +97,12 @@ void __is__kernel(const c10::OperatorHandle& op, Stack* stack) {
  push(*stack, self.isSameIdentity(obj));
}
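// __isnot__ mirrors __is__ above: pop two IValues, push the negated identity check.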
void __isnot__kernel(const c10::OperatorHandle& op, Stack* stack) {
  c10::IValue self, obj;
  pop(*stack, self, obj);
  push(*stack, !self.isSameIdentity(obj));
}
void log_softmax_kernel(const c10::OperatorHandle& op, Stack* stack) {
  auto result_ = at::log_softmax(
      (std::move(peek(*stack, 0, 3))).toTensor(),
@@ -413,6 +419,12 @@ static auto registry = torch::RegisterOperators().op(
    [](const Tensor & input, double p, bool train) {
      return at::dropout(input, p, train);
    })
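// feature_dropout's CPU (CPUTensorId) kernel, the counterpart of the
// VariableTensorId registration added for training above.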
).op(
    "_aten::feature_dropout(Tensor input, float p, bool train) -> Tensor",
    torch::RegisterOperators::options().kernel(c10::DispatchKey::CPUTensorId,
        [](const Tensor & input, double p, bool train) {
          return at::feature_dropout(input, p, train);
        })
).op(
    "_aten::permute(Tensor(a) self, int[] dims) -> Tensor(a)",
    torch::RegisterOperators::options()
@@ -451,6 +463,9 @@ static auto registry = torch::RegisterOperators().op(
).op(
    "_aten::__is__(t1 self, t2 obj) -> bool",
    torch::RegisterOperators::options().catchAllKernel<&__is__kernel>()
).op(
    "_aten::__isnot__(t1 self, t2 obj) -> bool",
    torch::RegisterOperators::options().catchAllKernel<&__isnot__kernel>()
).op(
    "_aten::log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor",
    torch::RegisterOperators::options().kernel<&log_softmax_kernel>(c10::DispatchKey::CPUTensorId)