AutoNonVariableTypeMode->InferenceMode in OSS. (#56421)

Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/56421

Test Plan: Imported from OSS

Reviewed By: bertmaher

Differential Revision: D27866609

Pulled By: ailzhang

fbshipit-source-id: 040991a031c5511501b03cfe21a4a636586e120e
Authored by Ailing Zhang on 2021-04-19 18:03:47 -07:00
Committed by Facebook GitHub Bot
Parent: 5b4c3a9da1
Commit: f096245610
5 changed files with 7 additions and 22 deletions
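Every file below makes the same substitution: the legacy torch::AutoNonVariableTypeMode guard, plus whatever grad-mode guard accompanied it, collapses into a single c10::InferenceMode RAII guard. A minimal before/after sketch of the pattern, distilled from the hunks that follow:

  // Before: two scoped guards for an inference-only code path.
  torch::autograd::AutoGradMode no_autograd_guard{false};
  torch::AutoNonVariableTypeMode non_var_guard{true};

  // After: one scoped guard. InferenceMode implies grad mode is off and
  // additionally skips version-counter and view-metadata bookkeeping.
  c10::InferenceMode guard;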


@@ -201,8 +201,7 @@ After that, you can use libtorch C++ API from your native code.
 namespace pytorch_testapp_jni {
 namespace {
 struct JITCallGuard {
-  torch::autograd::AutoGradMode no_autograd_guard{false};
-  torch::AutoNonVariableTypeMode non_var_guard{true};
+  c10::InferenceMode guard;
   torch::jit::GraphOptimizerEnabledGuard no_optimizer_guard{false};
 };
 }
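For context, JITCallGuard bundles the scoped settings under which the test app calls into TorchScript. A minimal usage sketch under that assumption (the runModule helper, its arguments, and the local instantiation are hypothetical, not part of this patch):

  #include <torch/script.h>

  namespace {
  struct JITCallGuard {
    c10::InferenceMode guard;
    torch::jit::GraphOptimizerEnabledGuard no_optimizer_guard{false};
  };
  } // namespace

  // Hypothetical helper: run a TorchScript module under the guard.
  torch::Tensor runModule(const std::string& path, const torch::Tensor& input) {
    JITCallGuard guard; // inference mode on, graph optimizer off, in this scope
    torch::jit::script::Module module = torch::jit::load(path);
    return module.forward({input}).toTensor();
  }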


@@ -26,14 +26,8 @@ namespace pytorch_jni {
 namespace {
 struct JITCallGuard {
-  // AutoGrad is disabled for mobile by default.
-  torch::autograd::AutoGradMode no_autograd_guard{false};
-  // VariableType dispatch is not included in default mobile build. We need set
-  // this guard globally to avoid dispatch error (only for dynamic dispatch).
-  // Thanks to the unification of Variable class and Tensor class it's no longer
-  // required to toggle the NonVariableTypeMode per op - so it doesn't hurt to
-  // always set NonVariableTypeMode for inference only use case.
-  torch::AutoNonVariableTypeMode non_var_guard{true};
+  // Inference only workload.
+  c10::InferenceMode guard;
   // Disable graph optimizer to ensure list of unused ops are not changed for
   // custom mobile build.
   torch::jit::GraphOptimizerEnabledGuard no_optimizer_guard{false};
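A usage note beyond this patch: c10::InferenceMode takes an optional boolean, so a region that must restore normal autograd bookkeeping inside an inference scope can nest a disabled guard. Sketch (assumed public-API behavior, not code from this change):

  c10::InferenceMode inference;               // enabled
  {
    c10::InferenceMode not_inference{false};  // back to normal mode here
    // ... work that needs gradient tracking ...
  }                                           // inference mode resumes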


@@ -17,14 +17,8 @@ namespace pytorch_jni {
 namespace {
 struct LiteJITCallGuard {
-  // VariableType dispatch is not included in default mobile build. We need set
-  // this guard globally to avoid dispatch error (only for dynamic dispatch).
-  // Thanks to the unification of Variable class and Tensor class it's no longer
-  // required to toggle the NonVariableTypeMode per op - so it doesn't hurt to
-  // always set NonVariableTypeMode for inference only use case.
-  // TODO: avoid having to set this guard for custom mobile build with mobile
-  // interpreter.
-  torch::AutoNonVariableTypeMode non_var_guard{true};
+  // Inference only workload.
+  c10::InferenceMode guard;
 };
 } // namespace
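The "Inference only workload." comment is load-bearing: tensors allocated while the guard is active are inference tensors, and feeding them into autograd-recording code later is an error. A small sketch of that semantics (function name is illustrative):

  #include <torch/torch.h>

  torch::Tensor make_inference_tensor() {
    c10::InferenceMode guard;        // grad mode off, no version counters
    return torch::ones({2, 2}) * 3;  // result is an inference tensor
  }
  // Using the result where gradients are recorded, outside the guard,
  // raises an error; hence the guard fits only inference-only paths.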


@@ -24,8 +24,7 @@ void log(const char* m, T t) {
 }
 struct JITCallGuard {
-  torch::autograd::AutoGradMode no_autograd_guard{false};
-  torch::AutoNonVariableTypeMode non_var_guard{true};
+  c10::InferenceMode guard;
   torch::jit::GraphOptimizerEnabledGuard no_optimizer_guard{false};
 };
 } // namespace


@@ -32,8 +32,7 @@ static void FusedOverhead(benchmark::State& state) {
 }
 static void UnfusedOverhead(benchmark::State& state) {
-  torch::NoGradGuard ng;
-  torch::AutoNonVariableTypeMode nv;
+  c10::InferenceMode guard;
   overrideCanFuseOnCPU(false);
   Module m("m");
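In the benchmark, one guard replaces the NoGradGuard/AutoNonVariableTypeMode pair because entering InferenceMode already reports grad mode as disabled. A sanity-check sketch of that relationship (assumed behavior of the public API, not code from this patch):

  #include <torch/torch.h>

  void check_guard_state() {
    c10::InferenceMode guard;
    TORCH_CHECK(c10::InferenceMode::is_enabled());
    TORCH_CHECK(!at::GradMode::is_enabled()); // NoGradGuard is subsumed
  }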