Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 21:14:14 +08:00
AutoNonVariableTypeMode->InferenceMode in OSS. (#56421)
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/56421

Test Plan: Imported from OSS

Reviewed By: bertmaher

Differential Revision: D27866609

Pulled By: ailzhang

fbshipit-source-id: 040991a031c5511501b03cfe21a4a636586e120e
committed by Facebook GitHub Bot
parent 5b4c3a9da1
commit f096245610
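For orientation, here is a minimal sketch (not part of this commit) of the inference-call pattern the diffs below converge on: one RAII c10::InferenceMode guard in place of the old AutoGradMode{false} plus AutoNonVariableTypeMode{true} pair. The "model.pt" path and the 1x3x224x224 input are hypothetical placeholders; the mobile guards in the diffs additionally disable the JIT graph optimizer.

// Illustrative sketch only, assuming a linked libtorch; "model.pt" and the
// input shape are hypothetical placeholders, not taken from this commit.
#include <torch/script.h>
#include <iostream>

int main() {
  // One guard now covers what AutoGradMode{false} and
  // AutoNonVariableTypeMode{true} did together for inference-only work:
  // no autograd recording and no VariableType dispatch.
  c10::InferenceMode guard;
  torch::jit::Module module = torch::jit::load("model.pt");
  torch::Tensor input = torch::ones({1, 3, 224, 224});
  torch::Tensor output = module.forward({input}).toTensor();
  std::cout << output.sizes() << std::endl;
  return 0;
}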
@@ -201,8 +201,7 @@ After that, you can use libtorch C++ API from your native code.
 namespace pytorch_testapp_jni {
 namespace {
 struct JITCallGuard {
-  torch::autograd::AutoGradMode no_autograd_guard{false};
-  torch::AutoNonVariableTypeMode non_var_guard{true};
+  c10::InferenceMode guard;
   torch::jit::GraphOptimizerEnabledGuard no_optimizer_guard{false};
 };
 }
@@ -26,14 +26,8 @@ namespace pytorch_jni {
 namespace {

 struct JITCallGuard {
-  // AutoGrad is disabled for mobile by default.
-  torch::autograd::AutoGradMode no_autograd_guard{false};
-  // VariableType dispatch is not included in default mobile build. We need set
-  // this guard globally to avoid dispatch error (only for dynamic dispatch).
-  // Thanks to the unification of Variable class and Tensor class it's no longer
-  // required to toggle the NonVariableTypeMode per op - so it doesn't hurt to
-  // always set NonVariableTypeMode for inference only use case.
-  torch::AutoNonVariableTypeMode non_var_guard{true};
+  // Inference only workload.
+  c10::InferenceMode guard;
   // Disable graph optimizer to ensure list of unused ops are not changed for
   // custom mobile build.
   torch::jit::GraphOptimizerEnabledGuard no_optimizer_guard{false};
@@ -17,14 +17,8 @@ namespace pytorch_jni {
 namespace {

 struct LiteJITCallGuard {
-  // VariableType dispatch is not included in default mobile build. We need set
-  // this guard globally to avoid dispatch error (only for dynamic dispatch).
-  // Thanks to the unification of Variable class and Tensor class it's no longer
-  // required to toggle the NonVariableTypeMode per op - so it doesn't hurt to
-  // always set NonVariableTypeMode for inference only use case.
-  // TODO: avoid having to set this guard for custom mobile build with mobile
-  // interpreter.
-  torch::AutoNonVariableTypeMode non_var_guard{true};
+  // Inference only workload.
+  c10::InferenceMode guard;
 };

 } // namespace
@@ -24,8 +24,7 @@ void log(const char* m, T t) {
 }

 struct JITCallGuard {
-  torch::autograd::AutoGradMode no_autograd_guard{false};
-  torch::AutoNonVariableTypeMode non_var_guard{true};
+  c10::InferenceMode guard;
   torch::jit::GraphOptimizerEnabledGuard no_optimizer_guard{false};
 };
 } // namespace
@@ -32,8 +32,7 @@ static void FusedOverhead(benchmark::State& state) {
 }

 static void UnfusedOverhead(benchmark::State& state) {
-  torch::NoGradGuard ng;
-  torch::AutoNonVariableTypeMode nv;
+  c10::InferenceMode guard;
   overrideCanFuseOnCPU(false);

   Module m("m");
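To see why the benchmark can drop both torch::NoGradGuard and torch::AutoNonVariableTypeMode at once, a small standalone sketch (an illustration, not from the commit): c10::InferenceMode turns grad mode off for its scope and restores it afterwards, so the separate no-grad guard becomes redundant.

#include <torch/torch.h>
#include <iostream>

int main() {
  std::cout << std::boolalpha;
  std::cout << c10::GradMode::is_enabled() << '\n'; // true by default
  {
    c10::InferenceMode guard;
    // Grad mode is off inside this scope, so a separate NoGradGuard
    // would be redundant.
    std::cout << c10::GradMode::is_enabled() << '\n'; // false
    torch::Tensor x = torch::ones({8});
    torch::Tensor y = x + x; // executes without autograd bookkeeping
    std::cout << y.requires_grad() << '\n'; // false
  }
  std::cout << c10::GradMode::is_enabled() << '\n'; // true again
  return 0;
}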