Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 21:14:14 +08:00)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/34556

According to https://github.com/pytorch/pytorch/pull/34012#discussion_r388581548, the `at::globalContext().setQEngine(at::QEngine::QNNPACK);` call isn't really necessary for mobile: Context.cpp selects the last available QEngine when the engine isn't set explicitly. The OSS mobile prebuild should only include the QNNPACK engine, so the default behavior should already be the desired behavior. The call makes a difference only when USE_FBGEMM is set, and that flag should be off for both the OSS mobile build and the internal mobile build.

Test Plan: Imported from OSS

Differential Revision: D20374522

Pulled By: ljk53

fbshipit-source-id: d4e437a03c6d4f939edccb5c84f02609633a0698
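For context, here is a minimal sketch of the fallback behavior the summary describes, assuming an ATen build where the header below is available (the `main` wrapper and the return-code check are illustrative, not part of the PR):

#include <ATen/Context.h>

int main() {
  // The call this PR removes would pin the quantized backend explicitly:
  //   at::globalContext().setQEngine(at::QEngine::QNNPACK);
  //
  // Without it, Context.cpp falls back to the last entry of the supported
  // engine list. In a QNNPACK-only mobile prebuild that list contains just
  // QNNPACK, so the default already matches the removed call.
  at::QEngine engine = at::globalContext().qEngine();
  return engine == at::QEngine::QNNPACK ? 0 : 1;
}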
54 lines
1.7 KiB
C++
// This is a simple predictor binary that loads a TorchScript CV model and runs
// a forward pass with fixed input `torch::ones({1, 3, 224, 224})`.
// It's used as an end-to-end integration test for the custom mobile build.

#include <iomanip>
#include <iostream>
#include <string>

#include <torch/script.h>

namespace {

struct MobileCallGuard {
  // AutoGrad is disabled for mobile by default.
  torch::autograd::AutoGradMode no_autograd_guard{false};
  // VariableType dispatch is not included in the default mobile build. We need
  // to set this guard globally to avoid dispatch errors (this only matters for
  // dynamic dispatch).
  // Thanks to the unification of the Variable and Tensor classes, it's no
  // longer required to toggle NonVariableTypeMode per op, so it doesn't hurt
  // to always set NonVariableTypeMode for inference-only use cases.
  torch::AutoNonVariableTypeMode non_var_guard{true};
  // Disable the graph optimizer to ensure the list of unused ops is not
  // changed for the custom mobile build.
  torch::jit::GraphOptimizerEnabledGuard no_optimizer_guard{false};
};

torch::jit::script::Module loadModel(const std::string& path) {
  MobileCallGuard guard;
  auto module = torch::jit::load(path);
  module.eval();
  return module;
}

} // namespace

int main(int argc, const char* argv[]) {
  if (argc < 2) {
    std::cerr << "Usage: " << argv[0] << " <model_path>\n";
    return 1;
  }
  auto module = loadModel(argv[1]);
  auto input = torch::ones({1, 3, 224, 224});
  auto output = [&]() {
    MobileCallGuard guard;
    return module.forward({input}).toTensor();
  }();

  // Print the first few output values with fixed precision so the integration
  // test can compare them against expected results.
  std::cout << std::setprecision(3) << std::fixed;
  for (int i = 0; i < 5; i++) {
    std::cout << output.data_ptr<float>()[i] << std::endl;
  }
  return 0;
}
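As a side note on hardening: torch::jit::load throws c10::Error when the model file is missing or malformed, which this test binary doesn't catch. A minimal sketch of a guarded load, assuming the same headers as above (the error message text is illustrative):

#include <iostream>
#include <torch/script.h>

int main(int argc, const char* argv[]) {
  if (argc < 2) {
    return 1;
  }
  try {
    auto module = torch::jit::load(argv[1]);
  } catch (const c10::Error& e) {
    // Illustrative handling: report the failure and exit instead of crashing.
    std::cerr << "Failed to load model: " << e.what() << "\n";
    return 1;
  }
  return 0;
}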