mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-20 21:14:14 +08:00
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/67134 This diff demos torch::deploy unity which builds the model, the dependencies and the runtime as a unity! The end user only need to use the build_unity rule to replace the python_binary rule to define the python application. Under the hood, we build the python application (an xar file), build the torch deploy runtime, and then embed the python application (the xar file) into the torch deploy runtime. When starting the torch::deploy runtime, the xar will be written to the filesystem and extracted. We put the extracted path to python sys.path so all the model files and all the python dependencies can be found! As a demo, the model here is just a simple python program using numpy and scipy. But theoretically, it can be as complex as we want. I'll check how bento_kernel works. Maybe we can learn from bento_kernel to simplify things a bit. ghstack-source-id: 142085742 Test Plan: ``` #build buck build mode/opt unity:unity # make sure the path exists before we start torch::deploy runtime # Otherwise the dynamic loader will just skip this non-existing path # even though we create it after the runtime starts. mkdir -p /tmp/torch_deploy_python_app/python_app_root #run LD_LIBRARY_PATH=/tmp/torch_deploy_python_app/python_app_root ~/fbcode/buck-out/gen/caffe2/torch/csrc/deploy/unity/unity ``` Reviewed By: suo Differential Revision: D31816526 fbshipit-source-id: 8eba97952aad10dcf1c86779fb3f7e500773d7ee
38 lines
1.3 KiB
C++
38 lines
1.3 KiB
C++
/*
 * This tool provides a shell to the embedded interpreter. Useful to inspect
 * the state of the embedded interpreter interactively.
 */
|
|
#include <c10/util/Flags.h>
|
|
#include <torch/csrc/deploy/deploy.h>
|
|
#include <torch/csrc/deploy/path_environment.h>
|
|
|
|
// Root of the python libraries installed on the system; when non-empty it is
// added to the embedded interpreter's sys.path via PathEnvironment.
C10_DEFINE_string(
    python_path, "",
    "The root of the installed python libraries in the system");
// Optional python script to execute; when empty the tool drops into pdb.
C10_DEFINE_string(pyscript, "", "The path of the python script to execute");
|
|
|
|
// NOLINTNEXTLINE(bugprone-exception-escape)
|
|
int main(int argc, char** argv) {
|
|
c10::ParseCommandLineFlags(&argc, &argv);
|
|
|
|
if (FLAGS_python_path.size() > 0) {
|
|
LOG(INFO) << "Will add " << FLAGS_python_path << " to python sys.path";
|
|
}
|
|
std::shared_ptr<torch::deploy::Environment> env =
|
|
std::make_shared<torch::deploy::PathEnvironment>(FLAGS_python_path);
|
|
// create multiple interpreter instances so the tool does not just cover the
|
|
// simplest case with a single interpreter instance.
|
|
torch::deploy::InterpreterManager m(2, env);
|
|
auto I = m.acquireOne();
|
|
|
|
if (FLAGS_pyscript.size() > 0) {
|
|
auto realpath = I.global("os", "path").attr("expanduser")({FLAGS_pyscript});
|
|
I.global("runpy", "run_path")({realpath});
|
|
} else {
|
|
c10::ArrayRef<torch::deploy::Obj> noArgs;
|
|
I.global("pdb", "set_trace")(noArgs);
|
|
}
|
|
return 0;
|
|
}
|