mirror of
https://github.com/pytorch/pytorch.git
synced 2025-11-07 10:01:39 +08:00
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/29881 Breaking these into separate files allows us to have three different builds: - Mobile inference-only. - Mobile with module saving. - Server with module saving and other export functions like ONNX. And this can be accomplished just by selecting which cpp files to compile, without setting any preprocessor flags. Test Plan: CI. Local mobile+saving build. Reviewed By: smessmer Differential Revision: D18509296 fbshipit-source-id: 9438273bac4624df5c7f035b2bacb901cce43053
32 lines
788 B
C++
#include <torch/csrc/jit/script/module.h>
|
|
#include <torch/csrc/jit/export.h>
|
|
|
|
namespace torch {
|
|
namespace jit {
|
|
namespace script {
|
|
|
|
/// Serializes this module to `out` in the regular TorchScript archive
/// format.
///
/// @param out          Destination stream; written but not flushed/closed.
/// @param extra_files  Archive-relative paths mapped to extra payloads to
///                     embed alongside the model.
void Module::save(std::ostream& out, const ExtraFilesMap& extra_files) const {
  // Final flag presumably selects the mobile bytecode format (cf.
  // _save_for_mobile, which passes true); here we want the full
  // TorchScript format.
  constexpr bool bytecode_format = false;
  ExportModule(*this, out, extra_files, bytecode_format);
}
|
|
|
|
void Module::save(const std::string& filename, const ExtraFilesMap& extra_files)
|
|
const {
|
|
ExportModule(*this, filename, extra_files, false);
|
|
}
|
|
|
|
void Module::_save_for_mobile(
|
|
std::ostream& out,
|
|
const ExtraFilesMap& extra_files) const {
|
|
ExportModule(*this, out, extra_files, true);
|
|
}
|
|
|
|
void Module::_save_for_mobile(
|
|
const std::string& filename,
|
|
const ExtraFilesMap& extra_files) const {
|
|
ExportModule(*this, filename, extra_files, true);
|
|
}
|
|
|
|
} // namespace script
|
|
} // namespace jit
|
|
} // namespace torch
|