This is the first step towards hiding the flatbuffers types and headers from the load/serialize APIs. The two new functions make it possible to load modules without using `GetMutableModule` (defined by the generated header) or `FlatbufferLoader` (which depends on flatbuffers types). D38292794 will remove the functions/class marked DEPRECATED here after migrating existing users.

Differential Revision: [D38292793](https://our.internmc.facebook.com/intern/diff/D38292793/)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/82618
Approved by: https://github.com/qihqi
#pragma once

#include <ATen/core/ivalue.h>
#include <caffe2/serialize/inline_container.h>
#include <torch/csrc/jit/mobile/function.h>
#include <torch/csrc/jit/mobile/interpreter.h>
#include <torch/csrc/jit/mobile/module.h>
#include <torch/csrc/jit/runtime/instruction.h>
#include <torch/csrc/jit/serialization/mobile_bytecode_generated.h> // NOLINT
#include <torch/custom_class.h>

#include <array>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>

/**
 * Defines the public API for loading flatbuffer-serialized mobile modules.
 */

namespace torch {
namespace jit {

/// Maps file names to file contents.
using ExtraFilesMap = std::unordered_map<std::string, std::string>;

// At a high level, to produce a Module from a file on disk, we need to go
// through the following steps:
// 1. Read: Read the file from disk -> memory
// 2. Deserialize: Parse the bytes to produce some in-memory, manipulable
//    structure
// 3. Module initialization: Produce a mobile::Module out of the structure
//    produced in 2.
// In this context, the structure described in 2. is
// mobile::serialization::Module

/// DEPRECATED: Use a parse/load function below.
// Parse a mobile::Module from flatbuffer's in-memory Module representation.
// The caller is assumed to manage the lifetime of the Module.
// This function does step 3 described above.
// If should_copy_tensor_memory is true, then the returned module will NOT
// have references to flatbuffer_module, so it can be discarded.
// If should_copy_tensor_memory is false, then the returned module will have
// tensors that point inside of flatbuffer_module; the caller needs to make
// sure that flatbuffer_module outlives the returned Module.
TORCH_API mobile::Module initialize_mobile_module(
    mobile::serialization::Module* flatbuffer_module,
    c10::optional<at::Device> device = c10::nullopt,
    bool should_copy_tensor_memory = false);

// Parse a mobile::Module from raw bytes.
//
// This function does steps 2+3 described above.
//
// Does not take ownership of `data`; if you want it to take ownership, see
// the shared_ptr overload of this function.
//
// If should_copy_tensor_memory is true, then the returned module will NOT
// have references to `data`, so `data` can be freed immediately.
//
// If should_copy_tensor_memory is false, then the returned module will have
// tensors that point inside of `data`; the caller will need to make sure
// that `data` outlives the returned Module.
TORCH_API mobile::Module parse_and_initialize_mobile_module(
    void* data,
    size_t size, // of `data`, in bytes.
    c10::optional<at::Device> device = c10::nullopt,
    ExtraFilesMap* extra_files = nullptr,
    bool should_copy_tensor_memory = false);

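// Example (a minimal sketch, not part of this API; the file name and the
// read-into-buffer code are hypothetical, and the buffer is assumed to be
// suitably aligned for flatbuffers):
//
//   std::ifstream in("model.ff", std::ios::binary | std::ios::ate);
//   std::streamsize n = in.tellg();
//   in.seekg(0, std::ios::beg);
//   std::vector<char> buf(static_cast<size_t>(n));
//   in.read(buf.data(), n); // step 1: disk -> memory
//
//   // steps 2+3: with should_copy_tensor_memory=true, `buf` can be freed
//   // as soon as this call returns.
//   mobile::Module m = parse_and_initialize_mobile_module(
//       buf.data(), buf.size(), c10::nullopt, /*extra_files=*/nullptr,
//       /*should_copy_tensor_memory=*/true);
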
// Parse a mobile::Module from raw bytes.
//
// This function does steps 2+3 described above.
//
// The returned Module holds a reference to `data`.
//
// If you do not want the Module to hold a reference to `data`, see the raw
// pointer overload of this function.
TORCH_API mobile::Module parse_and_initialize_mobile_module(
    std::shared_ptr<char> data,
    size_t size, // of `data`, in bytes.
    c10::optional<at::Device> device = c10::nullopt,
    ExtraFilesMap* extra_files = nullptr);

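// Example (a minimal sketch; filling the buffer is elided and `n` is
// hypothetical). Because the returned Module shares ownership of `data`,
// the local pointer can safely go out of scope:
//
//   std::shared_ptr<char> data(new char[n], std::default_delete<char[]>());
//   // ... copy n bytes of flatbuffer content into data.get() ...
//   mobile::Module m = parse_and_initialize_mobile_module(data, n);
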
// Parse a mobile::Module from raw bytes, also returning JIT-related metadata.
//
// This is the same as parse_and_initialize_mobile_module() except that it
// also extracts JIT source files and constants. Can be used to construct a
// jit::Module.
TORCH_API mobile::Module parse_and_initialize_mobile_module_for_jit(
    void* data,
    size_t size, // of `data`, in bytes.
    ExtraFilesMap& jit_sources,
    std::vector<IValue>& jit_constants,
    c10::optional<at::Device> device = c10::nullopt,
    ExtraFilesMap* extra_files = nullptr);

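// Example (a minimal sketch; `data`/`n` are assumed to hold flatbuffer
// content as in the examples above):
//
//   ExtraFilesMap jit_sources;
//   std::vector<IValue> jit_constants;
//   mobile::Module m = parse_and_initialize_mobile_module_for_jit(
//       data, n, jit_sources, jit_constants);
//   // jit_sources and jit_constants now hold the extracted JIT source
//   // files and constants.
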
// Load a mobile::Module from a filepath.
//
// This function does steps 1+2+3 described above.
//
// We need to have this as a convenience because the Python API will need to
// wrap it. C++ clients should use one of the versions of
// parse_and_initialize_mobile_module() so they can manage the raw data more
// directly.
TORCH_API mobile::Module load_mobile_module_from_file(
    const std::string& filename,
    c10::optional<at::Device> device = c10::nullopt,
    ExtraFilesMap* extra_files = nullptr);

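// Example (a minimal sketch; the path is hypothetical, and whether the
// module exposes forward() depends on the model):
//
//   ExtraFilesMap extra_files;
//   mobile::Module m = load_mobile_module_from_file(
//       "model.ff", c10::nullopt, &extra_files);
//   auto out = m.forward({at::ones({1, 3})});
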
/// DEPRECATED: Use the `extra_files` parameter of one of the parse/load
/// functions above.
TORCH_API void parseExtraFiles(
    mobile::serialization::Module* module,
    ExtraFilesMap& extra_files);

TORCH_API uint64_t get_bytecode_version(std::istream& in);
TORCH_API uint64_t get_bytecode_version(const std::string& filename);
TORCH_API uint64_t get_bytecode_version_from_bytes(char* flatbuffer_content);

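// Example (a minimal sketch; the path is hypothetical, and `buf` is a
// buffer read as in the examples above -- note the non-const char*):
//
//   uint64_t v = get_bytecode_version("model.ff");
//   uint64_t v2 = get_bytecode_version_from_bytes(buf.data());
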
TORCH_API mobile::ModuleInfo get_module_info_from_flatbuffer(
    char* flatbuffer_content);

// The methods below are less efficient because they need to read the stream
// in its entirety into a buffer.
TORCH_API mobile::Module load_mobile_module_from_stream_with_copy(
    std::istream& in,
    c10::optional<at::Device> device = c10::nullopt,
    ExtraFilesMap* extra_files = nullptr);

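// Example (a minimal sketch; the stream source is hypothetical). The
// stream is fully copied into a buffer, so it need not outlive the
// returned Module:
//
//   std::ifstream ifs("model.ff", std::ios::binary);
//   mobile::Module m = load_mobile_module_from_stream_with_copy(ifs);
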
// This function makes the ability to load a Module from a flatbuffer file
// available to _load_for_mobile and friends. It is NOT needed if the other
// functions in this file are used directly.
TORCH_API bool register_flatbuffer_loader();

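// Example (a minimal sketch; _load_for_mobile() is declared in
// torch/csrc/jit/mobile/import.h, and the path is hypothetical):
//
//   register_flatbuffer_loader(); // e.g. once at startup
//   mobile::Module m = _load_for_mobile("model.ff");
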
/// DEPRECATED: Use one of the parse/load functions above.
class TORCH_API FlatbufferLoader {
 public:
  FlatbufferLoader();

  typedef IValue (
      *IValueParser)(FlatbufferLoader&, const mobile::serialization::IValue&);
  void registerIValueParser(
      mobile::serialization::IValueUnion ivalue_type,
      IValueParser parser);
  mobile::Module parseModule(mobile::serialization::Module* module);

  void extractJitSourceAndConstants(
      ExtraFilesMap* jit_sources,
      std::vector<IValue>* constants);

  typedef TypePtr (*TypeResolver)(
      const std::string& type_str,
      std::shared_ptr<CompilationUnit> cu);

  void internal_registerTypeResolver(TypeResolver type_resolver);

  IValue& getIValue(uint32_t pos) {
    TORCH_CHECK(pos < all_ivalues_.size());
    return all_ivalues_[pos];
  }

  mobile::Function* getFunction(uint32_t pos) {
    return all_functions_[pos];
  }

  ClassTypePtr getType(uint32_t pos) {
    TORCH_CHECK(pos < all_ivalues_.size());
    return all_types_[pos];
  }

  c10::Storage getStorage(uint32_t index);
  TypePtr getOrCreateTypeAnnotations(const flatbuffers::String* offset);
  ClassTypePtr getOrCreateClassTypeForObject(
      const mobile::serialization::Object* object);

  const mobile::serialization::Module* getCurrentFlatbufferInput() {
    return module_;
  }

  bool getShouldCopyTensorMemory() {
    return should_copy_tensor_memory_;
  }

  void setShouldCopyTensorMemory(bool should_copy_tensor_memory) {
    should_copy_tensor_memory_ = should_copy_tensor_memory;
  }

  // Whether to load operators in functions. Not loading operators is useful
  // because we throw an exception when an operator is not found, and
  // sometimes we want to print out which operators are included before that
  // happens, for debugging.
  void setShouldLoadOperators(bool should_load_operators) {
    should_load_operators_ = should_load_operators;
  }

  std::shared_ptr<mobile::CompilationUnit> mcu_;
  std::shared_ptr<CompilationUnit> cu_;

 private:
  IValue parseIValue(const mobile::serialization::IValue* ivalue);
  std::unique_ptr<mobile::Function> parseFunction(
      const mobile::serialization::Function* method);
  void parseAndPopulate(
      uint32_t i,
      const mobile::serialization::IValue* ivalue);

  std::unordered_map<uint32_t, mobile::Function*> all_functions_;
  std::vector<ClassTypePtr> all_types_;
  std::unordered_set<uint32_t> initialized_types_;
  std::unordered_map<const flatbuffers::String*, TypePtr> type_annotations_;
  std::vector<bool> storage_loaded_;
  std::vector<c10::Storage> storages_;
  std::vector<IValue> all_ivalues_;
  std::array<
      IValueParser,
      static_cast<uint8_t>(mobile::serialization::IValueUnion::MAX) + 1>
      ivalue_parsers_;
  TypeResolver type_resolver_ = nullptr;
  mobile::serialization::Module* module_ = nullptr;
  bool module_parsed_ = false;
  bool should_copy_tensor_memory_ = false;
  bool should_load_operators_ = true;
  // Elements 0 to mobile_ivalue_size_ come from the mobile module.
  uint32_t mobile_ivalue_size_ = 0;
};

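// Example of the deprecated flow (a sketch; `data` is assumed to point to
// flatbuffer content, and GetMutableModule comes from the generated header
// that the parse/load functions above are meant to hide):
//
//   auto* fb_module = mobile::serialization::GetMutableModule(data);
//   FlatbufferLoader loader;
//   mobile::Module m = loader.parseModule(fb_module);
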
} // namespace jit
} // namespace torch