mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-20 21:14:14 +08:00
jit pickling rref (#32959)
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/32959 In the RPC TorchScript call path, we need to pickle/unpickle RRefs; this diff makes the JIT pickler/unpickler able to pickle/unpickle an RRef. It is similar to what is implemented for PyRef::pickle() and PyRef::unpickle(). The pickling/unpickling design assumes it is always coupled with RPC calls. It is not intended for checkpointing a model with an RRef; before checkpointing the model, the user should call rref.to_here() to get the value held inside the RRef. The pickling process is: 1. push the torch.distributed.rpc.rref global string 2. call rref.fork() and create rrefForkData, which is a few IDs and the type string of the value held inside the RRef; the IDs include the RRef id, fork id, caller worker id, callee worker id, and owner worker id 3. push the rrefForkData The unpickling process is: 1. read the torch.distributed.rpc.rref global string, and retrieve the cached global lambda function 2. the global lambda function will get rrefForkData 3. if the callee is also the owner worker id, then get the owner RRef based on the IDs inside rrefForkData and return the OwnerRRef 4. if the callee is not the owner worker id, then create a user RRef using the rrefForkData and return the UserRRef 5. meanwhile the owner RRef will be notified and do reference counting correctly During unpickling, a type_resolver is needed to parse the type string. This type_resolver has a Python dependency, so we get it from the RPC agent and pass it to the unpickler during construction. So we added a type_resolver argument to the JIT unpickler constructor in this diff. ghstack-source-id: 98814793 Test Plan: unit test Differential Revision: D19713293 fbshipit-source-id: 4fd776cdd4ce8f457c4034d79acdfb4cd095c52e
This commit is contained in:
committed by
Facebook Github Bot
parent
481e7f2e78
commit
4d9b649261
@ -12,17 +12,6 @@ namespace rpc {
|
||||
///////////////////// Pickle/Unpickle Helplers ////////////////////////////
|
||||
|
||||
namespace {
|
||||
constexpr int OWNER_IDX = 0; // index of ownerId in the tuple
|
||||
constexpr int RREFID_ON_IDX = 1; // index of RRefId.createdOn_ in the tuple
|
||||
constexpr int RREFID_ID_IDX = 2; // index of RRefId.localId_ in the tuple
|
||||
constexpr int FORKID_ON_IDX = 3; // index of ForkId.createdOn_ in the tuple
|
||||
constexpr int FORKID_ID_IDX = 4; // index of ForkId.localId_ in the tuple
|
||||
constexpr int PARENT_IDX = 5; // index of parent in the tuple
|
||||
constexpr int TYPE_IDX = 6; // index of parent in the tuple
|
||||
|
||||
// NB: if more fields are added, make sure this field is also bumped
|
||||
constexpr int RFD_TUPLE_SIZE = 7; // number of RRefForkData fields in py::tuple
|
||||
|
||||
py::tuple toPyTuple(const RRefForkData& rrefForkData) {
|
||||
// add GIL as it is contructing a py::object
|
||||
pybind11::gil_scoped_acquire ag;
|
||||
@ -40,7 +29,9 @@ RRefForkData fromPyTuple(const py::tuple& pyTuple) {
|
||||
pybind11::gil_scoped_acquire ag;
|
||||
TORCH_INTERNAL_ASSERT(
|
||||
pyTuple.size() == RFD_TUPLE_SIZE,
|
||||
"Pickled RRefForkData must contain 6 numbers.");
|
||||
"Pickled RRefForkData must contain ",
|
||||
RFD_TUPLE_SIZE,
|
||||
" numbers.");
|
||||
worker_id_t ownerId = pyTuple[OWNER_IDX].cast<worker_id_t>();
|
||||
// const reference will extend the lifetime of the temporary variable
|
||||
const RRefId& rrefId = RRefId(
|
||||
|
Reference in New Issue
Block a user