mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-21 13:44:15 +08:00
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/59298 After recent changes, LazyStreamContext had in fact always become eager, making it equivalent to a plain vector of streams. It therefore makes more sense to remove this abstraction and use a more self-descriptive type. This PR migrates the RequestCallback internals. The next PR migrates the TensorPipe agent. ghstack-source-id: 130583774 Test Plan: CI Reviewed By: mrshenli Differential Revision: D28789175 fbshipit-source-id: fa581a50f9a6a1e42c2ad8c808a9b099bea7433e
26 lines
883 B
C++
26 lines
883 B
C++
#include <torch/csrc/distributed/rpc/request_callback.h>
|
|
|
|
#include <torch/csrc/distributed/autograd/context/container.h>
|
|
#include <torch/csrc/distributed/autograd/utils.h>
|
|
|
|
namespace torch {
namespace distributed {
namespace rpc {

using namespace torch::distributed::autograd;

// Entry point for handling one incoming RPC request. Forwards the request
// message, together with the streams associated with it (presumably the
// streams the payload arrived on — confirm with the calling agent), to the
// processMessage() implementation, and returns the future produced by it.
// The streams vector is moved, not copied, into processMessage().
c10::intrusive_ptr<JitFuture> RequestCallback::operator()(
    Message& request,
    std::vector<c10::Stream> streams) const {
  // NB: cannot clear autograd context id here because the processMessage method
  // might pause waiting for all RRefs in the arguments to be confirmed by their
  // owners and resume processing in a different thread. Hence, the
  // thread_local context id needs to be set and cleared in the thread that
  // indeed carries out the processing logic.
  return processMessage(request, std::move(streams));
}

} // namespace rpc
} // namespace distributed
} // namespace torch
|