A kernel has "dispatcher convention" if there is an additional keyset argument at the beginning of its argument list. This PR:
- adds a way to register kernels in dispatcher convention using Library.impl (pass dispatcher_convention = True)
- adds OpOverload.redispatch

We use both of the above in the new custom ops API: we register the autograd kernel in dispatcher convention so that we can actually call redispatch the way PyTorch's built-in ops do.

Test Plan:
- existing tests

Pull Request resolved: https://github.com/pytorch/pytorch/pull/124089
Approved by: https://github.com/albanD
ghstack dependencies: #123937, #124064, #124065, #124066, #124071
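To illustrate the description above, here is a minimal sketch of what the mechanism enables. It is not taken from the PR itself: the keyword name (the description says dispatcher_convention; the header below uses with_keyset, which is what the sketch assumes), the helper torch._C._after_autograd_keyset, and the mylib/mymul names are all assumptions made for the example.

    import torch
    from torch.library import Library

    lib = Library("mylib", "DEF")  # hypothetical library and op names
    lib.define("mymul(Tensor x, Tensor y) -> Tensor")

    def mymul_cpu(x, y):
        return x * y

    lib.impl("mymul", mymul_cpu, "CPU")

    def mymul_autograd(keyset, x, y):
        # Dispatcher convention: the current DispatchKeySet arrives as the
        # first argument, mirroring how built-in C++ kernels receive it.
        op = torch.ops.mylib.mymul.default
        # Re-enter the dispatcher below the autograd keys, the way built-in
        # autograd kernels do. _after_autograd_keyset is an assumed helper.
        return op.redispatch(keyset & torch._C._after_autograd_keyset, x, y)

    # Register the autograd kernel in dispatcher convention (the assumed
    # keyword here is with_keyset=True).
    lib.impl("mymul", mymul_autograd, "Autograd", with_keyset=True)

Passing the keyset through lets a Python kernel participate in redispatch like a C++ kernel: it masks out the keys it has handled and hands the call back to the dispatcher instead of re-entering the op from the top.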
20 lines
429 B
C++
#include <pybind11/pybind11.h>

#include <torch/csrc/utils/pybind.h>

namespace torch {
namespace impl {
namespace dispatch {

void initDispatchBindings(PyObject* module);

// Trampoline that forwards boxed dispatcher calls to a kernel registered
// from Python. When with_keyset is true, the kernel was registered in
// dispatcher convention and receives the current DispatchKeySet as its
// first argument.
void python_op_registration_trampoline_impl(
    const c10::OperatorHandle& op,
    c10::DispatchKey key,
    c10::DispatchKeySet keyset,
    torch::jit::Stack* stack,
    bool with_keyset);

} // namespace dispatch
} // namespace impl
} // namespace torch