mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-20 21:14:14 +08:00
Summary: Torch Native Runtime RFC: https://github.com/pytorch/rfcs/pull/72 As part of the effort to open source TorchNativeRuntime (or what we call Sigmoid), we are moving the implementation to torch/: fbcode/sigmoid/kernels -> fbcode/caffe2/torch/nativert/kernels Test Plan: CI Differential Revision: D77032074 Pull Request resolved: https://github.com/pytorch/pytorch/pull/156507 Approved by: https://github.com/zhxchen17
30 lines
676 B
C++
30 lines
676 B
C++
#pragma once

#include <memory>
#include <vector>

#include <c10/core/Device.h>

#include <torch/nativert/executor/ExecutionFrame.h>
#include <torch/nativert/executor/GraphExecutorBase.h>
#include <torch/nativert/graph/Graph.h>

namespace torch::nativert {
|
|
|
|
// Kernel that dispatches a single higher-order op node (cond, while_loop,
// or run_const_graph) by running one or more pre-built subgraph executors.
// NOTE(review): OpKernel is not declared in this header's visible includes —
// presumably it arrives transitively via one of the executor headers; confirm.
class HigherOrderKernel : public OpKernel {
  // Which higher-order op this kernel instance implements. UNKNOWN is the
  // default/uninitialized state; presumably the constructor resolves the
  // concrete type from `node` — TODO confirm against the .cpp.
  enum class OpType {
    UNKNOWN,
    COND,
    WHILE_LOOP,
    RUN_CONST_GRAPH,
  };

 public:
  // `node`: the graph node this kernel executes (non-owning pointer).
  // `graphExecutors`: executors for the op's subgraphs, taken by value and
  // owned by this kernel (e.g. the two branches of a cond — assumption,
  // verify against callers).
  HigherOrderKernel(
      const Node* node,
      std::vector<std::unique_ptr<GraphExecutorBase>> graphExecutors);

  // Runs the op against the given frame; `final` — subclasses may not
  // override the dispatch.
  void computeInternal(ExecutionFrame& executionFrame) const final;

 private:
  // Owned subgraph executors; indexing scheme is op-specific (defined in
  // the .cpp, not visible here).
  std::vector<std::unique_ptr<GraphExecutorBase>> graphExecutors_;
  // Resolved op kind; drives dispatch inside computeInternal.
  OpType opType_;
};
|
|
|
|
} // namespace torch::nativert
|