Files
pytorch/torch/csrc/jit/mobile/interpreter.h
Kimish Patel 77a6436cac [Pytorch Mobile] Combining instructions and debug handles in a single struct (#62418)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/62418

Debug handles have a one-to-one correspondence with instructions, so just
combine them into a single struct.
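
A minimal sketch of the shape of the change, reusing the types declared in the
header below (the "before" member names are illustrative assumptions, not the
previous header's actual fields):

// Before (illustrative): two parallel vectors that had to stay in sync by index.
struct CodeBefore {
  std::vector<Instruction> instructions_;
  std::vector<DebugHandle> debug_handles_;
};

// After: one vector whose elements carry both the instruction and its handle.
struct CodeAfter {
  std::vector<InstructionWithDebugHandle> instructions_with_handles_;
};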

Test Plan:
CI

Imported from OSS

Reviewed By: raziel

Differential Revision: D29993661

fbshipit-source-id: 125c7163174cf66624dd95f110fdc8208fea8a07
2021-08-13 21:40:17 -07:00

52 lines
1.7 KiB
C++

#pragma once
#include <ATen/core/ivalue.h>
#include <ATen/core/operator_name.h>
#include <torch/csrc/jit/runtime/instruction.h>
#include <vector>
namespace torch {
namespace jit {
namespace mobile {
using Stack = std::vector<c10::IValue>;
using DebugHandle = int64_t;

struct InstructionWithDebugHandle {
  InstructionWithDebugHandle(Instruction inst, DebugHandle handle)
      : instruction(inst), debug_handle(handle) {}

  Instruction instruction;
  DebugHandle debug_handle;
};

// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
struct Code {
  // TODO: Combine instructions and debug handles vector
  // into std::vector<std::pair<Instruction, DebugHandle>>
  std::vector<InstructionWithDebugHandle> instructions_with_handles_;
  std::vector<c10::OperatorName> op_names_; // Names of the operators the code calls.
  std::vector<std::function<void(Stack&)>> operators_; // Resolved functions for the operators above.
  std::vector<c10::IValue> constants_; // Constant pool referenced by instructions.
  std::vector<c10::TypePtr> types_; // Types referenced by instructions.
  size_t register_size_; // Aggregated output size.
};

struct InterpreterState {
  TORCH_API explicit InterpreterState(std::shared_ptr<Code> code);
  TORCH_API bool run(Stack& stack);

 private:
  std::shared_ptr<Code> code_;
  c10::IValue& reg(size_t reg);
  std::vector<c10::IValue> registers_;
};

// The interpreter executes instructions one by one, in a loop, from a list
// of instructions. The PC is a program counter pointing to the instruction
// currently being executed. This function returns the current PC. Note that
// the PC is recorded only when an exception occurs: it lives in a
// thread-local variable, and updating it on every instruction would add the
// overhead of a thread-local access.
int64_t getInterpretersExceptionPC();
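
// A minimal usage sketch (illustrative only): `code` is a std::shared_ptr<Code>
// and `stack` a Stack prepared by the caller; the debug-handle lookup is an
// assumption made for this example, not something this header prescribes.
//
//   InterpreterState state(code);
//   try {
//     state.run(stack);
//   } catch (const std::exception& e) {
//     // Map the faulting PC back to the failing instruction's debug handle.
//     int64_t pc = getInterpretersExceptionPC();
//     DebugHandle handle =
//         code->instructions_with_handles_[pc].debug_handle;
//     // ... report `handle` alongside the error ...
//   }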
} // namespace mobile
} // namespace jit
} // namespace torch