Add source information to IR nodes (#5449)
* Add source information to IR nodes

SourceRange information from the script is now propagated to IR nodes. This information is currently used in only two places: the interpreter wraps errors that occur while an instruction executes, and shape propagation now reports errors on the line where it fails:

    Traceback (most recent call last):
      File "test/test_jit.py", line 1655, in test_script_error
        bar(Variable(torch.rand(10), requires_grad=True), Variable(torch.rand(9), requires_grad=True))
    RuntimeError: The size of tensor a (10) must match the size of tensor b (9) at non-singleton dimension 0:
    @torch.jit.script
    def bar(c, b):
        return c / b
               ~~~~~ <--- HERE

In the future, shape propagation should really not report any size errors and instead just not propagate shapes and let the actual execution fail. However, this is hard to accomplish while we still depend on running the op to do shape propagation.
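Both error paths share one mechanism: a node (or the instruction compiled from it) carries a std::shared_ptr<SourceLocation>, and when an op throws, the handler rethrows with the location's highlight appended. A condensed sketch of that pattern, using the names this diff introduces (it mirrors the catch site added to shape propagation below; it is a sketch, not the full code):

    // Sketch only: wrap an op failure with the node's recorded source location.
    try {
      PropagateShapeOnNode(node);
    } catch (std::exception& e) {
      if (auto sl = node->getSourceLocation()) {
        // builds a new message from e.what() plus sl->highlight(...) and rethrows
        sl->wrapAndRethrowException(e, "operation failed shape propagation");
      } else {
        throw;  // no location recorded, keep the original exception
      }
    }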
@@ -1641,5 +1641,20 @@ class TestJit(TestCase):
         s = Variable(torch.rand(2))
         self.assertEqual(s + s + s, foo(s))
 
+    def test_script_error(self):
+        @torch.jit.script
+        def foo(a):
+            return a.mm(a)
+
+        s = Variable(torch.rand(10))
+        with self.assertRaisesRegex(RuntimeError, "failed shape propagation"):
+            foo(s)
+
+        @torch.jit.script
+        def bar(c, b):
+            return c / b
+
+        with self.assertRaisesRegex(RuntimeError, "failed in interpreter"):
+            bar(Variable(torch.rand(10), requires_grad=True), Variable(torch.rand(9), requires_grad=True))
+
 if __name__ == '__main__':
     run_tests()
@@ -43,7 +43,7 @@ variable_list Function::traced_apply(variable_list inputs) {
     var_flags.push_back(VariableFlags::of(input));
   }
   auto* this_node = graph->createCppOp(get_shared_ptr(), std::move(var_flags));
-  this_node->setSourceLocation(std::make_shared<SourceLocation>(
+  this_node->setSourceLocation(std::make_shared<StringSourceLocation>(
     jit::tracer::getPythonInterpreterStackTrace()
   ));
   for (auto& input: inputs) {
@@ -197,7 +197,9 @@ void encodeGraph(onnx::GraphProto * p_g, const std::shared_ptr<Graph> & g, const
   }
   auto p_n = p_g->add_node();
   if (node->getSourceLocation()) {
-    p_n->set_doc_string(node->getSourceLocation()->python_traceback);
+    std::stringstream ss;
+    node->getSourceLocation()->highlight(ss);
+    p_n->set_doc_string(ss.str());
   }
   for(auto input : node->inputs()) {
     if (input->node()->kind() == kUndefined) {
@@ -611,6 +611,7 @@ struct Instruction {
   UseList inputs;
   ListHandle<int> outputs;
   Symbol debug_name; // used in dump to understand the generated code
+  std::shared_ptr<SourceLocation> debug_location; // for error reporting
 };
 
 
@@ -663,6 +664,7 @@ struct CodeImpl {
 
   void insertNodesFromBlock(Block* block) {
     for(auto node : block->nodes()) {
+      const auto & source_location = node->getSourceLocation();
      switch(node->kind()) {
        case kIf: {
          // x = if c:
@@ -684,15 +686,15 @@
 
          // kPlaceholder instructions are replaced with branch instructions
          // when the branch target locations are known
-          auto cond_branch = insertInstruction(kPlaceholder, node->inputs(), moveFlags(node), {});
+          auto cond_branch = insertInstruction(kPlaceholder, source_location, node->inputs(), moveFlags(node), {});
          auto then_block = node->blocks()[0];
          auto else_block = node->blocks()[1];
          insertNodesFromBlock(else_block);
-          insertAssign(else_block->outputs(), moveFlags(else_block), node->outputs());
-          auto jump = insertInstruction(kPlaceholder, {}, {}, {});
+          insertAssign(source_location,else_block->outputs(), moveFlags(else_block), node->outputs());
+          auto jump = insertInstruction(kPlaceholder, source_location, {}, {}, {});
          auto then_block_start = instructions.size();
          insertNodesFromBlock(then_block);
-          insertAssign(then_block->outputs(), moveFlags(then_block), node->outputs());
+          insertAssign(source_location, then_block->outputs(), moveFlags(then_block), node->outputs());
          createJump(jump, instructions.size());
          createJumpNZ(cond_branch, then_block_start);
        } break;
@@ -714,18 +716,18 @@
          auto body_block = node->blocks()[0];
 
          // before assign op: stack: ... <cond> <loop-carried-depdencies>
-          insertAssign(node->inputs(), moveFlags(node), body_block->inputs());
+          insertAssign(source_location, node->inputs(), moveFlags(node), body_block->inputs());
          // after assign op: stack: ... <cond>
          // cond_branch consumes <cond> from top of the stack
-          auto cond_branch = insertInstruction(kPlaceholder, {}, {}, {});
+          auto cond_branch = insertInstruction(kPlaceholder, source_location,{}, {}, {});
          // after branch: stack: ...
 
          auto entry = instructions.size();
          insertNodesFromBlock(body_block);
          // before assign op: stack: ... <cond> <loop-carried-depdencies>
-          insertAssign(body_block->outputs(), moveFlags(body_block), body_block->inputs());
+          insertAssign(source_location, body_block->outputs(), moveFlags(body_block), body_block->inputs());
          // after assign op: stack: ... <cond>
-          auto cond_branch_end = insertInstruction(kPlaceholder, {}, {}, {});
+          auto cond_branch_end = insertInstruction(kPlaceholder, source_location, {}, {}, {});
          // after branch: stack: ...
 
          aliasRegistersTo(node->outputs(), body_block->inputs());
@@ -746,17 +748,19 @@
   }
 
   size_t insertInstruction(Node * n) {
-    auto inst = insertInstruction(n->kind(), n->inputs(), moveFlags(n) , n->outputs());
+    auto inst = insertInstruction(n->kind(), n->getSourceLocation(), n->inputs(), moveFlags(n) , n->outputs());
     instructions[inst].callback = getOperation(n, constants_are_variables);
     return inst;
   }
   size_t insertInstruction(Symbol sym,
+                           std::shared_ptr<SourceLocation> debug_location,
                            ArrayRef<Value*> inputs,
                            ArrayRef<uint8_t> move_flags,
                            ArrayRef<Value*> outputs) {
     instructions.emplace_back();
     auto & inst = instructions.back();
     inst.debug_name = sym;
+    inst.debug_location = std::move(debug_location);
     listBegin(inst.inputs.values);
     for(auto input : inputs) {
       listInsert(inst.inputs.values, getOrAllocateRegister(input, true));
@@ -778,8 +782,8 @@
     return moveFlags(b->return_node());
   }
 
-  size_t insertAssign(ArrayRef<Value*> inputs, ArrayRef<uint8_t> move_flags, ArrayRef<Value*> outputs) {
-    auto inst = insertInstruction(kAssign, inputs, move_flags, outputs);
+  size_t insertAssign(std::shared_ptr<SourceLocation> debug_location, ArrayRef<Value*> inputs, ArrayRef<uint8_t> move_flags, ArrayRef<Value*> outputs) {
+    auto inst = insertInstruction(kAssign, std::move(debug_location),inputs, move_flags, outputs);
     // This node effectively forwards its inputs into different places in a register list.
     // We don't need to manipulate the stack in any way, because all inputs are also outputs,
     // and the interpreter will take care of putting them in correct places.
@@ -904,13 +908,21 @@ struct InterpreterStateImpl {
      // std::cout << "executing " << pc << ": ";
      // function->dumpInstruction(std::cout, pc);
      // std::cout << "\n";
-      auto & inst = instructions[pc];
-      loadTensorsFromRegisters(inst.inputs, stack);
-      pc += 1 + inst.callback(stack);
-      for(int i = inst.outputs.size - 1; i >= 0; i--) {
-        int reg = get(inst.outputs,i);
-        registers[reg] = pop(stack);
-        // std::cout << "pop reg[" << reg << "];\n" << registers[reg].pImpl << "\n";
-      }
+      try {
+        auto & inst = instructions[pc];
+        loadTensorsFromRegisters(inst.inputs, stack);
+        size_t new_pc = pc + 1 + inst.callback(stack);
+        for(int i = inst.outputs.size - 1; i >= 0; i--) {
+          int reg = get(inst.outputs,i);
+          registers[reg] = pop(stack);
+          // std::cout << "pop reg[" << reg << "];\n" << registers[reg].pImpl << "\n";
+        }
+        pc = new_pc;
+      } catch(std::exception & e) {
+        if(!instructions[pc].debug_location)
+          throw; // rethrow original exception
+        // throw a new exception with enhanced debugging information
+        instructions[pc].debug_location->wrapAndRethrowException(e, "operation failed in interpreter");
+      }
     }
    current_pc = pc;
@@ -25,6 +25,7 @@
 #include "torch/csrc/jit/type.h"
 #include "torch/csrc/jit/graph_node_list.h"
 #include "torch/csrc/jit/variable_flags.h"
+#include "torch/csrc/jit/source_location.h"
 
 namespace torch { namespace autograd {
 
@@ -68,14 +69,7 @@ static inline bool operator==(const Use & a, const Use & b) {
   return a.user == b.user && a.offset == b.offset;
 }
 
-// SourceLocation represents source code-level debug information for a node.
-// It contains a Python stack trace that represents the provenance of a given
-// node in the trace.
-struct SourceLocation {
-  SourceLocation(std::string python_traceback)
-  : python_traceback(std::move(python_traceback)) {}
-  std::string python_traceback;
-};
-
 
 // Scope is a node of a trie that represents the tree of nested scopes.
 // Individual scopes are pushed and popped from Graph, which holds a
@@ -274,7 +268,7 @@ public:
     return kind_;
   }
   Node* setSourceLocation(std::shared_ptr<SourceLocation> sl) {
-    source_location_ = sl;
+    source_location_ = std::move(sl);
     return this;
   }
   std::shared_ptr<SourceLocation> getSourceLocation() const {
@@ -62,14 +62,14 @@ void PropagateShapeOnNode(Node * node) {
     // here, otherwise we fallback to running a fake version of the op
     // to get a quick and dirty propagation
     case kneg: {
-      node->output()->setType(types[0]->contiguous());
+      node->output()->setType(types.at(0)->contiguous());
     } break;
     case kmm: {
       auto lhs_type = types.at(0);
       auto rhs_type = types.at(1);
       node->output()->setType(std::make_shared<TensorType>(
         lhs_type->scalarType(), lhs_type->device(),
-        at::IntList{lhs_type->sizes()[0], rhs_type->sizes()[1]}));
+        at::IntList{lhs_type->sizes().at(0), rhs_type->sizes().at(1)}));
     } break;
     case kt: {
       auto tp = types.at(0);
@@ -85,7 +85,7 @@ void PropagateShapeOnNode(Node * node) {
       int64_t dim = node->i(kdim);
       int64_t length = node->i(klength);
       sizes.at(dim) = length;
-      node->output()->setType(tp->withSizes(sizes));
+      node->output()->setType(tp->withSizesStrides(sizes, tp->strides()));
     } break;
     case ksum: {
       if (node->hasAttribute(kdim)) {
@@ -132,7 +132,7 @@ void PropagateShapeOnNode(Node * node) {
      // If types[0] has a type, then it is not defined, and the type will
      // get set to types[0] because that will be the value propagated.
      // If its type is not defined, then unification is an undefined type.
-      node->output()->setType(types[0]->shared_from_this());
+      node->output()->setType(types.at(0)->shared_from_this());
     } break;
     case kConstant: {
       node->output()->inferTypeFrom(node->t(kvalue));
@@ -141,16 +141,16 @@ void PropagateShapeOnNode(Node * node) {
       node->output()->setType(DynamicType::get());
     } break;
     case kIf: {
-      auto then_block = node->blocks()[0];
-      auto else_block = node->blocks()[1];
+      auto then_block = node->blocks().at(0);
+      auto else_block = node->blocks().at(1);
       PropagateShapeOnBlock(then_block);
       PropagateShapeOnBlock(else_block);
       mergeTypes(then_block->outputs(), else_block->outputs(), node->outputs());
     } break;
     case kLoop: {
-      auto body_block = node->blocks()[0];
+      auto body_block = node->blocks().at(0);
       // propagate counter type
-      body_block->inputs()[0]->setType(node->inputs()[0]->type());
+      body_block->inputs().at(0)->setType(node->inputs().at(0)->type());
       // propagate loop-carried input types to block inputs
       auto loop_carried_inputs = node->inputs().slice(2); // skip max, cond
       auto loop_carried_block = body_block->inputs().slice(1); // skip trip
@@ -184,7 +184,15 @@ void PropagateShapeOnNode(Node * node) {
 
 void PropagateShapeOnBlock(Block * block) {
   for (Node * node : block->nodes()) {
-    PropagateShapeOnNode(node);
+    try {
+      PropagateShapeOnNode(node);
+    } catch(std::exception & e) {
+      if(auto sl = node->getSourceLocation()) {
+        sl->wrapAndRethrowException(e, "operation failed shape propagation");
+      } else {
+        throw;
+      }
+    }
   }
 }
 
@@ -222,10 +222,16 @@ struct to_ir {
     return save_env;
   }
 
+  Node* create(Symbol kind, const SourceRange& loc, size_t num_outputs) {
+    return def.graph
+               ->create(kind, num_outputs)
+               ->setSourceLocation(std::make_shared<SourceRange>(loc));
+  }
+
   std::vector<Value*> emitTernaryIf(const TernaryIf& expr) {
     Value* cond_value = emitExpr(expr.cond(), 1)[0];
 
-    Node* n = def.graph->insertNode(def.graph->create(kIf, 0));
+    Node* n = def.graph->insertNode(create(kIf, expr.range(), 0));
     n->addInput(cond_value);
     auto* true_block = n->addBlock();
     auto* false_block = n->addBlock();
@@ -251,7 +257,7 @@
   void emitIf(const If& stmt) {
     Value* cond_value = emitExpr(stmt.cond(), 1)[0];
 
-    Node* n = def.graph->insertNode(def.graph->create(kIf, 0));
+    Node* n = def.graph->insertNode(create(kIf, stmt.range(), 0));
     n->addInput(cond_value);
     auto* true_block = n->addBlock();
     auto* false_block = n->addBlock();
@@ -297,10 +303,10 @@
     // in a way that ensure single static assignment.
 
     // TODO: clarify that this is an optional input that isn't needed here
-    Value* max_trip_count_dummy = emitConst(INT_MAX, "i")[0];
+    Value* max_trip_count_dummy = emitConst(stmt.range(), INT_MAX, "i")[0];
     Value* cond_value = emitExpr(stmt.cond(), 1)[0];
 
-    Node* n = def.graph->insertNode(def.graph->create(kLoop, 0));
+    Node* n = def.graph->insertNode(create(kLoop, stmt.range(), 0));
     n->addInput(max_trip_count_dummy);
     n->addInput(cond_value);
     auto* body_block = n->addBlock();
@@ -472,14 +478,14 @@
      expectOutputs(tree, output_size, 1);
      const auto& inputs = tree->trees();
      auto kind = getNodeKind(tree->kind(), inputs.size());
-      return emitNode(kind, getValues(inputs), output_size)->outputs();
+      return emitNode(kind, tree->range(), getValues(inputs), output_size)->outputs();
    } break;
    case '+':
    case '-': {
      expectOutputs(tree, output_size, 1);
      const auto& inputs = tree->trees();
      auto kind = getNodeKind(tree->kind(), inputs.size());
-      auto* node = emitNode(kind, getValues(inputs), output_size);
+      auto* node = emitNode(kind, tree->range(), getValues(inputs), output_size);
      if (kind != kneg)
        node->t_(Symbol("alpha"), at::CPU(at::kFloat).scalarTensor(1.0));
      return node->outputs();
@@ -492,7 +498,7 @@
        expectOutputs(tree, output_size, 0);
        if (!apply.attributes().empty())
          throw ErrorReport(tree) << "print doesn't accept any keyword arguments";
-        return emitNode(kPrint, getValues(apply.inputs()), 0,
+        return emitNode(kPrint, tree->range(), getValues(apply.inputs()), 0,
                        AttributeMap{}, ListAttributeMap{})->outputs();
      } else {
        const auto& inputs = getValues(apply.inputs());
@@ -524,7 +530,7 @@
          }
        }
        return emitNode(
-                   kind, inputs, output_size, attributes, list_attributes)
+                   kind, tree->range(), inputs, output_size, attributes, list_attributes)
            ->outputs();
      }
    } break;
@@ -535,7 +541,7 @@
    } break;
    case TK_CONST: {
      expectOutputs(tree, output_size, 1);
-      return emitConst(
+      return emitConst(tree->range(),
          tree->tree(0)->doubleValue(), tree->tree(1)->stringValue());
    } break;
    case TK_SLICE: {
@@ -584,20 +590,21 @@
    }
    return emitNode(
               Symbol("type_as"),
-               {emitExpr(input, 1)[0], createConstant(at::CPU(t).ones({1}))},
+               input->range(),
+               {emitExpr(input, 1)[0], createConstant(input->range(), at::CPU(t).ones({1}))},
               1)
        ->outputs();
  }
 
-  std::vector<Value*> emitConst(const double val, const std::string& type) {
+  std::vector<Value*> emitConst(const SourceRange& loc, const double val, const std::string& type) {
    if (type == "f") {
-      return {createConstant(at::CPU(at::kFloat).scalarTensor(val))};
+      return {createConstant(loc, at::CPU(at::kFloat).scalarTensor(val))};
    } else if (type == "LL") {
-      return {createConstant(at::CPU(at::kLong).scalarTensor(val))};
+      return {createConstant(loc, at::CPU(at::kLong).scalarTensor(val))};
    } else if (type == "b") {
-      return {createConstant(at::CPU(at::kByte).scalarTensor(val))};
+      return {createConstant(loc, at::CPU(at::kByte).scalarTensor(val))};
    } else if (type == "i") {
-      return {createConstant(at::CPU(at::kInt).scalarTensor(val))};
+      return {createConstant(loc, at::CPU(at::kInt).scalarTensor(val))};
    } else {
      throw std::runtime_error("unknown const type " + type);
    }
@@ -605,11 +612,12 @@
 
  Node* emitNode(
      NodeKind kind,
+      const SourceRange& loc,
      const std::vector<Value*> inputs,
      const size_t output_size,
      const AttributeMap& attributes = AttributeMap{},
      const ListAttributeMap& list_attributes = ListAttributeMap{}) {
-    Node* n = def.graph->insertNode(def.graph->create(kind, output_size));
+    Node* n = def.graph->insertNode(create(kind, loc, output_size));
    for (auto* input_value : inputs) {
      n->addInput(input_value);
    }
@@ -639,17 +647,18 @@
  // Desugars slice syntactic sugar tensor[begin:end] -> tensor.slice(begin,
  // end).
  std::vector<Value*> emitSlice(
-      const SourceRange& range,
+      const SourceRange& loc,
      TreeList&& inputs,
      const size_t output_size) {
    const auto applyInputs =
-        Compound::create(TK_LIST, range, std::move(inputs));
+        Compound::create(TK_LIST, loc, std::move(inputs));
    const auto input_values = getValues(applyInputs->trees());
    Value* tensor = input_values[0];
    const auto& begin = at::Scalar(input_values[1]->node()->t(kvalue)).toInt();
    const auto& end = at::Scalar(input_values[2]->node()->t(kvalue)).toInt();
    return emitNode(
               Symbol("slice"),
+               loc,
               {tensor},
               output_size,
               {{"dim", {0, "LL"}},
@@ -661,16 +670,17 @@
 
  // Desugars gather syntactic sugar tensor[idx] -> tensor.select(idx).
  std::vector<Value*> emitGather(
-      const SourceRange& range,
+      const SourceRange& loc,
      TreeList&& inputs,
      const size_t output_size) {
    const auto applyInputs =
-        Compound::create(TK_LIST, range, std::move(inputs));
+        Compound::create(TK_LIST, loc, std::move(inputs));
    const auto input_values = getValues(applyInputs->trees());
    Value* tensor = input_values[0];
    const auto& idx = at::Scalar(input_values[1]->node()->t(kvalue)).toInt();
    return emitNode(
               Symbol("select"),
+               loc,
               {tensor},
               output_size,
               {{"dim", {0, "LL"}}, {"index", {idx, "LL"}}})
@@ -685,8 +695,10 @@
  std::shared_ptr<Environment> environment_stack;
 
 private:
-  Value* createConstant(const at::Tensor& val) {
-    return def.graph->insertNode(def.graph->createConstant(val))->output();
+  Value* createConstant(const SourceRange& loc, const at::Tensor& val) {
+    auto n = def.graph->createConstant(val);
+    n->setSourceLocation(std::make_shared<SourceRange>(loc));
+    return def.graph->insertNode(n)->output();
  }
 };
 
@@ -7,6 +7,8 @@
 #include <string>
 #include <unordered_map>
 #include <vector>
+#include "torch/csrc/jit/source_location.h"
+
 
 namespace torch {
 namespace jit {
@@ -305,7 +307,7 @@ SharedParserData& sharedParserData();
 // a range of a shared string 'file_' with functions to help debug by highlight
 // that
 // range.
-struct SourceRange {
+struct SourceRange : public SourceLocation {
   SourceRange(
       const std::shared_ptr<std::string>& file_,
       size_t start_,
@@ -317,7 +319,7 @@
   size_t size() const {
     return end() - start();
   }
-  void highlight(std::ostream& out) const {
+  virtual void highlight(std::ostream& out) const override {
     const std::string& str = file();
     size_t begin = start();
     size_t end = start();
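The point of making SourceRange derive from SourceLocation is that script-frontend nodes and traced nodes share one error-reporting path: anything holding a std::shared_ptr<SourceLocation> can call highlight() without knowing which frontend produced the node. A standalone, simplified sketch of that shape (FakeSourceRange is a stand-in for illustration only; the real SourceRange tracks start/end offsets into a shared file string):

    #include <cstddef>
    #include <iostream>
    #include <memory>
    #include <string>

    // Simplified stand-in for the abstract base added by this commit.
    struct SourceLocation {
      virtual ~SourceLocation() = default;
      virtual void highlight(std::ostream& out) const = 0;
    };

    // Stand-in for the script frontend's SourceRange: knows the offending span.
    struct FakeSourceRange : SourceLocation {
      FakeSourceRange(std::string line, std::size_t start, std::size_t end)
          : line(std::move(line)), start(start), end(end) {}
      void highlight(std::ostream& out) const override {
        out << line << "\n"
            << std::string(start, ' ') << std::string(end - start, '~')
            << " <--- HERE\n";
      }
      std::string line;
      std::size_t start, end;
    };

    int main() {
      // Error-reporting code only sees the base type.
      std::shared_ptr<SourceLocation> loc =
          std::make_shared<FakeSourceRange>("return c / b", 7, 12);
      loc->highlight(std::cout);  // prints the line with ~~~~~ under "c / b"
    }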
@@ -127,7 +127,9 @@ struct List : public TreeView {
   List(const TreeRef& tree) : TreeView(tree) {
     tree->match(TK_LIST);
     // Iterate over list to temporarily instantiate Ts that will check the type
-    for (const T& elem : *this) {}
+    for (const T& elem : *this) {
+      (void) elem; //silence unused warning
+    }
   }
   iterator begin() const {
     return iterator(tree_->trees().begin());
torch/csrc/jit/source_location.h (new file)
@@ -0,0 +1,39 @@
+#pragma once
+#include <iostream>
+#include <sstream>
+#include <typeinfo>
+#include <stdexcept>
+
+namespace torch { namespace jit {
+
+// SourceLocation represents source code-level debug information for a node.
+// It contains information about where a node got generated.
+// In the case of tracing this will be a python stack trace.
+// In the case of using the scripting frontend this will be backed
+// by a SourceRange object
+struct SourceLocation {
+  virtual void highlight(std::ostream & out) const = 0;
+  void wrapAndRethrowException(const std::exception & e, const std::string & additional = "") {
+    std::stringstream msg;
+    msg << "\n" << e.what() << ":\n";
+    if(additional.size() != 0) {
+      msg << additional << ":\n";
+    }
+    highlight(msg);
+    throw std::runtime_error(msg.str());
+  }
+};
+
+// normally a python stack trace
+struct StringSourceLocation : public SourceLocation {
+  StringSourceLocation(std::string context)
+  : context(std::move(context)) {}
+  virtual void highlight(std::ostream & out) const override {
+    out << context;
+  }
+private:
+  std::string context;
+};
+
+}}
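Taken on its own, the header above is easy to exercise: wrap any op failure and the rethrown runtime_error carries the original message, the caller's note, and the highlighted provenance. A minimal usage sketch, meant to be built inside the PyTorch tree (the traceback text is made up for illustration):

    #include <iostream>
    #include <stdexcept>
    #include "torch/csrc/jit/source_location.h"

    int main() {
      torch::jit::StringSourceLocation loc(
          "File \"model.py\", line 12, in forward\n    return x.mm(w)\n");
      try {
        throw std::runtime_error("size mismatch");  // stand-in for a failing op
      } catch (std::exception& e) {
        try {
          loc.wrapAndRethrowException(e, "operation failed in interpreter");
        } catch (std::runtime_error& wrapped) {
          // wrapped.what() now contains the op error, the note, and the traceback
          std::cout << wrapped.what() << "\n";
        }
      }
    }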
@@ -142,7 +142,7 @@ PreTraceInfo makePreTraceInfo(at::ArrayRef<Variable> inputs, F ctor) {
   auto state_lock = info.state->lock();
 
   Node *n = ctor(*graph);
-  auto sl = std::make_shared<SourceLocation>(getPythonInterpreterStackTrace());
+  auto sl = std::make_shared<StringSourceLocation>(getPythonInterpreterStackTrace());
   n->setSourceLocation(sl);
 
   for (Variable input : inputs) {