Summary: I got some tensor->variable conversion exceptions from `torch/csrc/autograd/variable.h`, which used the `TORCH_ASSERTM` macros instead of `AT_CHECK`, so they didn't have backtraces. This was such a substantial loss for debuggability that I decided to update the whole codebase to use the backtrace-enabled ATen macros instead of `TORCH_ASSERT` and `JIT_ASSERT`, the latter having been an alias of the former. ezyang apaszke zdevito Pull Request resolved: https://github.com/pytorch/pytorch/pull/9575 Differential Revision: D8924566 Pulled By: goldsborough fbshipit-source-id: 7a4013b13eec9dbf024cef94cf49fca72f61d441
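For illustration, a minimal sketch of the difference the commit message describes, assuming the ATen error macros of this era (`AT_ASSERT` for internal invariants, `AT_CHECK` for checks with a message); the header location and the helper function are assumptions, not part of this commit:

#include <ATen/Error.h>  // assumed header for AT_ASSERT/AT_CHECK at the time

// Hypothetical helper: on failure, both macros throw at::Error, which
// carries a C++ backtrace, unlike the old TORCH_ASSERTM/JIT_ASSERT aliases.
void check_slot(size_t pos, size_t size) {
  AT_ASSERT(pos < size);                           // internal invariant
  AT_CHECK(size > 0, "buffer must be non-empty");  // check with a message
}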
48 lines
1.0 KiB
C++
#include "torch/csrc/autograd/input_buffer.h"
|
|
|
|
#include "torch/csrc/autograd/functions/basic_ops.h"
|
|
|
|
#include <ATen/DeviceGuard.h>
|
|
|
|
#include <cstddef>
|
|
#include <utility>
|
|
#include <vector>
|
|
|
|
namespace torch { namespace autograd {
|
|
|
|
|
|
void InputBuffer::add(size_t pos, Variable var) {
|
|
AT_ASSERT(pos < buffer.size());
|
|
if (!var.defined()) {
|
|
return;
|
|
}
|
|
auto& old_var = buffer[pos];
|
|
if (!old_var.defined()) {
|
|
buffer[pos] = std::move(var);
|
|
} else {
|
|
at::DeviceGuard device_guard(var);
|
|
// ATen doesn't route sparse additions correctly...
|
|
if (old_var.type().is_sparse()) {
|
|
buffer[pos] = var + old_var;
|
|
} else {
|
|
buffer[pos] = old_var + var;
|
|
}
|
|
}
|
|
}
|
|
|
|
auto InputBuffer::device() const -> int {
|
|
for (auto& var : buffer) {
|
|
if (var.defined() && var.type().is_cuda()) {
|
|
return var.get_device();
|
|
}
|
|
}
|
|
return -1;
|
|
}
|
|
|
|
auto InputBuffer::variables(InputBuffer&& g) -> std::vector<Variable> {
|
|
std::vector<Variable> result = std::move(g.buffer);
|
|
return result;
|
|
}
|
|
|
|
}} // namespace torch::autograd
|
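For context, a minimal usage sketch, not part of this file: how a caller might fold two gradients arriving at the same input slot. The size-taking constructor and the Variable operands are assumptions based on input_buffer.h, which isn't shown here.

// Hypothetical caller: grad_a and grad_b are gradients for the same input.
InputBuffer grads(/*size=*/1);  // assumed constructor taking the slot count
grads.add(0, grad_a);           // first arrival is moved into the slot
grads.add(0, grad_b);           // second arrival is summed with the first
auto inputs = InputBuffer::variables(std::move(grads));  // consume the buffer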