Revert "Record view stacks if running anomaly mode (#103185)"

This reverts commit a02c573a8996d5d47585410ceaf81c87104cfd43.

Reverted https://github.com/pytorch/pytorch/pull/103185 on behalf of https://github.com/izaitsevfb due to Breaks internal builds, see D46629734 ([comment](https://github.com/pytorch/pytorch/pull/103185#issuecomment-1588258206))
PyTorch MergeBot
2023-06-12 23:52:10 +00:00
parent c3d3165f16
commit 2c313e7b99
7 changed files with 7 additions and 72 deletions
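
For context, the reverted change captured a Python stack whenever a view was created while anomaly mode was enabled, so that the error raised by a later in-place update could report where the view was allocated. Below is a minimal sketch of that user-visible behavior, adapted from the test removed in test/test_autograd.py further down (make_view is a hypothetical stand-in for the test's arglebargle; exact error text may differ across versions):

import torch
from torch.autograd import detect_anomaly

def make_view(x):
    # Views created under no_grad hit the view-on-rebase error path when
    # they are later modified in place with a requires_grad argument.
    with torch.no_grad():
        return x.view(2, 2)

# Without anomaly mode, the error only suggests re-running the forward
# region under detect_anomaly(check_nan=False).
r = make_view(torch.randn(4))
try:
    r.add_(torch.randn(4, requires_grad=True))
except RuntimeError as e:
    print(e)

# With anomaly mode active at view creation, the feature being reverted
# also embedded the allocation stack (mentioning make_view) in the message.
with detect_anomaly(check_nan=False):
    r = make_view(torch.randn(4))
try:
    r.add_(torch.randn(4, requires_grad=True))
except RuntimeError as e:
    print(e)

With the revert applied, neither case includes these hints; the error returns to its previous generic text.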

build_variables.bzl

@@ -175,8 +175,6 @@ core_trainer_sources = [
     "torch/csrc/jit/ir/type_hashing.cpp",
     "torch/csrc/jit/serialization/pickler.cpp",
     "torch/csrc/jit/serialization/type_name_uniquer.cpp",
-    "torch/csrc/profiler/unwind/unwind.cpp",
-    "torch/csrc/profiler/combined_traceback.cpp",
 ]
 
 torch_mobile_core = [
@@ -405,6 +403,8 @@ core_sources_full_mobile_no_backend_interface_xplat = [
     "torch/csrc/jit/tensorexpr/types.cpp",
     "torch/csrc/jit/tensorexpr/unique_name_manager.cpp",
     "torch/csrc/jit/testing/file_check.cpp",
+    "torch/csrc/profiler/unwind/unwind.cpp",
+    "torch/csrc/profiler/combined_traceback.cpp",
     "torch/csrc/jit/testing/hooks_for_testing.cpp",
     "torch/csrc/utils/cpp_stacktraces.cpp",
     "torch/csrc/utils/schema_info.cpp",

setup.py

@@ -1172,7 +1172,6 @@ def main():
         'include/torch/csrc/jit/codegen/cuda/scheduler/*.h',
         'include/torch/csrc/onnx/*.h',
         'include/torch/csrc/profiler/*.h',
-        'include/torch/csrc/profiler/unwind/*.h',
         'include/torch/csrc/profiler/orchestration/*.h',
         'include/torch/csrc/profiler/stubs/*.h',
         'include/torch/csrc/utils/*.h',

test/test_autograd.py

@@ -4293,20 +4293,6 @@ Done""")
         out.backward()
         self.assertIn('MyFunc.apply', str(w[0].message))
 
-    def test_anomaly_gives_view_stack(self):
-        def arglebargle(x):
-            with torch.no_grad():
-                return x.view(2, 2)
-
-        r = arglebargle(torch.randn(4))
-        with self.assertRaisesRegex(RuntimeError, r"detect_anomaly\(check_nan=False\)"):
-            r.add_(torch.randn(4, requires_grad=True))
-
-        with detect_anomaly(check_nan=False):
-            r = arglebargle(torch.randn(4))
-        with self.assertRaisesRegex(RuntimeError, "arglebargle"):
-            r.add_(torch.randn(4, requires_grad=True))
-
     def test_calculate_shape_util(self):
         out = torch.randn(10, 5, requires_grad=True)
         grad = torch.randn(5, 10, requires_grad=True)

torch/csrc/autograd/variable.cpp

@@ -1,7 +1,6 @@
 #include <torch/csrc/autograd/variable.h>
 
 #include <torch/csrc/autograd/InferenceMode.h>
-#include <torch/csrc/autograd/anomaly_mode.h>
 #include <torch/csrc/autograd/autograd.h>
 #include <torch/csrc/autograd/edge.h>
 #include <torch/csrc/autograd/engine.h>
@@ -41,13 +40,7 @@ DifferentiableViewMeta::DifferentiableViewMeta(
       backward_info_(std::move(backward_info)),
       forward_info_(std::move(forward_info)),
       shared_view_info_(shared_view_info),
-      creation_meta_(creation_meta),
-      creation_traceback_(
-          AnomalyMode::is_enabled() ? torch::CapturedTraceback::gather(
-                                          /*python*/ true,
-                                          /*script*/ false,
-                                          /*cpp*/ false)
-                                    : nullptr) {
+      creation_meta_(creation_meta) {
   is_view_ = true;
   if (backward_info_.has_value()) {
     self_impl->set_version_counter(
@@ -66,16 +59,6 @@ DifferentiableViewMeta::DifferentiableViewMeta(
   }
 }
 
-void DifferentiableViewMeta::set_creation_meta(CreationMeta new_creation_meta) {
-  TORCH_CHECK(
-      has_bw_view(), "creation_meta can only exist for backward views.");
-  creation_meta_ = new_creation_meta;
-  if (AnomalyMode::is_enabled()) {
-    creation_traceback_ = torch::CapturedTraceback::gather(
-        /*python*/ true, /*script*/ false, /*cpp*/ false);
-  }
-}
-
 // Chain this view info with the new view op between base and tensor
 ViewInfo ViewInfo::chain(
     const Variable& base,
@@ -855,24 +838,6 @@ void handle_view_on_rebase(
         TORCH_INTERNAL_ASSERT(false, "Invalid CreationMeta state");
     }
 
-    auto* tb = diff_view_meta->get_creation_traceback().get();
-    if (tb) {
-      std::ostringstream oss;
-      torch::SymbolizedTracebacks st = torch::symbolize({tb});
-      const std::vector<uint64_t>& traceback = st.tracebacks[0];
-      for (uint64_t idx : traceback) {
-        const unwind::Frame& frame = st.all_frames[idx];
-        oss << " File \"" << frame.filename << "\", line " << frame.lineno
-            << ", in " << frame.funcname << "\n";
-      }
-      msg = c10::str(msg, " This view was allocated at:\n", oss.str());
-    } else {
-      msg = c10::str(
-          msg,
-          " To find out where this view was allocated, run your entire forward region under"
-          " anomaly mode (torch.autograd.detect_anomaly(check_nan=False)).");
-    }
-
     TORCH_CHECK(false, msg);
   }
 }
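
The branch removed above rendered each symbolized frame in Python-traceback style before raising. A rough Python equivalent of that formatting loop, for illustration only (the dict keys mirror unwind::Frame's filename/lineno/funcname fields; the sample frame is a placeholder):

def append_view_stack(msg, frames):
    # One traceback-style line per frame, appended under the
    # "This view was allocated at:" header, as the removed C++ code did.
    lines = "".join(
        ' File "{}", line {}, in {}\n'.format(
            f["filename"], f["lineno"], f["funcname"])
        for f in frames
    )
    return msg + " This view was allocated at:\n" + lines

print(append_view_stack(
    "...", [{"filename": "model.py", "lineno": 42, "funcname": "forward"}]))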

torch/csrc/autograd/variable.h

@@ -7,7 +7,6 @@
 #include <torch/csrc/autograd/edge.h>
 #include <torch/csrc/autograd/forward_grad.h>
 #include <torch/csrc/autograd/function_hook.h>
-#include <torch/csrc/profiler/combined_traceback.h>
 
 #include <ATen/NamedTensorUtils.h>
 #include <ATen/core/Tensor.h>
@@ -595,7 +594,6 @@ struct TORCH_API DifferentiableViewMeta : public AutogradMeta {
   /// version_counter.current_version().
   uint32_t attr_version_;
   CreationMeta creation_meta_;
-  std::shared_ptr<torch::CapturedTraceback> creation_traceback_;
 
  public:
   /// requires_grad is a backward AD field so we only use the view specific
@@ -637,13 +635,12 @@ struct TORCH_API DifferentiableViewMeta : public AutogradMeta {
     return creation_meta_;
   }
 
-  const std::shared_ptr<torch::CapturedTraceback>& get_creation_traceback()
-      const {
-    return creation_traceback_;
+  void set_creation_meta(CreationMeta new_creation_meta) {
+    TORCH_CHECK(
+        has_bw_view(), "creation_meta can only exist for backward views.");
+    creation_meta_ = new_creation_meta;
   }
 
-  void set_creation_meta(CreationMeta new_creation_meta);
-
   bool has_fw_view() const {
     return shared_view_info_ || forward_info_.has_value();
   }

torch/csrc/profiler/combined_traceback.cpp

@@ -1,5 +1,4 @@
 #include <torch/csrc/profiler/combined_traceback.h>
-
 #include <atomic>
 
 namespace torch {
@@ -18,11 +17,9 @@ std::shared_ptr<CapturedTraceback> CapturedTraceback::gather(
       p = p->next_;
     }
   }
-#ifndef BUILD_LITE_INTERPRETER
   if (script) {
     r->script_frames_ = torch::jit::currentCallstack();
   }
-#endif
   if (cpp) {
     r->cpp_frames_ = unwind::unwind();
   }
@@ -117,7 +114,6 @@ SymbolizedTracebacks symbolize(
   };
 
   auto append_jit = [&]() {
-#ifndef BUILD_LITE_INTERPRETER
     if (jit_appended) {
       return;
     }
@@ -137,7 +133,6 @@ SymbolizedTracebacks symbolize(
       r.tracebacks.back().push_back(r.all_frames.size());
       r.all_frames.emplace_back(std::move(frame));
     }
-#endif
   };
 
   for (void* f : sc->cpp_frames_) {

torch/csrc/profiler/combined_traceback.h

@@ -1,12 +1,7 @@
 #pragma once
-#ifndef BUILD_LITE_INTERPRETER
 #include <torch/csrc/jit/runtime/interpreter.h>
-#endif
-
 #include <c10/core/Allocator.h>
 #include <c10/util/Exception.h>
 #include <torch/csrc/profiler/unwind/unwind.h>
-
 #include <unordered_map>
-
 namespace torch {
@@ -52,9 +47,7 @@ struct TORCH_API CapturedTraceback : public c10::GatheredContext {
  private:
   std::vector<PyFrame> frames_;
   std::vector<void*> cpp_frames_;
-#ifndef BUILD_LITE_INTERPRETER
   std::vector<jit::StackEntry> script_frames_;
-#endif
 
   friend TORCH_API SymbolizedTracebacks
   symbolize(const std::vector<CapturedTraceback*>& to_symbolize);