Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 21:14:14 +08:00
[Code Clean] Replace std::runtime_error with TORCH_CHECK (#163610)
Including:
- `torch/csrc/instruction_counter`
- `torch/csrc/lazy`
- `torch/csrc/monitor`
- `torch/csrc/profiler`
- `torch/csrc/dynamo`

Fixes part of #148114.

Due to a personal mistake with PR #163317, this PR does the same thing, **and PR #163317 has already been approved by @albanD.** I'm so sorry about that. Hope you won't mind, @albanD. 🥹

Pull Request resolved: https://github.com/pytorch/pytorch/pull/163610
Approved by: https://github.com/albanD, https://github.com/Skylion007
This commit is contained in:
committed by
PyTorch MergeBot
parent bbde16fe98
commit 39df24fe04
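The change applied throughout is mechanical: a `throw std::runtime_error(...)` guarded by an `if` becomes a single `TORCH_CHECK(...)` asserting the success condition. As a minimal before/after sketch (the wrapper functions here are illustrative; the condition and message are taken from the `count_instructions` hunk below):

```cpp
#include <c10/util/Exception.h>
#include <stdexcept>

void before(int fd) {
  // Old style: a bare C++ exception with no source location.
  if (fd == -1)
    throw std::runtime_error("perf_event_open failed");
}

void after(int fd) {
  // New style: throws c10::Error, carrying file/line plus the message,
  // and cooperating with PyTorch's C++-to-Python error translation.
  TORCH_CHECK(fd != -1, "perf_event_open failed");
}
```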
@@ -1,3 +1,4 @@
+#include <c10/util/Exception.h>
 #include <torch/csrc/dynamo/cache_entry.h>
 #include <torch/csrc/dynamo/cpp_shim.h>
 #include <torch/csrc/dynamo/cpython_includes.h>
@@ -23,10 +24,8 @@ static py::object dynamo_call_callback(
     CacheEntry* cache_entry,
     FrameState* frame_state) {
   THPPyInterpreterFrame* frame = THPPyInterpreterFrame_New(_frame);
-  if (frame == nullptr) {
-    throw std::runtime_error(
-        "Dynamo failed to initialize CPython interpreter frame wrapper");
-  }
+  TORCH_CHECK(
+      frame, "Dynamo failed to initialize CPython interpreter frame wrapper");
   frame->locals = (PyObject*)framelocals_mapping_to_dict(locals);

   py::object cache_entry_obj = py::none();
@@ -1,3 +1,4 @@
+#include <c10/util/Exception.h>
 #include <torch/csrc/dynamo/extra_state.h>

 #include <torch/csrc/dynamo/cache_entry.h>
@@ -232,9 +233,8 @@ py::list _debug_get_cache_entry_list(const py::handle& code_obj) {

 PrecompileEntry::PrecompileEntry(py::object gm, py::object c)
     : guard_manager(std::move(gm)), code(std::move(c)) {
-  if (!PyCode_Check(code.ptr())) {
-    throw std::runtime_error("Expecting CodeType from PrecompileEntry.");
-  }
+  TORCH_CHECK(
+      PyCode_Check(code.ptr()), "Expecting CodeType from PrecompileEntry.");
   root_mgr =
       torch::dynamo::convert_to_root_guard_manager(guard_manager.attr("root"));
 }
@@ -2,6 +2,7 @@
 #include <ATen/autocast_mode.h>
 #include <c10/core/SafePyObject.h>
 #include <c10/core/impl/PyInterpreter.h>
+#include <c10/util/Exception.h>
 #define PY_SSIZE_T_CLEAN
 #include <ATen/EmptyTensor.h>
 #include <ATen/SparseCsrTensorUtils.h>
@@ -64,8 +65,7 @@ int open_counter() {

 uint64_t count_instructions(const std::function<void()>& fn) {
   int fd = open_counter();
-  if (fd == -1)
-    throw std::runtime_error("perf_event_open failed");
+  TORCH_CHECK(fd != -1, "perf_event_open failed");

   ioctl(fd, PERF_EVENT_IOC_RESET, 0);
   ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
@@ -568,7 +568,7 @@ static PyMethodDef TensorGuards_methods[] = {
     {nullptr} /* Sentinel */
 };

-static PyTypeObject TensorGuardsType = { PyVarObject_HEAD_INIT(nullptr, 0)
+static PyTypeObject TensorGuardsType = {PyVarObject_HEAD_INIT(nullptr, 0)
 };

 struct AutocastState {
@@ -771,9 +771,8 @@ PyObject* GlobalStateGuard_load(
     PyObject* args,
     PyObject* kwargs) {
   char* json;
-  if (!PyArg_ParseTuple(args, "s", &json)) {
-    throw std::runtime_error("Cannot parse as json string.");
-  }
+  TORCH_CHECK(
+      PyArg_ParseTuple(args, "s", &json), "Cannot parse as json string.");
   nlohmann::json::parse(json).get_to(*self);
   Py_RETURN_NONE;
 }
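One subtlety in hunks like this one: `TORCH_CHECK` evaluates its condition exactly once, so folding a side-effecting call such as `PyArg_ParseTuple` into the macro preserves behavior. A minimal sketch, with a hypothetical parser standing in for the real call:

```cpp
#include <c10/util/Exception.h>

// Hypothetical parser with a side effect: it writes through `out` and
// returns false on failure, like PyArg_ParseTuple does.
static bool parse_arg(const char* text, int* out) {
  if (text == nullptr)
    return false;
  *out = 42;  // imagine real parsing here
  return true;
}

void demo(const char* text) {
  int value = 0;
  // The call still runs exactly once; TORCH_CHECK only adds throw-on-false.
  TORCH_CHECK(parse_arg(text, &value), "Cannot parse input.");
  // `value` is usable here, just as with the old if/throw version.
}
```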
@@ -797,7 +796,7 @@ static PyMethodDef GlobalStateGuard_methods[] = {
     METH_VARARGS,
     "Parse serialized json format"},
    {nullptr}};
-static PyTypeObject GlobalStateGuardType = { PyVarObject_HEAD_INIT(nullptr, 0)
+static PyTypeObject GlobalStateGuardType = {PyVarObject_HEAD_INIT(nullptr, 0)
 };

 static PyObject* check_type_id(PyObject* dummy, PyObject* args) {
@@ -854,9 +853,9 @@ static int dict_version_watch_callback(
 static uint64_t get_dict_version_unchecked(PyObject* dict) {
 #if IS_PYTHON_3_12_PLUS

-  if (PyDict_Watch(dict_version_watcher_id, dict)) {
-    throw std::runtime_error("failed to add version watcher to dict!");
-  }
+  TORCH_CHECK(
+      !PyDict_Watch(dict_version_watcher_id, dict),
+      "failed to add version watcher to dict!");
   if (!dict_version_map.count(dict)) {
     dict_version_map[dict] = global_dict_version_id++;
   }
@@ -1783,8 +1782,10 @@ class LAMBDA_GUARD_NO_FRAMELOCALS : public LAMBDA_GUARD {
   }

   bool check_nopybind(FrameLocalsMapping* map) override {
-    throw std::runtime_error(
-        "FramelocalsMapping input to LAMBDA_GUARD_NO_FRAMELOCALS, use LAMBDA_GUARD instead");
+    TORCH_CHECK(
+        false,
+        "FramelocalsMapping input to LAMBDA_GUARD_NO_FRAMELOCALS,"
+        "use LAMBDA_GUARD instead");
   }
 };

@@ -2882,9 +2883,8 @@ class GuardManager {
       return py::type::of(py::none());
     }

-    if (!PyCallable_Check(_weak_type.ptr())) {
-      throw std::runtime_error("_weak_type is not callable");
-    }
+    TORCH_CHECK_TYPE(
+        PyCallable_Check(_weak_type.ptr()), "_weak_type is not callable");
     return _weak_type();
   }

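Note that this hunk upgrades the error category as well: `TORCH_CHECK_TYPE` throws `c10::TypeError`, which PyTorch's Python bindings surface as a Python `TypeError` rather than a generic `RuntimeError`. A hedged sketch of the distinction (the function is illustrative):

```cpp
#include <c10/util/Exception.h>

// Illustrative only: shows which C++ exception each macro produces.
void classify(bool ok) {
  try {
    TORCH_CHECK_TYPE(ok, "expected a callable");  // throws c10::TypeError
  } catch (const c10::TypeError&) {
    // Surfaces as TypeError at the Python boundary.
  }
  try {
    TORCH_CHECK(ok, "generic failure");  // throws c10::Error
  } catch (const c10::Error&) {
    // Surfaces as RuntimeError at the Python boundary.
  }
}
```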
@@ -2896,10 +2896,8 @@ class GuardManager {
   }

   void mark_tag_safe_root() {
-    if (!_is_tag_safe) {
-      throw std::runtime_error(
-          "Marking a node tag_safe_root when its not tag safe");
-    }
+    TORCH_CHECK(
+        _is_tag_safe, "Marking a node tag_safe_root when its not tag safe");
     _is_tag_safe_root = true;
   }

@@ -3165,11 +3163,10 @@ class GuardManager {
     if (is_recording) {
       stop_recording_dict_pointers(_root, value, result);
       if (result) {
-        if (!register_weakref_callback(value)) {
-          // something bad happened, disable the dict tag optimization
-          throw std::runtime_error(
-              "Could not register a callback for recursive dict tag optimization");
-        }
+        // something bad happened, disable the dict tag optimization
+        TORCH_CHECK(
+            register_weakref_callback(value),
+            "Could not register a callback for recursive dict tag optimization");
 #if IS_PYTHON_3_12_PLUS
         // Ideally we don't need to even register a weakref callback for value.
         // But it does not hurt to be more cautious
@@ -4102,14 +4099,14 @@ class DictGuardManager : public GuardManager {
       const py::object& a,
       const std::string& source,
       const py::object& b) {
-    throw std::runtime_error("Can not add an accessor to DictGuardManager");
+    TORCH_CHECK(false, "Can not add an accessor to DictGuardManager");
   }

   void add_leaf_guard(std::shared_ptr<LeafGuard> leaf_guard) override {
     // If you are calling this, you probably want to go through a key, value
     // child manager and then add a leaf guard on them. DictGuardManager already
     // has TYPE_MATCH and LENGTH_CHECK built in.
-    throw std::runtime_error("DictGuardManager does not support a leaf_guard");
+    TORCH_CHECK(false, "DictGuardManager does not support a leaf_guard");
   }

   // Debug helper - Returning raw pointers because we can't return unique_ptr
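`TORCH_CHECK(false, ...)` is the conventional replacement for an unconditional `throw` on paths that must always fail, as in the two overrides above. A minimal sketch with a hypothetical type:

```cpp
#include <c10/util/Exception.h>

// Hypothetical container whose mutating method must never be reached.
struct ReadOnlyRegistry {
  void insert(int /*key*/) {
    // Unconditionally fails: the condition is the literal `false`.
    TORCH_CHECK(false, "Can not insert into a ReadOnlyRegistry");
  }
};
```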
@@ -4868,13 +4865,12 @@ class FrameLocalsGuardAccessor : public GuardAccessor {
   // NB: Intentional duplication between check_nopybind and
   // check_verbose_nopybind.
   bool check_nopybind(PyObject* obj, bool matches_dict_tag = false) override {
-    if (!PyDict_Check(obj)) {
-      // This should not cause guard failure.
-      // If this error is encountered, it probably means
-      // we did not convert FrameLocalsMapping to dict (using to_dict()).
-      throw std::runtime_error(
-          "FrameLocalsGuardAccessor check expected dict() input");
-    }
+    // This should not cause guard failure.
+    // If this error is encountered, it probably means
+    // we did not convert FrameLocalsMapping to dict (using to_dict()).
+    TORCH_CHECK_TYPE(
+        PyDict_Check(obj),
+        "FrameLocalsGuardAccessor check expected dict() input");

     if (matches_dict_tag && _is_immutable_object) {
       // immutable object and dict tag matches, we can skip the guard subtree.
@@ -5320,7 +5316,7 @@ class TensorPropertyGuardAccessor : public GuardAccessor {
     } else if (_prop == TensorProperty::STORAGE_OFFSET) {
       opt_value = tensor.sym_storage_offset().maybe_as_int();
     } else {
-      throw std::runtime_error("Unknown property");
+      TORCH_CHECK(false, "Unknown property");
     }

     if (!opt_value.has_value()) {
@@ -6663,12 +6659,9 @@ double profile_guard_manager(
 } // namespace

 static void* _torchinductor_pyobject_tensor_data_ptr(PyObject* obj) {
-  if (C10_UNLIKELY(
-          obj == nullptr ||
-          (!THPVariable_CheckExact(obj) && !THPVariable_Check(obj)))) {
-    throw std::runtime_error(
-        "_torchinductor_pyobject_tensor_data_ptr: non-tensor input");
-  }
+  TORCH_CHECK(
+      obj != nullptr && (THPVariable_CheckExact(obj) || THPVariable_Check(obj)),
+      "_torchinductor_pyobject_tensor_data_ptr: non-tensor input");
   return THPVariable_Unpack(obj).data_ptr();
 }

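Since `TORCH_CHECK` asserts the success condition while the old code tested the failure condition, hunks like this one negate the predicate via De Morgan's laws: `!(p || (!q && !r))` becomes `!p && (q || r)`. A self-contained sketch verifying the equivalence exhaustively:

```cpp
#include <cassert>

// Checks that the old throw-on-failure predicate and the new
// TORCH_CHECK-style success predicate are exact complements.
int main() {
  for (int p = 0; p <= 1; ++p)
    for (int q = 0; q <= 1; ++q)
      for (int r = 0; r <= 1; ++r) {
        bool fail = p || (!q && !r);  // old: if (fail) throw ...
        bool ok = !p && (q || r);     // new: TORCH_CHECK(ok, ...)
        assert(fail == !ok);
      }
  return 0;
}
```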
@@ -7895,10 +7888,9 @@ PyObject* torch_c_dynamo_guards_init() {
             std::string source,
             py::handle example_value,
             py::handle guard_manager_enum) -> GuardManager* {
-            if (self.is_exact_dict_type()) {
-              throw std::runtime_error(
-                  "getattr_manager on a DictGuardManager is supported only for dict subclasses");
-            }
+            TORCH_CHECK(
+                !self.is_exact_dict_type(),
+                "getattr_manager on a DictGuardManager is supported only for dict subclasses");
             return self.get_child_manager<GetAttrGuardAccessor>(
                 std::move(attr_name),
                 std::move(source),
@@ -7936,16 +7928,15 @@ PyObject* torch_c_dynamo_guards_init() {
 #if IS_PYTHON_3_12_PLUS

   dict_version_watcher_id = PyDict_AddWatcher(dict_version_watch_callback);
-  if (dict_version_watcher_id == -1) {
-    throw std::runtime_error("Failed to install dict_version_watch_callback");
-  }
+  TORCH_CHECK(
+      dict_version_watcher_id != -1,
+      "Failed to install dict_version_watch_callback");

   dict_recursive_tag_watcher_id =
       PyDict_AddWatcher(dict_recursive_tag_watch_callback);
-  if (dict_recursive_tag_watcher_id == -1) {
-    throw std::runtime_error(
-        "Failed to install dict_recursive_tag_watch_callback");
-  }
+  TORCH_CHECK(
+      dict_recursive_tag_watcher_id != -1,
+      "Failed to install dict_recursive_tag_watch_callback");

 #endif

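For background, the watcher IDs checked here come from CPython 3.12's dict-watcher API: `PyDict_AddWatcher` registers a callback and returns a watcher ID (or -1 on failure), and `PyDict_Watch` attaches that watcher to a specific dict. A hedged usage sketch (the callback body is illustrative, not PyTorch's actual callback):

```cpp
#include <Python.h>

static int my_watch_callback(
    PyDict_WatchEvent event,
    PyObject* dict,
    PyObject* key,
    PyObject* new_value) {
  // Invoked on dict mutation events (e.g. PyDict_EVENT_MODIFIED).
  return 0;  // 0 signals success
}

static bool install_watcher(PyObject* some_dict) {
  int watcher_id = PyDict_AddWatcher(my_watch_callback);  // -1 on failure
  if (watcher_id == -1) {
    return false;
  }
  // Attach the watcher to one specific dict; returns 0 on success.
  return PyDict_Watch(watcher_id, some_dict) == 0;
}
```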
@@ -1,3 +1,4 @@
+#include <c10/util/Exception.h>
 #include <torch/csrc/dynamo/init.h>
 #include <torch/csrc/dynamo/utils.h>

@@ -111,7 +112,7 @@ THPObjectPtr _unicode_dispatch(PyObject* str) {
       return F::apply(str, PyUnicode_4BYTE_DATA(str), length);
     default:
       // This should be impossible - throw to make the compiler happy.
-      throw std::runtime_error("unreachable");
+      TORCH_CHECK(false, "unreachable");
   }
 }

@@ -1,3 +1,4 @@
+#include <c10/util/Exception.h>
 #include <c10/util/error.h>
 #include <torch/csrc/instruction_counter/Module.h>
 #include <torch/csrc/utils/pybind.h>
@@ -20,7 +21,7 @@ namespace torch::instruction_counter {

 static long start() {
 #if !defined(__linux__)
-  throw std::runtime_error("This systems seems not to be Linux");
+  TORCH_CHECK(false, "This systems seems not to be Linux");
 #else

   // Construct base perf_event_attr struct
@@ -51,7 +52,7 @@ static long start() {

 static uint64_t end(int fd) {
 #if !defined(__linux__)
-  throw std::runtime_error("This systems seems not to be Linux");
+  TORCH_CHECK(false, "This systems seems not to be Linux");
 #else
   // Disable the event group
   if (ioctl(fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP) == -1) {
@@ -1,3 +1,4 @@
+#include <c10/util/Exception.h>
 #include <torch/csrc/lazy/core/multi_wait.h>

 #include <chrono>
@@ -31,7 +32,7 @@ void MultiWait::Wait(double wait_seconds) {
   if (!cv_.wait_for(lock, std::chrono::duration<double>(wait_seconds), [this] {
         return completed_count_ >= count_;
       })) {
-    throw std::runtime_error("Timeout");
+    TORCH_CHECK(false, "Timeout");
   }
   if (exptr_ != nullptr) {
     std::rethrow_exception(exptr_);
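The guarded call above is the predicate overload of `std::condition_variable::wait_for`, which returns `false` only when the timeout expires with the predicate still unsatisfied. A self-contained sketch of the same waiting pattern (the struct and field names are illustrative):

```cpp
#include <chrono>
#include <condition_variable>
#include <mutex>

// Minimal sketch of a timed wait on a counter, mirroring MultiWait::Wait.
struct Waiter {
  std::mutex mu;
  std::condition_variable cv;
  size_t completed = 0;
  size_t expected = 1;

  // Returns true if all work completed before the deadline.
  bool wait_seconds(double secs) {
    std::unique_lock<std::mutex> lock(mu);
    // The predicate guards against spurious wakeups; wait_for returns the
    // predicate's final value when the timeout expires.
    return cv.wait_for(lock, std::chrono::duration<double>(secs), [this] {
      return completed >= expected;
    });
  }
};
```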
@@ -1,4 +1,5 @@
 #include <c10/core/ScalarType.h>
+#include <c10/util/Exception.h>
 #include <torch/csrc/lazy/ts_backend/ts_backend_impl.h>
 #include <torch/csrc/lazy/ts_backend/ts_lowering_context.h>
 #include <torch/csrc/lazy/ts_backend/ts_node.h>
@@ -44,8 +45,8 @@ void TSLoweringContext::Lower(const Node* node) {
       AssignOutputOp(torch::lazy::Output(node, i), ops[i]);
     }
   } else {
-    throw std::runtime_error(
-        "Expected torch::lazy::TsNode but could not dynamic cast");
+    TORCH_CHECK(
+        false, "Expected torch::lazy::TsNode but could not dynamic cast");
   }
 }

@@ -2,6 +2,7 @@

 #include <sstream>

+#include <c10/util/Exception.h>
 #include <torch/csrc/api/include/torch/jit.h>
 #include <torch/csrc/jit/runtime/graph_executor.h>
 #include <torch/csrc/lazy/backend/lowering_context.h>
@@ -26,8 +27,8 @@ class TORCH_API TSComputation : public Computation {
   }

   const std::vector<Shape>& parameter_shapes() const override {
-    throw std::runtime_error(
-        "TODO(whc) implement TS computation shapes or change interface");
+    TORCH_CHECK(
+        false, "TODO(whc) implement TS computation shapes or change interface");
     return parameter_shapes_;
   }

@@ -36,8 +37,8 @@ class TORCH_API TSComputation : public Computation {
   }

   const Shape& result_shape() const override {
-    throw std::runtime_error(
-        "TODO(whc) implement TS computation shapes or change interface");
+    TORCH_CHECK(
+        false, "TODO(whc) implement TS computation shapes or change interface");
     return result_shape_;
   }

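Notice that the unreachable `return result_shape_;` survives the rewrite in both hunks above. One plausible reason, sketched below with a hypothetical function: `TORCH_CHECK` expands to a conditional branch rather than a bare `throw`, so keeping the trailing return avoids missing-return diagnostics on toolchains that do not propagate the macro's noreturn failure path.

```cpp
#include <c10/util/Exception.h>

// Hypothetical reference-returning function. TORCH_CHECK(false, ...) always
// throws at runtime, but it is an if-statement under the hood, so the
// trailing return keeps the return path well-formed for every compiler.
const int& always_fails(const int& fallback) {
  TORCH_CHECK(false, "not implemented");
  return fallback;  // never reached at runtime
}
```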
@@ -1,3 +1,4 @@
+#include <c10/util/Exception.h>
 #include <torch/csrc/monitor/counters.h>

 #include <unordered_set>
@@ -21,8 +22,10 @@ const char* aggregationName(Aggregation agg) {
     case Aggregation::MIN:
       return "min";
     default:
-      throw std::runtime_error(
-          "unknown aggregation: " + std::to_string(static_cast<int>(agg)));
+      TORCH_CHECK(
+          false,
+          "unknown aggregation: ",
+          std::to_string(static_cast<int>(agg)));
   }
 }

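Besides swapping the throw, this hunk passes the message in pieces: `TORCH_CHECK` takes a variadic message list that is concatenated, and evaluated, only when the check fails, so no string is built on the passing path. A small sketch (the function is hypothetical; the diff above keeps `std::to_string`, though a numeric argument would be stringified automatically):

```cpp
#include <c10/util/Exception.h>

void check_aggregation(bool known, int agg) {
  // The trailing arguments are joined into one message on failure only.
  TORCH_CHECK(known, "unknown aggregation: ", agg);
}
```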
@@ -1,3 +1,4 @@
+#include <c10/util/Exception.h>
 #include <utility>

 #include <c10/util/WaitCounter.h>
@@ -58,7 +59,7 @@ struct type_caster<torch::monitor::data_value_t> {
       std::string& str = std::get<std::string>(src);
       return THPUtils_packString(str);
     }
-    throw std::runtime_error("unknown data_value_t type");
+    TORCH_CHECK(false, "unknown data_value_t type");
   }
 };
 } // namespace pybind11::detail
@@ -2,6 +2,7 @@

 #include <ATen/record_function.h>
 #include <c10/core/impl/PyInterpreter.h>
+#include <c10/util/Exception.h>
 #include <c10/util/overloaded.h>
 #include <torch/csrc/DynamicTypes.h>
 #include <torch/csrc/autograd/utils/wrap_outputs.h>
@@ -440,9 +441,7 @@ void initPythonBindings(PyObject* module) {
               p.performance_events);
         },
         [](const py::tuple& t) { // __setstate__
-          if (t.size() >= 5) {
-            throw std::runtime_error("Expected at least 5 values in state");
-          }
+          TORCH_CHECK(t.size() < 5, "Expected at least 5 values in state");

           py::list py_metrics = t[0].cast<py::list>();
           std::vector<std::string> metrics{py_metrics.size()};
@@ -36,7 +36,7 @@ static void cudaCheck(cudaError_t result, const char* file, int line) {
   } else {
     ss << cudaGetErrorString(result);
   }
-  throw std::runtime_error(ss.str());
+  TORCH_CHECK(false, ss.str());
 }
 }
 #define TORCH_CUDA_CHECK(result) cudaCheck(result, __FILE__, __LINE__);