From b0556110e58e3bcf2c872b933e4fd4a0d34398ad Mon Sep 17 00:00:00 2001 From: cyy Date: Fri, 11 Jul 2025 18:22:48 +0000 Subject: [PATCH] Remove unsafe PyTorchError constructor (#154961) Use libfmt in call sites of PyTorchError. Pull Request resolved: https://github.com/pytorch/pytorch/pull/154961 Approved by: https://github.com/albanD --- torch/CMakeLists.txt | 5 +- torch/csrc/Device.cpp | 4 +- torch/csrc/Exceptions.cpp | 18 --- torch/csrc/Exceptions.h | 13 +- torch/csrc/Generator.cpp | 14 +- torch/csrc/api/include/torch/python.h | 4 +- torch/csrc/autograd/python_function.cpp | 12 +- .../csrc/autograd/python_legacy_variable.cpp | 12 +- .../autograd/python_variable_indexing.cpp | 13 +- torch/csrc/utils/disable_torch_function.cpp | 11 +- torch/csrc/utils/python_arg_parser.cpp | 132 ++++++++++-------- torch/csrc/utils/python_arg_parser.h | 41 +++--- torch/csrc/utils/tensor_new.cpp | 6 +- torch/csrc/utils/tensor_numpy.cpp | 34 +++-- 14 files changed, 173 insertions(+), 146 deletions(-) diff --git a/torch/CMakeLists.txt b/torch/CMakeLists.txt index b07df3007f57..bc92f97b3956 100644 --- a/torch/CMakeLists.txt +++ b/torch/CMakeLists.txt @@ -506,10 +506,11 @@ if(NOT ${CMAKE_SYSTEM_NAME} MATCHES "Darwin") ) # Pybind11 requires explicit linking of the torch_python library if(BUILD_LIBTORCHLESS) - target_link_libraries(nnapi_backend PRIVATE ${TORCH_LIB} torch_python pybind::pybind11) + target_link_libraries(nnapi_backend PRIVATE ${TORCH_LIB}) else() - target_link_libraries(nnapi_backend PRIVATE torch torch_python pybind::pybind11) + target_link_libraries(nnapi_backend PRIVATE torch) endif() + target_link_libraries(nnapi_backend PRIVATE torch_python pybind::pybind11 fmt::fmt-header-only) endif() set(TORCH_PYTHON_COMPILE_OPTIONS ${TORCH_PYTHON_COMPILE_OPTIONS} PARENT_SCOPE) diff --git a/torch/csrc/Device.cpp b/torch/csrc/Device.cpp index 4e31c22c2e53..53aca5ae8e31 100644 --- a/torch/csrc/Device.cpp +++ b/torch/csrc/Device.cpp @@ -141,9 +141,9 @@ static PyObject* THPDevice_rc(PyObject* a, PyObject* b, int op) { case Py_LE: case Py_GT: case Py_GE: - throw torch::TypeError("comparison not implemented"); + TORCH_CHECK_TYPE(false, "comparison not implemented"); default: - throw torch::TypeError("unexpected comparison op"); + TORCH_CHECK_TYPE(false, "unexpected comparison op"); } END_HANDLE_TH_ERRORS } diff --git a/torch/csrc/Exceptions.cpp b/torch/csrc/Exceptions.cpp index 80ee9630dcf5..77085a946399 100644 --- a/torch/csrc/Exceptions.cpp +++ b/torch/csrc/Exceptions.cpp @@ -228,17 +228,6 @@ std::string processErrorMsg(std::string str) { return str; } -static std::string formatMessage(const char* format, va_list fmt_args) { - constexpr size_t ERROR_BUF_SIZE = 1024; - std::string error_buf(ERROR_BUF_SIZE, '\0'); - auto res = vsnprintf(error_buf.data(), ERROR_BUF_SIZE, format, fmt_args); - if (res < 0) { - res = 0; - } - error_buf.resize(res); - return error_buf; -} - void translate_exception_to_python(const std::exception_ptr& e_ptr) { try { TORCH_INTERNAL_ASSERT( @@ -250,13 +239,6 @@ void translate_exception_to_python(const std::exception_ptr& e_ptr) { CATCH_ALL_ERRORS(return) } -TypeError::TypeError(const char* format, ...) 
{ - va_list fmt_args{}; - va_start(fmt_args, format); - msg = formatMessage(format, fmt_args); - va_end(fmt_args); -} - void PyWarningHandler::InternalHandler::process(const c10::Warning& warning) { warning_buffer_.push_back(warning); } diff --git a/torch/csrc/Exceptions.h b/torch/csrc/Exceptions.h index d74447de83f4..8df6ea24a4bb 100644 --- a/torch/csrc/Exceptions.h +++ b/torch/csrc/Exceptions.h @@ -283,19 +283,12 @@ struct PyTorchError : public std::exception { std::string msg; }; -// Declare a printf-like function on gcc & clang -// The compiler can then warn on invalid format specifiers -#ifdef __GNUC__ -#define TORCH_FORMAT_FUNC(FORMAT_INDEX, VA_ARGS_INDEX) \ - __attribute__((format(printf, FORMAT_INDEX, VA_ARGS_INDEX))) -#else -#define TORCH_FORMAT_FUNC(FORMAT_INDEX, VA_ARGS_INDEX) -#endif - // Translates to Python TypeError struct TypeError : public PyTorchError { + TORCH_PYTHON_API TypeError() = default; + TORCH_PYTHON_API TypeError(std::string msg_) + : PyTorchError(std::move(msg_)) {} using PyTorchError::PyTorchError; - TORCH_PYTHON_API TypeError(const char* format, ...) TORCH_FORMAT_FUNC(2, 3); PyObject* python_type() override { return PyExc_TypeError; } diff --git a/torch/csrc/Generator.cpp b/torch/csrc/Generator.cpp index ce2b4789e442..d99d41ae3d35 100644 --- a/torch/csrc/Generator.cpp +++ b/torch/csrc/Generator.cpp @@ -82,9 +82,11 @@ static PyObject* THPGenerator_setState(PyObject* _self, PyObject* _new_state) { HANDLE_TH_ERRORS if (!THPVariable_Check(_new_state)) { - throw torch::TypeError( - "expected a torch.ByteTensor, but got %s", - Py_TYPE(_new_state)->tp_name); + TORCH_CHECK_TYPE( + false, + fmt::format( + "expected a torch.ByteTensor, but got {}", + Py_TYPE(_new_state)->tp_name)); } auto self = (THPGenerator*)_self; auto& gen = self->cdata; @@ -380,8 +382,10 @@ PyObject* THPGenerator_Wrap(const Generator& gen) { at::Generator THPGenerator_Unwrap(PyObject* state) { if (!Py_IS_TYPE(state, &THPGeneratorType)) { - throw torch::TypeError( - "expected a Generator, but got %s", Py_TYPE(state)->tp_name); + TORCH_CHECK_TYPE( + false, + fmt::format( + "expected a Generator, but got {}", Py_TYPE(state)->tp_name)); } return reinterpret_cast(state)->cdata; } diff --git a/torch/csrc/api/include/torch/python.h b/torch/csrc/api/include/torch/python.h index 1d65bc221fd5..4878b1cc851a 100644 --- a/torch/csrc/api/include/torch/python.h +++ b/torch/csrc/api/include/torch/python.h @@ -26,7 +26,7 @@ inline Device py_object_to_device(py::object object) { if (THPDevice_Check(obj)) { return reinterpret_cast(obj)->device; } - throw TypeError("Expected device"); + TORCH_CHECK_TYPE(false, "Expected device"); } inline Dtype py_object_to_dtype(py::object object) { @@ -34,7 +34,7 @@ inline Dtype py_object_to_dtype(py::object object) { if (THPDtype_Check(obj)) { return reinterpret_cast(obj)->scalar_type; } - throw TypeError("Expected dtype"); + TORCH_CHECK_TYPE(false, "Expected dtype"); } template diff --git a/torch/csrc/autograd/python_function.cpp b/torch/csrc/autograd/python_function.cpp index dcbbcd550e2a..089c0571aea4 100644 --- a/torch/csrc/autograd/python_function.cpp +++ b/torch/csrc/autograd/python_function.cpp @@ -793,11 +793,13 @@ static void _get_tensors_to_save( if (is_executable) { // TODO: We should really just ALWAYS throw an error here, but // doing so will break some internal tests. We should fix those. 
- throw torch::TypeError( - "save_for_backward can only save variables, but argument %ld is of " - "type %s", - i, - Py_TYPE(obj)->tp_name); + TORCH_CHECK_TYPE( + false, + fmt::format( + "save_for_backward can only save variables, but argument {} is of " + "type {}", + i, + Py_TYPE(obj)->tp_name)); } } } diff --git a/torch/csrc/autograd/python_legacy_variable.cpp b/torch/csrc/autograd/python_legacy_variable.cpp index 3a068c53eb50..ee00008c94bb 100644 --- a/torch/csrc/autograd/python_legacy_variable.cpp +++ b/torch/csrc/autograd/python_legacy_variable.cpp @@ -1,6 +1,7 @@ #include #include +#include #include #include @@ -57,8 +58,9 @@ static PyObject* THPVariable_pynew( !is_volatile || !requires_grad, "Variable can't be volatile and require_grad at the same time!"); if (grad_fn && !THPFunction_Check(grad_fn)) { - throw TypeError( - "_grad_fn has to be a Function object or None, but got %s", + TORCH_CHECK_TYPE( + false, + "_grad_fn has to be a Function object or None, but got ", Py_TYPE(grad_fn)->tp_name); } Variable var; @@ -74,8 +76,10 @@ static PyObject* THPVariable_pynew( } else if (THPVariable_Check(data)) { var = THPVariable_Unpack(data).detach(); } else { - throw torch::TypeError( - "Variable data has to be a tensor, but got %s", Py_TYPE(data)->tp_name); + TORCH_CHECK_TYPE( + false, + "Variable data has to be a tensor, but got ", + Py_TYPE(data)->tp_name); } // We set `tensor`'s `allow_tensor_metadata_change` to true here, because we // want to allow the following use case for backward compatibility: diff --git a/torch/csrc/autograd/python_variable_indexing.cpp b/torch/csrc/autograd/python_variable_indexing.cpp index 3aa241b06f3a..9dd811eabe79 100644 --- a/torch/csrc/autograd/python_variable_indexing.cpp +++ b/torch/csrc/autograd/python_variable_indexing.cpp @@ -29,6 +29,7 @@ #include #include +#include using namespace at; using namespace torch::autograd::utils; @@ -123,10 +124,12 @@ inline Variable valueToTensor( } else if (torch::is_symbool(value)) { scalar = Scalar(py::cast(py::handle(value))); } else { - throw TypeError( - "can't assign a %s to a %s", + TORCH_CHECK_TYPE( + false, + "can't assign a ", Py_TYPE(value)->tp_name, - torch::utils::options_to_string(options).c_str()); + " to a ", + torch::utils::options_to_string(options)); } // lift_fresh is supposed to be used in situations where you are guaranteed to // get a plain Tensor which is not true for cpu device but not for non cpu @@ -443,7 +446,7 @@ static void dispatch_set_item( int THPVariable_setitem(PyObject* self, PyObject* index, PyObject* py_value) { HANDLE_TH_ERRORS if (py_value == nullptr) { - throw TypeError("Tensor does not support deleting items"); + TORCH_CHECK_TYPE(false, "Tensor does not support deleting items"); } if ((check_has_torch_function(self)) || (check_has_torch_function(py_value))) { @@ -456,7 +459,7 @@ int THPVariable_setitem(PyObject* self, PyObject* index, PyObject* py_value) { if (self_.layout() == kSparse || self_.layout() == kSparseCsr || self_.layout() == kSparseCsc || self_.layout() == kSparseBsr || self_.layout() == kSparseBsc) { - throw TypeError("Cannot assign to a sparse tensor"); + TORCH_CHECK_TYPE(false, "Cannot assign to a sparse tensor"); } OptionalDeviceGuard device_guard(device_of(self_)); at::Device self_device = self_.device(); diff --git a/torch/csrc/utils/disable_torch_function.cpp b/torch/csrc/utils/disable_torch_function.cpp index cbe2b1203e98..9dc6e9777a36 100644 --- a/torch/csrc/utils/disable_torch_function.cpp +++ b/torch/csrc/utils/disable_torch_function.cpp @@ -5,6 +5,7 @@ 
#include #include +#include namespace torch { static PyObject* disabled_torch_function = nullptr; @@ -219,8 +220,9 @@ PyObject* THPModule_disable_torch_function(PyObject* self, PyObject* a) { } else if (PyTuple_Check(args)) { py_args = py::reinterpret_borrow(args); } else { - throw torch::TypeError( - "expected List or Tuple (got %s)", Py_TYPE(args)->tp_name); + TORCH_CHECK_TYPE( + false, + fmt::format("expected List or Tuple (got {})", Py_TYPE(args)->tp_name)); } // These are all C-API calls so no exceptions will be raised @@ -253,8 +255,9 @@ PyObject* THPModule_disable_torch_dispatch(PyObject* self, PyObject* a) { } else if (PyTuple_Check(args)) { py_args = py::reinterpret_borrow(args); } else { - throw torch::TypeError( - "expected List or Tuple (got %s)", Py_TYPE(args)->tp_name); + TORCH_CHECK_TYPE( + false, + fmt::format("expected List or Tuple (got {})", Py_TYPE(args)->tp_name)); } // This implementation is not completely correct. The moral diff --git a/torch/csrc/utils/python_arg_parser.cpp b/torch/csrc/utils/python_arg_parser.cpp index 36a68a2449b4..8a16b0211dce 100644 --- a/torch/csrc/utils/python_arg_parser.cpp +++ b/torch/csrc/utils/python_arg_parser.cpp @@ -1417,20 +1417,24 @@ std::string FunctionSignature::toString() const { const auto min_args = signature.min_args; const long nargs_ = nargs; if (min_args != max_pos_args) { - throw TypeError( - "%s() takes from %zu to %zu positional arguments but %ld were given", - signature.name.c_str(), - min_args, - max_pos_args, - nargs_); + TORCH_CHECK_TYPE( + false, + fmt::format( + "{}() takes from {} to {} positional arguments but {} were given", + signature.name, + min_args, + max_pos_args, + nargs_)); } - throw TypeError( - "%s() takes %zu positional argument%s but %ld %s given", - signature.name.c_str(), - max_pos_args, - max_pos_args == 1 ? "" : "s", - nargs_, - nargs == 1 ? "was" : "were"); + TORCH_CHECK_TYPE( + false, + fmt::format( + "{}() takes {} positional argument{} but {} {} given", + signature.name, + max_pos_args, + max_pos_args == 1 ? "" : "s", + nargs_, + nargs == 1 ? "was" : "were")); } [[noreturn]] static void missing_args( @@ -1450,12 +1454,14 @@ std::string FunctionSignature::toString() const { } } - throw TypeError( - "%s() missing %d required positional argument%s: %s", - signature.name.c_str(), - num_missing, - num_missing == 1 ? "s" : "", - ss.str().c_str()); + TORCH_CHECK_TYPE( + false, + fmt::format( + "{}() missing {} required positional argument{}: {}", + signature.name, + num_missing, + num_missing == 1 ? "s" : "", + ss.str())); } static Py_ssize_t find_param(FunctionSignature& signature, PyObject* name) { @@ -1484,27 +1490,31 @@ static Py_ssize_t find_param(FunctionSignature& signature, PyObject* name) { // accessible within this thread. 
while (PyDict_Next(kwargs, &pos, &key, &value)) { if (!THPUtils_checkString(key)) { - throw TypeError("keywords must be strings"); + TORCH_CHECK_TYPE(false, "keywords must be strings"); } auto param_idx = find_param(signature, key); if (param_idx < 0) { - throw TypeError( - "%s() got an unexpected keyword argument '%s'", - signature.name.c_str(), - THPUtils_unpackString(key).c_str()); + TORCH_CHECK_TYPE( + false, + fmt::format( + "{}() got an unexpected keyword argument '{}'", + signature.name, + THPUtils_unpackString(key))); } if (param_idx < num_pos_args) { - throw TypeError( - "%s() got multiple values for argument '%s'", - signature.name.c_str(), - THPUtils_unpackString(key).c_str()); + TORCH_CHECK_TYPE( + false, + fmt::format( + "{}() got multiple values for argument '{}'", + signature.name, + THPUtils_unpackString(key))); } } // this should never be hit - throw TypeError("invalid keyword arguments"); + TORCH_CHECK_TYPE(false, "invalid keyword arguments"); } bool FunctionSignature::parse( @@ -1591,12 +1601,14 @@ bool FunctionSignature::parse( } else if (raise_exception) { if (is_kwd) { // foo(): argument 'other' must be str, not int - throw TypeError( - "%s(): argument '%s' must be %s, not %s", - name.c_str(), - param.name.c_str(), - param.type_name().c_str(), - Py_TYPE(obj)->tp_name); + TORCH_CHECK_TYPE( + false, + fmt::format( + "{}(): argument '{}' must be {}, not {}", + name, + param.name, + param.type_name(), + Py_TYPE(obj)->tp_name)); } else { // foo(): argument 'other' (position 2) must be str, not int if (failed_idx != -1) { @@ -1605,25 +1617,29 @@ bool FunctionSignature::parse( obj = args; } TORCH_INTERNAL_ASSERT(failed_idx < PySequence_Size(obj)); - throw TypeError( - "%s(): argument '%s' (position %ld) must be %s, but found element of type %s at pos %ld", - name.c_str(), - param.name.c_str(), - static_cast(arg_pos + 1), - param.type_name().c_str(), - Py_TYPE(py::reinterpret_steal( - PySequence_GetItem(obj, failed_idx)) - .ptr()) - ->tp_name, - static_cast(failed_idx)); + TORCH_CHECK_TYPE( + false, + fmt::format( + "{}(): argument '{}' (position {}) must be {}, but found element of type {} at pos {}", + name, + param.name, + arg_pos + 1, + param.type_name(), + Py_TYPE(py::reinterpret_steal( + PySequence_GetItem(obj, failed_idx)) + .ptr()) + ->tp_name, + failed_idx)); } - throw TypeError( - "%s(): argument '%s' (position %ld) must be %s, not %s", - name.c_str(), - param.name.c_str(), - static_cast(arg_pos + 1), - param.type_name().c_str(), - Py_TYPE(obj)->tp_name); + TORCH_CHECK_TYPE( + false, + fmt::format( + "{}(): argument '{}' (position {}) must be {}, not {}", + name, + param.name, + arg_pos + 1, + param.type_name(), + Py_TYPE(obj)->tp_name)); } } else { return false; @@ -1745,7 +1761,7 @@ void PythonArgParser::print_error( auto options = get_signatures(); auto msg = torch::format_invalid_args(args, kwargs, function_name + "()", options); - throw TypeError("%s", msg.c_str()); + TORCH_CHECK_TYPE(false, msg); } std::vector PythonArgParser::get_signatures() const { @@ -1812,8 +1828,12 @@ at::Tensor PythonArgs::tensor_slow(int i) { // a test for Py_None here; instead, you need to mark the argument // as *allowing none*; you can do this by writing 'Tensor?' instead // of 'Tensor' in the ATen metadata. 
- throw TypeError( - "expected Tensor as argument %d, but got %s", i, Py_TYPE(obj)->tp_name); + TORCH_CHECK_TYPE( + false, + fmt::format( + "expected Tensor as argument {}, but got {}", + i, + Py_TYPE(obj)->tp_name)); } at::AutoDispatchBelowADInplaceOrView guard; // TODO: remove at::tracer::impl::NoTracerDispatchMode tracer_guard; diff --git a/torch/csrc/utils/python_arg_parser.h b/torch/csrc/utils/python_arg_parser.h index 72edd3843350..bc281f2512a5 100644 --- a/torch/csrc/utils/python_arg_parser.h +++ b/torch/csrc/utils/python_arg_parser.h @@ -39,6 +39,7 @@ // Scalar and Tensor, UNLESS they require grad (in which case // they only bind to Tensor). +#include #include #include @@ -490,7 +491,9 @@ inline std::array PythonArgs::tensorlist_n(int i) { // NOLINTNEXTLINE(bugprone-branch-clone) auto size = tuple ? PyTuple_GET_SIZE(arg.get()) : PyList_GET_SIZE(arg.get()); if (size != N) { - throw TypeError("expected tuple of %d elements but got %d", N, (int)size); + TORCH_CHECK_TYPE( + false, + fmt::format("expected tuple of {} elements but got {}", N, size)); } for (const auto idx : c10::irange(size)) { PyObject* obj = tuple ? PyTuple_GET_ITEM(arg.get(), idx) @@ -528,12 +531,14 @@ inline void throw_intlist_exception( ? e.what() : std::string("type must be ") + args->signature.params[i].type_name() + ",but got " + Py_TYPE(obj)->tp_name; - throw TypeError( - "%s(): argument '%s' failed to unpack the object at pos %zu with error \"%s\"", - args->signature.name.c_str(), - args->signature.params[i].name.c_str(), - idx + 1, - error.c_str()); + TORCH_CHECK_TYPE( + false, + fmt::format( + "{}(): argument '{}' failed to unpack the object at pos {} with error \"{}\"", + args->signature.name, + args->signature.params[i].name, + idx + 1, + error)); } inline std::vector PythonArgs::symintlist(int i) { @@ -712,13 +717,15 @@ inline std::vector PythonArgs::getDoublelist(int i) { res[idx] = THPUtils_unpackDouble(obj); } } catch (const std::exception&) { - throw TypeError( - "%s(): argument '%s' must be %s, but found element of type %s at pos %zu", - signature.name.c_str(), - signature.params[i].name.c_str(), - signature.params[i].type_name().c_str(), - Py_TYPE(obj)->tp_name, - idx + 1); + TORCH_CHECK_TYPE( + false, + fmt::format( + "{}(): argument '{}' must be {}, but found element of type {} at pos {}", + signature.name, + signature.params[i].name, + signature.params[i].type_name(), + Py_TYPE(obj)->tp_name, + idx + 1)); } } return res; @@ -1119,8 +1126,10 @@ inline c10::Stream PythonArgs::stream(int i) { return c10::Stream( c10::Stream::Default::DEFAULT, c10::Device(c10::DeviceType::CPU, -1)); if (!THPStream_Check(args[i])) { - throw TypeError( - "expected Stream object. Got '%s'", Py_TYPE(args[i])->tp_name); + TORCH_CHECK_TYPE( + false, + fmt::format( + "expected Stream object. Got '{}'", Py_TYPE(args[i])->tp_name)); } return c10::Stream::unpack3( ((THPStream*)args[i])->stream_id, diff --git a/torch/csrc/utils/tensor_new.cpp b/torch/csrc/utils/tensor_new.cpp index f41a1e250e54..45f58cde9a65 100644 --- a/torch/csrc/utils/tensor_new.cpp +++ b/torch/csrc/utils/tensor_new.cpp @@ -670,11 +670,13 @@ Tensor legacy_sparse_tensor_generic_ctor_new( // new(sequence) binds to this signature but should be treated differently // unless the sequences is a torch.Size if (ctor_or_new == CtorOrNew::CTOR) { - throw TypeError( + TORCH_CHECK_TYPE( + false, "torch.sparse.SparseTensor(sequence) only accepts sizes. 
Please use torch.sparse_coo_tensor() " "or construct a strided tensor and convert it to sparse via to_sparse."); } else { - throw TypeError( + TORCH_CHECK_TYPE( + false, "SparseTensor.new(sequence) only accepts sizes. Please use torch.sparse_coo_tensor() " "or construct a strided tensor and convert it to sparse via to_sparse."); } diff --git a/torch/csrc/utils/tensor_numpy.cpp b/torch/csrc/utils/tensor_numpy.cpp index ada10b665d05..c8548884692f 100644 --- a/torch/csrc/utils/tensor_numpy.cpp +++ b/torch/csrc/utils/tensor_numpy.cpp @@ -1,3 +1,4 @@ +#include #include #include #define WITH_NUMPY_IMPORT_ARRAY @@ -105,7 +106,7 @@ static std::vector to_aten_shape(int ndim, npy_intp* values) { static std::vector seq_to_aten_shape(PyObject* py_seq) { int ndim = PySequence_Length(py_seq); if (ndim == -1) { - throw TypeError("shape and strides must be sequences"); + TORCH_CHECK_TYPE(false, "shape and strides must be sequences"); } auto result = std::vector(ndim); for (const auto i : c10::irange(ndim)) { @@ -303,7 +304,8 @@ int aten_to_numpy_dtype(const ScalarType scalar_type) { case kBool: return NPY_BOOL; default: - throw TypeError("Got unsupported ScalarType %s", toString(scalar_type)); + TORCH_CHECK_TYPE( + false, "Got unsupported ScalarType ", toString(scalar_type)); } } @@ -355,10 +357,12 @@ ScalarType numpy_dtype_to_aten(int dtype) { auto pytype = THPObjectPtr(PyArray_TypeObjectFromType(dtype)); if (!pytype) throw python_error(); - throw TypeError( - "can't convert np.ndarray of type %s. The only supported types are: " - "float64, float32, float16, complex64, complex128, int64, int32, int16, int8, uint64, uint32, uint16, uint8, and bool.", - ((PyTypeObject*)pytype.get())->tp_name); + TORCH_CHECK_TYPE( + false, + fmt::format( + "can't convert np.ndarray of type {}. 
The only supported types are: " + "float64, float32, float16, complex64, complex128, int64, int32, int16, int8, uint64, uint32, uint16, uint8, and bool.", + ((PyTypeObject*)pytype.get())->tp_name)); } bool is_numpy_int(PyObject* obj) { @@ -385,7 +389,7 @@ at::Tensor tensor_from_cuda_array_interface(PyObject* obj) { TORCH_INTERNAL_ASSERT(cuda_dict); if (!PyDict_Check(cuda_dict.get())) { - throw TypeError("`__cuda_array_interface__` must be a dict"); + TORCH_CHECK_TYPE(false, "`__cuda_array_interface__` must be a dict"); } // Extract the `obj.__cuda_array_interface__['shape']` attribute @@ -396,7 +400,7 @@ at::Tensor tensor_from_cuda_array_interface(PyObject* obj) { throw python_error(); } if (py_shape == nullptr) { - throw TypeError("attribute `shape` must exist"); + TORCH_CHECK_TYPE(false, "attribute `shape` must exist"); } sizes = seq_to_aten_shape(py_shape); } @@ -410,7 +414,7 @@ at::Tensor tensor_from_cuda_array_interface(PyObject* obj) { throw python_error(); } if (py_typestr == nullptr) { - throw TypeError("attribute `typestr` must exist"); + TORCH_CHECK_TYPE(false, "attribute `typestr` must exist"); } PyArray_Descr* descr = nullptr; TORCH_CHECK_VALUE( @@ -432,10 +436,10 @@ at::Tensor tensor_from_cuda_array_interface(PyObject* obj) { throw python_error(); } if (py_data == nullptr) { - throw TypeError("attribute `shape` data exist"); + TORCH_CHECK_TYPE(false, "attribute `shape` data exist"); } if (!PyTuple_Check(py_data) || PyTuple_GET_SIZE(py_data) != 2) { - throw TypeError("`data` must be a 2-tuple of (int, bool)"); + TORCH_CHECK_TYPE(false, "`data` must be a 2-tuple of (int, bool)"); } data_ptr = PyLong_AsVoidPtr(PyTuple_GET_ITEM(py_data, 0)); if (data_ptr == nullptr && PyErr_Occurred()) { @@ -446,8 +450,8 @@ at::Tensor tensor_from_cuda_array_interface(PyObject* obj) { throw python_error(); } if (read_only) { - throw TypeError( - "the read only flag is not supported, should always be False"); + TORCH_CHECK_TYPE( + false, "the read only flag is not supported, should always be False"); } } @@ -461,8 +465,8 @@ at::Tensor tensor_from_cuda_array_interface(PyObject* obj) { if (py_strides != nullptr && py_strides != Py_None) { if (PySequence_Length(py_strides) == -1 || static_cast(PySequence_Length(py_strides)) != sizes.size()) { - throw TypeError( - "strides must be a sequence of the same length as shape"); + TORCH_CHECK_TYPE( + false, "strides must be a sequence of the same length as shape"); } strides = seq_to_aten_shape(py_strides);
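
---

Note (not part of the change set): the pattern introduced throughout this patch is the same at every call site — build the message with fmt::format and raise it through TORCH_CHECK_TYPE(false, ...) instead of the removed printf-style TypeError(const char* format, ...) constructor. The sketch below is illustrative only; it assumes just that the fmt library is available (the CMake hunk above adds fmt::fmt-header-only to the nnapi_backend link line) and uses a hypothetical stand-in TypeError type rather than torch's, to show why the new shape is safer: fmt validates the format arguments and builds the string directly, with no fixed 1024-byte vsnprintf buffer like the one the removed formatMessage helper used.

```cpp
// Illustrative sketch only -- not part of the patch.
#include <fmt/format.h>

#include <exception>
#include <iostream>
#include <string>
#include <utility>

// Hypothetical stand-in for torch::TypeError after this change: it only
// accepts an already-formatted std::string, so callers format up front.
struct TypeError : std::exception {
  explicit TypeError(std::string msg) : msg_(std::move(msg)) {}
  const char* what() const noexcept override { return msg_.c_str(); }
  std::string msg_;
};

int main() {
  const char* tp_name = "numpy.ndarray";  // e.g. what Py_TYPE(obj)->tp_name yields
  try {
    // New-style call site: the message is fully formatted by fmt::format,
    // so there is no variadic format/argument mismatch to get wrong and
    // no truncation to a fixed-size error buffer.
    throw TypeError(
        fmt::format("expected a torch.ByteTensor, but got {}", tp_name));
  } catch (const TypeError& e) {
    std::cout << e.what() << '\n';
  }
}
```

In the actual call sites changed above, the formatted string is passed to TORCH_CHECK_TYPE(false, ...), so the existing error-translation machinery still surfaces it to Python as a TypeError.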