Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 21:14:14 +08:00)
Remove unsafe PyTorchError constructor (#154961)
Use libfmt in call sites of PyTorchError.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/154961
Approved by: https://github.com/albanD
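The change follows one pattern throughout: printf-style throw torch::TypeError(format, ...) calls become TORCH_CHECK_TYPE(false, ...), with fmt::format building the message wherever substitutions are needed. A minimal before/after sketch, mirroring the PythonArgs::tensor_slow call site further below (a fragment, not a complete translation unit):

    // Before: variadic, printf-style constructor (removed by this PR)
    throw torch::TypeError(
        "expected Tensor as argument %d, but got %s", i, Py_TYPE(obj)->tp_name);

    // After: signal the Python TypeError via TORCH_CHECK_TYPE and let libfmt
    // build the message
    TORCH_CHECK_TYPE(
        false,
        fmt::format(
            "expected Tensor as argument {}, but got {}",
            i,
            Py_TYPE(obj)->tp_name));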
@@ -506,10 +506,11 @@ if(NOT ${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
   )
   # Pybind11 requires explicit linking of the torch_python library
   if(BUILD_LIBTORCHLESS)
-    target_link_libraries(nnapi_backend PRIVATE ${TORCH_LIB} torch_python pybind::pybind11)
+    target_link_libraries(nnapi_backend PRIVATE ${TORCH_LIB})
   else()
-    target_link_libraries(nnapi_backend PRIVATE torch torch_python pybind::pybind11)
+    target_link_libraries(nnapi_backend PRIVATE torch)
   endif()
+  target_link_libraries(nnapi_backend PRIVATE torch_python pybind::pybind11 fmt::fmt-header-only)
 endif()

 set(TORCH_PYTHON_COMPILE_OPTIONS ${TORCH_PYTHON_COMPILE_OPTIONS} PARENT_SCOPE)
@@ -141,9 +141,9 @@ static PyObject* THPDevice_rc(PyObject* a, PyObject* b, int op) {
     case Py_LE:
     case Py_GT:
     case Py_GE:
-      throw torch::TypeError("comparison not implemented");
+      TORCH_CHECK_TYPE(false, "comparison not implemented");
     default:
-      throw torch::TypeError("unexpected comparison op");
+      TORCH_CHECK_TYPE(false, "unexpected comparison op");
   }
   END_HANDLE_TH_ERRORS
 }
@@ -228,17 +228,6 @@ std::string processErrorMsg(std::string str) {
   return str;
 }

-static std::string formatMessage(const char* format, va_list fmt_args) {
-  constexpr size_t ERROR_BUF_SIZE = 1024;
-  std::string error_buf(ERROR_BUF_SIZE, '\0');
-  auto res = vsnprintf(error_buf.data(), ERROR_BUF_SIZE, format, fmt_args);
-  if (res < 0) {
-    res = 0;
-  }
-  error_buf.resize(res);
-  return error_buf;
-}
-
 void translate_exception_to_python(const std::exception_ptr& e_ptr) {
   try {
     TORCH_INTERNAL_ASSERT(
@@ -250,13 +239,6 @@ void translate_exception_to_python(const std::exception_ptr& e_ptr) {
   CATCH_ALL_ERRORS(return)
 }

-TypeError::TypeError(const char* format, ...) {
-  va_list fmt_args{};
-  va_start(fmt_args, format);
-  msg = formatMessage(format, fmt_args);
-  va_end(fmt_args);
-}
-
 void PyWarningHandler::InternalHandler::process(const c10::Warning& warning) {
   warning_buffer_.push_back(warning);
 }
@@ -283,19 +283,12 @@ struct PyTorchError : public std::exception {
   std::string msg;
 };

-// Declare a printf-like function on gcc & clang
-// The compiler can then warn on invalid format specifiers
-#ifdef __GNUC__
-#define TORCH_FORMAT_FUNC(FORMAT_INDEX, VA_ARGS_INDEX) \
-  __attribute__((format(printf, FORMAT_INDEX, VA_ARGS_INDEX)))
-#else
-#define TORCH_FORMAT_FUNC(FORMAT_INDEX, VA_ARGS_INDEX)
-#endif
-
 // Translates to Python TypeError
 struct TypeError : public PyTorchError {
+  TORCH_PYTHON_API TypeError() = default;
+  TORCH_PYTHON_API TypeError(std::string msg_)
+      : PyTorchError(std::move(msg_)) {}
   using PyTorchError::PyTorchError;
-  TORCH_PYTHON_API TypeError(const char* format, ...) TORCH_FORMAT_FUNC(2, 3);
   PyObject* python_type() override {
     return PyExc_TypeError;
   }
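With the variadic constructor gone, torch::TypeError only carries an already-formatted message: the remaining constructors take nothing or a std::string, and all formatting happens at the call site. A small sketch of constructing one directly (assuming the usual torch/csrc/Exceptions.h include; most call sites in this commit go through TORCH_CHECK_TYPE instead):

    #include <fmt/format.h>
    #include <torch/csrc/Exceptions.h>

    // The message is built up front, so there is no format-string constructor
    // left to misuse and the TORCH_FORMAT_FUNC printf attribute can go away.
    torch::TypeError err(
        fmt::format("expected {}, but got {}", "Tensor", "int"));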
@@ -82,9 +82,11 @@ static PyObject* THPGenerator_setState(PyObject* _self, PyObject* _new_state) {

   HANDLE_TH_ERRORS
   if (!THPVariable_Check(_new_state)) {
-    throw torch::TypeError(
-        "expected a torch.ByteTensor, but got %s",
-        Py_TYPE(_new_state)->tp_name);
+    TORCH_CHECK_TYPE(
+        false,
+        fmt::format(
+            "expected a torch.ByteTensor, but got {}",
+            Py_TYPE(_new_state)->tp_name));
   }
   auto self = (THPGenerator*)_self;
   auto& gen = self->cdata;
@@ -380,8 +382,10 @@ PyObject* THPGenerator_Wrap(const Generator& gen) {

 at::Generator THPGenerator_Unwrap(PyObject* state) {
   if (!Py_IS_TYPE(state, &THPGeneratorType)) {
-    throw torch::TypeError(
-        "expected a Generator, but got %s", Py_TYPE(state)->tp_name);
+    TORCH_CHECK_TYPE(
+        false,
+        fmt::format(
+            "expected a Generator, but got {}", Py_TYPE(state)->tp_name));
   }
   return reinterpret_cast<THPGenerator*>(state)->cdata;
 }
@@ -26,7 +26,7 @@ inline Device py_object_to_device(py::object object) {
   if (THPDevice_Check(obj)) {
     return reinterpret_cast<THPDevice*>(obj)->device;
   }
-  throw TypeError("Expected device");
+  TORCH_CHECK_TYPE(false, "Expected device");
 }

 inline Dtype py_object_to_dtype(py::object object) {
@@ -34,7 +34,7 @@ inline Dtype py_object_to_dtype(py::object object) {
   if (THPDtype_Check(obj)) {
     return reinterpret_cast<THPDtype*>(obj)->scalar_type;
   }
-  throw TypeError("Expected dtype");
+  TORCH_CHECK_TYPE(false, "Expected dtype");
 }

 template <typename ModuleType>
@@ -793,11 +793,13 @@ static void _get_tensors_to_save(
       if (is_executable) {
         // TODO: We should really just ALWAYS throw an error here, but
        // doing so will break some internal tests. We should fix those.
-        throw torch::TypeError(
-            "save_for_backward can only save variables, but argument %ld is of "
-            "type %s",
-            i,
-            Py_TYPE(obj)->tp_name);
+        TORCH_CHECK_TYPE(
+            false,
+            fmt::format(
+                "save_for_backward can only save variables, but argument {} is of "
+                "type {}",
+                i,
+                Py_TYPE(obj)->tp_name));
       }
     }
   }
@@ -1,6 +1,7 @@
 #include <torch/csrc/autograd/python_legacy_variable.h>

 #include <ATen/ATen.h>
+#include <fmt/format.h>

 #include <torch/csrc/Exceptions.h>
 #include <torch/csrc/autograd/python_function.h>
@@ -57,8 +58,9 @@ static PyObject* THPVariable_pynew(
       !is_volatile || !requires_grad,
       "Variable can't be volatile and require_grad at the same time!");
   if (grad_fn && !THPFunction_Check(grad_fn)) {
-    throw TypeError(
-        "_grad_fn has to be a Function object or None, but got %s",
+    TORCH_CHECK_TYPE(
+        false,
+        "_grad_fn has to be a Function object or None, but got ",
         Py_TYPE(grad_fn)->tp_name);
   }
   Variable var;
@@ -74,8 +76,10 @@ static PyObject* THPVariable_pynew(
   } else if (THPVariable_Check(data)) {
     var = THPVariable_Unpack(data).detach();
   } else {
-    throw torch::TypeError(
-        "Variable data has to be a tensor, but got %s", Py_TYPE(data)->tp_name);
+    TORCH_CHECK_TYPE(
+        false,
+        "Variable data has to be a tensor, but got ",
+        Py_TYPE(data)->tp_name);
   }
   // We set `tensor`'s `allow_tensor_metadata_change` to true here, because we
   // want to allow the following use case for backward compatibility:
@@ -29,6 +29,7 @@
 #include <c10/util/irange.h>

 #include <c10/core/Layout.h>
+#include <fmt/format.h>

 using namespace at;
 using namespace torch::autograd::utils;
@@ -123,10 +124,12 @@ inline Variable valueToTensor(
   } else if (torch::is_symbool(value)) {
     scalar = Scalar(py::cast<c10::SymBool>(py::handle(value)));
   } else {
-    throw TypeError(
-        "can't assign a %s to a %s",
+    TORCH_CHECK_TYPE(
+        false,
+        "can't assign a ",
         Py_TYPE(value)->tp_name,
-        torch::utils::options_to_string(options).c_str());
+        " to a ",
+        torch::utils::options_to_string(options));
   }
   // lift_fresh is supposed to be used in situations where you are guaranteed to
   // get a plain Tensor which is not true for cpu device but not for non cpu
@@ -443,7 +446,7 @@ static void dispatch_set_item(
 int THPVariable_setitem(PyObject* self, PyObject* index, PyObject* py_value) {
   HANDLE_TH_ERRORS
   if (py_value == nullptr) {
-    throw TypeError("Tensor does not support deleting items");
+    TORCH_CHECK_TYPE(false, "Tensor does not support deleting items");
   }
   if ((check_has_torch_function(self)) ||
       (check_has_torch_function(py_value))) {
@@ -456,7 +459,7 @@ int THPVariable_setitem(PyObject* self, PyObject* index, PyObject* py_value) {
   if (self_.layout() == kSparse || self_.layout() == kSparseCsr ||
       self_.layout() == kSparseCsc || self_.layout() == kSparseBsr ||
       self_.layout() == kSparseBsc) {
-    throw TypeError("Cannot assign to a sparse tensor");
+    TORCH_CHECK_TYPE(false, "Cannot assign to a sparse tensor");
   }
   OptionalDeviceGuard device_guard(device_of(self_));
   at::Device self_device = self_.device();
@@ -5,6 +5,7 @@
 #include <torch/csrc/utils/python_strings.h>

 #include <ATen/PythonTorchFunctionTLS.h>
+#include <fmt/format.h>

 namespace torch {
 static PyObject* disabled_torch_function = nullptr;
@@ -219,8 +220,9 @@ PyObject* THPModule_disable_torch_function(PyObject* self, PyObject* a) {
   } else if (PyTuple_Check(args)) {
     py_args = py::reinterpret_borrow<py::tuple>(args);
   } else {
-    throw torch::TypeError(
-        "expected List or Tuple (got %s)", Py_TYPE(args)->tp_name);
+    TORCH_CHECK_TYPE(
+        false,
+        fmt::format("expected List or Tuple (got {})", Py_TYPE(args)->tp_name));
   }

   // These are all C-API calls so no exceptions will be raised
@@ -253,8 +255,9 @@ PyObject* THPModule_disable_torch_dispatch(PyObject* self, PyObject* a) {
   } else if (PyTuple_Check(args)) {
     py_args = py::reinterpret_borrow<py::tuple>(args);
   } else {
-    throw torch::TypeError(
-        "expected List or Tuple (got %s)", Py_TYPE(args)->tp_name);
+    TORCH_CHECK_TYPE(
+        false,
+        fmt::format("expected List or Tuple (got {})", Py_TYPE(args)->tp_name));
   }

   // This implementation is not completely correct. The moral
@@ -1417,20 +1417,24 @@ std::string FunctionSignature::toString() const {
   const auto min_args = signature.min_args;
   const long nargs_ = nargs;
   if (min_args != max_pos_args) {
-    throw TypeError(
-        "%s() takes from %zu to %zu positional arguments but %ld were given",
-        signature.name.c_str(),
-        min_args,
-        max_pos_args,
-        nargs_);
+    TORCH_CHECK_TYPE(
+        false,
+        fmt::format(
+            "{}() takes from {} to {} positional arguments but {} were given",
+            signature.name,
+            min_args,
+            max_pos_args,
+            nargs_));
   }
-  throw TypeError(
-      "%s() takes %zu positional argument%s but %ld %s given",
-      signature.name.c_str(),
-      max_pos_args,
-      max_pos_args == 1 ? "" : "s",
-      nargs_,
-      nargs == 1 ? "was" : "were");
+  TORCH_CHECK_TYPE(
+      false,
+      fmt::format(
+          "{}() takes {} positional argument{} but {} {} given",
+          signature.name,
+          max_pos_args,
+          max_pos_args == 1 ? "" : "s",
+          nargs_,
+          nargs == 1 ? "was" : "were"));
 }

 [[noreturn]] static void missing_args(
@@ -1450,12 +1454,14 @@ std::string FunctionSignature::toString() const {
     }
   }

-  throw TypeError(
-      "%s() missing %d required positional argument%s: %s",
-      signature.name.c_str(),
-      num_missing,
-      num_missing == 1 ? "s" : "",
-      ss.str().c_str());
+  TORCH_CHECK_TYPE(
+      false,
+      fmt::format(
+          "{}() missing {} required positional argument{}: {}",
+          signature.name,
+          num_missing,
+          num_missing == 1 ? "s" : "",
+          ss.str()));
 }

 static Py_ssize_t find_param(FunctionSignature& signature, PyObject* name) {
@@ -1484,27 +1490,31 @@ static Py_ssize_t find_param(FunctionSignature& signature, PyObject* name) {
   // accessible within this thread.
   while (PyDict_Next(kwargs, &pos, &key, &value)) {
     if (!THPUtils_checkString(key)) {
-      throw TypeError("keywords must be strings");
+      TORCH_CHECK_TYPE(false, "keywords must be strings");
     }

     auto param_idx = find_param(signature, key);
     if (param_idx < 0) {
-      throw TypeError(
-          "%s() got an unexpected keyword argument '%s'",
-          signature.name.c_str(),
-          THPUtils_unpackString(key).c_str());
+      TORCH_CHECK_TYPE(
+          false,
+          fmt::format(
+              "{}() got an unexpected keyword argument '{}'",
+              signature.name,
+              THPUtils_unpackString(key)));
     }

     if (param_idx < num_pos_args) {
-      throw TypeError(
-          "%s() got multiple values for argument '%s'",
-          signature.name.c_str(),
-          THPUtils_unpackString(key).c_str());
+      TORCH_CHECK_TYPE(
+          false,
+          fmt::format(
+              "{}() got multiple values for argument '{}'",
+              signature.name,
+              THPUtils_unpackString(key)));
     }
   }

   // this should never be hit
-  throw TypeError("invalid keyword arguments");
+  TORCH_CHECK_TYPE(false, "invalid keyword arguments");
 }

 bool FunctionSignature::parse(
@@ -1591,12 +1601,14 @@ bool FunctionSignature::parse(
     } else if (raise_exception) {
       if (is_kwd) {
         // foo(): argument 'other' must be str, not int
-        throw TypeError(
-            "%s(): argument '%s' must be %s, not %s",
-            name.c_str(),
-            param.name.c_str(),
-            param.type_name().c_str(),
-            Py_TYPE(obj)->tp_name);
+        TORCH_CHECK_TYPE(
+            false,
+            fmt::format(
+                "{}(): argument '{}' must be {}, not {}",
+                name,
+                param.name,
+                param.type_name(),
+                Py_TYPE(obj)->tp_name));
       } else {
         // foo(): argument 'other' (position 2) must be str, not int
         if (failed_idx != -1) {
@@ -1605,25 +1617,29 @@ bool FunctionSignature::parse(
            obj = args;
          }
          TORCH_INTERNAL_ASSERT(failed_idx < PySequence_Size(obj));
-         throw TypeError(
-             "%s(): argument '%s' (position %ld) must be %s, but found element of type %s at pos %ld",
-             name.c_str(),
-             param.name.c_str(),
-             static_cast<long>(arg_pos + 1),
-             param.type_name().c_str(),
-             Py_TYPE(py::reinterpret_steal<py::object>(
-                         PySequence_GetItem(obj, failed_idx))
-                         .ptr())
-                 ->tp_name,
-             static_cast<long>(failed_idx));
+         TORCH_CHECK_TYPE(
+             false,
+             fmt::format(
+                 "{}(): argument '{}' (position {}) must be {}, but found element of type {} at pos {}",
+                 name,
+                 param.name,
+                 arg_pos + 1,
+                 param.type_name(),
+                 Py_TYPE(py::reinterpret_steal<py::object>(
+                             PySequence_GetItem(obj, failed_idx))
+                             .ptr())
+                     ->tp_name,
+                 failed_idx));
        }
-       throw TypeError(
-           "%s(): argument '%s' (position %ld) must be %s, not %s",
-           name.c_str(),
-           param.name.c_str(),
-           static_cast<long>(arg_pos + 1),
-           param.type_name().c_str(),
-           Py_TYPE(obj)->tp_name);
+       TORCH_CHECK_TYPE(
+           false,
+           fmt::format(
+               "{}(): argument '{}' (position {}) must be {}, not {}",
+               name,
+               param.name,
+               arg_pos + 1,
+               param.type_name(),
+               Py_TYPE(obj)->tp_name));
      }
    } else {
      return false;
@@ -1745,7 +1761,7 @@ void PythonArgParser::print_error(
   auto options = get_signatures();
   auto msg =
       torch::format_invalid_args(args, kwargs, function_name + "()", options);
-  throw TypeError("%s", msg.c_str());
+  TORCH_CHECK_TYPE(false, msg);
 }

 std::vector<std::string> PythonArgParser::get_signatures() const {
@@ -1812,8 +1828,12 @@ at::Tensor PythonArgs::tensor_slow(int i) {
     // a test for Py_None here; instead, you need to mark the argument
     // as *allowing none*; you can do this by writing 'Tensor?' instead
     // of 'Tensor' in the ATen metadata.
-    throw TypeError(
-        "expected Tensor as argument %d, but got %s", i, Py_TYPE(obj)->tp_name);
+    TORCH_CHECK_TYPE(
+        false,
+        fmt::format(
+            "expected Tensor as argument {}, but got {}",
+            i,
+            Py_TYPE(obj)->tp_name));
   }
   at::AutoDispatchBelowADInplaceOrView guard; // TODO: remove
   at::tracer::impl::NoTracerDispatchMode tracer_guard;
@@ -39,6 +39,7 @@
 // Scalar and Tensor, UNLESS they require grad (in which case
 // they only bind to Tensor).

+#include <fmt/format.h>
 #include <pybind11/pytypes.h>
 #include <torch/csrc/python_headers.h>

@@ -490,7 +491,9 @@ inline std::array<at::Tensor, N> PythonArgs::tensorlist_n(int i) {
   // NOLINTNEXTLINE(bugprone-branch-clone)
   auto size = tuple ? PyTuple_GET_SIZE(arg.get()) : PyList_GET_SIZE(arg.get());
   if (size != N) {
-    throw TypeError("expected tuple of %d elements but got %d", N, (int)size);
+    TORCH_CHECK_TYPE(
+        false,
+        fmt::format("expected tuple of {} elements but got {}", N, size));
   }
   for (const auto idx : c10::irange(size)) {
     PyObject* obj = tuple ? PyTuple_GET_ITEM(arg.get(), idx)
@@ -528,12 +531,14 @@ inline void throw_intlist_exception(
       ? e.what()
       : std::string("type must be ") + args->signature.params[i].type_name() +
           ",but got " + Py_TYPE(obj)->tp_name;
-  throw TypeError(
-      "%s(): argument '%s' failed to unpack the object at pos %zu with error \"%s\"",
-      args->signature.name.c_str(),
-      args->signature.params[i].name.c_str(),
-      idx + 1,
-      error.c_str());
+  TORCH_CHECK_TYPE(
+      false,
+      fmt::format(
+          "{}(): argument '{}' failed to unpack the object at pos {} with error \"{}\"",
+          args->signature.name,
+          args->signature.params[i].name,
+          idx + 1,
+          error));
 }

 inline std::vector<c10::SymInt> PythonArgs::symintlist(int i) {
@@ -712,13 +717,15 @@ inline std::vector<double> PythonArgs::getDoublelist(int i) {
         res[idx] = THPUtils_unpackDouble(obj);
       }
     } catch (const std::exception&) {
-      throw TypeError(
-          "%s(): argument '%s' must be %s, but found element of type %s at pos %zu",
-          signature.name.c_str(),
-          signature.params[i].name.c_str(),
-          signature.params[i].type_name().c_str(),
-          Py_TYPE(obj)->tp_name,
-          idx + 1);
+      TORCH_CHECK_TYPE(
+          false,
+          fmt::format(
+              "{}(): argument '{}' must be {}, but found element of type {} at pos {}",
+              signature.name,
+              signature.params[i].name,
+              signature.params[i].type_name(),
+              Py_TYPE(obj)->tp_name,
+              idx + 1));
     }
   }
   return res;
@@ -1119,8 +1126,10 @@ inline c10::Stream PythonArgs::stream(int i) {
     return c10::Stream(
         c10::Stream::Default::DEFAULT, c10::Device(c10::DeviceType::CPU, -1));
   if (!THPStream_Check(args[i])) {
-    throw TypeError(
-        "expected Stream object. Got '%s'", Py_TYPE(args[i])->tp_name);
+    TORCH_CHECK_TYPE(
+        false,
+        fmt::format(
+            "expected Stream object. Got '{}'", Py_TYPE(args[i])->tp_name));
   }
   return c10::Stream::unpack3(
       ((THPStream*)args[i])->stream_id,
@@ -670,11 +670,13 @@ Tensor legacy_sparse_tensor_generic_ctor_new(
       // new(sequence) binds to this signature but should be treated differently
       // unless the sequences is a torch.Size
       if (ctor_or_new == CtorOrNew::CTOR) {
-        throw TypeError(
+        TORCH_CHECK_TYPE(
+            false,
             "torch.sparse.SparseTensor(sequence) only accepts sizes. Please use torch.sparse_coo_tensor() "
             "or construct a strided tensor and convert it to sparse via to_sparse.");
       } else {
-        throw TypeError(
+        TORCH_CHECK_TYPE(
+            false,
             "SparseTensor.new(sequence) only accepts sizes. Please use torch.sparse_coo_tensor() "
             "or construct a strided tensor and convert it to sparse via to_sparse.");
       }
@@ -1,3 +1,4 @@
+#include <fmt/format.h>
 #include <torch/csrc/THP.h>
 #include <torch/csrc/utils/tensor_numpy.h>
 #define WITH_NUMPY_IMPORT_ARRAY
@@ -105,7 +106,7 @@ static std::vector<int64_t> to_aten_shape(int ndim, npy_intp* values) {
 static std::vector<int64_t> seq_to_aten_shape(PyObject* py_seq) {
   int ndim = PySequence_Length(py_seq);
   if (ndim == -1) {
-    throw TypeError("shape and strides must be sequences");
+    TORCH_CHECK_TYPE(false, "shape and strides must be sequences");
   }
   auto result = std::vector<int64_t>(ndim);
   for (const auto i : c10::irange(ndim)) {
@@ -303,7 +304,8 @@ int aten_to_numpy_dtype(const ScalarType scalar_type) {
     case kBool:
       return NPY_BOOL;
     default:
-      throw TypeError("Got unsupported ScalarType %s", toString(scalar_type));
+      TORCH_CHECK_TYPE(
+          false, "Got unsupported ScalarType ", toString(scalar_type));
   }
 }

@@ -355,10 +357,12 @@ ScalarType numpy_dtype_to_aten(int dtype) {
   auto pytype = THPObjectPtr(PyArray_TypeObjectFromType(dtype));
   if (!pytype)
     throw python_error();
-  throw TypeError(
-      "can't convert np.ndarray of type %s. The only supported types are: "
-      "float64, float32, float16, complex64, complex128, int64, int32, int16, int8, uint64, uint32, uint16, uint8, and bool.",
-      ((PyTypeObject*)pytype.get())->tp_name);
+  TORCH_CHECK_TYPE(
+      false,
+      fmt::format(
+          "can't convert np.ndarray of type {}. The only supported types are: "
+          "float64, float32, float16, complex64, complex128, int64, int32, int16, int8, uint64, uint32, uint16, uint8, and bool.",
+          ((PyTypeObject*)pytype.get())->tp_name));
 }

 bool is_numpy_int(PyObject* obj) {
@@ -385,7 +389,7 @@ at::Tensor tensor_from_cuda_array_interface(PyObject* obj) {
   TORCH_INTERNAL_ASSERT(cuda_dict);

   if (!PyDict_Check(cuda_dict.get())) {
-    throw TypeError("`__cuda_array_interface__` must be a dict");
+    TORCH_CHECK_TYPE(false, "`__cuda_array_interface__` must be a dict");
   }

   // Extract the `obj.__cuda_array_interface__['shape']` attribute
@@ -396,7 +400,7 @@ at::Tensor tensor_from_cuda_array_interface(PyObject* obj) {
       throw python_error();
     }
     if (py_shape == nullptr) {
-      throw TypeError("attribute `shape` must exist");
+      TORCH_CHECK_TYPE(false, "attribute `shape` must exist");
     }
     sizes = seq_to_aten_shape(py_shape);
   }
@@ -410,7 +414,7 @@ at::Tensor tensor_from_cuda_array_interface(PyObject* obj) {
       throw python_error();
     }
     if (py_typestr == nullptr) {
-      throw TypeError("attribute `typestr` must exist");
+      TORCH_CHECK_TYPE(false, "attribute `typestr` must exist");
     }
     PyArray_Descr* descr = nullptr;
     TORCH_CHECK_VALUE(
@@ -432,10 +436,10 @@ at::Tensor tensor_from_cuda_array_interface(PyObject* obj) {
       throw python_error();
     }
     if (py_data == nullptr) {
-      throw TypeError("attribute `shape` data exist");
+      TORCH_CHECK_TYPE(false, "attribute `shape` data exist");
     }
     if (!PyTuple_Check(py_data) || PyTuple_GET_SIZE(py_data) != 2) {
-      throw TypeError("`data` must be a 2-tuple of (int, bool)");
+      TORCH_CHECK_TYPE(false, "`data` must be a 2-tuple of (int, bool)");
     }
     data_ptr = PyLong_AsVoidPtr(PyTuple_GET_ITEM(py_data, 0));
     if (data_ptr == nullptr && PyErr_Occurred()) {
@@ -446,8 +450,8 @@ at::Tensor tensor_from_cuda_array_interface(PyObject* obj) {
       throw python_error();
     }
     if (read_only) {
-      throw TypeError(
-          "the read only flag is not supported, should always be False");
+      TORCH_CHECK_TYPE(
+          false, "the read only flag is not supported, should always be False");
     }
   }

@@ -461,8 +465,8 @@ at::Tensor tensor_from_cuda_array_interface(PyObject* obj) {
   if (py_strides != nullptr && py_strides != Py_None) {
     if (PySequence_Length(py_strides) == -1 ||
         static_cast<size_t>(PySequence_Length(py_strides)) != sizes.size()) {
-      throw TypeError(
-          "strides must be a sequence of the same length as shape");
+      TORCH_CHECK_TYPE(
+          false, "strides must be a sequence of the same length as shape");
     }
     strides = seq_to_aten_shape(py_strides);

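All of these call sites run inside the HANDLE_TH_ERRORS / END_HANDLE_TH_ERRORS machinery, so a failing TORCH_CHECK_TYPE still surfaces in Python as a TypeError, just as the removed throw torch::TypeError(...) did. A rough sketch of that shape, with illustrative names that are not taken from this diff:

    static PyObject* example_binding(PyObject* self, PyObject* arg) {
      HANDLE_TH_ERRORS
      // Raises a Python TypeError naming the offending type when arg is not a
      // Tensor; the surrounding macros translate the C++ exception for Python.
      TORCH_CHECK_TYPE(
          THPVariable_Check(arg),
          "expected a Tensor, but got ",
          Py_TYPE(arg)->tp_name);
      Py_RETURN_NONE;
      END_HANDLE_TH_ERRORS
    }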