[Exception] [4/N] Replace torch::IndexError and torch::ValueError with C10 counterparts (#117317)

Fixes #ISSUE_NUMBER

Pull Request resolved: https://github.com/pytorch/pytorch/pull/117317
Approved by: https://github.com/ezyang
cyy
2024-01-18 00:35:29 +00:00
committed by PyTorch MergeBot
parent c64fd8b89c
commit 396a5c3091
10 changed files with 124 additions and 122 deletions
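The conversion is mechanical throughout: each `if (!cond) { throw torch::XxxError("... %s ...", args); }` block collapses into a single TORCH_CHECK_TYPE / TORCH_CHECK_VALUE / TORCH_CHECK_INDEX call that takes the inverted condition first and builds its message from stream-style arguments, so the printf-style format strings disappear and the thrown type becomes the c10 counterpart. A minimal sketch of what such a check macro boils down to (a hypothetical analog for illustration; concat_message and CHECK_VALUE_SKETCH are invented names, not the actual c10 implementation):

#include <sstream>
#include <stdexcept>
#include <string>

// Fold all message parts into one string (C++17 fold expression). This is
// why the new call sites pass Py_TYPE(arg)->tp_name as a separate argument
// instead of a "%s" format specifier.
template <typename... Args>
std::string concat_message(const Args&... parts) {
  std::ostringstream oss;
  (oss << ... << parts);
  return oss.str();
}

// Hypothetical analog of TORCH_CHECK_VALUE: if the condition fails, build
// the message and throw. The real c10 macro additionally records the source
// location and throws c10::ValueError, which the Python bindings translate
// back into Python's ValueError.
#define CHECK_VALUE_SKETCH(cond, ...)                         \
  do {                                                        \
    if (!(cond)) {                                            \
      throw std::invalid_argument(concat_message(__VA_ARGS__)); \
    }                                                         \
  } while (0)

// Usage mirrors the replacements below, e.g.
//   CHECK_VALUE_SKETCH(self_numel == 1,
//       "only one element tensors can be converted to Python scalars");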

View File

@@ -317,9 +317,7 @@ template<typename T>
 static T dispatch_to(const Tensor & self) {
   pybind11::gil_scoped_release no_gil;
   OptionalDeviceGuard device_guard(device_of(self));
-  if (self.sym_numel() != 1) {
-    throw ValueError("only one element tensors can be converted to Python scalars");
-  }
+  TORCH_CHECK_VALUE(self.sym_numel() == 1, "only one element tensors can be converted to Python scalars");
   return self.template item<T>();
 }

View File

@@ -238,13 +238,6 @@ TypeError::TypeError(const char* format, ...) {
   va_end(fmt_args);
 }
 
-ValueError::ValueError(const char* format, ...) {
-  va_list fmt_args{};
-  va_start(fmt_args, format);
-  msg = formatMessage(format, fmt_args);
-  va_end(fmt_args);
-}
-
 AttributeError::AttributeError(const char* format, ...) {
   va_list fmt_args{};
   va_start(fmt_args, format);

View File

@@ -297,15 +297,6 @@ struct TypeError : public PyTorchError {
   }
 };
 
-// Translates to Python ValueError
-struct ValueError : public PyTorchError {
-  using PyTorchError::PyTorchError;
-  TORCH_PYTHON_API ValueError(const char* format, ...) TORCH_FORMAT_FUNC(2, 3);
-  PyObject* python_type() override {
-    return PyExc_ValueError;
-  }
-};
-
 // Translates to Python AttributeError
 struct AttributeError : public PyTorchError {
   AttributeError(const char* format, ...) TORCH_FORMAT_FUNC(2, 3);
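Deleting this struct also deletes its python_type() override, which is how a thrown torch::ValueError was turned into PyExc_ValueError at the binding boundary. After this change the HANDLE_TH_ERRORS / END_HANDLE_TH_ERRORS machinery performs the equivalent mapping for the c10 exception types. A simplified sketch of that translate-at-the-boundary pattern (illustrative only; call_and_translate is an invented helper, not the actual macro expansion):

#include <Python.h>
#include <stdexcept>

// Simplified sketch: catch the C++ exception where a binding returns control
// to the interpreter and raise the matching Python exception. The real
// END_HANDLE_TH_ERRORS dispatches on the concrete c10 exception class,
// e.g. c10::ValueError -> PyExc_ValueError, c10::IndexError -> PyExc_IndexError.
static PyObject* call_and_translate(PyObject* (*fn)(PyObject*), PyObject* arg) {
  try {
    return fn(arg);
  } catch (const std::invalid_argument& e) {  // stand-in for c10::ValueError
    PyErr_SetString(PyExc_ValueError, e.what());
    return nullptr;  // a null return tells CPython an exception is pending
  }
}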

View File

@@ -476,9 +476,11 @@ namespace autograd {
 
 static PyObject* set_autocast_enabled(PyObject* _unused, PyObject* arg) {
   HANDLE_TH_ERRORS
-  if (!PyBool_Check(arg)) {
-    throw TypeError("enabled must be a bool (got %s)", Py_TYPE(arg)->tp_name);
-  }
+  TORCH_CHECK_TYPE(
+      PyBool_Check(arg),
+      "enabled must be a bool (got ",
+      Py_TYPE(arg)->tp_name,
+      ")");
   at::autocast::set_enabled(arg == Py_True);
   Py_RETURN_NONE;
   END_HANDLE_TH_ERRORS
@@ -508,9 +510,11 @@ static PyObject* is_any_autocast_enabled(PyObject* _unused, PyObject* arg) {
 
 static PyObject* set_autocast_cpu_enabled(PyObject* _unused, PyObject* arg) {
   HANDLE_TH_ERRORS
-  if (!PyBool_Check(arg)) {
-    throw TypeError("enabled must be a bool (got %s)", Py_TYPE(arg)->tp_name);
-  }
+  TORCH_CHECK_TYPE(
+      PyBool_Check(arg),
+      "enabled must be a bool (got ",
+      Py_TYPE(arg)->tp_name,
+      ")");
   at::autocast::set_cpu_enabled(arg == Py_True);
   Py_RETURN_NONE;
   END_HANDLE_TH_ERRORS
@@ -528,9 +532,11 @@ static PyObject* is_autocast_cpu_enabled(PyObject* _unused, PyObject* arg) {
 
 static PyObject* set_autocast_ipu_enabled(PyObject* _unused, PyObject* arg) {
   HANDLE_TH_ERRORS
-  if (!PyBool_Check(arg)) {
-    throw TypeError("enabled must be a bool (got %s)", Py_TYPE(arg)->tp_name);
-  }
+  TORCH_CHECK_TYPE(
+      PyBool_Check(arg),
+      "enabled must be a bool (got ",
+      Py_TYPE(arg)->tp_name,
+      ")");
   at::autocast::set_ipu_enabled(arg == Py_True);
   Py_RETURN_NONE;
   END_HANDLE_TH_ERRORS
@@ -548,9 +554,11 @@ static PyObject* is_autocast_ipu_enabled(PyObject* _unused, PyObject* arg) {
 
 static PyObject* set_autocast_xla_enabled(PyObject* _unused, PyObject* arg) {
   HANDLE_TH_ERRORS
-  if (!PyBool_Check(arg)) {
-    throw TypeError("enabled must be a bool (got %s)", Py_TYPE(arg)->tp_name);
-  }
+  TORCH_CHECK_TYPE(
+      PyBool_Check(arg),
+      "enabled must be a bool (got ",
+      Py_TYPE(arg)->tp_name,
+      ")");
   at::autocast::set_xla_enabled(arg == Py_True);
   Py_RETURN_NONE;
   END_HANDLE_TH_ERRORS
@@ -568,10 +576,11 @@ static PyObject* is_autocast_xla_enabled(PyObject* _unused, PyObject* arg) {
 
 static PyObject* set_autocast_gpu_dtype(PyObject* _unused, PyObject* arg) {
   HANDLE_TH_ERRORS
-  if (!THPDtype_Check(arg)) {
-    throw TypeError(
-        "dtype must be a torch.dtype (got %s)", Py_TYPE(arg)->tp_name);
-  }
+  TORCH_CHECK_TYPE(
+      THPDtype_Check(arg),
+      "dtype must be a torch.dtype (got ",
+      Py_TYPE(arg)->tp_name,
+      ")");
   at::ScalarType targetType = reinterpret_cast<THPDtype*>(arg)->scalar_type;
   at::autocast::set_autocast_gpu_dtype(targetType);
   Py_RETURN_NONE;
@@ -580,10 +589,11 @@ static PyObject* set_autocast_gpu_dtype(PyObject* _unused, PyObject* arg) {
 
 static PyObject* set_autocast_cpu_dtype(PyObject* _unused, PyObject* arg) {
   HANDLE_TH_ERRORS
-  if (!THPDtype_Check(arg)) {
-    throw TypeError(
-        "dtype must be a torch.dtype (got %s)", Py_TYPE(arg)->tp_name);
-  }
+  TORCH_CHECK_TYPE(
+      THPDtype_Check(arg),
+      "dtype must be a torch.dtype (got ",
+      Py_TYPE(arg)->tp_name,
+      ")");
   at::ScalarType targetType = reinterpret_cast<THPDtype*>(arg)->scalar_type;
   at::autocast::set_autocast_cpu_dtype(targetType);
   Py_RETURN_NONE;
@@ -592,10 +602,11 @@ static PyObject* set_autocast_cpu_dtype(PyObject* _unused, PyObject* arg) {
 
 static PyObject* set_autocast_ipu_dtype(PyObject* _unused, PyObject* arg) {
   HANDLE_TH_ERRORS
-  if (!THPDtype_Check(arg)) {
-    throw TypeError(
-        "dtype must be a torch.dtype (got %s)", Py_TYPE(arg)->tp_name);
-  }
+  TORCH_CHECK_TYPE(
+      THPDtype_Check(arg),
+      "dtype must be a torch.dtype (got ",
+      Py_TYPE(arg)->tp_name,
+      ")");
   at::ScalarType targetType = reinterpret_cast<THPDtype*>(arg)->scalar_type;
   at::autocast::set_autocast_ipu_dtype(targetType);
   Py_RETURN_NONE;
@@ -604,10 +615,11 @@ static PyObject* set_autocast_ipu_dtype(PyObject* _unused, PyObject* arg) {
 
 static PyObject* set_autocast_xla_dtype(PyObject* _unused, PyObject* arg) {
   HANDLE_TH_ERRORS
-  if (!THPDtype_Check(arg)) {
-    throw TypeError(
-        "dtype must be a torch.dtype (got %s)", Py_TYPE(arg)->tp_name);
-  }
+  TORCH_CHECK_TYPE(
+      THPDtype_Check(arg),
+      "dtype must be a torch.dtype (got ",
+      Py_TYPE(arg)->tp_name,
+      ")");
   at::ScalarType targetType = reinterpret_cast<THPDtype*>(arg)->scalar_type;
   at::autocast::set_autocast_xla_dtype(targetType);
   Py_RETURN_NONE;
@@ -681,9 +693,11 @@ static PyObject* is_autocast_cache_enabled(PyObject* _unused, PyObject* arg) {
 
 static PyObject* set_autocast_cache_enabled(PyObject* _unused, PyObject* arg) {
   HANDLE_TH_ERRORS
-  if (!PyBool_Check(arg)) {
-    throw TypeError("enabled must be a bool (got %s)", Py_TYPE(arg)->tp_name);
-  }
+  TORCH_CHECK_TYPE(
+      PyBool_Check(arg),
+      "enabled must be a bool (got ",
+      Py_TYPE(arg)->tp_name,
+      ")");
   at::autocast::set_autocast_cache_enabled(arg == Py_True);
   Py_RETURN_NONE;
   END_HANDLE_TH_ERRORS
@@ -723,9 +737,11 @@ static PyObject* is_grad_enabled(PyObject* _unused, PyObject* arg) {
 
 static PyObject* set_fwd_grad_enabled(PyObject* _unused, PyObject* arg) {
   HANDLE_TH_ERRORS
-  if (!PyBool_Check(arg)) {
-    throw TypeError("enabled must be a bool (got %s)", Py_TYPE(arg)->tp_name);
-  }
+  TORCH_CHECK_TYPE(
+      PyBool_Check(arg),
+      "enabled must be a bool (got ",
+      Py_TYPE(arg)->tp_name,
+      ")");
   c10::AutogradState::get_tls_state().set_fw_grad_mode(arg == Py_True);
   Py_RETURN_NONE;
   END_HANDLE_TH_ERRORS

View File

@@ -54,10 +54,9 @@ static PyObject* THPVariable_pynew(
     throw python_error();
   }
 
-  if (is_volatile && requires_grad) {
-    throw ValueError(
-        "Variable can't be volatile and require_grad at the same time!");
-  }
+  TORCH_CHECK_VALUE(
+      !is_volatile || !requires_grad,
+      "Variable can't be volatile and require_grad at the same time!");
   if (grad_fn && !THPFunction_Check(grad_fn)) {
     throw TypeError(
         "_grad_fn has to be a Function object or None, but got %s",

View File

@@ -91,10 +91,12 @@ static inline int64_t count_specified_dimensions(PyObject* index) {
 }
 
 [[noreturn]] static inline void invalid_index(PyObject* obj) {
-  throw IndexError(
+  TORCH_CHECK_INDEX(
+      false,
       "only integers, slices (`:`), ellipsis (`...`), None and long or byte "
-      "Variables are valid indices (got %s)",
-      Py_TYPE(obj)->tp_name);
+      "Variables are valid indices (got ",
+      Py_TYPE(obj)->tp_name,
+      ")");
 }
 
 static inline Variable sequenceToVariable(

View File

@@ -355,12 +355,13 @@ inline PythonArgs PythonArgParser::parse(
     PyObject* args,
     PyObject* kwargs,
     ParsedArgs<N>& dst) {
-  if (N < max_args) {
-    throw ValueError(
-        "PythonArgParser: dst ParsedArgs buffer does not have enough capacity, expected %d (got %d)",
-        (int)max_args,
-        N);
-  }
+  TORCH_CHECK_VALUE(
+      N >= max_args,
+      "PythonArgParser: dst ParsedArgs buffer does not have enough capacity, expected ",
+      max_args,
+      " (got ",
+      N,
+      ")");
   return raw_parse(self, args, kwargs, dst.args);
 }

View File

@@ -104,18 +104,20 @@ std::vector<int64_t> compute_sizes(PyObject* seq, ScalarType scalar_type) {
       length /= static_cast<int64_t>(elementSize(scalar_type));
     }
     sizes.push_back(length);
-    if (sizes.size() > MAX_DIMS) {
-      throw ValueError("too many dimensions '%s'", Py_TYPE(seq)->tp_name);
-    }
+    TORCH_CHECK_VALUE(
+        sizes.size() <= MAX_DIMS,
+        "too many dimensions '",
+        Py_TYPE(seq)->tp_name,
+        "'");
     if (length == 0)
       break;
     PyObject* new_obj = PySequence_GetItem(seq, 0);
-    if (!new_obj) {
-      // This line uses seq so we must NOT override obj before this line
-      throw ValueError(
-          "could not determine the shape of object type '%s'",
-          Py_TYPE(seq)->tp_name);
-    }
+    // This line uses seq so we must NOT override obj before this line
+    TORCH_CHECK_VALUE(
+        new_obj,
+        "could not determine the shape of object type '",
+        Py_TYPE(seq)->tp_name,
+        "'");
     obj = THPObjectPtr(new_obj);
     seq = obj.get();
   }
@@ -167,9 +169,11 @@ ScalarType infer_scalar_type(PyObject* obj) {
     const auto& var = THPVariable_Unpack(obj);
     return var.scalar_type();
   }
-  if (THPUtils_checkString(obj)) {
-    throw TypeError("new(): invalid data type '%s'", Py_TYPE(obj)->tp_name);
-  }
+  TORCH_CHECK_TYPE(
+      !THPUtils_checkString(obj),
+      "new(): invalid data type '",
+      Py_TYPE(obj)->tp_name,
+      "'");
   if (PySequence_Check(obj)) {
     c10::optional<ScalarType> scalarType;
     auto length = PySequence_Length(obj);
@@ -183,8 +187,8 @@ ScalarType infer_scalar_type(PyObject* obj) {
       if (!handle)
        throw python_error();
       auto cur_item = handle.get();
-      if (cur_item == obj)
-        throw TypeError("new(): self-referential lists are incompatible");
+      TORCH_CHECK_TYPE(
+          cur_item != obj, "new(): self-referential lists are incompatible");
       ScalarType item_scalarType = infer_scalar_type(cur_item);
       scalarType = (scalarType) ? at::promoteTypes(*scalarType, item_scalarType)
                                 : item_scalarType;
@@ -260,13 +264,15 @@ void recursive_store(
       throw python_error();
     // NOLINTNEXTLINE(bugprone-branch-clone)
     auto seq_size = PySequence_Fast_GET_SIZE(seq.get());
-    if (seq_size != n) {
-      throw ValueError(
-          "expected sequence of length %lld at dim %lld (got %lld)",
-          (long long)n,
-          (long long)dim,
-          (long long)seq_size);
-    }
+    TORCH_CHECK_VALUE(
+        seq_size == n,
+        "expected sequence of length ",
+        n,
+        " at dim ",
+        dim,
+        " (got ",
+        seq_size,
+        ")");
 
     PyObject** items = PySequence_Fast_ITEMS(seq.get());
     for (const auto i : c10::irange(n)) {
@@ -293,9 +299,11 @@ Tensor internal_new_from_data(
     bool copy_numpy,
     bool type_inference,
     bool pin_memory = false) {
-  if (THPUtils_checkString(data)) {
-    throw TypeError("new(): invalid data type '%s'", Py_TYPE(data)->tp_name);
-  }
+  TORCH_CHECK_TYPE(
+      !THPUtils_checkString(data),
+      "new(): invalid data type '",
+      Py_TYPE(data)->tp_name,
+      "'");
 
   if (THPVariable_Check(data)) {
     TORCH_CHECK(!pin_memory, "Can't pin tensor constructed from a variable");
@@ -485,10 +493,11 @@ Tensor legacy_new_from_sequence(
     at::ScalarType scalar_type,
     c10::optional<Device> device,
     PyObject* data) {
-  if (!PySequence_Check(data)) {
-    throw TypeError(
-        "new(): data must be a sequence (got %s)", Py_TYPE(data)->tp_name);
-  }
+  TORCH_CHECK_TYPE(
+      PySequence_Check(data),
+      "new(): data must be a sequence (got ",
+      Py_TYPE(data)->tp_name,
+      ")");
   return internal_new_from_data(
       options,
       scalar_type,

View File

@@ -237,30 +237,27 @@ at::Tensor tensor_from_numpy(
   // NumPy strides use bytes. Torch strides use element counts.
   auto element_size_in_bytes = PyArray_ITEMSIZE(array);
   for (auto& stride : strides) {
-    if (stride % element_size_in_bytes != 0) {
-      throw ValueError(
-          "given numpy array strides not a multiple of the element byte size. "
-          "Copy the numpy array to reallocate the memory.");
-    }
+    TORCH_CHECK_VALUE(
+        stride % element_size_in_bytes == 0,
+        "given numpy array strides not a multiple of the element byte size. "
+        "Copy the numpy array to reallocate the memory.");
     stride /= element_size_in_bytes;
   }
 
   for (const auto i : c10::irange(ndim)) {
-    if (strides[i] < 0) {
-      throw ValueError(
-          "At least one stride in the given numpy array is negative, "
-          "and tensors with negative strides are not currently supported. "
-          "(You can probably work around this by making a copy of your array "
-          " with array.copy().) ");
-    }
+    TORCH_CHECK_VALUE(
+        strides[i] >= 0,
+        "At least one stride in the given numpy array is negative, "
+        "and tensors with negative strides are not currently supported. "
+        "(You can probably work around this by making a copy of your array "
+        " with array.copy().) ");
   }
 
   void* data_ptr = PyArray_DATA(array);
-  if (!PyArray_EquivByteorders(PyArray_DESCR(array)->byteorder, NPY_NATIVE)) {
-    throw ValueError(
-        "given numpy array has byte order different from the native byte order. "
-        "Conversion between byte orders is currently not supported.");
-  }
+  TORCH_CHECK_VALUE(
+      PyArray_EquivByteorders(PyArray_DESCR(array)->byteorder, NPY_NATIVE),
+      "given numpy array has byte order different from the native byte order. "
+      "Conversion between byte orders is currently not supported.");
   Py_INCREF(obj);
   return at::lift_fresh(at::from_blob(
       data_ptr,
@@ -411,9 +408,8 @@ at::Tensor tensor_from_cuda_array_interface(PyObject* obj) {
   }
   // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
   PyArray_Descr* descr;
-  if (!PyArray_DescrConverter(py_typestr, &descr)) {
-    throw ValueError("cannot parse `typestr`");
-  }
+  TORCH_CHECK_VALUE(
+      PyArray_DescrConverter(py_typestr, &descr), "cannot parse `typestr`");
   dtype = numpy_dtype_to_aten(descr->type_num);
   dtype_size_in_bytes = descr->elsize;
   TORCH_INTERNAL_ASSERT(dtype_size_in_bytes > 0);
@@ -459,11 +455,10 @@ at::Tensor tensor_from_cuda_array_interface(PyObject* obj) {
     // __cuda_array_interface__ strides use bytes. Torch strides use element
     // counts.
     for (auto& stride : strides) {
-      if (stride % dtype_size_in_bytes != 0) {
-        throw ValueError(
-            "given array strides not a multiple of the element byte size. "
-            "Make a copy of the array to reallocate the memory.");
-      }
+      TORCH_CHECK_VALUE(
+          stride % dtype_size_in_bytes == 0,
+          "given array strides not a multiple of the element byte size. "
+          "Make a copy of the array to reallocate the memory.");
       stride /= dtype_size_in_bytes;
     }
   } else {

View File

@@ -125,9 +125,7 @@ at::TensorOptions options_from_string(const std::string& str) {
   }
 
   auto it = map->find(str);
-  if (it == map->end()) {
-    throw ValueError("invalid type: '%s'", str.c_str());
-  }
+  TORCH_CHECK_VALUE(it != map->end(), "invalid type: '", str, "'");
   return it->second->options();
 }