[CodeClean] Replace std::runtime_error with TORCH_CHECK (#164129)

As the title states.

**Changes**:
- torch/csrc/Module.cpp
- torch/csrc/utils.cpp
- torch/csrc/stable
- torch/lib/libshm
Pull Request resolved: https://github.com/pytorch/pytorch/pull/164129
Approved by: https://github.com/albanD
Authored by FFFrog on 2025-10-09 18:22:19 +08:00
Committed by PyTorch MergeBot
parent ae25ec569c
commit 5390324984

6 changed files with 35 additions and 43 deletions
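
For context on the pattern applied throughout this diff: TORCH_CHECK throws a c10::Error (a std::exception subclass) that records the source location, and its trailing message arguments are variadic and only stringified when the check fails. A minimal sketch of the two forms used below, assuming a PyTorch C++ build environment; the demo function and its argument are hypothetical:

#include <c10/util/Exception.h>

// Hypothetical example, for illustration only.
void demo(int signum) {
  // Conditional form: throws c10::Error when the condition is false;
  // the message pieces are concatenated lazily, on failure only.
  TORCH_CHECK(signum > 0, "expected a positive signal number, got ", signum);
  // ... normal handling would go here ...

  // Unconditional form, for paths that should be unreachable:
  // TORCH_CHECK(false, ...) is the idiomatic replacement for a bare
  // `throw std::runtime_error(...)`.
  TORCH_CHECK(false, "unexpected signal number");
}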

torch/csrc/Module.cpp

@@ -1982,7 +1982,7 @@ SigHandler* _getOldHandler(int signum) {
   SIG_CHECK(SIGSEGV);
   SIG_CHECK(SIGILL);
-  throw std::runtime_error("unexpected signal number");
+  TORCH_CHECK(false, "unexpected signal number");
 #undef SIG_CHECK
 }

torch/csrc/stable/stableivalue_conversions.h

@@ -1,5 +1,6 @@
 #pragma once

+#include <c10/util/Exception.h>
 #include <torch/csrc/inductor/aoti_torch/c/shim.h>
 #include <torch/csrc/stable/tensor_struct.h>
 #include <torch/headeronly/core/ScalarType.h>
@@ -118,7 +119,8 @@ struct FromImpl<ScalarType> {
       case ScalarType::UInt64:
         return from(aoti_torch_dtype_uint64());
       default:
-        throw std::runtime_error(
+        TORCH_CHECK(
+            false,
             "Not yet supported ScalarType, please file an issue describing your use case.");
     }
   }
@@ -267,8 +269,10 @@ struct ToImpl<ScalarType> {
     } else if (shim_scalartype == aoti_torch_dtype_uint64()) {
       return ScalarType::UInt64;
     } else {
-      throw std::runtime_error(
-          "Not yet supported ScalarType " + std::to_string(shim_scalartype) +
+      TORCH_CHECK(
+          false,
+          "Not yet supported ScalarType ",
+          std::to_string(shim_scalartype),
           ", please file an issue describing your use case.");
     }
   }
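
A small note on the new ToImpl branch: TORCH_CHECK assembles its message with c10::str, which stringifies integral arguments itself, so passing shim_scalartype directly would be equivalent to the std::to_string call kept above. A minimal sketch, assuming a PyTorch build environment; reject_dtype is a hypothetical name:

#include <c10/util/Exception.h>
#include <cstdint>

// Hypothetical helper, for illustration only.
void reject_dtype(int32_t shim_scalartype) {
  // c10::str handles integral types, so no std::to_string is needed.
  TORCH_CHECK(
      false,
      "Not yet supported ScalarType ",
      shim_scalartype,
      ", please file an issue describing your use case.");
}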

torch/csrc/utils.cpp

@@ -55,17 +55,17 @@ std::vector<int64_t> THPUtils_unpackLongs(PyObject* arg) {
     for (int i = 0; i != nDim; ++i) {
       PyObject* item =
           tuple ? PyTuple_GET_ITEM(arg, i) : PyList_GET_ITEM(arg, i);
-      if (!THPUtils_checkLong(item)) {
-        std::ostringstream oss;
-        oss << "expected int at position " << i
-            << ", but got: " << THPUtils_typename(item);
-        throw std::runtime_error(oss.str());
-      }
+      TORCH_CHECK(
+          THPUtils_checkLong(item),
+          "expected int at position ",
+          i,
+          ", but got: ",
+          THPUtils_typename(item));
       sizes[i] = THPUtils_unpackLong(item);
     }
     return sizes;
   }
-  throw std::runtime_error("Expected tuple or list");
+  TORCH_CHECK(false, "Expected tuple or list");
 }

 bool THPUtils_checkIntTuple(PyObject* arg) {
@@ -81,9 +81,7 @@ bool THPUtils_checkIntTuple(PyObject* arg) {
 }

 std::vector<int> THPUtils_unpackIntTuple(PyObject* arg) {
-  if (!THPUtils_checkIntTuple(arg)) {
-    throw std::runtime_error("Couldn't unpack int tuple");
-  }
+  TORCH_CHECK(THPUtils_checkIntTuple(arg), "Couldn't unpack int tuple");
   std::vector<int> values(PyTuple_GET_SIZE(arg));
   for (Py_ssize_t i = 0; i < PyTuple_GET_SIZE(arg); ++i) {
     values[i] = (int)THPUtils_unpackLong(PyTuple_GET_ITEM(arg, i));
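
A porting detail visible in both hunks above: TORCH_CHECK asserts the success condition, so a guard written as `if (!ok) throw ...` has its polarity flipped when converted. A minimal sketch of the equivalence, assuming a PyTorch build environment; old_style and new_style are hypothetical names:

#include <c10/util/Exception.h>
#include <stdexcept>

// Guard style: throw when the check fails.
void old_style(bool ok) {
  if (!ok) {
    throw std::runtime_error("Couldn't unpack int tuple");
  }
}

// TORCH_CHECK style: assert that the check succeeds.
void new_style(bool ok) {
  TORCH_CHECK(ok, "Couldn't unpack int tuple");
}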

torch/lib/libshm/core.cpp

@@ -16,9 +16,7 @@ static AllocInfo get_alloc_info(const char* filename) {
   info.pid = getpid();
   info.free = false;
   size_t len = strlen(filename);
-  if (len >= sizeof(info.filename)) {
-    throw std::runtime_error("MapAllocatorContext_filename too long");
-  }
+  TORCH_CHECK(len < sizeof(info.filename), "MapAllocatorContext_filename too long");
   memcpy(info.filename, filename, len + 1);
   return info;
 }
@@ -57,21 +55,16 @@ static void start_manager() {
     handle.append(buffer.data(), bytes_read);
   }
   SYSCHECK_ERR_RETURN_NEG1(close(pipe_ends[0]));
-  if (handle.length() == 0) {
-    std::string msg("no response from torch_shm_manager at \"");
-    msg += manager_executable_path;
-    msg += "\"";
-    throw std::runtime_error(msg);
-  }
+  TORCH_CHECK(handle.length() != 0, "no response from torch_shm_manager at \"", manager_executable_path, "\"");
   handle.pop_back(); // remove \n
-  if (handle.rfind("ERROR: ", 0) == 0) {
-    std::string msg("torch_shm_manager at \"");
-    msg += manager_executable_path;
-    msg += "\": ";
-    msg += handle.substr(7); // remove "ERROR: "
-    throw std::runtime_error(msg);
-  }
+  TORCH_CHECK(
+      handle.rfind("ERROR: ", 0) != 0,
+      "torch_shm_manager at \"",
+      manager_executable_path,
+      "\": ",
+      handle.substr(7));

   ClientSocket manager{handle};
   managers.emplace(std::move(handle), std::move(manager));
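
A behavioral note for reviewers of conversions like these: c10::Error derives from std::exception but not from std::runtime_error, so a caller that catches std::runtime_error specifically will no longer see these failures, while `catch (const std::exception&)` handlers keep working. A minimal sketch, assuming a PyTorch build environment; the function names are hypothetical:

#include <c10/util/Exception.h>
#include <exception>

// Hypothetical failing function, for illustration only.
void may_fail() {
  TORCH_CHECK(false, "no response from torch_shm_manager");
}

void caller() {
  try {
    may_fail();
  } catch (const std::exception& e) {
    // Still reached: c10::Error is a std::exception. A handler written
    // as `catch (const std::runtime_error&)` would NOT catch it.
    (void)e.what();
  }
}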

torch/lib/libshm/manager.cpp

@@ -10,6 +10,7 @@
 #include <vector>

 #include <c10/util/tempfile.h>
+#include <c10/util/Exception.h>
 #include <libshm/err.h>
 #include <libshm/socket.h>
@@ -96,10 +97,9 @@ int main(int argc, char* argv[]) {
   std::optional<c10::TempDir> tempdir;
   try {
     tempdir = c10::try_make_tempdir(/*name_prefix=*/"torch-shm-dir-");
-    if (!tempdir.has_value()) {
-      throw std::runtime_error(
-          "could not generate a random directory for manager socket");
-    }
+    TORCH_CHECK(
+        tempdir.has_value(),
+        "could not generate a random directory for manager socket");
     std::string tempfile = tempdir->name + "/manager.sock";

torch/lib/libshm/socket.h

@@ -58,16 +58,13 @@ class Socket {
         SYSCHECK_ERR_RETURN_NEG1(
             step_received =
                 ::read(socket_fd, buffer, num_bytes - bytes_received));
-        if (step_received == 0)
-          throw std::runtime_error("Other end has closed the connection");
+        TORCH_CHECK(step_received != 0, "Other end has closed the connection");
         bytes_received += step_received;
         buffer += step_received;
       } else if (pfd.revents & (POLLERR | POLLHUP)) {
-        throw std::runtime_error(
-            "An error occurred while waiting for the data");
+        TORCH_CHECK(false, "An error occurred while waiting for the data");
       } else {
-        throw std::runtime_error(
-            "Shared memory manager connection has timed out");
+        TORCH_CHECK(false, "Shared memory manager connection has timed out");
       }
     }
   }
@@ -156,9 +153,9 @@ class ClientSocket : public Socket {
     char buffer[3] = {0, 0, 0};
     send(&info, sizeof(info));
     recv(buffer, 2);
-    if (strcmp(buffer, "OK") != 0)
-      throw std::runtime_error(
-          "Shared memory manager didn't respond with an OK");
+    TORCH_CHECK(
+        strcmp(buffer, "OK") == 0,
+        "Shared memory manager didn't respond with an OK");
   }

   void register_deallocation(AllocInfo& info) {