mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-27 00:54:52 +08:00
add Wmissing-prototypes to clang-tidy (#96805)
This PR introduces the **-Wmissing-prototypes** warning to clang-tidy to prevent further coding errors such as the one fixed by PR #96714. <!-- copilot:summary --> ### <samp>🤖 Generated by Copilot at fd2cf2a</samp> This pull request makes several internal functions static to improve performance and avoid name clashes. It also fixes some typos, formatting issues, and missing includes in various files. It adds a new .clang-tidy check to warn about missing prototypes for non-static functions. Pull Request resolved: https://github.com/pytorch/pytorch/pull/96805 Approved by: https://github.com/malfet, https://github.com/albanD
This commit is contained in:
@ -113,15 +113,15 @@ static PyObject* Tensor_instancecheck(PyObject* _self, PyObject* arg) {
|
||||
END_HANDLE_TH_ERRORS
|
||||
}
|
||||
|
||||
PyObject* Tensor_dtype(PyTensorType* self, void* unused) {
|
||||
static PyObject* Tensor_dtype(PyTensorType* self, void* unused) {
|
||||
return torch::autograd::utils::wrap(self->dtype);
|
||||
}
|
||||
|
||||
PyObject* Tensor_layout(PyTensorType* self, void* unused) {
|
||||
static PyObject* Tensor_layout(PyTensorType* self, void* unused) {
|
||||
return torch::autograd::utils::wrap(self->layout);
|
||||
}
|
||||
|
||||
PyObject* Tensor_is_cuda(PyTensorType* self, void* unused) {
|
||||
static PyObject* Tensor_is_cuda(PyTensorType* self, void* unused) {
|
||||
if (self->is_cuda) {
|
||||
Py_RETURN_TRUE;
|
||||
} else {
|
||||
@ -129,7 +129,7 @@ PyObject* Tensor_is_cuda(PyTensorType* self, void* unused) {
|
||||
}
|
||||
}
|
||||
|
||||
PyObject* Tensor_is_sparse(PyTensorType* self, void* unused) {
|
||||
static PyObject* Tensor_is_sparse(PyTensorType* self, void* unused) {
|
||||
if (self->layout->layout == at::Layout::Strided) {
|
||||
Py_RETURN_FALSE;
|
||||
} else {
|
||||
@ -137,7 +137,7 @@ PyObject* Tensor_is_sparse(PyTensorType* self, void* unused) {
|
||||
}
|
||||
}
|
||||
|
||||
PyObject* Tensor_is_sparse_csr(PyTensorType* self, void* unused) {
|
||||
static PyObject* Tensor_is_sparse_csr(PyTensorType* self, void* unused) {
|
||||
if (self->layout->layout == at::Layout::SparseCsr) {
|
||||
Py_RETURN_TRUE;
|
||||
} else {
|
||||
@ -302,7 +302,7 @@ static THPObjectPtr get_tensor_dict() {
|
||||
// importing torch.
|
||||
static std::vector<PyTensorType*> tensor_types;
|
||||
|
||||
void set_default_storage_type(Backend backend, ScalarType dtype) {
|
||||
static void set_default_storage_type(Backend backend, ScalarType dtype) {
|
||||
THPObjectPtr storage = get_storage_obj(backend, dtype);
|
||||
|
||||
auto torch_module = THPObjectPtr(PyImport_ImportModule("torch"));
|
||||
@ -314,7 +314,7 @@ void set_default_storage_type(Backend backend, ScalarType dtype) {
|
||||
}
|
||||
}
|
||||
|
||||
void set_default_tensor_type(
|
||||
static void set_default_tensor_type(
|
||||
c10::optional<Backend> backend,
|
||||
c10::optional<ScalarType> dtype) {
|
||||
if (backend.has_value()) {
|
||||
|
||||
Reference in New Issue
Block a user