add -Wmissing-prototypes to clang-tidy (#96805)

This PR enables the **-Wmissing-prototypes** warning in the clang-tidy configuration to prevent further coding errors such as the one fixed by PR #96714.

<!--
copilot:summary
-->
### <samp>🤖 Generated by Copilot at fd2cf2a</samp>

This pull request makes several internal functions static to improve performance and avoid name clashes. It also fixes some typos, formatting, and missing includes in various files. It adds a new .clang-tidy check to warn about missing prototypes for non-static functions.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/96805
Approved by: https://github.com/malfet, https://github.com/albanD
This commit is contained in:
cyy
2023-04-25 18:20:32 +00:00
committed by PyTorch MergeBot
parent 39ff87c6a4
commit dbc7e919b8
26 changed files with 67 additions and 56 deletions

View File

@ -113,15 +113,15 @@ static PyObject* Tensor_instancecheck(PyObject* _self, PyObject* arg) {
END_HANDLE_TH_ERRORS
}
PyObject* Tensor_dtype(PyTensorType* self, void* unused) {
static PyObject* Tensor_dtype(PyTensorType* self, void* unused) {
return torch::autograd::utils::wrap(self->dtype);
}
PyObject* Tensor_layout(PyTensorType* self, void* unused) {
static PyObject* Tensor_layout(PyTensorType* self, void* unused) {
return torch::autograd::utils::wrap(self->layout);
}
PyObject* Tensor_is_cuda(PyTensorType* self, void* unused) {
static PyObject* Tensor_is_cuda(PyTensorType* self, void* unused) {
if (self->is_cuda) {
Py_RETURN_TRUE;
} else {
@ -129,7 +129,7 @@ PyObject* Tensor_is_cuda(PyTensorType* self, void* unused) {
}
}
PyObject* Tensor_is_sparse(PyTensorType* self, void* unused) {
static PyObject* Tensor_is_sparse(PyTensorType* self, void* unused) {
if (self->layout->layout == at::Layout::Strided) {
Py_RETURN_FALSE;
} else {
@ -137,7 +137,7 @@ PyObject* Tensor_is_sparse(PyTensorType* self, void* unused) {
}
}
PyObject* Tensor_is_sparse_csr(PyTensorType* self, void* unused) {
static PyObject* Tensor_is_sparse_csr(PyTensorType* self, void* unused) {
if (self->layout->layout == at::Layout::SparseCsr) {
Py_RETURN_TRUE;
} else {
@ -302,7 +302,7 @@ static THPObjectPtr get_tensor_dict() {
// importing torch.
static std::vector<PyTensorType*> tensor_types;
void set_default_storage_type(Backend backend, ScalarType dtype) {
static void set_default_storage_type(Backend backend, ScalarType dtype) {
THPObjectPtr storage = get_storage_obj(backend, dtype);
auto torch_module = THPObjectPtr(PyImport_ImportModule("torch"));
@ -314,7 +314,7 @@ void set_default_storage_type(Backend backend, ScalarType dtype) {
}
}
void set_default_tensor_type(
static void set_default_tensor_type(
c10::optional<Backend> backend,
c10::optional<ScalarType> dtype) {
if (backend.has_value()) {