Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 21:14:14 +08:00
[18/N] Fix extra warnings brought by clang-tidy-17 (#144014)
Fixes #ISSUE_NUMBER

Pull Request resolved: https://github.com/pytorch/pytorch/pull/144014
Approved by: https://github.com/Skylion007, https://github.com/albanD
@@ -8,12 +8,12 @@ namespace at::caching {
 
 using weakref_type = c10::weak_intrusive_ptr<TensorImpl, UndefinedTensorImpl>;
 
-bool cached_tensorimpls_enabled = false;
+static bool cached_tensorimpls_enabled = false;
 
 // Like `cached_casts` in autocast_mode, we hash on the TensorImpl*
 // and keep the pointer alive with a weakref value.
-ska::flat_hash_map<TensorImpl*, weakref_type> cached_tensorimpls;
-std::mutex cached_tensorimpl_mutex;
+static ska::flat_hash_map<TensorImpl*, weakref_type> cached_tensorimpls;
+static std::mutex cached_tensorimpl_mutex;
 
 
 bool is_cached_tensor(const at::Tensor& t) {
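
This hunk gives the translation-unit-local cache state internal linkage, which is what the clang-tidy warning about non-static globals used in only one .cpp file asks for. A minimal sketch of the same pattern (hypothetical names, not the PyTorch code):

    #include <mutex>
    #include <string>
    #include <unordered_map>

    // `static` gives these file-scope objects internal linkage, so no other
    // translation unit can reference them or collide with the names.
    static std::unordered_map<int, std::string> g_cache;
    static std::mutex g_cache_mutex;
    static bool g_cache_enabled = false;

    // Only the functions meant to be called from elsewhere keep external linkage.
    bool cache_lookup(int key, std::string* out) {
      std::lock_guard<std::mutex> guard(g_cache_mutex);
      if (!g_cache_enabled) {
        return false;
      }
      auto it = g_cache.find(key);
      if (it == g_cache.end()) {
        return false;
      }
      *out = it->second;
      return true;
    }

An unnamed namespace would achieve the same linkage; adding `static` is simply the smaller diff.
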
@@ -573,7 +573,7 @@ Tensor math_mish_backward(
 }
 
 template <typename scalar_t>
-inline void _rrelu_with_noise_train(
+static void _rrelu_with_noise_train(
     Tensor& output,
     const Tensor& input,
     Tensor& noise,
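
The helper above only has callers inside its own .cpp file, so it changes from `inline` to `static`: both avoid multiple-definition problems, but `static` states the internal linkage explicitly, which is what the check wants. A small sketch with invented names:

    #include <vector>

    // A free function (or function template) marked `static` has internal
    // linkage: it is visible only inside this translation unit.
    template <typename T>
    static void scale_in_place(std::vector<T>& values, T factor) {
      for (auto& v : values) {
        v *= factor;
      }
    }

    void normalize(std::vector<float>& values) {
      scale_in_place(values, 0.5f); // file-local helper
    }
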
@@ -17,8 +17,7 @@
 #include <ATen/ops/mm_native.h>
 #endif
 
-namespace at::native {
-namespace xpu {
+namespace at::native::xpu {
 
 // result = beta * self + alpha * (mat1 * mat2)
 Tensor& addmm_out(
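
The two nested namespace blocks collapse into a single C++17 nested namespace definition, which is the modernize-concat-nested-namespaces style. An equivalent illustration with placeholder names:

    // Pre-C++17: one block per namespace level, one closing brace each.
    namespace outer {
    namespace inner {
    int answer() { return 42; }
    } // namespace inner
    } // namespace outer

    // C++17 nested namespace definition: the same scope, one block.
    namespace outer::inner {
    int answer_plus_one() { return answer() + 1; }
    } // namespace outer::inner

Because the namespace is now opened as `at::native::xpu` in one piece, the matching closing comment and the extra outer closing brace are updated in the next two hunks.
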
@@ -455,7 +454,7 @@ Tensor& tensordot_out(
 TORCH_LIBRARY_IMPL(aten, XPU, m) {
   m.impl("tensordot.out", TORCH_FN(tensordot_out));
 }
-} // namespace xpu
+} // namespace at::native::xpu
 
 TORCH_IMPL_FUNC(addmm_out_xpu)
 (const Tensor& self,
@@ -511,5 +510,3 @@ TORCH_IMPL_FUNC(addmv_out_xpu)
     const Tensor& result) {
   xpu::addmv_out(self, mat, vec, beta, alpha, const_cast<Tensor&>(result));
 }
-
-} // namespace at::native
@@ -154,7 +154,7 @@ bool onednn_strides_check(const Tensor& src) {
   int perm[DNNL_MAX_NDIMS] = {0};
   for (int d = 0; d < md_ndims; ++d) {
     // no strides check needed for empty tensor
-    if (md_padded_dims[d] == 0)
+    if (md_padded_dims[d] == nullptr)
       return true;
 
     // no strides verification for runtime dims
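
The literal 0 in the comparison becomes nullptr. Assuming `md_padded_dims[d]` is pointer-like here (the diff alone does not show its type), this is the modernize-use-nullptr style: a pointer check should read as a pointer check. A generic sketch, not the oneDNN code:

    #include <cstdint>

    // Compare pointers against nullptr rather than the literal 0, so a
    // pointer test cannot be confused with an integer test.
    bool first_row_missing(const int64_t* const* rows) {
      if (rows[0] == nullptr) { // previously written as `rows[0] == 0`
        return true;
      }
      return false;
    }
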
@@ -14,7 +14,7 @@ void refcounted_deleter(void* ctx_) {
   }
 }
 
-std::mutex replace_data_ptr_mutex;
+static std::mutex replace_data_ptr_mutex;
 
 void maybeApplyRefcountedDeleter(const c10::Storage& storage) {
   std::lock_guard<std::mutex> guard(replace_data_ptr_mutex);
@@ -172,7 +172,7 @@ std::optional<DeviceIndex> getDeviceIndexWithPrimaryContext() {
 }
 
 namespace _internal {
-bool dummyHasPrimaryContext([[maybe_unused]] DeviceIndex device_index) {
+static bool dummyHasPrimaryContext([[maybe_unused]] DeviceIndex device_index) {
   TORCH_CHECK(false, "Should never been called");
 }
 static bool (*hasPrimaryContext)(DeviceIndex) = dummyHasPrimaryContext;
@@ -193,12 +193,12 @@ C10_DECLARE_REGISTRY(C10FlagsRegistry, C10FlagParser, const std::string&);
       success_ = C10FlagParser::Parse<type>(content, &FLAGS_##name); \
     } \
   }; \
-  } \
   RegistererC10FlagsRegistry g_C10FlagsRegistry_##name( \
       #name, \
       C10FlagsRegistry(), \
       RegistererC10FlagsRegistry::DefaultCreator<C10FlagParser_##name>, \
       "(" #type ", default " #default_value ") " help_str); \
+  } \
   }
 
 #define C10_DEFINE_int(name, default_value, help_str) \
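
Registration objects generated by macros like this are usually kept inside an unnamed namespace so that each `g_C10FlagsRegistry_##name` object gets internal linkage and cannot collide across translation units; the hunk moves the namespace's closing brace so the registerer is covered too. A simplified sketch of the idea (not the real C10 macro):

    #include <string>

    struct Registrar {
      explicit Registrar(const std::string& name) { (void)name; }
    };

    // Everything inside an unnamed namespace has internal linkage, so each
    // translation unit can define its own registrar object safely.
    #define DEFINE_FLAG_REGISTRAR(name)    \
      namespace {                          \
      Registrar g_registrar_##name(#name); \
      }

    DEFINE_FLAG_REGISTRAR(verbose)
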
@@ -66,7 +66,7 @@ PyObject* createPyObject(const at::Storage& storage) {
   return obj;
 }
 
-PyTypeObject* loadTypedStorageTypeObject() {
+static PyTypeObject* loadTypedStorageTypeObject() {
   PyObject* storage_module = PyImport_ImportModule("torch.storage");
   TORCH_INTERNAL_ASSERT(storage_module && PyModule_Check(storage_module));
 
@@ -77,7 +77,7 @@ PyTypeObject* loadTypedStorageTypeObject() {
       PyObject_GetAttrString(storage_module, "TypedStorage"));
 }
 
-PyTypeObject* getTypedStorageTypeObject() {
+static PyTypeObject* getTypedStorageTypeObject() {
   // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
   static PyTypeObject* typed_storage_type_obj = loadTypedStorageTypeObject();
   return typed_storage_type_obj;
@@ -99,7 +99,7 @@ static PyObject* THPGenerator_setState(PyObject* _self, PyObject* _new_state) {
   END_HANDLE_TH_ERRORS
 }
 
-uint64_t unpack_uint64(PyObject* pyobj) {
+static uint64_t unpack_uint64(PyObject* pyobj) {
   uint64_t unsigned_obj = 0;
   try {
     // First try to interpret as unsigned long
@@ -225,7 +225,7 @@ static PyObject* THPGenerator_get_device(THPGenerator* self, void* unused) {
   END_HANDLE_TH_ERRORS
 }
 
-PyObject* THPGenerator_reduce(PyObject* _self, PyObject* noargs) {
+static PyObject* THPGenerator_reduce(PyObject* _self, PyObject* noargs) {
   HANDLE_TH_ERRORS
   auto self = (THPGenerator*)_self;
   auto& gen = self->cdata;
@@ -304,7 +304,7 @@ static struct PyMemberDef THPGenerator_members[] = {
     {"_cdata", T_ULONGLONG, offsetof(THPGenerator, cdata), READONLY, nullptr},
     {nullptr}};
 
-PyTypeObject THPGeneratorType = {
+static PyTypeObject THPGeneratorType = {
     PyVarObject_HEAD_INIT(nullptr, 0)
     "torch._C.Generator", /* tp_name */
     sizeof(THPGenerator), /* tp_basicsize */
@@ -355,12 +355,12 @@ bool THPGenerator_init(PyObject* module) {
   return true;
 }
 
-void set_pyobj(const Generator& self, PyObject* pyobj) {
+static void set_pyobj(const Generator& self, PyObject* pyobj) {
   TORCH_CHECK(self.defined(), "cannot call set_pyobj() on undefined generator");
   self.set_pyobj(pyobj);
 }
 
-PyObject* pyobj(const Generator& self) {
+static PyObject* pyobj(const Generator& self) {
   TORCH_CHECK(self.defined(), "cannot call pyobj() on undefined generator");
   return self.pyobj();
 }
@@ -24,11 +24,11 @@ PyObject* THPMemoryFormat_New(
   return self.release();
 }
 
-PyObject* THPMemoryFormat_repr(THPMemoryFormat* self) {
+static PyObject* THPMemoryFormat_repr(THPMemoryFormat* self) {
   return THPUtils_packString(self->name);
 }
 
-PyObject* THPMemoryFormat_reduce(PyObject* _self, PyObject* noargs) {
+static PyObject* THPMemoryFormat_reduce(PyObject* _self, PyObject* noargs) {
   auto* self = (THPMemoryFormat*)_self;
   return THPUtils_packString(self->name);
 }
@@ -22,7 +22,7 @@ PyObject* THPQScheme_New(at::QScheme qscheme, const std::string& name) {
   return self.release();
 }
 
-PyObject* THPQScheme_reduce(PyObject* _self, PyObject* noargs) {
+static PyObject* THPQScheme_reduce(PyObject* _self, PyObject* noargs) {
   auto self = (THPQScheme*)_self;
   return THPUtils_packString(self->name);
 }
@@ -33,7 +33,7 @@ static PyMethodDef THPQScheme_methods[] = {
     {nullptr} /* Sentinel */
 };
 
-PyObject* THPQScheme_repr(THPQScheme* self) {
+static PyObject* THPQScheme_repr(THPQScheme* self) {
   std::string name = self->name;
   return THPUtils_packString("torch." + name);
 }
@@ -376,7 +376,7 @@ class Sequential : public torch::nn::ModuleHolder<SequentialImpl> {
  public:
   using torch::nn::ModuleHolder<SequentialImpl>::ModuleHolder;
 
-  Sequential() {}
+  Sequential() = default;
 
   /// Constructs the `Sequential` from a braced-init-list of named `AnyModule`s.
   /// It enables the following use case:
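
An empty user-written constructor body becomes `= default` (modernize-use-equals-default). Besides being shorter, a defaulted constructor can stay trivial, which a user-provided empty body never is:

    #include <type_traits>

    struct Defaulted {
      Defaulted() = default; // compiler-generated, still trivial
    };

    struct HandWritten {
      HandWritten() {} // user-provided: no longer trivial
    };

    static_assert(std::is_trivially_default_constructible_v<Defaulted>);
    static_assert(!std::is_trivially_default_constructible_v<HandWritten>);

The same substitution appears again below for TypeAndSize and hash_t.
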
@@ -766,7 +766,7 @@ edge_list collect_next_edges(Variables&&... variables) {
 }
 
 struct TypeAndSize {
-  TypeAndSize() {}
+  TypeAndSize() = default;
   /* implicit */
   TypeAndSize(const at::Tensor& t)
       : sym_sizes(t.sym_sizes().vec()), options(t.options()) {}
@@ -29,7 +29,7 @@ class TORCH_API hash_t : public c10::uint128 {
   hash_t(uint64_t val) : uint128(val) {}
   hash_t(uint128 val) : uint128(val) {}
   hash_t(uint64_t top, uint64_t bottom) : uint128(top, bottom) {}
-  hash_t() {}
+  hash_t() = default;
 };
 
 // Std* functions use 64-bit hash
@@ -94,7 +94,7 @@ void initModule(PyObject* module) {
          const std::string& stacks,
          size_t max_entries) {
         at::detail::getMTIAHooks().recordMemoryHistory(
-            enabled, stacks, max_entries);
+            std::move(enabled), stacks, max_entries);
       });
 
   m.def("_mtia_memorySnapshot", []() {
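
Here a parameter taken by value is handed onward with std::move at its last use, the fix clang-tidy's performance-unnecessary-value-param suggests so the by-value copy is reused instead of copied again. A generic sketch with invented names:

    #include <string>
    #include <utility>
    #include <vector>

    static std::vector<std::string> g_history;

    static void record(std::string entry) {
      g_history.push_back(std::move(entry)); // last use: move, don't copy
    }

    void record_twice(std::string entry) {
      g_history.push_back(entry);   // still needed below, so copy here
      record(std::move(entry));     // last use: hand the buffer over
    }
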
@@ -22,7 +22,7 @@ struct InternedStringsTable {
   ska::flat_hash_map<PyObject*, at::Dimname> py_interned_string_to_dimname_;
 };
 
-InternedStringsTable kPyInternedStringToDimname;
+static InternedStringsTable kPyInternedStringToDimname;
 
 // NOLINTNEXTLINE(bugprone-exception-escape)
 InternedStringsTable::~InternedStringsTable() {
@@ -1,5 +1,4 @@
 #include <torch/csrc/python_headers.h>
-#include <system_error>
 #include <vector>
 
 #include <ATen/ops/from_blob.h>
@@ -241,7 +241,7 @@ uint8_t storage_get(const at::Storage& self, ptrdiff_t idx) {
 }
 
 template class THPPointer<THPStorage>;
-
+// NOLINTBEGIN(misc-use-internal-linkage)
 namespace torch::gdb {
 /* ~~~ misc debugging utilities ~~~
  *
@@ -324,6 +324,7 @@ std::string dispatch_keyset_string(c10::DispatchKeySet keyset) {
 }
 
 } // namespace torch::gdb
+// NOLINTEND(misc-use-internal-linkage)
 
 namespace pybind11::detail {
 
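
The torch::gdb helpers are meant to be called by name from a debugger, so they keep external linkage on purpose; instead of adding `static`, the warning is silenced for the whole region with a NOLINTBEGIN/NOLINTEND pair naming the check. The general pattern:

    // NOLINTBEGIN(misc-use-internal-linkage)
    // Symbols here intentionally have external linkage (callable from a
    // debugger by name), so the internal-linkage check is suppressed.
    namespace debug_helpers {
    int add_for_gdb(int a, int b) {
      return a + b;
    }
    } // namespace debug_helpers
    // NOLINTEND(misc-use-internal-linkage)
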
@@ -1068,9 +1068,9 @@ inline c10::complex<double> PythonArgs::toComplex(int i) {
 
 inline c10::complex<double> PythonArgs::toComplexWithDefault(
     int i,
-    c10::complex<double> default_value) {
+    c10::complex<double> default_complex) {
   if (!args[i])
-    return default_value;
+    return default_complex;
   return toComplex(i);
 }
 