[8/N] Fixes clang-tidy warnings in c10/{core,util}/*.h (#116082)

This patch enables clang-tidy coverage of c10/**/*.h headers and includes other related fixes.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/116082
Approved by: https://github.com/Skylion007
Author: cyy
Date: 2023-12-20 12:22:21 +00:00
Committed by: PyTorch MergeBot
Parent: d72d99e591
Commit: 968b94bef2
30 changed files with 80 additions and 93 deletions

View File

@@ -242,6 +242,7 @@ code = 'CLANGTIDY'
 include_patterns = [
     'aten/src/ATen/core/*.cpp',
     'c10/**/*.cpp',
+    'c10/core/**/*.h',
     'torch/csrc/**/*.cpp',
 ]
 exclude_patterns = [

View File

@@ -116,5 +116,6 @@ struct hash<c10::DeviceType> {
 } // namespace std
 namespace torch {
+// NOLINTNEXTLINE(misc-unused-using-decls)
 using c10::DeviceType;
-}
+} // namespace torch
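Note on the suppression above: misc-unused-using-decls flags using-declarations that are never referenced in the analyzed translation unit, but this alias exists purely to re-export c10::DeviceType under the torch namespace. A minimal sketch of the pattern (the names here are illustrative, not the real c10 declarations):

```cpp
// A header-style re-export that clang-tidy's misc-unused-using-decls
// would flag as "unused" in isolation.
namespace impl {
enum class DeviceType { CPU, CUDA };
} // namespace impl

namespace api {
// Not used in this translation unit; it exists only so downstream code
// can say api::DeviceType.
// NOLINTNEXTLINE(misc-unused-using-decls)
using impl::DeviceType;
} // namespace api

int main() {
  api::DeviceType d = api::DeviceType::CPU; // the "downstream" use
  return d == api::DeviceType::CPU ? 0 : 1;
}
```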

View File

@@ -716,6 +716,7 @@ constexpr DispatchKey toRuntimePerBackendFunctionalityKey(
 namespace torch {
 // Expose the constant, but not the TYPE (DispatchKey is an implementation
 // detail!)
+// NOLINTNEXTLINE(misc-unused-using-decls)
 using c10::kAutograd;
 } // namespace torch

View File

@@ -319,6 +319,7 @@ class C10_API Scalar {
   enum class Tag { HAS_d, HAS_i, HAS_z, HAS_b, HAS_sd, HAS_si, HAS_sb };
   // NB: assumes that self has already been cleared
+  // NOLINTNEXTLINE(cppcoreguidelines-rvalue-reference-param-not-moved)
   C10_ALWAYS_INLINE void moveFrom(Scalar&& rhs) noexcept {
     v = rhs.v;
     tag = rhs.tag;
@@ -337,6 +338,7 @@ class C10_API Scalar {
     int64_t i;
     c10::complex<double> z;
     c10::intrusive_ptr_target* p;
+    // NOLINTNEXTLINE(modernize-use-equals-default)
     v_t() {} // default constructor
   } v;
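The modernize-use-equals-default suppression above is needed rather than obeyed: Scalar's payload is a union containing c10::complex<double>, and a union whose member has a non-trivial default constructor gets its defaulted default constructor implicitly deleted, so the empty body must stay. A reduced sketch, with Complex standing in for c10::complex<double>:

```cpp
#include <cstdint>

// Stand-in for c10::complex<double>: default member initializers make
// its default constructor non-trivial.
struct Complex {
  double real{0.0};
  double imag{0.0};
};

union Payload {
  double d;
  int64_t i;
  Complex z;
  // "Payload() = default;" would be implicitly *deleted* here, because
  // member z has a non-trivial default constructor; the empty body is
  // what keeps the union default-constructible.
  // NOLINTNEXTLINE(modernize-use-equals-default)
  Payload() {}
};

int main() {
  Payload p;
  p.i = 42; // activate the int64_t member
  return p.i == 42 ? 0 : 1;
}
```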

View File

@@ -351,6 +351,7 @@ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(SPECIALIZE_CppTypeToScalarType)
 #define DEFINE_CONSTANT(_, name) \
   constexpr ScalarType k##name = ScalarType::name;
+// NOLINTNEXTLINE(clang-diagnostic-unused-const-variable)
 AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(DEFINE_CONSTANT)
 #undef DEFINE_CONSTANT
@@ -443,7 +444,6 @@ static inline ScalarType toQIntType(ScalarType t) {
 static inline ScalarType toUnderlying(ScalarType t) {
   switch (t) {
     case ScalarType::QUInt8:
-      [[fallthrough]];
     case ScalarType::QUInt4x2:
       [[fallthrough]];
     case ScalarType::QUInt2x4:
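The removed [[fallthrough]]; sat directly between two case labels with no statements in between, where the attribute is redundant; compilers only require it when executable statements precede the next label. A standalone illustration:

```cpp
#include <cstdio>

int to_underlying_kind(int t) {
  switch (t) {
    case 0: // adjacent labels: no [[fallthrough]] needed between these
    case 1:
      return 10;
    case 2:
      std::puts("work for case 2, then fall through");
      [[fallthrough]]; // required: statements precede the next label
    case 3:
      return 20;
    default:
      return -1;
  }
}

int main() {
  return to_underlying_kind(2) == 20 ? 0 : 1;
}
```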

View File

@@ -36,12 +36,12 @@ struct C10_API Storage {
   // Allocates memory buffer using given allocator and creates a storage with it
   Storage(
       use_byte_size_t /*use_byte_size*/,
-      SymInt size_bytes,
+      const SymInt& size_bytes,
       Allocator* allocator = nullptr,
       bool resizable = false)
       : storage_impl_(c10::make_intrusive<StorageImpl>(
             StorageImpl::use_byte_size_t(),
-            std::move(size_bytes),
+            size_bytes,
             allocator,
             resizable)) {}
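The signature change above trades pass-by-value-plus-move for a const reference. For a type like SymInt, which in its common packed case is essentially a tagged int64_t whose copy is no more expensive than its move, the two forms are equivalent, and the reference avoids a redundant parameter object. A sketch of the comparison (SymIntLike is a hypothetical stand-in, not the real c10::SymInt):

```cpp
#include <cstdint>
#include <utility>

// Hypothetical stand-in for c10::SymInt's common case: a plain 64-bit
// value, so copying is as cheap as moving.
struct SymIntLike {
  int64_t value = 0;
};

struct StorageLike {
  SymIntLike size;
  explicit StorageLike(SymIntLike s) : size(s) {}
};

// Before: copy at the call site, move into the member.
StorageLike make_by_value(SymIntLike s) {
  return StorageLike(std::move(s));
}

// After: a single copy where the member is initialized.
StorageLike make_by_const_ref(const SymIntLike& s) {
  return StorageLike(s);
}

int main() {
  SymIntLike n{128};
  return make_by_value(n).size.value == make_by_const_ref(n).size.value
      ? 0
      : 1;
}
```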

View File

@@ -104,7 +104,7 @@ struct C10_API StorageImpl : public c10::intrusive_ptr_target {
   // TODO: remove later
   void set_nbytes(size_t size_bytes) {
-    size_bytes_ = size_bytes;
+    size_bytes_ = static_cast<int64_t>(size_bytes);
     size_bytes_is_heap_allocated_ = false;
   }
@@ -193,7 +193,7 @@ struct C10_API StorageImpl : public c10::intrusive_ptr_target {
       at::DataPtr&& data_ptr,
       size_t size_bytes) {
     data_ptr_ = std::move(data_ptr);
-    size_bytes_ = size_bytes;
+    size_bytes_ = static_cast<int64_t>(size_bytes);
     size_bytes_is_heap_allocated_ = false;
     allocator_ = nullptr;
     resizable_ = false;
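Both casts above make the unsigned-to-signed conversion explicit: allocation sizes arrive as size_t, while size_bytes_ is signed (SymInt wraps an int64_t), and the implicit conversion is what the sign-conversion/narrowing checks complain about. Reduced to its essence:

```cpp
#include <cstddef>
#include <cstdint>

struct SizeHolder {
  int64_t size_bytes_ = 0; // signed, like the int64_t inside SymInt

  void set_nbytes(size_t size_bytes) {
    // An implicit size_t -> int64_t assignment draws sign-conversion /
    // narrowing diagnostics; the cast states the conversion is intended.
    size_bytes_ = static_cast<int64_t>(size_bytes);
  }
};

int main() {
  SizeHolder h;
  h.set_nbytes(4096);
  return h.size_bytes_ == 4096 ? 0 : 1;
}
```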

View File

@@ -17,5 +17,5 @@ constexpr size_t gAlignment = 64;
 constexpr size_t gPagesize = 4096;
 // since the default thp pagesize is 2MB, enable thp only
 // for buffers of size 2MB or larger to avoid memory bloating
-constexpr size_t gAlloc_threshold_thp = 2 * 1024 * 1024;
+constexpr size_t gAlloc_threshold_thp = static_cast<size_t>(2) * 1024 * 1024;
 } // namespace c10
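Casting the first factor switches the whole multiplication from int to size_t arithmetic. The 2 MiB product fits in int anyway, so here this mainly silences the implicit-widening diagnostic, but the pattern also stays correct if the factors ever grow past INT_MAX:

```cpp
#include <cstddef>

// Once the first operand is size_t, every multiplication happens in
// size_t; with plain int literals the product is computed in int and
// only then widened, which is what clang-tidy flags.
constexpr size_t kThpThreshold = static_cast<size_t>(2) * 1024 * 1024;
static_assert(kThpThreshold == 2097152, "2 MiB");

// A larger threshold would genuinely overflow 32-bit int arithmetic
// without the cast (8 GiB shown; assumes a 64-bit size_t).
constexpr size_t kBigThreshold = static_cast<size_t>(8) * 1024 * 1024 * 1024;
static_assert(kBigThreshold == 8589934592ULL, "8 GiB");

int main() {
  return kThpThreshold < kBigThreshold ? 0 : 1;
}
```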

View File

@@ -290,6 +290,7 @@ struct NoOpDeviceGuardImpl final : public DeviceGuardImplInterface {
 // in a Meyer singleton), it implies that you must *leak* objects when
 // putting them in the registry. This is done by deleting the destructor
 // on DeviceGuardImplInterface.
+// NOLINTNEXTLINE(*c-arrays*)
 extern C10_API std::atomic<const DeviceGuardImplInterface*>
     device_guard_impl_registry[static_cast<size_t>(
         DeviceType::COMPILE_TIME_MAX_DEVICE_TYPES)];
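modernize-avoid-c-arrays would normally suggest std::array here; the registry keeps the C array (presumably for its guaranteed zero-initialization semantics and unchanged ABI) and suppresses the check instead. A sketch of both forms, with a hypothetical element count in place of COMPILE_TIME_MAX_DEVICE_TYPES:

```cpp
#include <array>
#include <atomic>
#include <cstddef>

constexpr size_t kMaxDeviceTypes = 16; // hypothetical count

// What the check would prefer: the size lives in the type.
std::array<std::atomic<const void*>, kMaxDeviceTypes> registry_modern{};

// What the header keeps, with the check suppressed; both variants are
// zero-initialized at namespace scope.
// NOLINTNEXTLINE(*c-arrays*)
std::atomic<const void*> registry_legacy[kMaxDeviceTypes];

int main() {
  registry_legacy[0].store(nullptr);
  return registry_modern[0].load() == nullptr ? 0 : 1;
}
```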

View File

@@ -3,8 +3,7 @@
 #include <c10/macros/Export.h>
 #include <atomic>
-namespace c10 {
-namespace impl {
+namespace c10::impl {
 // This TLS controls whether or not we permanently associate PyObject
 // with Tensor the first time it is allocated. When hermetic PyObject
@@ -57,5 +56,4 @@ struct C10_API HermeticPyObjectTLS {
   static bool get_tls_state();
 };
-} // namespace impl
-} // namespace c10
+} // namespace c10::impl
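This is the first of many hunks applying C++17 nested namespace definitions (clang-tidy's modernize-concat-nested-namespaces); the same mechanical rewrite recurs in most of the headers below. The two spellings are exactly equivalent:

```cpp
// Pre-C++17 spelling: two declarations, two closing braces.
namespace before { namespace impl {
struct Widget {};
} // namespace impl
} // namespace before

// C++17 nested namespace definition: same semantics, one brace to close.
namespace after::impl {
struct Widget {};
} // namespace after::impl

int main() {
  before::impl::Widget a;
  after::impl::Widget b;
  (void)a;
  (void)b;
  return 0;
}
```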

View File

@@ -9,8 +9,7 @@
 #include <c10/util/C++17.h>
 #include <c10/util/Optional.h>
-namespace c10 {
-namespace impl {
+namespace c10::impl {
 /**
  * A DeviceGuard is an RAII class that sets a device to some value
@@ -423,5 +422,4 @@ class InlineOptionalDeviceGuard {
   optional<InlineDeviceGuard<T>> guard_;
 };
-} // namespace impl
-} // namespace c10
+} // namespace c10::impl

View File

@@ -5,8 +5,7 @@
 #include <c10/core/impl/DeviceGuardImplInterface.h>
 #include <c10/util/Exception.h>
-namespace c10 {
-namespace impl {
+namespace c10::impl {
 template <typename T>
 struct InlineEvent final {
@@ -111,5 +110,4 @@ struct InlineEvent final {
   bool was_marked_for_recording_ = false;
 };
-} // namespace impl
-} // namespace c10
+} // namespace c10::impl

View File

@@ -4,8 +4,7 @@
 #include <c10/util/ArrayRef.h>
 #include <c10/util/irange.h>
-namespace c10 {
-namespace impl {
+namespace c10::impl {
 /**
  * A StreamGuard is an RAII class that changes the current device
@@ -253,5 +252,4 @@ class InlineMultiStreamGuard {
   }
 };
-} // namespace impl
-} // namespace c10
+} // namespace c10::impl

View File

@@ -20,8 +20,7 @@
 // set, but TLS is defined to be zero-initialized, so this doesn't actually work
 // (if it's inverted, you want the set to be -1 initialized).
-namespace c10 {
-namespace impl {
+namespace c10::impl {
 // POD version of LocalDispatchKeySet. Declared here just so that
 // we can put it in the guards.
@@ -160,5 +159,4 @@ C10_API void tls_set_dispatch_key_included(DispatchKey x, bool desired_state);
 C10_API bool tls_is_dispatch_keyset_excluded(DispatchKeySet ks);
 C10_API bool tls_is_dispatch_keyset_included(DispatchKeySet ks);
-} // namespace impl
-} // namespace c10
+} // namespace c10::impl

View File

@@ -20,16 +20,13 @@ class OperatorHandle;
 struct TensorImpl;
 } // namespace c10
-namespace torch {
-namespace jit {
+namespace torch::jit {
 using Stack = std::vector<c10::IValue>;
 }
-} // namespace torch
 // Actual implementation
-namespace c10 {
-namespace impl {
+namespace c10::impl {
 struct C10_API PyInterpreter;
@@ -239,5 +236,4 @@ enum class PyInterpreterStatus {
   TAGGED_BY_OTHER,
 };
-} // namespace impl
-} // namespace c10
+} // namespace c10::impl

View File

@@ -7,8 +7,7 @@
 #include <atomic>
-namespace c10 {
-namespace impl {
+namespace c10::impl {
 struct C10_API PyObjectSlot {
  public:
@@ -188,5 +187,4 @@ struct C10_API PyObjectSlot {
   PyObject* pyobj_;
 };
-} // namespace impl
-} // namespace c10
+} // namespace c10::impl

View File

@@ -9,8 +9,7 @@
 #define C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE 5
-namespace c10 {
-namespace impl {
+namespace c10::impl {
 // Packed container for TensorImpl sizes and strides.
 // This design improves on the previous approach of using a pair of
@@ -30,6 +29,7 @@ class C10_API SizesAndStrides {
   using strides_iterator = int64_t*;
   using strides_const_iterator = const int64_t*;
+  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
   SizesAndStrides() {
     size_at_unchecked(0) = 0;
     stride_at_unchecked(0) = 1;
@@ -37,10 +37,12 @@ class C10_API SizesAndStrides {
   ~SizesAndStrides() {
     if (C10_UNLIKELY(!isInline())) {
+      // NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
       free(outOfLineStorage_);
     }
   }
+  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
   SizesAndStrides(const SizesAndStrides& rhs) : size_(rhs.size_) {
     if (C10_LIKELY(rhs.isInline())) {
       copyDataInline(rhs);
@@ -56,6 +58,7 @@ class C10_API SizesAndStrides {
     }
     if (C10_LIKELY(rhs.isInline())) {
       if (C10_UNLIKELY(!isInline())) {
+        // NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
         free(outOfLineStorage_);
       }
       copyDataInline(rhs);
@@ -90,12 +93,14 @@ class C10_API SizesAndStrides {
     }
     if (C10_LIKELY(rhs.isInline())) {
       if (C10_UNLIKELY(!isInline())) {
+        // NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
        free(outOfLineStorage_);
       }
       copyDataInline(rhs);
     } else {
       // They're outline. We're going to steal their vector.
       if (!isInline()) {
+        // NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
        free(outOfLineStorage_);
       }
       outOfLineStorage_ = rhs.outOfLineStorage_;
@@ -278,6 +283,7 @@ class C10_API SizesAndStrides {
   }
   void allocateOutOfLineStorage(size_t size) {
+    // NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
     outOfLineStorage_ = static_cast<int64_t*>(malloc(storageBytes(size)));
     TORCH_CHECK(
         outOfLineStorage_,
@@ -287,6 +293,7 @@ class C10_API SizesAndStrides {
   void resizeOutOfLineStorage(size_t newSize) {
     TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!isInline());
     outOfLineStorage_ = static_cast<int64_t*>(
+        // NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
         realloc(outOfLineStorage_, storageBytes(newSize)));
     TORCH_CHECK(
         outOfLineStorage_,
@@ -300,9 +307,9 @@ class C10_API SizesAndStrides {
   size_t size_{1};
   union {
     int64_t* outOfLineStorage_;
+    // NOLINTNEXTLINE(*c-array*)
     int64_t inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE * 2]{};
   };
 };
-} // namespace impl
-} // namespace c10
+} // namespace c10::impl
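SizesAndStrides is a small-buffer-optimized container: up to five (size, stride) pairs live in the inline union member, and larger shapes spill to malloc'd storage, which is why the cppcoreguidelines-no-malloc suppressions pile up here; realloc-style growth of a trivially copyable int64_t buffer has no direct std::vector equivalent. A reduced sketch of the pattern (separate members instead of the real union, for brevity):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdlib>

class SmallInt64Buffer {
 public:
  static constexpr size_t kInline = 5;

  explicit SmallInt64Buffer(size_t n) : size_(n) {
    if (n > kInline) {
      // NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
      heap_ = static_cast<int64_t*>(malloc(n * sizeof(int64_t)));
    }
  }
  ~SmallInt64Buffer() {
    // NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
    free(heap_); // free(nullptr) is a no-op for the inline case
  }
  SmallInt64Buffer(const SmallInt64Buffer&) = delete;
  SmallInt64Buffer& operator=(const SmallInt64Buffer&) = delete;

  int64_t* data() { return heap_ != nullptr ? heap_ : inline_; }
  size_t size() const { return size_; }

 private:
  size_t size_;
  int64_t inline_[kInline] = {}; // inline storage for small shapes
  int64_t* heap_ = nullptr;      // out-of-line storage otherwise
};

int main() {
  SmallInt64Buffer small(3), big(16);
  small.data()[0] = 1;
  big.data()[15] = 2;
  return small.data()[0] + big.data()[15] == 3 ? 0 : 1;
}
```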

View File

@@ -2,8 +2,7 @@
 #include <c10/core/impl/DeviceGuardImplInterface.h>
-namespace c10 {
-namespace impl {
+namespace c10::impl {
 /**
  * An implementation of DeviceGuardImplInterface which delegates
@@ -89,5 +88,4 @@ class VirtualGuardImpl final : public DeviceGuardImplInterface {
   const DeviceGuardImplInterface* impl_ = nullptr;
 };
-} // namespace impl
-} // namespace c10
+} // namespace c10::impl

View File

@@ -5,8 +5,7 @@
 #include <cstddef>
 #include <cstdint>
-namespace c10 {
-namespace util {
+namespace c10::util {
 namespace detail {
 constexpr uint64_t crc64_table[] = {
@@ -124,8 +123,7 @@ crc64(const char* str, size_t size) {
 inline C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA crc64_t crc64(c10::string_view str) {
   return crc64(str.data(), str.size());
 }
-} // namespace util
-} // namespace c10
+} // namespace c10::util
 // Allow usage of crc64_t in std::unordered_set
 C10_DEFINE_HASH_FOR_IDWRAPPER(c10::util::crc64_t);

View File

@@ -445,8 +445,8 @@ C10_API std::string GetExceptionString(const std::exception& e);
     C10_THROW_ERROR(Error, TORCH_CHECK_MSG(cond, type, __VA_ARGS__)); \
   }
 #else
-namespace c10 {
-namespace detail {
+namespace c10::detail {
 template <typename... Args>
 decltype(auto) torchCheckMsgImpl(const char* /*msg*/, const Args&... args) {
   return ::c10::str(args...);
@@ -460,8 +460,7 @@ inline C10_API const char* torchCheckMsgImpl(
     const char* args) {
   return args;
 }
-} // namespace detail
-} // namespace c10
+} // namespace c10::detail
 #define TORCH_CHECK_MSG(cond, type, ...) \
   (::c10::detail::torchCheckMsgImpl(     \
@@ -476,8 +475,7 @@ inline C10_API const char* torchCheckMsgImpl(
   }
 #endif
-namespace c10 {
-namespace detail {
+namespace c10::detail {
 [[noreturn]] C10_API void torchCheckFail(
     const char* func,
@@ -516,8 +514,7 @@ namespace detail {
     const char* condMsg,
     const std::string& userMsg);
-} // namespace detail
-} // namespace c10
+} // namespace c10::detail
 #ifdef STRIP_ERROR_MESSAGES
 #define TORCH_CHECK(cond, ...) \
@@ -645,8 +642,7 @@ namespace detail {
 // Deprecated macros
 // ----------------------------------------------------------------------------
-namespace c10 {
-namespace detail {
+namespace c10::detail {
 /*
 // Deprecation disabled until we fix sites in our codebase
@@ -675,8 +671,7 @@ https://github.com/pytorch/pytorch/issues/20287 for more details.")
  */
 inline void deprecated_AT_ASSERTM() {}
-} // namespace detail
-} // namespace c10
+} // namespace c10::detail
 // Deprecated alias; this alias was deprecated because people kept mistakenly
 // using it for user error checking. Use TORCH_INTERNAL_ASSERT or TORCH_CHECK
View File

@@ -4,8 +4,7 @@
 #include <functional>
 #include <type_traits>
-namespace c10 {
-namespace guts {
+namespace c10::guts {
 /**
  * Access information about result type or arguments from a function type.
@@ -222,5 +221,4 @@ auto tuple_map(std::tuple<Args...>&& tuple, const Mapper& mapper) {
       std::move(tuple), mapper, std::index_sequence_for<Args...>());
 }
-} // namespace guts
-} // namespace c10
+} // namespace c10::guts

View File

@@ -7,8 +7,7 @@
 #include <cinttypes>
 #include <functional>
-namespace c10 {
-namespace util {
+namespace c10::util {
 // TODO Make it work for more compilers
@@ -190,7 +189,6 @@ get_fully_qualified_type_name() noexcept {
   string_view name = detail::fully_qualified_type_name_impl<T>();
   return name;
 }
-} // namespace util
-} // namespace c10
+} // namespace c10::util
 C10_DEFINE_HASH_FOR_IDWRAPPER(c10::util::type_index);

View File

@@ -4,8 +4,7 @@
 #include <c10/util/TypeTraits.h>
 #include <algorithm>
-namespace c10 {
-namespace guts {
+namespace c10::guts {
 template <class... T>
 struct false_t : std::false_type {};
@@ -512,5 +511,4 @@ decltype(auto) map_types_to_values(Func&& func) {
 }
 } // namespace typelist
-} // namespace guts
-} // namespace c10
+} // namespace c10::guts

View File

@@ -2,8 +2,7 @@
 #include <c10/util/C++17.h>
-namespace c10 {
-namespace guts {
+namespace c10::guts {
 /**
  * is_equality_comparable<T> is true_type iff the equality operator is defined
@@ -148,5 +147,4 @@ struct is_type_condition<
  */
 template <class T>
 struct is_fundamental : std::is_fundamental<T> {};
-} // namespace guts
-} // namespace c10
+} // namespace c10::guts

View File

@@ -433,9 +433,9 @@ constexpr complex<T> operator/(const T& lhs, const complex<T>& rhs) {
 // not support this when T is a floating-point number. This is useful because it
 // saves a lot of "static_cast" when operate a complex and an integer. This
 // makes the code both less verbose and potentially more efficient.
-#define COMPLEX_INTEGER_OP_TEMPLATE_CONDITION \
-  typename std::enable_if_t< \
-      std::is_floating_point<fT>::value && std::is_integral<iT>::value, \
+#define COMPLEX_INTEGER_OP_TEMPLATE_CONDITION \
+  typename std::enable_if_t< \
+      std::is_floating_point_v<fT> && std::is_integral_v<iT>, \
       int> = 0
 template <typename fT, typename iT, COMPLEX_INTEGER_OP_TEMPLATE_CONDITION>
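The macro body above swaps ::value for the C++17 _v variable templates (the modernize-type-traits rewrite); std::enable_if_t had already replaced the older typename std::enable_if<...>::type spelling. Side by side, in a self-contained form:

```cpp
#include <type_traits>

// Long forms and their C++14/17 shorthands:
//   typename std::enable_if<C, int>::type  ->  std::enable_if_t<C, int>
//   std::is_floating_point<T>::value       ->  std::is_floating_point_v<T>
template <
    typename fT,
    typename iT,
    std::enable_if_t<std::is_floating_point_v<fT> && std::is_integral_v<iT>,
                     int> = 0>
fT scaled(fT x, iT n) {
  return x * static_cast<fT>(n); // one explicit cast instead of many
}

int main() {
  return scaled(1.5, 4) == 6.0 ? 0 : 1;
}
```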

View File

@@ -494,9 +494,9 @@ class sherwood_v3_table : private EntryAlloc,
 // otherwise.
 template <
     class target_type = const value_type,
-    class = typename std::enable_if<
-        std::is_same<target_type, const value_type>::value &&
-        !std::is_same<target_type, value_type>::value>::type>
+    class = std::enable_if_t<
+        std::is_same_v<target_type, const value_type> &&
+        !std::is_same_v<target_type, value_type>>>
 operator templated_iterator<target_type>() const {
   return {current};
 }
@@ -726,7 +726,7 @@ class sherwood_v3_table : private EntryAlloc,
   rehash_for_other_container(*this);
 }
-void swap(sherwood_v3_table& other) {
+void swap(sherwood_v3_table& other) noexcept {
   using std::swap;
   swap_pointers(other);
   swap(static_cast<ArgumentHash&>(*this), static_cast<ArgumentHash&>(other));
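Adding noexcept to swap satisfies clang-tidy's performance-noexcept-swap: member swaps are expected not to throw, and marking them lets traits like std::is_nothrow_swappable and move-based algorithms take the cheap path. In miniature:

```cpp
#include <cstddef>
#include <utility>

struct Table {
  int* data = nullptr;
  size_t n = 0;

  // performance-noexcept-swap asks for exactly this annotation.
  void swap(Table& other) noexcept {
    std::swap(data, other.data);
    std::swap(n, other.n);
  }
};

int main() {
  Table a, b;
  a.n = 3;
  a.swap(b);
  static_assert(noexcept(std::declval<Table&>().swap(std::declval<Table&>())),
                "swap must not throw");
  return b.n == 3 ? 0 : 1;
}
```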

View File

@@ -231,6 +231,7 @@ class intrusive_ptr final {
   // This static_assert triggers on MSVC
   // error C2131: expression did not evaluate to a constant
   static_assert(
+      // NOLINTNEXTLINE(misc-redundant-expression)
       NullType::singleton() == NullType::singleton(),
       "NullType must have a constexpr singleton() method");
 #endif
@@ -340,6 +341,7 @@ class intrusive_ptr final {
   }
   template <class From, class FromNullType>
+  // NOLINTNEXTLINE(cppcoreguidelines-rvalue-reference-param-not-moved)
   /* implicit */ intrusive_ptr(intrusive_ptr<From, FromNullType>&& rhs) noexcept
       : target_(
             detail::assign_ptr_<TTarget, NullType, FromNullType>(rhs.target_)) {
@@ -368,6 +370,7 @@ class intrusive_ptr final {
   }
   intrusive_ptr& operator=(intrusive_ptr&& rhs) & noexcept {
+    // NOLINTNEXTLINE(*assign*)
     return operator= <TTarget, NullType>(std::move(rhs));
   }
@@ -385,6 +388,8 @@ class intrusive_ptr final {
     if (this == &rhs) {
       return *this;
     }
+    // NOLINTNEXTLINE(misc-unconventional-assign-operator,
+    // cppcoreguidelines-c-copy-assignment-signature)
     return operator= <TTarget, NullType>(rhs);
   }
@@ -724,6 +729,7 @@ class weak_intrusive_ptr final {
   template <class From, class FromNullType>
   /* implicit */ weak_intrusive_ptr(
+      // NOLINTNEXTLINE(cppcoreguidelines-rvalue-reference-param-not-moved)
       weak_intrusive_ptr<From, FromNullType>&& rhs) noexcept
       : target_(
             detail::assign_ptr_<TTarget, NullType, FromNullType>(rhs.target_)) {
@@ -753,6 +759,7 @@ class weak_intrusive_ptr final {
   }
   weak_intrusive_ptr& operator=(weak_intrusive_ptr&& rhs) & noexcept {
+    // NOLINTNEXTLINE(*assign*)
     return operator= <TTarget, NullType>(std::move(rhs));
   }
@@ -771,6 +778,7 @@ class weak_intrusive_ptr final {
     if (this == &rhs) {
       return *this;
     }
+    // NOLINTNEXTLINE(*assign*)
     return operator= <TTarget, NullType>(rhs);
   }
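Several of the suppressions above concern cppcoreguidelines-rvalue-reference-param-not-moved: the converting constructors take rhs by rvalue reference but never call std::move on it, because the pointee is stolen directly out of rhs (via assign_ptr_ in the real code). A sketch of that shape, using a simplified owning pointer rather than the actual intrusive_ptr machinery:

```cpp
#include <utility>

template <typename T>
class OwningPtr {
 public:
  OwningPtr() = default;
  explicit OwningPtr(T* p) : target_(p) {}
  ~OwningPtr() { delete target_; }
  OwningPtr(const OwningPtr&) = delete;
  OwningPtr& operator=(const OwningPtr&) = delete;

  // Ownership is transferred by nulling rhs's raw pointer rather than by
  // std::move on the parameter -- exactly the shape
  // cppcoreguidelines-rvalue-reference-param-not-moved complains about.
  template <typename From>
  // NOLINTNEXTLINE(cppcoreguidelines-rvalue-reference-param-not-moved)
  /* implicit */ OwningPtr(OwningPtr<From>&& rhs) noexcept
      : target_(rhs.release()) {}

  T* release() noexcept {
    T* p = target_;
    target_ = nullptr;
    return p;
  }
  T* get() const noexcept { return target_; }

 private:
  T* target_ = nullptr;
};

struct Base {
  virtual ~Base() = default;
};
struct Derived : Base {};

int main() {
  OwningPtr<Derived> d(new Derived());
  OwningPtr<Base> b(std::move(d)); // converting "move": pointer is stolen
  return b.get() != nullptr && d.get() == nullptr ? 0 : 1;
}
```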

View File

@@ -56,8 +56,7 @@ unsigned char _BitScanReverse64(unsigned long* _Index, unsigned __int64 _Mask);
 }
 #endif
-namespace c10 {
-namespace llvm {
+namespace c10::llvm {
 /// The behavior an operation has on an input of 0.
 enum ZeroBehavior {
   /// The returned value is undefined.
@@ -900,5 +899,4 @@ std::enable_if_t<std::is_unsigned_v<T>, T> SaturatingMultiplyAdd(
 /// Use this rather than HUGE_VALF; the latter causes warnings on MSVC.
 extern const float huge_valf;
-} // namespace llvm
-} // namespace c10
+} // namespace c10::llvm

View File

@@ -484,6 +484,7 @@ class basic_string_view final {
   }
   template <class Condition>
+  // NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward)
   constexpr size_type find_first_if_(size_type pos, Condition&& condition)
       const noexcept {
     if (pos + 1 <= size()) {
@@ -497,6 +498,7 @@ class basic_string_view final {
   }
   template <class Condition>
+  // NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward)
   constexpr size_type find_last_if_(size_type pos, Condition&& condition)
       const noexcept {
     // Write it iteratively. This is faster.
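cppcoreguidelines-missing-std-forward fires whenever a forwarding-reference parameter is never passed through std::forward. Here condition is only invoked locally, never handed on to another function, so forwarding would change nothing and the check is suppressed. A standalone illustration:

```cpp
#include <cstddef>

// condition is a forwarding reference that is only *called*, never
// forwarded onward, so std::forward would be a no-op here.
template <class Condition>
// NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward)
constexpr size_t find_first_if(const char* s, size_t n, Condition&& condition) {
  for (size_t i = 0; i < n; ++i) {
    if (condition(s[i])) {
      return i;
    }
  }
  return static_cast<size_t>(-1);
}

int main() {
  constexpr char text[] = {'a', 'b', 'c', '1'};
  auto is_digit = [](char c) { return c >= '0' && c <= '9'; };
  return find_first_if(text, 4, is_digit) == 3 ? 0 : 1;
}
```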

View File

@@ -40,12 +40,11 @@
 // later. So the namespace is not fixed at the moment.
 // Make at::Half a fundamental type.
-namespace c10 {
-namespace guts {
+namespace c10::guts {
 template <>
 struct is_fundamental<at::Half> : std::true_type {};
-} // namespace guts
-} // namespace c10
+} // namespace c10::guts
 namespace caffe2 {