[11/N] Fix extra warnings brought by clang-tidy-17 (#139599)

Follows #139385
Pull Request resolved: https://github.com/pytorch/pytorch/pull/139599
Approved by: https://github.com/sraikund16
Author: cyy
Date: 2024-11-04 23:57:39 +00:00
Committed by: PyTorch MergeBot
Parent: 3f248a5735
Commit: 64d9ee88d7
26 changed files with 54 additions and 34 deletions


@@ -35,13 +35,14 @@ cppcoreguidelines-*,
 hicpp-exception-baseclass,
 hicpp-avoid-goto,
 misc-*,
+-misc-confusable-identifiers,
 -misc-const-correctness,
+-misc-include-cleaner,
 -misc-use-anonymous-namespace,
 -misc-unused-parameters,
 -misc-no-recursion,
 -misc-non-private-member-variables-in-classes,
--misc-confusable-identifiers,
 -misc-unused-using-decls,
 modernize-*,
 -modernize-macro-to-enum,
 -modernize-return-braced-init-list,


@@ -227,6 +227,9 @@ exclude_patterns = [
 '**/generated/**',
 '**/*pb.h',
 '**/*inl.h',
+'aten/src/ATen/cpu/FlushDenormal.cpp',
+'aten/src/ATen/cpu/Utils.cpp',
+'aten/src/ATen/cpu/vml.h',
 'aten/src/ATen/CPUFixedAllocator.h',
 'aten/src/ATen/Parallel*.h',
 'c10/xpu/**/*.h',


@@ -40,6 +40,7 @@ struct alignas(64) FreeBlockList {
 namespace {
 // Max cached block sizes: (1 << MAX_SIZE_INDEX) bytes
+// NOLINTNEXTLINE(misc-definitions-in-headers)
 constexpr size_t MAX_SIZE_INDEX = 64;
 }
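Most of this PR is targeted suppressions rather than rewrites, so it leans on clang-tidy's NOLINT comment forms. A minimal sketch of the two forms used throughout the diff (the check names and variables here are illustrative, not taken from the changed files):

// NOLINTNEXTLINE silences the named check on the following line only.
// NOLINTNEXTLINE(misc-definitions-in-headers)
constexpr int kCacheLineSize = 64;

// NOLINTBEGIN/NOLINTEND silence the named check for a whole region; glob
// patterns such as (*c-arrays*) match every check containing that fragment.
// NOLINTBEGIN(cppcoreguidelines-avoid-c-arrays)
int scratch[16];
// NOLINTEND(cppcoreguidelines-avoid-c-arrays)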


@@ -12,6 +12,7 @@
 #include <ATen/cuda/CUDAContext.h>
 #include <ATen/cuda/CUDASparse.h>
+// NOLINTBEGIN(misc-misplaced-const)
 namespace at::cuda::sparse {
 #define CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(scalar_t) \
@@ -316,3 +317,4 @@ void bsrsm2_solve<c10::complex<double>>(
 #endif // AT_USE_HIPSPARSE_TRIANGULAR_SOLVE
 } // namespace at::cuda::sparse
+// NOLINTEND(misc-misplaced-const)


@@ -8,6 +8,7 @@
 namespace at::cuda::sparse {
 cusparseStatus_t destroyConstDnMat(const cusparseDnMatDescr* dnMatDescr) {
+// NOLINTNEXTLINE(*const-cast)
 return cusparseDestroyDnMat(const_cast<cusparseDnMatDescr*>(dnMatDescr));
 }
@@ -83,6 +84,7 @@ cusparseDnMatDescr_t createRawDnMatDescriptor(const Tensor& input, int64_t batch
 #endif
 auto batch_stride = ndim > 2 && batch_offset >= 0 ? input_strides[ndim - 3] : 0;
+// NOLINTNEXTLINE(*const-cast)
 void* data_ptr = is_const ? const_cast<void*>(input.const_data_ptr()) : input.data_ptr();
 void* values_ptr = static_cast<char*>(data_ptr) +
 batch_offset * batch_stride * input.itemsize();
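The suppressed const_cast above follows the usual pattern for wrapping a const-incorrect C API; a minimal sketch with hypothetical names:

// Legacy C entry point that takes a mutable pointer but only reads it.
extern "C" int legacy_sum(int* data, int n);

// Const-correct C++ wrapper: the cast is sound because legacy_sum never
// writes through the pointer; the NOLINT documents that it is intentional.
int sum(const int* data, int n) {
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
  return legacy_sum(const_cast<int*>(data), n);
}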


@@ -18,7 +18,7 @@ Autocast wrapper for CuDNN RNNs (the weight reflattening needs special attention
 // To be registered for the "_cudnn_rnn(...)" schema.
 // _cudnn_rnn is autograd-exposed (test_autocast_cudnn_rnn in test_cuda.py includes a test to confirm)
-std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor>
+static std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor>
 _cudnn_rnn_cast_reflatten(const Tensor & input,
 TensorList weight,
 int64_t weight_stride0,


@@ -6,6 +6,7 @@
 #include <iostream>
 #include <sstream>
+// NOLINTBEGIN(*c-arrays*)
 namespace at::native {
 namespace {
@@ -101,7 +102,7 @@ std::ostream& operator<<(std::ostream & out, const TensorDescriptor& d) {
 int nbDims = 0;
 int dimA[CUDNN_DIM_MAX];
 int strideA[CUDNN_DIM_MAX];
-cudnnDataType_t dtype;
+cudnnDataType_t dtype{};
 cudnnGetTensorNdDescriptor(d.desc(), CUDNN_DIM_MAX, &dtype, &nbDims, dimA, strideA);
 out << " type = " << cudnnTypeToString(dtype) << "\n";
 out << " nbDims = " << nbDims << "\n";
@@ -143,7 +144,7 @@ void FilterDescriptor::set(const at::Tensor &t, const at::MemoryFormat memory_fo
 size[i] = (int) 1;
 }
 dim = std::max(dim, pad);
-cudnnTensorFormat_t filter_format;
+cudnnTensorFormat_t filter_format{};
 switch(memory_format) {
 case at::MemoryFormat::Contiguous:
 filter_format = CUDNN_TENSOR_NCHW;
@@ -155,7 +156,8 @@ void FilterDescriptor::set(const at::Tensor &t, const at::MemoryFormat memory_fo
 default:
 TORCH_INTERNAL_ASSERT(false, "unsupported memory_format for cuDNN filters");
 }
-set(getDataType(t), (int) dim, size, filter_format);
+// NOLINTNEXTLINE(*narrowing-conversions)
+set(getDataType(t), static_cast<int64_t>(dim), size, filter_format);
 }
 std::string cudnnMemoryFormatToString(cudnnTensorFormat_t tformat) {
@@ -175,8 +177,8 @@ std::ostream& operator<<(std::ostream & out, const FilterDescriptor& d) {
 out << "FilterDescriptor " << static_cast<void*>(d.desc()) << "\n";
 int nbDims = 0;
 int dimA[CUDNN_DIM_MAX];
-cudnnDataType_t dtype;
-cudnnTensorFormat_t tformat;
+cudnnDataType_t dtype{};
+cudnnTensorFormat_t tformat{};
 cudnnGetFilterNdDescriptor(d.desc(), CUDNN_DIM_MAX, &dtype, &tformat, &nbDims, dimA);
 out << " type = " << cudnnTypeToString(dtype) << "\n";
 out << " tensor_format = " << cudnnMemoryFormatToString(tformat) << "\n";
@@ -193,3 +195,4 @@ std::ostream& operator<<(std::ostream & out, const FilterDescriptor& d) {
 void FilterDescriptor::print() { std::cout << *this; }
 }
+// NOLINTEND(*c-arrays*)
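The braces added to dtype, filter_format, and tformat above are plain value-initialization, which keeps out-parameters deterministic even if the C call fails to write them. A sketch with a hypothetical API:

extern "C" void get_version(int* out);  // may leave *out untouched on error

void print_version() {
  int version{};          // value-initialized to 0, never indeterminate
  get_version(&version);
  // Reading 'version' is well-defined whether or not get_version wrote it.
}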


@@ -92,6 +92,7 @@ struct DescriptorDeleter {
 // initialized the first time you call set() or any other initializing
 // function.
 template <typename T, cudnnStatus_t (*ctor)(T**), cudnnStatus_t (*dtor)(T*)>
+// NOLINTNEXTLINE(bugprone-exception-escape)
 class TORCH_CUDA_CPP_API Descriptor {
 public:
 // TODO: Figure out why const-correctness doesn't work here
@@ -128,7 +129,7 @@ public:
 void set(const at::Tensor &t, cudnnRNNDataLayout_t layout, int maxSeqLength, int batchSize, int vectorSize, const int* seqLengthArray);
 private:
 void set(cudnnDataType_t dataType, cudnnRNNDataLayout_t layout, int maxSeqLength, int batchSize, int vectorSize, const int* seqLengthArray) {
-AT_CUDNN_CHECK(cudnnSetRNNDataDescriptor(mut_desc(), dataType, layout, maxSeqLength, batchSize, vectorSize, seqLengthArray, NULL));
+AT_CUDNN_CHECK(cudnnSetRNNDataDescriptor(mut_desc(), dataType, layout, maxSeqLength, batchSize, vectorSize, seqLengthArray, nullptr));
 }
 };
@@ -224,6 +225,7 @@ struct TORCH_CUDA_CPP_API SpatialTransformerDescriptor
 }
 };
+// NOLINTNEXTLINE(bugprone-exception-escape)
 struct TORCH_CUDA_CPP_API DropoutDescriptor
 : public Descriptor<
 cudnnDropoutStruct,
@@ -244,9 +246,8 @@ struct TORCH_CUDA_CPP_API DropoutDescriptor
 }
 // Restore a dropout descriptor given a dropout probability and existing RNG state.
-void set(cudnnHandle_t handle, float dropout, at::Tensor state_) {
+void set(cudnnHandle_t handle, float dropout, const at::Tensor& state) {
 TORCH_INTERNAL_ASSERT(dropout > 0, "dropout must be nonzero; otherwise call set_no_dropout");
-state = state_;
 void *state_ptr = state.data_ptr();
 size_t state_size = state.size(0);
 // NB: The seed doesn't actually matter, so we give a dummy value


@@ -5,7 +5,7 @@
 namespace at::native {
 cudnnDataType_t getCudnnDataTypeFromScalarType(const at::ScalarType dtype) {
-if (dtype == c10::kQInt8) {
+if (dtype == c10::kQInt8 || dtype == at::kChar) {
 return CUDNN_DATA_INT8;
 } else if (dtype == at::kFloat) {
 return CUDNN_DATA_FLOAT;
@@ -19,8 +19,6 @@ cudnnDataType_t getCudnnDataTypeFromScalarType(const at::ScalarType dtype) {
 return CUDNN_DATA_INT32;
 } else if (dtype == at::kByte) {
 return CUDNN_DATA_UINT8;
-} else if (dtype == at::kChar) {
-return CUDNN_DATA_INT8;
 }
 std::string msg("getCudnnDataTypeFromScalarType() not supported for ");
 msg += toString(dtype);


@@ -32,7 +32,7 @@ struct NestedTensorImpl;
 // The following functions are used to construct nested tensors from buffers and
 // metadata.
-inline at::Tensor wrap_buffer(at::Tensor buffer, at::Tensor nested_sizes) {
+inline at::Tensor wrap_buffer(const at::Tensor& buffer, const at::Tensor& nested_sizes) {
 TORCH_CHECK(
 buffer.dim() == 1,
 "Expected given buffer to be 1dim, but got ",
@@ -41,19 +41,19 @@ inline at::Tensor wrap_buffer(at::Tensor buffer, at::Tensor nested_sizes) {
 TORCH_CHECK(
 buffer.is_contiguous(), "Expected given buffer to be contiguous.");
 return at::detail::make_tensor<NestedTensorImpl>(
-std::move(buffer), std::move(nested_sizes));
+buffer, nested_sizes);
 }
 // TODO: Figure out if we need a non-moving wrap_buffer()
 inline at::Tensor wrap_buffer(
-at::Tensor buffer,
+const at::Tensor& buffer,
 at::Tensor nested_sizes,
 at::Tensor nested_strides,
 at::Tensor storage_offsets) {
 TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
 buffer.is_contiguous(), "Given buffer must be contiguous.");
 return at::detail::make_tensor<NestedTensorImpl>(
-std::move(buffer),
+buffer,
 std::move(nested_sizes),
 std::move(nested_strides),
 std::move(storage_offsets));
@@ -95,9 +95,9 @@ inline at::Tensor create_nested_view_tensor(
 return at::detail::make_tensor<NestedTensorImpl>(
 c10::TensorImpl::VIEW,
 base,
-nested_sizes,
-nested_strides,
-storage_offsets);
+std::move(nested_sizes),
+std::move(nested_strides),
+std::move(storage_offsets));
 }
 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
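The signature changes in this header follow the usual clang-tidy performance guidance (performance-unnecessary-value-param and friends): a parameter that is stored should be taken by value and moved exactly once, while a parameter that is only read should be a const reference. A self-contained sketch with hypothetical types:

#include <string>
#include <utility>

struct Record {
  std::string tag;
  // Sink parameter: by value + std::move costs one move when the caller
  // passes an rvalue, one copy plus one move otherwise.
  explicit Record(std::string tag) : tag(std::move(tag)) {}
};

// Read-only parameter: const& avoids any copy at all.
std::size_t tag_size(const Record& r) { return r.tag.size(); }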


@@ -195,7 +195,7 @@ class TORCH_API Tensor: public TensorBase {
 //
 // TODO: temporarily disabled
-Tensor& operator=(const TensorBase& x) & {
+Tensor& operator=(const TensorBase& x) & noexcept {
 impl_ = x.getIntrusivePtr();
 return *this;
 }
@@ -204,7 +204,7 @@ class TORCH_API Tensor: public TensorBase {
 return *this;
 }
-Tensor& operator=(const Tensor &x) & {
+Tensor& operator=(const Tensor &x) & noexcept {
 return operator=(static_cast<const TensorBase&>(x));
 }
 Tensor& operator=(Tensor &&x) & noexcept {


@@ -664,15 +664,17 @@ struct MaybeOwnedTraits<c10::intrusive_ptr<T>> {
 toDestroy.release();
 }
-static const owned_type& referenceFromBorrow(const borrow_type& borrow) {
+static const owned_type& referenceFromBorrow(
+const borrow_type& borrow) noexcept {
 return borrow;
 }
-static const owned_type* pointerFromBorrow(const borrow_type& borrow) {
+static const owned_type* pointerFromBorrow(
+const borrow_type& borrow) noexcept {
 return &borrow;
 }
-static bool debugBorrowIsValid(const borrow_type& /*borrow*/) {
+static bool debugBorrowIsValid(const borrow_type& /*borrow*/) noexcept {
 return true;
 }
 };
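The noexcept added here and on the Tensor assignment operators above is honest: these functions only form references or bump a reference count, so they cannot throw. A sketch with a hypothetical handle type:

#include <memory>

struct Handle {
  std::shared_ptr<int> impl_;
  // Copy-assigning a shared_ptr only adjusts a reference count, so the
  // operator can be declared noexcept and callers may rely on it.
  Handle& operator=(const Handle& x) noexcept {
    impl_ = x.impl_;
    return *this;
  }
};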


@@ -1,4 +1,5 @@
 #pragma once
+#include <cstdint>
 /**
 ${generated_comment}


@@ -1,5 +1,6 @@
 #ifndef THP_AUTOGRAD_H
 #define THP_AUTOGRAD_H
+#include <torch/csrc/utils/pythoncapi_compat.h>
 PyObject* THPAutograd_initExtension(PyObject* _unused, PyObject* unused);
 void THPAutograd_initFunctions();


@@ -1,4 +1,5 @@
 #pragma once
+#include <torch/csrc/utils/pythoncapi_compat.h>
 namespace torch::autograd {


@@ -1,4 +1,5 @@
 #pragma once
+#include <torch/csrc/utils/pythoncapi_compat.h>
 namespace torch::autograd {


@@ -1,4 +1,5 @@
 #pragma once
+#include <torch/csrc/utils/pythoncapi_compat.h>
 namespace torch::autograd {


@@ -1,5 +1,5 @@
 #pragma once
+#include <torch/csrc/utils/pythoncapi_compat.h>
 namespace torch::autograd {
 void initSpecialFunctions(PyObject* module);
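The pythoncapi_compat.h includes added across these headers pull in a shim that backfills newer CPython C-API helpers on older interpreters. For example, Py_NewRef (added to CPython in 3.10) is supplied as an inline function where missing, so code like this hypothetical sketch builds on every supported Python:

#include <torch/csrc/utils/pythoncapi_compat.h>

static PyObject* identity(PyObject* /*module*/, PyObject* arg) {
  // Py_NewRef increments the refcount and returns the object; on older
  // Pythons the compat header provides an equivalent definition.
  return Py_NewRef(arg);
}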


@@ -1,5 +1,6 @@
 #ifndef THCP_CUDA_MODULE_INC
 #define THCP_CUDA_MODULE_INC
+#include <torch/csrc/utils/pythoncapi_compat.h>
 PyObject* THCPModule_getDevice_wrap(PyObject* self);
 PyObject* THCPModule_setDevice_wrap(PyObject* self, PyObject* arg);


@@ -273,6 +273,7 @@ struct NcclCommList {
 devices.data()));
 }
 NcclCommList(NcclCommList&& foo) = default;
+// NOLINTNEXTLINE(bugprone-exception-escape)
 ~NcclCommList() {
 if (comms) {
 for (const auto i : c10::irange(ndevices)) {
@@ -457,6 +458,7 @@ AutoNcclGroup::AutoNcclGroup(ncclComm_t comm, bool comm_nonblocking)
 #endif
 }
+// NOLINTNEXTLINE(bugprone-exception-escape)
 AutoNcclGroup::~AutoNcclGroup() noexcept(false) {
 #if defined(NCCL_MAJOR) && (NCCL_MAJOR >= 2)
 if (comm_nonblocking_ && comm_ != nullptr) {


@@ -4,7 +4,6 @@
 #if defined(USE_CUDNN) || defined(USE_ROCM)
 #include <torch/csrc/utils/pybind.h>
-#include <array>
 #include <tuple>
 namespace {
@@ -22,7 +21,7 @@ version_tuple getCompileVersion() {
 version_tuple getRuntimeVersion() {
 #ifndef USE_STATIC_CUDNN
-int major, minor, patch;
+int major = 0, minor = 0, patch = 0;
 cudnnGetProperty(MAJOR_VERSION, &major);
 cudnnGetProperty(MINOR_VERSION, &minor);
 cudnnGetProperty(PATCH_LEVEL, &patch);


@@ -378,7 +378,7 @@ struct TORCH_API Result : public std::enable_shared_from_this<Result> {
 }
 template <typename T, typename Fn>
-void visit_if_base(Fn&& fn) const {
+void visit_if_base(const Fn& fn) const {
 visit([&](const auto& extra_fields) {
 using extra_fields_t = typename std::remove_cv_t<
 typename std::remove_reference_t<decltype(extra_fields)>>;


@@ -6,6 +6,6 @@
 namespace torch {
 // declare global_kineto_init for libtorch_cpu.so to call
-TORCH_API void global_kineto_init(void);
+TORCH_API void global_kineto_init();
 } // namespace torch
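This is modernize-redundant-void-arg at work: in C++ an empty parameter list already means the function takes no arguments, so the C-style (void) spelling is noise.

void global_init();       // idiomatic C++ declaration
// void global_init(void);  // equivalent meaning, but flagged by clang-tidy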


@@ -222,7 +222,7 @@ bool collectivesProfilerExists() {
 #ifdef USE_KINETO
 static const std::string setTraceID(const std::string& trace_id) {
-if (trace_id == "") {
+if (trace_id.empty()) {
 return "";
 }
 std::stringstream configss;


@@ -39,7 +39,7 @@ ProfilerConfig::ProfilerConfig(
 bool with_flops,
 bool with_modules,
 ExperimentalConfig experimental_config,
-const std::string& trace_id)
+std::string trace_id)
 : state{state},
 experimental_config{std::move(experimental_config)},
 report_input_shapes{report_input_shapes},
@@ -47,7 +47,7 @@ ProfilerConfig::ProfilerConfig(
 with_stack{with_stack},
 with_flops{with_flops},
 with_modules{with_modules},
-trace_id{trace_id} {}
+trace_id{std::move(trace_id)} {}
 bool ProfilerConfig::disabled() const {
 return state == torch::profiler::impl::ProfilerState::Disabled;


@@ -104,7 +104,7 @@ struct TORCH_API ProfilerConfig {
 bool with_flops = false,
 bool with_modules = false,
 ExperimentalConfig experimental_config = ExperimentalConfig(),
-const std::string& trace_id = "");
+std::string trace_id = "");
 bool disabled() const;
 bool global() const;