[16/N] Fix additional warnings reported by clang-tidy-17 (#143714)

Pull Request resolved: https://github.com/pytorch/pytorch/pull/143714
Approved by: https://github.com/Skylion007, https://github.com/albanD
cyy
2024-12-24 03:29:35 +00:00
committed by PyTorch MergeBot
parent 49fdc52fd2
commit 1feae27ed6
10 changed files with 40 additions and 41 deletions

View File

@@ -266,6 +266,7 @@ exclude_patterns = [
'torch/csrc/inductor/aoti_torch/c/shim.h',
'torch/csrc/jit/**/*',
'torch/csrc/jit/serialization/mobile_bytecode_generated.h',
'torch/csrc/utils/generated_serialization_types.h',
'torch/csrc/utils/pythoncapi_compat.h',
'torch/csrc/inductor/aoti_runtime/sycl_runtime_wrappers.h',
]

View File

@@ -1,4 +1,5 @@
#pragma once
#include <cstdint>
namespace at {
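
For context: fixed-width integer types such as int64_t are declared in <cstdint>, and relying on transitive includes is fragile (this is the kind of thing checks along the lines of misc-include-cleaner flag; the diff itself does not name the check). A minimal sketch of why the explicit include matters:

#include <cstdint>  // provides int64_t / std::int64_t

// Without the include above, code like this only compiles when some other
// header happens to pull in <cstdint> transitively.
inline std::int64_t add_ids(std::int64_t a, std::int64_t b) {
  return a + b;
}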

View File

@@ -284,8 +284,7 @@ class CuBlasLtMatmulDescriptor : public CuBlasLtDescriptor<
}
template <typename T>
inline void setAttribute(cublasLtMatmulDescAttributes_t attr, const T value) {
// NOLINTNEXTLINE(bugprone-sizeof-expression)
TORCH_CUDABLAS_CHECK(::cublasLtMatmulDescSetAttribute(descriptor(), attr, &value, sizeof(T)));
TORCH_CUDABLAS_CHECK(::cublasLtMatmulDescSetAttribute(descriptor(), attr, &value, sizeof(value)));
}
};
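
The dropped NOLINT names the check: bugprone-sizeof-expression is suspicious of sizeof(T) because it can silently diverge from the object actually passed if the declaration changes, whereas sizeof(value) keeps the byte count tied to the argument itself. A minimal, self-contained sketch of the same idea (all names here are illustrative, not from the PR):

#include <cstddef>
#include <cstdio>

// Hypothetical sink that consumes `size` bytes starting at `buf`.
inline void write_bytes(const void* buf, std::size_t size) {
  (void)buf;  // a real implementation would copy from buf
  std::printf("writing %zu bytes\n", size);
}

template <typename T>
void set_attribute_sketch(const T value) {
  // sizeof(value) and sizeof(T) agree today, but only the former stays correct
  // if the parameter later becomes, say, a pointer or a wrapper type.
  write_bytes(&value, sizeof(value));
}

int main() {
  set_attribute_sketch<long>(42L);  // prints sizeof(long) on this platform
  return 0;
}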

View File

@@ -111,7 +111,7 @@ embedding_dense_backward_batch_rule(
*/
template<typename F, F Func, typename... ExtraArgs>
std::tuple<Tensor, std::optional<int64_t>>
grid_sample_batch_rule(const Tensor& input, std::optional<int64_t> input_bdim, const Tensor& grid, std::optional<int64_t> grid_bdim, ExtraArgs... extra_args) {
static grid_sample_batch_rule(const Tensor& input, std::optional<int64_t> input_bdim, const Tensor& grid, std::optional<int64_t> grid_bdim, ExtraArgs... extra_args) {
std::tuple<Tensor, std::optional<int64_t>> result;
if (input_bdim && !grid_bdim) {
auto new_input = reshape_dim_into(*input_bdim, 1, input);
@@ -175,7 +175,7 @@ grid_sample_backward_helper_out(
template<typename F, F Func, typename... ExtraArgs>
std::tuple<Tensor, std::optional<int64_t>, Tensor, std::optional<int64_t>>
grid_sample_backward_batch_rule(
static grid_sample_backward_batch_rule(
const Tensor& grad_output, std::optional<int64_t> grad_output_bdim,
const Tensor& input, std::optional<int64_t> input_bdim,
const Tensor& grid, std::optional<int64_t> grid_bdim,
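
Adding static gives these file-local template helpers internal linkage, so the compiler no longer expects a prior declaration in a header and the symbols cannot clash across translation units (the diff does not name the exact warning, so that motivation is an inference). A small illustrative sketch:

// In a .cpp file: `static` (or an unnamed namespace) gives the helper internal
// linkage, so no header declaration is expected and the symbol stays local.
template <typename T>
static T scale_sketch(T v, T factor) {
  return v * factor;
}

int use_scale() {
  return scale_sketch(21, 2);  // callable only from this translation unit
}

An unnamed namespace would have the same effect; static is the smaller diff.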

View File

@@ -136,10 +136,10 @@ Welford<T> welford_combine(const Welford<T>& acc, const T& data, const WeightRec
template <typename T>
struct IndexValue {
int64_t index;
int64_t index{};
T value;
IndexValue(int64_t idx, T val) :index(idx), value(val) {};
IndexValue() {};
IndexValue(int64_t idx, T val) :index(idx), value(val) {}
IndexValue() = default;
};
#if INDUCTOR_USE_VECTOR_TYPES()
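
Two routine cleanups are visible in this hunk: the empty constructor body with a trailing semicolon (`IndexValue() {};`) becomes `= default`, and `index` gains a default member initializer so a default-constructed IndexValue no longer holds an indeterminate index. A compressed sketch with a concrete element type standing in for T:

#include <cstdint>

struct IndexValueSketch {
  int64_t index{};  // value-initialized to 0 when no constructor sets it
  float value;      // mirrors the diff, which leaves `T value` uninitialized
  IndexValueSketch(int64_t idx, float val) : index(idx), value(val) {}  // no stray ';'
  IndexValueSketch() = default;  // clearer than an empty user-written body
};
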
@@ -564,16 +564,16 @@ constexpr float uint32_to_uniform_float(uint32_t value) {
return static_cast<float>(value & 0x7FFFFFFF) * scale;
}
float normalized_rand_cpu(uint32_t seed, uint32_t offset) {
inline float normalized_rand_cpu(uint32_t seed, uint32_t offset) {
return uint32_to_uniform_float(at::Philox4_32(seed, 0, offset)());
}
float randn_cpu(uint32_t seed, uint32_t offset) {
inline float randn_cpu(uint32_t seed, uint32_t offset) {
at::Philox4_32 engine(seed, 0, offset);
return engine.randn(10);
}
int64_t randint64_cpu(uint32_t seed, uint32_t offset, int64_t low, int64_t high) {
inline int64_t randint64_cpu(uint32_t seed, uint32_t offset, int64_t low, int64_t high) {
auto gen = at::Philox4_32(seed, 0, offset);
uint64_t r0 = gen();
uint64_t r1 = gen();
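
These helpers live in a header that is textually included by every generated kernel, so marking them inline avoids duplicate-symbol/ODR trouble once more than one translation unit includes the file. A minimal sketch, with an invented header and function name:

// utils_sketch.h (illustrative)
#pragma once

// A non-template function *defined* in a header must be `inline`; otherwise
// each .cpp including this header emits its own external definition and the
// link step fails with duplicate symbols.
inline float half_of(float x) {
  return 0.5f * x;
}
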
@@ -646,7 +646,7 @@ void atomic_add_vec(T *addr, at::vec::VectorizedN<int64_t, NI> index, at::vec::V
}
#endif
std::tuple<std::shared_ptr<int64_t[]>, int> _get_factors(int64_t number) {
inline std::tuple<std::shared_ptr<int64_t[]>, int> _get_factors(int64_t number) {
int count = 0;
for (int64_t i = std::sqrt(number); i > 0; --i) {
if (number % i == 0) {
@@ -664,7 +664,7 @@ std::tuple<std::shared_ptr<int64_t[]>, int> _get_factors(int64_t number) {
return std::make_tuple(factors, count);
}
std::tuple<std::shared_ptr<int64_t[]>, int> get_factors(int64_t number) {
inline std::tuple<std::shared_ptr<int64_t[]>, int> get_factors(int64_t number) {
thread_local std::map<int64_t, std::tuple<std::shared_ptr<int64_t[]>, int>> cache;
auto it = cache.find(number);
if (it != cache.end()) {
@@ -676,7 +676,7 @@ std::tuple<std::shared_ptr<int64_t[]>, int> get_factors(int64_t number) {
}
}
void _mm_get_thread_blocking(
inline void _mm_get_thread_blocking(
int num_threads,
int max_k_slices,
int64_t M,
@@ -772,7 +772,7 @@ void _mm_get_thread_blocking(
assert(Mt != 0);
}
void mm_get_thread_blocking(
inline void mm_get_thread_blocking(
int num_threads,
int max_k_slices,
int64_t M,
@@ -887,25 +887,23 @@ void mm_get_cache_blocking(
}
struct amx_tilecfg {
uint8_t palette_id;
uint8_t start_row;
uint8_t reserved_0[14];
uint16_t colsb[16];
uint8_t rows[16];
uint8_t palette_id{0};
uint8_t start_row{0};
uint8_t reserved_0[14]{};
uint16_t colsb[16]{};
uint8_t rows[16]{};
};
class AMXState {
private:
amx_tilecfg tilecfg_;
uint8_t rows_;
uint16_t colsb_;
uint8_t num_tile_rows_;
uint8_t num_tile_columns_;
amx_tilecfg tilecfg_{};
uint8_t rows_{0};
uint16_t colsb_{0};
uint8_t num_tile_rows_{0};
uint8_t num_tile_columns_{0};
public:
AMXState() : rows_(0), colsb_(0), num_tile_rows_(0), num_tile_columns_(0) {
memset(&tilecfg_, 0, sizeof(tilecfg_));
}
AMXState() = default;
inline void configure(
uint8_t rows,
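
The amx_tilecfg members gain brace default initializers ({} value-initializes arrays element by element), which lets AMXState drop both its member-wise initializer list and the memset and fall back to a defaulted constructor. A trimmed-down sketch of the same pattern with illustrative names:

#include <cstdint>

struct TileCfgSketch {
  uint8_t palette_id{0};
  uint8_t reserved[14]{};  // {} zero-initializes every element
  uint16_t colsb[16]{};
};

class StateSketch {
  TileCfgSketch cfg_{};    // whole aggregate value-initialized
  uint8_t rows_{0};

 public:
  StateSketch() = default;  // the initializers above replace the memset
};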

View File

@@ -154,9 +154,8 @@ static PyObject* THPGenerator_cloneState(PyObject* _self, PyObject* noargs) {
// See Note [Acquire lock when using random generators]
std::scoped_lock<std::mutex> lock(gen.mutex());
auto new_generator = gen.clone();
return THPGenerator_Wrap(new_generator);
return THPGenerator_Wrap(gen.clone());
END_HANDLE_TH_ERRORS
}
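
Passing gen.clone() straight through removes a named local that existed only to be handed to the wrapper; the temporary can then be moved into (or elided at) the call, whereas the named lvalue would be copied unless std::move were added. A generic sketch of the pattern, using stand-in types rather than the generator API:

#include <vector>

// Illustrative callee that takes its argument by value.
inline int wrap_sketch(std::vector<int> v) { return static_cast<int>(v.size()); }

inline std::vector<int> make_vec() { return {1, 2, 3}; }

int caller() {
  // The prvalue from make_vec() initializes the parameter directly (copy
  // elision); a named local would have forced a copy or an explicit std::move.
  return wrap_sketch(make_vec());
}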

View File

@@ -30,7 +30,7 @@ namespace torch::nn {
/// ```
class TORCH_API UpsampleImpl : public Cloneable<UpsampleImpl> {
public:
explicit UpsampleImpl(const UpsampleOptions& options_ = {});
explicit UpsampleImpl(UpsampleOptions options_ = {});
void reset() override;

View File

@@ -4,8 +4,8 @@ namespace F = torch::nn::functional;
namespace torch::nn {
UpsampleImpl::UpsampleImpl(const UpsampleOptions& options_)
: options(options_) {}
UpsampleImpl::UpsampleImpl(UpsampleOptions options_)
: options(std::move(options_)) {}
void UpsampleImpl::reset() {}
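
Together with the header change above, this is the classic modernize-pass-by-value shape: take the options struct by value and std::move it into the member. Callers passing an rvalue now pay a move instead of a copy, while lvalue callers still pay exactly one copy. A self-contained sketch with invented types:

#include <string>
#include <utility>

struct OptionsSketch {
  std::string mode;  // anything non-trivially copyable makes the saving visible
};

class ModuleSketch {
 public:
  // by-value parameter + std::move into the member
  explicit ModuleSketch(OptionsSketch options) : options_(std::move(options)) {}

 private:
  OptionsSketch options_;
};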

View File

@@ -499,7 +499,7 @@ PyObject* TensorGuards_check_verbose(
}
std::string fail_reason = checks[i].check_verbose(
state, THPVariable_Unpack(item), tensor_check_names[i]);
if (fail_reason.length() > 0) {
if (!fail_reason.empty()) {
return Py_BuildValue("s", fail_reason.c_str());
}
}
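
readability-container-size-empty asks for empty() instead of comparing length()/size() against zero; it states intent directly and never depends on size() being cheap. Trivial sketch:

#include <string>

inline bool has_failure(const std::string& fail_reason) {
  return !fail_reason.empty();  // preferred over fail_reason.length() > 0
}
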
@@ -1127,7 +1127,7 @@ std::unordered_set<int64_t> compute_overlapping_tensors(
const std::vector<Tensor>& tensors) {
std::unordered_set<int64_t> aliased_tensor_indices;
for (int64_t i = 0; i < static_cast<int64_t>(tensors.size()); i++) {
auto tensor_i = tensors[i];
const auto& tensor_i = tensors[i];
for (int64_t j = 0; j < i; j++) {
if (!tensors_definitely_do_not_overlap<Meta>(tensor_i, tensors[j])) {
aliased_tensor_indices.insert(i);
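
Binding tensors[i] to a const reference instead of copying it into a local is the usual fix for unnecessary-copy warnings: the loop only reads the element, so no copy is needed. Sketch with plain standard-library types:

#include <cstddef>
#include <vector>

inline long count_large(const std::vector<std::vector<int>>& items) {
  long n = 0;
  for (std::size_t i = 0; i < items.size(); ++i) {
    const auto& item = items[i];  // reference: no element copy just to inspect it
    if (item.size() > 3) {
      ++n;
    }
  }
  return n;
}
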
@@ -1890,7 +1890,7 @@ class STORAGE_OVERLAPPING : public RelationalGuard {
py::object verbose_code_parts)
: RelationalGuard(std::move(verbose_code_parts)),
_overlapping(overlapping),
_checker(checker) {}
_checker(std::move(checker)) {}
bool check_nopybind(PyObject* value) override {
_checker->add(value, _overlapping);
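
Moving the shared_ptr parameter into the member instead of copying it saves one atomic reference-count increment/decrement pair per construction; the parameter is already a by-value copy, so the move costs nothing extra. Sketch:

#include <memory>
#include <utility>

struct CheckerSketch {};  // stand-in for the real checker type

class GuardSketch {
 public:
  explicit GuardSketch(std::shared_ptr<CheckerSketch> checker)
      : checker_(std::move(checker)) {}  // move: no extra refcount bump

 private:
  std::shared_ptr<CheckerSketch> checker_;
};
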
@@ -2210,8 +2210,8 @@ class GuardManager {
*/
template <typename GuardAccessorT>
GuardManager* get_child_manager(
py::object accessor_key,
std::string source,
const py::object& accessor_key,
const std::string& source,
py::handle example_value,
py::handle guard_manager_enum) {
// accessor_key type depends on the GuardAccessorT
@@ -4624,11 +4624,11 @@ void install_no_tensor_aliasing_guard(
}
void install_storage_overlapping_guard_with_checker(
std::shared_ptr<StorageOverlapChecker> checker,
const std::shared_ptr<StorageOverlapChecker>& checker,
const py::list& guard_managers,
py::object verbose_code_parts,
const py::object& verbose_code_parts,
bool overlapping) {
if (guard_managers.size() == 0) {
if (guard_managers.empty()) {
// If there are no GuardManagers, there's no need to create a
// STORAGE_OVERLAPPING guard.
return;
@@ -4648,7 +4648,7 @@ void install_storage_overlapping_guard_with_checker(
void install_storage_overlapping_guard(
const py::list& overlapping_guard_managers,
const py::list& non_overlapping_guard_managers,
py::object verbose_code_parts) {
const py::object& verbose_code_parts) {
// Create a single StorageOverlapChecker that will be shared amongst
// the 2 STORAGE_OVERLAPPING guards below.
std::shared_ptr<StorageOverlapChecker> checker =

View File

@@ -501,6 +501,7 @@ std::vector<std::string> AOTIModelPackageLoader::get_constant_fqns() {
std::unordered_map<std::string, std::string> constant_name_to_fqn =
runner_->getConstantNamesToOriginalFQNs();
std::vector<std::string> constant_fqns;
constant_fqns.reserve(constant_name_to_fqn.size());
for (const auto& it : constant_name_to_fqn) {
constant_fqns.push_back(it.second);
}
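
Calling reserve with the final size before the push_back loop replaces repeated geometric growth (and the accompanying moves of already-inserted strings) with a single allocation. A standalone sketch of the same loop shape:

#include <string>
#include <unordered_map>
#include <vector>

inline std::vector<std::string> values_of(
    const std::unordered_map<std::string, std::string>& name_to_fqn) {
  std::vector<std::string> out;
  out.reserve(name_to_fqn.size());  // one allocation instead of repeated growth
  for (const auto& kv : name_to_fqn) {
    out.push_back(kv.second);
  }
  return out;
}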