Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 12:54:11 +08:00)
[5/N] Fix extra warnings brought by clang-tidy-17 (#138403)
Follows #137983
Pull Request resolved: https://github.com/pytorch/pytorch/pull/138403
Approved by: https://github.com/ezyang
@@ -231,6 +231,7 @@ Tensor FunctionalInverses::slice_Tensor_inverse(const Tensor& base, const Tensor
   }
 }
 
+// NOLINTNEXTLINE(performance-unnecessary-value-param)
 Tensor FunctionalInverses::split_Tensor_inverse(const Tensor& base, const Tensor& mutated_view, InverseReturnMode inverse_return_mode, int64_t mutated_view_idx, c10::SymInt split_size, int64_t dim) {
   // It would be nice if this logic could be re-used from autograd's split_backward(), but I don't think it can.
   // For functionalization, we have only have one of the tensors from the TensorList outputed by split(), and we want to layer i
@@ -452,6 +453,7 @@ Tensor FunctionalInverses::chunk_inverse(const at::Tensor & base, const at::Tens
   return split_with_sizes_inverse(base, mutated_view, inverse_return_mode, mutated_view_idx, split_sizes, dim);
 }
 
+// NOLINTNEXTLINE(performance-unnecessary-value-param)
 Tensor FunctionalInverses::narrow_inverse(const at::Tensor & base, const at::Tensor & mutated_view, InverseReturnMode inverse_return_mode, int dim, c10::SymInt start, c10::SymInt length) {
   if (inverse_return_mode == InverseReturnMode::AlwaysView) {
     // NB: assumes mutated_view is a narrowed view of base.
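Note on the two suppressions above: performance-unnecessary-value-param flags by-value parameters that are only read and could be taken by const reference; here the signatures have to match the generated schema, so the check is silenced for the single following line. A minimal, hypothetical sketch of how the check and a NOLINTNEXTLINE suppression interact (names are illustrative, not from this PR):

    #include <cstddef>
    #include <string>
    #include <vector>

    // clang-tidy (performance-unnecessary-value-param) would flag this:
    // `names` is only read, so `const std::vector<std::string>&` would avoid a copy.
    std::size_t total_length_flagged(std::vector<std::string> names) {
      std::size_t n = 0;
      for (const auto& s : names) {
        n += s.size();
      }
      return n;
    }

    // When the by-value signature must stay (e.g. it has to match a generated
    // schema), the warning is suppressed for exactly one line:
    // NOLINTNEXTLINE(performance-unnecessary-value-param)
    std::size_t total_length_suppressed(std::vector<std::string> names) {
      std::size_t n = 0;
      for (const auto& s : names) {
        n += s.size();
      }
      return n;
    }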
@@ -95,11 +95,9 @@ struct uniform_int_distribution {
 template <typename T>
 struct uniform_real_distribution {
 
-  C10_HOST_DEVICE inline uniform_real_distribution(T from, T to) {
+  C10_HOST_DEVICE inline uniform_real_distribution(T from, T to) : from_(from), to_(to) {
     TORCH_CHECK_IF_NOT_ON_CUDA(from <= to);
     TORCH_CHECK_IF_NOT_ON_CUDA(to - from <= std::numeric_limits<T>::max());
-    from_ = from;
-    to_ = to;
   }
 
   template <typename RNG>
@@ -186,10 +184,8 @@ DISTRIBUTION_HELPER_GENERATE_NEXT_NORMAL_METHODS(float);
 template <typename T>
 struct normal_distribution {
 
-  C10_HOST_DEVICE inline normal_distribution(T mean_in, T stdv_in) {
+  C10_HOST_DEVICE inline normal_distribution(T mean_in, T stdv_in) : mean(mean_in), stdv(stdv_in) {
     TORCH_CHECK_IF_NOT_ON_CUDA(stdv_in >= 0, "stdv_in must be positive: ", stdv_in);
-    mean = mean_in;
-    stdv = stdv_in;
   }
 
   template <typename RNG>
@@ -236,9 +232,8 @@ template <> struct DiscreteDistributionType<double> { using type = double; };
 template <typename T>
 struct bernoulli_distribution {
 
-  C10_HOST_DEVICE inline bernoulli_distribution(T p_in) {
+  C10_HOST_DEVICE inline bernoulli_distribution(T p_in) : p(p_in) {
     TORCH_CHECK_IF_NOT_ON_CUDA(p_in >= 0 && p_in <= 1);
-    p = p_in;
   }
 
   template <typename RNG>
@@ -257,9 +252,8 @@ struct bernoulli_distribution {
 template <typename T>
 struct geometric_distribution {
 
-  C10_HOST_DEVICE inline geometric_distribution(T p_in) {
+  C10_HOST_DEVICE inline geometric_distribution(T p_in) : p(p_in) {
     TORCH_CHECK_IF_NOT_ON_CUDA(p_in > 0 && p_in < 1);
-    p = p_in;
   }
 
   template <typename RNG>
@@ -317,10 +311,8 @@ struct cauchy_distribution {
 template <typename T>
 struct lognormal_distribution {
 
-  C10_HOST_DEVICE inline lognormal_distribution(T mean_in, T stdv_in) {
+  C10_HOST_DEVICE inline lognormal_distribution(T mean_in, T stdv_in) : mean(mean_in), stdv(stdv_in) {
     TORCH_CHECK_IF_NOT_ON_CUDA(stdv_in > 0);
-    mean = mean_in;
-    stdv = stdv_in;
   }
 
   template<typename RNG>
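The five distribution constructors above all get the same clang-tidy-17 treatment (the prefer-member-initializer style of fix): members that were assigned inside the constructor body are now initialized in the member initializer list, and the body keeps only the range checks. A small self-contained sketch of the before/after pattern, with illustrative names rather than the DistributionsHelper.h types:

    #include <stdexcept>

    // Before: members are default-initialized, then overwritten in the body,
    // which clang-tidy flags.
    struct interval_old {
      double lo, hi;
      interval_old(double lo_in, double hi_in) {
        if (lo_in > hi_in) throw std::invalid_argument("lo > hi");
        lo = lo_in;
        hi = hi_in;
      }
    };

    // After: members are initialized directly in the initializer list; the
    // body only validates, mirroring the uniform_real_distribution change.
    // (Note the members are set before the check runs, same as in the diff.)
    struct interval_new {
      double lo, hi;
      interval_new(double lo_in, double hi_in) : lo(lo_in), hi(hi_in) {
        if (lo_in > hi_in) throw std::invalid_argument("lo > hi");
      }
    };

    int main() {
      interval_new i(0.0, 1.0);
      return i.hi > i.lo ? 0 : 1;
    }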
@@ -1163,7 +1163,7 @@ struct TORCH_API IValue final {
   // this value different (e.g. using NaN boxing), and this would make it more
   // costly to determine the tag for all types vs just determining if something
   // is a particular type. Instead we want clients to use the `isX` methods when
-  // possible. If for perf. reasons you really, absolutely, must have a jump
+  // possible. If for performance reasons you really, absolutely, must have a jump
   // table, then we can revisit this.
   enum class Tag : uint32_t {
 #define DEFINE_TAG(x) x,
@@ -32,7 +32,9 @@
 #define DLPACK_DLL
 #endif
 
+// NOLINTNEXTLINE(modernize-deprecated-headers)
 #include <stdint.h>
+// NOLINTNEXTLINE(modernize-deprecated-headers)
 #include <stddef.h>
 
 #ifdef __cplusplus
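modernize-deprecated-headers asks C++ code to include <cstdint>/<cstddef> instead of the C headers. dlpack.h is a C-compatible header shared with plain C, so the C headers stay and the check is suppressed per include. A hedged sketch of the two styles (illustrative file, not part of the PR):

    // In a pure C++ translation unit, prefer the <c...> headers, which put
    // the names in namespace std:
    #include <cstddef>
    #include <cstdint>

    std::uint64_t checksum(const std::byte* data, std::size_t n) {
      std::uint64_t h = 1469598103934665603ull;  // FNV-1a offset basis
      for (std::size_t i = 0; i < n; ++i) {
        h ^= static_cast<std::uint64_t>(data[i]);
        h *= 1099511628211ull;                   // FNV-1a prime
      }
      return h;
    }

    // In a header that must also compile as C (like dlpack.h), the C headers
    // are the only option, so the clang-tidy warning is silenced instead:
    // NOLINTNEXTLINE(modernize-deprecated-headers)
    #include <stdint.h>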
@@ -103,7 +103,7 @@ template<
   // optional cannot be used in a template, otherwise we would use it here.
   int maybe_keepdim_arg_pos
 >
-void boxed_reduction_batch_rule(const c10::OperatorHandle& op, torch::jit::Stack* stack) {
+static void boxed_reduction_batch_rule(const c10::OperatorHandle& op, torch::jit::Stack* stack) {
   const auto& schema = op.schema();
   const auto num_returns = schema.returns().size();
   const auto num_arguments = schema.arguments().size();
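Marking boxed_reduction_batch_rule as static gives it internal linkage; this is the usual fix when a tool warns about a translation-unit-local function that has external linkage but no declaration in any header. A hedged, standalone sketch of the idea with made-up names:

    #include <cstdio>

    // A helper that is only called from this .cpp file. Without `static` (or
    // an anonymous namespace) it would get external linkage, and tooling can
    // warn that it is missing a declaration / should have internal linkage.
    static int clamp_to_byte(int v) {
      if (v < 0) return 0;
      if (v > 255) return 255;
      return v;
    }

    int main() {
      std::printf("%d %d %d\n", clamp_to_byte(-5), clamp_to_byte(128), clamp_to_byte(999));
      return 0;
    }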
@@ -357,21 +357,21 @@ static std::tuple<Tensor, std::optional<int64_t>> searchsorted_batch_rule(
   // B<...>D, B<...>V -> no change
   if (buckets_bdim.has_value() && self_bdim.has_value()) {
     auto self_ = moveBatchDimToFront(self, self_bdim);
-    auto result = at::searchsorted(buckets, self_, out_int32, right, std::move(side), sorter_);
+    auto result = at::searchsorted(buckets, self_, out_int32, right, side, sorter_);
     return std::make_tuple(std::move(result), 0);
   }
   // B<...>D, <...>V -> B<...>D, B<...>V
   if (buckets_bdim.has_value() && !self_bdim.has_value()) {
     auto self_ = moveBatchDimToFront(self, self_bdim);
     self_ = ensure_has_bdim(self_, self_bdim.has_value(), buckets.size(0));
-    auto result = at::searchsorted(buckets, self_, out_int32, right, std::move(side), sorter_);
+    auto result = at::searchsorted(buckets, self_, out_int32, right, side, sorter_);
     return std::make_tuple(std::move(result), 0);
   }
   // <...>D, B<...>V -> <...>D, <...>(BV)
   if (!buckets_bdim.has_value() && self_bdim.has_value()) {
     auto bdim_size = self.size(*self_bdim);
     auto self_ = reshape_dim_into(*self_bdim, -1, self);
-    auto result = at::searchsorted(buckets, self_, out_int32, right, std::move(side), sorter_);
+    auto result = at::searchsorted(buckets, self_, out_int32, right, side, sorter_);
     result = reshape_dim_outof(-1, bdim_size, result);
     return std::make_tuple(result, result.dim() - 2);
   }
@@ -382,7 +382,7 @@ static std::tuple<Tensor, std::optional<int64_t>> searchsorted_batch_rule(
   if (buckets_bdim.has_value() && self_bdim.has_value()) {
     auto self_ = moveBatchDimToFront(self, self_bdim);
     auto self_view_ = self_logical_rank == 0 ? self_.unsqueeze(-1) : self_.flatten(1);
-    auto result = at::searchsorted(buckets, self_view_, out_int32, right, std::move(side), sorter_);
+    auto result = at::searchsorted(buckets, self_view_, out_int32, right, side, sorter_);
     result = self_logical_rank == 0 ? result.squeeze(-1) : result.view(self_.sizes());
     return std::make_tuple(std::move(result), 0);
   }
@@ -391,13 +391,13 @@ static std::tuple<Tensor, std::optional<int64_t>> searchsorted_batch_rule(
     auto bdim_size = buckets.size(*buckets_bdim);
     auto self_ = ensure_has_bdim(self, false, bdim_size);
     auto self_view_ = self_logical_rank == 0 ? self_.unsqueeze(-1) : self_.flatten(1);
-    auto result = at::searchsorted(buckets, self_view_, out_int32, right, std::move(side), sorter_);
+    auto result = at::searchsorted(buckets, self_view_, out_int32, right, side, sorter_);
     result = self_logical_rank == 0 ? result.squeeze(-1) : result.view(self_.sizes());
     return std::make_tuple(std::move(result), 0);
   }
   // D, B* -> no change
   if (!buckets_bdim.has_value() && self_bdim.has_value()) {
-    auto result = at::searchsorted(buckets, self, out_int32, right, std::move(side), sorter_);
+    auto result = at::searchsorted(buckets, self, out_int32, right, side, sorter_);
     return std::make_tuple(std::move(result), self_bdim);
   }
   TORCH_INTERNAL_ASSERT(false);
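Every searchsorted_batch_rule change above drops std::move(side). When the argument is a small, trivially copyable value (such as an optional string view) that is also reused across several mutually exclusive branches, the move buys nothing and clang-tidy-17 flags it. A hedged sketch, not the functorch code:

    #include <cstdio>
    #include <optional>
    #include <string_view>

    // The callee takes `side` by value; std::optional<std::string_view> is
    // small and trivially copyable, so a "move" costs exactly as much as a copy.
    static void use_side(std::optional<std::string_view> side) {
      std::printf("side has_value=%d\n", side.has_value() ? 1 : 0);
    }

    int main() {
      std::optional<std::string_view> side{"left"};
      // clang-tidy would flag: std::move on a trivially copyable value has no
      // effect and wrongly suggests that `side` is consumed here.
      // use_side(std::move(side));
      use_side(side);  // preferred: just pass it
      use_side(side);  // ...and it can safely be reused in later calls
      return 0;
    }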
@@ -81,11 +81,7 @@ TensorImpl::TensorImpl(
     DispatchKeySet key_set,
     const caffe2::TypeMeta data_type)
-    // Use std::forward to suppress static analyzer false positive.
-    : TensorImpl(
-          std::forward<Storage>(storage),
-          key_set,
-          data_type,
-          storage.device()) {}
+    : TensorImpl(std::move(storage), key_set, data_type, storage.device()) {}
 
 // [Note: Python key removal]
 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
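Storage&& here is a plain rvalue-reference parameter, not a forwarding reference, so std::forward<Storage>(storage) was just a verbose std::move, and the delegating constructor collapses to one line. A small sketch of the distinction, using hypothetical types rather than TensorImpl:

    #include <string>
    #include <utility>

    struct Buffer {
      std::string data;

      // Rvalue-reference parameter: the argument is always an rvalue here, so
      // std::move is the idiomatic way to hand it on; std::forward would work
      // but only obscures the intent.
      explicit Buffer(std::string&& s) : data(std::move(s)) {}
    };

    // Forwarding reference in a template: T&& can bind to lvalues or rvalues,
    // so std::forward preserves the original value category.
    template <typename T>
    Buffer make_buffer(T&& s) {
      return Buffer(std::string(std::forward<T>(s)));
    }

    int main() {
      std::string name = "weights.bin";
      Buffer a = make_buffer(name);             // copies: name stays valid
      Buffer b = make_buffer(std::move(name));  // moves: name is consumed
      return a.data.size() == b.data.size() ? 0 : 1;
    }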
@@ -62,6 +62,7 @@ ThreadPool::~ThreadPool() {
   for (auto& t : threads_) {
     try {
       t.join();
+      // NOLINTNEXTLINE(bugprone-empty-catch)
     } catch (const std::exception&) {
     }
   }
@@ -163,7 +163,7 @@ TEST(TypeIndex, TypeComputationsAreResolved) {
   EXPECT_EQ(
       string_view::npos,
       get_fully_qualified_type_name<
-          typename std::remove_pointer<typename Type<int>::type>::type>()
+          std::remove_pointer_t<typename Type<int>::type>>()
           .find("*"));
 }
 
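typename std::remove_pointer<...>::type becomes the C++14 alias std::remove_pointer_t, which drops both the typename disambiguator and the trailing ::type (the modernize-type-traits style of cleanup). A minimal sketch:

    #include <type_traits>

    // Old spelling: needs `typename` because ::type is a dependent name.
    template <typename T>
    using element_old = typename std::remove_pointer<T>::type;

    // New spelling: the _t alias template says the same thing more directly.
    template <typename T>
    using element_new = std::remove_pointer_t<T>;

    static_assert(std::is_same_v<element_old<int*>, int>);
    static_assert(std::is_same_v<element_new<int*>, int>);
    static_assert(std::is_same_v<element_new<double>, double>);

    int main() { return 0; }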
@@ -281,6 +281,7 @@ class GetBacktraceImpl {
   }
 
  private:
+  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
   const bool skip_python_frames_;
   std::vector<void*> callstack_;
 };
@@ -36,6 +36,7 @@ class C10_API GaugeHandle {
   void record(int64_t value);
 
  private:
+  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
   detail::GaugeImpl& impl_;
 };
 
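cppcoreguidelines-avoid-const-or-ref-data-members warns because a const or reference member implicitly deletes copy/move assignment. GetBacktraceImpl and GaugeHandle are never assigned, so the members stay and the warning is suppressed. A sketch of what the check is guarding against, with illustrative names:

    #include <string>

    struct Registry {
      std::string name;
    };

    class Handle {
     public:
      explicit Handle(Registry& r) : registry_(r) {}

      const std::string& registry_name() const { return registry_.name; }

     private:
      // A reference member pins the object to one Registry and implicitly
      // deletes copy/move assignment; that is intended here, so the clang-tidy
      // warning is silenced rather than "fixed":
      // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
      Registry& registry_;
    };

    int main() {
      Registry r{"metrics"};
      Handle h(r);
      return h.registry_name().empty() ? 1 : 0;
    }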
@@ -29,7 +29,7 @@ class OptimisticLazy {
   }
 
   template <class Factory>
-  T& ensure(Factory&& factory) {
+  T& ensure(const Factory& factory) {
     if (T* value = value_.load(std::memory_order_acquire)) {
       return *value;
     }
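In OptimisticLazy::ensure, Factory&& was a forwarding reference that the body never forwarded (the kind of thing clang-tidy-17's missing-std-forward diagnostics complain about), so the parameter becomes const Factory&, which states what the function actually does: invoke the factory without consuming it. A hedged sketch with made-up names:

    #include <cstdio>

    // Takes the callable by const reference: simple, and makes it clear the
    // callable may be invoked (possibly more than once) without being consumed.
    template <class Factory>
    int ensure_value(int cached, const Factory& factory) {
      return cached != 0 ? cached : factory();
    }

    int main() {
      auto make = [] { return 42; };
      std::printf("%d %d\n", ensure_value(0, make), ensure_value(7, make));
      return 0;
    }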
@@ -220,6 +220,7 @@ void SetGlobalRank(int64_t rank) {
 void LogAPIUsage(const std::string& event) try {
   if (auto logger = GetAPIUsageLogger())
     (*logger)(event);
+  // NOLINTNEXTLINE(bugprone-empty-catch)
 } catch (std::bad_function_call&) {
   // static destructor race
 }
@@ -229,6 +230,7 @@ void LogAPIUsageMetadata(
     const std::map<std::string, std::string>& metadata_map) try {
   if (auto logger = GetAPIUsageMetadataLogger())
     (*logger)(context, metadata_map);
+  // NOLINTNEXTLINE(bugprone-empty-catch)
 } catch (std::bad_function_call&) {
   // static destructor race
 }
@@ -236,6 +238,7 @@ void LogAPIUsageMetadata(
 void LogPyTorchDDPUsage(const DDPLoggingData& ddpData) try {
   if (auto logger = GetDDPUsageLogger())
     (*logger)(ddpData);
+  // NOLINTNEXTLINE(bugprone-empty-catch)
 } catch (std::bad_function_call&) {
   // static destructor race
 }
@@ -245,6 +248,7 @@ bool LogAPIUsageFakeReturn(const std::string& event) try {
   if (auto logger = GetAPIUsageLogger())
     (*logger)(event);
   return true;
+  // NOLINTNEXTLINE(bugprone-empty-catch)
 } catch (std::bad_function_call&) {
   // static destructor race
   return true;
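The four try/catch blocks in the logging functions intentionally swallow std::bad_function_call (a race with static destructors at shutdown), so each empty handler gets a NOLINTNEXTLINE(bugprone-empty-catch) next to the comment explaining why. Sketch of the pattern in isolation (the logger registry here is illustrative, not the c10 API):

    #include <functional>
    #include <string>

    namespace {
    std::function<void(const std::string&)>& logger() {
      static std::function<void(const std::string&)> f;
      return f;
    }
    }  // namespace

    void log_event(const std::string& event) try {
      if (logger())
        logger()(event);
      // Swallowing the exception is deliberate: during shutdown the static
      // std::function may already be destroyed, and logging must never throw.
      // NOLINTNEXTLINE(bugprone-empty-catch)
    } catch (const std::bad_function_call&) {
    }

    int main() {
      log_event("startup");  // no logger installed: silently does nothing
      return 0;
    }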
@@ -5,6 +5,7 @@
 
 #include <chrono>
 #include <memory>
 #include <string>
+#include <string_view>
 #include <unordered_map>
 #include <vector>
@@ -110,7 +111,7 @@ class WaitCounterImpl {
     return ctxs;
   }
 
-  void stop(SmallVector<intptr_t>&& ctxs) noexcept {
+  void stop(const SmallVector<intptr_t>& ctxs) noexcept {
     auto now = std::chrono::steady_clock::now();
     assert(ctxs.size() == backends_.size());
     for (size_t i = 0; i < ctxs.size(); ++i) {
@@ -155,7 +156,7 @@ WaitCounterHandle::WaitGuard WaitCounterHandle::start() {
   return WaitCounterHandle::WaitGuard(*this, impl_.start());
 }
 
-void WaitCounterHandle::stop(SmallVector<intptr_t>&& ctxs) {
-  return impl_.stop(std::move(ctxs));
+void WaitCounterHandle::stop(const SmallVector<intptr_t>& ctxs) {
+  return impl_.stop(ctxs);
 }
 } // namespace c10::monitor
@@ -2,7 +2,6 @@
 
 #include <chrono>
 #include <memory>
 #include <string>
-#include <string_view>
 
 #include <c10/macros/Macros.h>
@@ -61,7 +60,7 @@ class C10_API WaitCounterHandle {
 
     void stop() {
       if (auto handle = std::exchange(handle_, nullptr)) {
-        handle->stop(std::move(ctxs_));
+        handle->stop(ctxs_);
       }
     }
 
@@ -81,8 +80,9 @@ class C10_API WaitCounterHandle {
  private:
   // Stops the waiter. Each start() call should be matched by exactly one stop()
   // call.
-  void stop(SmallVector<intptr_t>&& ctxs);
+  void stop(const SmallVector<intptr_t>& ctxs);
 
+  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
   detail::WaitCounterImpl& impl_;
 };
 } // namespace c10::monitor
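WaitCounterHandle::stop took SmallVector<intptr_t>&& but the contexts are only read, never moved-from, so the signature becomes const&, and the caller no longer has to std::move(ctxs_) either (the rvalue-reference-param-not-moved family of clang-tidy diagnostics). Sketch of the same signature change on a plain std::vector, with illustrative names:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // The contexts are only iterated, never stored or moved-from, so const&
    // is the honest signature; an rvalue reference would just force callers
    // to std::move for no benefit.
    static void stop_all(const std::vector<std::intptr_t>& ctxs) {
      for (std::intptr_t ctx : ctxs) {
        std::printf("stopping ctx %ld\n", static_cast<long>(ctx));
      }
    }

    int main() {
      std::vector<std::intptr_t> ctxs{1, 2, 3};
      stop_all(ctxs);  // callers keep ownership; no std::move needed
      return 0;
    }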
@@ -38,12 +38,10 @@ using DeviceStreamType = cudaStream_t;
     throw std::runtime_error("CPU runtime error"); \
   }
 
-namespace torch {
-namespace aot_inductor {
+namespace torch::aot_inductor {
 
 using DeviceStreamType = void*;
 
-} // namespace aot_inductor
-} // namespace torch
+} // namespace torch::aot_inductor
 
 #endif // USE_CUDA
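This and the tensor_numpy.cpp hunks below apply modernize-concat-nested-namespaces: with C++17, namespace torch { namespace aot_inductor { ... } } collapses into a single nested namespace definition, and the closing comment names the full path. Minimal sketch:

    // Pre-C++17 style:
    //   namespace torch { namespace aot_inductor { ... } }  // two levels to close
    // C++17 nested namespace definition, as used in the hunk above:
    namespace demo::nested {

    constexpr int answer() { return 42; }

    }  // namespace demo::nested

    int main() { return demo::nested::answer() == 42 ? 0 : 1; }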
@@ -193,7 +193,7 @@ void initLazyBindings(PyObject* module) {
     torch::lazy::getLTCForceFallback() = std::move(newval);
   });
   lazy.def("_clear_ir_cache", []() { TrieCache::Get()->Clear(); });
-  lazy.def("_dump_ir_cache", [](std::string filename) {
+  lazy.def("_dump_ir_cache", [](const std::string& filename) {
     TrieCache::Get()->DumpToDotFile(filename);
   });
   lazy.def("_set_reuse_ir", [](bool val) { FLAGS_torch_lazy_reuse_ir = val; });
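Same performance-unnecessary-value-param fix, this time inside a binding lambda: the filename is only forwarded to DumpToDotFile, so taking const std::string& avoids a copy per call. Standalone sketch (the callback registry below is a stand-in, not the pybind11 API):

    #include <functional>
    #include <iostream>
    #include <string>

    // A stand-in for lazy.def(...): accepts a callback and invokes it once.
    static void register_callback(std::function<void(const std::string&)> cb,
                                  const std::string& arg) {
      cb(arg);  // invoke immediately for the sake of the example
    }

    int main() {
      // Taking the parameter by const reference mirrors the fixed lambda in
      // initLazyBindings: the string is only read, never stored or modified.
      register_callback(
          [](const std::string& filename) {
            std::cout << "would dump IR cache to " << filename << "\n";
          },
          "trie.dot");
      return 0;
    }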
@@ -57,8 +57,7 @@ bool is_numpy_dlpack_deleter_bugged() {
 using namespace at;
 using namespace torch::autograd;
 
-namespace torch {
-namespace utils {
+namespace torch::utils {
 
 bool is_numpy_available() {
   static bool available = []() {
@@ -567,7 +566,6 @@ void validate_numpy_for_dlpack_deleter_bug() {
 bool is_numpy_dlpack_deleter_bugged() {
   return numpy_with_dlpack_deleter_bug_installed;
 }
-} // namespace utils
-} // namespace torch
+} // namespace torch::utils
 
 #endif // USE_NUMPY