diff --git a/aten/src/ATen/FunctionalInverses.cpp b/aten/src/ATen/FunctionalInverses.cpp
index b5581b71e767..60c86bad733a 100644
--- a/aten/src/ATen/FunctionalInverses.cpp
+++ b/aten/src/ATen/FunctionalInverses.cpp
@@ -231,6 +231,7 @@ Tensor FunctionalInverses::slice_Tensor_inverse(const Tensor& base, const Tensor
   }
 }
 
+// NOLINTNEXTLINE(performance-unnecessary-value-param)
 Tensor FunctionalInverses::split_Tensor_inverse(const Tensor& base, const Tensor& mutated_view, InverseReturnMode inverse_return_mode, int64_t mutated_view_idx, c10::SymInt split_size, int64_t dim) {
   // It would be nice if this logic could be re-used from autograd's split_backward(), but I don't think it can.
   // For functionalization, we have only have one of the tensors from the TensorList outputed by split(), and we want to layer i
@@ -452,6 +453,7 @@ Tensor FunctionalInverses::chunk_inverse(const at::Tensor & base, const at::Tens
   return split_with_sizes_inverse(base, mutated_view, inverse_return_mode, mutated_view_idx, split_sizes, dim);
 }
 
+// NOLINTNEXTLINE(performance-unnecessary-value-param)
 Tensor FunctionalInverses::narrow_inverse(const at::Tensor & base, const at::Tensor & mutated_view, InverseReturnMode inverse_return_mode, int dim, c10::SymInt start, c10::SymInt length) {
   if (inverse_return_mode == InverseReturnMode::AlwaysView) {
     // NB: assumes mutated_view is a narrowed view of base.
diff --git a/aten/src/ATen/core/DistributionsHelper.h b/aten/src/ATen/core/DistributionsHelper.h
index 18588ee00a36..39004008d007 100644
--- a/aten/src/ATen/core/DistributionsHelper.h
+++ b/aten/src/ATen/core/DistributionsHelper.h
@@ -95,11 +95,9 @@ struct uniform_int_distribution {
 
 template <typename T>
 struct uniform_real_distribution {
-  C10_HOST_DEVICE inline uniform_real_distribution(T from, T to) {
+  C10_HOST_DEVICE inline uniform_real_distribution(T from, T to) : from_(from), to_(to) {
     TORCH_CHECK_IF_NOT_ON_CUDA(from <= to);
     TORCH_CHECK_IF_NOT_ON_CUDA(to - from <= std::numeric_limits<T>::max());
-    from_ = from;
-    to_ = to;
   }
 
   template <typename V>
@@ -186,10 +184,8 @@ DISTRIBUTION_HELPER_GENERATE_NEXT_NORMAL_METHODS(float);
 
 template <typename T>
 struct normal_distribution {
-  C10_HOST_DEVICE inline normal_distribution(T mean_in, T stdv_in) {
+  C10_HOST_DEVICE inline normal_distribution(T mean_in, T stdv_in) : mean(mean_in), stdv(stdv_in) {
     TORCH_CHECK_IF_NOT_ON_CUDA(stdv_in >= 0, "stdv_in must be positive: ", stdv_in);
-    mean = mean_in;
-    stdv = stdv_in;
   }
 
   template <typename V>
@@ -236,9 +232,8 @@ template <> struct DiscreteDistributionType { using type = double; };
 
 template <typename T>
 struct bernoulli_distribution {
-  C10_HOST_DEVICE inline bernoulli_distribution(T p_in) {
+  C10_HOST_DEVICE inline bernoulli_distribution(T p_in) : p(p_in) {
     TORCH_CHECK_IF_NOT_ON_CUDA(p_in >= 0 && p_in <= 1);
-    p = p_in;
   }
 
   template <typename V>
@@ -257,9 +252,8 @@ struct bernoulli_distribution {
 
 template <typename T>
 struct geometric_distribution {
-  C10_HOST_DEVICE inline geometric_distribution(T p_in) {
+  C10_HOST_DEVICE inline geometric_distribution(T p_in) : p(p_in) {
     TORCH_CHECK_IF_NOT_ON_CUDA(p_in > 0 && p_in < 1);
-    p = p_in;
   }
 
   template <typename V>
@@ -317,10 +311,8 @@ struct cauchy_distribution {
 
 template <typename T>
 struct lognormal_distribution {
-  C10_HOST_DEVICE inline lognormal_distribution(T mean_in, T stdv_in) {
+  C10_HOST_DEVICE inline lognormal_distribution(T mean_in, T stdv_in) : mean(mean_in), stdv(stdv_in) {
     TORCH_CHECK_IF_NOT_ON_CUDA(stdv_in > 0);
-    mean = mean_in;
-    stdv = stdv_in;
   }
 
   template <typename V>
diff --git a/aten/src/ATen/core/ivalue.h b/aten/src/ATen/core/ivalue.h
index 29659124184e..42a03ea94602 100644
--- a/aten/src/ATen/core/ivalue.h
+++ b/aten/src/ATen/core/ivalue.h
@@ -1163,7 +1163,7 @@ struct TORCH_API IValue final {
   // this value different (e.g. using NaN boxing), and this would make it more
   // costly to determine the tag for all types vs just determining if something
   // is a particular type. Instead we want clients to use the `isX` methods when
-  // possible. If for perf. reasons you really, absolutely, must have a jump
+  // possible. If for performance reasons you really, absolutely, must have a jump
   // table, then we can revisit this.
   enum class Tag : uint32_t {
 #define DEFINE_TAG(x) x,
diff --git a/aten/src/ATen/dlpack.h b/aten/src/ATen/dlpack.h
index c77205f96215..6f8e03dd5704 100644
--- a/aten/src/ATen/dlpack.h
+++ b/aten/src/ATen/dlpack.h
@@ -32,7 +32,9 @@
 #define DLPACK_DLL
 #endif
 
+// NOLINTNEXTLINE(modernize-deprecated-headers)
 #include <stdint.h>
+// NOLINTNEXTLINE(modernize-deprecated-headers)
 #include <stddef.h>
 
 #ifdef __cplusplus
diff --git a/aten/src/ATen/functorch/BatchRulesReduceOps.cpp b/aten/src/ATen/functorch/BatchRulesReduceOps.cpp
index 8385660be0b3..878ea58bdb2c 100644
--- a/aten/src/ATen/functorch/BatchRulesReduceOps.cpp
+++ b/aten/src/ATen/functorch/BatchRulesReduceOps.cpp
@@ -103,7 +103,7 @@ template<
   // optional cannot be used in a template, otherwise we would use it here.
   int maybe_keepdim_arg_pos
 >
-void boxed_reduction_batch_rule(const c10::OperatorHandle& op, torch::jit::Stack* stack) {
+static void boxed_reduction_batch_rule(const c10::OperatorHandle& op, torch::jit::Stack* stack) {
   const auto& schema = op.schema();
   const auto num_returns = schema.returns().size();
   const auto num_arguments = schema.arguments().size();
@@ -357,21 +357,21 @@ static std::tuple<Tensor, std::optional<int64_t>> searchsorted_batch_rule(
   // B<...>D, B<...>V -> no change
   if (buckets_bdim.has_value() && self_bdim.has_value()) {
     auto self_ = moveBatchDimToFront(self, self_bdim);
-    auto result = at::searchsorted(buckets, self_, out_int32, right, std::move(side), sorter_);
+    auto result = at::searchsorted(buckets, self_, out_int32, right, side, sorter_);
     return std::make_tuple(std::move(result), 0);
   }
   // B<...>D, <...>V -> B<...>D, B<...>V
   if (buckets_bdim.has_value() && !self_bdim.has_value()) {
     auto self_ = moveBatchDimToFront(self, self_bdim);
     self_ = ensure_has_bdim(self_, self_bdim.has_value(), buckets.size(0));
-    auto result = at::searchsorted(buckets, self_, out_int32, right, std::move(side), sorter_);
+    auto result = at::searchsorted(buckets, self_, out_int32, right, side, sorter_);
     return std::make_tuple(std::move(result), 0);
   }
   // <...>D, B<...>V -> <...>D, <...>(BV)
   if (!buckets_bdim.has_value() && self_bdim.has_value()) {
     auto bdim_size = self.size(*self_bdim);
     auto self_ = reshape_dim_into(*self_bdim, -1, self);
-    auto result = at::searchsorted(buckets, self_, out_int32, right, std::move(side), sorter_);
+    auto result = at::searchsorted(buckets, self_, out_int32, right, side, sorter_);
     result = reshape_dim_outof(-1, bdim_size, result);
     return std::make_tuple(result, result.dim() - 2);
   }
@@ -382,7 +382,7 @@ static std::tuple<Tensor, std::optional<int64_t>> searchsorted_batch_rule(
   if (buckets_bdim.has_value() && self_bdim.has_value()) {
     auto self_ = moveBatchDimToFront(self, self_bdim);
     auto self_view_ = self_logical_rank == 0 ? self_.unsqueeze(-1) : self_.flatten(1);
-    auto result = at::searchsorted(buckets, self_view_, out_int32, right, std::move(side), sorter_);
+    auto result = at::searchsorted(buckets, self_view_, out_int32, right, side, sorter_);
     result = self_logical_rank == 0 ? result.squeeze(-1) : result.view(self_.sizes());
     return std::make_tuple(std::move(result), 0);
   }
@@ -391,13 +391,13 @@ static std::tuple<Tensor, std::optional<int64_t>> searchsorted_batch_rule(
     auto bdim_size = buckets.size(*buckets_bdim);
     auto self_ = ensure_has_bdim(self, false, bdim_size);
     auto self_view_ = self_logical_rank == 0 ? self_.unsqueeze(-1) : self_.flatten(1);
-    auto result = at::searchsorted(buckets, self_view_, out_int32, right, std::move(side), sorter_);
+    auto result = at::searchsorted(buckets, self_view_, out_int32, right, side, sorter_);
     result = self_logical_rank == 0 ? result.squeeze(-1) : result.view(self_.sizes());
     return std::make_tuple(std::move(result), 0);
   }
   // D, B* -> no change
   if (!buckets_bdim.has_value() && self_bdim.has_value()) {
-    auto result = at::searchsorted(buckets, self, out_int32, right, std::move(side), sorter_);
+    auto result = at::searchsorted(buckets, self, out_int32, right, side, sorter_);
     return std::make_tuple(std::move(result), self_bdim);
   }
   TORCH_INTERNAL_ASSERT(false);
diff --git a/c10/core/TensorImpl.cpp b/c10/core/TensorImpl.cpp
index 3df75af67822..f268dbe17859 100644
--- a/c10/core/TensorImpl.cpp
+++ b/c10/core/TensorImpl.cpp
@@ -81,11 +81,7 @@ TensorImpl::TensorImpl(
     DispatchKeySet key_set,
     const caffe2::TypeMeta data_type)
     // Use std::forward to suppress static analyzer false positive.
-    : TensorImpl(
-          std::forward<Storage>(storage),
-          key_set,
-          data_type,
-          storage.device()) {}
+    : TensorImpl(std::move(storage), key_set, data_type, storage.device()) {}
 
 // [Note: Python key removal]
 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/c10/core/thread_pool.cpp b/c10/core/thread_pool.cpp
index dfe6cfaeb334..cb997c1e59e7 100644
--- a/c10/core/thread_pool.cpp
+++ b/c10/core/thread_pool.cpp
@@ -62,6 +62,7 @@ ThreadPool::~ThreadPool() {
   for (auto& t : threads_) {
     try {
       t.join();
+      // NOLINTNEXTLINE(bugprone-empty-catch)
     } catch (const std::exception&) {
     }
   }
diff --git a/c10/test/util/TypeIndex_test.cpp b/c10/test/util/TypeIndex_test.cpp
index b44bbab35689..370a0ad81ba3 100644
--- a/c10/test/util/TypeIndex_test.cpp
+++ b/c10/test/util/TypeIndex_test.cpp
@@ -163,7 +163,7 @@ TEST(TypeIndex, TypeComputationsAreResolved) {
   EXPECT_EQ(
       string_view::npos,
       get_fully_qualified_type_name<
-          typename std::remove_pointer::type>::type>()
+          std::remove_pointer_t::type>>()
           .find("*"));
 }
 
diff --git a/c10/util/Backtrace.cpp b/c10/util/Backtrace.cpp
index df009dea3af9..bfcacfd9740d 100644
--- a/c10/util/Backtrace.cpp
+++ b/c10/util/Backtrace.cpp
@@ -281,6 +281,7 @@ class GetBacktraceImpl {
   }
 
  private:
+  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
   const bool skip_python_frames_;
   std::vector<void*> callstack_;
 };
diff --git a/c10/util/Gauge.h b/c10/util/Gauge.h
index f92ecd986bee..f505c037ebc9 100644
--- a/c10/util/Gauge.h
+++ b/c10/util/Gauge.h
@@ -36,6 +36,7 @@ class C10_API GaugeHandle {
   void record(int64_t value);
 
  private:
+  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
   detail::GaugeImpl& impl_;
 };
 
diff --git a/c10/util/Lazy.h b/c10/util/Lazy.h
index 34424691a8d8..ad778cc1108d 100644
--- a/c10/util/Lazy.h
+++ b/c10/util/Lazy.h
@@ -29,7 +29,7 @@ class OptimisticLazy {
   }
 
   template <typename Factory>
-  T& ensure(Factory&& factory) {
+  T& ensure(const Factory& factory) {
     if (T* value = value_.load(std::memory_order_acquire)) {
       return *value;
     }
diff --git a/c10/util/Logging.cpp b/c10/util/Logging.cpp
index 5c12a187b435..9aeee67f1a1e 100644
--- a/c10/util/Logging.cpp
+++ b/c10/util/Logging.cpp
@@ -220,6 +220,7 @@ void SetGlobalRank(int64_t rank) {
 void LogAPIUsage(const std::string& event) try {
   if (auto logger = GetAPIUsageLogger())
     (*logger)(event);
+  // NOLINTNEXTLINE(bugprone-empty-catch)
 } catch (std::bad_function_call&) {
   // static destructor race
 }
@@ -229,6 +230,7 @@ void LogAPIUsageMetadata(
     const std::map<std::string, std::string>& metadata_map) try {
   if (auto logger = GetAPIUsageMetadataLogger())
     (*logger)(context, metadata_map);
+  // NOLINTNEXTLINE(bugprone-empty-catch)
 } catch (std::bad_function_call&) {
   // static destructor race
 }
@@ -236,6 +238,7 @@ void LogAPIUsageMetadata(
 void LogPyTorchDDPUsage(const DDPLoggingData& ddpData) try {
   if (auto logger = GetDDPUsageLogger())
     (*logger)(ddpData);
+  // NOLINTNEXTLINE(bugprone-empty-catch)
 } catch (std::bad_function_call&) {
   // static destructor race
 }
@@ -245,6 +248,7 @@ bool LogAPIUsageFakeReturn(const std::string& event) try {
   if (auto logger = GetAPIUsageLogger())
     (*logger)(event);
   return true;
+  // NOLINTNEXTLINE(bugprone-empty-catch)
 } catch (std::bad_function_call&) {
   // static destructor race
   return true;
diff --git a/c10/util/WaitCounter.cpp b/c10/util/WaitCounter.cpp
index 3941942dfb35..1edf4fee29f0 100644
--- a/c10/util/WaitCounter.cpp
+++ b/c10/util/WaitCounter.cpp
@@ -5,6 +5,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
 
@@ -110,7 +111,7 @@ class WaitCounterImpl {
     return ctxs;
   }
 
-  void stop(SmallVector&& ctxs) noexcept {
+  void stop(const SmallVector& ctxs) noexcept {
     auto now = std::chrono::steady_clock::now();
     assert(ctxs.size() == backends_.size());
     for (size_t i = 0; i < ctxs.size(); ++i) {
@@ -155,7 +156,7 @@ WaitCounterHandle::WaitGuard WaitCounterHandle::start() {
   return WaitCounterHandle::WaitGuard(*this, impl_.start());
 }
 
-void WaitCounterHandle::stop(SmallVector&& ctxs) {
-  return impl_.stop(std::move(ctxs));
+void WaitCounterHandle::stop(const SmallVector& ctxs) {
+  return impl_.stop(ctxs);
 }
 } // namespace c10::monitor
diff --git a/c10/util/WaitCounter.h b/c10/util/WaitCounter.h
index 504e88720a9c..193740cb10db 100644
--- a/c10/util/WaitCounter.h
+++ b/c10/util/WaitCounter.h
@@ -2,7 +2,6 @@
 
 #include
 #include
-#include
 #include
 #include
 
@@ -61,7 +60,7 @@ class C10_API WaitCounterHandle {
 
   void stop() {
     if (auto handle = std::exchange(handle_, nullptr)) {
-      handle->stop(std::move(ctxs_));
+      handle->stop(ctxs_);
     }
   }
 
@@ -81,8 +80,9 @@ class C10_API WaitCounterHandle {
  private:
   // Stops the waiter. Each start() call should be matched by exactly one stop()
  // call.
-  void stop(SmallVector&& ctxs);
+  void stop(const SmallVector& ctxs);
 
+  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
   detail::WaitCounterImpl& impl_;
 };
 } // namespace c10::monitor
diff --git a/torch/csrc/inductor/aoti_runtime/device_utils.h b/torch/csrc/inductor/aoti_runtime/device_utils.h
index 76731999968d..5b1fc36c97ea 100644
--- a/torch/csrc/inductor/aoti_runtime/device_utils.h
+++ b/torch/csrc/inductor/aoti_runtime/device_utils.h
@@ -38,12 +38,10 @@ using DeviceStreamType = cudaStream_t;
     throw std::runtime_error("CPU runtime error"); \
   }
 
-namespace torch {
-namespace aot_inductor {
+namespace torch::aot_inductor {
 
 using DeviceStreamType = void*;
 
-} // namespace aot_inductor
-} // namespace torch
+} // namespace torch::aot_inductor
 
 #endif // USE_CUDA
diff --git a/torch/csrc/lazy/python/init.cpp b/torch/csrc/lazy/python/init.cpp
index fe92dd211262..f30615355e0e 100644
--- a/torch/csrc/lazy/python/init.cpp
+++ b/torch/csrc/lazy/python/init.cpp
@@ -193,7 +193,7 @@ void initLazyBindings(PyObject* module) {
         torch::lazy::getLTCForceFallback() = std::move(newval);
       });
   lazy.def("_clear_ir_cache", []() { TrieCache::Get()->Clear(); });
-  lazy.def("_dump_ir_cache", [](std::string filename) {
+  lazy.def("_dump_ir_cache", [](const std::string& filename) {
     TrieCache::Get()->DumpToDotFile(filename);
   });
   lazy.def("_set_reuse_ir", [](bool val) { FLAGS_torch_lazy_reuse_ir = val; });
diff --git a/torch/csrc/utils/tensor_numpy.cpp b/torch/csrc/utils/tensor_numpy.cpp
index d0cafd6ad279..b8e9120b6c61 100644
--- a/torch/csrc/utils/tensor_numpy.cpp
+++ b/torch/csrc/utils/tensor_numpy.cpp
@@ -57,8 +57,7 @@ bool is_numpy_dlpack_deleter_bugged() {
 using namespace at;
 using namespace torch::autograd;
 
-namespace torch {
-namespace utils {
+namespace torch::utils {
 
 bool is_numpy_available() {
   static bool available = []() {
@@ -567,7 +566,6 @@ void validate_numpy_for_dlpack_deleter_bug() {
 bool is_numpy_dlpack_deleter_bugged() {
   return numpy_with_dlpack_deleter_bug_installed;
 }
-} // namespace utils
-} // namespace torch
+} // namespace torch::utils
 
 #endif // USE_NUMPY