diff --git a/aten/src/ATen/MatrixRef.h b/aten/src/ATen/MatrixRef.h
index 74a20a687090..21c010e66db5 100644
--- a/aten/src/ATen/MatrixRef.h
+++ b/aten/src/ATen/MatrixRef.h
@@ -92,6 +92,7 @@ class MatrixRef {
   /// The declaration here is extra complicated so that "arrayRef = {}"
   /// continues to select the move assignment operator.
   template <typename U>
+  // NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward)
   std::enable_if_t<std::is_same_v<U, T>, MatrixRef<T>>& operator=(
       U&& Temporary) = delete;
 
diff --git a/aten/src/ATen/core/ivalue.cpp b/aten/src/ATen/core/ivalue.cpp
index 2f55e7448005..5a49b16387aa 100644
--- a/aten/src/ATen/core/ivalue.cpp
+++ b/aten/src/ATen/core/ivalue.cpp
@@ -574,12 +574,7 @@ static std::ostream& printMaybeAnnotatedDict(
 
 static std::ostream& printComplex(std::ostream & out, const IValue & v) {
   c10::complex<double> d = v.toComplexDouble();
   IValue real(d.real()), imag(std::abs(d.imag()));
-  auto sign = "";
-  if (d.imag() >= 0) {
-    sign = "+";
-  } else {
-    sign = "-";
-  }
+  auto sign = d.imag() >= 0 ? '+' : '-';
   return out << real << sign << imag << "j";
 }
diff --git a/aten/src/ATen/cuda/EmptyTensor.cpp b/aten/src/ATen/cuda/EmptyTensor.cpp
index 108b7be47de1..5b96cdaaf77f 100644
--- a/aten/src/ATen/cuda/EmptyTensor.cpp
+++ b/aten/src/ATen/cuda/EmptyTensor.cpp
@@ -68,9 +68,11 @@ TensorBase empty_strided_cuda(
     std::optional<Device> device_opt,
     std::optional<bool> pin_memory_opt) {
   TORCH_CHECK(!pin_memory_opt.has_value() || !*pin_memory_opt, "Only dense CPU tensors can be pinned");
+#ifndef NDEBUG
   // TODO: remove check for jagged, see https://github.com/pytorch/pytorch/issues/130073
   const auto layout = layout_or_default(layout_opt);
   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout == Layout::Strided || layout == Layout::Jagged);
+#endif
 
   const auto dtype = dtype_or_default(dtype_opt);
   return at::detail::empty_strided_cuda(size, stride, dtype, device_opt);
diff --git a/aten/src/ATen/cuda/tunable/GemmCommon.h b/aten/src/ATen/cuda/tunable/GemmCommon.h
index 6f2ff5583a73..8e44014d756a 100644
--- a/aten/src/ATen/cuda/tunable/GemmCommon.h
+++ b/aten/src/ATen/cuda/tunable/GemmCommon.h
@@ -12,6 +12,7 @@
 
 #include
 #include
+#include
 #include
 #include
 
@@ -22,6 +23,7 @@
 #include
 #include
 #endif
+#include
 #include
 
 namespace at::cuda::tunable {
@@ -150,19 +152,19 @@ struct GemmParams : OpParams {
     return detail::NumericalCheck(c_dtype, c, other->c, GetSizeC()/sizeof(T)) ? OK : FAIL;
   }
 
-  char transa;
-  char transb;
-  int64_t m;
-  int64_t n;
-  int64_t k;
+  char transa{};
+  char transb{};
+  int64_t m{};
+  int64_t n{};
+  int64_t k{};
   at::opmath_type<T> alpha;
-  const T* a;
-  int64_t lda;
-  const T* b;
-  int64_t ldb;
+  const T* a{};
+  int64_t lda{};
+  const T* b{};
+  int64_t ldb{};
   at::opmath_type<T> beta;
-  T* c;
-  int64_t ldc;
+  T* c{};
+  int64_t ldc{};
 private:
   bool duplicate_inputs_{false};
 };
@@ -223,7 +225,9 @@ struct GemmAndBiasParams : OpParams {
   void Delete() {
     c10::cuda::CUDACachingAllocator::raw_delete(c);
     if (duplicate_inputs_) {
+      // NOLINTNEXTLINE(*const-cast)
       c10::cuda::CUDACachingAllocator::raw_delete(const_cast<T*>(a));
+      // NOLINTNEXTLINE(*const-cast)
       c10::cuda::CUDACachingAllocator::raw_delete(const_cast<T*>(b));
     }
   }
@@ -233,30 +237,26 @@ struct GemmAndBiasParams : OpParams {
     return detail::NumericalCheck(c_dtype, c, other->c, GetSizeC()/sizeof(T)) ? OK : FAIL;
   }
 
-  char transa;
-  char transb;
-  int64_t m;
-  int64_t n;
-  int64_t k;
-  at::opmath_type<T> alpha;
-  const T* a;
-  int64_t lda;
-  const T* b;
-  int64_t ldb;
-  T* c;
-  int64_t ldc;
-  const T* bias;
-  at::cuda::blas::GEMMAndBiasActivationEpilogue activation;
+  char transa{};
+  char transb{};
+  int64_t m{};
+  int64_t n{};
+  int64_t k{};
+  at::opmath_type<T> alpha{};
+  const T* a{};
+  int64_t lda{};
+  const T* b{};
+  int64_t ldb{};
+  T* c{};
+  int64_t ldc{};
+  const T* bias{};
+  at::cuda::blas::GEMMAndBiasActivationEpilogue activation{};
 private:
   bool duplicate_inputs_{false};
 };
 
 template <typename T>
 struct GemmStridedBatchedParams : OpParams {
-  GemmStridedBatchedParams() = default;
-  GemmStridedBatchedParams(const GemmStridedBatchedParams&) = default;
-  GemmStridedBatchedParams& operator=(const GemmStridedBatchedParams&) = default;
-
   std::string Signature() const override {
     return fmt::sprintf("%c%c_%ld_%ld_%ld_B_%ld", transa, transb, m, n, k, batch);
   }
@@ -325,23 +325,23 @@ struct GemmStridedBatchedParams : OpParams {
     return detail::NumericalCheck(c_dtype, c, other->c, GetSizeC()/sizeof(T)) ? OK : FAIL;
   }
 
-  char transa;
-  char transb;
-  int64_t m;
-  int64_t n;
-  int64_t k;
-  at::opmath_type<T> alpha;
-  const T* a;
-  int64_t lda;
-  int64_t stride_a;
-  const T* b;
-  int64_t ldb;
-  int64_t stride_b;
+  char transa{};
+  char transb{};
+  int64_t m{};
+  int64_t n{};
+  int64_t k{};
+  at::opmath_type<T> alpha{};
+  const T* a{};
+  int64_t lda{};
+  int64_t stride_a{};
+  const T* b{};
+  int64_t ldb{};
+  int64_t stride_b{};
   at::opmath_type<T> beta;
-  T* c;
-  int64_t ldc;
-  int64_t stride_c;
-  int64_t batch;
+  T* c{};
+  int64_t ldc{};
+  int64_t stride_c{};
+  int64_t batch{};
 private:
   bool duplicate_inputs_{false};
 };
@@ -415,27 +415,27 @@ struct ScaledGemmParams : OpParams {
     return detail::NumericalCheck(c_dtype, c, other->c, GetSizeC()/sizeof(T)) ? OK : FAIL;
   }
 
-  char transa;
-  char transb;
-  int64_t m;
-  int64_t n;
-  int64_t k;
-  const void* a;
-  const void* a_scale_ptr;
-  int64_t lda;
-  ScalarType a_dtype;
-  const void* b;
-  const void* b_scale_ptr;
-  int64_t ldb;
-  ScalarType b_dtype;
-  const void* bias_ptr;
-  ScalarType bias_dtype;
-  void* c;
-  const void* c_scale_ptr;
-  int64_t ldc;
-  ScalarType c_dtype;
-  void* amax_ptr;
-  bool use_fast_accum;
+  char transa{};
+  char transb{};
+  int64_t m{};
+  int64_t n{};
+  int64_t k{};
+  const void* a{};
+  const void* a_scale_ptr{};
+  int64_t lda{};
+  ScalarType a_dtype{};
+  const void* b{};
+  const void* b_scale_ptr{};
+  int64_t ldb{};
+  ScalarType b_dtype{};
+  const void* bias_ptr{};
+  ScalarType bias_dtype{};
+  void* c{};
+  const void* c_scale_ptr{};
+  int64_t ldc{};
+  ScalarType c_dtype{};
+  void* amax_ptr{};
+  bool use_fast_accum{};
 private:
   bool duplicate_inputs_{false};
 };
diff --git a/aten/src/ATen/cuda/tunable/StreamTimer.cpp b/aten/src/ATen/cuda/tunable/StreamTimer.cpp
index 1407c32dbb35..ed24a29d9919 100644
--- a/aten/src/ATen/cuda/tunable/StreamTimer.cpp
+++ b/aten/src/ATen/cuda/tunable/StreamTimer.cpp
@@ -9,9 +9,10 @@
 //
 #include
-#include
 #include
 #include
+#include
+#include <limits>
 
 namespace at::cuda::tunable {
 
@@ -20,8 +21,7 @@ StreamTimer::StreamTimer() {
   AT_CUDA_CHECK(cudaEventCreate(&end_));
 }
 
-StreamTimer::~StreamTimer() {
-}
+StreamTimer::~StreamTimer() = default;
 
 void StreamTimer::Start() {
   AT_CUDA_CHECK(cudaDeviceSynchronize());
@@ -34,7 +34,7 @@ void StreamTimer::End() {
 }
 
 float StreamTimer::Duration() {
-  float time;
+  auto time = std::numeric_limits<float>::quiet_NaN();
   // time is in ms with a resolution of 1 us
   AT_CUDA_CHECK(cudaEventElapsedTime(&time, start_, end_));
   return time;
diff --git a/aten/src/ATen/cuda/tunable/StreamTimer.h b/aten/src/ATen/cuda/tunable/StreamTimer.h
index 36b8d72a4953..c83291d1b0e5 100644
--- a/aten/src/ATen/cuda/tunable/StreamTimer.h
+++ b/aten/src/ATen/cuda/tunable/StreamTimer.h
@@ -27,8 +27,8 @@ class StreamTimer : public ITimer {
   float Duration() override;
 
  private:
-  cudaEvent_t start_;
-  cudaEvent_t end_;
+  cudaEvent_t start_{};
+  cudaEvent_t end_{};
 };
 
 } // namespace at::cuda::tunable
diff --git a/aten/src/ATen/cuda/tunable/TunableOp.h b/aten/src/ATen/cuda/tunable/TunableOp.h
index 74d49f49e575..b1c607c72e0c 100644
--- a/aten/src/ATen/cuda/tunable/TunableOp.h
+++ b/aten/src/ATen/cuda/tunable/TunableOp.h
@@ -26,8 +26,6 @@ namespace at::cuda::tunable {
 template <typename ParamsT>
 class Callable {
  public:
-  Callable() = default;
-  Callable(Callable&&) = default;
   virtual ~Callable() = default;
   virtual TuningStatus Call(const ParamsT*) {
     return FAIL;
@@ -40,8 +38,6 @@ class Callable {
 template <typename ParamsT, typename TimerT>
 class TunableOp {
  public:
-  TunableOp() = default;
-  TunableOp(TunableOp&&) = default;
   virtual ~TunableOp() = default;
 
   TuningStatus operator()(const ParamsT* params) {
diff --git a/aten/src/ATen/functorch/TensorWrapper.cpp b/aten/src/ATen/functorch/TensorWrapper.cpp
index 53111ea98d08..f9fa6ee60d00 100644
--- a/aten/src/ATen/functorch/TensorWrapper.cpp
+++ b/aten/src/ATen/functorch/TensorWrapper.cpp
@@ -126,7 +126,7 @@ c10::intrusive_ptr<TensorImpl> TensorWrapper::shallow_copy_and_detach(
     c10::VariableVersion&& version_counter,
     bool allow_tensor_metadata_change) const {
   auto dest_impl = makeTensorWrapperPtr(value(), level_, is_alive_);
-  dest_impl->set_version_counter(version_counter);
+  dest_impl->set_version_counter(std::move(version_counter));
 
   // TODO: is this even right?
   dest_impl->set_allow_tensor_metadata_change(allow_tensor_metadata_change);
diff --git a/aten/src/ATen/native/nested/NestedTensorUtils.h b/aten/src/ATen/native/nested/NestedTensorUtils.h
index e36ae8a372f9..427840b6e0e0 100644
--- a/aten/src/ATen/native/nested/NestedTensorUtils.h
+++ b/aten/src/ATen/native/nested/NestedTensorUtils.h
@@ -264,8 +264,8 @@ class _map;
 template <class F, class A, class... Args>
 class _map<F, A, c10::guts::typelist::typelist<Args...>> {
  public:
-  static A function_one(F&& fn, const Args&... nested_node) {
-    return std::forward<F>(fn)(nested_node...);
+  static A function_one(const F& fn, const Args&... nested_node) {
+    return fn(nested_node...);
   }
   static NestedNode<A> function(
       const F& fn,
diff --git a/c10/core/AutogradState.h b/c10/core/AutogradState.h
index f98de71a83b6..ad168b8c0598 100644
--- a/c10/core/AutogradState.h
+++ b/c10/core/AutogradState.h
@@ -66,6 +66,7 @@ struct C10_API AutogradState {
   bool inference_mode_ : 1;
   bool fw_grad_mode_ : 1;
   bool multithreading_enabled_ : 1;
+  // NOLINTNEXTLINE(cppcoreguidelines-use-default-member-init)
   bool view_replay_enabled_ : 1;
 };
 
diff --git a/torch/csrc/Generator.cpp b/torch/csrc/Generator.cpp
index 60e83ed1c3f1..dfbffa7ba68f 100644
--- a/torch/csrc/Generator.cpp
+++ b/torch/csrc/Generator.cpp
@@ -30,7 +30,7 @@ using namespace torch;
 
 PyObject* THPGeneratorClass = nullptr;
 
-PyObject* THPGenerator_initDefaultGenerator(at::Generator cdata) {
+PyObject* THPGenerator_initDefaultGenerator(const at::Generator& cdata) {
   auto type = (PyTypeObject*)THPGeneratorClass;
   auto self = THPObjectPtr{type->tp_alloc(type, 0)};
   if (!self)
@@ -391,7 +391,7 @@ PyObject* pyobj(const Generator& self) {
   return self.pyobj();
 }
 
-PyObject* THPGenerator_Wrap(Generator gen) {
+PyObject* THPGenerator_Wrap(const Generator& gen) {
   if (!gen.defined()) {
     Py_RETURN_NONE;
   }
diff --git a/torch/csrc/Generator.h b/torch/csrc/Generator.h
index 4fef5911bab0..b5f72cb47b76 100644
--- a/torch/csrc/Generator.h
+++ b/torch/csrc/Generator.h
@@ -14,7 +14,7 @@ struct THPGenerator {
 // is borrowed. The caller should ensure that the at::Generator object lifetime
 // last at least as long as the Python wrapper.
 TORCH_PYTHON_API PyObject* THPGenerator_initDefaultGenerator(
-    at::Generator cdata);
+    const at::Generator& cdata);
 
 #define THPGenerator_Check(obj) PyObject_IsInstance(obj, THPGeneratorClass)
 
@@ -22,7 +22,7 @@ TORCH_PYTHON_API extern PyObject* THPGeneratorClass;
 
 bool THPGenerator_init(PyObject* module);
 
-TORCH_PYTHON_API PyObject* THPGenerator_Wrap(at::Generator gen);
+TORCH_PYTHON_API PyObject* THPGenerator_Wrap(const at::Generator& gen);
 
 TORCH_PYTHON_API at::Generator THPGenerator_Unwrap(PyObject* state);
diff --git a/torch/csrc/export/pybind.cpp b/torch/csrc/export/pybind.cpp
index 458b08c3f361..71464be05cac 100644
--- a/torch/csrc/export/pybind.cpp
+++ b/torch/csrc/export/pybind.cpp
@@ -7,6 +7,7 @@ void initExportBindings(PyObject* module) {
   auto rootModule = py::handle(module).cast<py::module>();
   auto m = rootModule.def_submodule("_export");
 
+  // NOLINTNEXTLINE(bugprone-unused-raii)
   py::class_<ExportedProgram>(m, "CppExportedProgram");
 
   m.def("deserialize_exported_program", [](const std::string& serialized) {
diff --git a/torch/csrc/lazy/backend/backend_interface.h b/torch/csrc/lazy/backend/backend_interface.h
index 064a578a39fd..19b1403cd285 100644
--- a/torch/csrc/lazy/backend/backend_interface.h
+++ b/torch/csrc/lazy/backend/backend_interface.h
@@ -7,7 +7,6 @@
 #include
 #include
 #include
-#include
 
 namespace torch::lazy {