diff --git a/.clang-tidy b/.clang-tidy
index 5776dabe0072..45c5eeb08e8a 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -55,10 +55,12 @@ readability-container-size-empty,
 readability-delete-null-pointer,
 readability-duplicate-include,
 readability-misplaced-array-index,
-readability-redundant-function-ptr-dereference,
-readability-redundant-smartptr-get,
+readability-redundant*,
 readability-simplify-subscript-expr,
 readability-string-compare,
+-readability-redundant-access-specifiers,
+-readability-redundant-control-flow,
+-readability-redundant-declaration,
 '
 HeaderFilterRegex: '^(aten/|c10/|torch/).*$'
 WarningsAsErrors: '*'
diff --git a/aten/src/ATen/StorageUtils.cpp b/aten/src/ATen/StorageUtils.cpp
index 19c240ed8904..bbd70a4571a3 100644
--- a/aten/src/ATen/StorageUtils.cpp
+++ b/aten/src/ATen/StorageUtils.cpp
@@ -11,7 +11,7 @@ C10_EXPORT c10::intrusive_ptr<c10::StorageImpl> new_shm_fd_storage(
       ALLOCATOR_MAPPED_KEEPFD | ALLOCATOR_MAPPED_UNLINK;
   std::string handle = NewProcessWideShmHandle();
   auto sptr = MapAllocator::makeDataPtr(
-      handle.c_str(), flags, size * sizeof(uint8_t), nullptr);
+      handle, flags, size * sizeof(uint8_t), nullptr);
   return c10::make_intrusive<StorageImpl>(
       c10::StorageImpl::use_byte_size_t(),
       size,
diff --git a/aten/src/ATen/core/Vitals.cpp b/aten/src/ATen/core/Vitals.cpp
index a854be6756bf..13b8eda63859 100644
--- a/aten/src/ATen/core/Vitals.cpp
+++ b/aten/src/ATen/core/Vitals.cpp
@@ -87,7 +87,7 @@ bool APIVitals::setVital(
   return true;
 }
 
-APIVitals::APIVitals() : vitals_enabled(false), name_map_() {
+APIVitals::APIVitals() : vitals_enabled(false) {
   // Set default values, force is necessary because in unit tests the env
   // variable may not be set when global APIVitals are constructed.
   setVital("CUDA", "used", "False", /* force = */ true);
diff --git a/aten/src/ATen/core/Vitals.h b/aten/src/ATen/core/Vitals.h
index 7ec213938d56..2fd7729744a1 100644
--- a/aten/src/ATen/core/Vitals.h
+++ b/aten/src/ATen/core/Vitals.h
@@ -11,7 +11,7 @@ TORCH_API bool torchVitalEnabled();
 
 struct TORCH_API TorchVitalAttr {
   // always initialized to empty
-  std::string value = "";
+  std::string value;
   template <typename T>
   TorchVitalAttr& operator<<(const T& t) {
     if (torchVitalEnabled()) {
diff --git a/aten/src/ATen/core/blob.h b/aten/src/ATen/core/blob.h
index c45679794045..251da65e0896 100644
--- a/aten/src/ATen/core/blob.h
+++ b/aten/src/ATen/core/blob.h
@@ -22,7 +22,7 @@ class TORCH_API Blob final : public c10::intrusive_ptr_target {
   /**
    * Initializes an empty Blob.
    */
-  Blob() noexcept : meta_() {}
+  Blob() noexcept = default;
   ~Blob() override {
     Reset();
   }
diff --git a/aten/src/ATen/core/class_type.cpp b/aten/src/ATen/core/class_type.cpp
index 7fb0d355529a..800d9ea0ef9f 100644
--- a/aten/src/ATen/core/class_type.cpp
+++ b/aten/src/ATen/core/class_type.cpp
@@ -76,7 +76,7 @@ std::string ClassType::getForwardPreHookErrorMessage(size_t pre_hook_idx) const
   std::string input_types = getSchemaInputTypesString(forward_schema);
   const std::vector<Argument>& forward_args = forward_schema.arguments();
 
-  std::string single_output = "";
+  std::string single_output;
   if (forward_args.size() == 2 &&
       forward_args[1].type()->cast<TupleType>() == nullptr) {
     // if the output type is a single tuple, it needs to be wrapped in an outer tuple
diff --git a/aten/src/ATen/core/class_type.h b/aten/src/ATen/core/class_type.h
index d3373fd2ee38..ea124fc6eb07 100644
--- a/aten/src/ATen/core/class_type.h
+++ b/aten/src/ATen/core/class_type.h
@@ -432,7 +432,7 @@ struct TORCH_API ClassType : public NamedType {
   bool isModule_ = false;
 
   // Doc string of class.
-  std::string doc_string_ = "";
+  std::string doc_string_;
 
   // For error reporting accesses to class level attributes.
   std::vector<std::string> unresolved_class_attributes_;
diff --git a/aten/src/ATen/core/function.h b/aten/src/ATen/core/function.h
index cebc10640a4c..7e8a765a05ab 100644
--- a/aten/src/ATen/core/function.h
+++ b/aten/src/ATen/core/function.h
@@ -43,7 +43,7 @@ struct TORCH_API Function {
   Function(Function&&) noexcept = default;
   Function& operator=(Function&&) noexcept = default;
   virtual std::string_view doc_string() const {
-    static constexpr std::string_view no_doc_string = "";
+    static constexpr std::string_view no_doc_string;
     return no_doc_string;
   }
 
diff --git a/aten/src/ATen/core/function_schema.h b/aten/src/ATen/core/function_schema.h
index 3ec771328110..48685c62d6cd 100644
--- a/aten/src/ATen/core/function_schema.h
+++ b/aten/src/ATen/core/function_schema.h
@@ -567,7 +567,7 @@ inline std::ostream& operator<<(std::ostream& out, const Argument& arg) {
   if (arg.alias_info() && !arg.alias_info()->containedTypes().empty()){
     out << arg.alias_info()->containedTypes()[0];
   }
-  std::string N = "";
+  std::string N;
   if (arg.N()) {
     N = std::to_string(*arg.N());
   }
diff --git a/aten/src/ATen/core/jit_type.h b/aten/src/ATen/core/jit_type.h
index c5cbe512fc2a..2c8fbf8a09ad 100644
--- a/aten/src/ATen/core/jit_type.h
+++ b/aten/src/ATen/core/jit_type.h
@@ -2245,7 +2245,7 @@ static const TypeKind Kind = TypeKind::ScalarTypeType;
   static ScalarTypeTypePtr get();
 
  private:
-  ScalarTypeType() : EnumerationType() {}
+  ScalarTypeType() {}
 };
 
 struct MemoryFormatType;
@@ -2259,7 +2259,7 @@ static const TypeKind Kind = TypeKind::MemoryFormatType;
   static MemoryFormatTypePtr get();
 
  private:
-  MemoryFormatType() : EnumerationType() {}
+  MemoryFormatType() {}
 };
 
 struct LayoutType;
@@ -2273,7 +2273,7 @@ static const TypeKind Kind = TypeKind::LayoutType;
   static LayoutTypePtr get();
 
  private:
-  LayoutType() : EnumerationType() {}
+  LayoutType() {}
 };
 
 namespace detail {
diff --git a/aten/src/ATen/core/library.cpp b/aten/src/ATen/core/library.cpp
index 8657cd9274f8..b8a5b418bbc0 100644
--- a/aten/src/ATen/core/library.cpp
+++ b/aten/src/ATen/core/library.cpp
@@ -48,7 +48,6 @@ CppFunction::CppFunction(c10::KernelFunction func, std::optional device_opt)
diff --git a/c10/cuda/CUDAGuard.h b/c10/cuda/CUDAGuard.h
--- a/c10/cuda/CUDAGuard.h
+++ b/c10/cuda/CUDAGuard.h
@@ -208,7 +208,7 @@ struct CUDAStreamGuard {
 /// CUDAGuard for when you can use this.
 struct OptionalCUDAStreamGuard {
   /// Create an uninitialized guard.
-  explicit OptionalCUDAStreamGuard() : guard_() {}
+  explicit OptionalCUDAStreamGuard() = default;
 
   /// Set the current CUDA device to the device associated with the passed
   /// stream, and set the current CUDA stream on that device to the passed
diff --git a/c10/test/util/string_util_test.cpp b/c10/test/util/string_util_test.cpp
index 963253a4fcb0..62c053028f70 100644
--- a/c10/test/util/string_util_test.cpp
+++ b/c10/test/util/string_util_test.cpp
@@ -49,8 +49,8 @@ TEST(StringUtilTest, testStrWideSingleMultibyte) {
 
 namespace test_str_wide_empty {
 TEST(StringUtilTest, testStrWideEmpty) {
-  std::wstring s = L"";
-  std::string narrow = "";
+  std::wstring s;
+  std::string narrow;
   EXPECT_EQ(narrow, c10::str(s));
 
   const wchar_t* c_str = s.c_str();
diff --git a/c10/util/LeftRight.h b/c10/util/LeftRight.h
index 0ad9a1b34610..759f6967933d 100644
--- a/c10/util/LeftRight.h
+++ b/c10/util/LeftRight.h
@@ -57,8 +57,7 @@ class LeftRight final {
       : _counters{{{0}, {0}}},
         _foregroundCounterIndex(0),
         _foregroundDataIndex(0),
-        _data{{T{args...}, T{args...}}},
-        _writeMutex() {}
+        _data{{T{args...}, T{args...}}} {}
 
   // Copying and moving would not be threadsafe.
   // Needs more thought and careful design to make that work.
diff --git a/c10/util/signal_handler.cpp b/c10/util/signal_handler.cpp
index 7132f08588ce..267fc5721b28 100644
--- a/c10/util/signal_handler.cpp
+++ b/c10/util/signal_handler.cpp
@@ -112,8 +112,6 @@ FatalSignalHandler::FatalSignalHandler()
     : fatalSignalHandlersInstalled(false),
      fatalSignalReceived(false),
       fatalSignalName(""),
-      writingCond(),
-      writingMutex(),
       signalReceived(false) {}
 
 // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
diff --git a/torch/csrc/CudaIPCTypes.cpp b/torch/csrc/CudaIPCTypes.cpp
index cf1c7217045e..88875516f43f 100644
--- a/torch/csrc/CudaIPCTypes.cpp
+++ b/torch/csrc/CudaIPCTypes.cpp
@@ -150,7 +150,6 @@ CudaIPCSentData::CudaIPCSentData(
     : handle_(std::move(handle)),
       offset_(offset),
       counter_ptr_(counter_ptr),
-      original_ptr_(),
       device_(device) {
 #if !defined(USE_ROCM)
   // CUDA have the unofficial limit on the number of recorded blocking
diff --git a/torch/csrc/api/include/torch/detail/TensorDataContainer.h b/torch/csrc/api/include/torch/detail/TensorDataContainer.h
index d5e8f0f9234b..5544bbc0a0b9 100644
--- a/torch/csrc/api/include/torch/detail/TensorDataContainer.h
+++ b/torch/csrc/api/include/torch/detail/TensorDataContainer.h
@@ -126,8 +126,7 @@ struct TensorDataContainer {
   AT_FORALL_COMPLEX_TYPES(TENSOR)
 #undef TENSOR
   TensorDataContainer(std::initializer_list<TensorDataContainer> init_list)
-      : sizes_(),
-        scalar_type_(init_list.begin()->scalar_type()),
+      : scalar_type_(init_list.begin()->scalar_type()),
         type_(TensorDataContainerType::InitList),
         init_list_(init_list) {
     const TensorDataContainer& first_elem = *(init_list.begin());
diff --git a/torch/csrc/api/include/torch/nn/modules/container/sequential.h b/torch/csrc/api/include/torch/nn/modules/container/sequential.h
index f5ddb4e370f6..e45a6af29401 100644
--- a/torch/csrc/api/include/torch/nn/modules/container/sequential.h
+++ b/torch/csrc/api/include/torch/nn/modules/container/sequential.h
@@ -376,7 +376,7 @@ class Sequential : public torch::nn::ModuleHolder<SequentialImpl> {
  public:
   using torch::nn::ModuleHolder<SequentialImpl>::ModuleHolder;
 
-  Sequential() : ModuleHolder() {}
+  Sequential() {}
 
   /// Constructs the `Sequential` from a braced-init-list of named `AnyModule`s.
   /// It enables the following use case:
diff --git a/torch/csrc/autograd/function.h b/torch/csrc/autograd/function.h
index 6e711b384cb5..b5360c6764f2 100644
--- a/torch/csrc/autograd/function.h
+++ b/torch/csrc/autograd/function.h
@@ -766,7 +766,7 @@ edge_list collect_next_edges(Variables&&... variables) {
 }
 
 struct TypeAndSize {
-  TypeAndSize() : options(at::TensorOptions()) {}
+  TypeAndSize() {}
   /* implicit */ TypeAndSize(const at::Tensor& t)
       : sym_sizes(t.sym_sizes().vec()),
         options(t.options()) {}
diff --git a/torch/csrc/autograd/functions/tensor.cpp b/torch/csrc/autograd/functions/tensor.cpp
index cd271db75bf6..1c13b60ca783 100644
--- a/torch/csrc/autograd/functions/tensor.cpp
+++ b/torch/csrc/autograd/functions/tensor.cpp
@@ -65,8 +65,7 @@ CopySlices::CopySlices(
     at::TensorGeometry view_,
     std::unique_ptr<ViewFunc> view_fn_,
     std::shared_ptr<Node> fn_)
-    : Node(),
-      base(base_var),
+    : base(base_var),
       view(std::move(view_)),
       view_fn(std::move(view_fn_)),
       fn(std::move(fn_)) {
diff --git a/torch/csrc/autograd/profiler_python.cpp b/torch/csrc/autograd/profiler_python.cpp
index f1d2822aeac6..51d1cd355563 100644
--- a/torch/csrc/autograd/profiler_python.cpp
+++ b/torch/csrc/autograd/profiler_python.cpp
@@ -603,8 +603,7 @@ static PyTypeObject TraceContextType = {
 
 class gil_and_restore_thread {
  public:
-  gil_and_restore_thread()
-      : gil_(), initial_thread_state_{PyThreadState_Get()} {}
+  gil_and_restore_thread() : initial_thread_state_{PyThreadState_Get()} {}
 
   ~gil_and_restore_thread() {
     PyThreadState_Swap(initial_thread_state_);
diff --git a/torch/csrc/distributed/autograd/engine/dist_engine.cpp b/torch/csrc/distributed/autograd/engine/dist_engine.cpp
index 4f8dfd6456df..6c3b3537c523 100644
--- a/torch/csrc/distributed/autograd/engine/dist_engine.cpp
+++ b/torch/csrc/distributed/autograd/engine/dist_engine.cpp
@@ -108,8 +108,7 @@ void DistEngine::globalCpuThread(
 }
 
 DistEngine::DistEngine()
-    : initializedContextIds_(),
-      engine_(Engine::get_default_engine()),
+    : engine_(Engine::get_default_engine()),
       global_cpu_ready_queue_(std::make_shared<ReadyQueue>()),
       global_cpu_thread_(
           &DistEngine::globalCpuThread,
diff --git a/torch/csrc/distributed/c10d/FileStore.cpp b/torch/csrc/distributed/c10d/FileStore.cpp
index 3b4c38711f55..efd8fc647bb8 100644
--- a/torch/csrc/distributed/c10d/FileStore.cpp
+++ b/torch/csrc/distributed/c10d/FileStore.cpp
@@ -282,8 +282,7 @@ off_t refresh(
 } // namespace
 
 FileStore::FileStore(std::string path, int numWorkers)
-    : Store(),
-      path_(std::move(path)),
+    : path_(std::move(path)),
       numWorkers_(numWorkers),
       cleanupKey_("cleanup/"),
diff --git a/torch/csrc/distributed/c10d/Work.cpp b/torch/csrc/distributed/c10d/Work.cpp
index 4502e4aa235b..5e30e91ce05b 100644
--- a/torch/csrc/distributed/c10d/Work.cpp
+++ b/torch/csrc/distributed/c10d/Work.cpp
@@ -147,7 +147,7 @@ uint64_t Work::getSequencenumber() const {
 class FutureWrappingWork : public Work {
  public:
   FutureWrappingWork(c10::intrusive_ptr<c10::ivalue::Future> fut)
-      : Work(), _fut(std::move(fut)) {}
+      : _fut(std::move(fut)) {}
 
   ~FutureWrappingWork() override = default;
 
diff --git a/torch/csrc/distributed/rpc/rref_impl.cpp b/torch/csrc/distributed/rpc/rref_impl.cpp
index 937d491a18b2..a004a5e2b1c1 100644
--- a/torch/csrc/distributed/rpc/rref_impl.cpp
+++ b/torch/csrc/distributed/rpc/rref_impl.cpp
@@ -53,10 +53,7 @@ RRefForkData::RRefForkData(
 //////////////////////////////  RRef  /////////////////////////////////////
 
 RRef::RRef(worker_id_t ownerId, const RRefId& rrefId, TypePtr type)
-    : RRefInterface(),
-      ownerId_(ownerId),
-      rrefId_(rrefId),
-      type_(std::move(type)) {}
+    : ownerId_(ownerId), rrefId_(rrefId), type_(std::move(type)) {}
 
 RRefForkData RRef::fork() const {
   auto& ctx = RRefContext::getInstance();
diff --git a/torch/csrc/dynamo/guards.cpp b/torch/csrc/dynamo/guards.cpp
index d6458d506568..b21af7beb760 100644
--- a/torch/csrc/dynamo/guards.cpp
+++ b/torch/csrc/dynamo/guards.cpp
@@ -3109,7 +3109,7 @@ class TORCH_FUNCTION_MODE_STACK : public LeafGuard {
   TORCH_FUNCTION_MODE_STACK(
      const py::list& initial_stack,
       py::object verbose_code_parts)
-      : LeafGuard(std::move(verbose_code_parts)), _ref_stack() {
+      : LeafGuard(std::move(verbose_code_parts)) {
     Py_ssize_t len = PyList_Size(initial_stack.ptr());
     for (Py_ssize_t idx = 0; idx < len; idx++) {
       PyObject* mode = PyList_GetItem(initial_stack.ptr(), idx); // borrowed ref
diff --git a/torch/csrc/inductor/aoti_eager/kernel_holder.h b/torch/csrc/inductor/aoti_eager/kernel_holder.h
index fed2e3b5d61d..8459b35c6837 100644
--- a/torch/csrc/inductor/aoti_eager/kernel_holder.h
+++ b/torch/csrc/inductor/aoti_eager/kernel_holder.h
@@ -21,7 +21,7 @@ struct AOTIKernelMetadata {
   std::vector<ParameterMetadata> parameter_metadata_list_;
   // AOTI model runner to run the AOTI kernel
   std::shared_ptr<AOTIModelContainerRunner> kernel_runner_;
-  AOTIKernelMetadata() : parameter_metadata_list_(), kernel_runner_(nullptr) {}
+  AOTIKernelMetadata() : kernel_runner_(nullptr) {}
 
   // Check whether the given parameter metadata list is the same as the
   // parameter metadata list of the AOTI kernel.
diff --git a/torch/csrc/inductor/aoti_package/model_package_loader.cpp b/torch/csrc/inductor/aoti_package/model_package_loader.cpp
index 9473492bb305..3dbb8b810aa4 100644
--- a/torch/csrc/inductor/aoti_package/model_package_loader.cpp
+++ b/torch/csrc/inductor/aoti_package/model_package_loader.cpp
@@ -91,7 +91,7 @@ std::tuple<std::string, std::string> get_cpp_compile_command(
   std::string compiler = compile_options["compiler"].get<std::string>();
   bool compile_only = compile_options["compile_only"].get<bool>();
 
-  std::string source_args = "";
+  std::string source_args;
   for (const std::string& source : sources) {
     source_args += source + " ";
   }
@@ -99,37 +99,37 @@ std::tuple<std::string, std::string> get_cpp_compile_command(
   std::string file_ext = compile_only ? ".o" : ".so";
   std::string target_file = output_dir + filename + file_ext;
 
-  std::string cflags_args = "";
+  std::string cflags_args;
   for (auto& arg : compile_options["cflags"]) {
     cflags_args += "-" + arg.get<std::string>() + " ";
   }
 
-  std::string definitions_args = "";
+  std::string definitions_args;
   for (auto& arg : compile_options["definitions"]) {
     definitions_args += "-D " + arg.get<std::string>() + " ";
   }
 
-  std::string include_dirs_args = "";
+  std::string include_dirs_args;
   for (auto& arg : compile_options["include_dirs"]) {
     include_dirs_args += "-I" + arg.get<std::string>() + " ";
   }
 
-  std::string ldflags_args = "";
+  std::string ldflags_args;
   for (auto& arg : compile_options["ldflags"]) {
     ldflags_args += "-" + arg.get<std::string>() + " ";
   }
 
-  std::string libraries_dirs_args = "";
+  std::string libraries_dirs_args;
   for (auto& arg : compile_options["libraries_dirs"]) {
     libraries_dirs_args += "-L" + arg.get<std::string>() + " ";
   }
 
-  std::string libraries_args = "";
+  std::string libraries_args;
   for (auto& arg : compile_options["libraries"]) {
     libraries_args += "-l" + arg.get<std::string>() + " ";
   }
 
-  std::string passthrough_parameters_args = "";
+  std::string passthrough_parameters_args;
   for (auto& arg : compile_options["passthrough_args"]) {
     passthrough_parameters_args += arg.get<std::string>() + " ";
   }
@@ -343,10 +343,10 @@ AOTIModelPackageLoader::AOTIModelPackageLoader(
   }
 
   temp_dir_ = create_temp_dir();
-  std::string so_filename = "";
-  std::string cpp_filename = "";
-  std::string consts_filename = "";
-  std::string found_filenames = ""; // Saving for bookkeeping
+  std::string so_filename;
+  std::string cpp_filename;
+  std::string consts_filename;
+  std::string found_filenames; // Saving for bookkeeping
   std::string model_directory =
       "data" + k_separator + "aotinductor" + k_separator + model_name;
 
@@ -379,7 +379,7 @@ AOTIModelPackageLoader::AOTIModelPackageLoader(
         "Failed to find parent path in " + output_path_str);
   }
   std::string parent_path = output_path_str.substr(0, parent_path_idx);
-  if (!recursive_mkdir(parent_path.c_str())) {
+  if (!recursive_mkdir(parent_path)) {
     throw std::runtime_error(fmt::format(
         "Failed to create directory {}: {}",
         parent_path,
diff --git a/torch/csrc/lazy/core/hash.h b/torch/csrc/lazy/core/hash.h
index 0ea3022f2696..d9143da8beb8 100644
--- a/torch/csrc/lazy/core/hash.h
+++ b/torch/csrc/lazy/core/hash.h
@@ -29,7 +29,7 @@ class TORCH_API hash_t : public c10::uint128 {
   hash_t(uint64_t val) : uint128(val) {}
   hash_t(uint128 val) : uint128(val) {}
   hash_t(uint64_t top, uint64_t bottom) : uint128(top, bottom) {}
-  hash_t() : uint128() {}
+  hash_t() {}
 };
 
 // Std* functions use 64-bit hash
diff --git a/torch/csrc/profiler/standalone/execution_trace_observer.cpp b/torch/csrc/profiler/standalone/execution_trace_observer.cpp
index 1f6953667c99..1990c30c73bf 100644
--- a/torch/csrc/profiler/standalone/execution_trace_observer.cpp
+++ b/torch/csrc/profiler/standalone/execution_trace_observer.cpp
@@ -410,7 +410,7 @@ convertIValue(
   size_t offset = 0;
   size_t numel = 0;
   size_t itemsize = 0;
-  std::string device_str = "";
+  std::string device_str;
   // symbolic sizes/strides implies t->storage_offset() will fail
   if (tensor_impl->has_storage() &&
       !tensor_impl->has_symbolic_sizes_strides()) {
diff --git a/torch/csrc/utils/invalid_arguments.cpp b/torch/csrc/utils/invalid_arguments.cpp
index c2825f7d945d..a0a85956442d 100644
--- a/torch/csrc/utils/invalid_arguments.cpp
+++ b/torch/csrc/utils/invalid_arguments.cpp
@@ -111,7 +111,7 @@ struct Option {
         is_variadic(is_variadic),
        has_out(has_out) {}
   Option(bool is_variadic, bool has_out)
-      : arguments(), is_variadic(is_variadic), has_out(has_out) {}
+      : is_variadic(is_variadic), has_out(has_out) {}
   Option(const Option&) = delete;
   Option(Option&& other) noexcept = default;
   Option& operator=(const Option&) = delete;
diff --git a/torch/csrc/utils/throughput_benchmark.h b/torch/csrc/utils/throughput_benchmark.h
index 50854f1b73aa..8cf2f97158f2 100644
--- a/torch/csrc/utils/throughput_benchmark.h
+++ b/torch/csrc/utils/throughput_benchmark.h
@@ -58,7 +58,7 @@ struct BenchmarkConfig {
   // If set autograd profiler will be enabled. I.e. this variable would be
   // created before the main benchmark loop (but after the warmup):
   // RecordProfile guard(profiler_output_path);
-  std::string profiler_output_path{""};
+  std::string profiler_output_path;
 };
 
 namespace detail {
diff --git a/torch/library.h b/torch/library.h
index 2761573e2ccc..4914f3d4189e 100644
--- a/torch/library.h
+++ b/torch/library.h
@@ -119,8 +119,8 @@ class TORCH_API CppFunction final {
      : func_(c10::KernelFunction::makeFromUnboxedRuntimeFunction(f)),
        cpp_signature_(c10::impl::CppSignature::make<Func>()),
        schema_(
-            c10::detail::inferFunctionSchemaFromFunctor<std::decay_t<Func>>()),
-        debug_() {}
+            c10::detail::inferFunctionSchemaFromFunctor<std::decay_t<Func>>())
+  {}
 
   /// This overload accepts compile time function pointers, e.g.,
   /// `CppFunction(TORCH_FN(add_impl))`
@@ -134,8 +134,8 @@ class TORCH_API CppFunction final {
         cpp_signature_(
             c10::impl::CppSignature::make<typename FuncPtr::FuncType>()),
         schema_(c10::detail::inferFunctionSchemaFromFunctor<
-                typename FuncPtr::FuncType>()),
-        debug_() {}
+                typename FuncPtr::FuncType>())
+  {}
 
   /// This overload accepts lambdas, e.g., `CppFunction([](const Tensor& self) {
   /// ... })`
@@ -149,8 +149,8 @@ class TORCH_API CppFunction final {
             std::forward<Lambda>(f))),
         cpp_signature_(c10::impl::CppSignature::make<std::decay_t<Lambda>>()),
         schema_(c10::detail::inferFunctionSchemaFromFunctor<
-            std::decay_t<Lambda>>()),
-        debug_() {}
+            std::decay_t<Lambda>>())
+  {}
 
 #if defined C10_MOBILE
   /// This overload accepts function pointers, e.g., `CppFunction(&add_impl,
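
The pattern behind nearly every hunk above is the same: an initializer that only restates what default construction already does, which is exactly what the newly enabled readability-redundant* checks (readability-redundant-member-init, readability-redundant-string-init, and friends) diagnose. A minimal self-contained sketch of the before/after shape, with hypothetical struct and member names (not taken from this patch):

#include <string>
#include <vector>

// Before: both initializers are redundant. clang-tidy reports
// readability-redundant-member-init for tags_() and
// readability-redundant-string-init for the = "".
struct Before {
  Before() : tags_() {}
  std::vector<std::string> tags_;
  std::string doc_ = "";
};

// After: behaviorally identical, mirroring the fixes applied above.
struct After {
  After() = default;
  std::vector<std::string> tags_;
  std::string doc_;
};

Running, for example, clang-tidy -checks='-*,readability-redundant*' over the Before struct flags both members; the After struct is clean.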