diff --git a/c10/util/strong_type.h b/c10/util/strong_type.h index daf8a1804d26..c7d2fc0ecdd5 100644 --- a/c10/util/strong_type.h +++ b/c10/util/strong_type.h @@ -65,7 +65,7 @@ struct default_constructible namespace impl { template - constexpr bool supports_default_construction(const ::strong::default_constructible::modifier*) + constexpr bool supports_default_construction(const ::strong::default_constructible::modifier* /*unused*/) { return true; } @@ -76,7 +76,7 @@ class type : public modifier>... { public: template {}>> - explicit type(uninitialized_t) + explicit type(uninitialized_t /*unused*/) noexcept { } @@ -138,7 +138,7 @@ private: namespace impl { template - constexpr bool is_strong_type_func(const strong::type*) { return true;} + constexpr bool is_strong_type_func(const strong::type* /*unused*/) { return true;} constexpr bool is_strong_type_func(...) { return false;} template constexpr T underlying_type(strong::type*); diff --git a/torch/csrc/Exceptions.cpp b/torch/csrc/Exceptions.cpp index 4ce5834a1713..b771e6532700 100644 --- a/torch/csrc/Exceptions.cpp +++ b/torch/csrc/Exceptions.cpp @@ -252,10 +252,10 @@ PyWarningHandler::PyWarningHandler() noexcept(true) // Get the Python warning type for a warning static PyObject* map_warning_to_python_type(const c10::Warning& warning) { struct Visitor { - PyObject* operator()(const c10::UserWarning&) const { + PyObject* operator()(const c10::UserWarning& /*unused*/) const { return PyExc_UserWarning; } - PyObject* operator()(const c10::DeprecationWarning&) const { + PyObject* operator()(const c10::DeprecationWarning& /*unused*/) const { return PyExc_DeprecationWarning; } }; diff --git a/torch/csrc/Exceptions.h b/torch/csrc/Exceptions.h index 60a7bb644df0..d58080946081 100644 --- a/torch/csrc/Exceptions.h +++ b/torch/csrc/Exceptions.h @@ -269,7 +269,8 @@ bool THPException_init(PyObject* module); namespace torch { // Set python current exception from a C++ exception -TORCH_PYTHON_API void translate_exception_to_python(const std::exception_ptr&); +TORCH_PYTHON_API void translate_exception_to_python( + const std::exception_ptr& /*e_ptr*/); TORCH_PYTHON_API std::string processErrorMsg(std::string str); @@ -358,8 +359,8 @@ using Arg = typename invoke_traits::template arg::type; template auto wrap_pybind_function_impl_( Func&& f, - std::index_sequence, - std::bool_constant) { + std::index_sequence /*unused*/, + std::bool_constant /*unused*/) { namespace py = pybind11; // f=f is needed to handle function references on older compilers @@ -371,7 +372,7 @@ auto wrap_pybind_function_impl_( }; } -PyObject* _new_accelerator_error_object(const c10::AcceleratorError&); +PyObject* _new_accelerator_error_object(const c10::AcceleratorError& /*e*/); } // namespace detail // Wrap a function with TH error and warning handling. 
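Note (annotation, not part of the patch): every hunk in this patch applies the same mechanical fix — a parameter that is deliberately unnamed or unused keeps its intended name as a trailing comment, the style produced by clang-tidy's misc-unused-parameters fix-its. The visitor in Exceptions.cpp above is the clearest case: each operator() dispatches purely on the argument's static type and never reads its value. A minimal standalone sketch of that case follows; UserWarning and DeprecationWarning here are hypothetical stand-ins for the c10 warning classes.

#include <cstdio>
#include <variant>

// Stand-ins for c10::UserWarning and c10::DeprecationWarning.
struct UserWarning {};
struct DeprecationWarning {};

// As in map_warning_to_python_type: overload resolution happens on the
// parameter's type alone, so the parameter itself is never read and its
// name is commented out.
struct Visitor {
  const char* operator()(const UserWarning& /*unused*/) const {
    return "UserWarning";
  }
  const char* operator()(const DeprecationWarning& /*unused*/) const {
    return "DeprecationWarning";
  }
};

int main() {
  std::variant<UserWarning, DeprecationWarning> w = DeprecationWarning{};
  std::printf("%s\n", std::visit(Visitor{}, w));  // prints: DeprecationWarning
  return 0;
}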
diff --git a/torch/csrc/PyInterpreter.cpp b/torch/csrc/PyInterpreter.cpp index e6016a7721e8..684611fe498a 100644 --- a/torch/csrc/PyInterpreter.cpp +++ b/torch/csrc/PyInterpreter.cpp @@ -57,7 +57,7 @@ struct ConcretePyInterpreterVTable final void reportErrorCallback(PyObject* callback, DispatchKey key) const override; void python_dispatcher( const c10::OperatorHandle& op, - c10::DispatchKeySet, + c10::DispatchKeySet /*ks*/, torch::jit::Stack* stack) const override; // NB: this is defined in python_dispatch.cpp void python_op_registration_trampoline( @@ -80,12 +80,15 @@ struct ConcretePyInterpreterVTable final opname, pymodule, context); } - bool is_contiguous(const c10::TensorImpl* self, at::MemoryFormat) - const override; - c10::SymBool sym_is_contiguous(const c10::TensorImpl* self, at::MemoryFormat) - const override; - bool is_strides_like(const c10::TensorImpl* self, at::MemoryFormat) - const override; + bool is_contiguous( + const c10::TensorImpl* self, + at::MemoryFormat /*memory_format*/) const override; + c10::SymBool sym_is_contiguous( + const c10::TensorImpl* self, + at::MemoryFormat /*memory_format*/) const override; + bool is_strides_like( + const c10::TensorImpl* self, + at::MemoryFormat /*memory_format*/) const override; bool is_non_overlapping_and_dense(const c10::TensorImpl* self) const override; c10::Device device(const c10::TensorImpl* self) const override; int64_t dim(const c10::TensorImpl* self) const override; diff --git a/torch/csrc/PyInterpreterHooks.cpp b/torch/csrc/PyInterpreterHooks.cpp index 5e064493fd59..f3f07273eb90 100644 --- a/torch/csrc/PyInterpreterHooks.cpp +++ b/torch/csrc/PyInterpreterHooks.cpp @@ -3,7 +3,8 @@ namespace torch::detail { -PyInterpreterHooks::PyInterpreterHooks(c10::impl::PyInterpreterHooksArgs) {} +PyInterpreterHooks::PyInterpreterHooks( + c10::impl::PyInterpreterHooksArgs /*unused*/) {} c10::impl::PyInterpreter* PyInterpreterHooks::getPyInterpreter() const { // Delegate to the existing implementation diff --git a/torch/csrc/PyInterpreterHooks.h b/torch/csrc/PyInterpreterHooks.h index 1def7b8c55ae..65c6f3e149ec 100644 --- a/torch/csrc/PyInterpreterHooks.h +++ b/torch/csrc/PyInterpreterHooks.h @@ -7,7 +7,7 @@ namespace torch::detail { // Concrete implementation of PyInterpreterHooks class PyInterpreterHooks : public c10::impl::PyInterpreterHooksInterface { public: - explicit PyInterpreterHooks(c10::impl::PyInterpreterHooksArgs); + explicit PyInterpreterHooks(c10::impl::PyInterpreterHooksArgs /*unused*/); c10::impl::PyInterpreter* getPyInterpreter() const override; }; diff --git a/torch/csrc/TypeInfo.cpp b/torch/csrc/TypeInfo.cpp index 524ae4d01bfa..ac1d238b4c2f 100644 --- a/torch/csrc/TypeInfo.cpp +++ b/torch/csrc/TypeInfo.cpp @@ -117,7 +117,7 @@ static PyObject* THPDTypeInfo_compare( return Py_INCREF(Py_NotImplemented), Py_NotImplemented; } -static PyObject* THPDTypeInfo_bits(THPDTypeInfo* self, void*) { +static PyObject* THPDTypeInfo_bits(THPDTypeInfo* self, void* /*unused*/) { uint64_t bits = elementSize(self->type) * CHAR_BIT; return THPUtils_packUInt64(bits); } @@ -133,7 +133,7 @@ static PyObject* THPDTypeInfo_bits(THPDTypeInfo* self, void*) { at::ScalarType::BFloat16, \ AT_EXPAND(AT_FLOAT8_TYPES)) -static PyObject* THPFInfo_eps(THPFInfo* self, void*) { +static PyObject* THPFInfo_eps(THPFInfo* self, void* /*unused*/) { HANDLE_TH_ERRORS return _AT_DISPATCH_FINFO_TYPES(self->type, "epsilon", [] { return PyFloat_FromDouble( @@ -142,7 +142,7 @@ static PyObject* THPFInfo_eps(THPFInfo* self, void*) { END_HANDLE_TH_ERRORS } -static PyObject* 
THPFInfo_max(THPFInfo* self, void*) { +static PyObject* THPFInfo_max(THPFInfo* self, void* /*unused*/) { HANDLE_TH_ERRORS return _AT_DISPATCH_FINFO_TYPES(self->type, "max", [] { return PyFloat_FromDouble( @@ -151,7 +151,7 @@ static PyObject* THPFInfo_max(THPFInfo* self, void*) { END_HANDLE_TH_ERRORS } -static PyObject* THPFInfo_min(THPFInfo* self, void*) { +static PyObject* THPFInfo_min(THPFInfo* self, void* /*unused*/) { HANDLE_TH_ERRORS return _AT_DISPATCH_FINFO_TYPES(self->type, "lowest", [] { return PyFloat_FromDouble( @@ -164,7 +164,7 @@ static PyObject* THPFInfo_min(THPFInfo* self, void*) { AT_DISPATCH_V2( \ TYPE, NAME, AT_WRAP(__VA_ARGS__), AT_EXPAND(AT_INTEGRAL_TYPES_V2)) -static PyObject* THPIInfo_max(THPIInfo* self, void*) { +static PyObject* THPIInfo_max(THPIInfo* self, void* /*unused*/) { HANDLE_TH_ERRORS if (at::isIntegralType(self->type, /*includeBool=*/false)) { return AT_DISPATCH_IINFO_TYPES(self->type, "max", [] { @@ -182,7 +182,7 @@ static PyObject* THPIInfo_max(THPIInfo* self, void*) { END_HANDLE_TH_ERRORS } -static PyObject* THPIInfo_min(THPIInfo* self, void*) { +static PyObject* THPIInfo_min(THPIInfo* self, void* /*unused*/) { HANDLE_TH_ERRORS if (at::isIntegralType(self->type, /*includeBool=*/false)) { return AT_DISPATCH_IINFO_TYPES(self->type, "min", [] { @@ -200,7 +200,7 @@ static PyObject* THPIInfo_min(THPIInfo* self, void*) { END_HANDLE_TH_ERRORS } -static PyObject* THPIInfo_dtype(THPIInfo* self, void*) { +static PyObject* THPIInfo_dtype(THPIInfo* self, void* /*unused*/) { HANDLE_TH_ERRORS auto primary_name = c10::getDtypeNames(self->type).first; return AT_DISPATCH_IINFO_TYPES(self->type, "dtype", [&primary_name] { @@ -209,7 +209,7 @@ static PyObject* THPIInfo_dtype(THPIInfo* self, void*) { END_HANDLE_TH_ERRORS } -static PyObject* THPFInfo_smallest_normal(THPFInfo* self, void*) { +static PyObject* THPFInfo_smallest_normal(THPFInfo* self, void* /*unused*/) { HANDLE_TH_ERRORS return _AT_DISPATCH_FINFO_TYPES(self->type, "min", [] { return PyFloat_FromDouble( @@ -218,12 +218,12 @@ static PyObject* THPFInfo_smallest_normal(THPFInfo* self, void*) { END_HANDLE_TH_ERRORS } -static PyObject* THPFInfo_tiny(THPFInfo* self, void*) { +static PyObject* THPFInfo_tiny(THPFInfo* self, void* /*unused*/) { // see gh-70909, essentially the array_api prefers smallest_normal over tiny return THPFInfo_smallest_normal(self, nullptr); } -static PyObject* THPFInfo_resolution(THPFInfo* self, void*) { +static PyObject* THPFInfo_resolution(THPFInfo* self, void* /*unused*/) { HANDLE_TH_ERRORS return _AT_DISPATCH_FINFO_TYPES(self->type, "digits10", [] { return PyFloat_FromDouble(std::pow( @@ -233,7 +233,7 @@ static PyObject* THPFInfo_resolution(THPFInfo* self, void*) { END_HANDLE_TH_ERRORS } -static PyObject* THPFInfo_dtype(THPFInfo* self, void*) { +static PyObject* THPFInfo_dtype(THPFInfo* self, void* /*unused*/) { HANDLE_TH_ERRORS auto primary_name = c10::getDtypeNames(self->type).first; return _AT_DISPATCH_FINFO_TYPES(self->type, "dtype", [&primary_name] { diff --git a/torch/csrc/acc/Module.cpp b/torch/csrc/acc/Module.cpp index 6360d0430bf8..1ae2cd2d0bc3 100644 --- a/torch/csrc/acc/Module.cpp +++ b/torch/csrc/acc/Module.cpp @@ -76,18 +76,19 @@ struct PythonDeviceGuard final : public c10::impl::DeviceGuardImplInterface { } void setDevice(c10::Device device) const override {} void uncheckedSetDevice(c10::Device device) const noexcept override {} - c10::Stream getStream(c10::Device) const noexcept override { + c10::Stream getStream(c10::Device /*unused*/) const noexcept override { // no-op 
return c10::Stream(c10::Stream::DEFAULT, getDevice()); } - c10::Stream getNewStream(c10::Device, int priority = 0) const override { + c10::Stream getNewStream(c10::Device /*unused*/, int priority = 0) + const override { // no-op (void)priority; return c10::Stream(c10::Stream::DEFAULT, getDevice()); } - c10::Stream exchangeStream(c10::Stream) const noexcept override { + c10::Stream exchangeStream(c10::Stream /*unused*/) const noexcept override { // no-op return c10::Stream(c10::Stream::DEFAULT, getDevice()); } diff --git a/torch/csrc/api/include/torch/nn/functional/conv.h b/torch/csrc/api/include/torch/nn/functional/conv.h index 1c2b5b73c48d..2ab6a7684285 100644 --- a/torch/csrc/api/include/torch/nn/functional/conv.h +++ b/torch/csrc/api/include/torch/nn/functional/conv.h @@ -8,11 +8,11 @@ namespace torch::nn::functional { #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace detail { -inline std::string padding_unwrap(enumtype::kValid) { +inline std::string padding_unwrap(enumtype::kValid /*unused*/) { return "valid"; } -inline std::string padding_unwrap(enumtype::kSame) { +inline std::string padding_unwrap(enumtype::kSame /*unused*/) { return "same"; } diff --git a/torch/csrc/api/include/torch/nn/modules/container/any.h b/torch/csrc/api/include/torch/nn/modules/container/any.h index 28f297388757..c7a2fcbe62f7 100644 --- a/torch/csrc/api/include/torch/nn/modules/container/any.h +++ b/torch/csrc/api/include/torch/nn/modules/container/any.h @@ -185,11 +185,12 @@ class AnyModule { typename... ArgumentTypes> std::unique_ptr make_holder( std::shared_ptr&& module, - ReturnType (Class::*)(ArgumentTypes...)); + ReturnType (Class::* /*unused*/)(ArgumentTypes...)); /// Helper method invoked by const and non-const `get()`. template - ModuleType& get_(ReturnType (ModuleType::*)(ArgumentTypes...)) const; + ModuleType& get_( + ReturnType (ModuleType::* /*unused*/)(ArgumentTypes...)) const; /// Helper method invoked by const and non-const `get()`. template @@ -320,7 +321,7 @@ template < typename... ArgumentTypes> std::unique_ptr AnyModule::make_holder( std::shared_ptr&& module, - ReturnType (Class::*)(ArgumentTypes...)) { + ReturnType (Class::* /*unused*/)(ArgumentTypes...)) { static_assert( torch::detail::check_not_lvalue_references(), "Modules stored inside AnyModule must not take references. 
" @@ -345,7 +346,7 @@ ModuleType& AnyModule::get_() const { template ModuleType& AnyModule::get_( - ReturnType (ModuleType::*)(ArgumentTypes...)) const { + ReturnType (ModuleType::* /*unused*/)(ArgumentTypes...)) const { if (typeid(ModuleType).hash_code() == type_info().hash_code()) { return *static_cast&>( *content_) diff --git a/torch/csrc/autograd/FunctionsManual.h b/torch/csrc/autograd/FunctionsManual.h index 96864e165a95..4dc0425d426e 100644 --- a/torch/csrc/autograd/FunctionsManual.h +++ b/torch/csrc/autograd/FunctionsManual.h @@ -279,7 +279,7 @@ std::tuple clamp_backward_min_max( const at::Tensor& self, const at::Tensor& min, const at::Tensor& max, - const std::array&); + const std::array& /*grad_input_mask*/); at::Tensor clamp_jvp( const Tensor& self_p, const Tensor& self_t, diff --git a/torch/csrc/autograd/autograd_not_implemented_fallback.cpp b/torch/csrc/autograd/autograd_not_implemented_fallback.cpp index 3d4ab7104293..9de461cc56a2 100644 --- a/torch/csrc/autograd/autograd_not_implemented_fallback.cpp +++ b/torch/csrc/autograd/autograd_not_implemented_fallback.cpp @@ -517,8 +517,9 @@ struct GenericViewFunc : public ViewFunc { } std::unique_ptr clone_and_set( - std::optional> = std::nullopt, - std::optional> = std::nullopt) const override { + std::optional> /*unused*/ = std::nullopt, + std::optional> /*unused*/ = + std::nullopt) const override { return std::make_unique( non_tensor_stack_, aliased_input_idx_val_, op_); } diff --git a/torch/csrc/autograd/function_hook.h b/torch/csrc/autograd/function_hook.h index c72aac4fbecf..8a847c56834f 100644 --- a/torch/csrc/autograd/function_hook.h +++ b/torch/csrc/autograd/function_hook.h @@ -60,8 +60,8 @@ struct TORCH_API PostAccumulateGradHook { } virtual void apply_with_saved( - Variable&, - torch::dynamo::autograd::SwapSavedVariables&) { + Variable& /*unused*/, + torch::dynamo::autograd::SwapSavedVariables& /*unused*/) { TORCH_CHECK_NOT_IMPLEMENTED( false, std::string("compiled_args nyi, see [Note: Compiled Autograd] ") + diff --git a/torch/csrc/autograd/profiler_kineto.cpp b/torch/csrc/autograd/profiler_kineto.cpp index 7fbf04ae99bc..fe3acd99761c 100644 --- a/torch/csrc/autograd/profiler_kineto.cpp +++ b/torch/csrc/autograd/profiler_kineto.cpp @@ -222,7 +222,7 @@ struct AddTensorboardFields : public MetadataBase { } template - void operator()(const T&) {} + void operator()(const T& /*unused*/) {} }; struct AddGenericMetadata : public MetadataBase { @@ -346,7 +346,7 @@ struct AddGenericMetadata : public MetadataBase { } template - void operator()(const T&) {} + void operator()(const T& /*unused*/) {} private: /* To get names of the performance events */ diff --git a/torch/csrc/autograd/profiler_kineto.h b/torch/csrc/autograd/profiler_kineto.h index c8ddd2df2980..dbb4febce78b 100644 --- a/torch/csrc/autograd/profiler_kineto.h +++ b/torch/csrc/autograd/profiler_kineto.h @@ -23,7 +23,7 @@ using extra_meta_t = std::unordered_map; struct TORCH_API KinetoEvent { KinetoEvent( - const std::shared_ptr&, + const std::shared_ptr& /*result*/, const bool verbose); uint64_t startThreadId() const; @@ -63,7 +63,7 @@ struct TORCH_API KinetoEvent { bool isPythonFunction() const; int64_t cudaElapsedUs() const; int64_t privateuse1ElapsedUs() const; - void getPerfEventCounters(torch::profiler::perf_counters_t&) const; + void getPerfEventCounters(torch::profiler::perf_counters_t& /*in*/) const; extra_meta_t extraMeta() const; std::string metadataJson() const; diff --git a/torch/csrc/autograd/profiler_legacy.h b/torch/csrc/autograd/profiler_legacy.h index 
cd571d70f1fa..30a9fb96f258 100644 --- a/torch/csrc/autograd/profiler_legacy.h +++ b/torch/csrc/autograd/profiler_legacy.h @@ -328,7 +328,7 @@ struct TORCH_API ProfilerDisableOptions { // NOTE: profiler mode is thread local, with automatic propagation // across thread boundary (e.g. at::launch tasks) TORCH_API void enableProfilerLegacy( - const torch::profiler::impl::ProfilerConfig&); + const torch::profiler::impl::ProfilerConfig& /*new_config*/); using thread_event_lists = std::vector>; TORCH_API thread_event_lists disableProfilerLegacy( std::optional profilerDisableOptions = diff --git a/torch/csrc/autograd/profiler_python.cpp b/torch/csrc/autograd/profiler_python.cpp index 0e895312cbd1..a45935ecb299 100644 --- a/torch/csrc/autograd/profiler_python.cpp +++ b/torch/csrc/autograd/profiler_python.cpp @@ -365,7 +365,9 @@ std::vector> ValueCache::unpackTensorMap( } template <> -void ValueCache::store(const PyCallKey& key, no_ephemeral_t) { +void ValueCache::store( + const PyCallKey& key, + no_ephemeral_t /*unused*/) { auto& locations = std::get(state_); if (C10_UNLIKELY(locations.find(key) == locations.end())) { locations[key] = { @@ -1432,7 +1434,7 @@ struct PythonIDVisitor { } template - void operator()(T&) {} + void operator()(T& /*unused*/) {} size_t current_python_id_{0}; ska::flat_hash_map> diff --git a/torch/csrc/autograd/python_variable.cpp b/torch/csrc/autograd/python_variable.cpp index 2316c58ac4c7..4d6c618d0fae 100644 --- a/torch/csrc/autograd/python_variable.cpp +++ b/torch/csrc/autograd/python_variable.cpp @@ -686,7 +686,7 @@ static Tensor make_tensor_for_subclass_helper( } static PyObject* THPVariable_make_wrapper_subclass( - PyObject*, + PyObject* /*unused*/, PyObject* args, PyObject* kwargs) { HANDLE_TH_ERRORS @@ -895,7 +895,7 @@ static c10::SymDimVector tuple_to_symintlist(PyObject* obj) { // DTensor-specific variant of make_wrapper_subclass to minimize DTensor // overhead. static PyObject* THPVariable_dtensor_new( - PyObject*, + PyObject* /*unused*/, PyObject* args, PyObject* kwargs) { HANDLE_TH_ERRORS diff --git a/torch/csrc/autograd/variable.h b/torch/csrc/autograd/variable.h index 2ed4a1e8fd5a..697557787b39 100644 --- a/torch/csrc/autograd/variable.h +++ b/torch/csrc/autograd/variable.h @@ -108,31 +108,35 @@ namespace impl { // WARNING: This may return a nullptr. If you require AutogradMeta to return // a materialized structure, use materialize_autograd_meta instead. -TORCH_API AutogradMeta* get_autograd_meta(const at::TensorBase&); +TORCH_API AutogradMeta* get_autograd_meta(const at::TensorBase& /*self*/); // WARNING: This will return a nullptr if the Tensor is not a view. -TORCH_API DifferentiableViewMeta* get_view_autograd_meta(const at::TensorBase&); +TORCH_API DifferentiableViewMeta* get_view_autograd_meta( + const at::TensorBase& /*self*/); // Returns the current autograd meta, materializing it if it was previously // none. This counts as a *mutating* operation, so do not call it on // "read-only" operators; in particular, this is NOT thread safe -TORCH_API AutogradMeta* materialize_autograd_meta(const at::TensorBase&); +TORCH_API AutogradMeta* materialize_autograd_meta( + const at::TensorBase& /*self*/); /// Set the gradient accumulator of the `Variable`. This is only applicable to /// leaf variables. Interior variables should call `set_gradient_edge()`. 
TORCH_API void set_grad_accumulator( - const Variable&, + const Variable& /*self*/, std::weak_ptr grad_accumulator); /// Attempts to get a pointer to the gradient accumulator of the `Variable`, /// if it still exists. If the gradient accumulator function has been /// destroyed, returns a `nullptr`. -TORCH_API std::shared_ptr try_get_grad_accumulator(const Variable&); -TORCH_API std::shared_ptr try_get_grad_accumulator(const at::TensorBase&); +TORCH_API std::shared_ptr try_get_grad_accumulator( + const Variable& /*self*/); +TORCH_API std::shared_ptr try_get_grad_accumulator( + const at::TensorBase& /*self*/); /// Gets the gradient accumulator of the `Variable` if it has one, or else /// create one on the fly and return it. -TORCH_API std::shared_ptr grad_accumulator(const Variable&); +TORCH_API std::shared_ptr grad_accumulator(const Variable& /*self*/); /// Returns the "canonical" gradient edge of this `Variable`, i.e. either the /// gradient function if this is an interior `Variable`, or the gradient @@ -142,7 +146,7 @@ TORCH_API std::shared_ptr grad_accumulator(const Variable&); /// zero. Note that `set_gradient_edge` and `gradient_edge` are not /// symmetric. You must use `set_gradient_edge` to set the `grad_fn` and /// `set_grad_accumulator` to set the accumulator. -TORCH_API Edge gradient_edge(const Variable&); +TORCH_API Edge gradient_edge(const Variable& /*self*/); /// Set the gradient edge -- i.e. `grad_fn` and `input_nr` -- of the /// `Variable`. @@ -150,7 +154,7 @@ TORCH_API Edge gradient_edge(const Variable&); /// and never the `grad_accumulator`. For the latter, use /// `set_grad_accumulator`. This allows late construction of an interior /// `Variable`. -TORCH_API void set_gradient_edge(const Variable&, Edge edge); +TORCH_API void set_gradient_edge(const Variable& /*self*/, Edge edge); // Autograd Graph Interaction //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -161,36 +165,37 @@ TORCH_API void set_gradient_edge(const Variable&, Edge edge); /// For View Variables: /// Called after in-place modifications. Modifies the grad_fn of the base /// Variable. -TORCH_API void rebase_history(const Variable&, Edge gradient_edge); +TORCH_API void rebase_history(const Variable& /*self*/, Edge gradient_edge); /// Gets the raw gradient function pointer, whatever it currently is. -TORCH_API Node* grad_fn_unsafe(const Variable&); +TORCH_API Node* grad_fn_unsafe(const Variable& /*self*/); /// Increments the version count of this `Variable`. -TORCH_API void bump_version(const Variable&); +TORCH_API void bump_version(const Variable& /*self*/); TORCH_API void set_version_counter( - const Variable&, + const Variable& /*self*/, const c10::VariableVersion& version_counter); /// Retrieves this `Variable`s version counter. 
-TORCH_API const c10::VariableVersion& version_counter(const Variable&); +TORCH_API const c10::VariableVersion& version_counter(const Variable& /*self*/); -TORCH_API void set_name(const Variable&, const std::string& name); +TORCH_API void set_name(const Variable& /*self*/, const std::string& name); TORCH_API void add_hook( - const at::TensorBase&, + const at::TensorBase& /*self*/, std::unique_ptr hook); -TORCH_API std::vector>& hooks(const Variable&); -TORCH_API void clear_hooks(const at::TensorBase&); +TORCH_API std::vector>& hooks( + const Variable& /*self*/); +TORCH_API void clear_hooks(const at::TensorBase& /*self*/); TORCH_API void set_post_acc_grad_hooks( - const at::TensorBase&, + const at::TensorBase& /*self*/, std::unique_ptr dict); TORCH_API std::unique_ptr& post_acc_grad_hooks( - const Variable&); + const Variable& /*self*/); TORCH_API void create_cpp_hook( - const at::TensorBase&, + const at::TensorBase& /*self*/, bool is_retains_grad_hooks = false); } // namespace impl @@ -373,12 +378,12 @@ struct TORCH_API ViewFunc { /// must match the number of SymInts in the saved state (i.e. the size of the /// list returned by get_symints()). /// NOLINTNEXTLINE(performance-unnecessary-value-param) - virtual void set_symints(std::vector) {} + virtual void set_symints(std::vector /*unused*/) {} /// Sets the values of any Tensors in the saved state. The input vector size /// must match the number of Tensors in the saved state (i.e. the size of the /// list returned by get_tensors()). /// NOLINTNEXTLINE(performance-unnecessary-value-param) - virtual void set_tensors(std::vector) {} + virtual void set_tensors(std::vector /*unused*/) {} }; /// ViewFunc that represents a chain of two ViewFuncs. @@ -396,10 +401,13 @@ struct ChainedViewFunc : public ViewFunc { size_t num_tensors() const override { return first->num_tensors() + second->num_tensors(); } - at::Tensor operator()(const at::Tensor&) const override; + at::Tensor operator()( + const at::Tensor& /*input_base*/ /*unused*/) const override; std::unique_ptr clone_and_set( - std::optional> = std::nullopt, - std::optional> = std::nullopt) const override; + std::optional> /*symints*/ /*unused*/ = + std::nullopt, + std::optional> /*tensors*/ /*unused*/ = + std::nullopt) const override; private: std::unique_ptr first; @@ -410,12 +418,13 @@ struct ChainedViewFunc : public ViewFunc { struct ErroringViewFunc : public ViewFunc { ErroringViewFunc(std::string error_msg) : error_msg(std::move(error_msg)) {} ~ErroringViewFunc() override = default; - at::Tensor operator()(const at::Tensor&) const override { + at::Tensor operator()(const at::Tensor& /*unused*/) const override { TORCH_CHECK(false, error_msg); } std::unique_ptr clone_and_set( - std::optional> = std::nullopt, - std::optional> = std::nullopt) const override { + std::optional> /*unused*/ = std::nullopt, + std::optional> /*unused*/ = + std::nullopt) const override { return std::make_unique(error_msg); } @@ -923,19 +932,24 @@ inline Variable make_variable( } struct VariableHooks final : at::impl::VariableHooksInterface { - at::TensorBase tensor_data(const at::TensorBase&) const override; - at::TensorBase variable_data(const at::TensorBase&) const override; + at::TensorBase tensor_data( + const at::TensorBase& /*self*/ /*unused*/) const override; + at::TensorBase variable_data( + const at::TensorBase& /*self*/ /*unused*/) const override; const std::shared_ptr& grad_fn( - const at::TensorBase&) const override; + const at::TensorBase& /*self*/ /*unused*/) const override; unsigned _register_hook( - 
const at::TensorBase&, + const at::TensorBase& /*self*/ /*unused*/, std::function hook) const override; - void remove_hook(const at::TensorBase&, unsigned pos) const override; - bool is_view(const at::TensorBase&) const override; - const at::TensorBase& base(const at::TensorBase&) const override; - const std::string& name(const at::TensorBase&) const override; - bool is_leaf(const at::TensorBase&) const override; - int64_t output_nr(const at::TensorBase&) const override; + void remove_hook(const at::TensorBase& /*self*/ /*unused*/, unsigned pos) + const override; + bool is_view(const at::TensorBase& /*self*/ /*unused*/) const override; + const at::TensorBase& base( + const at::TensorBase& /*self*/ /*unused*/) const override; + const std::string& name( + const at::TensorBase& /*self*/ /*unused*/) const override; + bool is_leaf(const at::TensorBase& /*self*/ /*unused*/) const override; + int64_t output_nr(const at::TensorBase& /*self*/ /*unused*/) const override; void set_data(const at::TensorBase& self, const at::TensorBase& new_data) const override; at::TensorBase data(const at::TensorBase& self) const override; @@ -955,10 +969,11 @@ struct VariableHooks final : at::impl::VariableHooksInterface { c10::DispatchKeySet dispatch_keys, torch::jit::Stack* stack) const override; std::optional grad_dtype( - const at::TensorBase&) const override; + const at::TensorBase& /*self*/ /*unused*/) const override; void set_grad_dtype( - const at::TensorBase&, - const std::optional&) const override; + const at::TensorBase& /*self*/ /*unused*/, + const std::optional& /*grad_dtype*/ /*unused*/) + const override; }; namespace utils { diff --git a/torch/csrc/distributed/c10d/Work.hpp b/torch/csrc/distributed/c10d/Work.hpp index 9e242d6faf9b..2eeea75330fd 100644 --- a/torch/csrc/distributed/c10d/Work.hpp +++ b/torch/csrc/distributed/c10d/Work.hpp @@ -135,7 +135,7 @@ class TORCH_API Work : public torch::CustomClassHolder { OpType retrieveOpType() const; static c10::intrusive_ptr create_from_future( - const c10::intrusive_ptr&); + const c10::intrusive_ptr& /*future*/); protected: // Completes the work object and optionally sets the exception in a diff --git a/torch/csrc/distributed/c10d/logger.hpp b/torch/csrc/distributed/c10d/logger.hpp index cd562af7473a..75f8b2998f35 100644 --- a/torch/csrc/distributed/c10d/logger.hpp +++ b/torch/csrc/distributed/c10d/logger.hpp @@ -153,7 +153,7 @@ class TORCH_API C10dLogger { virtual ~C10dLogger() = default; virtual void log(const C10dLoggingData& data); static C10dLogger* getLogger(); - static void registerLogger(std::unique_ptr); + static void registerLogger(std::unique_ptr /*logger*/); protected: // singletion, hide constructor from the public diff --git a/torch/csrc/distributed/rpc/rref_context.h b/torch/csrc/distributed/rpc/rref_context.h index ce3b71580ab6..5a3fff5d6722 100644 --- a/torch/csrc/distributed/rpc/rref_context.h +++ b/torch/csrc/distributed/rpc/rref_context.h @@ -225,7 +225,7 @@ class TORCH_API RRefContext { c10::intrusive_ptr confirmationFuture_; }; - RRefContext(std::shared_ptr); + RRefContext(std::shared_ptr /*agent*/); c10::intrusive_ptr createUserRRef( worker_id_t ownerId, diff --git a/torch/csrc/distributed/rpc/tensorpipe_agent.h b/torch/csrc/distributed/rpc/tensorpipe_agent.h index e6f4d66af138..a1d449fba549 100644 --- a/torch/csrc/distributed/rpc/tensorpipe_agent.h +++ b/torch/csrc/distributed/rpc/tensorpipe_agent.h @@ -232,11 +232,11 @@ class TORCH_API TensorPipeAgent : public RpcAgent { // messages by server, and write request messages by client. 
This // is a protected method since it is overwritten by FaultyTensorPipeAgent virtual void pipeWrite( - const std::shared_ptr&, + const std::shared_ptr& /*pipe*/, const c10::intrusive_ptr& message, std::vector&& devices, std::vector streams, - std::function) noexcept; + std::function /*fn*/) noexcept; private: // Removes the given messageId with the given expirationTime from the @@ -257,11 +257,11 @@ class TORCH_API TensorPipeAgent : public RpcAgent { // TensorPipe read function that could be used to read response messages // by client, and read request messages by server. void pipeRead( - const std::shared_ptr&, + const std::shared_ptr& /*pipe*/, std::function, - std::vector)>) noexcept; + std::vector)> /*fn*/) noexcept; // Callback of listener accept() void onListenerAccepted( diff --git a/torch/csrc/distributed/rpc/tensorpipe_utils.h b/torch/csrc/distributed/rpc/tensorpipe_utils.h index 9021bc11c86a..cfb0bad8bdad 100644 --- a/torch/csrc/distributed/rpc/tensorpipe_utils.h +++ b/torch/csrc/distributed/rpc/tensorpipe_utils.h @@ -49,8 +49,8 @@ extern TORCH_API std::array< class TORCH_API TensorpipeDeviceTypeConverterRegistrar { public: TensorpipeDeviceTypeConverterRegistrar( - DeviceType, - const TensorpipeDeviceTypeConverter*); + DeviceType /*type*/, + const TensorpipeDeviceTypeConverter* /*impl*/); }; #define C10_REGISTER_TENSORPIPE_DEVICE_TYPE_CONVERTER( \ diff --git a/torch/csrc/distributed/rpc/types.h b/torch/csrc/distributed/rpc/types.h index 863ccb6d6c8f..665d26a87c9e 100644 --- a/torch/csrc/distributed/rpc/types.h +++ b/torch/csrc/distributed/rpc/types.h @@ -32,7 +32,7 @@ struct TORCH_API GloballyUniqueId final { bool operator!=(const GloballyUniqueId& other) const; at::IValue toIValue() const; - static GloballyUniqueId fromIValue(const at::IValue&); + static GloballyUniqueId fromIValue(const at::IValue& /*ivalue*/); struct Hash { size_t operator()(const GloballyUniqueId& key) const { diff --git a/torch/csrc/inductor/aoti_eager/kernel_holder.h b/torch/csrc/inductor/aoti_eager/kernel_holder.h index 8459b35c6837..1575481148a0 100644 --- a/torch/csrc/inductor/aoti_eager/kernel_holder.h +++ b/torch/csrc/inductor/aoti_eager/kernel_holder.h @@ -105,7 +105,7 @@ class AOTIPythonKernelHolder : public c10::OperatorKernel { void init_aoti_kernel_cache(); // Load the AOTIModelContainerRunner object from the given file path. 
std::shared_ptr load_aoti_model_runner( - const std::string&); + const std::string& /*so_path*/); }; } // namespace torch::inductor diff --git a/torch/csrc/inductor/aoti_runtime/utils.h b/torch/csrc/inductor/aoti_runtime/utils.h index 49255a858d4d..7d1938f1c606 100644 --- a/torch/csrc/inductor/aoti_runtime/utils.h +++ b/torch/csrc/inductor/aoti_runtime/utils.h @@ -40,7 +40,7 @@ namespace torch::aot_inductor { using DeleterFnPtr = void (*)(void*); -inline void noop_deleter(void*) {} +inline void noop_deleter(void* /*unused*/) {} inline void delete_record_function_object(void* ptr) { AOTI_TORCH_ERROR_CODE_CHECK(aoti_record_function_end( diff --git a/torch/csrc/jit/api/function_impl.cpp b/torch/csrc/jit/api/function_impl.cpp index 820ecef66a89..0c911970347b 100644 --- a/torch/csrc/jit/api/function_impl.cpp +++ b/torch/csrc/jit/api/function_impl.cpp @@ -62,7 +62,7 @@ T& toGraphFunctionImpl(F& function) { } // namespace -static void placeholderCreator(GraphFunction&) { +static void placeholderCreator(GraphFunction& /*unused*/) { throw RecursiveMethodCallError(); } diff --git a/torch/csrc/jit/api/function_impl.h b/torch/csrc/jit/api/function_impl.h index f508f3e5d522..298ff1957c11 100644 --- a/torch/csrc/jit/api/function_impl.h +++ b/torch/csrc/jit/api/function_impl.h @@ -173,8 +173,8 @@ struct TORCH_API GraphFunction : public Function { }; // Short hands for dynamic_cast. -TORCH_API GraphFunction* tryToGraphFunction(Function&) noexcept; -TORCH_API GraphFunction& toGraphFunction(Function&); -TORCH_API const GraphFunction& toGraphFunction(const Function&); +TORCH_API GraphFunction* tryToGraphFunction(Function& /*function*/) noexcept; +TORCH_API GraphFunction& toGraphFunction(Function& /*function*/); +TORCH_API const GraphFunction& toGraphFunction(const Function& /*function*/); } // namespace torch::jit C10_DECLARE_bool(torch_jit_do_not_store_optimized_graph); diff --git a/torch/csrc/jit/api/method.h b/torch/csrc/jit/api/method.h index d7ef14ddb193..906ef46c1ad6 100644 --- a/torch/csrc/jit/api/method.h +++ b/torch/csrc/jit/api/method.h @@ -65,7 +65,9 @@ struct TORCH_API Method : public torch::IMethod { } private: - void setArgumentNames(std::vector&) const override; + void setArgumentNames( + std::vector& /*argumentNames*/ /*argumentNamesOut*/) + const override; // Methods are uniqued owned by a single module. This raw pointer allows // looking up the module. 
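Note (annotation, not part of the patch): several hunks above comment out parameters that exist only for template-argument deduction — AnyModule::make_holder and AnyModule::get_ in container/any.h take a pointer-to-member-function whose value is never read; it is there only so ReturnType and ArgumentTypes... can be deduced. A small self-contained sketch of that idiom; Linear and describe are hypothetical names, not from the patch.

#include <cstdio>

struct Linear {
  int forward(int x) { return 2 * x; }
};

// Like AnyModule::get_ above: the pointer-to-member parameter is a pure
// deduction tag -- its value is never read, so its name is commented out.
template <typename ModuleType, typename ReturnType, typename... ArgumentTypes>
const char* describe(ReturnType (ModuleType::* /*unused*/)(ArgumentTypes...)) {
  return "signature deduced from an unused pointer-to-member tag";
}

int main() {
  // ModuleType, ReturnType, and ArgumentTypes are all deduced from the
  // type of &Linear::forward, i.e. int (Linear::*)(int).
  std::printf("%s\n", describe(&Linear::forward));
  return 0;
}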
diff --git a/torch/csrc/jit/api/module.h b/torch/csrc/jit/api/module.h index 52cec12fb859..c9b7793c89b6 100644 --- a/torch/csrc/jit/api/module.h +++ b/torch/csrc/jit/api/module.h @@ -93,7 +93,7 @@ struct TORCH_API Module : public Object { Module(Module&&) noexcept = default; Module& operator=(Module&&) noexcept = default; Module( - c10::QualifiedName, + c10::QualifiedName /*class_name*/, std::shared_ptr cu, bool shouldMangle = false); Module(ModulePtr module_value) : Object(std::move(module_value)) {} diff --git a/torch/csrc/jit/codegen/cuda/interface.h b/torch/csrc/jit/codegen/cuda/interface.h index 926e4cb5d265..2223c9b47b27 100644 --- a/torch/csrc/jit/codegen/cuda/interface.h +++ b/torch/csrc/jit/codegen/cuda/interface.h @@ -38,7 +38,7 @@ TORCH_API CudaFuserInterface* getFuserInterface(); TORCH_API void compileFusionGroup(Node* fusion_node); TORCH_API void runFusionGroup(const Node* fusion_node, Stack& stack); -TORCH_API void fuseGraph(std::shared_ptr&); +TORCH_API void fuseGraph(std::shared_ptr& /*graph*/); TORCH_API bool canFuseNode(const Node* node); TORCH_API void InsertProfileNodesForCUDAFuser(ProfilingRecord* pr); TORCH_API bool profileNode(const Node* node); diff --git a/torch/csrc/jit/frontend/tracer.h b/torch/csrc/jit/frontend/tracer.h index dbfc6faa88c4..58f6260145da 100644 --- a/torch/csrc/jit/frontend/tracer.h +++ b/torch/csrc/jit/frontend/tracer.h @@ -388,7 +388,7 @@ template < !std::is_convertible_v< std::decay_t, c10::intrusive_ptr>)>> -void addOutput(Node* node, T&&) { +void addOutput(Node* node, T&& /*unused*/) { TORCH_CHECK( false, "Found an unsupported argument type ", diff --git a/torch/csrc/jit/ir/scope.h b/torch/csrc/jit/ir/scope.h index 51baee8e277c..f94110508e87 100644 --- a/torch/csrc/jit/ir/scope.h +++ b/torch/csrc/jit/ir/scope.h @@ -190,7 +190,7 @@ struct TORCH_API InlinedCallStack : public c10::intrusive_ptr_target { // Return callstack as a vector of [Function, SourceRange] pairs. 
std::vector vec(); - void setCallee(std::optional); + void setCallee(std::optional /*callee*/); bool operator==(const InlinedCallStack& rhs) const { // No need to compare fn_, since source_range equivalence check diff --git a/torch/csrc/jit/mobile/flatbuffer_loader.cpp b/torch/csrc/jit/mobile/flatbuffer_loader.cpp index 4d9505ee21a9..103fadaf3a57 100644 --- a/torch/csrc/jit/mobile/flatbuffer_loader.cpp +++ b/torch/csrc/jit/mobile/flatbuffer_loader.cpp @@ -154,34 +154,34 @@ class FlatbufferLoader final { }; IValue parseList( - FlatbufferLoader&, + FlatbufferLoader& /*loader*/, const mobile::serialization::IValue& ivalue); IValue parseTensor( - FlatbufferLoader&, + FlatbufferLoader& /*loader*/, const mobile::serialization::IValue& ivalue); IValue parseTuple( - FlatbufferLoader&, + FlatbufferLoader& /*loader*/, const mobile::serialization::IValue& ivalue); IValue parseDict( - FlatbufferLoader&, + FlatbufferLoader& /*loader*/, const mobile::serialization::IValue& ivalue); IValue parseObject( - FlatbufferLoader&, + FlatbufferLoader& /*loader*/, const mobile::serialization::IValue& ivalue); IValue parseIntList( - FlatbufferLoader&, + FlatbufferLoader& /*unused*/, const mobile::serialization::IValue& ivalue); IValue parseDoubleList( - FlatbufferLoader&, + FlatbufferLoader& /*unused*/, const mobile::serialization::IValue& ivalue); IValue parseBoolList( - FlatbufferLoader&, + FlatbufferLoader& /*unused*/, const mobile::serialization::IValue& ivalue); IValue parseBasic( - FlatbufferLoader&, + FlatbufferLoader& /*unused*/, const mobile::serialization::IValue& ivalue); IValue parseEnum( - FlatbufferLoader&, + FlatbufferLoader& /*loader*/, const mobile::serialization::IValue& ivalue); TypePtr resolveType( @@ -442,7 +442,7 @@ IValue parseEnum( } IValue parseBasic( - FlatbufferLoader&, + FlatbufferLoader& /*unused*/, const mobile::serialization::IValue& ivalue) { switch (ivalue.val_type()) { case mobile::serialization::IValueUnion::NONE: @@ -546,21 +546,21 @@ std::vector parseListNative(const U* list) { } IValue parseIntList( - FlatbufferLoader&, + FlatbufferLoader& /*unused*/, const mobile::serialization::IValue& ivalue) { const auto& list = ivalue.val_as_IntList(); return parseListNative(list); } IValue parseDoubleList( - FlatbufferLoader&, + FlatbufferLoader& /*unused*/, const mobile::serialization::IValue& ivalue) { const auto& list = ivalue.val_as_DoubleList(); return parseListNative(list); } IValue parseBoolList( - FlatbufferLoader&, + FlatbufferLoader& /*unused*/, const mobile::serialization::IValue& ivalue) { const auto& list = ivalue.val_as_BoolList(); std::vector res = parseListNative(list); @@ -690,8 +690,8 @@ IValue FlatbufferLoader::parseIValue( *this, *ivalue); } -void deleteNothing2(void*); -void deleteNothing2(void*) {} +void deleteNothing2(void* /*unused*/); +void deleteNothing2(void* /*unused*/) {} c10::Storage FlatbufferLoader::getStorage(uint32_t index) { TORCH_CHECK(index < storage_loaded_.size()); @@ -760,7 +760,7 @@ void FlatbufferLoader::extractJitSourceAndConstants( mobile::Module parse_and_initialize_mobile_module( void* data, size_t size, - std::optional, + std::optional /*unused*/, ExtraFilesMap* extra_files, bool should_copy_tensor_memory) { // TODO(T128189662): If not copying, enforce that data is aligned to @@ -806,7 +806,7 @@ mobile::Module parse_and_initialize_mobile_module_for_jit( size_t size, ExtraFilesMap& jit_sources, std::vector& jit_constants, - std::optional, + std::optional /*unused*/, ExtraFilesMap* extra_files) { TORCH_CHECK( 
mobile::serialization::ModuleBufferHasIdentifier(data), "Format error"); diff --git a/torch/csrc/jit/mobile/function.cpp b/torch/csrc/jit/mobile/function.cpp index ed807f8c073b..87128a180a6d 100644 --- a/torch/csrc/jit/mobile/function.cpp +++ b/torch/csrc/jit/mobile/function.cpp @@ -149,7 +149,9 @@ size_t Function::num_inputs() const { return schema_->arguments().size(); } -bool Function::call(Stack&, c10::function_ref f) { +bool Function::call( + Stack& /*unused*/, + c10::function_ref f) { initialize_operators(true); f(code_); return true; diff --git a/torch/csrc/jit/mobile/function.h b/torch/csrc/jit/mobile/function.h index 5e0824f880b2..1f0f90d34561 100644 --- a/torch/csrc/jit/mobile/function.h +++ b/torch/csrc/jit/mobile/function.h @@ -26,7 +26,9 @@ class TORCH_API Function : public torch::jit::Function { void ensure_defined() override {} size_t num_inputs() const override; const c10::QualifiedName& qualname() const override; - bool call(Stack&, c10::function_ref) override; + bool call( + Stack& /*unused*/, + c10::function_ref /*f*/ /*unused*/) override; // NOTE: the APIs below is dangerous: if you call append_instruction with // dbg_handle and then call it without; then the dbg_handle will become diff --git a/torch/csrc/jit/mobile/interpreter.h b/torch/csrc/jit/mobile/interpreter.h index e67595c06b57..48755954e04b 100644 --- a/torch/csrc/jit/mobile/interpreter.h +++ b/torch/csrc/jit/mobile/interpreter.h @@ -12,7 +12,7 @@ struct InterpreterState { TORCH_API bool run(Stack& stack); private: - void enterFrame(const Code&); + void enterFrame(const Code& /*code*/); void leaveFrame(); void saveExceptionDebugHandles(); void callFunction(torch::jit::Function& f, Stack& stack); diff --git a/torch/csrc/jit/mobile/observer.h b/torch/csrc/jit/mobile/observer.h index 694fe1df82c1..4b22af1fda41 100644 --- a/torch/csrc/jit/mobile/observer.h +++ b/torch/csrc/jit/mobile/observer.h @@ -67,26 +67,28 @@ class MobileModuleObserver { public: virtual ~MobileModuleObserver() = default; - virtual void onEnterRunMethod(const int32_t) {} + virtual void onEnterRunMethod(const int32_t /*unused*/) {} virtual void onExitRunMethod( - const std::unordered_map&, - const std::string&, - const int32_t) {} + const std::unordered_map& /*unused*/, + const std::string& /*unused*/, + const int32_t /*unused*/) {} virtual void onFailRunMethod( - const std::unordered_map&, - const std::string&, - const int32_t, - const char*) {} - virtual void onEnterLoadModel(const int32_t) {} + const std::unordered_map& /*unused*/, + const std::string& /*unused*/, + const int32_t /*unused*/, + const char* /*unused*/) {} + virtual void onEnterLoadModel(const int32_t /*unused*/) {} virtual void onExitLoadModel( - const int32_t, - const std::unordered_map&) { + const int32_t /*unused*/, + const std::unordered_map& /*unused*/) { } // key: filename, value: file content - virtual void onFailLoadModel(const int32_t, const char*) {} virtual void onFailLoadModel( - const int32_t, - const char*, - const std::unordered_map&) {} + const int32_t /*unused*/, + const char* /*unused*/) {} + virtual void onFailLoadModel( + const int32_t /*unused*/, + const char* /*unused*/, + const std::unordered_map& /*unused*/) {} virtual std::vector getDefaultExtraFiles() = 0; virtual std::unordered_map processMetadataFromExtra( const std::unordered_map&) = 0; diff --git a/torch/csrc/jit/passes/onnx/function_extraction.cpp b/torch/csrc/jit/passes/onnx/function_extraction.cpp index 32c0e1b77c2c..7901b44bb85f 100644 --- a/torch/csrc/jit/passes/onnx/function_extraction.cpp 
+++ b/torch/csrc/jit/passes/onnx/function_extraction.cpp @@ -87,14 +87,14 @@ struct FunctionExtractor { const std::shared_ptr& graph); static void HandleNoScopeNodes( - scope_ctx_map&, + scope_ctx_map& /*scope_ctxs*/, const node_list& no_scope_nlist); std::tuple PartitionNodesByScope(Block* b); scope_ctx_map PartitionNodesByScope(const std::shared_ptr& graph); static std::unordered_map PartitionIdenticalScopes( scope_ctx_map& scope_ctxs); static scope_list SortScopesByMaxDepth( - std::unordered_map&); + std::unordered_map& /*identical_scope_map*/); Node* CreateFunctionDefNode( FunctionContext& func_ctx, const std::shared_ptr& graph, @@ -107,7 +107,7 @@ struct FunctionExtractor { const std::string& domain_name, const std::string& func_name); - static void DebugPrintScopeContexts(const scope_ctx_map&); + static void DebugPrintScopeContexts(const scope_ctx_map& /*scope_ctxs*/); static void DebugPrintGraphWithFunction(const std::shared_ptr& g); static void DebugPrintConstantDiff(const FunctionContext&); diff --git a/torch/csrc/jit/passes/onnx/naming.cpp b/torch/csrc/jit/passes/onnx/naming.cpp index 692d60a2d3d4..034c73beb4c7 100644 --- a/torch/csrc/jit/passes/onnx/naming.cpp +++ b/torch/csrc/jit/passes/onnx/naming.cpp @@ -85,7 +85,7 @@ class NodeNameGenerator { protected: virtual void CreateNodeName(Node* n) = 0; - void PopulateNodeNames(Block*); + void PopulateNodeNames(Block* /*b*/); void UpdateOutputsNames(Node* n); bool IsGraphOutput(const Value* v, const std::shared_ptr& graph) const; diff --git a/torch/csrc/jit/passes/quantization/insert_quant_dequant.cpp b/torch/csrc/jit/passes/quantization/insert_quant_dequant.cpp index 2e39bf67bf5f..8df57982bc33 100644 --- a/torch/csrc/jit/passes/quantization/insert_quant_dequant.cpp +++ b/torch/csrc/jit/passes/quantization/insert_quant_dequant.cpp @@ -750,7 +750,7 @@ class InsertQuantDeQuantHelper { } } - void collectObserverNodesAndValueToQuantize(Module& module, Value*); + void collectObserverNodesAndValueToQuantize(Module& module, Value* /*v*/); void cleanup(Module& module, Graph* g); void removeObserverNodes(Graph* g); void quantizeTensors(Module& module, Graph* g, Value* self); diff --git a/torch/csrc/jit/python/pybind.h b/torch/csrc/jit/python/pybind.h index 5bab3878f3b4..066ff7f77f56 100644 --- a/torch/csrc/jit/python/pybind.h +++ b/torch/csrc/jit/python/pybind.h @@ -113,7 +113,7 @@ struct type_caster { // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) PYBIND11_TYPE_CASTER(torch::jit::IValue, _("IValue")); - bool load(handle src, bool) { + bool load(handle src, bool /*unused*/) { try { value = torch::jit::toTypeInferredIValue(src); return true; @@ -136,7 +136,7 @@ struct type_caster { // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) PYBIND11_TYPE_CASTER(torch::jit::Symbol, _("Symbol")); - bool load(handle src, bool) { + bool load(handle src, bool /*unused*/) { // TODO: Is there a way to py::cast that doesn't raise an exception on // failure? Can we catch pybind11::cast_error here instead? 
std::string src_str; @@ -164,7 +164,7 @@ struct type_caster { // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) PYBIND11_TYPE_CASTER(torch::jit::AttributeKind, _("AttributeKind")); - bool load(handle src, bool) { + bool load(handle src, bool /*unused*/) { return false; } @@ -186,7 +186,7 @@ template <> struct type_caster> : ListCasterBase { static handle cast( const std::vector& src, - return_value_policy, + return_value_policy /*unused*/, handle parent) { return ListCasterBase::cast(src, return_value_policy::reference, parent); } diff --git a/torch/csrc/jit/runtime/jit_trace.cpp b/torch/csrc/jit/runtime/jit_trace.cpp index b25088b32eca..45be4fe21bb4 100644 --- a/torch/csrc/jit/runtime/jit_trace.cpp +++ b/torch/csrc/jit/runtime/jit_trace.cpp @@ -62,7 +62,10 @@ void eraseAllOutputs(Node* opt_pn) { } } -void insertTracingNodes(Block*, ProfilingRecord*, TracingData&); +void insertTracingNodes( + Block* /*block*/, + ProfilingRecord* /*pr*/, + TracingData& /*td*/); // The subtlety in `createPropNodeForIfBlock` is that we need to create // a "propagate" node that will propagate the mapping between the outputs diff --git a/torch/csrc/jit/runtime/profiling_record.h b/torch/csrc/jit/runtime/profiling_record.h index c45dcde7b0bf..0dfdb246dd68 100644 --- a/torch/csrc/jit/runtime/profiling_record.h +++ b/torch/csrc/jit/runtime/profiling_record.h @@ -81,7 +81,8 @@ namespace torch::jit { using ::c10::TensorTypePtr; using Dimension = int64_t; -TORCH_API void RegisterProfilingNode(const std::function&); +TORCH_API void RegisterProfilingNode( + const std::function& /*func*/); struct ProfilingRecord; diff --git a/torch/csrc/jit/runtime/register_ops_utils.h b/torch/csrc/jit/runtime/register_ops_utils.h index 340b597280a6..7578ea6b1f99 100644 --- a/torch/csrc/jit/runtime/register_ops_utils.h +++ b/torch/csrc/jit/runtime/register_ops_utils.h @@ -418,8 +418,8 @@ struct OperatorGeneratorArgs { template explicit constexpr OperatorGeneratorArgs( - torch::detail::SelectiveStr, - Args...) + torch::detail::SelectiveStr /*unused*/, + Args... 
/*unused*/) : schema_str(nullptr), isOperationCreator(false), operation(nullptr), diff --git a/torch/csrc/jit/runtime/script_profile.h b/torch/csrc/jit/runtime/script_profile.h index 8061d6fc8597..6c6588b2cec4 100644 --- a/torch/csrc/jit/runtime/script_profile.h +++ b/torch/csrc/jit/runtime/script_profile.h @@ -24,7 +24,7 @@ struct Datapoint { class TORCH_API InstructionSpan { public: - explicit InstructionSpan(Node&); + explicit InstructionSpan(Node& /*node*/); ~InstructionSpan(); InstructionSpan(InstructionSpan&&) = delete; InstructionSpan& operator=(InstructionSpan&&) = delete; @@ -91,7 +91,7 @@ class TORCH_API ScriptProfile : public CustomClassHolder { void enable(); void disable(); const SourceMap& dumpStats(); - void addDatapoint(std::shared_ptr); + void addDatapoint(std::shared_ptr /*datapoint*/); ~ScriptProfile() override; private: diff --git a/torch/csrc/jit/runtime/static/ops.h b/torch/csrc/jit/runtime/static/ops.h index 7b4b00e7e8ea..69fbfc7d58fa 100644 --- a/torch/csrc/jit/runtime/static/ops.h +++ b/torch/csrc/jit/runtime/static/ops.h @@ -22,7 +22,7 @@ namespace torch::jit { using SROpFunctor = SROperator (*)(Node* n); struct SROperatorFunctor { - virtual SROperator Generate(Node*) { + virtual SROperator Generate(Node* /*unused*/) { SROperator out; return out; } @@ -165,7 +165,7 @@ inline void LogAndDumpSchema(const Node* node) { VLOG(1) << "Found schema mismatch for: " << node->schema(); } -inline bool sr_schema_check(torch::jit::Node*) { +inline bool sr_schema_check(torch::jit::Node* /*unused*/) { return true; } diff --git a/torch/csrc/jit/tensorexpr/cpp_codegen.h b/torch/csrc/jit/tensorexpr/cpp_codegen.h index d8a46fa7893a..6b6011b66a37 100644 --- a/torch/csrc/jit/tensorexpr/cpp_codegen.h +++ b/torch/csrc/jit/tensorexpr/cpp_codegen.h @@ -26,35 +26,35 @@ class TORCH_API CppPrinter : public IRPrinter { using IRPrinter::visit; // Binary expressions. - void visit(const ModPtr&) override; - void visit(const MaxPtr&) override; - void visit(const MinPtr&) override; + void visit(const ModPtr& /*v*/) override; + void visit(const MaxPtr& /*v*/) override; + void visit(const MinPtr& /*v*/) override; // Conditional expressions. - void visit(const CompareSelectPtr&) override; - void visit(const IfThenElsePtr&) override; + void visit(const CompareSelectPtr& /*v*/) override; + void visit(const IfThenElsePtr& /*v*/) override; // Tensor operations. - void visit(const AllocatePtr&) override; - void visit(const FreePtr&) override; - void visit(const LoadPtr&) override; - void visit(const StorePtr&) override; + void visit(const AllocatePtr& /*v*/) override; + void visit(const FreePtr& /*v*/) override; + void visit(const LoadPtr& /*v*/) override; + void visit(const StorePtr& /*v*/) override; // Casts. - void visit(const CastPtr&) override; - void visit(const BitCastPtr&) override; + void visit(const CastPtr& /*v*/) override; + void visit(const BitCastPtr& /*v*/) override; // Calls. - void visit(const IntrinsicsPtr&) override; - void visit(const ExternalCallPtr&) override; + void visit(const IntrinsicsPtr& /*v*/) override; + void visit(const ExternalCallPtr& /*v*/) override; // Vars. - void visit(const LetPtr&) override; - void visit(const VarPtr&) override; + void visit(const LetPtr& /*v*/) override; + void visit(const VarPtr& /*v*/) override; // Vector data types. 
- void visit(const RampPtr&) override; - void visit(const BroadcastPtr&) override; + void visit(const RampPtr& /*v*/) override; + void visit(const BroadcastPtr& /*v*/) override; private: int lane_; diff --git a/torch/csrc/jit/tensorexpr/exceptions.h b/torch/csrc/jit/tensorexpr/exceptions.h index 1241400474a4..9963feccde2b 100644 --- a/torch/csrc/jit/tensorexpr/exceptions.h +++ b/torch/csrc/jit/tensorexpr/exceptions.h @@ -14,8 +14,10 @@ class Stmt; // Forward declarations of functions namespace std { -TORCH_API std::string to_string(const torch::jit::tensorexpr::ExprPtr&); -TORCH_API std::string to_string(const torch::jit::tensorexpr::StmtPtr&); +TORCH_API std::string to_string( + const torch::jit::tensorexpr::ExprPtr& /*expr*/); +TORCH_API std::string to_string( + const torch::jit::tensorexpr::StmtPtr& /*stmt*/); } // namespace std namespace torch::jit::tensorexpr { diff --git a/torch/csrc/jit/tensorexpr/external_functions.cpp b/torch/csrc/jit/tensorexpr/external_functions.cpp index c9aedb115a98..ee43036d77c9 100644 --- a/torch/csrc/jit/tensorexpr/external_functions.cpp +++ b/torch/csrc/jit/tensorexpr/external_functions.cpp @@ -378,7 +378,7 @@ void nnc_aten_quantized_conv1d( int64_t* buf_dims, int64_t* buf_strides, int8_t* buf_dtypes, - int64_t, + int64_t /*unused*/, int64_t* extra_args) { const double x_qscale = ((double*)extra_args)[0]; const int64_t x_qzero = extra_args[1]; @@ -408,7 +408,7 @@ void nnc_aten_quantized_conv1d_out( int64_t* buf_dims, int64_t* buf_strides, int8_t* buf_dtypes, - int64_t, + int64_t /*unused*/, int64_t* extra_args) { const size_t bufs_out_num = 1u; const double x_qscale = ((double*)extra_args)[0]; @@ -442,7 +442,7 @@ void nnc_aten_quantized_conv2d( int64_t* buf_dims, int64_t* buf_strides, int8_t* buf_dtypes, - int64_t, + int64_t /*unused*/, int64_t* extra_args) { const double x_qscale = ((double*)extra_args)[0]; const int64_t x_qzero = extra_args[1]; @@ -470,7 +470,7 @@ void nnc_aten_quantized_conv2d_out( int64_t* buf_dims, int64_t* buf_strides, int8_t* buf_dtypes, - int64_t, + int64_t /*unused*/, int64_t* extra_args) { const size_t bufs_out_num = 1u; const double x_qscale = ((double*)extra_args)[0]; @@ -502,7 +502,7 @@ void nnc_aten_quantized_conv2d_relu( int64_t* buf_dims, int64_t* buf_strides, int8_t* buf_dtypes, - int64_t, + int64_t /*unused*/, int64_t* extra_args) { const double x_qscale = ((double*)extra_args)[0]; const int64_t x_qzero = extra_args[1]; @@ -530,7 +530,7 @@ void nnc_aten_quantized_conv2d_relu_out( int64_t* buf_dims, int64_t* buf_strides, int8_t* buf_dtypes, - int64_t, + int64_t /*unused*/, int64_t* extra_args) { const size_t bufs_out_num = 1u; const double x_qscale = ((double*)extra_args)[0]; @@ -562,7 +562,7 @@ void nnc_aten_quantized_linear( int64_t* buf_dims, int64_t* buf_strides, int8_t* buf_dtypes, - int64_t, + int64_t /*unused*/, int64_t* extra_args) { const double x_qscale = ((double*)extra_args)[0]; const int64_t x_qzero = extra_args[1]; @@ -590,7 +590,7 @@ void nnc_aten_quantized_linear_out( int64_t* buf_dims, int64_t* buf_strides, int8_t* buf_dtypes, - int64_t, + int64_t /*unused*/, int64_t* extra_args) { const size_t bufs_out_num = 1u; const double x_qscale = ((double*)extra_args)[0]; @@ -622,7 +622,7 @@ void nnc_aten_quantized_linear_relu( int64_t* buf_dims, int64_t* buf_strides, int8_t* buf_dtypes, - int64_t, + int64_t /*unused*/, int64_t* extra_args) { const double x_qscale = ((double*)extra_args)[0]; const int64_t x_qzero = extra_args[1]; @@ -651,7 +651,7 @@ void nnc_aten_quantized_add( int64_t* buf_dims, int64_t* 
buf_strides, int8_t* buf_dtypes, - int64_t, + int64_t /*unused*/, int64_t* extra_args) { // TORCH_INTERNAL_ASSERT(tensors.size() == 3); @@ -684,7 +684,7 @@ void nnc_aten_quantized_mul( int64_t* buf_dims, int64_t* buf_strides, int8_t* buf_dtypes, - int64_t, + int64_t /*unused*/, int64_t* extra_args) { const double a_qscale = ((double*)extra_args)[0]; const int64_t a_qzero = extra_args[1]; @@ -714,7 +714,7 @@ void nnc_aten_quantized_mul_out( int64_t* buf_dims, int64_t* buf_strides, int8_t* buf_dtypes, - int64_t, + int64_t /*unused*/, int64_t* extra_args) { const size_t bufs_out_num = 1u; const double a_qscale = ((double*)extra_args)[0]; @@ -748,7 +748,7 @@ void nnc_aten_quantized_mul_scalar( int64_t* buf_dims, int64_t* buf_strides, int8_t* buf_dtypes, - int64_t, + int64_t /*unused*/, int64_t* extra_args) { const double x_qscale = ((double*)extra_args)[0]; const int64_t x_qzero = extra_args[1]; @@ -773,7 +773,7 @@ void nnc_aten_quantized_mul_scalar_out( int64_t* buf_dims, int64_t* buf_strides, int8_t* buf_dtypes, - int64_t, + int64_t /*unused*/, int64_t* extra_args) { const size_t bufs_out_num = 1u; const double x_qscale = ((double*)extra_args)[0]; @@ -802,7 +802,7 @@ void nnc_aten_quantized_relu( int64_t* buf_dims, int64_t* buf_strides, int8_t* buf_dtypes, - int64_t, + int64_t /*unused*/, int64_t* extra_args) { const double x_qscale = ((double*)extra_args)[0]; const int64_t x_qzero = extra_args[1]; @@ -826,7 +826,7 @@ void nnc_aten_quantized_sigmoid( int64_t* buf_dims, int64_t* buf_strides, int8_t* buf_dtypes, - int64_t, + int64_t /*unused*/, int64_t* extra_args) { const double x_qscale = ((double*)extra_args)[0]; const int64_t x_qzero = extra_args[1]; @@ -851,7 +851,7 @@ void nnc_aten_quantized_sigmoid_out( int64_t* buf_dims, int64_t* buf_strides, int8_t* buf_dtypes, - int64_t, + int64_t /*unused*/, int64_t* extra_args) { const double x_qscale = ((double*)extra_args)[0]; const int64_t x_qzero = extra_args[1]; @@ -880,7 +880,7 @@ void nnc_aten_quantized_cat( int64_t* buf_dims, int64_t* buf_strides, int8_t* buf_dtypes, - int64_t, + int64_t /*unused*/, int64_t* extra_args) { std::vector> qdata; const auto in_bufs_num = bufs_num - 1; @@ -914,7 +914,7 @@ void nnc_aten_upsample_nearest2d( int64_t* buf_dims, int64_t* buf_strides, int8_t* buf_dtypes, - int64_t, + int64_t /*unused*/, int64_t* extra_args) { // NOLINTNEXTLINE(facebook-hte-LocalUncheckedArrayBounds) const double x_qscale = ((double*)extra_args)[0]; @@ -956,7 +956,7 @@ void nnc_aten_upsample_nearest2d_out( int64_t* buf_dims, int64_t* buf_strides, int8_t* buf_dtypes, - int64_t, + int64_t /*unused*/, int64_t* extra_args) { const size_t bufs_out_num = 1u; // NOLINTNEXTLINE(facebook-hte-LocalUncheckedArrayBounds) @@ -1008,7 +1008,7 @@ void nnc_aten_quantize_per_tensor( int64_t* buf_dims, int64_t* buf_strides, int8_t* buf_dtypes, - int64_t, + int64_t /*unused*/, int64_t* extra_args) { auto tensors = constructTensors( bufs_num, buf_data, buf_ranks, buf_dims, buf_strides, buf_dtypes); @@ -1028,7 +1028,7 @@ void nnc_aten_quantize_per_tensor_out( int64_t* buf_dims, int64_t* buf_strides, int8_t* buf_dtypes, - int64_t, + int64_t /*unused*/, int64_t* extra_args) { const size_t bufs_out_num = 1u; auto tensors = constructTensors2( @@ -1058,7 +1058,7 @@ void nnc_aten_dequantize( int64_t* buf_dims, int64_t* buf_strides, int8_t* buf_dtypes, - int64_t, + int64_t /*unused*/, int64_t* extra_args) { const double qscale = ((double*)extra_args)[0]; const int64_t qzero = extra_args[1]; @@ -1083,7 +1083,7 @@ void nnc_aten_dequantize_out( int64_t* buf_dims, 
int64_t* buf_strides, int8_t* buf_dtypes, - int64_t, + int64_t /*unused*/, int64_t* extra_args) { const size_t bufs_out_num = 1u; const double qscale = ((double*)extra_args)[0]; @@ -1275,7 +1275,7 @@ void nnc_aten_max_red_out( int64_t* buf_dims, int64_t* buf_strides, int8_t* buf_dtypes, - int64_t, + int64_t /*unused*/, int64_t* extra_args) { size_t bufs_out_num = 1u; auto tensors = constructTensors2( diff --git a/torch/csrc/jit/tensorexpr/ir.h b/torch/csrc/jit/tensorexpr/ir.h index a8ceabe701e7..4f916c118165 100644 --- a/torch/csrc/jit/tensorexpr/ir.h +++ b/torch/csrc/jit/tensorexpr/ir.h @@ -901,13 +901,13 @@ class TORCH_API Intrinsics : public ExprNode<Intrinsics> { }; TORCH_API std::vector<ExprPtr> ExprHandleVectorToExprVector( - const std::vector<ExprHandle>&); + const std::vector<ExprHandle>& /*v*/); TORCH_API std::vector<ExprHandle> ExprVectorToExprHandleVector( - const std::vector<ExprPtr>&); + const std::vector<ExprPtr>& /*v*/); TORCH_API std::vector<VarPtr> VarHandleVectorToVarVector( - const std::vector<VarHandle>&); + const std::vector<VarHandle>& /*v*/); TORCH_API std::vector<VarHandle> VarVectorToVarHandleVector( - const std::vector<VarPtr>&); + const std::vector<VarPtr>& /*v*/); TORCH_API ExprPtr flatten_index( const std::vector<ExprPtr>& dims, const std::vector<ExprPtr>& indices, diff --git a/torch/csrc/jit/tensorexpr/ir_printer.h b/torch/csrc/jit/tensorexpr/ir_printer.h index 1909a40283c7..10ba6f4fdaeb 100644 --- a/torch/csrc/jit/tensorexpr/ir_printer.h +++ b/torch/csrc/jit/tensorexpr/ir_printer.h @@ -15,9 +15,9 @@ class TORCH_API IRPrinter : public IRVisitor { public: explicit IRPrinter(std::ostream& os) : printer_os_(this, os) {} - void print(ExprHandle); - void print(Expr&); - void print(Stmt&); + void print(ExprHandle /*expr*/); + void print(Expr& /*expr*/); + void print(Stmt& /*stmt*/); void visit(const AddPtr& v) override; void visit(const SubPtr& v) override; void visit(const MulPtr& v) override; @@ -105,10 +105,12 @@ class TORCH_API IRPrinter : public IRVisitor { UniqueNameManager name_manager_; }; -TORCH_API std::ostream& operator<<(std::ostream& stream, const Expr&); -TORCH_API std::ostream& operator<<(std::ostream& stream, const ExprHandle&); -TORCH_API std::ostream& operator<<(std::ostream& stream, const Stmt&); -TORCH_API std::ostream& operator<<(std::ostream& stream, const Tensor&); +TORCH_API std::ostream& operator<<(std::ostream& stream, const Expr& /*expr*/); +TORCH_API std::ostream& operator<<( + std::ostream& stream, + const ExprHandle& /*expr*/); +TORCH_API std::ostream& operator<<(std::ostream& stream, const Stmt& /*stmt*/); +TORCH_API std::ostream& operator<<(std::ostream& stream, const Tensor& /*t*/); TORCH_API void print(const ExprPtr& expr); TORCH_API void print(const StmtPtr& stmt); diff --git a/torch/csrc/jit/tensorexpr/ir_verifier.h b/torch/csrc/jit/tensorexpr/ir_verifier.h index e8e887ac80ae..d2043001184f 100644 --- a/torch/csrc/jit/tensorexpr/ir_verifier.h +++ b/torch/csrc/jit/tensorexpr/ir_verifier.h @@ -47,8 +47,8 @@ class TORCH_API IRVerifier : public IRVisitor { void visit(const BlockPtr& v) override; }; -TORCH_API void verify(const StmtPtr&); -TORCH_API void verify(const ExprPtr&); -TORCH_API void verify(const ExprHandle&); +TORCH_API void verify(const StmtPtr& /*s*/); +TORCH_API void verify(const ExprPtr& /*e*/); +TORCH_API void verify(const ExprHandle& /*e*/); } // namespace torch::jit::tensorexpr diff --git a/torch/csrc/jit/tensorexpr/loopnest.h b/torch/csrc/jit/tensorexpr/loopnest.h index 20614fea0bad..802998aaa4b8 100644 --- a/torch/csrc/jit/tensorexpr/loopnest.h +++ b/torch/csrc/jit/tensorexpr/loopnest.h @@ -43,11 +43,11 @@ class TORCH_API LoopNest { return root_stmt_; } -
std::vector<ForPtr> getLoopStmtsFor(const Tensor&) const; - std::vector<ForPtr> getLoopStmtsFor(const BufPtr&) const; - std::vector<ForPtr> getLoopStmtsFor(StmtPtr) const; - StmtPtr getLoopBodyFor(const Tensor&) const; - StmtPtr getLoopBodyFor(BufPtr) const; + std::vector<ForPtr> getLoopStmtsFor(const Tensor& /*t*/) const; + std::vector<ForPtr> getLoopStmtsFor(const BufPtr& /*buf*/) const; + std::vector<ForPtr> getLoopStmtsFor(StmtPtr /*s*/) const; + StmtPtr getLoopBodyFor(const Tensor& /*t*/) const; + StmtPtr getLoopBodyFor(BufPtr /*buf*/) const; // Returns the For stmt indexed by 'indices' in the 'root' For stmt. //'indices' indicates the path to the returned loop from 'root' in AST, e.g., @@ -77,7 +77,7 @@ class TORCH_API LoopNest { static std::vector<ForPtr> getEnclosingLoopNest(const StmtPtr& st); // Returns a list of all Stmts that write to the given buf. - std::vector<StmtPtr> getAllWritesToBuf(BufPtr) const; + std::vector<StmtPtr> getAllWritesToBuf(BufPtr /*buf*/) const; // The following methods return the For loops that contain writes to // the given buf. @@ -97,13 +97,14 @@ class TORCH_API LoopNest { // to buf. // For the above example: // getAllInnermostLoopsWritingToBuf(a) => {j1, k2, j3} - std::vector<ForPtr> getAllInnermostLoopsWritingToBuf(BufPtr) const; + std::vector<ForPtr> getAllInnermostLoopsWritingToBuf(BufPtr /*buf*/) const; // Returns a list of For loopnests which contain a Stmt that writes to // the given buf. Each loopnest here is a vector of For loops. // For the above example: // getAllLoopNestsWritingToBuf(a) => {{i1,j1}, {i2,j2,k2}, {i2,j3}} - std::vector<std::vector<ForPtr>> getAllLoopNestsWritingToBuf(BufPtr) const; + std::vector<std::vector<ForPtr>> getAllLoopNestsWritingToBuf( + BufPtr /*buf*/) const; StmtPtr simplify(); @@ -561,7 +562,7 @@ class TORCH_API LoopNest { // Vectorize the given loop. This method requires that the given loop // does not perform a reduction. // It returns true if vectorization is successful and false otherwise. - static bool vectorize(const ForPtr&); + static bool vectorize(const ForPtr& /*f*/); // Find the inner-most loops and vectorize them. Currently, this only works // for the LLVM backend, when no reductions are involved.
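
The hunks above all make the same mechanical change: a parameter that an implementation ignores stays unnamed, so -Wunused-parameter cannot fire, and gains a /*name*/ comment recording its intent. The patch itself does not state the motivation, but this is the convention that clang-tidy's readability-named-parameter check accepts. The sketch below is a minimal standalone illustration of the pattern, not code from this patch; the Visitor and lane names are invented for the example.

#include <iostream>

struct Visitor {
  virtual ~Visitor() = default;
  // The interface fixes the signature; this default implementation ignores
  // the argument, so it is left unnamed and documented with a comment.
  virtual void visit(int /*lane*/) {}
};

struct PrintingVisitor : Visitor {
  void visit(int lane) override {  // this override uses it, so it is named
    std::cout << "lane = " << lane << '\n';
  }
};

int main() {
  PrintingVisitor v;
  v.visit(3);  // prints "lane = 3"
  return 0;
}
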
diff --git a/torch/csrc/jit/tensorexpr/operators/quantization.cpp b/torch/csrc/jit/tensorexpr/operators/quantization.cpp index 4b0bd3a1005a..f6ca4defaf62 100644 --- a/torch/csrc/jit/tensorexpr/operators/quantization.cpp +++ b/torch/csrc/jit/tensorexpr/operators/quantization.cpp @@ -139,8 +139,8 @@ Tensor computeQuantizePerTensor( const std::vector<ArgValue>& inputs, const std::vector<ExprHandle>& outputShape, const std::vector<ExprHandle>& outputStrides, - const std::optional<ScalarType>&, - at::Device) { + const std::optional<ScalarType>& /*unused*/, + at::Device /*unused*/) { std::vector<VarPtr> vars; std::vector<ExprHandle> indices; for (const auto& os : outputShape) { @@ -180,7 +180,7 @@ Tensor computeQuantizedAdd( const std::vector<ExprHandle>& outputShape, const std::vector<ExprHandle>& outputStrides, const std::optional<ScalarType>& outputType, - at::Device) { + at::Device /*unused*/) { const BufHandle& QA = std::get<BufHandle>(inputs[0]); const BufHandle& QB = std::get<BufHandle>(inputs[1]); auto qa_scale = ExprHandle(QA.node()->qscale()); @@ -223,7 +223,7 @@ Tensor computeQuantizePerTensorExternalCall( const std::vector<ExprHandle>& outputShape, const std::vector<ExprHandle>& outputStrides, const std::optional<ScalarType>& outputType, - at::Device) { + at::Device /*unused*/) { const BufHandle& x = std::get<BufHandle>(inputs[0]); const auto qscale = std::get<double>(inputs[1]); const auto qzero = std::get<int64_t>(inputs[2]); @@ -255,7 +255,7 @@ Tensor computeDequantizeExternalCall( const std::vector<ExprHandle>& outputShape, const std::vector<ExprHandle>& outputStrides, const std::optional<ScalarType>& outputType, - at::Device) { + at::Device /*unused*/) { Dtype dtype = kFloat; if (outputType) { dtype = Dtype(*outputType); @@ -280,7 +280,7 @@ Tensor computeQuantizedConv2dPrepack( const std::vector<ExprHandle>& outputShape, const std::vector<ExprHandle>& outputStrides, const std::optional<ScalarType>& outputType, - at::Device) { + at::Device /*unused*/) { Dtype dtype = kFloat; if (outputType) { dtype = Dtype(*outputType); @@ -634,7 +634,7 @@ Tensor computeDequantize( const std::vector<ExprHandle>& outputShape, const std::vector<ExprHandle>& outputStrides, const std::optional<ScalarType>& outputType, - at::Device) { + at::Device /*unused*/) { Dtype dtype = kFloat; if (outputType) { dtype = Dtype(*outputType); @@ -666,7 +666,7 @@ Tensor computeUpsampleNearest2d( const std::vector<ExprHandle>& outputShape, const std::vector<ExprHandle>& outputStrides, const std::optional<ScalarType>& outputType, - at::Device) { + at::Device /*unused*/) { const auto& A = std::get<BufHandle>(inputs[0]); const auto& output_height = outputShape[2]; const auto& output_width = outputShape[3]; @@ -713,7 +713,7 @@ Tensor computeUpsampleNearest2dExternalCall( const std::vector<ExprHandle>& outputShape, const std::vector<ExprHandle>& outputStrides, const std::optional<ScalarType>& outputType, - at::Device) { + at::Device /*unused*/) { Dtype dtype = kFloat; if (outputType) { dtype = Dtype(*outputType); @@ -772,7 +772,7 @@ Tensor computeQuantizedSigmoidExternalCall( const std::vector<ExprHandle>& outputShape, const std::vector<ExprHandle>& outputStrides, const std::optional<ScalarType>& outputType, - at::Device) { + at::Device /*unused*/) { const BufHandle& qx = std::get<BufHandle>(inputs[0]); const auto out_qdtype = immQDType(qx); diff --git a/torch/csrc/jit/tensorexpr/operators/quantization.h b/torch/csrc/jit/tensorexpr/operators/quantization.h index a33eb1081450..ecc86c912b50 100644 --- a/torch/csrc/jit/tensorexpr/operators/quantization.h +++ b/torch/csrc/jit/tensorexpr/operators/quantization.h @@ -145,5 +145,5 @@ TORCH_API Tensor computeQuantizedSigmoidExternalCall( const std::vector<ExprHandle>& outputShape, const std::vector<ExprHandle>& outputStrides, const std::optional<ScalarType>& outputType, - at::Device); + at::Device /*unused*/); } // namespace torch::jit::tensorexpr diff --git a/torch/csrc/lazy/core/lazy_graph_executor.h
b/torch/csrc/lazy/core/lazy_graph_executor.h index ffa444993e48..3bdf3e0fc736 100644 --- a/torch/csrc/lazy/core/lazy_graph_executor.h +++ b/torch/csrc/lazy/core/lazy_graph_executor.h @@ -21,7 +21,7 @@ class TORCH_API LazyGraphExecutor { }; // Register a lazy graph executor instance that can be retrieved using Get() - static void Register(LazyGraphExecutor*); + static void Register(LazyGraphExecutor* /*executor*/); static LazyGraphExecutor* Get(); virtual ~LazyGraphExecutor() = default; diff --git a/torch/csrc/lazy/core/tensor.h b/torch/csrc/lazy/core/tensor.h index a0f4ade6fdc9..bbe6fa1e5efb 100644 --- a/torch/csrc/lazy/core/tensor.h +++ b/torch/csrc/lazy/core/tensor.h @@ -253,7 +253,7 @@ TORCH_API at::Tensor to_lazy_tensor( template auto TupleAtenFromLtcTensorsImpl( const std::vector& tensors, - std::index_sequence) { + std::index_sequence /*unused*/) { return std::make_tuple(CreateAtenFromLtcTensor(tensors[Indices])...); } diff --git a/torch/csrc/monitor/python_init.cpp b/torch/csrc/monitor/python_init.cpp index 2151fbfbbabd..25b14c0a2b2c 100644 --- a/torch/csrc/monitor/python_init.cpp +++ b/torch/csrc/monitor/python_init.cpp @@ -24,7 +24,7 @@ struct type_caster { PYBIND11_TYPE_CASTER(torch::monitor::data_value_t, _("data_value_t")); // Python -> C++ - bool load(handle src, bool) { + bool load(handle src, bool /*unused*/) { PyObject* source = src.ptr(); if (THPUtils_checkLong(source)) { this->value = THPUtils_unpackLong(source); diff --git a/torch/csrc/profiler/collection.cpp b/torch/csrc/profiler/collection.cpp index bcad67b3c0db..133951dd817c 100644 --- a/torch/csrc/profiler/collection.cpp +++ b/torch/csrc/profiler/collection.cpp @@ -1198,7 +1198,7 @@ class TransferEvents { class TransferEvents { public: template - TransferEvents(Args&&...) {} + TransferEvents(Args&&... /*unused*/) {} }; #endif diff --git a/torch/csrc/profiler/collection.h b/torch/csrc/profiler/collection.h index c0f25add5273..b05f4608fb77 100644 --- a/torch/csrc/profiler/collection.h +++ b/torch/csrc/profiler/collection.h @@ -447,7 +447,7 @@ struct TORCH_API Result : public std::enable_shared_from_this { extra_fields_{std::move(extra_fields)} {} template - static EventType deduceTag(const ExtraFields&) { + static EventType deduceTag(const ExtraFields& /*unused*/) { return E; } }; @@ -689,21 +689,22 @@ class TORCH_API RecordQueue { }; TORCH_API bool get_record_concrete_inputs_enabled(); -TORCH_API void set_record_concrete_inputs_enabled_fn(std::function); -TORCH_API void set_record_concrete_inputs_enabled_val(bool); +TORCH_API void set_record_concrete_inputs_enabled_fn( + std::function /*fn*/); +TORCH_API void set_record_concrete_inputs_enabled_val(bool /*val*/); TORCH_API bool get_fwd_bwd_enabled(); -TORCH_API void set_fwd_bwd_enabled_fn(std::function); -TORCH_API void set_fwd_bwd_enabled_val(bool); +TORCH_API void set_fwd_bwd_enabled_fn(std::function /*fn*/); +TORCH_API void set_fwd_bwd_enabled_val(bool /*val*/); TORCH_API bool get_cuda_sync_enabled(); -TORCH_API void set_cuda_sync_enabled_fn(std::function); -TORCH_API void set_cuda_sync_enabled_val(bool); +TORCH_API void set_cuda_sync_enabled_fn(std::function /*fn*/); +TORCH_API void set_cuda_sync_enabled_val(bool /*val*/); // Comms related RecordFunctions will record information about tensor storage // locations. 
TORCH_API bool get_record_tensor_addrs_enabled(); -TORCH_API void set_record_tensor_addrs_enabled_fn(std::function); -TORCH_API void set_record_tensor_addrs_enabled_val(bool); +TORCH_API void set_record_tensor_addrs_enabled_fn(std::function /*fn*/); +TORCH_API void set_record_tensor_addrs_enabled_val(bool /*val*/); } // namespace torch::profiler::impl diff --git a/torch/csrc/profiler/data_flow.cpp b/torch/csrc/profiler/data_flow.cpp index 5f13421c5524..a9f98930f8c6 100644 --- a/torch/csrc/profiler/data_flow.cpp +++ b/torch/csrc/profiler/data_flow.cpp @@ -50,7 +50,7 @@ struct RawTensors { } template - void operator()(T&) {} + void operator()(T& /*unused*/) {} std::vector tensors_; }; diff --git a/torch/csrc/profiler/orchestration/python_tracer.cpp b/torch/csrc/profiler/orchestration/python_tracer.cpp index 0d1ad389f889..f7f0ea584e64 100644 --- a/torch/csrc/profiler/orchestration/python_tracer.cpp +++ b/torch/csrc/profiler/orchestration/python_tracer.cpp @@ -13,9 +13,9 @@ struct NoOpPythonTracer : public PythonTracerBase { void restart() override {} void register_gc_callback() override {} std::vector> getEvents( - std::function, - std::vector&, - c10::time_t) override { + std::function /*time_converter*/, + std::vector& /*enters*/, + c10::time_t /*end_time_ns*/) override { return {}; } }; @@ -25,7 +25,7 @@ struct NoOpMemoryPythonTracer : public PythonMemoryTracerBase { ~NoOpMemoryPythonTracer() override = default; void start() override {} void stop() override {} - void export_memory_history(const std::string&) override {} + void export_memory_history(const std::string& /*path*/) override {} }; } // namespace diff --git a/torch/csrc/profiler/perf.h b/torch/csrc/profiler/perf.h index 07ff1211dbf9..906ee79e2cf4 100644 --- a/torch/csrc/profiler/perf.h +++ b/torch/csrc/profiler/perf.h @@ -88,7 +88,7 @@ class PerfProfiler { /* Disable counting and fill in the caller supplied container with delta * calculated from the start count values since last Enable() */ - void Disable(perf_counters_t&); + void Disable(perf_counters_t& /*vals*/); private: uint64_t CalcDelta(uint64_t start, uint64_t end) const; diff --git a/torch/csrc/profiler/python/init.cpp b/torch/csrc/profiler/python/init.cpp index 4023c038ae32..f057f736c4af 100644 --- a/torch/csrc/profiler/python/init.cpp +++ b/torch/csrc/profiler/python/init.cpp @@ -89,7 +89,7 @@ struct type_caster> { std::shared_ptr, _("torch._C._profiler.CapturedTraceback")); - bool load(handle src, bool) { + bool load(handle src, bool /*unused*/) { if (Py_TYPE(src.ptr()) == &THPCapturedTracebackType) { value = reinterpret_cast(src.ptr())->data; return true; diff --git a/torch/csrc/profiler/standalone/itt_observer.cpp b/torch/csrc/profiler/standalone/itt_observer.cpp index d7e1029494cc..6a1088c91e06 100644 --- a/torch/csrc/profiler/standalone/itt_observer.cpp +++ b/torch/csrc/profiler/standalone/itt_observer.cpp @@ -20,8 +20,12 @@ struct ITTThreadLocalState : ProfilerStateBase { return ActiveProfilerType::ITT; } - void reportMemoryUsage(void*, int64_t, size_t, size_t, c10::Device) override { - } + void reportMemoryUsage( + void* /*ptr*/, + int64_t /*alloc_size*/, + size_t /*total_allocated*/, + size_t /*total_reserved*/, + c10::Device /*device*/) override {} static ITTThreadLocalState* getTLS() { auto tls = ProfilerStateBase::get(/*global=*/false); diff --git a/torch/csrc/profiler/standalone/nvtx_observer.cpp b/torch/csrc/profiler/standalone/nvtx_observer.cpp index d5697e6323bc..6631b2c132d1 100644 --- a/torch/csrc/profiler/standalone/nvtx_observer.cpp +++ 
b/torch/csrc/profiler/standalone/nvtx_observer.cpp @@ -20,8 +20,12 @@ struct NVTXThreadLocalState : ProfilerStateBase { return ActiveProfilerType::NVTX; } - void reportMemoryUsage(void*, int64_t, size_t, size_t, c10::Device) override { - } + void reportMemoryUsage( + void* /*ptr*/, + int64_t /*alloc_size*/, + size_t /*total_allocated*/, + size_t /*total_reserved*/, + c10::Device /*device*/) override {} static NVTXThreadLocalState* getTLS() { auto tls = ProfilerStateBase::get(/*global=*/false); diff --git a/torch/csrc/utils.cpp b/torch/csrc/utils.cpp index 4293a1ed4bf5..f792b5ac644b 100644 --- a/torch/csrc/utils.cpp +++ b/torch/csrc/utils.cpp @@ -354,7 +354,7 @@ std::string dispatch_keyset_string(c10::DispatchKeySet keyset) { namespace pybind11::detail { -bool type_caster::load(handle src, bool) { +bool type_caster::load(handle src, bool /*unused*/) { PyObject* obj = src.ptr(); if (THPVariable_Check(obj)) { value = THPVariable_Unpack(obj); @@ -370,7 +370,7 @@ handle type_caster::cast( return handle(THPVariable_Wrap(src)); } -bool type_caster::load(handle src, bool) { +bool type_caster::load(handle src, bool /*unused*/) { PyObject* source = src.ptr(); auto tuple = PyTuple_Check(source); if (tuple || PyList_Check(source)) { @@ -403,7 +403,7 @@ handle type_caster::cast( return handle(THPUtils_packInt64Array(src.size(), src.data())); } -bool type_caster::load(handle src, bool) { +bool type_caster::load(handle src, bool /*unused*/) { PyObject* source = src.ptr(); auto tuple = PyTuple_Check(source); @@ -444,7 +444,9 @@ handle type_caster::cast( return t.release(); } -bool type_caster>::load(handle src, bool) { +bool type_caster>::load( + handle src, + bool /*unused*/) { TORCH_INTERNAL_ASSERT(0, "NYI"); } handle type_caster>::cast( diff --git a/torch/csrc/utils/byte_order.cpp b/torch/csrc/utils/byte_order.cpp index b7d00207a3ae..ccb8990e5915 100644 --- a/torch/csrc/utils/byte_order.cpp +++ b/torch/csrc/utils/byte_order.cpp @@ -172,7 +172,7 @@ template <> TORCH_API void THP_decodeBuffer( bool* dst, const uint8_t* src, - bool, + bool /*unused*/, size_t len) { for (const auto i : c10::irange(len)) { dst[i] = (int)src[i] != 0 ? true : false; diff --git a/torch/csrc/utils/disable_torch_function.cpp b/torch/csrc/utils/disable_torch_function.cpp index 9dc6e9777a36..becbe1681f00 100644 --- a/torch/csrc/utils/disable_torch_function.cpp +++ b/torch/csrc/utils/disable_torch_function.cpp @@ -348,7 +348,7 @@ inline static bool array_has_torch_function( return false; } -PyObject* THPModule_has_torch_function(PyObject*, PyObject* arg) { +PyObject* THPModule_has_torch_function(PyObject* /*unused*/, PyObject* arg) { bool result = false; if (PyTuple_CheckExact(arg) || PyList_CheckExact(arg)) { // Fast path: @@ -372,7 +372,9 @@ PyObject* THPModule_has_torch_function(PyObject*, PyObject* arg) { Py_RETURN_FALSE; } -PyObject* THPModule_has_torch_function_unary(PyObject*, PyObject* obj) { +PyObject* THPModule_has_torch_function_unary( + PyObject* /*unused*/, + PyObject* obj) { // Special case `THPModule_has_torch_function` for the single arg case. 
if (torch::check_has_torch_function(obj)) { Py_RETURN_TRUE; @@ -381,7 +383,7 @@ PyObject* THPModule_has_torch_function_unary(PyObject*, PyObject* obj) { } PyObject* THPModule_has_torch_function_variadic( - PyObject*, + PyObject* /*unused*/, PyObject* const* args, Py_ssize_t nargs) { if (array_has_torch_function(args, nargs)) { diff --git a/torch/csrc/utils/disable_torch_function.h b/torch/csrc/utils/disable_torch_function.h index 9331c521b183..b52173c252a8 100644 --- a/torch/csrc/utils/disable_torch_function.h +++ b/torch/csrc/utils/disable_torch_function.h @@ -37,9 +37,11 @@ PyObject* THPModule_DisableTorchFunctionType(); PyObject* THPModule_DisableTorchFunctionSubclassType(); PyObject* THPModule_disable_torch_function(PyObject* self, PyObject* args); PyObject* THPModule_disable_torch_dispatch(PyObject* self, PyObject* args); -PyObject* THPModule_has_torch_function(PyObject*, PyObject* arg); -PyObject* THPModule_has_torch_function_unary(PyObject*, PyObject* obj); +PyObject* THPModule_has_torch_function(PyObject* /*unused*/, PyObject* arg); +PyObject* THPModule_has_torch_function_unary( + PyObject* /*unused*/, + PyObject* obj); PyObject* THPModule_has_torch_function_variadic( - PyObject*, + PyObject* /*unused*/, PyObject* const* args, Py_ssize_t nargs); diff --git a/torch/csrc/utils/pybind.cpp b/torch/csrc/utils/pybind.cpp index 2ff645b7593c..cce34b7cf68b 100644 --- a/torch/csrc/utils/pybind.cpp +++ b/torch/csrc/utils/pybind.cpp @@ -4,7 +4,7 @@ namespace pybind11::detail { -bool type_caster::load(py::handle src, bool) { +bool type_caster::load(py::handle src, bool /*unused*/) { if (torch::is_symint(src)) { auto node = src.attr("node"); if (py::isinstance(node)) { @@ -62,7 +62,7 @@ py::handle type_caster::cast( } } -bool type_caster::load(py::handle src, bool) { +bool type_caster::load(py::handle src, bool /*unused*/) { if (torch::is_symfloat(src)) { value = c10::SymFloat(static_cast( c10::make_intrusive(src.attr("node")))); @@ -92,7 +92,7 @@ py::handle type_caster::cast( } } -bool type_caster::load(py::handle src, bool) { +bool type_caster::load(py::handle src, bool /*unused*/) { if (torch::is_symbool(src)) { value = c10::SymBool(static_cast( c10::make_intrusive(src.attr("node")))); @@ -122,7 +122,7 @@ py::handle type_caster::cast( } } -bool type_caster::load(py::handle src, bool) { +bool type_caster::load(py::handle src, bool /*unused*/) { TORCH_INTERNAL_ASSERT( 0, "pybind11 loading for c10::Scalar NYI (file a bug if you need it)"); } diff --git a/torch/csrc/utils/pybind.h b/torch/csrc/utils/pybind.h index 681d94582986..b2c0863148ad 100644 --- a/torch/csrc/utils/pybind.h +++ b/torch/csrc/utils/pybind.h @@ -38,7 +38,7 @@ struct TORCH_PYTHON_API type_caster { // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) PYBIND11_TYPE_CASTER(at::Tensor, _("torch.Tensor")); - bool load(handle src, bool); + bool load(handle src, bool /*unused*/); static handle cast( const at::Tensor& src, @@ -53,7 +53,7 @@ struct type_caster { // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) PYBIND11_TYPE_CASTER(at::Storage, _("torch.StorageBase")); - bool load(handle src, bool) { + bool load(handle src, bool /*unused*/) { PyObject* obj = src.ptr(); if (torch::isStorage(obj)) { value = torch::createStorage(obj); @@ -76,7 +76,7 @@ struct type_caster { // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) PYBIND11_TYPE_CASTER(at::Generator, _("torch.Generator")); - bool load(handle src, bool) { + bool load(handle src, bool /*unused*/) { PyObject* 
obj = src.ptr(); if (THPGenerator_Check(obj)) { value = reinterpret_cast(obj)->cdata; @@ -99,7 +99,7 @@ struct TORCH_PYTHON_API type_caster { // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) PYBIND11_TYPE_CASTER(at::IntArrayRef, _("Tuple[int, ...]")); - bool load(handle src, bool); + bool load(handle src, bool /*unused*/); static handle cast( at::IntArrayRef src, return_value_policy /* policy */, @@ -115,7 +115,7 @@ struct TORCH_PYTHON_API type_caster { // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) PYBIND11_TYPE_CASTER(at::SymIntArrayRef, _("List[int]")); - bool load(handle src, bool); + bool load(handle src, bool /*unused*/); static handle cast( at::SymIntArrayRef src, return_value_policy /* policy */, @@ -131,7 +131,7 @@ struct TORCH_PYTHON_API type_caster> { // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) PYBIND11_TYPE_CASTER(at::ArrayRef, _("List[SymNode]")); - bool load(handle src, bool); + bool load(handle src, bool /*unused*/); static handle cast( at::ArrayRef src, return_value_policy /* policy */, @@ -147,7 +147,7 @@ struct type_caster { // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) PYBIND11_TYPE_CASTER(at::MemoryFormat, _("torch.memory_format")); - bool load(handle src, bool) { + bool load(handle src, bool /*unused*/) { PyObject* obj = src.ptr(); if (THPMemoryFormat_Check(obj)) { value = reinterpret_cast(obj)->memory_format; @@ -175,7 +175,7 @@ struct type_caster { // after a successful call to load. type_caster() : value(c10::kCPU) {} - bool load(handle src, bool) { + bool load(handle src, bool /*unused*/) { PyObject* obj = src.ptr(); if (THPDevice_Check(obj)) { value = reinterpret_cast(obj)->device; @@ -204,7 +204,7 @@ struct type_caster { // after a successful call to load. type_caster() : value(at::kFloat) {} - bool load(handle src, bool) { + bool load(handle src, bool /*unused*/) { PyObject* obj = src.ptr(); if (THPDtype_Check(obj)) { value = reinterpret_cast(obj)->scalar_type; @@ -233,7 +233,7 @@ struct type_caster { // after a successful call to load. 
type_caster() : value(c10::Stream::DEFAULT, c10::Device(c10::kCPU, 0)) {} - bool load(handle src, bool) { + bool load(handle src, bool /*unused*/) { PyObject* obj = src.ptr(); if (THPStream_Check(obj)) { value = c10::Stream::unpack3( @@ -286,7 +286,7 @@ struct TORCH_PYTHON_API type_caster { PYBIND11_TYPE_CASTER( c10::Scalar, _("Union[Number, torch.SymInt, torch.SymFloat, torch.SymBool]")); - bool load(py::handle src, bool); + bool load(py::handle src, bool /*unused*/); static py::handle cast( const c10::Scalar& si, @@ -298,7 +298,7 @@ template <> struct TORCH_PYTHON_API type_caster { public: PYBIND11_TYPE_CASTER(c10::SymInt, _("Union[int, torch.SymInt]")); - bool load(py::handle src, bool); + bool load(py::handle src, bool /*unused*/); static py::handle cast( const c10::SymInt& si, @@ -310,7 +310,7 @@ template <> struct TORCH_PYTHON_API type_caster { public: PYBIND11_TYPE_CASTER(c10::SymFloat, _("float")); - bool load(py::handle src, bool); + bool load(py::handle src, bool /*unused*/); static py::handle cast( const c10::SymFloat& si, @@ -322,7 +322,7 @@ template <> struct TORCH_PYTHON_API type_caster { public: PYBIND11_TYPE_CASTER(c10::SymBool, _("Union[bool, torch.SymBool]")); - bool load(py::handle src, bool); + bool load(py::handle src, bool /*unused*/); static py::handle cast( const c10::SymBool& si, @@ -336,7 +336,7 @@ struct type_caster> { // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) PYBIND11_TYPE_CASTER(c10::complex, _("complex")); - bool load(handle src, bool) { + bool load(handle src, bool /*unused*/) { PyObject* obj = src.ptr(); // Referred from `THPUtils_unpackComplexDouble` diff --git a/torch/csrc/utils/tensor_memoryformats.h b/torch/csrc/utils/tensor_memoryformats.h index b9268070e34c..4f08109284a4 100644 --- a/torch/csrc/utils/tensor_memoryformats.h +++ b/torch/csrc/utils/tensor_memoryformats.h @@ -9,6 +9,7 @@ namespace torch::utils { void initializeMemoryFormats(); // This methods returns a borrowed reference! -TORCH_PYTHON_API PyObject* getTHPMemoryFormat(c10::MemoryFormat); +TORCH_PYTHON_API PyObject* getTHPMemoryFormat( + c10::MemoryFormat /*memory_format*/); } // namespace torch::utils diff --git a/torch/csrc/utils/variadic.h b/torch/csrc/utils/variadic.h index 44fe1028fe5c..ae40ff5ab8f2 100644 --- a/torch/csrc/utils/variadic.h +++ b/torch/csrc/utils/variadic.h @@ -101,7 +101,10 @@ template < typename Function, typename Accessor, size_t... 
Is> -ReturnType unpack(Function function, Accessor accessor, Indices<Is...>) { +ReturnType unpack( + Function function, + Accessor accessor, + Indices<Is...> /*unused*/) { return ReturnType(function(accessor.template operator()<Is>(Is)...)); } diff --git a/torch/lib/libshm/libshm.h b/torch/lib/libshm/libshm.h index 28024aa2338d..d3f7c7061abc 100644 --- a/torch/lib/libshm/libshm.h +++ b/torch/lib/libshm/libshm.h @@ -36,7 +36,7 @@ class THManagedMapAllocator : private THManagedMapAllocatorInit, const char* filename, int flags, size_t size); - static THManagedMapAllocator* fromDataPtr(const at::DataPtr&); + static THManagedMapAllocator* fromDataPtr(const at::DataPtr& /*dptr*/); const char* manager_handle() const { return manager_handle_.c_str(); diff --git a/torch/nativert/common/FileUtil.cpp b/torch/nativert/common/FileUtil.cpp index 490f44d158b6..798a76ee00f6 100644 --- a/torch/nativert/common/FileUtil.cpp +++ b/torch/nativert/common/FileUtil.cpp @@ -27,7 +27,7 @@ int unistd_close(int fh) { #endif } -inline void incr(ssize_t) {} +inline void incr(ssize_t /*unused*/) {} template <typename Offset> inline void incr(ssize_t n, Offset& offset) { offset += static_cast<Offset>(n); } diff --git a/torch/nativert/common/FileUtil.h b/torch/nativert/common/FileUtil.h index 28fc7c11bc35..6fa82347ac2b 100644 --- a/torch/nativert/common/FileUtil.h +++ b/torch/nativert/common/FileUtil.h @@ -111,8 +111,8 @@ class File { void swap(File& other) noexcept; // movable - File(File&&) noexcept; - File& operator=(File&&) noexcept; + File(File&& /*other*/) noexcept; + File& operator=(File&& /*other*/) noexcept; private: // unique diff --git a/torch/nativert/detail/ITree.h b/torch/nativert/detail/ITree.h index 19359920720a..5448fb2dead7 100644 --- a/torch/nativert/detail/ITree.h +++ b/torch/nativert/detail/ITree.h @@ -32,7 +32,7 @@ using ITreeMapNoReturnFn = using IValueApplyFn = void (*)(ITreeMapNoReturnFn, const c10::IValue&, const ITreeSpec&); -nlohmann::json defaultContextLoadFn(std::string_view); +nlohmann::json defaultContextLoadFn(std::string_view /*context*/); struct NodeDef { ITreeFlattenFn flattenFn; diff --git a/torch/nativert/executor/ExecutionFrame.cpp b/torch/nativert/executor/ExecutionFrame.cpp index c3c044b0611f..2cef8e208670 100644 --- a/torch/nativert/executor/ExecutionFrame.cpp +++ b/torch/nativert/executor/ExecutionFrame.cpp @@ -138,8 +138,8 @@ void ExecutionFrame::updateMovableOutputs() { ExecutionFrame::ExecutionFrame( const Graph& graph, size_t numValues, - const std::vector&, - const std::vector&) + const std::vector& /*unused*/, + const std::vector& /*unused*/) : graph_(graph) { allValues_.resize(numValues); } diff --git a/torch/nativert/graph/Graph.h b/torch/nativert/graph/Graph.h index 49335ec6aebd..bbd87a8e2014 100644 --- a/torch/nativert/graph/Graph.h +++ b/torch/nativert/graph/Graph.h @@ -71,7 +71,7 @@ class Type { // These are all the constant types that are allowed as attributes on Nodes. struct None {}; // None always equals itself -inline bool operator==(const None&, const None&) { +inline bool operator==(const None& /*unused*/, const None& /*unused*/) { return true; }
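
Several hunks (TupleAtenFromLtcTensorsImpl in lazy/core/tensor.h, unpack in utils/variadic.h) apply the same comment to std::index_sequence-style parameters. There the argument carries no runtime data at all: it exists purely so template argument deduction can recover the index pack Is..., which is why the parameter is never named. Below is a minimal standalone sketch of that idiom; array_to_tuple is a made-up name for illustration, not code from this patch.

#include <array>
#include <cstddef>
#include <iostream>
#include <tuple>
#include <utility>

// The index_sequence argument is a pure tag: deduction learns Is... from
// its type, and the value itself is unused, hence the /*unused*/ comment.
template <typename T, std::size_t N, std::size_t... Is>
auto array_to_tuple_impl(
    const std::array<T, N>& a,
    std::index_sequence<Is...> /*unused*/) {
  return std::make_tuple(a[Is]...);
}

template <typename T, std::size_t N>
auto array_to_tuple(const std::array<T, N>& a) {
  return array_to_tuple_impl(a, std::make_index_sequence<N>{});
}

int main() {
  auto t = array_to_tuple(std::array<int, 3>{1, 2, 3});
  std::cout << std::get<2>(t) << '\n';  // prints 3
  return 0;
}
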