From f231be25c679adb47ac3e483dc68948e5ad137a4 Mon Sep 17 00:00:00 2001
From: Yuanyuan Chen
Date: Thu, 9 Oct 2025 06:23:21 +0000
Subject: [PATCH] Mark unused parameters in C++ code (#164912)

This PR adds unused parameter name comments in C++ declarations to
improve code readability.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/164912
Approved by: https://github.com/Skylion007
---
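Note on the convention this patch applies (a minimal sketch; `set_foo` is a
hypothetical example and not code from this PR, while `setBenchmarkCuDNN` is
quoted from the diff below): a parameter that a function intentionally
ignores stays unnamed, and its name is preserved as a comment.

    // Before: the declaration gives the reader no hint what the bool controls.
    void set_foo(bool);

    // After: the name survives as a comment, but the parameter stays unnamed,
    // so a definition that ignores it cannot trigger -Wunused-parameter and
    // cannot use the value by accident.
    void set_foo(bool /*enabled*/);

    // As applied in this diff:
    void setBenchmarkCuDNN(bool /*b*/);

A C++17 alternative would be a named parameter marked [[maybe_unused]]; the
comment form keeps the parameter genuinely inaccessible and needs no
attribute.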
 aten/src/ATen/Context.h | 44 +++++------
 aten/src/ATen/MapAllocator.cpp | 8 +-
 aten/src/ATen/MapAllocator.h | 12 +--
 aten/src/ATen/NestedTensorImpl.cpp | 2 +-
 aten/src/ATen/NestedTensorImpl.h | 3 +-
 aten/src/ATen/Parallel.h | 6 +-
 aten/src/ATen/SparseCsrTensorImpl.cpp | 2 +-
 aten/src/ATen/SparseCsrTensorImpl.h | 7 +-
 aten/src/ATen/SparseTensorImpl.h | 8 +-
 aten/src/ATen/TensorIndexing.h | 4 +-
 aten/src/ATen/TensorIterator.h | 20 ++---
 aten/src/ATen/Utils.h | 2 +-
 aten/src/ATen/autocast_mode.cpp | 2 +-
 aten/src/ATen/core/NamedTensor.h | 8 +-
 .../core/PythonOpRegistrationTrampoline.h | 2 +-
 aten/src/ATen/core/TensorBase.h | 4 +-
 aten/src/ATen/core/boxing/BoxedKernel.h | 32 ++++----
 aten/src/ATen/core/boxing/BoxedKernel_impl.h | 6 +-
 aten/src/ATen/core/boxing/KernelFunction.cpp | 6 +-
 aten/src/ATen/core/boxing/KernelFunction.h | 4 +-
 aten/src/ATen/core/boxing/impl/boxing.h | 4 +-
 .../impl/make_boxed_from_unboxed_functor.h | 12 +--
 aten/src/ATen/core/builtin_function.h | 6 +-
 .../ATen/core/dispatch/DispatchKeyExtractor.h | 5 +-
 aten/src/ATen/core/dispatch/Dispatcher.h | 2 +-
 aten/src/ATen/core/dispatch/OperatorEntry.h | 2 +-
 aten/src/ATen/core/dynamic_type.cpp | 4 +-
 aten/src/ATen/core/dynamic_type.h | 10 +--
 aten/src/ATen/core/function.h | 8 +-
 aten/src/ATen/core/ivalue.h | 2 +-
 aten/src/ATen/core/ivalue_inl.h | 42 +++++------
 .../ATen/core/op_registration/infer_schema.h | 2 +-
 aten/src/ATen/core/operator_name.h | 2 +-
 aten/src/ATen/core/type_ptr.h | 2 +-
 .../cpu/vec/vec256/vec256_complex_double.h | 8 +-
 aten/src/ATen/cuda/detail/CUDAHooks.h | 2 +-
 aten/src/ATen/cuda/tunable/TunableOp.h | 2 +-
 aten/src/ATen/detail/HPUHooksInterface.h | 2 +-
 aten/src/ATen/functorch/BatchRulesHelper.h | 2 +-
 aten/src/ATen/metal/Context.h | 2 +-
 aten/src/ATen/native/BatchLinearAlgebra.cpp | 2 +-
 aten/src/ATen/native/SharedReduceOps.h | 6 +-
 aten/src/ATen/native/TensorCompare.cpp | 2 +-
 .../ATen/native/cpu/DepthwiseConvKernel.cpp | 10 +--
 aten/src/ATen/native/cpu/Loops.h | 8 +-
 .../cuda/linalg/BatchLinearAlgebraLib.h | 2 +-
 .../native/quantized/AffineQuantizerBase.h | 2 +-
 .../ATen/native/quantized/TensorFactories.cpp | 34 ++++-----
 aten/src/ATen/native/xnnpack/Shim.cpp | 74 +++++++++----
 aten/src/ATen/ops/from_blob.h | 2 +-
 aten/src/ATen/vulkan/Context.h | 2 +-
 c10/core/CPUAllocator.cpp | 2 +-
 c10/core/CPUAllocator.h | 2 +-
 c10/core/DispatchKey.h | 10 ++-
 c10/core/DispatchKeySet.h | 10 +--
 c10/core/SafePyObject.h | 4 +-
 c10/core/Scalar.h | 6 +-
 c10/core/Storage.h | 2 +-
 c10/core/Stream.h | 5 +-
 c10/core/SymBool.h | 4 +-
 c10/core/SymFloat.h | 20 ++---
 c10/core/SymInt.h | 2 +-
 c10/core/TensorImpl.h | 14 ++--
 c10/core/UndefinedTensorImpl.cpp | 2 +-
 c10/core/impl/DeviceGuardImplInterface.h | 30 ++++----
 c10/core/impl/FakeGuardImpl.h | 2 +-
 c10/core/impl/GPUTrace.h | 2 +-
 c10/core/impl/LocalDispatchKeySet.h | 4 +-
 c10/core/impl/PyInterpreter.cpp | 14 ++--
 c10/cuda/CUDAMallocAsyncAllocator.cpp | 4 +-
 c10/cuda/CUDAMiscFunctions.h | 2 +-
 c10/cuda/CUDAStream.h | 2 +-
 c10/util/DynamicCounter.h | 4 +-
 c10/util/Exception.h | 2 +-
 c10/util/ExclusivelyOwned.h | 2 +-
 c10/util/FunctionRef.h | 10 ++-
 c10/util/Gauge.h | 3 +-
 c10/util/Logging.h | 7 +-
 c10/util/MaybeOwned.h | 4 +-
 c10/util/Metaprogramming.h | 4 +-
 c10/util/OptionalArrayRef.h | 4 +-
 c10/util/SmallVector.h | 10 +--
 c10/util/StringUtil.h | 2 +-
 c10/util/UniqueVoidPtr.cpp | 2 +-
 c10/util/UniqueVoidPtr.h | 2 +-
 c10/util/WaitCounter.h | 2 +-
 c10/util/flat_hash_map.h | 14 ++--
 c10/util/int128.h | 4 +-
 c10/util/intrusive_ptr.h | 4 +-
 c10/util/llvmMathExtras.h | 4 +-
 c10/util/logging_is_not_google_glog.h | 4 +-
 c10/util/order_preserving_flat_hash_map.h | 14 ++--
 torch/custom_class.h | 8 +-
 torch/custom_class_detail.h | 2 +-
 torch/headeronly/util/BFloat16.h | 4 +-
 torch/headeronly/util/Float8_e4m3fn.h | 2 +-
 torch/headeronly/util/Float8_e4m3fnuz.h | 4 +-
 torch/headeronly/util/Float8_e5m2.h | 3 +-
 torch/headeronly/util/Float8_e5m2fnuz.h | 4 +-
 torch/headeronly/util/Float8_e8m0fnu.h | 2 +-
 torch/headeronly/util/Half.h | 3 +-
 torch/library.h | 18 ++--
 102 files changed, 386 insertions(+), 356 deletions(-)

diff --git a/aten/src/ATen/Context.h b/aten/src/ATen/Context.h
index 4055083cfcb2..d0f6ce18862a 100644
--- a/aten/src/ATen/Context.h
+++ b/aten/src/ATen/Context.h
@@ -226,15 +226,15 @@ class TORCH_API Context {
   bool userEnabledMkldnn() const;
   void setUserEnabledMkldnn(bool e);
   bool benchmarkCuDNN() const;
-  void setBenchmarkCuDNN(bool);
+  void setBenchmarkCuDNN(bool /*b*/);
   int benchmarkLimitCuDNN() const;
-  void setBenchmarkLimitCuDNN(int);
+  void setBenchmarkLimitCuDNN(int /*b*/);
   bool immediateMiopen() const;
-  void setImmediateMiopen(bool);
+  void setImmediateMiopen(bool /*b*/);
   bool deterministicCuDNN() const;
-  void setDeterministicCuDNN(bool);
+  void setDeterministicCuDNN(bool /*b*/);
   bool deterministicMkldnn() const;
-  void setDeterministicMkldnn(bool);
+  void setDeterministicMkldnn(bool /*b*/);
   bool userEnabledNNPACK() const;
   void setUserEnabledNNPACK(bool e);
 
@@ -252,32 +252,32 @@ class TORCH_API Context {
   void setSDPPriorityOrder(const std::vector& order);
   std::array sDPPriorityOrder();
 
-  void setSDPUseFlash(bool);
+  void setSDPUseFlash(bool /*e*/);
   bool userEnabledFlashSDP() const;
 
-  void setSDPUseMemEfficient(bool);
+  void setSDPUseMemEfficient(bool /*e*/);
   bool userEnabledMemEfficientSDP() const;
 
-  void setSDPUseMath(bool);
+  void setSDPUseMath(bool /*e*/);
   bool userEnabledMathSDP() const;
 
-  void setSDPUseCuDNN(bool);
+  void setSDPUseCuDNN(bool /*e*/);
   bool userEnabledCuDNNSDP() const;
 
-  void setAllowFP16BF16ReductionMathSDP(bool);
+  void setAllowFP16BF16ReductionMathSDP(bool /*e*/);
   bool allowFP16BF16ReductionMathSDP() const;
 
-  void setSDPUseOverrideable(bool);
+  void setSDPUseOverrideable(bool /*e*/);
   bool userEnabledOverrideableSDP() const;
 
   at::LinalgBackend linalgPreferredBackend() const;
-  void setLinalgPreferredBackend(at::LinalgBackend);
+  void setLinalgPreferredBackend(at::LinalgBackend /*b*/);
 
   at::BlasBackend blasPreferredBackend();
-  void setBlasPreferredBackend(at::BlasBackend);
+  void setBlasPreferredBackend(at::BlasBackend /*b*/);
 
   at::ROCmFABackend getROCmFAPreferredBackend();
-  void setROCmFAPreferredBackend(at::ROCmFABackend);
+  void setROCmFAPreferredBackend(at::ROCmFABackend /*b*/);
 
   // Note [Enabling Deterministic Operations]
   // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -310,9 +310,9 @@ class TORCH_API Context {
   bool deterministicAlgorithms() const;
   bool deterministicAlgorithmsWarnOnly() const;
-  void setDeterministicAlgorithms(bool, bool);
+  void setDeterministicAlgorithms(bool /*b*/, bool /*warn_only*/);
   bool deterministicFillUninitializedMemory() const;
-  void setDeterministicFillUninitializedMemory(bool);
+  void setDeterministicFillUninitializedMemory(bool /*b*/);
 
   // Note [Writing Nondeterministic Operations]
   // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -356,11 +356,11 @@ class TORCH_API Context {
       Float32Op op,
       Float32Precision p);
   bool allowTF32CuDNN(std::optional op = std::nullopt) const;
-  void setAllowTF32CuDNN(bool);
+  void setAllowTF32CuDNN(bool /*b*/);
   bool allowTF32OneDNN() const;
-  void setAllowTF32OneDNN(bool);
+  void setAllowTF32OneDNN(bool /*b*/);
   bool allowTF32CuBLAS() const;
-  void setAllowTF32CuBLAS(bool);
+  void setAllowTF32CuBLAS(bool /*b*/);
   Float32MatmulPrecision float32MatmulPrecision() const;
   Float32Precision float32Precision(Float32Backend backend, Float32Op op) const;
   CuBLASReductionOption allowFP16ReductionCuBLAS() const;
@@ -372,7 +372,7 @@ class TORCH_API Context {
       bool allow_reduced_precision,
       bool allow_splitk = true);
   bool allowFP16AccumulationCuBLAS() const;
-  void setAllowFP16AccumulationCuBLAS(bool);
+  void setAllowFP16AccumulationCuBLAS(bool /*b*/);
 
   // Matmuls can use a so-called "persistent" kernel which launches one CUDA
   // block for each SM on the GPU, and each block then iterates over multiple
@@ -384,7 +384,7 @@ class TORCH_API Context {
   // to make matmuls target only a subset of the SMs, so they can fully schedule
   // even next to a comms kernel, and only be a few percent slower.
   std::optional _SMCarveout_EXPERIMENTAL() const;
-  void _setSMCarveout_EXPERIMENTAL(std::optional);
+  void _setSMCarveout_EXPERIMENTAL(std::optional /*c*/);
 
   at::QEngine qEngine() const;
   void setQEngine(at::QEngine e);
@@ -405,7 +405,7 @@ class TORCH_API Context {
   void setDefaultMobileCPUAllocator();
   void unsetDefaultMobileCPUAllocator();
   bool allowFP16ReductionCPU() const;
-  void setAllowFP16ReductionCPU(bool);
+  void setAllowFP16ReductionCPU(bool /*b*/);
 
   // Preserved for BC
   void lazyInitCUDA() {
diff --git a/aten/src/ATen/MapAllocator.cpp b/aten/src/ATen/MapAllocator.cpp
index d88c05f087a9..ed697c32b58a 100644
--- a/aten/src/ATen/MapAllocator.cpp
+++ b/aten/src/ATen/MapAllocator.cpp
@@ -62,7 +62,7 @@ constexpr const char* unknown_eventname = "eventname not specified";
 #endif
 } // namespace (anonymous)
 
-MapAllocator::MapAllocator(WithFd, std::string_view filename, int fd, int flags, size_t size)
+MapAllocator::MapAllocator(WithFd /*unused*/, std::string_view filename, int fd, int flags, size_t size)
   : filename_(filename.empty() ? unknown_filename : filename)
   , size_(0) // to be filled later
 #ifdef _WIN32
@@ -494,7 +494,7 @@ RefcountedMapAllocator::RefcountedMapAllocator(const char *filename, int flags,
   initializeAlloc();
 }
 
-RefcountedMapAllocator::RefcountedMapAllocator(WithFd, const char *filename, int fd, int flags, size_t size)
+RefcountedMapAllocator::RefcountedMapAllocator(WithFd /*unused*/, const char *filename, int fd, int flags, size_t size)
   : RefcountedMapAllocatorArgCheck(flags)
   , MapAllocator(WITH_FD, filename, flags, fd, size + map_alloc_alignment) {
@@ -614,7 +614,7 @@ at::DataPtr MapAllocator::makeDataPtr(std::string_view filename, int flags, size
   return {context->data(), context, &deleteMapAllocator, at::DeviceType::CPU};
 }
 
-at::DataPtr MapAllocator::makeDataPtr(WithFd, const char *filename, int fd, int flags, size_t size, size_t* actual_size_out) {
+at::DataPtr MapAllocator::makeDataPtr(WithFd /*unused*/, const char *filename, int fd, int flags, size_t size, size_t* actual_size_out) {
   auto* context = new MapAllocator(WITH_FD, filename, fd, flags, size);
   if (actual_size_out) *actual_size_out = context->size();
   return {context->data(), context, &deleteMapAllocator, at::DeviceType::CPU};
@@ -626,7 +626,7 @@ at::DataPtr RefcountedMapAllocator::makeDataPtr(const char *filename, int flags,
   return {context->data(), context, &deleteRefcountedMapAllocator, at::DeviceType::CPU};
 }
 
-at::DataPtr RefcountedMapAllocator::makeDataPtr(WithFd, const char *filename, int fd, int flags, size_t size, size_t* actual_size_out) {
+at::DataPtr RefcountedMapAllocator::makeDataPtr(WithFd /*unused*/, const char *filename, int fd, int flags, size_t size, size_t* actual_size_out) {
   auto* context = new RefcountedMapAllocator(WITH_FD, filename, fd, flags, size);
   if (actual_size_out) *actual_size_out = context->size() - map_alloc_alignment;
   return {context->data(), context, &deleteRefcountedMapAllocator, at::DeviceType::CPU};
diff --git a/aten/src/ATen/MapAllocator.h b/aten/src/ATen/MapAllocator.h
index 9fc5e32adcb5..7a3415a4c411 100644
--- a/aten/src/ATen/MapAllocator.h
+++ b/aten/src/ATen/MapAllocator.h
@@ -25,7 +25,7 @@ class TORCH_API MapAllocator {
  public:
   MapAllocator(std::string_view filename, int flags, size_t size);
   MapAllocator(
-      WithFd,
+      WithFd /*unused*/,
       std::string_view filename,
       int fd,
       int flags,
@@ -59,14 +59,14 @@ class TORCH_API MapAllocator {
     return flags_;
   }
 
-  static MapAllocator* fromDataPtr(const at::DataPtr&);
+  static MapAllocator* fromDataPtr(const at::DataPtr& /*dptr*/);
   static at::DataPtr makeDataPtr(
       std::string_view filename,
       int flags,
       size_t size,
       size_t* actual_size_out);
   static at::DataPtr makeDataPtr(
-      WithFd,
+      WithFd /*unused*/,
       const char* filename,
       int fd,
       int flags,
@@ -105,13 +105,13 @@ class TORCH_API RefcountedMapAllocator : private RefcountedMapAllocatorArgCheck,
  public:
   RefcountedMapAllocator(const char* filename, int flags, size_t size);
   RefcountedMapAllocator(
-      WithFd,
+      WithFd /*unused*/,
       const char* filename,
       int fd,
       int flags,
       size_t size);
 
-  static RefcountedMapAllocator* fromDataPtr(const at::DataPtr&);
+  static RefcountedMapAllocator* fromDataPtr(const at::DataPtr& /*dptr*/);
   RefcountedMapAllocator(const RefcountedMapAllocator&) = delete;
   RefcountedMapAllocator(RefcountedMapAllocator&&) = delete;
   RefcountedMapAllocator& operator=(const RefcountedMapAllocator&) = delete;
@@ -122,7 +122,7 @@ class TORCH_API RefcountedMapAllocator : private RefcountedMapAllocatorArgCheck,
       size_t size,
       size_t* actual_size_out);
   static at::DataPtr makeDataPtr(
-      WithFd,
+      WithFd /*unused*/,
       const char* filename,
       int fd,
       int flags,
diff --git a/aten/src/ATen/NestedTensorImpl.cpp b/aten/src/ATen/NestedTensorImpl.cpp
index 63bd867f9022..ea951ed3db13 100644
--- a/aten/src/ATen/NestedTensorImpl.cpp
+++ b/aten/src/ATen/NestedTensorImpl.cpp
@@ -273,7 +273,7 @@ c10::SymInt NestedTensorImpl::sym_numel_custom() const {
   return NestedTensorImpl::numel_custom();
 }
 
-c10::SymBool NestedTensorImpl::sym_is_contiguous_custom(MemoryFormat) const {
+c10::SymBool NestedTensorImpl::sym_is_contiguous_custom(MemoryFormat /*memory_format*/) const {
   return nested_tensor_impl_is_contiguous(this);
 }
 IntArrayRef NestedTensorImpl::sizes_custom() const {
diff --git a/aten/src/ATen/NestedTensorImpl.h b/aten/src/ATen/NestedTensorImpl.h
index cddf37df34a5..9b92e9ec83ad 100644
--- a/aten/src/ATen/NestedTensorImpl.h
+++ b/aten/src/ATen/NestedTensorImpl.h
@@ -115,7 +115,8 @@ struct TORCH_API NestedTensorImpl : public c10::TensorImpl {
   // with real implementations
   int64_t numel_custom() const override;
   c10::SymInt sym_numel_custom() const override;
-  c10::SymBool sym_is_contiguous_custom(MemoryFormat) const override;
+  c10::SymBool sym_is_contiguous_custom(
+      MemoryFormat /*memory_format*/) const override;
   int64_t size_custom(int64_t d) const override {
     return this->size(d);
   }
diff --git a/aten/src/ATen/Parallel.h b/aten/src/ATen/Parallel.h
index b55dad02f347..d09a33841b94 100644
--- a/aten/src/ATen/Parallel.h
+++ b/aten/src/ATen/Parallel.h
@@ -14,7 +14,7 @@ inline int64_t divup(int64_t x, int64_t y) {
 TORCH_API void init_num_threads();
 
 // Sets the number of threads to be used in parallel region
-TORCH_API void set_num_threads(int);
+TORCH_API void set_num_threads(int /*nthreads*/);
 
 // Returns the maximum number of threads that may be used in a parallel region
 TORCH_API int get_num_threads();
@@ -37,7 +37,7 @@ inline void lazy_init_num_threads() {
   }
 }
 
-TORCH_API void set_thread_num(int);
+TORCH_API void set_thread_num(int /*id*/);
 
 class TORCH_API ThreadIdGuard {
  public:
@@ -130,7 +130,7 @@ inline scalar_t parallel_reduce(
 TORCH_API std::string get_parallel_info();
 
 // Sets number of threads used for inter-op parallelism
-TORCH_API void set_num_interop_threads(int);
+TORCH_API void set_num_interop_threads(int /*nthreads*/);
 
 // Returns the number of threads used for inter-op parallelism
 TORCH_API size_t get_num_interop_threads();
diff --git a/aten/src/ATen/SparseCsrTensorImpl.cpp b/aten/src/ATen/SparseCsrTensorImpl.cpp
index f73d75ab53ad..dec6d2e95960 100644
--- a/aten/src/ATen/SparseCsrTensorImpl.cpp
+++ b/aten/src/ATen/SparseCsrTensorImpl.cpp
@@ -252,7 +252,7 @@ void SparseCsrTensorImpl::set_stride(int64_t dim, int64_t new_stride) {
 void SparseCsrTensorImpl::set_storage_offset(int64_t storage_offset) {
   TORCH_CHECK(false, "Sparse ", at::sparse_csr::layoutToString(layout_, /*upper=*/true), " tensors do not have set_storage_offset.");
 }
-c10::SymBool SparseCsrTensorImpl::sym_is_contiguous_custom(MemoryFormat) const {
+c10::SymBool SparseCsrTensorImpl::sym_is_contiguous_custom(MemoryFormat /*memory_format*/) const {
   TORCH_CHECK(false, "Sparse ", at::sparse_csr::layoutToString(layout_, /*upper=*/true), " tensors do not have is_contiguous");
 }
 } // namespace at
diff --git a/aten/src/ATen/SparseCsrTensorImpl.h b/aten/src/ATen/SparseCsrTensorImpl.h
index 14688163a374..e764f954db33 100644
--- a/aten/src/ATen/SparseCsrTensorImpl.h
+++ b/aten/src/ATen/SparseCsrTensorImpl.h
@@ -32,10 +32,10 @@ struct TORCH_API SparseCsrTensorImpl : public TensorImpl {
 
  public:
   explicit SparseCsrTensorImpl(
-      at::DispatchKeySet,
+      at::DispatchKeySet /*key_set*/,
       at::Device device,
       Layout layout,
-      const caffe2::TypeMeta);
+      const caffe2::TypeMeta /*data_type*/);
 
   void resize_(int64_t nnz, IntArrayRef size);
   void resize_and_clear_(
@@ -86,7 +86,8 @@ struct TORCH_API SparseCsrTensorImpl : public TensorImpl {
 protected:
   IntArrayRef strides_custom() const override;
   SymIntArrayRef sym_strides_custom() const override;
-  SymBool sym_is_contiguous_custom(MemoryFormat) const override;
+  SymBool sym_is_contiguous_custom(
+      MemoryFormat /*memory_format*/) const override;
 
 public:
   void set_size(int64_t dim, int64_t new_size) override;
diff --git a/aten/src/ATen/SparseTensorImpl.h b/aten/src/ATen/SparseTensorImpl.h
index 5ba7b3f1c930..a2c12fcfe8b9 100644
--- a/aten/src/ATen/SparseTensorImpl.h
+++ b/aten/src/ATen/SparseTensorImpl.h
@@ -46,7 +46,9 @@ struct TORCH_API SparseTensorImpl : public TensorImpl {
 
  public:
   // Public for now...
-  explicit SparseTensorImpl(at::DispatchKeySet, const caffe2::TypeMeta);
+  explicit SparseTensorImpl(
+      at::DispatchKeySet /*key_set*/,
+      const caffe2::TypeMeta /*data_type*/);
 
   void release_resources() override;
 
@@ -384,8 +386,8 @@ struct TORCH_API SparseTensorImpl : public TensorImpl {
 
  private:
   explicit SparseTensorImpl(
-      at::DispatchKeySet,
-      const caffe2::TypeMeta,
+      at::DispatchKeySet /*key_set*/,
+      const caffe2::TypeMeta /*data_type*/,
       at::Tensor indices,
       at::Tensor values);
 
diff --git a/aten/src/ATen/TensorIndexing.h b/aten/src/ATen/TensorIndexing.h
index 7785ca861f0e..9291d2e66e5f 100644
--- a/aten/src/ATen/TensorIndexing.h
+++ b/aten/src/ATen/TensorIndexing.h
@@ -112,10 +112,10 @@ TORCH_API std::ostream& operator<<(std::ostream& stream, const Slice& slice);
 // `torch.tensor([1, 2])`) | `torch::tensor({1, 2})`
 struct TORCH_API TensorIndex final {
   // Case 1: `at::indexing::None`
-  TensorIndex(std::nullopt_t) : type_(TensorIndexType::None) {}
+  TensorIndex(std::nullopt_t /*unused*/) : type_(TensorIndexType::None) {}
 
   // Case 2: "..." / `at::indexing::Ellipsis`
-  TensorIndex(at::indexing::EllipsisIndexType)
+  TensorIndex(at::indexing::EllipsisIndexType /*unused*/)
       : type_(TensorIndexType::Ellipsis) {}
   TensorIndex(const char* str) : TensorIndex(at::indexing::Ellipsis) {
     TORCH_CHECK_VALUE(
diff --git a/aten/src/ATen/TensorIterator.h b/aten/src/ATen/TensorIterator.h
index d8eebd4c06a4..d8593a80292b 100644
--- a/aten/src/ATen/TensorIterator.h
+++ b/aten/src/ATen/TensorIterator.h
@@ -250,7 +250,7 @@ struct TORCH_API TensorIteratorBase : public impl::MetaBase {
   using PtrVector = SmallVector;
   using StrideVector = SmallVector;
 
-  void build(TensorIteratorConfig&);
+  void build(TensorIteratorConfig& /*config*/);
 
   // The inner-loop function operates on the fastest moving dimension. It
   // implements element-wise operations in terms of 1-d strided tensors.
@@ -618,20 +618,20 @@ struct TORCH_API TensorIteratorBase : public impl::MetaBase {
 #undef TORCH_DISALLOW_TEMPORARIES
  protected:
   // Mutable reference as it moves tensors out of TensorIteratorConfig
-  void populate_operands(TensorIteratorConfig&);
+  void populate_operands(TensorIteratorConfig& /*config*/);
   void mark_outputs();
-  void mark_resize_outputs(const TensorIteratorConfig&);
-  void compute_mem_overlaps(const TensorIteratorConfig&);
-  void compute_shape(const TensorIteratorConfig&);
-  void compute_strides(const TensorIteratorConfig&);
+  void mark_resize_outputs(const TensorIteratorConfig& /*config*/);
+  void compute_mem_overlaps(const TensorIteratorConfig& /*config*/);
+  void compute_shape(const TensorIteratorConfig& /*config*/);
+  void compute_strides(const TensorIteratorConfig& /*config*/);
   void reorder_dimensions();
   void permute_dimensions(IntArrayRef perm);
-  void compute_types(const TensorIteratorConfig&);
+  void compute_types(const TensorIteratorConfig& /*config*/);
   ScalarType compute_common_dtype();
   void allocate_or_resize_outputs();
-  bool fast_set_up(const TensorIteratorConfig&);
-  FastSetupType compute_fast_setup_type(const TensorIteratorConfig&);
-  void compute_names(const TensorIteratorConfig&);
+  bool fast_set_up(const TensorIteratorConfig& /*config*/);
+  FastSetupType compute_fast_setup_type(const TensorIteratorConfig& /*config*/);
+  void compute_names(const TensorIteratorConfig& /*config*/);
   void propagate_names_to_outputs();
   void coalesce_dimensions();
diff --git a/aten/src/ATen/Utils.h b/aten/src/ATen/Utils.h
index 95a35bd5563a..e9c936b906c6 100644
--- a/aten/src/ATen/Utils.h
+++ b/aten/src/ATen/Utils.h
@@ -20,7 +20,7 @@
 
 namespace at {
 
-TORCH_API int _crash_if_asan(int);
+TORCH_API int _crash_if_asan(int /*arg*/);
 
 // Converts a TensorList (i.e. ArrayRef to vector of TensorImpl*)
 // NB: This is ONLY used by legacy TH bindings, and ONLY used by cat.
diff --git a/aten/src/ATen/autocast_mode.cpp b/aten/src/ATen/autocast_mode.cpp
index 4b8b5f6c5d18..8a50667ee722 100644
--- a/aten/src/ATen/autocast_mode.cpp
+++ b/aten/src/ATen/autocast_mode.cpp
@@ -148,7 +148,7 @@ Tensor cached_cast(at::ScalarType to_type, const Tensor& arg, DeviceType device_
 Banned functions
 *******************************/
 
-static Tensor binary_cross_entropy_banned(const Tensor &, const Tensor &, const std::optional&, int64_t) {
+static Tensor binary_cross_entropy_banned(const Tensor & /*unused*/, const Tensor & /*unused*/, const std::optional& /*unused*/, int64_t /*unused*/) {
   TORCH_CHECK(false,
       "torch.nn.functional.binary_cross_entropy and torch.nn.BCELoss are unsafe to autocast.\n"
       "Many models use a sigmoid layer right before the binary cross entropy layer.\n"
       "In this case, combine the two layers using torch.nn.functional.binary_cross_entropy_with_logits\n"
diff --git a/aten/src/ATen/core/NamedTensor.h b/aten/src/ATen/core/NamedTensor.h
index 81998e160185..52acae90b128 100644
--- a/aten/src/ATen/core/NamedTensor.h
+++ b/aten/src/ATen/core/NamedTensor.h
@@ -27,11 +27,11 @@ struct TORCH_API NamedTensorMeta final : public c10::NamedTensorMetaInterface {
     HasNonWildcard
   };
 
-  explicit NamedTensorMeta(HAS_NON_WILDCARD, DimnameList names)
+  explicit NamedTensorMeta(HAS_NON_WILDCARD /*unused*/, DimnameList names)
       : names_(names.vec()) {
     check_invariants();
   }
-  explicit NamedTensorMeta(HAS_NON_WILDCARD, std::vector&& names)
+  explicit NamedTensorMeta(HAS_NON_WILDCARD /*unused*/, std::vector&& names)
      : names_(std::move(names)) {
     check_invariants();
   }
@@ -52,13 +52,13 @@ struct TORCH_API NamedTensorMeta final : public c10::NamedTensorMetaInterface {
         std::any_of(names_.begin(), names_.end(), [](const Dimname& n) { return !n.isWildcard(); }));
   }
 
-  void set_names(HAS_NON_WILDCARD, DimnameList new_names) {
+  void set_names(HAS_NON_WILDCARD /*unused*/, DimnameList new_names) {
     TORCH_INTERNAL_ASSERT(new_names.size() == names_.size());
     std::copy(new_names.begin(), new_names.end(), names_.begin());
     check_invariants();
   }
 
-  void set_names(HAS_NON_WILDCARD, std::vector&& new_names) {
+  void set_names(HAS_NON_WILDCARD /*unused*/, std::vector&& new_names) {
     TORCH_INTERNAL_ASSERT(new_names.size() == names_.size());
     names_ = std::move(new_names);
     check_invariants();
diff --git a/aten/src/ATen/core/PythonOpRegistrationTrampoline.h b/aten/src/ATen/core/PythonOpRegistrationTrampoline.h
index bec323c7d25b..83b39de34d78 100644
--- a/aten/src/ATen/core/PythonOpRegistrationTrampoline.h
+++ b/aten/src/ATen/core/PythonOpRegistrationTrampoline.h
@@ -13,7 +13,7 @@ class TORCH_API PythonOpRegistrationTrampoline final {
 public:
   // Returns true if you successfully registered yourself (that means
   // you are in the hot seat for doing the operator registrations!)
-  static bool registerInterpreter(c10::impl::PyInterpreter*);
+  static bool registerInterpreter(c10::impl::PyInterpreter* /*interp*/);
 
   // Returns nullptr if no interpreter has been registered yet.
   static c10::impl::PyInterpreter* getInterpreter();
diff --git a/aten/src/ATen/core/TensorBase.h b/aten/src/ATen/core/TensorBase.h
index 63fe4cad5149..1d0a3e73a5a5 100644
--- a/aten/src/ATen/core/TensorBase.h
+++ b/aten/src/ATen/core/TensorBase.h
@@ -100,7 +100,7 @@ class TORCH_API TensorBase {
   // Create a Tensor with a +0 reference count. Special care must be
   // taken to avoid decrementing this reference count at destruction
   // time. Intended to support MaybeOwnedTraits.
-  explicit TensorBase(unsafe_borrow_t, const TensorBase& rhs)
+  explicit TensorBase(unsafe_borrow_t /*unused*/, const TensorBase& rhs)
       : impl_(c10::intrusive_ptr(rhs.impl_.get(), c10::raw::DontIncreaseRefcount{})) {}
   friend MaybeOwnedTraits;
 
@@ -954,7 +954,7 @@ protected:
   c10::intrusive_ptr impl_;
 
 private:
-  TensorBase __dispatch_contiguous(c10::MemoryFormat) const;
+  TensorBase __dispatch_contiguous(c10::MemoryFormat /*memory_format*/) const;
 };
 
 inline DeviceIndex get_device(const TensorBase& self) {
diff --git a/aten/src/ATen/core/boxing/BoxedKernel.h b/aten/src/ATen/core/boxing/BoxedKernel.h
index 62b915885a80..c5e46d8de000 100644
--- a/aten/src/ATen/core/boxing/BoxedKernel.h
+++ b/aten/src/ATen/core/boxing/BoxedKernel.h
@@ -18,10 +18,10 @@ class KernelFunction;
 // implementation notes; notably, this does NOT actually go through the
 // boxing/unboxing codepath.
 TORCH_API void fallthrough_kernel(
-    OperatorKernel*,
-    const OperatorHandle&,
-    DispatchKeySet,
-    Stack*);
+    OperatorKernel* /*unused*/,
+    const OperatorHandle& /*unused*/,
+    DispatchKeySet /*unused*/,
+    Stack* /*unused*/);
 
 // Note [Ambiguity in AutogradOther kernel]
 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -62,10 +62,10 @@ TORCH_API void fallthrough_kernel(
 // than arbitrarily pick one or the other, we just register a kernel that raises
 // an error and let the user decide how to proceed.
 TORCH_API void ambiguous_autogradother_kernel(
-    OperatorKernel*,
-    const OperatorHandle&,
-    DispatchKeySet,
-    Stack*);
+    OperatorKernel* /*unused*/,
+    const OperatorHandle& /*op*/,
+    DispatchKeySet /*unused*/,
+    Stack* /*unused*/);
 
 // Note [named_not_supported_kernel]
 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -75,10 +75,10 @@ TORCH_API void ambiguous_autogradother_kernel(
 // give a good error message in cases when boxing is not supported). When
 // boxing is universally supported this can be removed.
 [[noreturn]] TORCH_API void named_not_supported_kernel(
-    OperatorKernel*,
-    const OperatorHandle&,
-    DispatchKeySet,
-    Stack*);
+    OperatorKernel* /*unused*/,
+    const OperatorHandle& /*op*/,
+    DispatchKeySet /*unused*/,
+    Stack* /*unused*/);
 
 /**
  * BoxedKernel is similar to a std::function storing a boxed kernel.
@@ -185,16 +185,16 @@ class TORCH_API BoxedKernel final {
 
   template
   static void make_boxed_function(
-      OperatorKernel*,
+      OperatorKernel* /*unused*/,
       const OperatorHandle& opHandle,
-      DispatchKeySet,
+      DispatchKeySet /*unused*/,
       Stack* stack);
 
   template
   static void make_boxed_function(
-      OperatorKernel*,
+      OperatorKernel* /*unused*/,
       const OperatorHandle& opHandle,
-      DispatchKeySet,
+      DispatchKeySet /*ks*/,
       Stack* stack);
 
   explicit BoxedKernel(
diff --git a/aten/src/ATen/core/boxing/BoxedKernel_impl.h b/aten/src/ATen/core/boxing/BoxedKernel_impl.h
index 331141bcc8c9..04ba1368f070 100644
--- a/aten/src/ATen/core/boxing/BoxedKernel_impl.h
+++ b/aten/src/ATen/core/boxing/BoxedKernel_impl.h
@@ -11,9 +11,9 @@ inline BoxedKernel::BoxedKernel(
 
 template
 inline void BoxedKernel::make_boxed_function(
-    OperatorKernel*,
+    OperatorKernel* /*unused*/,
    const OperatorHandle& opHandle,
-    DispatchKeySet,
+    DispatchKeySet /*unused*/,
     Stack* stack) {
   // Note that we're dropping the DispatchKeySet argument.
   // See Note [Plumbing Keys Through The Dispatcher 2] for details.
@@ -22,7 +22,7 @@ inline void BoxedKernel::make_boxed_function(
 
 template
 inline void BoxedKernel::make_boxed_function(
-    OperatorKernel*,
+    OperatorKernel* /*unused*/,
     const OperatorHandle& opHandle,
     DispatchKeySet ks,
     Stack* stack) {
diff --git a/aten/src/ATen/core/boxing/KernelFunction.cpp b/aten/src/ATen/core/boxing/KernelFunction.cpp
index c099c456814a..dd2fb32e6817 100644
--- a/aten/src/ATen/core/boxing/KernelFunction.cpp
+++ b/aten/src/ATen/core/boxing/KernelFunction.cpp
@@ -10,7 +10,7 @@ namespace c10 {
 // be handled specially. Its semantics is that it redispatches to the
 // *next* dispatch key that would have been processed, skipping the current
 // one.
-void fallthrough_kernel(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*) {
+void fallthrough_kernel(OperatorKernel* /*unused*/, const OperatorHandle& /*unused*/, DispatchKeySet /*unused*/, Stack* /*unused*/) {
   TORCH_INTERNAL_ASSERT(0,
     "fallthrough_kernel was executed but it should have been short-circuited by the dispatcher. "
     "This could occur if you registered a fallthrough kernel as a override for a specific operator "
@@ -19,7 +19,7 @@ void fallthrough_kernel(OperatorKernel*, const OperatorHandle&, DispatchKeySet,
     "let us know in the bug tracker.");
 }
 
-void ambiguous_autogradother_kernel(OperatorKernel*, const OperatorHandle& op, DispatchKeySet, Stack*) {
+void ambiguous_autogradother_kernel(OperatorKernel* /*unused*/, const OperatorHandle& op, DispatchKeySet /*unused*/, Stack* /*unused*/) {
   TORCH_INTERNAL_ASSERT(0,
     op.operator_name(), " has kernels registered to both CompositeImplicitAutograd and a backend mapped to AutogradOther. "
     "This makes the backend kernel unreachable; the dispatcher will always prefer the CompositeImplicitAutograd lowering "
@@ -32,7 +32,7 @@ void ambiguous_autogradother_kernel(OperatorKernel*, const OperatorHandle& op, D
     "\nCanonical state\n~~~~~~~~~~~\n", op.dumpState(), "\n\n");
 }
 
-void named_not_supported_kernel(OperatorKernel*, const OperatorHandle& op, DispatchKeySet, Stack*) {
+void named_not_supported_kernel(OperatorKernel* /*unused*/, const OperatorHandle& op, DispatchKeySet /*unused*/, Stack* /*unused*/) {
   // DO NOT LOOK AT STACK, YOU HAVE SHORT CIRCUITED BOXING
   // See Note [named_not_supported_kernel]
   TORCH_CHECK(0,
diff --git a/aten/src/ATen/core/boxing/KernelFunction.h b/aten/src/ATen/core/boxing/KernelFunction.h
index 4300217235b8..eb0cf833dfc2 100644
--- a/aten/src/ATen/core/boxing/KernelFunction.h
+++ b/aten/src/ATen/core/boxing/KernelFunction.h
@@ -229,7 +229,7 @@ class TORCH_API KernelFunction final {
   *     &unboxed_func>();
   */
  template
-  static KernelFunction makeFromUnboxedFunction(FuncPtr);
+  static KernelFunction makeFromUnboxedFunction(FuncPtr /*func_ptr*/);
 
   /**
    * Create a KernelFunction from an unboxed function.
@@ -271,7 +271,7 @@ class TORCH_API KernelFunction final {
   std::string dumpState() const;
 
   // For testing internal invariants only
-  bool _equalsBoxedAndUnboxed(const KernelFunction&) const;
+  bool _equalsBoxedAndUnboxed(const KernelFunction& /*other*/) const;
 
   // Register a token to be invalidated when this KernelFunction is destroyed
   void registerToken(std::weak_ptr token) const;
diff --git a/aten/src/ATen/core/boxing/impl/boxing.h b/aten/src/ATen/core/boxing/impl/boxing.h
index 68e25cccd44c..7fbc3b982609 100644
--- a/aten/src/ATen/core/boxing/impl/boxing.h
+++ b/aten/src/ATen/core/boxing/impl/boxing.h
@@ -131,7 +131,7 @@ C10_ALWAYS_INLINE_UNLESS_MOBILE void boxToStack(
   new (dest++) IValue(options.pinned_memory());
 }
 
-inline void boxArgsToStack(IValue*&) {}
+inline void boxArgsToStack(IValue*& /*unused*/) {}
 
 template
 C10_ALWAYS_INLINE_UNLESS_MOBILE void boxArgsToStack(
@@ -185,7 +185,7 @@ struct PopResult> final {
   template
   static Result pop_to_tuple_impl(
       Stack& stack,
-      std::index_sequence) {
+      std::index_sequence /*unused*/) {
     return std::make_tuple((std::move(stack[indices]).template to())...);
   }
 };
diff --git a/aten/src/ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h b/aten/src/ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h
index 20dfde846e64..34b1514f32cd 100644
--- a/aten/src/ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h
+++ b/aten/src/ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h
@@ -561,7 +561,7 @@ struct wrap_kernel_functor_unboxed_<
   // doesn't use &&
   static ReturnType call(
       OperatorKernel* functor,
-      DispatchKeySet,
+      DispatchKeySet /*unused*/,
       ParameterTypes... args) {
     KernelFunctor* functor_ = static_cast(functor);
     // Note [Plumbing Keys Through The Dispatcher 2]
@@ -629,8 +629,8 @@ call_functor_with_args_from_stack_(
     OperatorKernel* functor,
     DispatchKeySet dispatchKeySet,
     Stack* stack,
-    std::index_sequence,
-    guts::typelist::typelist*) {
+    std::index_sequence /*unused*/,
+    guts::typelist::typelist* /*unused*/) {
   (void)(stack); // when sizeof...(ivalue_arg_indices) == 0, this argument would
                  // be unused and we have to silence the compiler warning.
@@ -708,7 +708,7 @@ struct push_outputs, AllowDeprecatedTypes> final {
   static void call_(
       std::tuple&& output,
      Stack* stack,
-      std::index_sequence) {
+      std::index_sequence /*unused*/) {
     torch::jit::push(
         *stack,
         return_to_ivalue::call(
@@ -718,7 +718,7 @@ struct push_outputs, AllowDeprecatedTypes> final {
   static void copy_(
       const std::tuple& output,
       Stack* stack,
-      std::index_sequence) {
+      std::index_sequence /*unused*/) {
     torch::jit::push(
         *stack,
         return_to_ivalue::copy(
@@ -741,7 +741,7 @@ struct make_boxed_from_unboxed_functor final {
 
   static void call(
       OperatorKernel* functor,
-      const OperatorHandle&,
+      const OperatorHandle& /*unused*/,
      DispatchKeySet dispatchKeySet,
       Stack* stack) {
     using ReturnType =
diff --git a/aten/src/ATen/core/builtin_function.h b/aten/src/ATen/core/builtin_function.h
index 5ab1ace1685f..8c837871dff7 100644
--- a/aten/src/ATen/core/builtin_function.h
+++ b/aten/src/ATen/core/builtin_function.h
@@ -63,13 +63,13 @@ struct BuiltinOpFunction : public Function {
 
   bool call(
       Stack& stack,
-      std::optional,
-      c10::function_ref) override {
+      std::optional /*unused*/,
+      c10::function_ref /*unused*/) override {
     run(stack);
     return false;
   }
 
-  bool call(Stack& stack, c10::function_ref)
+  bool call(Stack& stack, c10::function_ref /*unused*/)
       override {
     run(stack);
     return false;
diff --git a/aten/src/ATen/core/dispatch/DispatchKeyExtractor.h b/aten/src/ATen/core/dispatch/DispatchKeyExtractor.h
index ecc4bc7b5d89..dbd00e9c5290 100644
--- a/aten/src/ATen/core/dispatch/DispatchKeyExtractor.h
+++ b/aten/src/ATen/core/dispatch/DispatchKeyExtractor.h
@@ -80,7 +80,8 @@ struct MultiDispatchKeySet : at::IterArgs {
       ts = ts | x.key_set();
     }
   }
-  [[noreturn]] void operator()(at::ArrayRef>) {
+  [[noreturn]] void operator()(
+      at::ArrayRef> /*unused*/) {
     // Just checking that the handling of Tensor?[] didn't change.
     TORCH_INTERNAL_ASSERT(false);
   }
@@ -95,7 +96,7 @@ struct MultiDispatchKeySet : at::IterArgs {
     }
   }
   template
-  void operator()(const T&) {
+  void operator()(const T& /*unused*/) {
     // do nothing
   }
 };
diff --git a/aten/src/ATen/core/dispatch/Dispatcher.h b/aten/src/ATen/core/dispatch/Dispatcher.h
index 57a78ecf03a4..29139a294745 100644
--- a/aten/src/ATen/core/dispatch/Dispatcher.h
+++ b/aten/src/ATen/core/dispatch/Dispatcher.h
@@ -633,7 +633,7 @@ class TypedOperatorHandle final : public OperatorHandle {
 
 namespace detail {
 template
-inline void unused_arg_(const Args&...) {}
+inline void unused_arg_(const Args&... /*unused*/) {}
 
 // CaptureKernelCall is intended to capture return values from Dispatcher
 // unboxed kernel calls. A record function may request to get outputs from the
diff --git a/aten/src/ATen/core/dispatch/OperatorEntry.h b/aten/src/ATen/core/dispatch/OperatorEntry.h
index 59b54ce1d9d3..cc5736ba0e77 100644
--- a/aten/src/ATen/core/dispatch/OperatorEntry.h
+++ b/aten/src/ATen/core/dispatch/OperatorEntry.h
@@ -105,7 +105,7 @@ class TORCH_API OperatorEntry final {
   // versa that is an error. (Refcounting for the registrations is
   // handled in the OperatorHandle in Dispatcher)
   void registerSchema(
-      FunctionSchema&&,
+      FunctionSchema&& /*schema*/,
       std::string&& debug,
       std::vector tags = {});
   void deregisterSchema();
diff --git a/aten/src/ATen/core/dynamic_type.cpp b/aten/src/ATen/core/dynamic_type.cpp
index d4596ed2ca73..2b1a32bd0ac8 100644
--- a/aten/src/ATen/core/dynamic_type.cpp
+++ b/aten/src/ATen/core/dynamic_type.cpp
@@ -177,7 +177,7 @@ bool DynamicType::equals(const Type& rhs) const {
   return equals(*create(rhs));
 }
 
-bool DynamicType::isSubtypeOfExt(const Type& rhs, std::ostream*) const {
+bool DynamicType::isSubtypeOfExt(const Type& rhs, std::ostream* /*why_not*/) const {
   auto other = create(rhs);
   if (tag_ == other->tag_) {
     if (equals(*other)) {
@@ -371,7 +371,7 @@ DynamicTypePtr ivalue::TupleTypeFactory::create(
 }
 
 DynamicTypePtr ivalue::TupleTypeFactory::fallback(
-    const Type&) {
+    const Type& /*unused*/) {
   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false);
   return nullptr;
 }
diff --git a/aten/src/ATen/core/dynamic_type.h b/aten/src/ATen/core/dynamic_type.h
index 2ba841e44e20..ee0d077e5c51 100644
--- a/aten/src/ATen/core/dynamic_type.h
+++ b/aten/src/ATen/core/dynamic_type.h
@@ -138,8 +138,8 @@ class DynamicType : public SharedType {
 
   struct Arguments {
     Arguments() = default;
-    Arguments(c10::ArrayRef);
-    Arguments(const std::vector&, c10::ArrayRef);
+    Arguments(c10::ArrayRef /*args*/);
+    Arguments(const std::vector& /*names*/, c10::ArrayRef /*args*/);
 
     std::vector elems;
   };
@@ -156,15 +156,15 @@ class DynamicType : public SharedType {
   static const TypeKind Kind = TypeKind::DynamicType;
   static TORCH_API DynamicTypePtr create(Type& ty);
 
-  explicit DynamicType(Tag, Arguments);
-  explicit DynamicType(Tag, std::string_view, Arguments);
+  explicit DynamicType(Tag /*tag*/, Arguments /*arguments*/);
+  explicit DynamicType(Tag /*tag*/, std::string_view /*name*/, Arguments /*arguments*/);
 
   DynamicType(DynamicType&& other) = delete;
   DynamicType(const DynamicType&) = delete;
   DynamicType& operator=(const DynamicType&) = delete;
   DynamicType& operator=(DynamicType&&) = delete;
 
-  TypePtr containedType(size_t) const override;
+  TypePtr containedType(size_t /*i*/) const override;
   size_t containedTypeSize() const override;
   Tag tag() const {
     return tag_;
diff --git a/aten/src/ATen/core/function.h b/aten/src/ATen/core/function.h
index 7e8a765a05ab..83db2ec9d71d 100644
--- a/aten/src/ATen/core/function.h
+++ b/aten/src/ATen/core/function.h
@@ -96,15 +96,15 @@ struct TORCH_API Function {
   // Overload for server interpreter, a bailout size is needed for graph
   // executor.
   virtual bool call(
-      Stack&,
-      std::optional,
-      c10::function_ref) {
+      Stack& /*unused*/,
+      std::optional /*unused*/,
+      c10::function_ref /*unused*/) {
     TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false);
     return false;
   }
 
   // Overload for mobile interpreter.
-  virtual bool call(Stack&, c10::function_ref) {
+  virtual bool call(Stack& /*unused*/, c10::function_ref /*unused*/) {
     TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false);
     return false;
   }
diff --git a/aten/src/ATen/core/ivalue.h b/aten/src/ATen/core/ivalue.h
index ab2039e05820..f115b5a6a7c3 100644
--- a/aten/src/ATen/core/ivalue.h
+++ b/aten/src/ATen/core/ivalue.h
@@ -847,7 +847,7 @@ struct TORCH_API IValue final {
   IValue(std::optional v);
   template = nullptr>
   IValue(c10::OptionalArrayRef v);
-  IValue(std::nullopt_t);
+  IValue(std::nullopt_t /*unused*/);
 
   // ClassType
   IValue(c10::intrusive_ptr v);
diff --git a/aten/src/ATen/core/ivalue_inl.h b/aten/src/ATen/core/ivalue_inl.h
index 1251c4c0c210..89759560c3ea 100644
--- a/aten/src/ATen/core/ivalue_inl.h
+++ b/aten/src/ATen/core/ivalue_inl.h
@@ -660,7 +660,7 @@ struct TORCH_API TupleTypeFactory {
 template <>
 struct TORCH_API TupleTypeFactory {
   static DynamicTypePtr create(const std::vector& elemTypes);
-  static DynamicTypePtr fallback(const Type&);
+  static DynamicTypePtr fallback(const Type& /*unused*/);
 };
 
 struct TORCH_API Tuple : c10::intrusive_ptr_target {
@@ -1682,7 +1682,7 @@ struct ivalue::EnumHolder : c10::intrusive_ptr_target {
 namespace detail {
 
 struct _guarded_unsigned_long_unique_dummy final {
-  _guarded_unsigned_long_unique_dummy(int64_t){}
+  _guarded_unsigned_long_unique_dummy(int64_t /*unused*/){}
 };
 using _guarded_unsigned_long = std::conditional_t<
     std::is_same_v ||
@@ -1776,7 +1776,7 @@ template
 // native_functions.yaml still return std::vector.
 // C10_DEPRECATED_MESSAGE("IValues based on std::vector are potentially slow
 // and deprecated. Please use torch::List instead.")
-std::vector generic_to(IValue ivalue, _fake_type>) {
+std::vector generic_to(IValue ivalue, _fake_type> /*unused*/) {
   // We need to do a deep copy of the vector because there might be other
   // references to this same IValue that also use the list. We can't just
   // move the elements out.
@@ -1826,18 +1826,18 @@ c10::intrusive_ptr IValue::toCustomClass() const& {
 }
 
 template
-T generic_to(IValue ivalue, _fake_type) {
+T generic_to(IValue ivalue, _fake_type /*unused*/) {
   using ElemType = typename std::remove_pointer::type::element_type;
   return std::move(ivalue).template toCustomClass();
 }
 
 template
-tagged_capsule generic_to(IValue ivalue, _fake_type>) {
+tagged_capsule generic_to(IValue ivalue, _fake_type> /*unused*/) {
   return tagged_capsule{std::move(ivalue)};
 }
 
 template
-c10::List generic_to(IValue ivalue, _fake_type>) {
+c10::List generic_to(IValue ivalue, _fake_type> /*unused*/) {
   return impl::toTypedList(std::move(ivalue).toList());
 }
 
@@ -1867,7 +1867,7 @@ std::vector createVectorFromList(const c10::List& impl) {
 }
 
 template
-OptionalArray generic_to(IValue ivalue, _fake_type>) {
+OptionalArray generic_to(IValue ivalue, _fake_type> /*unused*/) {
   if (ivalue.isNone()) {
     return {};
   }
@@ -1880,8 +1880,8 @@ namespace detail {
 template
 std::array generic_to_array(
     IValue ivalue,
-    _fake_type>,
-    std::index_sequence) {
+    _fake_type> /*unused*/,
+    std::index_sequence /*unused*/) {
   // We need to do a deep copy of the array because there might be other
   // references to this same IValue that also use the list. We can't just
   // move the elements out.
@@ -1906,7 +1906,7 @@ std::array generic_to(
 template
 c10::Dict generic_to(
     IValue ivalue,
-    _fake_type>) {
+    _fake_type> /*unused*/) {
   return impl::toTypedDict(std::move(ivalue).toGenericDict());
 }
 
@@ -1915,7 +1915,7 @@ C10_DEPRECATED_MESSAGE(
     "IValues based on std::unordered_map are slow and deprecated. Please use c10::Dict instead.")
 std::unordered_map generic_to(
     IValue ivalue,
-    _fake_type>) {
+    _fake_type> /*unused*/) {
   std::unordered_map specialized_dict;
 
   for (const auto& item : std::move(ivalue).toGenericDict()) {
@@ -1926,7 +1926,7 @@ std::unordered_map generic_to(
 }
 
 template
-std::optional generic_to(IValue ivalue, _fake_type>) {
+std::optional generic_to(IValue ivalue, _fake_type> /*unused*/) {
   if (ivalue.isNone()) {
     return std::nullopt;
   }
@@ -1937,7 +1937,7 @@ namespace detail {
 template
 Tuple generic_to_tuple_impl(
     const ivalue::TupleElements& t,
-    std::index_sequence) {
+    std::index_sequence /*unused*/) {
   return std::make_tuple(
       t[INDEX].to::type>()...);
 }
@@ -1951,7 +1951,7 @@ template <
         std::is_lvalue_reference...,
         std::negation>...>,
     std::nullptr_t> = nullptr>
-std::tuple generic_to(const IValue& ivalue, _fake_type>) {
+std::tuple generic_to(const IValue& ivalue, _fake_type> /*unused*/) {
   const auto& vals = ivalue.toTupleRef().elements();
   TORCH_CHECK(vals.size() == sizeof...(Args));
   return detail::generic_to_tuple_impl>(vals, Indices{});
@@ -2311,7 +2311,7 @@ inline IValue::IValue(std::optional v) : IValue() {
   }
 }
 
-inline IValue::IValue(std::nullopt_t) : IValue() {}
+inline IValue::IValue(std::nullopt_t /*unused*/) : IValue() {}
 
 inline IValue::IValue(c10::intrusive_ptr v)
     : tag(Tag::Object) {
@@ -2482,15 +2482,15 @@ namespace ivalue {
 namespace detail {
 
 template
-IValue from_(T&& x, std::true_type) {
+IValue from_(T&& x, std::true_type /*unused*/) {
   return IValue(std::forward(x));
 }
 template
-IValue from_(c10::intrusive_ptr x, std::false_type) {
+IValue from_(c10::intrusive_ptr x, std::false_type /*unused*/) {
   return IValue(std::move(x));
 }
 template
-IValue from_(T&& /*x*/, std::false_type) {
+IValue from_(T&& /*x*/, std::false_type /*unused*/) {
   static_assert(
      guts::false_t::value,
       "You are calling from with a type that it doesn't support, and isn't a potential custom class (ie: is an intrusive_ptr)");
@@ -2546,19 +2546,19 @@ struct MaybeOwnedTraits {
     return &borrow;
   }
 
-  static bool debugBorrowIsValid(const borrow_type&) {
+  static bool debugBorrowIsValid(const borrow_type& /*unused*/) {
     return true;
   }
 };
 
 template <>
 struct IValue::TagType {
-  static TORCH_API c10::TypePtr get(const IValue&);
+  static TORCH_API c10::TypePtr get(const IValue& /*v*/);
 };
 
 template <>
 struct IValue::TagType {
-  static TORCH_API c10::TypePtr get(const IValue&);
+  static TORCH_API c10::TypePtr get(const IValue& /*v*/);
 };
 
 template
diff --git a/aten/src/ATen/core/op_registration/infer_schema.h b/aten/src/ATen/core/op_registration/infer_schema.h
index a393e0290458..0ee79ed85930 100644
--- a/aten/src/ATen/core/op_registration/infer_schema.h
+++ b/aten/src/ATen/core/op_registration/infer_schema.h
@@ -44,7 +44,7 @@ constexpr int checkStaticTypes() {
 }
 
 template
-constexpr std::array createArgumentVectorFromTypes(std::index_sequence) {
+constexpr std::array createArgumentVectorFromTypes(std::index_sequence /*unused*/) {
   return (
       // Check types for common errors
       checkStaticTypes(),
diff --git a/aten/src/ATen/core/operator_name.h b/aten/src/ATen/core/operator_name.h
index 22e1f427b632..4c138ee50456 100644
--- a/aten/src/ATen/core/operator_name.h
+++ b/aten/src/ATen/core/operator_name.h
@@ -83,7 +83,7 @@ inline bool operator!=(const OperatorName& lhs, const OperatorName& rhs) {
 }
 
 TORCH_API std::string toString(const OperatorName& opName);
-TORCH_API std::ostream& operator<<(std::ostream&, const OperatorName&);
+TORCH_API std::ostream& operator<<(std::ostream& /*os*/, const OperatorName& /*opName*/);
 
 } // namespace c10
diff --git a/aten/src/ATen/core/type_ptr.h b/aten/src/ATen/core/type_ptr.h
index 0859e04c7d2d..011a1750ecaa 100644
--- a/aten/src/ATen/core/type_ptr.h
+++ b/aten/src/ATen/core/type_ptr.h
@@ -16,7 +16,7 @@ class SingletonTypePtr {
   /* implicit */ SingletonTypePtr(T* p) : repr_(p) {}
 
   // We need this to satisfy Pybind11, but it shouldn't be hit.
-  explicit SingletonTypePtr(std::shared_ptr) { TORCH_CHECK(false); }
+  explicit SingletonTypePtr(std::shared_ptr /*unused*/) { TORCH_CHECK(false); }
 
   using element_type = typename std::shared_ptr::element_type;
 
diff --git a/aten/src/ATen/cpu/vec/vec256/vec256_complex_double.h b/aten/src/ATen/cpu/vec/vec256/vec256_complex_double.h
index ba57ca034e9a..735315bee768 100644
--- a/aten/src/ATen/cpu/vec/vec256/vec256_complex_double.h
+++ b/aten/src/ATen/cpu/vec/vec256/vec256_complex_double.h
@@ -342,19 +342,19 @@ class Vectorized> {
     return _mm256_cmp_pd(values, other.values, _CMP_NEQ_UQ);
   }
   Vectorized> operator<(
-      const Vectorized>&) const {
+      const Vectorized>& /*unused*/) const {
     TORCH_CHECK(false, "not supported for complex numbers");
   }
   Vectorized> operator<=(
-      const Vectorized>&) const {
+      const Vectorized>& /*unused*/) const {
     TORCH_CHECK(false, "not supported for complex numbers");
   }
   Vectorized> operator>(
-      const Vectorized>&) const {
+      const Vectorized>& /*unused*/) const {
     TORCH_CHECK(false, "not supported for complex numbers");
   }
   Vectorized> operator>=(
-      const Vectorized>&) const {
+      const Vectorized>& /*unused*/) const {
     TORCH_CHECK(false, "not supported for complex numbers");
   }
 
diff --git a/aten/src/ATen/cuda/detail/CUDAHooks.h b/aten/src/ATen/cuda/detail/CUDAHooks.h
index 729a4c45f31f..8d3d1db00392 100644
--- a/aten/src/ATen/cuda/detail/CUDAHooks.h
+++ b/aten/src/ATen/cuda/detail/CUDAHooks.h
@@ -17,7 +17,7 @@ TORCH_CUDA_CPP_API void set_magma_init_fn(void (*magma_init_fn)());
 
 // The real implementation of CUDAHooksInterface
 struct CUDAHooks : public at::CUDAHooksInterface {
-  CUDAHooks(at::CUDAHooksArgs) {}
+  CUDAHooks(at::CUDAHooksArgs /*unused*/) {}
   void init() const override;
   Device getDeviceFromPtr(void* data) const override;
   bool isPinnedPtr(const void* data) const override;
diff --git a/aten/src/ATen/cuda/tunable/TunableOp.h b/aten/src/ATen/cuda/tunable/TunableOp.h
index 6ca9e213e148..b4b983dc739c 100644
--- a/aten/src/ATen/cuda/tunable/TunableOp.h
+++ b/aten/src/ATen/cuda/tunable/TunableOp.h
@@ -29,7 +29,7 @@ template
 class Callable {
  public:
   virtual ~Callable() = default;
-  virtual TuningStatus Call(const ParamsT*) {
+  virtual TuningStatus Call(const ParamsT* /*unused*/) {
     return FAIL;
   }
   virtual TuningStatus IsSupported(const ParamsT* params) {
diff --git a/aten/src/ATen/detail/HPUHooksInterface.h b/aten/src/ATen/detail/HPUHooksInterface.h
index 8cf9502a7e1b..3240ff4dac13 100644
--- a/aten/src/ATen/detail/HPUHooksInterface.h
+++ b/aten/src/ATen/detail/HPUHooksInterface.h
@@ -25,7 +25,7 @@ struct TORCH_API HPUHooksInterface : AcceleratorHooksInterface {
         false,
         "Cannot get device of pointer on HPU without HPU backend");
   }
 
-  bool isPinnedPtr(const void*) const override {
+  bool isPinnedPtr(const void* /*data*/) const override {
     return false;
   }
 
diff --git a/aten/src/ATen/functorch/BatchRulesHelper.h b/aten/src/ATen/functorch/BatchRulesHelper.h
index 70fbf3135a3c..ee23a0320f7c 100644
--- a/aten/src/ATen/functorch/BatchRulesHelper.h
+++ b/aten/src/ATen/functorch/BatchRulesHelper.h
@@ -410,7 +410,7 @@ struct ExistingBdimBatchRuleHelper
-Tensor& unary_inplace_batch_rule(Tensor& self, std::optional, ExtraArgs... extra_args) {
+Tensor& unary_inplace_batch_rule(Tensor& self, std::optional /*unused*/, ExtraArgs... extra_args) {
   INVOKE(self, Method)(std::forward(extra_args)...);
   return self;
 }
diff --git a/aten/src/ATen/metal/Context.h b/aten/src/ATen/metal/Context.h
index 1f977cf50d9e..e4c6da738e0d 100644
--- a/aten/src/ATen/metal/Context.h
+++ b/aten/src/ATen/metal/Context.h
@@ -18,7 +18,7 @@ extern std::atomic g_metal_impl_registry;
 
 class MetalImplRegistrar {
  public:
-  explicit MetalImplRegistrar(MetalInterface*);
+  explicit MetalImplRegistrar(MetalInterface* /*impl*/);
 };
 
 at::Tensor& metal_copy_(at::Tensor& self, const at::Tensor& src);
diff --git a/aten/src/ATen/native/BatchLinearAlgebra.cpp b/aten/src/ATen/native/BatchLinearAlgebra.cpp
index d323e54a95ab..6669357cda45 100644
--- a/aten/src/ATen/native/BatchLinearAlgebra.cpp
+++ b/aten/src/ATen/native/BatchLinearAlgebra.cpp
@@ -2060,7 +2060,7 @@ std::tuple linalg_lu_factor(const Tensor& A, bool pivot) {
 }
 
 // TODO Deprecate this function in favour of linalg_lu_factor_ex
-std::tuple _lu_with_info(const Tensor& self, bool compute_pivots, bool) {
+std::tuple _lu_with_info(const Tensor& self, bool compute_pivots, bool /*unused*/) {
   TORCH_WARN_ONCE(
     "torch.lu is deprecated in favor of torch.linalg.lu_factor / torch.linalg.lu_factor_ex and will be ",
     "removed in a future PyTorch release.\n",
diff --git a/aten/src/ATen/native/SharedReduceOps.h b/aten/src/ATen/native/SharedReduceOps.h
index 1de72abd5886..15794040bf39 100644
--- a/aten/src/ATen/native/SharedReduceOps.h
+++ b/aten/src/ATen/native/SharedReduceOps.h
@@ -346,17 +346,17 @@ template
 struct AbsSwitch {};
 
 template
-inline C10_DEVICE acc_t abs_if_complex(scalar_t data, AbsSwitch) {
+inline C10_DEVICE acc_t abs_if_complex(scalar_t data, AbsSwitch /*unused*/) {
   return static_cast(data);
 }
 
 template
-inline C10_DEVICE acc_t abs_if_complex(std::complex data, AbsSwitch) {
+inline C10_DEVICE acc_t abs_if_complex(std::complex data, AbsSwitch /*unused*/) {
   return static_cast(std::abs(data));
 }
 
 template
-inline C10_DEVICE acc_t abs_if_complex(c10::complex data, AbsSwitch) {
+inline C10_DEVICE acc_t abs_if_complex(c10::complex data, AbsSwitch /*unused*/) {
   return static_cast(std::abs(at::opmath_type>(data)));
 }
 
diff --git a/aten/src/ATen/native/TensorCompare.cpp b/aten/src/ATen/native/TensorCompare.cpp
index 29cb06ccf536..c6126eda61e7 100644
--- a/aten/src/ATen/native/TensorCompare.cpp
+++ b/aten/src/ATen/native/TensorCompare.cpp
@@ -846,7 +846,7 @@ TORCH_IMPL_FUNC(clamp_Tensor_out)
 (const Tensor& self,
  const OptionalTensorRef min,
  const OptionalTensorRef max,
- const Tensor&) {
+ const Tensor& /*unused*/) {
   if (min && max) {
     clamp_stub(device_type(), *this);
   } else if (min) {
diff --git a/aten/src/ATen/native/cpu/DepthwiseConvKernel.cpp b/aten/src/ATen/native/cpu/DepthwiseConvKernel.cpp
index 6526a4308221..1f9a8ff1097d 100644
--- a/aten/src/ATen/native/cpu/DepthwiseConvKernel.cpp
+++ b/aten/src/ATen/native/cpu/DepthwiseConvKernel.cpp
@@ -452,11 +452,11 @@ void convolution_depthwise3x3_winograd_impl(
 #else
 
 void convolution_depthwise3x3_winograd_impl(
-    const Arguments&,
-    const float* const,
-    const float* const,
-    const float* const,
-    float* const) {
+    const Arguments& /*unused*/,
+    const float* const /*unused*/,
+    const float* const /*unused*/,
+    const float* const /*unused*/,
+    float* const /*unused*/) {
 }
 
 #endif /* __ARM_NEON__ */
diff --git a/aten/src/ATen/native/cpu/Loops.h b/aten/src/ATen/native/cpu/Loops.h
index 83b51a998563..aad618a258a3 100644
--- a/aten/src/ATen/native/cpu/Loops.h
+++ b/aten/src/ATen/native/cpu/Loops.h
@@ -46,7 +46,7 @@ using namespace vec;
 template
 typename traits::ArgsTuple
 dereference_impl(char* C10_RESTRICT data[], const int64_t* strides, int64_t i,
-                 std::index_sequence) {
+                 std::index_sequence /*unused*/) {
   return std::make_tuple(
       c10::load::type>(
           data[INDEX] + i * strides[INDEX])...);
@@ -65,7 +65,7 @@
 dereference_vec_impl(char* C10_RESTRICT data[],
                      const typename traits::result_type& opt_scalar,
                      size_t S,
                      int64_t i,
-                     std::index_sequence) {
+                     std::index_sequence /*unused*/) {
   using Vec = typename traits::result_type;
   using scalar_t = typename Vec::value_type;
   return std::make_tuple(
@@ -231,7 +231,7 @@ vectorized_loop(char** C10_RESTRICT data_, int64_t n, int64_t S, func_t&& op, ve
 template
 inline void unroll_contiguous_scalar_checks(
     const int64_t* /*strides*/,
-    std::index_sequence<>,
+    std::index_sequence<> /*unused*/,
     cb_t&& cb) {
   cb(0);
 }
@@ -239,7 +239,7 @@
 template
 inline void unroll_contiguous_scalar_checks(
     const int64_t* strides,
-    std::index_sequence,
+    std::index_sequence /*unused*/,
     cb_t&& cb) {
   if (is_contiguous_scalar(strides)) {
     cb(INDEX0 + 1);
diff --git a/aten/src/ATen/native/cuda/linalg/BatchLinearAlgebraLib.h b/aten/src/ATen/native/cuda/linalg/BatchLinearAlgebraLib.h
index 4ab411d9a025..c1785d61879d 100644
--- a/aten/src/ATen/native/cuda/linalg/BatchLinearAlgebraLib.h
+++ b/aten/src/ATen/native/cuda/linalg/BatchLinearAlgebraLib.h
@@ -86,7 +86,7 @@ namespace cuda {
 namespace detail {
 struct LinalgDispatch {
   Tensor (*cholesky_solve_helper)(const Tensor& self, const Tensor& A, bool upper);
 };
-C10_EXPORT void registerLinalgDispatch(const LinalgDispatch&);
+C10_EXPORT void registerLinalgDispatch(const LinalgDispatch& /*disp_*/);
 }} // namespace cuda::detail
 #endif
diff --git a/aten/src/ATen/native/quantized/AffineQuantizerBase.h b/aten/src/ATen/native/quantized/AffineQuantizerBase.h
index a0cfafdb9905..b38ec0b47e5e 100644
--- a/aten/src/ATen/native/quantized/AffineQuantizerBase.h
+++ b/aten/src/ATen/native/quantized/AffineQuantizerBase.h
@@ -31,7 +31,7 @@ TORCH_API float dequantize_vec(
     float* dst,
     size_t count = 8);
 template
-TORCH_API DST_T requantize_val(double, int64_t, double, int64_t, SRC_T src);
+TORCH_API DST_T requantize_val(double /*src_scale*/, int64_t /*src_zero_point*/, double /*dst_scale*/, int64_t /*dst_zero_point*/, SRC_T src);
 
 // Given a multiplier and a zero_point, requantize int32_t computed values back
 // to quantized values. See comment above
diff --git a/aten/src/ATen/native/quantized/TensorFactories.cpp b/aten/src/ATen/native/quantized/TensorFactories.cpp
index b2eb10bd4e9f..75405c51bd0a 100644
--- a/aten/src/ATen/native/quantized/TensorFactories.cpp
+++ b/aten/src/ATen/native/quantized/TensorFactories.cpp
@@ -104,27 +104,27 @@ Tensor empty_strided_unknown_quantized(
 
 // Provide better error message if dtype is wrong
 Tensor empty_affine_quantized_other_backends_stub(
-    IntArrayRef,
-    std::optional,
-    std::optional,
-    std::optional,
-    std::optional,
-    double,
-    int64_t,
-    std::optional) {
+    IntArrayRef /*unused*/,
+    std::optional /*unused*/,
+    std::optional /*unused*/,
+    std::optional /*unused*/,
+    std::optional /*unused*/,
+    double /*unused*/,
+    int64_t /*unused*/,
+    std::optional /*unused*/) {
   TORCH_CHECK(false, "Creation of quantized tensor requires quantized dtype like torch.quint8");
 }
 
 Tensor empty_per_channel_affine_quantized_other_backends_stub(
-    IntArrayRef,
-    const Tensor&,
-    const Tensor&,
-    int64_t,
-    std::optional,
-    std::optional,
-    std::optional,
-    std::optional,
-    std::optional) {
+    IntArrayRef /*unused*/,
+    const Tensor& /*unused*/,
+    const Tensor& /*unused*/,
+    int64_t /*unused*/,
+    std::optional /*unused*/,
+    std::optional /*unused*/,
+    std::optional /*unused*/,
+    std::optional /*unused*/,
+    std::optional /*unused*/) {
   TORCH_CHECK(false, "Creation of quantized tensor requires quantized dtype like torch.quint8");
 }
 
diff --git a/aten/src/ATen/native/xnnpack/Shim.cpp b/aten/src/ATen/native/xnnpack/Shim.cpp
index 03030d7826c6..de255fa6b6fc 100644
--- a/aten/src/ATen/native/xnnpack/Shim.cpp
+++ b/aten/src/ATen/native/xnnpack/Shim.cpp
@@ -29,63 +29,63 @@ bool available() {
 }
 
 bool use_convolution2d(
-    const Tensor&,
-    const Tensor&,
-    const at::OptionalIntArrayRef,
-    const IntArrayRef,
-    const IntArrayRef,
-    const IntArrayRef,
-    const int64_t,
-    bool) {
+    const Tensor& /*unused*/,
+    const Tensor& /*unused*/,
+    const at::OptionalIntArrayRef /*unused*/,
+    const IntArrayRef /*unused*/,
+    const IntArrayRef /*unused*/,
+    const IntArrayRef /*unused*/,
+    const int64_t /*unused*/,
+    bool /*unused*/) {
   return false;
 }
 
 Tensor convolution2d(
-    const Tensor&,
-    const Tensor&,
-    const Tensor&,
-    const IntArrayRef,
-    const IntArrayRef,
-    const IntArrayRef,
-    const int64_t) {
+    const Tensor& /*unused*/,
+    const Tensor& /*unused*/,
+    const Tensor& /*unused*/,
+    const IntArrayRef /*unused*/,
+    const IntArrayRef /*unused*/,
+    const IntArrayRef /*unused*/,
+    const int64_t /*unused*/) {
   TORCH_CHECK(false, internal::kError);
 }
 
 bool use_linear(
-    const Tensor&,
-    const Tensor&,
-    const Tensor&) {
+    const Tensor& /*unused*/,
+    const Tensor& /*unused*/,
+    const Tensor& /*unused*/) {
   return false;
 }
 
 Tensor linear(
-    const Tensor&,
-    const Tensor&,
-    const Tensor&) {
+    const Tensor& /*unused*/,
+    const Tensor& /*unused*/,
+    const Tensor& /*unused*/) {
   TORCH_CHECK(false, internal::kError);
 }
 
 bool use_max_pool2d(
-    const Tensor&,
-    const IntArrayRef,
-    const IntArrayRef,
-    IntArrayRef,
-    const IntArrayRef,
-    const bool,
-    const float,
-    const float) {
+    const Tensor& /*unused*/,
+    const IntArrayRef /*unused*/,
+    const IntArrayRef /*unused*/,
+    IntArrayRef /*unused*/,
+    const IntArrayRef /*unused*/,
+    const bool /*unused*/,
+    const float /*unused*/,
+    const float /*unused*/) {
   return false;
 }
 
 Tensor max_pool2d(
-    const Tensor&,
-    const IntArrayRef,
-    const IntArrayRef,
-    IntArrayRef,
-    const IntArrayRef,
-    const bool,
-    const float,
-    const float) {
+    const Tensor& /*unused*/,
+    const IntArrayRef /*unused*/,
IntArrayRef /*unused*/, + IntArrayRef /*unused*/, + const IntArrayRef /*unused*/, + const bool /*unused*/, + const float /*unused*/, + const float /*unused*/) { TORCH_CHECK(false, internal::kError); } diff --git a/aten/src/ATen/ops/from_blob.h b/aten/src/ATen/ops/from_blob.h index 63b15ef5ca1b..2853d31ec24b 100644 --- a/aten/src/ATen/ops/from_blob.h +++ b/aten/src/ATen/ops/from_blob.h @@ -5,7 +5,7 @@ namespace at { namespace detail { -inline void noopDelete(void*) {} +inline void noopDelete(void* /*unused*/) {} } // namespace detail diff --git a/aten/src/ATen/vulkan/Context.h b/aten/src/ATen/vulkan/Context.h index d6619527c69b..f8eef31c74bd 100644 --- a/aten/src/ATen/vulkan/Context.h +++ b/aten/src/ATen/vulkan/Context.h @@ -18,7 +18,7 @@ extern std::atomic g_vulkan_impl_registry; class VulkanImplRegistrar { public: - explicit VulkanImplRegistrar(VulkanImplInterface*); + explicit VulkanImplRegistrar(VulkanImplInterface* /*impl*/); }; at::Tensor& vulkan_copy_(at::Tensor& self, const at::Tensor& src); diff --git a/c10/core/CPUAllocator.cpp b/c10/core/CPUAllocator.cpp index 4330bb63c796..c923663675d6 100644 --- a/c10/core/CPUAllocator.cpp +++ b/c10/core/CPUAllocator.cpp @@ -154,7 +154,7 @@ class DefaultMobileCPUAllocator final : public at::Allocator { } }; -void NoDelete(void*) {} +void NoDelete(void* /*unused*/) {} at::Allocator* GetCPUAllocator() { return GetAllocator(DeviceType::CPU); diff --git a/c10/core/CPUAllocator.h b/c10/core/CPUAllocator.h index 98debb9db50d..656adc6b14fe 100644 --- a/c10/core/CPUAllocator.h +++ b/c10/core/CPUAllocator.h @@ -17,7 +17,7 @@ namespace c10 { using MemoryDeleter = void (*)(void*); // A helper function that is basically doing nothing. -C10_API void NoDelete(void*); +C10_API void NoDelete(void* /*unused*/); // A simple struct that is used to report C10's memory allocation, // deallocation status and out-of-memory events to the profiler diff --git a/c10/core/DispatchKey.h b/c10/core/DispatchKey.h index 30aad0aeb00a..c513c4e8e390 100644 --- a/c10/core/DispatchKey.h +++ b/c10/core/DispatchKey.h @@ -590,10 +590,12 @@ constexpr uint16_t num_runtime_entries = num_functionality_keys + constexpr uint16_t full_backend_mask = (static_cast(1) << num_backends) - 1; -C10_API const char* toString(DispatchKey); -C10_API const char* toString(BackendComponent); -C10_API std::ostream& operator<<(std::ostream&, DispatchKey); -C10_API std::ostream& operator<<(std::ostream&, BackendComponent); +C10_API const char* toString(DispatchKey /*t*/); +C10_API const char* toString(BackendComponent /*t*/); +C10_API std::ostream& operator<<(std::ostream& /*str*/, DispatchKey /*rhs*/); +C10_API std::ostream& operator<<( + std::ostream& /*str*/, + BackendComponent /*rhs*/); C10_API DispatchKey getAutogradKeyFromBackend(BackendComponent k); diff --git a/c10/core/DispatchKeySet.h b/c10/core/DispatchKeySet.h index dea4c5a55de7..d46bf7efeed6 100644 --- a/c10/core/DispatchKeySet.h +++ b/c10/core/DispatchKeySet.h @@ -172,10 +172,10 @@ class DispatchKeySet final { // use of DispatchKeySet in TLS requires this. constexpr DispatchKeySet() = default; - constexpr DispatchKeySet(Full) + constexpr DispatchKeySet(Full /*unused*/) : repr_((1ULL << (num_backends + num_functionality_keys - 1)) - 1) {} - constexpr DispatchKeySet(FullAfter, DispatchKey t) + constexpr DispatchKeySet(FullAfter /*unused*/, DispatchKey t) // LSB after t are OK, but not t itself. // "functionalities" have a notion of ordering (e.g. Autograd > Sparse > // Quantized > Dense). But backends don't really have an ordering. 
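
The edit repeated throughout this patch is mechanical: an intentionally unused parameter keeps its type, and its name (or the placeholder "unused") moves into a block comment. That silences -Wunused-parameter at the definition without changing the signature, overload resolution, or ABI, and it keeps the parameter's role readable in the header. A minimal self-contained sketch of the idiom, with hypothetical names rather than code from this patch:

// unused_param.cpp -- compile with: g++ -std=c++17 -Wall -Wextra -c unused_param.cpp
namespace demo {

// int scale(int value, int flags) { return value * 2; }
// ^ written this way, -Wunused-parameter warns about 'flags'.

// Commenting out the name documents the slot while producing no warning;
// call sites are unaffected because the signature is identical.
int scale(int value, int /*flags*/) {
  return value * 2;
}

} // namespace demo

C++17's [[maybe_unused]] attribute is an equivalent alternative; the torch/library.h hunks later in this patch use it for tags parameters that are only conditionally read.
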
@@ -191,7 +191,7 @@ class DispatchKeySet final {
 
   // Public version of DispatchKeySet(uint64_t) API; external users
   // must be explicit when they do this!
-  constexpr DispatchKeySet(Raw, uint64_t x) : repr_(x) {}
+  constexpr DispatchKeySet(Raw /*unused*/, uint64_t x) : repr_(x) {}
 
   constexpr explicit DispatchKeySet(BackendComponent k) {
     if (k == BackendComponent::InvalidBit) {
@@ -631,8 +631,8 @@ class DispatchKeySet final {
   }
 };
 
-C10_API std::string toString(DispatchKeySet);
-C10_API std::ostream& operator<<(std::ostream&, DispatchKeySet);
+C10_API std::string toString(DispatchKeySet /*ts*/);
+C10_API std::ostream& operator<<(std::ostream& /*os*/, DispatchKeySet /*ts*/);
 
 inline int getDispatchTableIndexForDispatchKey(DispatchKey k) {
   return DispatchKeySet(k).getDispatchTableIndexForDispatchKeySet();
diff --git a/c10/core/SafePyObject.h b/c10/core/SafePyObject.h
index 6102aed8c0ba..1ec0cdb6751e 100644
--- a/c10/core/SafePyObject.h
+++ b/c10/core/SafePyObject.h
@@ -60,7 +60,7 @@ struct C10_API SafePyObject {
   c10::impl::PyInterpreter& pyinterpreter() const {
     return *pyinterpreter_;
   }
-  PyObject* ptr(const c10::impl::PyInterpreter*) const;
+  PyObject* ptr(const c10::impl::PyInterpreter* /*interpreter*/) const;
 
   // stop tracking the current object, and return it
   PyObject* release() {
@@ -103,7 +103,7 @@ struct C10_API SafePyHandle {
   c10::impl::PyInterpreter& pyinterpreter() const {
     return *pyinterpreter_;
   }
-  PyObject* ptr(const c10::impl::PyInterpreter*) const;
+  PyObject* ptr(const c10::impl::PyInterpreter* /*interpreter*/) const;
   void reset() {
     data_ = nullptr;
     pyinterpreter_ = nullptr;
diff --git a/c10/core/Scalar.h b/c10/core/Scalar.h
index 646a1dde3994..d6701ec2c0e6 100644
--- a/c10/core/Scalar.h
+++ b/c10/core/Scalar.h
@@ -428,7 +428,7 @@ class C10_API Scalar {
       typename std::enable_if_t<
           std::is_integral_v<T> && !std::is_same_v<T, bool>,
          bool>* = nullptr>
-  Scalar(T vv, bool) : tag(Tag::HAS_i) {
+  Scalar(T vv, bool /*unused*/) : tag(Tag::HAS_i) {
     v.i = convert<decltype(v.i), T>(vv);
   }
 
@@ -437,14 +437,14 @@ class C10_API Scalar {
       typename std::enable_if_t<
          !std::is_integral_v<T> && !c10::is_complex<T>::value,
          bool>* = nullptr>
-  Scalar(T vv, bool) : tag(Tag::HAS_d) {
+  Scalar(T vv, bool /*unused*/) : tag(Tag::HAS_d) {
     v.d = convert<decltype(v.d), T>(vv);
   }
 
   template <
       typename T,
       typename std::enable_if_t<c10::is_complex<T>::value, bool>* = nullptr>
-  Scalar(T vv, bool) : tag(Tag::HAS_z) {
+  Scalar(T vv, bool /*unused*/) : tag(Tag::HAS_z) {
     v.z = convert<decltype(v.z), T>(vv);
   }
 };
diff --git a/c10/core/Storage.h b/c10/core/Storage.h
index 5abbcb22ece0..e061375b8887 100644
--- a/c10/core/Storage.h
+++ b/c10/core/Storage.h
@@ -78,7 +78,7 @@ struct C10_API Storage {
             resizable)) {}
 
  protected:
-  explicit Storage(unsafe_borrow_t, const Storage& rhs)
+  explicit Storage(unsafe_borrow_t /*unused*/, const Storage& rhs)
       : storage_impl_(c10::intrusive_ptr<StorageImpl>::reclaim(
             rhs.storage_impl_.get())) {}
diff --git a/c10/core/Stream.h b/c10/core/Stream.h
index a35e608202c7..f71bfe52bbf9 100644
--- a/c10/core/Stream.h
+++ b/c10/core/Stream.h
@@ -82,14 +82,15 @@ class C10_API Stream final {
   /// should use the provided APIs to get a stream. In particular,
   /// we don't require backends to give any guarantees about non-zero
   /// StreamIds; they are welcome to allocate in whatever way they like.
-  explicit Stream(Unsafe, Device device, StreamId id)
+  explicit Stream(Unsafe /*unused*/, Device device, StreamId id)
       : device_(device), id_(id) {}
 
   /// Construct the default stream of a Device. The default stream is
   /// NOT the same as the current stream; default stream is a fixed stream
   /// that never changes, whereas the current stream may be changed by
   /// StreamGuard.
-  explicit Stream(Default, Device device) : device_(device), id_(0) {}
+  explicit Stream(Default /*unused*/, Device device)
+      : device_(device), id_(0) {}
 
   bool operator==(const Stream& other) const noexcept {
     return this->device_ == other.device_ && this->id_ == other.id_;
diff --git a/c10/core/SymBool.h b/c10/core/SymBool.h
index 6982d0380e57..d5d509e239b1 100644
--- a/c10/core/SymBool.h
+++ b/c10/core/SymBool.h
@@ -40,8 +40,8 @@ class C10_API SymBool {
     return *c;
   }
 
-  SymBool sym_and(const SymBool&) const;
-  SymBool sym_or(const SymBool&) const;
+  SymBool sym_and(const SymBool& /*sci*/) const;
+  SymBool sym_or(const SymBool& /*sci*/) const;
   SymBool sym_not() const;
 
   SymBool operator&(const SymBool& other) const {
diff --git a/c10/core/SymFloat.h b/c10/core/SymFloat.h
index d30b646a653f..e3064f0b8588 100644
--- a/c10/core/SymFloat.h
+++ b/c10/core/SymFloat.h
@@ -43,17 +43,17 @@ class C10_API SymFloat {
     return data_;
   }
 
-  SymFloat operator+(const SymFloat&) const;
-  SymFloat operator-(const SymFloat&) const;
-  SymFloat operator*(const SymFloat&) const;
-  SymFloat operator/(const SymFloat&) const;
+  SymFloat operator+(const SymFloat& /*sci*/) const;
+  SymFloat operator-(const SymFloat& /*sci*/) const;
+  SymFloat operator*(const SymFloat& /*sci*/) const;
+  SymFloat operator/(const SymFloat& /*sci*/) const;
 
-  SymBool sym_eq(const SymFloat&) const;
-  SymBool sym_ne(const SymFloat&) const;
-  SymBool sym_lt(const SymFloat&) const;
-  SymBool sym_le(const SymFloat&) const;
-  SymBool sym_gt(const SymFloat&) const;
-  SymBool sym_ge(const SymFloat&) const;
+  SymBool sym_eq(const SymFloat& /*sci*/) const;
+  SymBool sym_ne(const SymFloat& /*sci*/) const;
+  SymBool sym_lt(const SymFloat& /*sci*/) const;
+  SymBool sym_le(const SymFloat& /*sci*/) const;
+  SymBool sym_gt(const SymFloat& /*sci*/) const;
+  SymBool sym_ge(const SymFloat& /*sci*/) const;
 
   bool operator==(const SymFloat& o) const {
     return sym_eq(o).guard_bool(__FILE__, __LINE__);
diff --git a/c10/core/SymInt.h b/c10/core/SymInt.h
index 9b1c776cbe2a..2080b2c6a05b 100644
--- a/c10/core/SymInt.h
+++ b/c10/core/SymInt.h
@@ -52,7 +52,7 @@ class C10_API SymInt {
   // One appropriate use for this is when you are constructing a symint
   // in a situation where you know it is non-negative (or, if it is negative,
   // the negative value is -1; i.e., not user controlled)
-  SymInt(Unchecked, int64_t d) : data_(d) {}
+  SymInt(Unchecked /*unused*/, int64_t d) : data_(d) {}
 
   // TODO: these implementations are not optimal because they allocate a
   // temporary and then use the move constructor/assignment
diff --git a/c10/core/TensorImpl.h b/c10/core/TensorImpl.h
index 972181327b1f..66893b86c846 100644
--- a/c10/core/TensorImpl.h
+++ b/c10/core/TensorImpl.h
@@ -359,7 +359,7 @@ struct C10_API VariableVersion {
   // https://cplusplus.github.io/LWG/issue2334.
   VariableVersion(uint32_t version)
       : version_counter_(c10::make_intrusive<VersionCounter>(version)) {}
-  VariableVersion(Disabled = DISABLED) {}
+  VariableVersion(Disabled /*unused*/ = DISABLED) {}
 
   bool enabled() const {
     return version_counter_;
@@ -522,21 +522,21 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target {
    */
   TensorImpl(
       Storage&& storage,
-      DispatchKeySet,
+      DispatchKeySet /*key_set*/,
       const caffe2::TypeMeta data_type);
 
   // See Note [Enum ImplType]
   TensorImpl(
-      ImplType,
+      ImplType /*unused*/,
       Storage&& storage,
-      DispatchKeySet,
+      DispatchKeySet /*key_set*/,
       const caffe2::TypeMeta data_type);
 
   /**
    * Construct a 1-dim 0 size tensor that doesn't have a storage.
    */
   TensorImpl(
-      DispatchKeySet,
+      DispatchKeySet /*key_set*/,
       const caffe2::TypeMeta data_type,
       std::optional<c10::Device> device_opt);
@@ -563,9 +563,9 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target {
   // from under us.
   TensorImpl(
       Storage&& storage,
-      DispatchKeySet,
+      DispatchKeySet /*key_set*/,
       const caffe2::TypeMeta data_type,
-      std::optional<c10::Device>);
+      std::optional<c10::Device> /*device_opt*/);
 
  public:
   TensorImpl(const TensorImpl&) = delete;
diff --git a/c10/core/UndefinedTensorImpl.cpp b/c10/core/UndefinedTensorImpl.cpp
index b42d3a92545f..037cac0f63b8 100644
--- a/c10/core/UndefinedTensorImpl.cpp
+++ b/c10/core/UndefinedTensorImpl.cpp
@@ -31,7 +31,7 @@ bool UndefinedTensorImpl::has_storage() const {
 }
 #endif
 
-void UndefinedTensorImpl::set_storage_offset(int64_t) {
+void UndefinedTensorImpl::set_storage_offset(int64_t /*storage_offset*/) {
   TORCH_CHECK(false, "set_storage_offset() called on an undefined Tensor");
 }
diff --git a/c10/core/impl/DeviceGuardImplInterface.h b/c10/core/impl/DeviceGuardImplInterface.h
index e1efa53035b1..f9f67497c631 100644
--- a/c10/core/impl/DeviceGuardImplInterface.h
+++ b/c10/core/impl/DeviceGuardImplInterface.h
@@ -111,15 +111,16 @@ struct C10_API DeviceGuardImplInterface {
   /**
    * Get the default stream for a given device.
    */
-  virtual Stream getDefaultStream(Device) const {
+  virtual Stream getDefaultStream(Device /*unused*/) const {
     TORCH_CHECK(false, "Backend doesn't support acquiring a default stream.")
   }
 
   /**
    * Get a stream from the global pool for a given device.
   */
-  virtual Stream getStreamFromGlobalPool(Device, bool isHighPriority = false)
-      const {
+  virtual Stream getStreamFromGlobalPool(
+      Device /*unused*/,
+      bool isHighPriority = false) const {
     (void)isHighPriority; // Suppress unused variable warning
     TORCH_CHECK(false, "Backend doesn't support acquiring a stream from pool.")
   }
@@ -129,7 +130,7 @@ struct C10_API DeviceGuardImplInterface {
    * copied and shared around, device backend should be able to correctly handle
    * the lifetime of the stream.
    */
-  virtual Stream getNewStream(Device, int priority = 0) const {
+  virtual Stream getNewStream(Device /*unused*/, int priority = 0) const {
     (void)priority;
     TORCH_CHECK(false, "Backend doesn't support create a new Stream.")
   }
@@ -228,8 +229,9 @@ struct C10_API DeviceGuardImplInterface {
    * being used on the given stream, and that it should thus avoid recycling the
   * DataPtr until all work on that stream is done.
    */
-  virtual void recordDataPtrOnStream(const c10::DataPtr&, const Stream&) const {
-  }
+  virtual void recordDataPtrOnStream(
+      const c10::DataPtr& /*unused*/,
+      const Stream& /*unused*/) const {}
 
   /**
    * Fetch the elapsed time between two recorded events.
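
Note the asymmetry in the DeviceGuardImplInterface hunks above: parameters with default arguments (isHighPriority, priority) keep their names, since the name is what documents the default, and the no-op body discards them with a (void) cast instead of a comment. A hedged sketch of that variant, with a hypothetical interface name:

// default_arg_stub.cpp -- the shape of a no-op virtual default, as above.
#include <stdexcept>

struct QueueBackendIface {
  virtual ~QueueBackendIface() = default;

  // The plain unused parameter loses its name; the defaulted one keeps it
  // so the declaration stays self-documenting, and the body consumes it.
  virtual int getQueue(int /*device*/, bool isHighPriority = false) const {
    (void)isHighPriority; // suppress the unused-parameter warning
    throw std::runtime_error("backend does not support queues");
  }
};
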
@@ -257,31 +259,31 @@ struct NoOpDeviceGuardImpl : public DeviceGuardImplInterface {
   DeviceType type() const override {
     return D;
   }
-  Device exchangeDevice(Device) const override {
+  Device exchangeDevice(Device /*unused*/) const override {
     return Device(D, -1); // no-op
   }
   Device getDevice() const override {
     return Device(D, -1);
   }
-  void setDevice(Device) const override {
+  void setDevice(Device /*unused*/) const override {
     // no-op
   }
-  void uncheckedSetDevice(Device) const noexcept override {
+  void uncheckedSetDevice(Device /*unused*/) const noexcept override {
     // no-op
   }
-  Stream getStream(Device) const noexcept override {
+  Stream getStream(Device /*unused*/) const noexcept override {
     // no-op
     return Stream(Stream::DEFAULT, Device(D, -1));
   }
-  Stream getNewStream(Device, int priority = 0) const override {
+  Stream getNewStream(Device /*unused*/, int priority = 0) const override {
     // no-op
     (void)priority;
     return Stream(Stream::DEFAULT, Device(D, -1));
   }
   // NB: These do NOT set the current device
-  Stream exchangeStream(Stream) const noexcept override {
+  Stream exchangeStream(Stream /*unused*/) const noexcept override {
     // no-op
     return Stream(Stream::DEFAULT, Device(D, -1));
   }
@@ -344,7 +346,9 @@ extern C10_API std::array<
 
 class C10_API DeviceGuardImplRegistrar {
  public:
-  DeviceGuardImplRegistrar(DeviceType, const DeviceGuardImplInterface*);
+  DeviceGuardImplRegistrar(
+      DeviceType /*type*/,
+      const DeviceGuardImplInterface* /*impl*/);
 };
 
 #define C10_REGISTER_GUARD_IMPL(DevType, DeviceGuardImpl) \
diff --git a/c10/core/impl/FakeGuardImpl.h b/c10/core/impl/FakeGuardImpl.h
index fa58c5739315..7b2eeca79fd5 100644
--- a/c10/core/impl/FakeGuardImpl.h
+++ b/c10/core/impl/FakeGuardImpl.h
@@ -19,7 +19,7 @@ template <DeviceType T>
 struct FakeGuardImpl final : public DeviceGuardImplInterface {
   static constexpr DeviceType static_type = T;
   // Runtime device type is not used
-  FakeGuardImpl(DeviceType) {}
+  FakeGuardImpl(DeviceType /*unused*/) {}
   FakeGuardImpl() = default;
   DeviceType type() const override {
     return T;
diff --git a/c10/core/impl/GPUTrace.h b/c10/core/impl/GPUTrace.h
index 3acb875b54a3..df8cec135e23 100644
--- a/c10/core/impl/GPUTrace.h
+++ b/c10/core/impl/GPUTrace.h
@@ -16,7 +16,7 @@ struct C10_API GPUTrace {
 
   // This function will only register the first interpreter that tries to invoke
   // it. For all of the next ones it will be a no-op.
-  static void set_trace(const PyInterpreter*);
+  static void set_trace(const PyInterpreter* /*trace*/);
 
   static const PyInterpreter* get_trace() {
     if (!haveState)
diff --git a/c10/core/impl/LocalDispatchKeySet.h b/c10/core/impl/LocalDispatchKeySet.h
index 1232bd25eb3b..bba089bb2ad1 100644
--- a/c10/core/impl/LocalDispatchKeySet.h
+++ b/c10/core/impl/LocalDispatchKeySet.h
@@ -81,7 +81,7 @@ C10_API void _force_tls_local_dispatch_key_set(LocalDispatchKeySet key_set);
 
 class C10_API IncludeDispatchKeyGuard {
  public:
-  IncludeDispatchKeyGuard(DispatchKeySet);
+  IncludeDispatchKeyGuard(DispatchKeySet /*include*/);
   IncludeDispatchKeyGuard(DispatchKey k)
       : IncludeDispatchKeyGuard(DispatchKeySet(k)) {}
   IncludeDispatchKeyGuard(const IncludeDispatchKeyGuard&) = delete;
@@ -99,7 +99,7 @@ class C10_API ExcludeDispatchKeyGuard {
  public:
-  ExcludeDispatchKeyGuard(DispatchKeySet);
+  ExcludeDispatchKeyGuard(DispatchKeySet /*exclude*/);
   ExcludeDispatchKeyGuard(DispatchKey k)
       : ExcludeDispatchKeyGuard(DispatchKeySet(k)) {}
   ExcludeDispatchKeyGuard(const ExcludeDispatchKeyGuard&) = delete;
diff --git a/c10/core/impl/PyInterpreter.cpp b/c10/core/impl/PyInterpreter.cpp
index 913bc7872657..8676f0aaf8e0 100644
--- a/c10/core/impl/PyInterpreter.cpp
+++ b/c10/core/impl/PyInterpreter.cpp
@@ -35,7 +35,7 @@ struct NoopPyInterpreterVTable final : public PyInterpreterVTable {
 
   void python_op_registration_trampoline(
       const c10::OperatorHandle& op,
-      c10::DispatchKey,
+      c10::DispatchKey /*unused*/,
       c10::DispatchKeySet keyset,
       torch::jit::Stack* stack,
       bool with_keyset,
@@ -52,19 +52,21 @@ struct NoopPyInterpreterVTable final : public PyInterpreterVTable {
 
   void python_dispatcher(
       const c10::OperatorHandle& op,
-      c10::DispatchKeySet,
+      c10::DispatchKeySet /*unused*/,
       torch::jit::Stack* stack) const override {
     PANIC(python_dispatcher);
   }
 
-  bool is_contiguous(const TensorImpl* self, at::MemoryFormat) const override {
+  bool is_contiguous(const TensorImpl* self, at::MemoryFormat /*unused*/)
+      const override {
     PANIC(is_contiguous);
   }
-  c10::SymBool sym_is_contiguous(const TensorImpl* self, at::MemoryFormat)
-      const override {
+  c10::SymBool sym_is_contiguous(
+      const TensorImpl* self,
+      at::MemoryFormat /*unused*/) const override {
     PANIC(sym_is_contiguous);
   }
-  bool is_strides_like(const TensorImpl* self, at::MemoryFormat)
+  bool is_strides_like(const TensorImpl* self, at::MemoryFormat /*unused*/)
       const override {
     PANIC(is_strides_like);
   }
diff --git a/c10/cuda/CUDAMallocAsyncAllocator.cpp b/c10/cuda/CUDAMallocAsyncAllocator.cpp
index 7051bd5258f5..ce0f3d885543 100644
--- a/c10/cuda/CUDAMallocAsyncAllocator.cpp
+++ b/c10/cuda/CUDAMallocAsyncAllocator.cpp
@@ -517,7 +517,7 @@ struct CudaMallocAsyncAllocator : public CUDAAllocator {
     }
   }
 
-  void enable(bool) override {
+  void enable(bool /*value*/) override {
    // cannot disable
   }
 
@@ -799,7 +799,7 @@ struct CudaMallocAsyncAllocator : public CUDAAllocator {
   void beginAllocateToPool(
       c10::DeviceIndex device,
       MempoolId_t mempool_id,
-      std::function<bool(cudaStream_t)>) override {
+      std::function<bool(cudaStream_t)> /*filter*/) override {
     std::lock_guard<std::mutex> lk(general_mutex);
 
     TORCH_INTERNAL_ASSERT(capture_free_streams.empty());
diff --git a/c10/cuda/CUDAMiscFunctions.h b/c10/cuda/CUDAMiscFunctions.h
index ec1114935457..bdb2f9998ecd 100644
--- a/c10/cuda/CUDAMiscFunctions.h
+++ b/c10/cuda/CUDAMiscFunctions.h
@@ -9,7 +9,7 @@
 #include <string>
 
 namespace c10::cuda {
-C10_CUDA_API std::string get_cuda_error_help(cudaError_t) noexcept;
+C10_CUDA_API std::string get_cuda_error_help(cudaError_t /*error*/) noexcept;
 C10_CUDA_API const char* get_cuda_check_suffix() noexcept;
 C10_CUDA_API std::mutex* getFreeMutex();
 } // namespace c10::cuda
diff --git a/c10/cuda/CUDAStream.h b/c10/cuda/CUDAStream.h
index 05c314469f87..a1233aeb0570 100644
--- a/c10/cuda/CUDAStream.h
+++ b/c10/cuda/CUDAStream.h
@@ -70,7 +70,7 @@ class C10_CUDA_API CUDAStream {
   /// Construct a CUDAStream from a Stream with no error checking.
   /// This constructor uses the "named" constructor idiom, and can
   /// be invoked as: CUDAStream(CUDAStream::UNCHECKED, stream)
-  explicit CUDAStream(Unchecked, Stream stream) : stream_(stream) {}
+  explicit CUDAStream(Unchecked /*unused*/, Stream stream) : stream_(stream) {}
 
   bool operator==(const CUDAStream& other) const noexcept {
     return unwrap() == other.unwrap();
diff --git a/c10/util/DynamicCounter.h b/c10/util/DynamicCounter.h
index d13b2b2191d2..22141f4cdc30 100644
--- a/c10/util/DynamicCounter.h
+++ b/c10/util/DynamicCounter.h
@@ -43,7 +43,7 @@ class DynamicCounterBackendIf {
   virtual void unregisterCounter(std::string_view key) = 0;
 };
 
-void C10_API
-    registerDynamicCounterBackend(std::unique_ptr<DynamicCounterBackendIf>);
+void C10_API registerDynamicCounterBackend(
+    std::unique_ptr<DynamicCounterBackendIf> /*backend*/);
 } // namespace detail
 } // namespace c10::monitor
diff --git a/c10/util/Exception.h b/c10/util/Exception.h
index 545cef535138..f0c85a8b13d8 100644
--- a/c10/util/Exception.h
+++ b/c10/util/Exception.h
@@ -217,7 +217,7 @@ class C10_API WarningHandlerGuard {
 /// The TORCH_WARN_ONCE macro is difficult to test for. Use
 /// setWarnAlways(true) to turn it into TORCH_WARN, which can be
 /// tested for more easily.
-C10_API void set_warnAlways(bool) noexcept(true);
+C10_API void set_warnAlways(bool /*setting*/) noexcept(true);
 C10_API bool get_warnAlways() noexcept(true);
 
 // A RAII guard that sets warn_always (not thread-local) on
diff --git a/c10/util/ExclusivelyOwned.h b/c10/util/ExclusivelyOwned.h
index c2ff416380c8..ebb74a5823a0 100644
--- a/c10/util/ExclusivelyOwned.h
+++ b/c10/util/ExclusivelyOwned.h
@@ -63,7 +63,7 @@ class ExclusivelyOwned {
   explicit ExclusivelyOwned(T&& t) : repr_(EOT::moveToRepr(std::move(t))) {}
 
   template <class... Args>
-  explicit ExclusivelyOwned(std::in_place_t, Args&&... args)
+  explicit ExclusivelyOwned(std::in_place_t /*unused*/, Args&&... args)
       : repr_(EOT::createInPlace(std::forward<Args>(args)...)) {}
 
   ExclusivelyOwned(const ExclusivelyOwned&) = delete;
diff --git a/c10/util/FunctionRef.h b/c10/util/FunctionRef.h
index 4cab3be078e4..013874becc36 100644
--- a/c10/util/FunctionRef.h
+++ b/c10/util/FunctionRef.h
@@ -52,12 +52,14 @@ class function_ref {
   function_ref(
       // NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward)
       Callable&& callable,
-      std::enable_if_t<
-          !std::is_same_v<std::remove_reference_t<Callable>, function_ref>>* =
-          nullptr,
+      std::enable_if_t<!std::is_same_v<
+          std::remove_reference_t<Callable>,
+          function_ref>>* /*unused*/
+      = nullptr,
       std::enable_if_t<std::is_convertible_v<
           typename std::invoke_result_t<Callable, Params...>,
-          Ret>>* = nullptr)
+          Ret>>* /*unused*/
+      = nullptr)
       : callback(callback_fn<std::remove_reference_t<Callable>>),
         callable(reinterpret_cast<intptr_t>(&callable)) {}
diff --git a/c10/util/Gauge.h b/c10/util/Gauge.h
index f505c037ebc9..e5596bde6e6f 100644
--- a/c10/util/Gauge.h
+++ b/c10/util/Gauge.h
@@ -26,7 +26,8 @@ class GaugeBackendFactoryIf {
       std::string_view key) noexcept = 0;
 };
 
-void C10_API registerGaugeBackend(std::unique_ptr<GaugeBackendFactoryIf>);
+void C10_API
+    registerGaugeBackend(std::unique_ptr<GaugeBackendFactoryIf> /*backend*/);
 } // namespace detail
 
 // A handle to a Gauge.
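
Many annotations in this stretch land on tag parameters: empty types such as Unsafe, Unchecked, unsafe_borrow_t, or std::in_place_t exist only to select an overload, so the parameter is never read and only its type matters. A self-contained sketch of the tagged-dispatch idiom these constructors use (hypothetical class, not PyTorch code):

#include <cstdint>

class Meters {
 public:
  // Validating constructor: the default way to build a Meters.
  explicit Meters(int64_t v) : v_(v < 0 ? 0 : v) {}

  // Tag type whose only job is to route to the non-validating overload.
  struct Unchecked final {};

  // The tag argument carries no data, so its name is commented out.
  Meters(Unchecked /*unused*/, int64_t v) : v_(v) {}

  int64_t value() const {
    return v_;
  }

 private:
  int64_t v_;
};

// Usage: Meters m(Meters::Unchecked{}, -5); the spelled-out tag makes the
// unchecked call site explicit and easy to grep for.
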
diff --git a/c10/util/Logging.h b/c10/util/Logging.h
index 2a08b1f1ce69..fd9b982b7294 100644
--- a/c10/util/Logging.h
+++ b/c10/util/Logging.h
@@ -307,10 +307,11 @@ class C10_API EventSampledHandler {
 
 // Must be called in the main thread before any other threads are spawned.
 C10_API void InitEventSampledHandlers(
-    std::vector<
-        std::pair<std::string_view, std::unique_ptr<EventSampledHandler>>>);
+    std::vector<std::pair<
+        std::string_view,
+        std::unique_ptr<EventSampledHandler>>> /*handlers*/);
 C10_API const std::unique_ptr<EventSampledHandler>& GetEventSampledHandler(
-    std::string_view);
+    std::string_view /*event*/);
 
 /**
  * Very lightweight logging for the first time API usage. It's beneficial for
diff --git a/c10/util/MaybeOwned.h b/c10/util/MaybeOwned.h
index 41f6d2db4acd..11b2d2a15a5c 100644
--- a/c10/util/MaybeOwned.h
+++ b/c10/util/MaybeOwned.h
@@ -82,7 +82,7 @@ class MaybeOwned final {
   /// Don't use this; use owned() instead.
   template <class... Args>
-  explicit MaybeOwned(std::in_place_t, Args&&... args)
+  explicit MaybeOwned(std::in_place_t /*unused*/, Args&&... args)
      : isBorrowed_(false), own_(std::forward<Args>(args)...) {}
 
  public:
@@ -177,7 +177,7 @@ class MaybeOwned final {
   }
 
   template <class... Args>
-  static MaybeOwned owned(std::in_place_t, Args&&... args) {
+  static MaybeOwned owned(std::in_place_t /*unused*/, Args&&... args) {
     return MaybeOwned(std::in_place, std::forward<Args>(args)...);
   }
diff --git a/c10/util/Metaprogramming.h b/c10/util/Metaprogramming.h
index d759da7a2a4e..d504706f3283 100644
--- a/c10/util/Metaprogramming.h
+++ b/c10/util/Metaprogramming.h
@@ -112,7 +112,7 @@ using make_offset_index_sequence =
  *         2>());
  */
 template <class Tuple, size_t... INDEX>
-constexpr auto tuple_elements(Tuple t, std::index_sequence<INDEX...>) {
+constexpr auto tuple_elements(Tuple t, std::index_sequence<INDEX...> /*unused*/) {
   return std::tuple<std::tuple_element_t<INDEX, Tuple>...>(std::get<INDEX>(t)...);
 }
 
@@ -209,7 +209,7 @@ auto tuple_map(
     // NOLINTNEXTLINE(cppcoreguidelines-rvalue-reference-param-not-moved)
     std::tuple<Args...>&& tuple,
     const Mapper& mapper,
-    std::index_sequence<INDEX...>) {
+    std::index_sequence<INDEX...> /*unused*/) {
   return std::tuple<decltype(mapper(std::forward<Args>(std::get<INDEX>(
       tuple))))...>(mapper(std::forward<Args>(std::get<INDEX>(tuple)))...);
 }
diff --git a/c10/util/OptionalArrayRef.h b/c10/util/OptionalArrayRef.h
index 90610eb7d125..bf2a78985ed8 100644
--- a/c10/util/OptionalArrayRef.h
+++ b/c10/util/OptionalArrayRef.h
@@ -27,7 +27,7 @@ class OptionalArrayRef final {
 
   constexpr OptionalArrayRef() noexcept = default;
 
-  constexpr OptionalArrayRef(std::nullopt_t) noexcept {}
+  constexpr OptionalArrayRef(std::nullopt_t /*unused*/) noexcept {}
 
   OptionalArrayRef(const OptionalArrayRef& other) = default;
 
@@ -89,7 +89,7 @@ class OptionalArrayRef final {
 
   // Assignment
 
-  constexpr OptionalArrayRef& operator=(std::nullopt_t) noexcept {
+  constexpr OptionalArrayRef& operator=(std::nullopt_t /*unused*/) noexcept {
     wrapped_opt_array_ref = std::nullopt;
     return *this;
   }
diff --git a/c10/util/SmallVector.h b/c10/util/SmallVector.h
index 88f450316b64..eaf3cbfc601e 100644
--- a/c10/util/SmallVector.h
+++ b/c10/util/SmallVector.h
@@ -215,7 +215,7 @@ class SmallVectorTemplateCommon
       class ItTy,
       std::enable_if_t<!std::is_same_v<std::remove_const_t<ItTy>, T*>, bool> =
           false>
-  void assertSafeToReferenceAfterClear(ItTy, ItTy) {}
+  void assertSafeToReferenceAfterClear(ItTy /*unused*/, ItTy /*unused*/) {}
 
   /// Check whether any part of the range will be invalidated by growing.
   void assertSafeToAddRange(const T* From, const T* To) {
@@ -228,7 +228,7 @@ class SmallVectorTemplateCommon
       class ItTy,
       std::enable_if_t<!std::is_same_v<std::remove_const_t<ItTy>, T*>, bool> =
          false>
-  void assertSafeToAddRange(ItTy, ItTy) {}
+  void assertSafeToAddRange(ItTy /*unused*/, ItTy /*unused*/) {}
 
   /// Reserve enough space to add one element, and return the updated element
   /// pointer in case it was a reference to the storage.
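
The Metaprogramming.h hunks above show the deduction-only flavor of unused parameters: a std::index_sequence argument holds no runtime state and exists purely so the compiler can deduce the INDEX... pack from its type. A minimal sketch of the mechanism (assumed helper name, not the c10 implementation):

#include <cstddef>
#include <tuple>
#include <utility>

// The index_sequence parameter is a deduction vehicle: callers pass
// std::index_sequence<...>{} (or std::make_index_sequence<N>{}) and the
// compiler deduces I... from its type; the value itself is never used.
template <typename Tuple, std::size_t... I>
constexpr auto select(const Tuple& t, std::index_sequence<I...> /*unused*/) {
  return std::make_tuple(std::get<I>(t)...);
}

// Usage: picks elements 0 and 2, yielding std::tuple<int, char>{1, 'c'}.
// auto picked = select(std::make_tuple(1, 2.0, 'c'), std::index_sequence<0, 2>{});
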
@@ -538,7 +538,7 @@ class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> {
   SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
 
   // No need to do a destroy loop for POD's.
-  static void destroy_range(T*, T*) {}
+  static void destroy_range(T* /*unused*/, T* /*unused*/) {}
 
   /// Move the range [I, E) onto the uninitialized memory
   /// starting with "Dest", constructing elements into it as needed.
@@ -563,8 +563,8 @@ class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> {
       T1* I,
       T1* E,
       T2* Dest,
-      std::enable_if_t<std::is_same_v<std::remove_const_t<T1>, T2>>* =
-          nullptr) {
+      std::enable_if_t<std::is_same_v<std::remove_const_t<T1>, T2>>* /*unused*/
+      = nullptr) {
     // Use memcpy for PODs iterated by pointers (which includes SmallVector
     // iterators): std::uninitialized_copy optimizes to memmove, but we can
     // use memcpy here. Note that I and E are iterators and thus might be
diff --git a/c10/util/StringUtil.h b/c10/util/StringUtil.h
index 8a294cb80aa8..b2c41bb98ee1 100644
--- a/c10/util/StringUtil.h
+++ b/c10/util/StringUtil.h
@@ -87,7 +87,7 @@ C10_API std::ostream& _str(std::ostream& ss, const std::wstring& wString);
 template <>
 inline std::ostream& _str(
     std::ostream& ss,
-    const CompileTimeEmptyString&) {
+    const CompileTimeEmptyString& /*unused*/) {
   return ss;
 }
diff --git a/c10/util/UniqueVoidPtr.cpp b/c10/util/UniqueVoidPtr.cpp
index 3554bcf8ee62..dd92db1066f5 100644
--- a/c10/util/UniqueVoidPtr.cpp
+++ b/c10/util/UniqueVoidPtr.cpp
@@ -2,6 +2,6 @@
 
 namespace c10::detail {
 
-void deleteNothing(void*) {}
+void deleteNothing(void* /*unused*/) {}
 
 } // namespace c10::detail
diff --git a/c10/util/UniqueVoidPtr.h b/c10/util/UniqueVoidPtr.h
index fe2a3c650cdd..394fb5500076 100644
--- a/c10/util/UniqueVoidPtr.h
+++ b/c10/util/UniqueVoidPtr.h
@@ -13,7 +13,7 @@ using DeleterFnPtr = void (*)(void*);
 
 namespace detail {
 
 // Does not delete anything
-C10_API void deleteNothing(void*);
+C10_API void deleteNothing(void* /*unused*/);
 
 // A detail::UniqueVoidPtr is an owning smart pointer like unique_ptr, but
 // with three major differences:
diff --git a/c10/util/WaitCounter.h b/c10/util/WaitCounter.h
index c87c2e3293e5..e8fe2e90aecf 100644
--- a/c10/util/WaitCounter.h
+++ b/c10/util/WaitCounter.h
@@ -35,7 +35,7 @@ class WaitCounterBackendFactoryIf {
 };
 
 C10_API void registerWaitCounterBackend(
-    std::unique_ptr<WaitCounterBackendFactoryIf>);
+    std::unique_ptr<WaitCounterBackendFactoryIf> /*factory*/);
 
 C10_API std::vector<std::shared_ptr<WaitCounterBackendFactoryIf>>
 getRegisteredWaitCounterBackends();
diff --git a/c10/util/flat_hash_map.h b/c10/util/flat_hash_map.h
index 8688510b2b81..df74877f8b17 100644
--- a/c10/util/flat_hash_map.h
+++ b/c10/util/flat_hash_map.h
@@ -573,13 +573,13 @@ class sherwood_v3_table : private EntryAlloc,
     return emplace(std::move(value));
   }
   template <typename... Args>
-  iterator emplace_hint(const_iterator, Args&&... args) {
+  iterator emplace_hint(const_iterator /*unused*/, Args&&... args) {
     return emplace(std::forward<Args>(args)...).first;
   }
-  iterator insert(const_iterator, const value_type& value) {
+  iterator insert(const_iterator /*unused*/, const value_type& value) {
     return emplace(value).first;
   }
-  iterator insert(const_iterator, value_type&& value) {
+  iterator insert(const_iterator /*unused*/, value_type&& value) {
     return emplace(std::move(value)).first;
   }
 
@@ -896,7 +896,7 @@ class sherwood_v3_table : private EntryAlloc,
 } // namespace detailv3
 
 struct prime_number_hash_policy {
-  static uint64_t mod0(uint64_t) {
+  static uint64_t mod0(uint64_t /*unused*/) {
     return 0llu;
   }
   static uint64_t mod2(uint64_t hash) {
@@ -1883,7 +1883,7 @@ struct power_of_two_hash_policy {
     size = detailv3::next_power_of_two(size);
     return 0;
   }
-  void commit(int8_t) {}
+  void commit(int8_t /*unused*/) {}
   void reset() {}
 };
 
@@ -1989,14 +1989,14 @@ class flat_hash_map
   }
   template <typename M>
   typename Table::iterator insert_or_assign(
-      typename Table::const_iterator,
+      typename Table::const_iterator /*unused*/,
       const key_type& key,
       M&& m) {
     return insert_or_assign(key, std::forward<M>(m)).first;
   }
   template <typename M>
   typename Table::iterator insert_or_assign(
-      typename Table::const_iterator,
+      typename Table::const_iterator /*unused*/,
       key_type&& key,
       M&& m) {
     return insert_or_assign(std::move(key), std::forward<M>(m)).first;
diff --git a/c10/util/int128.h b/c10/util/int128.h
index 4bea5a5f1197..11d903002d2b 100644
--- a/c10/util/int128.h
+++ b/c10/util/int128.h
@@ -79,8 +79,8 @@ class C10_API uint128 {
   // Make msvc happy with using operator<<= from DivModImpl
   // which is a static function, and linker complained about missing
   // static version of this overload
-  friend uint128& operator<<=(uint128&, int);
-  uint128& operator>>=(int);
+  friend uint128& operator<<=(uint128& /*self*/, int /*amount*/);
+  uint128& operator>>=(int /*amount*/);
   uint128& operator&=(const uint128& b);
   uint128& operator|=(const uint128& b);
   uint128& operator^=(const uint128& b);
diff --git a/c10/util/intrusive_ptr.h b/c10/util/intrusive_ptr.h
index 1f89b2799ad6..3d5478be90e6 100644
--- a/c10/util/intrusive_ptr.h
+++ b/c10/util/intrusive_ptr.h
@@ -399,7 +399,9 @@ class intrusive_ptr final {
   // This constructor will not increase the ref counter for you.
   // We use the tagged dispatch mechanism to explicitly mark this constructor
   // to not increase the refcount
-  explicit intrusive_ptr(TTarget* target, raw::DontIncreaseRefcount) noexcept
+  explicit intrusive_ptr(
+      TTarget* target,
+      raw::DontIncreaseRefcount /*unused*/) noexcept
       : target_(target) {}
 
   explicit intrusive_ptr(std::unique_ptr<TTarget> rhs) noexcept
diff --git a/c10/util/llvmMathExtras.h b/c10/util/llvmMathExtras.h
index 556699be04b1..6321297a61c7 100644
--- a/c10/util/llvmMathExtras.h
+++ b/c10/util/llvmMathExtras.h
@@ -70,7 +70,7 @@ enum ZeroBehavior {
 namespace detail {
 template <typename T, std::size_t SizeOfT>
 struct TrailingZerosCounter {
-  static std::size_t count(T Val, ZeroBehavior) {
+  static std::size_t count(T Val, ZeroBehavior /*unused*/) {
     if (!Val)
       return std::numeric_limits<T>::digits;
     if (Val & 0x1)
@@ -147,7 +147,7 @@ std::size_t countTrailingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
 namespace detail {
 template <typename T, std::size_t SizeOfT>
 struct LeadingZerosCounter {
-  static std::size_t count(T Val, ZeroBehavior) {
+  static std::size_t count(T Val, ZeroBehavior /*unused*/) {
     if (!Val)
       return std::numeric_limits<T>::digits;
diff --git a/c10/util/logging_is_not_google_glog.h b/c10/util/logging_is_not_google_glog.h
index 9e9ff45948bb..803a833c3cae 100644
--- a/c10/util/logging_is_not_google_glog.h
+++ b/c10/util/logging_is_not_google_glog.h
@@ -234,7 +234,9 @@ inline std::ostream& operator<<(
   return out;
 }
 
-inline std::ostream& operator<<(std::ostream& out, const std::nullptr_t&) {
+inline std::ostream& operator<<(
+    std::ostream& out,
+    const std::nullptr_t& /*unused*/) {
   out << "(null)";
   return out;
 }
diff --git a/c10/util/order_preserving_flat_hash_map.h b/c10/util/order_preserving_flat_hash_map.h
index fd8196432c99..a288894f69b6 100644
--- a/c10/util/order_preserving_flat_hash_map.h
+++ b/c10/util/order_preserving_flat_hash_map.h
@@ -560,13 +560,13 @@ class sherwood_v3_table : private EntryAlloc, private Hasher, private Equal {
     return emplace(std::move(value));
   }
   template <typename... Args>
-  iterator emplace_hint(const_iterator, Args&&... args) {
+  iterator emplace_hint(const_iterator /*unused*/, Args&&... args) {
     return emplace(std::forward<Args>(args)...).first;
   }
-  iterator insert(const_iterator, const value_type& value) {
+  iterator insert(const_iterator /*unused*/, const value_type& value) {
     return emplace(value).first;
   }
-  iterator insert(const_iterator, value_type&& value) {
+  iterator insert(const_iterator /*unused*/, value_type&& value) {
     return emplace(std::move(value)).first;
   }
 
@@ -1013,7 +1013,7 @@ class sherwood_v3_table : private EntryAlloc, private Hasher, private Equal {
 } // namespace detailv3
 
 struct prime_number_hash_policy {
-  static uint64_t mod0(uint64_t) {
+  static uint64_t mod0(uint64_t /*unused*/) {
     return 0llu;
   }
   static uint64_t mod2(uint64_t hash) {
@@ -2000,7 +2000,7 @@ struct power_of_two_hash_policy {
     size = detailv3::next_power_of_two(size);
     return 0;
   }
-  void commit(int8_t) {}
+  void commit(int8_t /*unused*/) {}
   void reset() {}
 };
 
@@ -2106,14 +2106,14 @@ class order_preserving_flat_hash_map
   }
   template <typename M>
   typename Table::iterator insert_or_assign(
-      typename Table::const_iterator,
+      typename Table::const_iterator /*unused*/,
       const key_type& key,
       M&& m) {
     return insert_or_assign(key, std::forward<M>(m)).first;
   }
   template <typename M>
   typename Table::iterator insert_or_assign(
-      typename Table::const_iterator,
+      typename Table::const_iterator /*unused*/,
       key_type&& key,
       M&& m) {
     return insert_or_assign(std::move(key), std::forward<M>(m)).first;
diff --git a/torch/custom_class.h b/torch/custom_class.h
index 01a4f2e92b28..4a3d1a39db31 100644
--- a/torch/custom_class.h
+++ b/torch/custom_class.h
@@ -90,7 +90,7 @@ class class_ : public ::torch::detail::class_base {
   /// constructor taking an `int` and a `std::string` as argument.
   template <typename... Types>
   class_& def(
-      torch::detail::types<void, Types...>,
+      torch::detail::types<void, Types...> /*unused*/,
       std::string doc_string = "",
       std::initializer_list<arg> default_args = {}) {
     // Used in combination with
@@ -457,8 +457,8 @@ inline class_<CurClass> selective_class_(
 
 template <class CurClass>
 inline detail::ClassNotSelected selective_class_(
-    const std::string&,
-    detail::SelectiveStr<false>) {
+    const std::string& /*unused*/,
+    detail::SelectiveStr<false> /*unused*/) {
   return detail::ClassNotSelected();
 }
 
@@ -512,7 +512,7 @@ inline class_<CurClass> Library::class_(detail::SelectiveStr<true> className) {
 }
 
 template <class CurClass>
-inline detail::ClassNotSelected Library::class_(detail::SelectiveStr<false>) {
+inline detail::ClassNotSelected Library::class_(detail::SelectiveStr<false> /*unused*/) {
   return detail::ClassNotSelected();
 }
diff --git a/torch/custom_class_detail.h b/torch/custom_class_detail.h
index e770adfda51a..512320081b5d 100644
--- a/torch/custom_class_detail.h
+++ b/torch/custom_class_detail.h
@@ -128,7 +128,7 @@ typename c10::guts::infer_function_traits_t<Functor>::return_type
 call_torchbind_method_from_stack(
     Functor& functor,
     jit::Stack& stack,
-    std::index_sequence<ivalue_arg_indices...>) {
+    std::index_sequence<ivalue_arg_indices...> /*unused*/) {
   (void)(stack); // when sizeof...(ivalue_arg_indices) == 0, this argument would
                  // be unused and we have to silence the compiler warning.
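
The flat_hash_map and order_preserving_flat_hash_map hunks above illustrate one more recurring source of unused parameters: signatures fixed by an outside contract. The standard container interface requires emplace_hint and the hinted insert to take an iterator, but these tables cannot exploit the hint, so it is accepted and ignored. A toy sketch of that shape (simplified container with assumed semantics, not the sherwood_v3_table code):

#include <utility>
#include <vector>

template <typename T>
class ToySet {
 public:
  using iterator = typename std::vector<T>::iterator;
  using const_iterator = typename std::vector<T>::const_iterator;

  std::pair<iterator, bool> emplace(T value) {
    data_.push_back(std::move(value)); // demo only: no deduplication
    return {data_.end() - 1, true};
  }

  // The interface requires a hint parameter, but this layout cannot use
  // it, so only the type remains and the call forwards to emplace().
  template <typename... Args>
  iterator emplace_hint(const_iterator /*unused*/, Args&&... args) {
    return emplace(T(std::forward<Args>(args)...)).first;
  }

 private:
  std::vector<T> data_;
};
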
diff --git a/torch/headeronly/util/BFloat16.h b/torch/headeronly/util/BFloat16.h
index 2c1f805ac7b7..ac47e3f844a7 100644
--- a/torch/headeronly/util/BFloat16.h
+++ b/torch/headeronly/util/BFloat16.h
@@ -39,7 +39,9 @@ struct alignas(2) BFloat16 {
     return from_bits_t();
   }
 
-  constexpr C10_HOST_DEVICE BFloat16(unsigned short bits, from_bits_t)
+  constexpr C10_HOST_DEVICE BFloat16(
+      unsigned short bits,
+      from_bits_t /*unused*/)
       : x(bits) {}
   /* implicit */ inline C10_HOST_DEVICE BFloat16(float value);
   inline C10_HOST_DEVICE operator float() const;
diff --git a/torch/headeronly/util/Float8_e4m3fn.h b/torch/headeronly/util/Float8_e4m3fn.h
index d54a8f40a6c1..4b5d2687a4c0 100644
--- a/torch/headeronly/util/Float8_e4m3fn.h
+++ b/torch/headeronly/util/Float8_e4m3fn.h
@@ -44,7 +44,7 @@ struct alignas(1) Float8_e4m3fn {
 
   Float8_e4m3fn() = default;
 
-  constexpr C10_HOST_DEVICE Float8_e4m3fn(uint8_t bits, from_bits_t)
+  constexpr C10_HOST_DEVICE Float8_e4m3fn(uint8_t bits, from_bits_t /*unused*/)
       : x(bits) {}
   inline C10_HOST_DEVICE Float8_e4m3fn(float value);
   inline C10_HOST_DEVICE operator float() const;
diff --git a/torch/headeronly/util/Float8_e4m3fnuz.h b/torch/headeronly/util/Float8_e4m3fnuz.h
index 772ffd9e96c6..e313588d4b89 100644
--- a/torch/headeronly/util/Float8_e4m3fnuz.h
+++ b/torch/headeronly/util/Float8_e4m3fnuz.h
@@ -45,7 +45,9 @@ struct alignas(1) Float8_e4m3fnuz {
 
   Float8_e4m3fnuz() = default;
 
-  constexpr C10_HOST_DEVICE Float8_e4m3fnuz(uint8_t bits, from_bits_t)
+  constexpr C10_HOST_DEVICE Float8_e4m3fnuz(
+      uint8_t bits,
+      from_bits_t /*unused*/)
       : x(bits) {}
   inline C10_HOST_DEVICE Float8_e4m3fnuz(float value);
   inline C10_HOST_DEVICE operator float() const;
diff --git a/torch/headeronly/util/Float8_e5m2.h b/torch/headeronly/util/Float8_e5m2.h
index aeee40d8e5b8..70748f18276d 100644
--- a/torch/headeronly/util/Float8_e5m2.h
+++ b/torch/headeronly/util/Float8_e5m2.h
@@ -30,7 +30,8 @@ struct alignas(1) Float8_e5m2 {
 
   Float8_e5m2() = default;
 
-  constexpr C10_HOST_DEVICE Float8_e5m2(uint8_t bits, from_bits_t) : x(bits) {}
+  constexpr C10_HOST_DEVICE Float8_e5m2(uint8_t bits, from_bits_t /*unused*/)
+      : x(bits) {}
   inline C10_HOST_DEVICE Float8_e5m2(float value);
   inline C10_HOST_DEVICE operator float() const;
   inline C10_HOST_DEVICE bool isnan() const;
diff --git a/torch/headeronly/util/Float8_e5m2fnuz.h b/torch/headeronly/util/Float8_e5m2fnuz.h
index 8bcb2ac07f76..0398587bc5e9 100644
--- a/torch/headeronly/util/Float8_e5m2fnuz.h
+++ b/torch/headeronly/util/Float8_e5m2fnuz.h
@@ -44,7 +44,9 @@ struct alignas(1) Float8_e5m2fnuz {
 
   Float8_e5m2fnuz() = default;
 
-  constexpr C10_HOST_DEVICE Float8_e5m2fnuz(uint8_t bits, from_bits_t)
+  constexpr C10_HOST_DEVICE Float8_e5m2fnuz(
+      uint8_t bits,
+      from_bits_t /*unused*/)
       : x(bits) {}
   inline C10_HOST_DEVICE Float8_e5m2fnuz(float value);
   inline C10_HOST_DEVICE operator float() const;
diff --git a/torch/headeronly/util/Float8_e8m0fnu.h b/torch/headeronly/util/Float8_e8m0fnu.h
index c5a70525f2f2..153a46e7b91f 100644
--- a/torch/headeronly/util/Float8_e8m0fnu.h
+++ b/torch/headeronly/util/Float8_e8m0fnu.h
@@ -39,7 +39,7 @@ struct alignas(1) Float8_e8m0fnu {
 
   Float8_e8m0fnu() = default;
 
-  constexpr C10_HOST_DEVICE Float8_e8m0fnu(uint8_t bits, from_bits_t)
+  constexpr C10_HOST_DEVICE Float8_e8m0fnu(uint8_t bits, from_bits_t /*unused*/)
       : x(bits) {}
   inline C10_HOST_DEVICE Float8_e8m0fnu(float value);
   inline C10_HOST_DEVICE operator float() const;
diff --git a/torch/headeronly/util/Half.h b/torch/headeronly/util/Half.h
index 59a86f07e333..9673301e2de7 100644
--- a/torch/headeronly/util/Half.h
+++ b/torch/headeronly/util/Half.h
@@ -80,7 +80,8 @@ struct alignas(2) Half {
   Half() = default;
 #endif
 
-  constexpr C10_HOST_DEVICE Half(unsigned short bits, from_bits_t) : x(bits) {}
+  constexpr C10_HOST_DEVICE Half(unsigned short bits, from_bits_t /*unused*/)
+      : x(bits) {}
 #if defined(__aarch64__) && !defined(__CUDACC__)
   inline Half(float16_t value);
   inline operator float16_t() const;
diff --git a/torch/library.h b/torch/library.h
index f906e04ddecf..816f88b13f30 100644
--- a/torch/library.h
+++ b/torch/library.h
@@ -115,7 +115,7 @@ class TORCH_API CppFunction final {
       Func* f,
       std::enable_if_t<
          c10::guts::is_function_type<Func>::value,
-          std::nullptr_t> = nullptr)
+          std::nullptr_t> /*unused*/= nullptr)
       : func_(c10::KernelFunction::makeFromUnboxedRuntimeFunction(f)),
         cpp_signature_(c10::impl::CppSignature::make<Func>()),
         schema_(
@@ -129,7 +129,7 @@ class TORCH_API CppFunction final {
       FuncPtr f,
       std::enable_if_t<
          c10::is_compile_time_function_pointer<FuncPtr>::value,
-          std::nullptr_t> = nullptr)
+          std::nullptr_t> /*unused*/= nullptr)
       : func_(c10::KernelFunction::makeFromUnboxedFunction(f)),
         cpp_signature_(
             c10::impl::CppSignature::make<typename FuncPtr::FuncType>()),
@@ -144,7 +144,7 @@ class TORCH_API CppFunction final {
      Lambda&& f,
      std::enable_if_t<
          c10::guts::is_functor<std::decay_t<Lambda>>::value,
-          std::nullptr_t> = nullptr)
+          std::nullptr_t> /*unused*/= nullptr)
       : func_(c10::KernelFunction::makeFromUnboxedLambda(
             std::forward<Lambda>(f))),
         cpp_signature_(c10::impl::CppSignature::make<std::decay_t<Lambda>>()),
@@ -310,7 +310,7 @@ class TORCH_API CppFunction final {
 
   // The "setter" for dispatch_key_
   template <typename Func>
-  friend CppFunction dispatch(c10::DispatchKey, Func&&);
+  friend CppFunction dispatch(c10::DispatchKey /*k*/, Func&& /*raw_f*/);
 
   // The only class which actually pulls out values from CppFunction (does so
   // destructively, felt too lazy to write accessors that I don't even
@@ -746,14 +746,14 @@ class TORCH_API Library final {
   // These overloads cover cases when a SelectiveStr (see Note [Selective
   // build]) has been disabled at compile time. In that case, don't generate
   // any code referencing the passed in functions at all.
-  Library& def(detail::SelectiveStr<false>, const std::vector<at::Tag>& tags [[maybe_unused]] = {}) & {
+  Library& def(detail::SelectiveStr<false> /*unused*/, const std::vector<at::Tag>& tags [[maybe_unused]] = {}) & {
     return *this;
   }
   Library& def(detail::SelectiveStr<true> raw_schema, const std::vector<at::Tag>& tags = {}) & {
     return def(raw_schema.operator const char*(), tags);
   }
   template <typename Func>
-  Library& def(detail::SelectiveStr<false>, Func&& /*raw_f*/, const std::vector<at::Tag>& tags [[maybe_unused]] = {}) & {
+  Library& def(detail::SelectiveStr<false> /*unused*/, Func&& /*raw_f*/, const std::vector<at::Tag>& tags [[maybe_unused]] = {}) & {
     return *this;
   }
   template <typename Name, typename Func>
@@ -764,12 +764,12 @@ class TORCH_API Library final {
 
   template <typename Func>
   // NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward)
-  Library& impl(detail::SelectiveStr<false>, Func&& /*raw_f*/) & {
+  Library& impl(detail::SelectiveStr<false> /*unused*/, Func&& /*raw_f*/) & {
     return *this;
   }
   template <typename Dispatch, typename Func>
   Library& impl(
-      detail::SelectiveStr<false>,
+      detail::SelectiveStr<false> /*unused*/,
       // NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward)
       Dispatch&& /*key*/,
       // NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward)
@@ -877,7 +877,7 @@ class TORCH_API Library final {
       const std::vector<at::Tag>& tags = {},
       _RegisterOrVerify rv = _RegisterOrVerify::REGISTER) &;
   Library& _def(
-      std::variant<c10::OperatorName, c10::FunctionSchema>&&,
+      std::variant<c10::OperatorName, c10::FunctionSchema>&& /*name_or_schema*/,
      CppFunction&& f,
       const std::vector<at::Tag>& tags = {}) &;
   Library& _impl(