Mark unused parameters in C++ code (#164912)
This PR adds unused parameter name comments in C++ declarations to improve code readability.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/164912
Approved by: https://github.com/Skylion007
committed by PyTorch MergeBot
parent a753ffa9af
commit f231be25c6
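For context, here is a minimal sketch of the convention this commit applies. It is not taken from the diff itself; the function names below are hypothetical. With -Wunused-parameter enabled, a named-but-unread parameter triggers a warning; commenting out the name silences the warning while keeping the declaration self-documenting.

// Hypothetical example illustrating the convention; not part of the diff below.
// Compile with: g++ -Wunused-parameter example.cpp

#include <iostream>

// Before: 'tag' is named but never read, so -Wunused-parameter warns.
// void log_event(const char* msg, int tag) { std::cout << msg << '\n'; }

// After: the name survives as a comment, so the signature stays
// self-documenting and the warning disappears.
void log_event(const char* msg, int /*tag*/) {
  std::cout << msg << '\n';
}

// An alternative, used in some hunks below for defaulted parameters, is to
// keep the name and explicitly discard it with a void cast.
void make_stream(int priority = 0) {
  (void)priority; // Suppress unused variable warning
}

int main() {
  log_event("hello", 42);
  make_stream();
  return 0;
}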
@@ -154,7 +154,7 @@ class DefaultMobileCPUAllocator final : public at::Allocator {
   }
 };

-void NoDelete(void*) {}
+void NoDelete(void* /*unused*/) {}

 at::Allocator* GetCPUAllocator() {
   return GetAllocator(DeviceType::CPU);
@@ -17,7 +17,7 @@ namespace c10 {
 using MemoryDeleter = void (*)(void*);

 // A helper function that is basically doing nothing.
-C10_API void NoDelete(void*);
+C10_API void NoDelete(void* /*unused*/);

 // A simple struct that is used to report C10's memory allocation,
 // deallocation status and out-of-memory events to the profiler
@@ -590,10 +590,12 @@ constexpr uint16_t num_runtime_entries = num_functionality_keys +
 constexpr uint16_t full_backend_mask =
     (static_cast<uint16_t>(1) << num_backends) - 1;

-C10_API const char* toString(DispatchKey);
-C10_API const char* toString(BackendComponent);
-C10_API std::ostream& operator<<(std::ostream&, DispatchKey);
-C10_API std::ostream& operator<<(std::ostream&, BackendComponent);
+C10_API const char* toString(DispatchKey /*t*/);
+C10_API const char* toString(BackendComponent /*t*/);
+C10_API std::ostream& operator<<(std::ostream& /*str*/, DispatchKey /*rhs*/);
+C10_API std::ostream& operator<<(
+    std::ostream& /*str*/,
+    BackendComponent /*rhs*/);

 C10_API DispatchKey getAutogradKeyFromBackend(BackendComponent k);

@@ -172,10 +172,10 @@ class DispatchKeySet final {
   // use of DispatchKeySet in TLS requires this.
   constexpr DispatchKeySet() = default;

-  constexpr DispatchKeySet(Full)
+  constexpr DispatchKeySet(Full /*unused*/)
       : repr_((1ULL << (num_backends + num_functionality_keys - 1)) - 1) {}

-  constexpr DispatchKeySet(FullAfter, DispatchKey t)
+  constexpr DispatchKeySet(FullAfter /*unused*/, DispatchKey t)
       // LSB after t are OK, but not t itself.
       // "functionalities" have a notion of ordering (e.g. Autograd > Sparse >
       // Quantized > Dense). But backends don't really have an ordering.
@@ -191,7 +191,7 @@ class DispatchKeySet final {

   // Public version of DispatchKeySet(uint64_t) API; external users
   // must be explicit when they do this!
-  constexpr DispatchKeySet(Raw, uint64_t x) : repr_(x) {}
+  constexpr DispatchKeySet(Raw /*unused*/, uint64_t x) : repr_(x) {}

   constexpr explicit DispatchKeySet(BackendComponent k) {
     if (k == BackendComponent::InvalidBit) {
@@ -631,8 +631,8 @@ class DispatchKeySet final {
   }
 };

-C10_API std::string toString(DispatchKeySet);
-C10_API std::ostream& operator<<(std::ostream&, DispatchKeySet);
+C10_API std::string toString(DispatchKeySet /*ts*/);
+C10_API std::ostream& operator<<(std::ostream& /*os*/, DispatchKeySet /*ts*/);

 inline int getDispatchTableIndexForDispatchKey(DispatchKey k) {
   return DispatchKeySet(k).getDispatchTableIndexForDispatchKeySet();
@@ -60,7 +60,7 @@ struct C10_API SafePyObject {
   c10::impl::PyInterpreter& pyinterpreter() const {
     return *pyinterpreter_;
   }
-  PyObject* ptr(const c10::impl::PyInterpreter*) const;
+  PyObject* ptr(const c10::impl::PyInterpreter* /*interpreter*/) const;

   // stop tracking the current object, and return it
   PyObject* release() {
@@ -103,7 +103,7 @@ struct C10_API SafePyHandle {
   c10::impl::PyInterpreter& pyinterpreter() const {
     return *pyinterpreter_;
   }
-  PyObject* ptr(const c10::impl::PyInterpreter*) const;
+  PyObject* ptr(const c10::impl::PyInterpreter* /*interpreter*/) const;
   void reset() {
     data_ = nullptr;
     pyinterpreter_ = nullptr;
@@ -428,7 +428,7 @@ class C10_API Scalar {
       typename std::enable_if_t<
           std::is_integral_v<T> && !std::is_same_v<T, bool>,
           bool>* = nullptr>
-  Scalar(T vv, bool) : tag(Tag::HAS_i) {
+  Scalar(T vv, bool /*unused*/) : tag(Tag::HAS_i) {
     v.i = convert<decltype(v.i), T>(vv);
   }

@@ -437,14 +437,14 @@ class C10_API Scalar {
       typename std::enable_if_t<
           !std::is_integral_v<T> && !c10::is_complex<T>::value,
           bool>* = nullptr>
-  Scalar(T vv, bool) : tag(Tag::HAS_d) {
+  Scalar(T vv, bool /*unused*/) : tag(Tag::HAS_d) {
     v.d = convert<decltype(v.d), T>(vv);
   }

   template <
       typename T,
       typename std::enable_if_t<c10::is_complex<T>::value, bool>* = nullptr>
-  Scalar(T vv, bool) : tag(Tag::HAS_z) {
+  Scalar(T vv, bool /*unused*/) : tag(Tag::HAS_z) {
     v.z = convert<decltype(v.z), T>(vv);
   }
 };
@@ -78,7 +78,7 @@ struct C10_API Storage {
             resizable)) {}

  protected:
-  explicit Storage(unsafe_borrow_t, const Storage& rhs)
+  explicit Storage(unsafe_borrow_t /*unused*/, const Storage& rhs)
       : storage_impl_(c10::intrusive_ptr<c10::StorageImpl>::reclaim(
             rhs.storage_impl_.get())) {}

@@ -82,14 +82,15 @@ class C10_API Stream final {
   /// should use the provided APIs to get a stream. In particular,
   /// we don't require backends to give any guarantees about non-zero
   /// StreamIds; they are welcome to allocate in whatever way they like.
-  explicit Stream(Unsafe, Device device, StreamId id)
+  explicit Stream(Unsafe /*unused*/, Device device, StreamId id)
       : device_(device), id_(id) {}

   /// Construct the default stream of a Device. The default stream is
   /// NOT the same as the current stream; default stream is a fixed stream
   /// that never changes, whereas the current stream may be changed by
   /// StreamGuard.
-  explicit Stream(Default, Device device) : device_(device), id_(0) {}
+  explicit Stream(Default /*unused*/, Device device)
+      : device_(device), id_(0) {}

   bool operator==(const Stream& other) const noexcept {
     return this->device_ == other.device_ && this->id_ == other.id_;
@@ -40,8 +40,8 @@ class C10_API SymBool {
     return *c;
   }

-  SymBool sym_and(const SymBool&) const;
-  SymBool sym_or(const SymBool&) const;
+  SymBool sym_and(const SymBool& /*sci*/) const;
+  SymBool sym_or(const SymBool& /*sci*/) const;
   SymBool sym_not() const;

   SymBool operator&(const SymBool& other) const {
@@ -43,17 +43,17 @@ class C10_API SymFloat {
     return data_;
   }

-  SymFloat operator+(const SymFloat&) const;
-  SymFloat operator-(const SymFloat&) const;
-  SymFloat operator*(const SymFloat&) const;
-  SymFloat operator/(const SymFloat&) const;
+  SymFloat operator+(const SymFloat& /*sci*/) const;
+  SymFloat operator-(const SymFloat& /*sci*/) const;
+  SymFloat operator*(const SymFloat& /*sci*/) const;
+  SymFloat operator/(const SymFloat& /*sci*/) const;

-  SymBool sym_eq(const SymFloat&) const;
-  SymBool sym_ne(const SymFloat&) const;
-  SymBool sym_lt(const SymFloat&) const;
-  SymBool sym_le(const SymFloat&) const;
-  SymBool sym_gt(const SymFloat&) const;
-  SymBool sym_ge(const SymFloat&) const;
+  SymBool sym_eq(const SymFloat& /*sci*/) const;
+  SymBool sym_ne(const SymFloat& /*sci*/) const;
+  SymBool sym_lt(const SymFloat& /*sci*/) const;
+  SymBool sym_le(const SymFloat& /*sci*/) const;
+  SymBool sym_gt(const SymFloat& /*sci*/) const;
+  SymBool sym_ge(const SymFloat& /*sci*/) const;

   bool operator==(const SymFloat& o) const {
     return sym_eq(o).guard_bool(__FILE__, __LINE__);
@@ -52,7 +52,7 @@ class C10_API SymInt {
   // One appropriate use for this is when you are constructing a symint
   // in a situation where you know it is non-negative (or, if it is negative,
   // the negative value is -1; i.e., not user controlled)
-  SymInt(Unchecked, int64_t d) : data_(d) {}
+  SymInt(Unchecked /*unused*/, int64_t d) : data_(d) {}

   // TODO: these implementations are not optimal because they allocate a
   // temporary and then use the move constructor/assignment
@@ -359,7 +359,7 @@ struct C10_API VariableVersion {
   // https://cplusplus.github.io/LWG/issue2334.
   VariableVersion(uint32_t version)
       : version_counter_(c10::make_intrusive<VersionCounter>(version)) {}
-  VariableVersion(Disabled = DISABLED) {}
+  VariableVersion(Disabled /*unused*/ = DISABLED) {}

   bool enabled() const {
     return version_counter_;
@@ -522,21 +522,21 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target {
    */
   TensorImpl(
       Storage&& storage,
-      DispatchKeySet,
+      DispatchKeySet /*key_set*/,
       const caffe2::TypeMeta data_type);

   // See Note [Enum ImplType]
   TensorImpl(
-      ImplType,
+      ImplType /*unused*/,
       Storage&& storage,
-      DispatchKeySet,
+      DispatchKeySet /*key_set*/,
       const caffe2::TypeMeta data_type);

   /**
    * Construct a 1-dim 0 size tensor that doesn't have a storage.
    */
   TensorImpl(
-      DispatchKeySet,
+      DispatchKeySet /*key_set*/,
       const caffe2::TypeMeta data_type,
       std::optional<c10::Device> device_opt);

@@ -563,9 +563,9 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target {
   // from under us.
   TensorImpl(
       Storage&& storage,
-      DispatchKeySet,
+      DispatchKeySet /*key_set*/,
       const caffe2::TypeMeta data_type,
-      std::optional<c10::Device>);
+      std::optional<c10::Device> /*device_opt*/);

  public:
   TensorImpl(const TensorImpl&) = delete;
@@ -31,7 +31,7 @@ bool UndefinedTensorImpl::has_storage() const {
 }
 #endif

-void UndefinedTensorImpl::set_storage_offset(int64_t) {
+void UndefinedTensorImpl::set_storage_offset(int64_t /*storage_offset*/) {
   TORCH_CHECK(false, "set_storage_offset() called on an undefined Tensor");
 }

@@ -111,15 +111,16 @@ struct C10_API DeviceGuardImplInterface {
   /**
    * Get the default stream for a given device.
    */
-  virtual Stream getDefaultStream(Device) const {
+  virtual Stream getDefaultStream(Device /*unused*/) const {
     TORCH_CHECK(false, "Backend doesn't support acquiring a default stream.")
   }

   /**
    * Get a stream from the global pool for a given device.
    */
-  virtual Stream getStreamFromGlobalPool(Device, bool isHighPriority = false)
-      const {
+  virtual Stream getStreamFromGlobalPool(
+      Device /*unused*/,
+      bool isHighPriority = false) const {
     (void)isHighPriority; // Suppress unused variable warning
     TORCH_CHECK(false, "Backend doesn't support acquiring a stream from pool.")
   }
@@ -129,7 +130,7 @@ struct C10_API DeviceGuardImplInterface {
    * copied and shared around, device backend should be able to correctly handle
    * the lifetime of the stream.
    */
-  virtual Stream getNewStream(Device, int priority = 0) const {
+  virtual Stream getNewStream(Device /*unused*/, int priority = 0) const {
     (void)priority;
     TORCH_CHECK(false, "Backend doesn't support create a new Stream.")
   }
@@ -228,8 +229,9 @@ struct C10_API DeviceGuardImplInterface {
    * being used on the given stream, and that it should thus avoid recycling the
    * DataPtr until all work on that stream is done.
    */
-  virtual void recordDataPtrOnStream(const c10::DataPtr&, const Stream&) const {
-  }
+  virtual void recordDataPtrOnStream(
+      const c10::DataPtr& /*unused*/,
+      const Stream& /*unused*/) const {}

   /**
    * Fetch the elapsed time between two recorded events.
@@ -257,31 +259,31 @@ struct NoOpDeviceGuardImpl : public DeviceGuardImplInterface {
   DeviceType type() const override {
     return D;
   }
-  Device exchangeDevice(Device) const override {
+  Device exchangeDevice(Device /*unused*/) const override {
     return Device(D, -1); // no-op
   }
   Device getDevice() const override {
     return Device(D, -1);
   }
-  void setDevice(Device) const override {
+  void setDevice(Device /*unused*/) const override {
     // no-op
   }
-  void uncheckedSetDevice(Device) const noexcept override {
+  void uncheckedSetDevice(Device /*unused*/) const noexcept override {
     // no-op
   }
-  Stream getStream(Device) const noexcept override {
+  Stream getStream(Device /*unused*/) const noexcept override {
     // no-op
     return Stream(Stream::DEFAULT, Device(D, -1));
   }

-  Stream getNewStream(Device, int priority = 0) const override {
+  Stream getNewStream(Device /*unused*/, int priority = 0) const override {
     // no-op
     (void)priority;
     return Stream(Stream::DEFAULT, Device(D, -1));
   }

   // NB: These do NOT set the current device
-  Stream exchangeStream(Stream) const noexcept override {
+  Stream exchangeStream(Stream /*unused*/) const noexcept override {
     // no-op
     return Stream(Stream::DEFAULT, Device(D, -1));
   }
@@ -344,7 +346,9 @@ extern C10_API std::array<

 class C10_API DeviceGuardImplRegistrar {
  public:
-  DeviceGuardImplRegistrar(DeviceType, const DeviceGuardImplInterface*);
+  DeviceGuardImplRegistrar(
+      DeviceType /*type*/,
+      const DeviceGuardImplInterface* /*impl*/);
 };

 #define C10_REGISTER_GUARD_IMPL(DevType, DeviceGuardImpl) \
@@ -19,7 +19,7 @@ template <DeviceType T>
 struct FakeGuardImpl final : public DeviceGuardImplInterface {
   static constexpr DeviceType static_type = T;
   // Runtime device type is not used
-  FakeGuardImpl(DeviceType) {}
+  FakeGuardImpl(DeviceType /*unused*/) {}
   FakeGuardImpl() = default;
   DeviceType type() const override {
     return T;
@@ -16,7 +16,7 @@ struct C10_API GPUTrace {

   // This function will only register the first interpreter that tries to invoke
   // it. For all of the next ones it will be a no-op.
-  static void set_trace(const PyInterpreter*);
+  static void set_trace(const PyInterpreter* /*trace*/);

   static const PyInterpreter* get_trace() {
     if (!haveState)
@@ -81,7 +81,7 @@ C10_API void _force_tls_local_dispatch_key_set(LocalDispatchKeySet key_set);

 class C10_API IncludeDispatchKeyGuard {
  public:
-  IncludeDispatchKeyGuard(DispatchKeySet);
+  IncludeDispatchKeyGuard(DispatchKeySet /*include*/);
   IncludeDispatchKeyGuard(DispatchKey k)
       : IncludeDispatchKeyGuard(DispatchKeySet(k)) {}
   IncludeDispatchKeyGuard(const IncludeDispatchKeyGuard&) = delete;
@@ -99,7 +99,7 @@ class C10_API IncludeDispatchKeyGuard {

 class C10_API ExcludeDispatchKeyGuard {
  public:
-  ExcludeDispatchKeyGuard(DispatchKeySet);
+  ExcludeDispatchKeyGuard(DispatchKeySet /*exclude*/);
   ExcludeDispatchKeyGuard(DispatchKey k)
       : ExcludeDispatchKeyGuard(DispatchKeySet(k)) {}
   ExcludeDispatchKeyGuard(const ExcludeDispatchKeyGuard&) = delete;
@@ -35,7 +35,7 @@ struct NoopPyInterpreterVTable final : public PyInterpreterVTable {

   void python_op_registration_trampoline(
       const c10::OperatorHandle& op,
-      c10::DispatchKey,
+      c10::DispatchKey /*unused*/,
       c10::DispatchKeySet keyset,
       torch::jit::Stack* stack,
       bool with_keyset,
@@ -52,19 +52,21 @@ struct NoopPyInterpreterVTable final : public PyInterpreterVTable {

   void python_dispatcher(
       const c10::OperatorHandle& op,
-      c10::DispatchKeySet,
+      c10::DispatchKeySet /*unused*/,
       torch::jit::Stack* stack) const override {
     PANIC(python_dispatcher);
   }

-  bool is_contiguous(const TensorImpl* self, at::MemoryFormat) const override {
+  bool is_contiguous(const TensorImpl* self, at::MemoryFormat /*unused*/)
+      const override {
     PANIC(is_contiguous);
   }
-  c10::SymBool sym_is_contiguous(const TensorImpl* self, at::MemoryFormat)
-      const override {
+  c10::SymBool sym_is_contiguous(
+      const TensorImpl* self,
+      at::MemoryFormat /*unused*/) const override {
     PANIC(sym_is_contiguous);
   }
-  bool is_strides_like(const TensorImpl* self, at::MemoryFormat)
+  bool is_strides_like(const TensorImpl* self, at::MemoryFormat /*unused*/)
       const override {
     PANIC(is_strides_like);
   }
@@ -517,7 +517,7 @@ struct CudaMallocAsyncAllocator : public CUDAAllocator {
     }
   }

-  void enable(bool) override {
+  void enable(bool /*value*/) override {
     // cannot disable
   }

@@ -799,7 +799,7 @@ struct CudaMallocAsyncAllocator : public CUDAAllocator {
   void beginAllocateToPool(
       c10::DeviceIndex device,
       MempoolId_t mempool_id,
-      std::function<bool(cudaStream_t)>) override {
+      std::function<bool(cudaStream_t)> /*filter*/) override {
     std::lock_guard<std::mutex> lk(general_mutex);

     TORCH_INTERNAL_ASSERT(capture_free_streams.empty());
@@ -9,7 +9,7 @@
 #include <string>

 namespace c10::cuda {
-C10_CUDA_API std::string get_cuda_error_help(cudaError_t) noexcept;
+C10_CUDA_API std::string get_cuda_error_help(cudaError_t /*error*/) noexcept;
 C10_CUDA_API const char* get_cuda_check_suffix() noexcept;
 C10_CUDA_API std::mutex* getFreeMutex();
 } // namespace c10::cuda
@@ -70,7 +70,7 @@ class C10_CUDA_API CUDAStream {
   /// Construct a CUDAStream from a Stream with no error checking.
   /// This constructor uses the "named" constructor idiom, and can
   /// be invoked as: CUDAStream(CUDAStream::UNCHECKED, stream)
-  explicit CUDAStream(Unchecked, Stream stream) : stream_(stream) {}
+  explicit CUDAStream(Unchecked /*unused*/, Stream stream) : stream_(stream) {}

   bool operator==(const CUDAStream& other) const noexcept {
     return unwrap() == other.unwrap();
@@ -43,7 +43,7 @@ class DynamicCounterBackendIf {
   virtual void unregisterCounter(std::string_view key) = 0;
 };

-void C10_API
-registerDynamicCounterBackend(std::unique_ptr<DynamicCounterBackendIf>);
+void C10_API registerDynamicCounterBackend(
+    std::unique_ptr<DynamicCounterBackendIf> /*backend*/);
 } // namespace detail
 } // namespace c10::monitor
@@ -217,7 +217,7 @@ class C10_API WarningHandlerGuard {
 /// The TORCH_WARN_ONCE macro is difficult to test for. Use
 /// setWarnAlways(true) to turn it into TORCH_WARN, which can be
 /// tested for more easily.
-C10_API void set_warnAlways(bool) noexcept(true);
+C10_API void set_warnAlways(bool /*setting*/) noexcept(true);
 C10_API bool get_warnAlways() noexcept(true);

 // A RAII guard that sets warn_always (not thread-local) on
@@ -63,7 +63,7 @@ class ExclusivelyOwned {
   explicit ExclusivelyOwned(T&& t) : repr_(EOT::moveToRepr(std::move(t))) {}

   template <class... Args>
-  explicit ExclusivelyOwned(std::in_place_t, Args&&... args)
+  explicit ExclusivelyOwned(std::in_place_t /*unused*/, Args&&... args)
       : repr_(EOT::createInPlace(std::forward<Args>(args)...)) {}

   ExclusivelyOwned(const ExclusivelyOwned&) = delete;
@@ -52,12 +52,14 @@ class function_ref<Ret(Params...)> {
   function_ref(
       // NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward)
       Callable&& callable,
-      std::enable_if_t<
-          !std::is_same_v<std::remove_reference_t<Callable>, function_ref>>* =
-          nullptr,
+      std::enable_if_t<!std::is_same_v<
+          std::remove_reference_t<Callable>,
+          function_ref>>* /*unused*/
+      = nullptr,
       std::enable_if_t<std::is_convertible_v<
           typename std::invoke_result_t<Callable, Params...>,
-          Ret>>* = nullptr)
+          Ret>>* /*unused*/
+      = nullptr)
       : callback(callback_fn<std::remove_reference_t<Callable>>),
         callable(reinterpret_cast<intptr_t>(&callable)) {}

@@ -26,7 +26,8 @@ class GaugeBackendFactoryIf {
       std::string_view key) noexcept = 0;
 };

-void C10_API registerGaugeBackend(std::unique_ptr<GaugeBackendFactoryIf>);
+void C10_API
+registerGaugeBackend(std::unique_ptr<GaugeBackendFactoryIf> /*backend*/);
 } // namespace detail

 // A handle to a Gauge.
@@ -307,10 +307,11 @@ class C10_API EventSampledHandler {

 // Must be called in the main thread before any other threads are spawned.
 C10_API void InitEventSampledHandlers(
-    std::vector<
-        std::pair<std::string_view, std::unique_ptr<EventSampledHandler>>>);
+    std::vector<std::pair<
+        std::string_view,
+        std::unique_ptr<EventSampledHandler>>> /*handlers*/);
 C10_API const std::unique_ptr<EventSampledHandler>& GetEventSampledHandler(
-    std::string_view);
+    std::string_view /*event*/);

 /**
  * Very lightweight logging for the first time API usage. It's beneficial for
@@ -82,7 +82,7 @@ class MaybeOwned final {

   /// Don't use this; use owned() instead.
   template <class... Args>
-  explicit MaybeOwned(std::in_place_t, Args&&... args)
+  explicit MaybeOwned(std::in_place_t /*unused*/, Args&&... args)
       : isBorrowed_(false), own_(std::forward<Args>(args)...) {}

  public:
@@ -177,7 +177,7 @@ class MaybeOwned final {
   }

   template <class... Args>
-  static MaybeOwned owned(std::in_place_t, Args&&... args) {
+  static MaybeOwned owned(std::in_place_t /*unused*/, Args&&... args) {
     return MaybeOwned(std::in_place, std::forward<Args>(args)...);
   }

@@ -112,7 +112,7 @@ using make_offset_index_sequence =
  * 2>());
  */
 template <class Tuple, size_t... Is>
-constexpr auto tuple_elements(Tuple t, std::index_sequence<Is...>) {
+constexpr auto tuple_elements(Tuple t, std::index_sequence<Is...> /*unused*/) {
   return std::tuple<std::tuple_element_t<Is, Tuple>...>(std::get<Is>(t)...);
 }

@@ -209,7 +209,7 @@ auto tuple_map(
     // NOLINTNEXTLINE(cppcoreguidelines-rvalue-reference-param-not-moved)
     std::tuple<Args...>&& tuple,
     const Mapper& mapper,
-    std::index_sequence<Indices...>) {
+    std::index_sequence<Indices...> /*unused*/) {
   return std::tuple<decltype(mapper(std::forward<Args>(std::get<Indices>(
       tuple))))...>(mapper(std::forward<Args>(std::get<Indices>(tuple)))...);
 }
@@ -27,7 +27,7 @@ class OptionalArrayRef final {

   constexpr OptionalArrayRef() noexcept = default;

-  constexpr OptionalArrayRef(std::nullopt_t) noexcept {}
+  constexpr OptionalArrayRef(std::nullopt_t /*unused*/) noexcept {}

   OptionalArrayRef(const OptionalArrayRef& other) = default;

@@ -89,7 +89,7 @@ class OptionalArrayRef final {

   // Assignment

-  constexpr OptionalArrayRef& operator=(std::nullopt_t) noexcept {
+  constexpr OptionalArrayRef& operator=(std::nullopt_t /*unused*/) noexcept {
     wrapped_opt_array_ref = std::nullopt;
     return *this;
   }
@@ -215,7 +215,7 @@ class SmallVectorTemplateCommon
       class ItTy,
       std::enable_if_t<!std::is_same_v<std::remove_const_t<ItTy>, T*>, bool> =
           false>
-  void assertSafeToReferenceAfterClear(ItTy, ItTy) {}
+  void assertSafeToReferenceAfterClear(ItTy /*unused*/, ItTy /*unused*/) {}

   /// Check whether any part of the range will be invalidated by growing.
   void assertSafeToAddRange(const T* From, const T* To) {
@@ -228,7 +228,7 @@ class SmallVectorTemplateCommon
       class ItTy,
       std::enable_if_t<!std::is_same_v<std::remove_const_t<ItTy>, T*>, bool> =
           false>
-  void assertSafeToAddRange(ItTy, ItTy) {}
+  void assertSafeToAddRange(ItTy /*unused*/, ItTy /*unused*/) {}

   /// Reserve enough space to add one element, and return the updated element
   /// pointer in case it was a reference to the storage.
@@ -538,7 +538,7 @@ class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> {
   SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}

   // No need to do a destroy loop for POD's.
-  static void destroy_range(T*, T*) {}
+  static void destroy_range(T* /*unused*/, T* /*unused*/) {}

   /// Move the range [I, E) onto the uninitialized memory
   /// starting with "Dest", constructing elements into it as needed.
@@ -563,8 +563,8 @@ class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> {
       T1* I,
       T1* E,
       T2* Dest,
-      std::enable_if_t<std::is_same_v<std::remove_const_t<T1>, T2>>* =
-          nullptr) {
+      std::enable_if_t<std::is_same_v<std::remove_const_t<T1>, T2>>* /*unused*/
+      = nullptr) {
     // Use memcpy for PODs iterated by pointers (which includes SmallVector
     // iterators): std::uninitialized_copy optimizes to memmove, but we can
     // use memcpy here. Note that I and E are iterators and thus might be
@@ -87,7 +87,7 @@ C10_API std::ostream& _str(std::ostream& ss, const std::wstring& wString);
 template <>
 inline std::ostream& _str<CompileTimeEmptyString>(
     std::ostream& ss,
-    const CompileTimeEmptyString&) {
+    const CompileTimeEmptyString& /*unused*/) {
   return ss;
 }

@@ -2,6 +2,6 @@

 namespace c10::detail {

-void deleteNothing(void*) {}
+void deleteNothing(void* /*unused*/) {}

 } // namespace c10::detail
@@ -13,7 +13,7 @@ using DeleterFnPtr = void (*)(void*);
 namespace detail {

 // Does not delete anything
-C10_API void deleteNothing(void*);
+C10_API void deleteNothing(void* /*unused*/);

 // A detail::UniqueVoidPtr is an owning smart pointer like unique_ptr, but
 // with three major differences:
@@ -35,7 +35,7 @@ class WaitCounterBackendFactoryIf {
 };

 C10_API void registerWaitCounterBackend(
-    std::unique_ptr<WaitCounterBackendFactoryIf>);
+    std::unique_ptr<WaitCounterBackendFactoryIf> /*factory*/);

 C10_API std::vector<std::shared_ptr<WaitCounterBackendFactoryIf>>
 getRegisteredWaitCounterBackends();
@@ -573,13 +573,13 @@ class sherwood_v3_table : private EntryAlloc,
     return emplace(std::move(value));
   }
   template <typename... Args>
-  iterator emplace_hint(const_iterator, Args&&... args) {
+  iterator emplace_hint(const_iterator /*unused*/, Args&&... args) {
     return emplace(std::forward<Args>(args)...).first;
   }
-  iterator insert(const_iterator, const value_type& value) {
+  iterator insert(const_iterator /*unused*/, const value_type& value) {
     return emplace(value).first;
   }
-  iterator insert(const_iterator, value_type&& value) {
+  iterator insert(const_iterator /*unused*/, value_type&& value) {
     return emplace(std::move(value)).first;
   }

@@ -896,7 +896,7 @@ class sherwood_v3_table : private EntryAlloc,
 } // namespace detailv3

 struct prime_number_hash_policy {
-  static uint64_t mod0(uint64_t) {
+  static uint64_t mod0(uint64_t /*unused*/) {
     return 0llu;
   }
   static uint64_t mod2(uint64_t hash) {
@@ -1883,7 +1883,7 @@ struct power_of_two_hash_policy {
     size = detailv3::next_power_of_two(size);
     return 0;
   }
-  void commit(int8_t) {}
+  void commit(int8_t /*unused*/) {}
   void reset() {}
 };

@@ -1989,14 +1989,14 @@ class flat_hash_map
   }
   template <typename M>
   typename Table::iterator insert_or_assign(
-      typename Table::const_iterator,
+      typename Table::const_iterator /*unused*/,
       const key_type& key,
       M&& m) {
     return insert_or_assign(key, std::forward<M>(m)).first;
   }
   template <typename M>
   typename Table::iterator insert_or_assign(
-      typename Table::const_iterator,
+      typename Table::const_iterator /*unused*/,
       key_type&& key,
       M&& m) {
     return insert_or_assign(std::move(key), std::forward<M>(m)).first;
@@ -79,8 +79,8 @@ class C10_API uint128 {
   // Make msvc happy with using operator<<= from DivModImpl
   // which is a static function, and linker complained about missing
   // static version of this overload
-  friend uint128& operator<<=(uint128&, int);
-  uint128& operator>>=(int);
+  friend uint128& operator<<=(uint128& /*self*/, int /*amount*/);
+  uint128& operator>>=(int /*amount*/);
   uint128& operator&=(const uint128& b);
   uint128& operator|=(const uint128& b);
   uint128& operator^=(const uint128& b);
@@ -399,7 +399,9 @@ class intrusive_ptr final {
   // This constructor will not increase the ref counter for you.
   // We use the tagged dispatch mechanism to explicitly mark this constructor
   // to not increase the refcount
-  explicit intrusive_ptr(TTarget* target, raw::DontIncreaseRefcount) noexcept
+  explicit intrusive_ptr(
+      TTarget* target,
+      raw::DontIncreaseRefcount /*unused*/) noexcept
       : target_(target) {}

   explicit intrusive_ptr(std::unique_ptr<TTarget> rhs) noexcept
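Aside: the tag types in the hunks above (raw::DontIncreaseRefcount, Unsafe, Raw, Full) are tagged-dispatch parameters, which are unused by construction, so this commit comments out their names everywhere. A minimal sketch of the idiom, with made-up names (not PyTorch code):

// An empty tag type selects an overload at compile time; the tag parameter
// itself carries no data and is never read, hence the /*unused*/ comment.

#include <cstdint>

struct DontIncreaseRefcountTag {};

class Handle {
 public:
  explicit Handle(int64_t id) : id_(id) {
    // normal constructor: would acquire a reference here
  }
  explicit Handle(DontIncreaseRefcountTag /*unused*/, int64_t id) : id_(id) {
    // tag-selected overload: adopt the id without acquiring a reference
  }

 private:
  int64_t id_;
};

int main() {
  Handle owning(42);
  Handle adopted(DontIncreaseRefcountTag{}, 42);
  (void)owning;
  (void)adopted;
  return 0;
}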
@@ -70,7 +70,7 @@ enum ZeroBehavior {
 namespace detail {
 template <typename T, std::size_t SizeOfT>
 struct TrailingZerosCounter {
-  static std::size_t count(T Val, ZeroBehavior) {
+  static std::size_t count(T Val, ZeroBehavior /*unused*/) {
     if (!Val)
       return std::numeric_limits<T>::digits;
     if (Val & 0x1)
@@ -147,7 +147,7 @@ std::size_t countTrailingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
 namespace detail {
 template <typename T, std::size_t SizeOfT>
 struct LeadingZerosCounter {
-  static std::size_t count(T Val, ZeroBehavior) {
+  static std::size_t count(T Val, ZeroBehavior /*unused*/) {
     if (!Val)
       return std::numeric_limits<T>::digits;

@@ -234,7 +234,9 @@ inline std::ostream& operator<<(
   return out;
 }

-inline std::ostream& operator<<(std::ostream& out, const std::nullptr_t&) {
+inline std::ostream& operator<<(
+    std::ostream& out,
+    const std::nullptr_t& /*unused*/) {
   out << "(null)";
   return out;
 }
@@ -560,13 +560,13 @@ class sherwood_v3_table : private EntryAlloc, private Hasher, private Equal {
     return emplace(std::move(value));
   }
   template <typename... Args>
-  iterator emplace_hint(const_iterator, Args&&... args) {
+  iterator emplace_hint(const_iterator /*unused*/, Args&&... args) {
     return emplace(std::forward<Args>(args)...).first;
   }
-  iterator insert(const_iterator, const value_type& value) {
+  iterator insert(const_iterator /*unused*/, const value_type& value) {
     return emplace(value).first;
   }
-  iterator insert(const_iterator, value_type&& value) {
+  iterator insert(const_iterator /*unused*/, value_type&& value) {
     return emplace(std::move(value)).first;
   }

@@ -1013,7 +1013,7 @@ class sherwood_v3_table : private EntryAlloc, private Hasher, private Equal {
 } // namespace detailv3

 struct prime_number_hash_policy {
-  static uint64_t mod0(uint64_t) {
+  static uint64_t mod0(uint64_t /*unused*/) {
     return 0llu;
   }
   static uint64_t mod2(uint64_t hash) {
@@ -2000,7 +2000,7 @@ struct power_of_two_hash_policy {
     size = detailv3::next_power_of_two(size);
     return 0;
   }
-  void commit(int8_t) {}
+  void commit(int8_t /*unused*/) {}
   void reset() {}
 };

@@ -2106,14 +2106,14 @@ class order_preserving_flat_hash_map
   }
   template <typename M>
   typename Table::iterator insert_or_assign(
-      typename Table::const_iterator,
+      typename Table::const_iterator /*unused*/,
       const key_type& key,
       M&& m) {
     return insert_or_assign(key, std::forward<M>(m)).first;
   }
   template <typename M>
   typename Table::iterator insert_or_assign(
-      typename Table::const_iterator,
+      typename Table::const_iterator /*unused*/,
       key_type&& key,
       M&& m) {
     return insert_or_assign(std::move(key), std::forward<M>(m)).first;