Mark unused parameters in C++ code (#164912)
This PR adds unused parameter name comments in C++ declarations to improve code readability.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/164912
Approved by: https://github.com/Skylion007

Committed by: PyTorch MergeBot
Parent commit: a753ffa9af
This commit: f231be25c6
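For readers unfamiliar with the convention applied throughout the diff below, here is a minimal, self-contained sketch (the `Widget` class is hypothetical, not part of the PyTorch sources): keeping the intended parameter name as a block comment documents what the argument means at the declaration site, and because the parameter is formally unnamed, a definition that ignores it does not trigger `-Wunused-parameter`.

```cpp
#include <iostream>

// Hypothetical class illustrating the comment-only naming convention.
class Widget {
 public:
  // Before: a bare type tells the reader nothing about the argument.
  //   void setBenchmark(bool);
  // After: the name survives as a comment, and the unnamed parameter
  // cannot trigger an unused-parameter warning in this stub definition.
  void setBenchmark(bool /*enabled*/) {
    // Intentionally ignores the flag; a real implementation would store it.
  }
};

int main() {
  Widget w;
  w.setBenchmark(true);  // the call site is unchanged by the comment-only rename
  std::cout << "ok\n";
  return 0;
}
```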
@@ -226,15 +226,15 @@ class TORCH_API Context {
 bool userEnabledMkldnn() const;
 void setUserEnabledMkldnn(bool e);
 bool benchmarkCuDNN() const;
-void setBenchmarkCuDNN(bool);
+void setBenchmarkCuDNN(bool /*b*/);
 int benchmarkLimitCuDNN() const;
-void setBenchmarkLimitCuDNN(int);
+void setBenchmarkLimitCuDNN(int /*b*/);
 bool immediateMiopen() const;
-void setImmediateMiopen(bool);
+void setImmediateMiopen(bool /*b*/);
 bool deterministicCuDNN() const;
-void setDeterministicCuDNN(bool);
+void setDeterministicCuDNN(bool /*b*/);
 bool deterministicMkldnn() const;
-void setDeterministicMkldnn(bool);
+void setDeterministicMkldnn(bool /*b*/);
 bool userEnabledNNPACK() const;
 void setUserEnabledNNPACK(bool e);

@@ -252,32 +252,32 @@ class TORCH_API Context {
 void setSDPPriorityOrder(const std::vector<int64_t>& order);
 std::array<at::SDPBackend, at::num_sdp_backends> sDPPriorityOrder();

-void setSDPUseFlash(bool);
+void setSDPUseFlash(bool /*e*/);
 bool userEnabledFlashSDP() const;

-void setSDPUseMemEfficient(bool);
+void setSDPUseMemEfficient(bool /*e*/);
 bool userEnabledMemEfficientSDP() const;

-void setSDPUseMath(bool);
+void setSDPUseMath(bool /*e*/);
 bool userEnabledMathSDP() const;

-void setSDPUseCuDNN(bool);
+void setSDPUseCuDNN(bool /*e*/);
 bool userEnabledCuDNNSDP() const;

-void setAllowFP16BF16ReductionMathSDP(bool);
+void setAllowFP16BF16ReductionMathSDP(bool /*e*/);
 bool allowFP16BF16ReductionMathSDP() const;

-void setSDPUseOverrideable(bool);
+void setSDPUseOverrideable(bool /*e*/);
 bool userEnabledOverrideableSDP() const;

 at::LinalgBackend linalgPreferredBackend() const;
-void setLinalgPreferredBackend(at::LinalgBackend);
+void setLinalgPreferredBackend(at::LinalgBackend /*b*/);

 at::BlasBackend blasPreferredBackend();
-void setBlasPreferredBackend(at::BlasBackend);
+void setBlasPreferredBackend(at::BlasBackend /*b*/);

 at::ROCmFABackend getROCmFAPreferredBackend();
-void setROCmFAPreferredBackend(at::ROCmFABackend);
+void setROCmFAPreferredBackend(at::ROCmFABackend /*b*/);

 // Note [Enabling Deterministic Operations]
 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -310,9 +310,9 @@ class TORCH_API Context {

 bool deterministicAlgorithms() const;
 bool deterministicAlgorithmsWarnOnly() const;
-void setDeterministicAlgorithms(bool, bool);
+void setDeterministicAlgorithms(bool /*b*/, bool /*warn_only*/);
 bool deterministicFillUninitializedMemory() const;
-void setDeterministicFillUninitializedMemory(bool);
+void setDeterministicFillUninitializedMemory(bool /*b*/);

 // Note [Writing Nondeterministic Operations]
 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -356,11 +356,11 @@ class TORCH_API Context {
 Float32Op op,
 Float32Precision p);
 bool allowTF32CuDNN(std::optional<Float32Op> op = std::nullopt) const;
-void setAllowTF32CuDNN(bool);
+void setAllowTF32CuDNN(bool /*b*/);
 bool allowTF32OneDNN() const;
-void setAllowTF32OneDNN(bool);
+void setAllowTF32OneDNN(bool /*b*/);
 bool allowTF32CuBLAS() const;
-void setAllowTF32CuBLAS(bool);
+void setAllowTF32CuBLAS(bool /*b*/);
 Float32MatmulPrecision float32MatmulPrecision() const;
 Float32Precision float32Precision(Float32Backend backend, Float32Op op) const;
 CuBLASReductionOption allowFP16ReductionCuBLAS() const;
@@ -372,7 +372,7 @@ class TORCH_API Context {
 bool allow_reduced_precision,
 bool allow_splitk = true);
 bool allowFP16AccumulationCuBLAS() const;
-void setAllowFP16AccumulationCuBLAS(bool);
+void setAllowFP16AccumulationCuBLAS(bool /*b*/);

 // Matmuls can use a so-called "persistent" kernel which launches one CUDA
 // block for each SM on the GPU, and each block then iterates over multiple
@@ -384,7 +384,7 @@ class TORCH_API Context {
 // to make matmuls target only a subset of the SMs, so they can fully schedule
 // even next to a comms kernel, and only be a few percent slower.
 std::optional<int32_t> _SMCarveout_EXPERIMENTAL() const;
-void _setSMCarveout_EXPERIMENTAL(std::optional<int32_t>);
+void _setSMCarveout_EXPERIMENTAL(std::optional<int32_t> /*c*/);

 at::QEngine qEngine() const;
 void setQEngine(at::QEngine e);
@@ -405,7 +405,7 @@ class TORCH_API Context {
 void setDefaultMobileCPUAllocator();
 void unsetDefaultMobileCPUAllocator();
 bool allowFP16ReductionCPU() const;
-void setAllowFP16ReductionCPU(bool);
+void setAllowFP16ReductionCPU(bool /*b*/);

 // Preserved for BC
 void lazyInitCUDA() {
@@ -62,7 +62,7 @@ constexpr const char* unknown_eventname = "eventname not specified";
 #endif
 } // namespace (anonymous)

-MapAllocator::MapAllocator(WithFd, std::string_view filename, int fd, int flags, size_t size)
+MapAllocator::MapAllocator(WithFd /*unused*/, std::string_view filename, int fd, int flags, size_t size)
 : filename_(filename.empty() ? unknown_filename : filename)
 , size_(0) // to be filled later
 #ifdef _WIN32
@@ -494,7 +494,7 @@ RefcountedMapAllocator::RefcountedMapAllocator(const char *filename, int flags,

 initializeAlloc();
 }
-RefcountedMapAllocator::RefcountedMapAllocator(WithFd, const char *filename, int fd, int flags, size_t size)
+RefcountedMapAllocator::RefcountedMapAllocator(WithFd /*unused*/, const char *filename, int fd, int flags, size_t size)
 : RefcountedMapAllocatorArgCheck(flags)
 , MapAllocator(WITH_FD, filename, flags, fd, size + map_alloc_alignment) {

@@ -614,7 +614,7 @@ at::DataPtr MapAllocator::makeDataPtr(std::string_view filename, int flags, size
 return {context->data(), context, &deleteMapAllocator, at::DeviceType::CPU};
 }

-at::DataPtr MapAllocator::makeDataPtr(WithFd, const char *filename, int fd, int flags, size_t size, size_t* actual_size_out) {
+at::DataPtr MapAllocator::makeDataPtr(WithFd /*unused*/, const char *filename, int fd, int flags, size_t size, size_t* actual_size_out) {
 auto* context = new MapAllocator(WITH_FD, filename, fd, flags, size);
 if (actual_size_out) *actual_size_out = context->size();
 return {context->data(), context, &deleteMapAllocator, at::DeviceType::CPU};
@@ -626,7 +626,7 @@ at::DataPtr RefcountedMapAllocator::makeDataPtr(const char *filename, int flags,
 return {context->data(), context, &deleteRefcountedMapAllocator, at::DeviceType::CPU};
 }

-at::DataPtr RefcountedMapAllocator::makeDataPtr(WithFd, const char *filename, int fd, int flags, size_t size, size_t* actual_size_out) {
+at::DataPtr RefcountedMapAllocator::makeDataPtr(WithFd /*unused*/, const char *filename, int fd, int flags, size_t size, size_t* actual_size_out) {
 auto* context = new RefcountedMapAllocator(WITH_FD, filename, fd, flags, size);
 if (actual_size_out) *actual_size_out = context->size() - map_alloc_alignment;
 return {context->data(), context, &deleteRefcountedMapAllocator, at::DeviceType::CPU};
@@ -25,7 +25,7 @@ class TORCH_API MapAllocator {
 public:
 MapAllocator(std::string_view filename, int flags, size_t size);
 MapAllocator(
-WithFd,
+WithFd /*unused*/,
 std::string_view filename,
 int fd,
 int flags,
@@ -59,14 +59,14 @@ class TORCH_API MapAllocator {
 return flags_;
 }

-static MapAllocator* fromDataPtr(const at::DataPtr&);
+static MapAllocator* fromDataPtr(const at::DataPtr& /*dptr*/);
 static at::DataPtr makeDataPtr(
 std::string_view filename,
 int flags,
 size_t size,
 size_t* actual_size_out);
 static at::DataPtr makeDataPtr(
-WithFd,
+WithFd /*unused*/,
 const char* filename,
 int fd,
 int flags,
@@ -105,13 +105,13 @@ class TORCH_API RefcountedMapAllocator : private RefcountedMapAllocatorArgCheck,
 public:
 RefcountedMapAllocator(const char* filename, int flags, size_t size);
 RefcountedMapAllocator(
-WithFd,
+WithFd /*unused*/,
 const char* filename,
 int fd,
 int flags,
 size_t size);

-static RefcountedMapAllocator* fromDataPtr(const at::DataPtr&);
+static RefcountedMapAllocator* fromDataPtr(const at::DataPtr& /*dptr*/);
 RefcountedMapAllocator(const RefcountedMapAllocator&) = delete;
 RefcountedMapAllocator(RefcountedMapAllocator&&) = delete;
 RefcountedMapAllocator& operator=(const RefcountedMapAllocator&) = delete;
@@ -122,7 +122,7 @@ class TORCH_API RefcountedMapAllocator : private RefcountedMapAllocatorArgCheck,
 size_t size,
 size_t* actual_size_out);
 static at::DataPtr makeDataPtr(
-WithFd,
+WithFd /*unused*/,
 const char* filename,
 int fd,
 int flags,
@@ -273,7 +273,7 @@ c10::SymInt NestedTensorImpl::sym_numel_custom() const {
 return NestedTensorImpl::numel_custom();
 }

-c10::SymBool NestedTensorImpl::sym_is_contiguous_custom(MemoryFormat) const {
+c10::SymBool NestedTensorImpl::sym_is_contiguous_custom(MemoryFormat /*memory_format*/) const {
 return nested_tensor_impl_is_contiguous(this);
 }
 IntArrayRef NestedTensorImpl::sizes_custom() const {
@@ -115,7 +115,8 @@ struct TORCH_API NestedTensorImpl : public c10::TensorImpl {
 // with real implementations
 int64_t numel_custom() const override;
 c10::SymInt sym_numel_custom() const override;
-c10::SymBool sym_is_contiguous_custom(MemoryFormat) const override;
+c10::SymBool sym_is_contiguous_custom(
+MemoryFormat /*memory_format*/) const override;
 int64_t size_custom(int64_t d) const override {
 return this->size(d);
 }
@@ -14,7 +14,7 @@ inline int64_t divup(int64_t x, int64_t y) {
 TORCH_API void init_num_threads();

 // Sets the number of threads to be used in parallel region
-TORCH_API void set_num_threads(int);
+TORCH_API void set_num_threads(int /*nthreads*/);

 // Returns the maximum number of threads that may be used in a parallel region
 TORCH_API int get_num_threads();
@@ -37,7 +37,7 @@ inline void lazy_init_num_threads() {
 }
 }

-TORCH_API void set_thread_num(int);
+TORCH_API void set_thread_num(int /*id*/);

 class TORCH_API ThreadIdGuard {
 public:
@@ -130,7 +130,7 @@ inline scalar_t parallel_reduce(
 TORCH_API std::string get_parallel_info();

 // Sets number of threads used for inter-op parallelism
-TORCH_API void set_num_interop_threads(int);
+TORCH_API void set_num_interop_threads(int /*nthreads*/);

 // Returns the number of threads used for inter-op parallelism
 TORCH_API size_t get_num_interop_threads();
@@ -252,7 +252,7 @@ void SparseCsrTensorImpl::set_stride(int64_t dim, int64_t new_stride) {
 void SparseCsrTensorImpl::set_storage_offset(int64_t storage_offset) {
 TORCH_CHECK(false, "Sparse ", at::sparse_csr::layoutToString(layout_, /*upper=*/true), " tensors do not have set_storage_offset.");
 }
-c10::SymBool SparseCsrTensorImpl::sym_is_contiguous_custom(MemoryFormat) const {
+c10::SymBool SparseCsrTensorImpl::sym_is_contiguous_custom(MemoryFormat /*memory_format*/) const {
 TORCH_CHECK(false, "Sparse ", at::sparse_csr::layoutToString(layout_, /*upper=*/true), " tensors do not have is_contiguous");
 }
 } // namespace at
@@ -32,10 +32,10 @@ struct TORCH_API SparseCsrTensorImpl : public TensorImpl {

 public:
 explicit SparseCsrTensorImpl(
-at::DispatchKeySet,
+at::DispatchKeySet /*key_set*/,
 at::Device device,
 Layout layout,
-const caffe2::TypeMeta);
+const caffe2::TypeMeta /*data_type*/);

 void resize_(int64_t nnz, IntArrayRef size);
 void resize_and_clear_(
@@ -86,7 +86,8 @@ struct TORCH_API SparseCsrTensorImpl : public TensorImpl {
 protected:
 IntArrayRef strides_custom() const override;
 SymIntArrayRef sym_strides_custom() const override;
-SymBool sym_is_contiguous_custom(MemoryFormat) const override;
+SymBool sym_is_contiguous_custom(
+MemoryFormat /*memory_format*/) const override;

 public:
 void set_size(int64_t dim, int64_t new_size) override;
@@ -46,7 +46,9 @@ struct TORCH_API SparseTensorImpl : public TensorImpl {

 public:
 // Public for now...
-explicit SparseTensorImpl(at::DispatchKeySet, const caffe2::TypeMeta);
+explicit SparseTensorImpl(
+at::DispatchKeySet /*key_set*/,
+const caffe2::TypeMeta /*data_type*/);

 void release_resources() override;

@@ -384,8 +386,8 @@ struct TORCH_API SparseTensorImpl : public TensorImpl {

 private:
 explicit SparseTensorImpl(
-at::DispatchKeySet,
-const caffe2::TypeMeta,
+at::DispatchKeySet /*key_set*/,
+const caffe2::TypeMeta /*data_type*/,
 at::Tensor indices,
 at::Tensor values);

@@ -112,10 +112,10 @@ TORCH_API std::ostream& operator<<(std::ostream& stream, const Slice& slice);
 // `torch.tensor([1, 2])`) | `torch::tensor({1, 2})`
 struct TORCH_API TensorIndex final {
 // Case 1: `at::indexing::None`
-TensorIndex(std::nullopt_t) : type_(TensorIndexType::None) {}
+TensorIndex(std::nullopt_t /*unused*/) : type_(TensorIndexType::None) {}

 // Case 2: "..." / `at::indexing::Ellipsis`
-TensorIndex(at::indexing::EllipsisIndexType)
+TensorIndex(at::indexing::EllipsisIndexType /*unused*/)
 : type_(TensorIndexType::Ellipsis) {}
 TensorIndex(const char* str) : TensorIndex(at::indexing::Ellipsis) {
 TORCH_CHECK_VALUE(
@@ -250,7 +250,7 @@ struct TORCH_API TensorIteratorBase : public impl::MetaBase {
 using PtrVector = SmallVector<char*, 4>;
 using StrideVector = SmallVector<int64_t, 6>;

-void build(TensorIteratorConfig&);
+void build(TensorIteratorConfig& /*config*/);

 // The inner-loop function operates on the fastest moving dimension. It
 // implements element-wise operations in terms of 1-d strided tensors.
@@ -618,20 +618,20 @@ struct TORCH_API TensorIteratorBase : public impl::MetaBase {
 #undef TORCH_DISALLOW_TEMPORARIES
 protected:
 // Mutable reference as it moves tensors out of TensorIteratorConfig
-void populate_operands(TensorIteratorConfig&);
+void populate_operands(TensorIteratorConfig& /*config*/);
 void mark_outputs();
-void mark_resize_outputs(const TensorIteratorConfig&);
-void compute_mem_overlaps(const TensorIteratorConfig&);
-void compute_shape(const TensorIteratorConfig&);
-void compute_strides(const TensorIteratorConfig&);
+void mark_resize_outputs(const TensorIteratorConfig& /*config*/);
+void compute_mem_overlaps(const TensorIteratorConfig& /*config*/);
+void compute_shape(const TensorIteratorConfig& /*config*/);
+void compute_strides(const TensorIteratorConfig& /*config*/);
 void reorder_dimensions();
 void permute_dimensions(IntArrayRef perm);
-void compute_types(const TensorIteratorConfig&);
+void compute_types(const TensorIteratorConfig& /*config*/);
 ScalarType compute_common_dtype();
 void allocate_or_resize_outputs();
-bool fast_set_up(const TensorIteratorConfig&);
-FastSetupType compute_fast_setup_type(const TensorIteratorConfig&);
-void compute_names(const TensorIteratorConfig&);
+bool fast_set_up(const TensorIteratorConfig& /*config*/);
+FastSetupType compute_fast_setup_type(const TensorIteratorConfig& /*config*/);
+void compute_names(const TensorIteratorConfig& /*config*/);
 void propagate_names_to_outputs();
 void coalesce_dimensions();

@@ -20,7 +20,7 @@

 namespace at {

-TORCH_API int _crash_if_asan(int);
+TORCH_API int _crash_if_asan(int /*arg*/);

 // Converts a TensorList (i.e. ArrayRef<Tensor> to vector of TensorImpl*)
 // NB: This is ONLY used by legacy TH bindings, and ONLY used by cat.
@@ -148,7 +148,7 @@ Tensor cached_cast(at::ScalarType to_type, const Tensor& arg, DeviceType device_
 Banned functions
 *******************************/

-static Tensor binary_cross_entropy_banned(const Tensor &, const Tensor &, const std::optional<Tensor>&, int64_t) {
+static Tensor binary_cross_entropy_banned(const Tensor & /*unused*/, const Tensor & /*unused*/, const std::optional<Tensor>& /*unused*/, int64_t /*unused*/) {
 TORCH_CHECK(false, "torch.nn.functional.binary_cross_entropy and torch.nn.BCELoss are unsafe to autocast.\n"
 "Many models use a sigmoid layer right before the binary cross entropy layer.\n"
 "In this case, combine the two layers using torch.nn.functional.binary_cross_entropy_with_logits\n"
@@ -27,11 +27,11 @@ struct TORCH_API NamedTensorMeta final : public c10::NamedTensorMetaInterface {
 HasNonWildcard
 };

-explicit NamedTensorMeta(HAS_NON_WILDCARD, DimnameList names)
+explicit NamedTensorMeta(HAS_NON_WILDCARD /*unused*/, DimnameList names)
 : names_(names.vec()) {
 check_invariants();
 }
-explicit NamedTensorMeta(HAS_NON_WILDCARD, std::vector<Dimname>&& names)
+explicit NamedTensorMeta(HAS_NON_WILDCARD /*unused*/, std::vector<Dimname>&& names)
 : names_(std::move(names)) {
 check_invariants();
 }
@@ -52,13 +52,13 @@ struct TORCH_API NamedTensorMeta final : public c10::NamedTensorMetaInterface {
 std::any_of(names_.begin(), names_.end(), [](const Dimname& n) { return !n.isWildcard(); }));
 }

-void set_names(HAS_NON_WILDCARD, DimnameList new_names) {
+void set_names(HAS_NON_WILDCARD /*unused*/, DimnameList new_names) {
 TORCH_INTERNAL_ASSERT(new_names.size() == names_.size());
 std::copy(new_names.begin(), new_names.end(), names_.begin());
 check_invariants();
 }

-void set_names(HAS_NON_WILDCARD, std::vector<Dimname>&& new_names) {
+void set_names(HAS_NON_WILDCARD /*unused*/, std::vector<Dimname>&& new_names) {
 TORCH_INTERNAL_ASSERT(new_names.size() == names_.size());
 names_ = std::move(new_names);
 check_invariants();
@@ -13,7 +13,7 @@ class TORCH_API PythonOpRegistrationTrampoline final {
 public:
 // Returns true if you successfully registered yourself (that means
 // you are in the hot seat for doing the operator registrations!)
-static bool registerInterpreter(c10::impl::PyInterpreter*);
+static bool registerInterpreter(c10::impl::PyInterpreter* /*interp*/);

 // Returns nullptr if no interpreter has been registered yet.
 static c10::impl::PyInterpreter* getInterpreter();
@@ -100,7 +100,7 @@ class TORCH_API TensorBase {
 // Create a Tensor with a +0 reference count. Special care must be
 // taken to avoid decrementing this reference count at destruction
 // time. Intended to support MaybeOwnedTraits<Tensor>.
-explicit TensorBase(unsafe_borrow_t, const TensorBase& rhs)
+explicit TensorBase(unsafe_borrow_t /*unused*/, const TensorBase& rhs)
 : impl_(c10::intrusive_ptr<at::TensorImpl, UndefinedTensorImpl>(rhs.impl_.get(), c10::raw::DontIncreaseRefcount{})) {}
 friend MaybeOwnedTraits<TensorBase>;

@@ -954,7 +954,7 @@ protected:
 c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> impl_;

 private:
-TensorBase __dispatch_contiguous(c10::MemoryFormat) const;
+TensorBase __dispatch_contiguous(c10::MemoryFormat /*memory_format*/) const;
 };

 inline DeviceIndex get_device(const TensorBase& self) {
@@ -18,10 +18,10 @@ class KernelFunction;
 // implementation notes; notably, this does NOT actually go through the
 // boxing/unboxing codepath.
 TORCH_API void fallthrough_kernel(
-OperatorKernel*,
-const OperatorHandle&,
-DispatchKeySet,
-Stack*);
+OperatorKernel* /*unused*/,
+const OperatorHandle& /*unused*/,
+DispatchKeySet /*unused*/,
+Stack* /*unused*/);

 // Note [Ambiguity in AutogradOther kernel]
 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -62,10 +62,10 @@ TORCH_API void fallthrough_kernel(
 // than arbitrarily pick one or the other, we just register a kernel that raises
 // an error and let the user decide how to proceed.
 TORCH_API void ambiguous_autogradother_kernel(
-OperatorKernel*,
-const OperatorHandle&,
-DispatchKeySet,
-Stack*);
+OperatorKernel* /*unused*/,
+const OperatorHandle& /*op*/,
+DispatchKeySet /*unused*/,
+Stack* /*unused*/);

 // Note [named_not_supported_kernel]
 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -75,10 +75,10 @@ TORCH_API void ambiguous_autogradother_kernel(
 // give a good error message in cases when boxing is not supported). When
 // boxing is universally supported this can be removed.
 [[noreturn]] TORCH_API void named_not_supported_kernel(
-OperatorKernel*,
-const OperatorHandle&,
-DispatchKeySet,
-Stack*);
+OperatorKernel* /*unused*/,
+const OperatorHandle& /*op*/,
+DispatchKeySet /*unused*/,
+Stack* /*unused*/);

 /**
 * BoxedKernel is similar to a std::function storing a boxed kernel.
@@ -185,16 +185,16 @@ class TORCH_API BoxedKernel final {

 template <BoxedKernelFunction* func>
 static void make_boxed_function(
-OperatorKernel*,
+OperatorKernel* /*unused*/,
 const OperatorHandle& opHandle,
-DispatchKeySet,
+DispatchKeySet /*unused*/,
 Stack* stack);

 template <BoxedKernelFunction_withDispatchKeys* func>
 static void make_boxed_function(
-OperatorKernel*,
+OperatorKernel* /*unused*/,
 const OperatorHandle& opHandle,
-DispatchKeySet,
+DispatchKeySet /*ks*/,
 Stack* stack);

 explicit BoxedKernel(
@@ -11,9 +11,9 @@ inline BoxedKernel::BoxedKernel(

 template <BoxedKernel::BoxedKernelFunction* func>
 inline void BoxedKernel::make_boxed_function(
-OperatorKernel*,
+OperatorKernel* /*unused*/,
 const OperatorHandle& opHandle,
-DispatchKeySet,
+DispatchKeySet /*unused*/,
 Stack* stack) {
 // Note that we're dropping the DispatchKeySet argument.
 // See Note [Plumbing Keys Through The Dispatcher 2] for details.
@@ -22,7 +22,7 @@ inline void BoxedKernel::make_boxed_function(

 template <BoxedKernel::BoxedKernelFunction_withDispatchKeys* func>
 inline void BoxedKernel::make_boxed_function(
-OperatorKernel*,
+OperatorKernel* /*unused*/,
 const OperatorHandle& opHandle,
 DispatchKeySet ks,
 Stack* stack) {
@@ -10,7 +10,7 @@ namespace c10 {
 // be handled specially. Its semantics is that it redispatches to the
 // *next* dispatch key that would have been processed, skipping the current
 // one.
-void fallthrough_kernel(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*) {
+void fallthrough_kernel(OperatorKernel* /*unused*/, const OperatorHandle& /*unused*/, DispatchKeySet /*unused*/, Stack* /*unused*/) {
 TORCH_INTERNAL_ASSERT(0,
 "fallthrough_kernel was executed but it should have been short-circuited by the dispatcher. "
 "This could occur if you registered a fallthrough kernel as a override for a specific operator "
@@ -19,7 +19,7 @@ void fallthrough_kernel(OperatorKernel*, const OperatorHandle&, DispatchKeySet,
 "let us know in the bug tracker.");
 }

-void ambiguous_autogradother_kernel(OperatorKernel*, const OperatorHandle& op, DispatchKeySet, Stack*) {
+void ambiguous_autogradother_kernel(OperatorKernel* /*unused*/, const OperatorHandle& op, DispatchKeySet /*unused*/, Stack* /*unused*/) {
 TORCH_INTERNAL_ASSERT(0,
 op.operator_name(), " has kernels registered to both CompositeImplicitAutograd and a backend mapped to AutogradOther. "
 "This makes the backend kernel unreachable; the dispatcher will always prefer the CompositeImplicitAutograd lowering "
@@ -32,7 +32,7 @@ void ambiguous_autogradother_kernel(OperatorKernel*, const OperatorHandle& op, D
 "\nCanonical state\n~~~~~~~~~~~\n", op.dumpState(), "\n\n");
 }

-void named_not_supported_kernel(OperatorKernel*, const OperatorHandle& op, DispatchKeySet, Stack*) {
+void named_not_supported_kernel(OperatorKernel* /*unused*/, const OperatorHandle& op, DispatchKeySet /*unused*/, Stack* /*unused*/) {
 // DO NOT LOOK AT STACK, YOU HAVE SHORT CIRCUITED BOXING
 // See Note [named_not_supported_kernel]
 TORCH_CHECK(0,
@@ -229,7 +229,7 @@ class TORCH_API KernelFunction final {
 * &unboxed_func>();
 */
 template <class FuncPtr, bool AllowLegacyTypes = false>
-static KernelFunction makeFromUnboxedFunction(FuncPtr);
+static KernelFunction makeFromUnboxedFunction(FuncPtr /*func_ptr*/);

 /**
 * Create a KernelFunction from an unboxed function.
@@ -271,7 +271,7 @@ class TORCH_API KernelFunction final {

 std::string dumpState() const;
 // For testing internal invariants only
-bool _equalsBoxedAndUnboxed(const KernelFunction&) const;
+bool _equalsBoxedAndUnboxed(const KernelFunction& /*other*/) const;

 // Register a token to be invalidated when this KernelFunction is destroyed
 void registerToken(std::weak_ptr<KernelToken> token) const;
@@ -131,7 +131,7 @@ C10_ALWAYS_INLINE_UNLESS_MOBILE void boxToStack(
 new (dest++) IValue(options.pinned_memory());
 }

-inline void boxArgsToStack(IValue*&) {}
+inline void boxArgsToStack(IValue*& /*unused*/) {}

 template <typename T, typename... Args>
 C10_ALWAYS_INLINE_UNLESS_MOBILE void boxArgsToStack(
@@ -185,7 +185,7 @@ struct PopResult<std::tuple<Types...>> final {
 template <size_t... indices>
 static Result pop_to_tuple_impl(
 Stack& stack,
-std::index_sequence<indices...>) {
+std::index_sequence<indices...> /*unused*/) {
 return std::make_tuple((std::move(stack[indices]).template to<Types>())...);
 }
 };
@@ -561,7 +561,7 @@ struct wrap_kernel_functor_unboxed_<
 // doesn't use &&
 static ReturnType call(
 OperatorKernel* functor,
-DispatchKeySet,
+DispatchKeySet /*unused*/,
 ParameterTypes... args) {
 KernelFunctor* functor_ = static_cast<KernelFunctor*>(functor);
 // Note [Plumbing Keys Through The Dispatcher 2]
@@ -629,8 +629,8 @@ call_functor_with_args_from_stack_(
 OperatorKernel* functor,
 DispatchKeySet dispatchKeySet,
 Stack* stack,
-std::index_sequence<ivalue_arg_indices...>,
-guts::typelist::typelist<ArgTypes...>*) {
+std::index_sequence<ivalue_arg_indices...> /*unused*/,
+guts::typelist::typelist<ArgTypes...>* /*unused*/) {
 (void)(stack); // when sizeof...(ivalue_arg_indices) == 0, this argument would
 // be unused and we have to silence the compiler warning.

@@ -708,7 +708,7 @@ struct push_outputs<std::tuple<OutputTypes...>, AllowDeprecatedTypes> final {
 static void call_(
 std::tuple<OutputTypes...>&& output,
 Stack* stack,
-std::index_sequence<indices...>) {
+std::index_sequence<indices...> /*unused*/) {
 torch::jit::push(
 *stack,
 return_to_ivalue<OutputTypes, AllowDeprecatedTypes>::call(
@@ -718,7 +718,7 @@ struct push_outputs<std::tuple<OutputTypes...>, AllowDeprecatedTypes> final {
 static void copy_(
 const std::tuple<OutputTypes...>& output,
 Stack* stack,
-std::index_sequence<indices...>) {
+std::index_sequence<indices...> /*unused*/) {
 torch::jit::push(
 *stack,
 return_to_ivalue<OutputTypes, AllowDeprecatedTypes>::copy(
@@ -741,7 +741,7 @@ struct make_boxed_from_unboxed_functor final {

 static void call(
 OperatorKernel* functor,
-const OperatorHandle&,
+const OperatorHandle& /*unused*/,
 DispatchKeySet dispatchKeySet,
 Stack* stack) {
 using ReturnType =
@@ -63,13 +63,13 @@ struct BuiltinOpFunction : public Function {

 bool call(
 Stack& stack,
-std::optional<size_t>,
-c10::function_ref<void(const Code&)>) override {
+std::optional<size_t> /*unused*/,
+c10::function_ref<void(const Code&)> /*unused*/) override {
 run(stack);
 return false;
 }

-bool call(Stack& stack, c10::function_ref<void(const mobile::Code&)>)
+bool call(Stack& stack, c10::function_ref<void(const mobile::Code&)> /*unused*/)
 override {
 run(stack);
 return false;
@@ -80,7 +80,8 @@ struct MultiDispatchKeySet : at::IterArgs<MultiDispatchKeySet> {
 ts = ts | x.key_set();
 }
 }
-[[noreturn]] void operator()(at::ArrayRef<std::optional<at::Tensor>>) {
+[[noreturn]] void operator()(
+at::ArrayRef<std::optional<at::Tensor>> /*unused*/) {
 // Just checking that the handling of Tensor?[] didn't change.
 TORCH_INTERNAL_ASSERT(false);
 }
@@ -95,7 +96,7 @@ struct MultiDispatchKeySet : at::IterArgs<MultiDispatchKeySet> {
 }
 }
 template <typename T>
-void operator()(const T&) {
+void operator()(const T& /*unused*/) {
 // do nothing
 }
 };
@@ -633,7 +633,7 @@ class TypedOperatorHandle<Return(Args...)> final : public OperatorHandle {

 namespace detail {
 template <class... Args>
-inline void unused_arg_(const Args&...) {}
+inline void unused_arg_(const Args&... /*unused*/) {}

 // CaptureKernelCall is intended to capture return values from Dispatcher
 // unboxed kernel calls. A record function may request to get outputs from the
@@ -105,7 +105,7 @@ class TORCH_API OperatorEntry final {
 // versa that is an error. (Refcounting for the registrations is
 // handled in the OperatorHandle in Dispatcher)
 void registerSchema(
-FunctionSchema&&,
+FunctionSchema&& /*schema*/,
 std::string&& debug,
 std::vector<at::Tag> tags = {});
 void deregisterSchema();
@@ -177,7 +177,7 @@ bool DynamicType::equals(const Type& rhs) const {
 return equals(*create(rhs));
 }

-bool DynamicType::isSubtypeOfExt(const Type& rhs, std::ostream*) const {
+bool DynamicType::isSubtypeOfExt(const Type& rhs, std::ostream* /*why_not*/) const {
 auto other = create(rhs);
 if (tag_ == other->tag_) {
 if (equals(*other)) {
@@ -371,7 +371,7 @@ DynamicTypePtr ivalue::TupleTypeFactory<c10::DynamicType>::create(
 }

 DynamicTypePtr ivalue::TupleTypeFactory<c10::DynamicType>::fallback(
-const Type&) {
+const Type& /*unused*/) {
 TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false);
 return nullptr;
 }
@@ -138,8 +138,8 @@ class DynamicType : public SharedType {

 struct Arguments {
 Arguments() = default;
-Arguments(c10::ArrayRef<TypePtr>);
-Arguments(const std::vector<std::string_view>&, c10::ArrayRef<TypePtr>);
+Arguments(c10::ArrayRef<TypePtr> /*args*/);
+Arguments(const std::vector<std::string_view>& /*names*/, c10::ArrayRef<TypePtr> /*args*/);
 std::vector<LabeledDynamicType> elems;
 };

@@ -156,15 +156,15 @@ class DynamicType : public SharedType {
 static const TypeKind Kind = TypeKind::DynamicType;
 static TORCH_API DynamicTypePtr create(Type& ty);

-explicit DynamicType(Tag, Arguments);
-explicit DynamicType(Tag, std::string_view, Arguments);
+explicit DynamicType(Tag /*tag*/, Arguments /*arguments*/);
+explicit DynamicType(Tag /*tag*/, std::string_view /*name*/, Arguments /*arguments*/);

 DynamicType(DynamicType&& other) = delete;
 DynamicType(const DynamicType&) = delete;
 DynamicType& operator=(const DynamicType&) = delete;
 DynamicType& operator=(DynamicType&&) = delete;

-TypePtr containedType(size_t) const override;
+TypePtr containedType(size_t /*i*/) const override;
 size_t containedTypeSize() const override;
 Tag tag() const {
 return tag_;
@@ -96,15 +96,15 @@ struct TORCH_API Function {
 // Overload for server interpreter, a bailout size is needed for graph
 // executor.
 virtual bool call(
-Stack&,
-std::optional<size_t>,
-c10::function_ref<void(const Code&)>) {
+Stack& /*unused*/,
+std::optional<size_t> /*unused*/,
+c10::function_ref<void(const Code&)> /*unused*/) {
 TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false);
 return false;
 }

 // Overload for mobile interpreter.
-virtual bool call(Stack&, c10::function_ref<void(const mobile::Code&)>) {
+virtual bool call(Stack& /*unused*/, c10::function_ref<void(const mobile::Code&)> /*unused*/) {
 TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false);
 return false;
 }
@@ -847,7 +847,7 @@ struct TORCH_API IValue final {
 IValue(std::optional<T> v);
 template <class T, enable_if_list_is_ivalue_constructible<T> = nullptr>
 IValue(c10::OptionalArrayRef<T> v);
-IValue(std::nullopt_t);
+IValue(std::nullopt_t /*unused*/);

 // ClassType
 IValue(c10::intrusive_ptr<ivalue::Object> v);
@@ -660,7 +660,7 @@ struct TORCH_API TupleTypeFactory<TupleType> {
 template <>
 struct TORCH_API TupleTypeFactory<c10::DynamicType> {
 static DynamicTypePtr create(const std::vector<TypePtr>& elemTypes);
-static DynamicTypePtr fallback(const Type&);
+static DynamicTypePtr fallback(const Type& /*unused*/);
 };

 struct TORCH_API Tuple : c10::intrusive_ptr_target {
@@ -1682,7 +1682,7 @@ struct ivalue::EnumHolder : c10::intrusive_ptr_target {
 namespace detail {

 struct _guarded_unsigned_long_unique_dummy final {
-_guarded_unsigned_long_unique_dummy(int64_t){}
+_guarded_unsigned_long_unique_dummy(int64_t /*unused*/){}
 };
 using _guarded_unsigned_long = std::conditional_t<
 std::is_same_v<unsigned long, uint32_t> ||
@@ -1776,7 +1776,7 @@ template <class Elem>
 // native_functions.yaml still return std::vector.
 // C10_DEPRECATED_MESSAGE("IValues based on std::vector<T> are potentially slow
 // and deprecated. Please use torch::List<T> instead.")
-std::vector<Elem> generic_to(IValue ivalue, _fake_type<std::vector<Elem>>) {
+std::vector<Elem> generic_to(IValue ivalue, _fake_type<std::vector<Elem>> /*unused*/) {
 // We need to do a deep copy of the vector because there might be other
 // references to this same IValue that also use the list. We can't just
 // move the elements out.
@@ -1826,18 +1826,18 @@ c10::intrusive_ptr<T> IValue::toCustomClass() const& {
 }

 template <typename T>
-T generic_to(IValue ivalue, _fake_type<T>) {
+T generic_to(IValue ivalue, _fake_type<T> /*unused*/) {
 using ElemType = typename std::remove_pointer<T>::type::element_type;
 return std::move(ivalue).template toCustomClass<ElemType>();
 }

 template <typename T>
-tagged_capsule<T> generic_to(IValue ivalue, _fake_type<tagged_capsule<T>>) {
+tagged_capsule<T> generic_to(IValue ivalue, _fake_type<tagged_capsule<T>> /*unused*/) {
 return tagged_capsule<T>{std::move(ivalue)};
 }

 template <typename Elem>
-c10::List<Elem> generic_to(IValue ivalue, _fake_type<c10::List<Elem>>) {
+c10::List<Elem> generic_to(IValue ivalue, _fake_type<c10::List<Elem>> /*unused*/) {
 return impl::toTypedList<Elem>(std::move(ivalue).toList());
 }

@@ -1867,7 +1867,7 @@ std::vector<T> createVectorFromList(const c10::List<T>& impl) {
 }

 template <typename T>
-OptionalArray<T> generic_to(IValue ivalue, _fake_type<OptionalArray<T>>) {
+OptionalArray<T> generic_to(IValue ivalue, _fake_type<OptionalArray<T>> /*unused*/) {
 if (ivalue.isNone()) {
 return {};
 }
@@ -1880,8 +1880,8 @@ namespace detail {
 template <typename Elem, size_t... I>
 std::array<Elem, sizeof...(I)> generic_to_array(
 IValue ivalue,
-_fake_type<std::array<Elem, sizeof...(I)>>,
-std::index_sequence<I...>) {
+_fake_type<std::array<Elem, sizeof...(I)>> /*unused*/,
+std::index_sequence<I...> /*unused*/) {
 // We need to do a deep copy of the array because there might be other
 // references to this same IValue that also use the list. We can't just
 // move the elements out.
@@ -1906,7 +1906,7 @@ std::array<Elem, N> generic_to(
 template <typename Key, typename Value>
 c10::Dict<Key, Value> generic_to(
 IValue ivalue,
-_fake_type<c10::Dict<Key, Value>>) {
+_fake_type<c10::Dict<Key, Value>> /*unused*/) {
 return impl::toTypedDict<Key, Value>(std::move(ivalue).toGenericDict());
 }

@@ -1915,7 +1915,7 @@ C10_DEPRECATED_MESSAGE(
 "IValues based on std::unordered_map are slow and deprecated. Please use c10::Dict<K, V> instead.")
 std::unordered_map<K, V> generic_to(
 IValue ivalue,
-_fake_type<std::unordered_map<K, V>>) {
+_fake_type<std::unordered_map<K, V>> /*unused*/) {
 std::unordered_map<K, V> specialized_dict;

 for (const auto& item : std::move(ivalue).toGenericDict()) {
@@ -1926,7 +1926,7 @@ std::unordered_map<K, V> generic_to(
 }

 template <typename T>
-std::optional<T> generic_to(IValue ivalue, _fake_type<std::optional<T>>) {
+std::optional<T> generic_to(IValue ivalue, _fake_type<std::optional<T>> /*unused*/) {
 if (ivalue.isNone()) {
 return std::nullopt;
 }
@@ -1937,7 +1937,7 @@ namespace detail {
 template <typename Tuple, std::size_t... INDEX>
 Tuple generic_to_tuple_impl(
 const ivalue::TupleElements& t,
-std::index_sequence<INDEX...>) {
+std::index_sequence<INDEX...> /*unused*/) {
 return std::make_tuple(
 t[INDEX].to<typename std::tuple_element<INDEX, Tuple>::type>()...);
 }
@@ -1951,7 +1951,7 @@ template <
 std::is_lvalue_reference<Args>...,
 std::negation<std::is_constructible<IValue, Args>>...>,
 std::nullptr_t> = nullptr>
-std::tuple<Args...> generic_to(const IValue& ivalue, _fake_type<std::tuple<Args...>>) {
+std::tuple<Args...> generic_to(const IValue& ivalue, _fake_type<std::tuple<Args...>> /*unused*/) {
 const auto& vals = ivalue.toTupleRef().elements();
 TORCH_CHECK(vals.size() == sizeof...(Args));
 return detail::generic_to_tuple_impl<std::tuple<Args...>>(vals, Indices{});
@@ -2311,7 +2311,7 @@ inline IValue::IValue(std::optional<T> v) : IValue() {
 }
 }

-inline IValue::IValue(std::nullopt_t) : IValue() {}
+inline IValue::IValue(std::nullopt_t /*unused*/) : IValue() {}

 inline IValue::IValue(c10::intrusive_ptr<ivalue::Object> v)
 : tag(Tag::Object) {
@@ -2482,15 +2482,15 @@ namespace ivalue {
 namespace detail {

 template <typename T>
-IValue from_(T&& x, std::true_type) {
+IValue from_(T&& x, std::true_type /*unused*/) {
 return IValue(std::forward<T>(x));
 }
 template <typename T>
-IValue from_(c10::intrusive_ptr<T> x, std::false_type) {
+IValue from_(c10::intrusive_ptr<T> x, std::false_type /*unused*/) {
 return IValue(std::move(x));
 }
 template <typename T>
-IValue from_(T&& /*x*/, std::false_type) {
+IValue from_(T&& /*x*/, std::false_type /*unused*/) {
 static_assert(
 guts::false_t<T>::value,
 "You are calling from with a type that it doesn't support, and isn't a potential custom class (ie: is an intrusive_ptr)");
@@ -2546,19 +2546,19 @@ struct MaybeOwnedTraits<IValue> {
 return &borrow;
 }

-static bool debugBorrowIsValid(const borrow_type&) {
+static bool debugBorrowIsValid(const borrow_type& /*unused*/) {
 return true;
 }
 };

 template <>
 struct IValue::TagType<c10::Type> {
-static TORCH_API c10::TypePtr get(const IValue&);
+static TORCH_API c10::TypePtr get(const IValue& /*v*/);
 };

 template <>
 struct IValue::TagType<c10::DynamicType> {
-static TORCH_API c10::TypePtr get(const IValue&);
+static TORCH_API c10::TypePtr get(const IValue& /*v*/);
 };

 template <typename T>
@@ -44,7 +44,7 @@ constexpr int checkStaticTypes() {
 }

 template <typename... Ts, size_t... Is>
-constexpr std::array<ArgumentDef, sizeof...(Ts)> createArgumentVectorFromTypes(std::index_sequence<Is...>) {
+constexpr std::array<ArgumentDef, sizeof...(Ts)> createArgumentVectorFromTypes(std::index_sequence<Is...> /*unused*/) {
 return (
 // Check types for common errors
 checkStaticTypes<Ts...>(),
@@ -83,7 +83,7 @@ inline bool operator!=(const OperatorName& lhs, const OperatorName& rhs) {
 }

 TORCH_API std::string toString(const OperatorName& opName);
-TORCH_API std::ostream& operator<<(std::ostream&, const OperatorName&);
+TORCH_API std::ostream& operator<<(std::ostream& /*os*/, const OperatorName& /*opName*/);

 } // namespace c10

@@ -16,7 +16,7 @@ class SingletonTypePtr {
 /* implicit */ SingletonTypePtr(T* p) : repr_(p) {}

 // We need this to satisfy Pybind11, but it shouldn't be hit.
-explicit SingletonTypePtr(std::shared_ptr<T>) { TORCH_CHECK(false); }
+explicit SingletonTypePtr(std::shared_ptr<T> /*unused*/) { TORCH_CHECK(false); }

 using element_type = typename std::shared_ptr<T>::element_type;

@@ -342,19 +342,19 @@ class Vectorized<c10::complex<double>> {
 return _mm256_cmp_pd(values, other.values, _CMP_NEQ_UQ);
 }
 Vectorized<c10::complex<double>> operator<(
-const Vectorized<c10::complex<double>>&) const {
+const Vectorized<c10::complex<double>>& /*unused*/) const {
 TORCH_CHECK(false, "not supported for complex numbers");
 }
 Vectorized<c10::complex<double>> operator<=(
-const Vectorized<c10::complex<double>>&) const {
+const Vectorized<c10::complex<double>>& /*unused*/) const {
 TORCH_CHECK(false, "not supported for complex numbers");
 }
 Vectorized<c10::complex<double>> operator>(
-const Vectorized<c10::complex<double>>&) const {
+const Vectorized<c10::complex<double>>& /*unused*/) const {
 TORCH_CHECK(false, "not supported for complex numbers");
 }
 Vectorized<c10::complex<double>> operator>=(
-const Vectorized<c10::complex<double>>&) const {
+const Vectorized<c10::complex<double>>& /*unused*/) const {
 TORCH_CHECK(false, "not supported for complex numbers");
 }

@@ -17,7 +17,7 @@ TORCH_CUDA_CPP_API void set_magma_init_fn(void (*magma_init_fn)());

 // The real implementation of CUDAHooksInterface
 struct CUDAHooks : public at::CUDAHooksInterface {
-CUDAHooks(at::CUDAHooksArgs) {}
+CUDAHooks(at::CUDAHooksArgs /*unused*/) {}
 void init() const override;
 Device getDeviceFromPtr(void* data) const override;
 bool isPinnedPtr(const void* data) const override;
@@ -29,7 +29,7 @@ template <typename ParamsT>
 class Callable {
 public:
 virtual ~Callable() = default;
-virtual TuningStatus Call(const ParamsT*) {
+virtual TuningStatus Call(const ParamsT* /*unused*/) {
 return FAIL;
 }
 virtual TuningStatus IsSupported(const ParamsT* params) {
@@ -25,7 +25,7 @@ struct TORCH_API HPUHooksInterface : AcceleratorHooksInterface {
 false, "Cannot get device of pointer on HPU without HPU backend");
 }

-bool isPinnedPtr(const void*) const override {
+bool isPinnedPtr(const void* /*data*/) const override {
 return false;
 }

@@ -410,7 +410,7 @@ struct ExistingBdimBatchRuleHelper<F, Func, c10::guts::typelist::typelist<A, T..


 template <typename F, F Method, typename... ExtraArgs>
-Tensor& unary_inplace_batch_rule(Tensor& self, std::optional<int64_t>, ExtraArgs... extra_args) {
+Tensor& unary_inplace_batch_rule(Tensor& self, std::optional<int64_t> /*unused*/, ExtraArgs... extra_args) {
 INVOKE(self, Method)(std::forward<ExtraArgs>(extra_args)...);
 return self;
 }
@@ -18,7 +18,7 @@ extern std::atomic<const MetalInterface*> g_metal_impl_registry;

 class MetalImplRegistrar {
 public:
-explicit MetalImplRegistrar(MetalInterface*);
+explicit MetalImplRegistrar(MetalInterface* /*impl*/);
 };

 at::Tensor& metal_copy_(at::Tensor& self, const at::Tensor& src);
@@ -2060,7 +2060,7 @@ std::tuple<Tensor, Tensor> linalg_lu_factor(const Tensor& A, bool pivot) {
 }

 // TODO Deprecate this function in favour of linalg_lu_factor_ex
-std::tuple<Tensor, Tensor, Tensor> _lu_with_info(const Tensor& self, bool compute_pivots, bool) {
+std::tuple<Tensor, Tensor, Tensor> _lu_with_info(const Tensor& self, bool compute_pivots, bool /*unused*/) {
 TORCH_WARN_ONCE(
 "torch.lu is deprecated in favor of torch.linalg.lu_factor / torch.linalg.lu_factor_ex and will be ",
 "removed in a future PyTorch release.\n",
@@ -346,17 +346,17 @@ template<typename acc_t>
 struct AbsSwitch {};

 template<typename scalar_t, typename acc_t>
-inline C10_DEVICE acc_t abs_if_complex(scalar_t data, AbsSwitch<acc_t>) {
+inline C10_DEVICE acc_t abs_if_complex(scalar_t data, AbsSwitch<acc_t> /*unused*/) {
 return static_cast<acc_t>(data);
 }

 template<typename scalar_t, typename acc_t>
-inline C10_DEVICE acc_t abs_if_complex(std::complex<scalar_t> data, AbsSwitch<acc_t>) {
+inline C10_DEVICE acc_t abs_if_complex(std::complex<scalar_t> data, AbsSwitch<acc_t> /*unused*/) {
 return static_cast<acc_t>(std::abs(data));
 }

 template<typename scalar_t, typename acc_t>
-inline C10_DEVICE acc_t abs_if_complex(c10::complex<scalar_t> data, AbsSwitch<acc_t>) {
+inline C10_DEVICE acc_t abs_if_complex(c10::complex<scalar_t> data, AbsSwitch<acc_t> /*unused*/) {
 return static_cast<acc_t>(std::abs(at::opmath_type<c10::complex<scalar_t>>(data)));
 }

@@ -846,7 +846,7 @@ TORCH_IMPL_FUNC(clamp_Tensor_out)
 (const Tensor& self,
 const OptionalTensorRef min,
 const OptionalTensorRef max,
-const Tensor&) {
+const Tensor& /*unused*/) {
 if (min && max) {
 clamp_stub(device_type(), *this);
 } else if (min) {
@@ -452,11 +452,11 @@ void convolution_depthwise3x3_winograd_impl(
 #else

 void convolution_depthwise3x3_winograd_impl(
-const Arguments&,
-const float* const,
-const float* const,
-const float* const,
-float* const) {
+const Arguments& /*unused*/,
+const float* const /*unused*/,
+const float* const /*unused*/,
+const float* const /*unused*/,
+float* const /*unused*/) {
 }

 #endif /* __ARM_NEON__ */
@@ -46,7 +46,7 @@ using namespace vec;
 template <typename traits, std::size_t... INDEX>
 typename traits::ArgsTuple
 dereference_impl(char* C10_RESTRICT data[], const int64_t* strides, int64_t i,
-std::index_sequence<INDEX...>) {
+std::index_sequence<INDEX...> /*unused*/) {
 return std::make_tuple(
 c10::load<typename traits::template arg<INDEX>::type>(
 data[INDEX] + i * strides[INDEX])...);
@@ -65,7 +65,7 @@ dereference_vec_impl(char* C10_RESTRICT data[],
 const typename traits::result_type& opt_scalar,
 size_t S,
 int64_t i,
-std::index_sequence<INDEX...>) {
+std::index_sequence<INDEX...> /*unused*/) {
 using Vec = typename traits::result_type;
 using scalar_t = typename Vec::value_type;
 return std::make_tuple(
@@ -231,7 +231,7 @@ vectorized_loop(char** C10_RESTRICT data_, int64_t n, int64_t S, func_t&& op, ve
 template <typename traits, typename cb_t>
 inline void unroll_contiguous_scalar_checks(
 const int64_t* /*strides*/,
-std::index_sequence<>,
+std::index_sequence<> /*unused*/,
 cb_t&& cb) {
 cb(0);
 }
@@ -239,7 +239,7 @@ inline void unroll_contiguous_scalar_checks(
 template <typename traits, typename cb_t, size_t INDEX0, size_t ...INDEX>
 inline void unroll_contiguous_scalar_checks(
 const int64_t* strides,
-std::index_sequence<INDEX0, INDEX...>,
+std::index_sequence<INDEX0, INDEX...> /*unused*/,
 cb_t&& cb) {
 if (is_contiguous_scalar<traits, INDEX0 + 1>(strides)) {
 cb(INDEX0 + 1);
@@ -86,7 +86,7 @@ namespace cuda { namespace detail {
 struct LinalgDispatch {
 Tensor (*cholesky_solve_helper)(const Tensor& self, const Tensor& A, bool upper);
 };
-C10_EXPORT void registerLinalgDispatch(const LinalgDispatch&);
+C10_EXPORT void registerLinalgDispatch(const LinalgDispatch& /*disp_*/);
 }} // namespace cuda::detail
 #endif

@@ -31,7 +31,7 @@ TORCH_API float dequantize_vec(
 float* dst,
 size_t count = 8);
 template <typename SRC_T, typename DST_T>
-TORCH_API DST_T requantize_val(double, int64_t, double, int64_t, SRC_T src);
+TORCH_API DST_T requantize_val(double /*src_scale*/, int64_t /*src_zero_point*/, double /*dst_scale*/, int64_t /*dst_zero_point*/, SRC_T src);

 // Given a multiplier and a zero_point, requantize int32_t computed values back
 // to quantized values. See comment above
@@ -104,27 +104,27 @@ Tensor empty_strided_unknown_quantized(

 // Provide better error message if dtype is wrong
 Tensor empty_affine_quantized_other_backends_stub(
-IntArrayRef,
-std::optional<ScalarType>,
-std::optional<Layout>,
-std::optional<Device>,
-std::optional<bool>,
-double,
-int64_t,
-std::optional<c10::MemoryFormat>) {
+IntArrayRef /*unused*/,
+std::optional<ScalarType> /*unused*/,
+std::optional<Layout> /*unused*/,
+std::optional<Device> /*unused*/,
+std::optional<bool> /*unused*/,
+double /*unused*/,
+int64_t /*unused*/,
+std::optional<c10::MemoryFormat> /*unused*/) {
 TORCH_CHECK(false, "Creation of quantized tensor requires quantized dtype like torch.quint8");
 }

 Tensor empty_per_channel_affine_quantized_other_backends_stub(
-IntArrayRef,
-const Tensor&,
-const Tensor&,
-int64_t,
-std::optional<ScalarType>,
-std::optional<Layout>,
-std::optional<Device>,
-std::optional<bool>,
-std::optional<c10::MemoryFormat>) {
+IntArrayRef /*unused*/,
+const Tensor& /*unused*/,
+const Tensor& /*unused*/,
+int64_t /*unused*/,
+std::optional<ScalarType> /*unused*/,
+std::optional<Layout> /*unused*/,
+std::optional<Device> /*unused*/,
+std::optional<bool> /*unused*/,
+std::optional<c10::MemoryFormat> /*unused*/) {
 TORCH_CHECK(false, "Creation of quantized tensor requires quantized dtype like torch.quint8");
 }

@@ -29,63 +29,63 @@ bool available() {
 }

 bool use_convolution2d(
-const Tensor&,
-const Tensor&,
-const at::OptionalIntArrayRef,
-const IntArrayRef,
-const IntArrayRef,
-const IntArrayRef,
-const int64_t,
-bool) {
+const Tensor& /*unused*/,
+const Tensor& /*unused*/,
+const at::OptionalIntArrayRef /*unused*/,
+const IntArrayRef /*unused*/,
+const IntArrayRef /*unused*/,
+const IntArrayRef /*unused*/,
+const int64_t /*unused*/,
+bool /*unused*/) {
 return false;
 }

 Tensor convolution2d(
-const Tensor&,
-const Tensor&,
-const Tensor&,
-const IntArrayRef,
-const IntArrayRef,
-const IntArrayRef,
-const int64_t) {
+const Tensor& /*unused*/,
+const Tensor& /*unused*/,
+const Tensor& /*unused*/,
+const IntArrayRef /*unused*/,
+const IntArrayRef /*unused*/,
+const IntArrayRef /*unused*/,
+const int64_t /*unused*/) {
 TORCH_CHECK(false, internal::kError);
 }

 bool use_linear(
-const Tensor&,
-const Tensor&,
-const Tensor&) {
+const Tensor& /*unused*/,
+const Tensor& /*unused*/,
+const Tensor& /*unused*/) {
 return false;
 }

 Tensor linear(
-const Tensor&,
-const Tensor&,
-const Tensor&) {
+const Tensor& /*unused*/,
+const Tensor& /*unused*/,
+const Tensor& /*unused*/) {
 TORCH_CHECK(false, internal::kError);
 }

 bool use_max_pool2d(
-const Tensor&,
-const IntArrayRef,
-const IntArrayRef,
-IntArrayRef,
-const IntArrayRef,
-const bool,
-const float,
-const float) {
+const Tensor& /*unused*/,
+const IntArrayRef /*unused*/,
+const IntArrayRef /*unused*/,
+IntArrayRef /*unused*/,
+const IntArrayRef /*unused*/,
+const bool /*unused*/,
+const float /*unused*/,
+const float /*unused*/) {
 return false;
 }

 Tensor max_pool2d(
-const Tensor&,
-const IntArrayRef,
-const IntArrayRef,
-IntArrayRef,
-const IntArrayRef,
-const bool,
-const float,
-const float) {
+const Tensor& /*unused*/,
+const IntArrayRef /*unused*/,
+const IntArrayRef /*unused*/,
+IntArrayRef /*unused*/,
+const IntArrayRef /*unused*/,
+const bool /*unused*/,
+const float /*unused*/,
+const float /*unused*/) {
 TORCH_CHECK(false, internal::kError);
 }

@@ -5,7 +5,7 @@ namespace at {

 namespace detail {

-inline void noopDelete(void*) {}
+inline void noopDelete(void* /*unused*/) {}

 } // namespace detail

@@ -18,7 +18,7 @@ extern std::atomic<const VulkanImplInterface*> g_vulkan_impl_registry;

 class VulkanImplRegistrar {
 public:
-explicit VulkanImplRegistrar(VulkanImplInterface*);
+explicit VulkanImplRegistrar(VulkanImplInterface* /*impl*/);
 };

 at::Tensor& vulkan_copy_(at::Tensor& self, const at::Tensor& src);
@@ -154,7 +154,7 @@ class DefaultMobileCPUAllocator final : public at::Allocator {
 }
 };

-void NoDelete(void*) {}
+void NoDelete(void* /*unused*/) {}

 at::Allocator* GetCPUAllocator() {
 return GetAllocator(DeviceType::CPU);
@ -17,7 +17,7 @@ namespace c10 {
|
||||
using MemoryDeleter = void (*)(void*);
|
||||
|
||||
// A helper function that is basically doing nothing.
|
||||
C10_API void NoDelete(void*);
|
||||
C10_API void NoDelete(void* /*unused*/);
|
||||
|
||||
// A simple struct that is used to report C10's memory allocation,
|
||||
// deallocation status and out-of-memory events to the profiler
|
||||
|
@ -590,10 +590,12 @@ constexpr uint16_t num_runtime_entries = num_functionality_keys +
|
||||
constexpr uint16_t full_backend_mask =
|
||||
(static_cast<uint16_t>(1) << num_backends) - 1;
|
||||
|
||||
C10_API const char* toString(DispatchKey);
|
||||
C10_API const char* toString(BackendComponent);
|
||||
C10_API std::ostream& operator<<(std::ostream&, DispatchKey);
|
||||
C10_API std::ostream& operator<<(std::ostream&, BackendComponent);
|
||||
C10_API const char* toString(DispatchKey /*t*/);
|
||||
C10_API const char* toString(BackendComponent /*t*/);
|
||||
C10_API std::ostream& operator<<(std::ostream& /*str*/, DispatchKey /*rhs*/);
|
||||
C10_API std::ostream& operator<<(
|
||||
std::ostream& /*str*/,
|
||||
BackendComponent /*rhs*/);
|
||||
|
||||
C10_API DispatchKey getAutogradKeyFromBackend(BackendComponent k);
|
||||
|
||||
|
@ -172,10 +172,10 @@ class DispatchKeySet final {
|
||||
// use of DispatchKeySet in TLS requires this.
|
||||
constexpr DispatchKeySet() = default;
|
||||
|
||||
constexpr DispatchKeySet(Full)
|
||||
constexpr DispatchKeySet(Full /*unused*/)
|
||||
: repr_((1ULL << (num_backends + num_functionality_keys - 1)) - 1) {}
|
||||
|
||||
constexpr DispatchKeySet(FullAfter, DispatchKey t)
|
||||
constexpr DispatchKeySet(FullAfter /*unused*/, DispatchKey t)
|
||||
// LSB after t are OK, but not t itself.
|
||||
// "functionalities" have a notion of ordering (e.g. Autograd > Sparse >
|
||||
// Quantized > Dense). But backends don't really have an ordering.
|
||||
@ -191,7 +191,7 @@ class DispatchKeySet final {
|
||||
|
||||
// Public version of DispatchKeySet(uint64_t) API; external users
|
||||
// must be explicit when they do this!
|
||||
constexpr DispatchKeySet(Raw, uint64_t x) : repr_(x) {}
|
||||
constexpr DispatchKeySet(Raw /*unused*/, uint64_t x) : repr_(x) {}
|
||||
|
||||
constexpr explicit DispatchKeySet(BackendComponent k) {
|
||||
if (k == BackendComponent::InvalidBit) {
|
||||
@ -631,8 +631,8 @@ class DispatchKeySet final {
|
||||
}
|
||||
};
|
||||
|
||||
C10_API std::string toString(DispatchKeySet);
|
||||
C10_API std::ostream& operator<<(std::ostream&, DispatchKeySet);
|
||||
C10_API std::string toString(DispatchKeySet /*ts*/);
|
||||
C10_API std::ostream& operator<<(std::ostream& /*os*/, DispatchKeySet /*ts*/);
|
||||
|
||||
inline int getDispatchTableIndexForDispatchKey(DispatchKey k) {
|
||||
return DispatchKeySet(k).getDispatchTableIndexForDispatchKeySet();
|
||||
|
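DispatchKeySet's Full, FullAfter, and Raw constructors above take tag arguments whose only job is to select an overload; the value itself is never read, which is why only a /*unused*/ comment remains. A standalone sketch of that tag-dispatch shape; KeySet is a made-up class for illustration, not the c10 type:

#include <cstdint>
#include <iostream>

class KeySet {
 public:
  enum Full { FULL };
  enum Raw { RAW };

  constexpr KeySet() = default;
  // The tag argument only picks this overload; its value is ignored.
  constexpr KeySet(Full /*unused*/) : repr_(~0ULL) {}
  // Callers must say explicitly that they are passing a raw bit pattern.
  constexpr KeySet(Raw /*unused*/, uint64_t bits) : repr_(bits) {}

  constexpr uint64_t raw_repr() const { return repr_; }

 private:
  uint64_t repr_ = 0;
};

int main() {
  constexpr KeySet everything(KeySet::FULL);
  constexpr KeySet custom(KeySet::RAW, 0b1010);
  std::cout << std::hex << everything.raw_repr() << " " << custom.raw_repr()
            << '\n';
}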
@ -60,7 +60,7 @@ struct C10_API SafePyObject {
c10::impl::PyInterpreter& pyinterpreter() const {
return *pyinterpreter_;
}
PyObject* ptr(const c10::impl::PyInterpreter*) const;
PyObject* ptr(const c10::impl::PyInterpreter* /*interpreter*/) const;

// stop tracking the current object, and return it
PyObject* release() {

@ -103,7 +103,7 @@ struct C10_API SafePyHandle {
c10::impl::PyInterpreter& pyinterpreter() const {
return *pyinterpreter_;
}
PyObject* ptr(const c10::impl::PyInterpreter*) const;
PyObject* ptr(const c10::impl::PyInterpreter* /*interpreter*/) const;
void reset() {
data_ = nullptr;
pyinterpreter_ = nullptr;

@ -428,7 +428,7 @@ class C10_API Scalar {
typename std::enable_if_t<
std::is_integral_v<T> && !std::is_same_v<T, bool>,
bool>* = nullptr>
Scalar(T vv, bool) : tag(Tag::HAS_i) {
Scalar(T vv, bool /*unused*/) : tag(Tag::HAS_i) {
v.i = convert<decltype(v.i), T>(vv);
}

@ -437,14 +437,14 @@ class C10_API Scalar {
typename std::enable_if_t<
!std::is_integral_v<T> && !c10::is_complex<T>::value,
bool>* = nullptr>
Scalar(T vv, bool) : tag(Tag::HAS_d) {
Scalar(T vv, bool /*unused*/) : tag(Tag::HAS_d) {
v.d = convert<decltype(v.d), T>(vv);
}

template <
typename T,
typename std::enable_if_t<c10::is_complex<T>::value, bool>* = nullptr>
Scalar(T vv, bool) : tag(Tag::HAS_z) {
Scalar(T vv, bool /*unused*/) : tag(Tag::HAS_z) {
v.z = convert<decltype(v.z), T>(vv);
}
};
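The Scalar constructors above form an overload set discriminated purely by enable_if on the value type, with a trailing bool that only separates this tagged overload set from the public constructors and is never read. A rough standalone sketch of the same shape; Number and its members are invented for the example:

#include <iostream>
#include <type_traits>

class Number {
 public:
  // Integral values (but not bool) land here; the bool is a pure tag.
  template <
      typename T,
      std::enable_if_t<std::is_integral_v<T> && !std::is_same_v<T, bool>,
                       bool> = true>
  Number(T v, bool /*unused*/) : is_integral_(true), i_(v) {}

  // Floating-point values land here, selected by the trait alone.
  template <
      typename T,
      std::enable_if_t<std::is_floating_point_v<T>, bool> = true>
  Number(T v, bool /*unused*/) : is_integral_(false), d_(v) {}

  bool is_integral() const { return is_integral_; }

 private:
  bool is_integral_;
  union {
    long long i_;
    double d_;
  };
};

int main() {
  Number a(42, true);    // hits the integral overload
  Number b(3.14, true);  // hits the floating-point overload
  std::cout << a.is_integral() << " " << b.is_integral() << '\n';
}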
@ -78,7 +78,7 @@ struct C10_API Storage {
resizable)) {}

protected:
explicit Storage(unsafe_borrow_t, const Storage& rhs)
explicit Storage(unsafe_borrow_t /*unused*/, const Storage& rhs)
: storage_impl_(c10::intrusive_ptr<c10::StorageImpl>::reclaim(
rhs.storage_impl_.get())) {}

@ -82,14 +82,15 @@ class C10_API Stream final {
/// should use the provided APIs to get a stream. In particular,
/// we don't require backends to give any guarantees about non-zero
/// StreamIds; they are welcome to allocate in whatever way they like.
explicit Stream(Unsafe, Device device, StreamId id)
explicit Stream(Unsafe /*unused*/, Device device, StreamId id)
: device_(device), id_(id) {}

/// Construct the default stream of a Device. The default stream is
/// NOT the same as the current stream; default stream is a fixed stream
/// that never changes, whereas the current stream may be changed by
/// StreamGuard.
explicit Stream(Default, Device device) : device_(device), id_(0) {}
explicit Stream(Default /*unused*/, Device device)
: device_(device), id_(0) {}

bool operator==(const Stream& other) const noexcept {
return this->device_ == other.device_ && this->id_ == other.id_;

@ -40,8 +40,8 @@ class C10_API SymBool {
return *c;
}

SymBool sym_and(const SymBool&) const;
SymBool sym_or(const SymBool&) const;
SymBool sym_and(const SymBool& /*sci*/) const;
SymBool sym_or(const SymBool& /*sci*/) const;
SymBool sym_not() const;

SymBool operator&(const SymBool& other) const {

@ -43,17 +43,17 @@ class C10_API SymFloat {
return data_;
}

SymFloat operator+(const SymFloat&) const;
SymFloat operator-(const SymFloat&) const;
SymFloat operator*(const SymFloat&) const;
SymFloat operator/(const SymFloat&) const;
SymFloat operator+(const SymFloat& /*sci*/) const;
SymFloat operator-(const SymFloat& /*sci*/) const;
SymFloat operator*(const SymFloat& /*sci*/) const;
SymFloat operator/(const SymFloat& /*sci*/) const;

SymBool sym_eq(const SymFloat&) const;
SymBool sym_ne(const SymFloat&) const;
SymBool sym_lt(const SymFloat&) const;
SymBool sym_le(const SymFloat&) const;
SymBool sym_gt(const SymFloat&) const;
SymBool sym_ge(const SymFloat&) const;
SymBool sym_eq(const SymFloat& /*sci*/) const;
SymBool sym_ne(const SymFloat& /*sci*/) const;
SymBool sym_lt(const SymFloat& /*sci*/) const;
SymBool sym_le(const SymFloat& /*sci*/) const;
SymBool sym_gt(const SymFloat& /*sci*/) const;
SymBool sym_ge(const SymFloat& /*sci*/) const;

bool operator==(const SymFloat& o) const {
return sym_eq(o).guard_bool(__FILE__, __LINE__);

@ -52,7 +52,7 @@ class C10_API SymInt {
// One appropriate use for this is when you are constructing a symint
// in a situation where you know it is non-negative (or, if it is negative,
// the negative value is -1; i.e., not user controlled)
SymInt(Unchecked, int64_t d) : data_(d) {}
SymInt(Unchecked /*unused*/, int64_t d) : data_(d) {}

// TODO: these implementations are not optimal because they allocate a
// temporary and then use the move constructor/assignment

@ -359,7 +359,7 @@ struct C10_API VariableVersion {
// https://cplusplus.github.io/LWG/issue2334.
VariableVersion(uint32_t version)
: version_counter_(c10::make_intrusive<VersionCounter>(version)) {}
VariableVersion(Disabled = DISABLED) {}
VariableVersion(Disabled /*unused*/ = DISABLED) {}

bool enabled() const {
return version_counter_;

@ -522,21 +522,21 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target {
*/
TensorImpl(
Storage&& storage,
DispatchKeySet,
DispatchKeySet /*key_set*/,
const caffe2::TypeMeta data_type);

// See Note [Enum ImplType]
TensorImpl(
ImplType,
ImplType /*unused*/,
Storage&& storage,
DispatchKeySet,
DispatchKeySet /*key_set*/,
const caffe2::TypeMeta data_type);

/**
* Construct a 1-dim 0 size tensor that doesn't have a storage.
*/
TensorImpl(
DispatchKeySet,
DispatchKeySet /*key_set*/,
const caffe2::TypeMeta data_type,
std::optional<c10::Device> device_opt);

@ -563,9 +563,9 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target {
// from under us.
TensorImpl(
Storage&& storage,
DispatchKeySet,
DispatchKeySet /*key_set*/,
const caffe2::TypeMeta data_type,
std::optional<c10::Device>);
std::optional<c10::Device> /*device_opt*/);

public:
TensorImpl(const TensorImpl&) = delete;

@ -31,7 +31,7 @@ bool UndefinedTensorImpl::has_storage() const {
}
#endif

void UndefinedTensorImpl::set_storage_offset(int64_t) {
void UndefinedTensorImpl::set_storage_offset(int64_t /*storage_offset*/) {
TORCH_CHECK(false, "set_storage_offset() called on an undefined Tensor");
}

@ -111,15 +111,16 @@ struct C10_API DeviceGuardImplInterface {
/**
* Get the default stream for a given device.
*/
virtual Stream getDefaultStream(Device) const {
virtual Stream getDefaultStream(Device /*unused*/) const {
TORCH_CHECK(false, "Backend doesn't support acquiring a default stream.")
}

/**
* Get a stream from the global pool for a given device.
*/
virtual Stream getStreamFromGlobalPool(Device, bool isHighPriority = false)
const {
virtual Stream getStreamFromGlobalPool(
Device /*unused*/,
bool isHighPriority = false) const {
(void)isHighPriority; // Suppress unused variable warning
TORCH_CHECK(false, "Backend doesn't support acquiring a stream from pool.")
}

@ -129,7 +130,7 @@ struct C10_API DeviceGuardImplInterface {
* copied and shared around, device backend should be able to correctly handle
* the lifetime of the stream.
*/
virtual Stream getNewStream(Device, int priority = 0) const {
virtual Stream getNewStream(Device /*unused*/, int priority = 0) const {
(void)priority;
TORCH_CHECK(false, "Backend doesn't support create a new Stream.")
}

@ -228,8 +229,9 @@ struct C10_API DeviceGuardImplInterface {
* being used on the given stream, and that it should thus avoid recycling the
* DataPtr until all work on that stream is done.
*/
virtual void recordDataPtrOnStream(const c10::DataPtr&, const Stream&) const {
}
virtual void recordDataPtrOnStream(
const c10::DataPtr& /*unused*/,
const Stream& /*unused*/) const {}

/**
* Fetch the elapsed time between two recorded events.

@ -257,31 +259,31 @@ struct NoOpDeviceGuardImpl : public DeviceGuardImplInterface {
DeviceType type() const override {
return D;
}
Device exchangeDevice(Device) const override {
Device exchangeDevice(Device /*unused*/) const override {
return Device(D, -1); // no-op
}
Device getDevice() const override {
return Device(D, -1);
}
void setDevice(Device) const override {
void setDevice(Device /*unused*/) const override {
// no-op
}
void uncheckedSetDevice(Device) const noexcept override {
void uncheckedSetDevice(Device /*unused*/) const noexcept override {
// no-op
}
Stream getStream(Device) const noexcept override {
Stream getStream(Device /*unused*/) const noexcept override {
// no-op
return Stream(Stream::DEFAULT, Device(D, -1));
}

Stream getNewStream(Device, int priority = 0) const override {
Stream getNewStream(Device /*unused*/, int priority = 0) const override {
// no-op
(void)priority;
return Stream(Stream::DEFAULT, Device(D, -1));
}

// NB: These do NOT set the current device
Stream exchangeStream(Stream) const noexcept override {
Stream exchangeStream(Stream /*unused*/) const noexcept override {
// no-op
return Stream(Stream::DEFAULT, Device(D, -1));
}
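DeviceGuardImplInterface and NoOpDeviceGuardImpl above follow a common pattern: optional virtual hooks that reject the call by default, and a trivial backend whose overrides ignore their arguments entirely. A condensed standalone sketch; the Device, DeviceGuardInterface, and NoOpGuard types below are simplified stand-ins, not the c10 classes:

#include <iostream>
#include <stdexcept>

struct Device {
  int index = -1;
};

struct DeviceGuardInterface {
  virtual ~DeviceGuardInterface() = default;
  virtual Device exchangeDevice(Device d) const = 0;
  virtual Device getDevice() const = 0;
  // Optional hook: backends without stream pools simply reject the call.
  virtual int getStreamFromGlobalPool(Device /*unused*/,
                                      bool /*isHighPriority*/ = false) const {
    throw std::runtime_error("backend has no stream pool");
  }
};

struct NoOpGuard final : DeviceGuardInterface {
  Device exchangeDevice(Device /*unused*/) const override {
    return Device{-1};  // no-op: pretend there is a single device
  }
  Device getDevice() const override {
    return Device{-1};
  }
};

int main() {
  NoOpGuard guard;
  std::cout << guard.exchangeDevice(Device{3}).index << '\n';
  try {
    guard.getStreamFromGlobalPool(Device{0});
  } catch (const std::exception& e) {
    std::cout << e.what() << '\n';
  }
}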
@ -344,7 +346,9 @@ extern C10_API std::array<

class C10_API DeviceGuardImplRegistrar {
public:
DeviceGuardImplRegistrar(DeviceType, const DeviceGuardImplInterface*);
DeviceGuardImplRegistrar(
DeviceType /*type*/,
const DeviceGuardImplInterface* /*impl*/);
};

#define C10_REGISTER_GUARD_IMPL(DevType, DeviceGuardImpl) \

@ -19,7 +19,7 @@ template <DeviceType T>
struct FakeGuardImpl final : public DeviceGuardImplInterface {
static constexpr DeviceType static_type = T;
// Runtime device type is not used
FakeGuardImpl(DeviceType) {}
FakeGuardImpl(DeviceType /*unused*/) {}
FakeGuardImpl() = default;
DeviceType type() const override {
return T;

@ -16,7 +16,7 @@ struct C10_API GPUTrace {
// This function will only register the first interpreter that tries to invoke
// it. For all of the next ones it will be a no-op.
static void set_trace(const PyInterpreter*);
static void set_trace(const PyInterpreter* /*trace*/);

static const PyInterpreter* get_trace() {
if (!haveState)

@ -81,7 +81,7 @@ C10_API void _force_tls_local_dispatch_key_set(LocalDispatchKeySet key_set);

class C10_API IncludeDispatchKeyGuard {
public:
IncludeDispatchKeyGuard(DispatchKeySet);
IncludeDispatchKeyGuard(DispatchKeySet /*include*/);
IncludeDispatchKeyGuard(DispatchKey k)
: IncludeDispatchKeyGuard(DispatchKeySet(k)) {}
IncludeDispatchKeyGuard(const IncludeDispatchKeyGuard&) = delete;

@ -99,7 +99,7 @@ class C10_API IncludeDispatchKeyGuard {

class C10_API ExcludeDispatchKeyGuard {
public:
ExcludeDispatchKeyGuard(DispatchKeySet);
ExcludeDispatchKeyGuard(DispatchKeySet /*exclude*/);
ExcludeDispatchKeyGuard(DispatchKey k)
: ExcludeDispatchKeyGuard(DispatchKeySet(k)) {}
ExcludeDispatchKeyGuard(const ExcludeDispatchKeyGuard&) = delete;

@ -35,7 +35,7 @@ struct NoopPyInterpreterVTable final : public PyInterpreterVTable {

void python_op_registration_trampoline(
const c10::OperatorHandle& op,
c10::DispatchKey,
c10::DispatchKey /*unused*/,
c10::DispatchKeySet keyset,
torch::jit::Stack* stack,
bool with_keyset,

@ -52,19 +52,21 @@ struct NoopPyInterpreterVTable final : public PyInterpreterVTable {

void python_dispatcher(
const c10::OperatorHandle& op,
c10::DispatchKeySet,
c10::DispatchKeySet /*unused*/,
torch::jit::Stack* stack) const override {
PANIC(python_dispatcher);
}

bool is_contiguous(const TensorImpl* self, at::MemoryFormat) const override {
bool is_contiguous(const TensorImpl* self, at::MemoryFormat /*unused*/)
const override {
PANIC(is_contiguous);
}
c10::SymBool sym_is_contiguous(const TensorImpl* self, at::MemoryFormat)
const override {
c10::SymBool sym_is_contiguous(
const TensorImpl* self,
at::MemoryFormat /*unused*/) const override {
PANIC(sym_is_contiguous);
}
bool is_strides_like(const TensorImpl* self, at::MemoryFormat)
bool is_strides_like(const TensorImpl* self, at::MemoryFormat /*unused*/)
const override {
PANIC(is_strides_like);
}

@ -517,7 +517,7 @@ struct CudaMallocAsyncAllocator : public CUDAAllocator {
}
}

void enable(bool) override {
void enable(bool /*value*/) override {
// cannot disable
}

@ -799,7 +799,7 @@ struct CudaMallocAsyncAllocator : public CUDAAllocator {
void beginAllocateToPool(
c10::DeviceIndex device,
MempoolId_t mempool_id,
std::function<bool(cudaStream_t)>) override {
std::function<bool(cudaStream_t)> /*filter*/) override {
std::lock_guard<std::mutex> lk(general_mutex);

TORCH_INTERNAL_ASSERT(capture_free_streams.empty());

@ -9,7 +9,7 @@
#include <string>

namespace c10::cuda {
C10_CUDA_API std::string get_cuda_error_help(cudaError_t) noexcept;
C10_CUDA_API std::string get_cuda_error_help(cudaError_t /*error*/) noexcept;
C10_CUDA_API const char* get_cuda_check_suffix() noexcept;
C10_CUDA_API std::mutex* getFreeMutex();
} // namespace c10::cuda

@ -70,7 +70,7 @@ class C10_CUDA_API CUDAStream {
/// Construct a CUDAStream from a Stream with no error checking.
/// This constructor uses the "named" constructor idiom, and can
/// be invoked as: CUDAStream(CUDAStream::UNCHECKED, stream)
explicit CUDAStream(Unchecked, Stream stream) : stream_(stream) {}
explicit CUDAStream(Unchecked /*unused*/, Stream stream) : stream_(stream) {}

bool operator==(const CUDAStream& other) const noexcept {
return unwrap() == other.unwrap();

@ -43,7 +43,7 @@ class DynamicCounterBackendIf {
virtual void unregisterCounter(std::string_view key) = 0;
};

void C10_API
registerDynamicCounterBackend(std::unique_ptr<DynamicCounterBackendIf>);
void C10_API registerDynamicCounterBackend(
std::unique_ptr<DynamicCounterBackendIf> /*backend*/);
} // namespace detail
} // namespace c10::monitor

@ -217,7 +217,7 @@ class C10_API WarningHandlerGuard {
/// The TORCH_WARN_ONCE macro is difficult to test for. Use
/// setWarnAlways(true) to turn it into TORCH_WARN, which can be
/// tested for more easily.
C10_API void set_warnAlways(bool) noexcept(true);
C10_API void set_warnAlways(bool /*setting*/) noexcept(true);
C10_API bool get_warnAlways() noexcept(true);

// A RAII guard that sets warn_always (not thread-local) on

@ -63,7 +63,7 @@ class ExclusivelyOwned {
explicit ExclusivelyOwned(T&& t) : repr_(EOT::moveToRepr(std::move(t))) {}

template <class... Args>
explicit ExclusivelyOwned(std::in_place_t, Args&&... args)
explicit ExclusivelyOwned(std::in_place_t /*unused*/, Args&&... args)
: repr_(EOT::createInPlace(std::forward<Args>(args)...)) {}

ExclusivelyOwned(const ExclusivelyOwned&) = delete;

@ -52,12 +52,14 @@ class function_ref<Ret(Params...)> {
function_ref(
// NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward)
Callable&& callable,
std::enable_if_t<
!std::is_same_v<std::remove_reference_t<Callable>, function_ref>>* =
nullptr,
std::enable_if_t<!std::is_same_v<
std::remove_reference_t<Callable>,
function_ref>>* /*unused*/
= nullptr,
std::enable_if_t<std::is_convertible_v<
typename std::invoke_result_t<Callable, Params...>,
Ret>>* = nullptr)
Ret>>* /*unused*/
= nullptr)
: callback(callback_fn<std::remove_reference_t<Callable>>),
callable(reinterpret_cast<intptr_t>(&callable)) {}
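In function_ref above, the extra std::enable_if_t<...>* parameters exist only to constrain the constructor; they default to nullptr and are never read, so they keep just a /*unused*/ comment. A simplified standalone sketch of that constructor shape, specialized to int(int) callables; IntCallbackRef is invented for the example:

#include <iostream>
#include <type_traits>

class IntCallbackRef {
 public:
  // The two trailing pointer parameters only remove this overload from
  // consideration via SFINAE; they are always nullptr and never read.
  template <typename Callable>
  IntCallbackRef(
      Callable&& callable,
      std::enable_if_t<!std::is_same_v<std::remove_reference_t<Callable>,
                                       IntCallbackRef>>* /*unused*/ = nullptr,
      std::enable_if_t<std::is_convertible_v<
          std::invoke_result_t<Callable, int>, int>>* /*unused*/ = nullptr)
      : callback_(+[](void* obj, int x) {
          return (*static_cast<std::remove_reference_t<Callable>*>(obj))(x);
        }),
        callable_(&callable) {}  // non-owning: the callable must outlive us

  int operator()(int x) const {
    return callback_(callable_, x);
  }

 private:
  int (*callback_)(void*, int);
  void* callable_;
};

int main() {
  auto doubler = [](int x) { return 2 * x; };
  IntCallbackRef ref(doubler);
  std::cout << ref(21) << '\n';  // prints 42
}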
@ -26,7 +26,8 @@ class GaugeBackendFactoryIf {
std::string_view key) noexcept = 0;
};

void C10_API registerGaugeBackend(std::unique_ptr<GaugeBackendFactoryIf>);
void C10_API
registerGaugeBackend(std::unique_ptr<GaugeBackendFactoryIf> /*backend*/);
} // namespace detail

// A handle to a Gauge.

@ -307,10 +307,11 @@ class C10_API EventSampledHandler {

// Must be called in the main thread before any other threads are spawned.
C10_API void InitEventSampledHandlers(
std::vector<
std::pair<std::string_view, std::unique_ptr<EventSampledHandler>>>);
std::vector<std::pair<
std::string_view,
std::unique_ptr<EventSampledHandler>>> /*handlers*/);
C10_API const std::unique_ptr<EventSampledHandler>& GetEventSampledHandler(
std::string_view);
std::string_view /*event*/);

/**
* Very lightweight logging for the first time API usage. It's beneficial for

@ -82,7 +82,7 @@ class MaybeOwned final {

/// Don't use this; use owned() instead.
template <class... Args>
explicit MaybeOwned(std::in_place_t, Args&&... args)
explicit MaybeOwned(std::in_place_t /*unused*/, Args&&... args)
: isBorrowed_(false), own_(std::forward<Args>(args)...) {}

public:

@ -177,7 +177,7 @@ class MaybeOwned final {
}

template <class... Args>
static MaybeOwned owned(std::in_place_t, Args&&... args) {
static MaybeOwned owned(std::in_place_t /*unused*/, Args&&... args) {
return MaybeOwned(std::in_place, std::forward<Args>(args)...);
}

@ -112,7 +112,7 @@ using make_offset_index_sequence =
* 2>());
*/
template <class Tuple, size_t... Is>
constexpr auto tuple_elements(Tuple t, std::index_sequence<Is...>) {
constexpr auto tuple_elements(Tuple t, std::index_sequence<Is...> /*unused*/) {
return std::tuple<std::tuple_element_t<Is, Tuple>...>(std::get<Is>(t)...);
}

@ -209,7 +209,7 @@ auto tuple_map(
// NOLINTNEXTLINE(cppcoreguidelines-rvalue-reference-param-not-moved)
std::tuple<Args...>&& tuple,
const Mapper& mapper,
std::index_sequence<Indices...>) {
std::index_sequence<Indices...> /*unused*/) {
return std::tuple<decltype(mapper(std::forward<Args>(std::get<Indices>(
tuple))))...>(mapper(std::forward<Args>(std::get<Indices>(tuple)))...);
}
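tuple_elements and tuple_map above both take a std::index_sequence argument purely so the Is.../Indices... pack can be deduced from its type; the parameter object itself is never touched. A small standalone sketch of the same idiom; select_elements is an invented name:

#include <cstddef>
#include <iostream>
#include <tuple>
#include <utility>

// The index_sequence argument carries the indices in its type only, so the
// parameter gets a /*unused*/ comment instead of a name.
template <class Tuple, std::size_t... Is>
constexpr auto select_elements(Tuple t, std::index_sequence<Is...> /*unused*/) {
  return std::make_tuple(std::get<Is>(t)...);
}

int main() {
  auto src = std::make_tuple(1, 2.5, 'x', "hi");
  // Keep elements 0 and 2.
  auto picked = select_elements(src, std::index_sequence<0, 2>{});
  std::cout << std::get<0>(picked) << " " << std::get<1>(picked) << '\n';
}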
@ -27,7 +27,7 @@ class OptionalArrayRef final {

constexpr OptionalArrayRef() noexcept = default;

constexpr OptionalArrayRef(std::nullopt_t) noexcept {}
constexpr OptionalArrayRef(std::nullopt_t /*unused*/) noexcept {}

OptionalArrayRef(const OptionalArrayRef& other) = default;

@ -89,7 +89,7 @@ class OptionalArrayRef final {

// Assignment

constexpr OptionalArrayRef& operator=(std::nullopt_t) noexcept {
constexpr OptionalArrayRef& operator=(std::nullopt_t /*unused*/) noexcept {
wrapped_opt_array_ref = std::nullopt;
return *this;
}

@ -215,7 +215,7 @@ class SmallVectorTemplateCommon
class ItTy,
std::enable_if_t<!std::is_same_v<std::remove_const_t<ItTy>, T*>, bool> =
false>
void assertSafeToReferenceAfterClear(ItTy, ItTy) {}
void assertSafeToReferenceAfterClear(ItTy /*unused*/, ItTy /*unused*/) {}

/// Check whether any part of the range will be invalidated by growing.
void assertSafeToAddRange(const T* From, const T* To) {

@ -228,7 +228,7 @@ class SmallVectorTemplateCommon
class ItTy,
std::enable_if_t<!std::is_same_v<std::remove_const_t<ItTy>, T*>, bool> =
false>
void assertSafeToAddRange(ItTy, ItTy) {}
void assertSafeToAddRange(ItTy /*unused*/, ItTy /*unused*/) {}

/// Reserve enough space to add one element, and return the updated element
/// pointer in case it was a reference to the storage.

@ -538,7 +538,7 @@ class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> {
SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}

// No need to do a destroy loop for POD's.
static void destroy_range(T*, T*) {}
static void destroy_range(T* /*unused*/, T* /*unused*/) {}

/// Move the range [I, E) onto the uninitialized memory
/// starting with "Dest", constructing elements into it as needed.

@ -563,8 +563,8 @@ class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> {
T1* I,
T1* E,
T2* Dest,
std::enable_if_t<std::is_same_v<std::remove_const_t<T1>, T2>>* =
nullptr) {
std::enable_if_t<std::is_same_v<std::remove_const_t<T1>, T2>>* /*unused*/
= nullptr) {
// Use memcpy for PODs iterated by pointers (which includes SmallVector
// iterators): std::uninitialized_copy optimizes to memmove, but we can
// use memcpy here. Note that I and E are iterators and thus might be

@ -87,7 +87,7 @@ C10_API std::ostream& _str(std::ostream& ss, const std::wstring& wString);
template <>
inline std::ostream& _str<CompileTimeEmptyString>(
std::ostream& ss,
const CompileTimeEmptyString&) {
const CompileTimeEmptyString& /*unused*/) {
return ss;
}

@ -2,6 +2,6 @@

namespace c10::detail {

void deleteNothing(void*) {}
void deleteNothing(void* /*unused*/) {}

} // namespace c10::detail

@ -13,7 +13,7 @@ using DeleterFnPtr = void (*)(void*);
namespace detail {

// Does not delete anything
C10_API void deleteNothing(void*);
C10_API void deleteNothing(void* /*unused*/);

// A detail::UniqueVoidPtr is an owning smart pointer like unique_ptr, but
// with three major differences:

@ -35,7 +35,7 @@ class WaitCounterBackendFactoryIf {
};

C10_API void registerWaitCounterBackend(
std::unique_ptr<WaitCounterBackendFactoryIf>);
std::unique_ptr<WaitCounterBackendFactoryIf> /*factory*/);

C10_API std::vector<std::shared_ptr<WaitCounterBackendFactoryIf>>
getRegisteredWaitCounterBackends();

@ -573,13 +573,13 @@ class sherwood_v3_table : private EntryAlloc,
return emplace(std::move(value));
}
template <typename... Args>
iterator emplace_hint(const_iterator, Args&&... args) {
iterator emplace_hint(const_iterator /*unused*/, Args&&... args) {
return emplace(std::forward<Args>(args)...).first;
}
iterator insert(const_iterator, const value_type& value) {
iterator insert(const_iterator /*unused*/, const value_type& value) {
return emplace(value).first;
}
iterator insert(const_iterator, value_type&& value) {
iterator insert(const_iterator /*unused*/, value_type&& value) {
return emplace(std::move(value)).first;
}

@ -896,7 +896,7 @@ class sherwood_v3_table : private EntryAlloc,
} // namespace detailv3

struct prime_number_hash_policy {
static uint64_t mod0(uint64_t) {
static uint64_t mod0(uint64_t /*unused*/) {
return 0llu;
}
static uint64_t mod2(uint64_t hash) {

@ -1883,7 +1883,7 @@ struct power_of_two_hash_policy {
size = detailv3::next_power_of_two(size);
return 0;
}
void commit(int8_t) {}
void commit(int8_t /*unused*/) {}
void reset() {}
};

@ -1989,14 +1989,14 @@ class flat_hash_map
}
template <typename M>
typename Table::iterator insert_or_assign(
typename Table::const_iterator,
typename Table::const_iterator /*unused*/,
const key_type& key,
M&& m) {
return insert_or_assign(key, std::forward<M>(m)).first;
}
template <typename M>
typename Table::iterator insert_or_assign(
typename Table::const_iterator,
typename Table::const_iterator /*unused*/,
key_type&& key,
M&& m) {
return insert_or_assign(std::move(key), std::forward<M>(m)).first;
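The emplace_hint, insert(const_iterator, ...), and insert_or_assign(const_iterator, ...) overloads above exist because the standard associative-container interface requires them, but an open-addressing hash table gets nothing from the hint, so the iterator parameter is accepted and deliberately ignored. A standalone sketch of that compatibility shim; CompatMap simply wraps std::unordered_map for illustration:

#include <iostream>
#include <unordered_map>
#include <utility>

template <typename K, typename V>
class CompatMap {
 public:
  using iterator = typename std::unordered_map<K, V>::iterator;
  using const_iterator = typename std::unordered_map<K, V>::const_iterator;

  template <typename... Args>
  std::pair<iterator, bool> emplace(Args&&... args) {
    return map_.emplace(std::forward<Args>(args)...);
  }

  // Hint-taking overload kept only for interface compatibility; the hint
  // cannot help an unordered container, so it is ignored.
  template <typename... Args>
  iterator emplace_hint(const_iterator /*unused*/, Args&&... args) {
    return emplace(std::forward<Args>(args)...).first;
  }

  const_iterator cbegin() const { return map_.cbegin(); }

 private:
  std::unordered_map<K, V> map_;
};

int main() {
  CompatMap<int, const char*> m;
  m.emplace(1, "one");
  auto it = m.emplace_hint(m.cbegin(), 2, "two");  // hint is ignored
  std::cout << it->first << " -> " << it->second << '\n';
}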
@ -79,8 +79,8 @@ class C10_API uint128 {
// Make msvc happy with using operator<<= from DivModImpl
// which is a static function, and linker complained about missing
// static version of this overload
friend uint128& operator<<=(uint128&, int);
uint128& operator>>=(int);
friend uint128& operator<<=(uint128& /*self*/, int /*amount*/);
uint128& operator>>=(int /*amount*/);
uint128& operator&=(const uint128& b);
uint128& operator|=(const uint128& b);
uint128& operator^=(const uint128& b);

@ -399,7 +399,9 @@ class intrusive_ptr final {
// This constructor will not increase the ref counter for you.
// We use the tagged dispatch mechanism to explicitly mark this constructor
// to not increase the refcount
explicit intrusive_ptr(TTarget* target, raw::DontIncreaseRefcount) noexcept
explicit intrusive_ptr(
TTarget* target,
raw::DontIncreaseRefcount /*unused*/) noexcept
: target_(target) {}

explicit intrusive_ptr(std::unique_ptr<TTarget> rhs) noexcept

@ -70,7 +70,7 @@ enum ZeroBehavior {
namespace detail {
template <typename T, std::size_t SizeOfT>
struct TrailingZerosCounter {
static std::size_t count(T Val, ZeroBehavior) {
static std::size_t count(T Val, ZeroBehavior /*unused*/) {
if (!Val)
return std::numeric_limits<T>::digits;
if (Val & 0x1)

@ -147,7 +147,7 @@ std::size_t countTrailingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
namespace detail {
template <typename T, std::size_t SizeOfT>
struct LeadingZerosCounter {
static std::size_t count(T Val, ZeroBehavior) {
static std::size_t count(T Val, ZeroBehavior /*unused*/) {
if (!Val)
return std::numeric_limits<T>::digits;

@ -234,7 +234,9 @@ inline std::ostream& operator<<(
return out;
}

inline std::ostream& operator<<(std::ostream& out, const std::nullptr_t&) {
inline std::ostream& operator<<(
std::ostream& out,
const std::nullptr_t& /*unused*/) {
out << "(null)";
return out;
}

@ -560,13 +560,13 @@ class sherwood_v3_table : private EntryAlloc, private Hasher, private Equal {
return emplace(std::move(value));
}
template <typename... Args>
iterator emplace_hint(const_iterator, Args&&... args) {
iterator emplace_hint(const_iterator /*unused*/, Args&&... args) {
return emplace(std::forward<Args>(args)...).first;
}
iterator insert(const_iterator, const value_type& value) {
iterator insert(const_iterator /*unused*/, const value_type& value) {
return emplace(value).first;
}
iterator insert(const_iterator, value_type&& value) {
iterator insert(const_iterator /*unused*/, value_type&& value) {
return emplace(std::move(value)).first;
}

@ -1013,7 +1013,7 @@ class sherwood_v3_table : private EntryAlloc, private Hasher, private Equal {
} // namespace detailv3

struct prime_number_hash_policy {
static uint64_t mod0(uint64_t) {
static uint64_t mod0(uint64_t /*unused*/) {
return 0llu;
}
static uint64_t mod2(uint64_t hash) {

@ -2000,7 +2000,7 @@ struct power_of_two_hash_policy {
size = detailv3::next_power_of_two(size);
return 0;
}
void commit(int8_t) {}
void commit(int8_t /*unused*/) {}
void reset() {}
};

@ -2106,14 +2106,14 @@ class order_preserving_flat_hash_map
}
template <typename M>
typename Table::iterator insert_or_assign(
typename Table::const_iterator,
typename Table::const_iterator /*unused*/,
const key_type& key,
M&& m) {
return insert_or_assign(key, std::forward<M>(m)).first;
}
template <typename M>
typename Table::iterator insert_or_assign(
typename Table::const_iterator,
typename Table::const_iterator /*unused*/,
key_type&& key,
M&& m) {
return insert_or_assign(std::move(key), std::forward<M>(m)).first;

@ -90,7 +90,7 @@ class class_ : public ::torch::detail::class_base {
/// constructor taking an `int` and a `std::string` as argument.
template <typename... Types>
class_& def(
torch::detail::types<void, Types...>,
torch::detail::types<void, Types...> /*unused*/,
std::string doc_string = "",
std::initializer_list<arg> default_args =
{}) { // Used in combination with

@ -457,8 +457,8 @@ inline class_<CurClass> selective_class_(

template <class CurClass>
inline detail::ClassNotSelected selective_class_(
const std::string&,
detail::SelectiveStr<false>) {
const std::string& /*unused*/,
detail::SelectiveStr<false> /*unused*/) {
return detail::ClassNotSelected();
}

@ -512,7 +512,7 @@ inline class_<CurClass> Library::class_(detail::SelectiveStr<true> className) {
}

template <class CurClass>
inline detail::ClassNotSelected Library::class_(detail::SelectiveStr<false>) {
inline detail::ClassNotSelected Library::class_(detail::SelectiveStr<false> /*unused*/) {
return detail::ClassNotSelected();
}

@ -128,7 +128,7 @@ typename c10::guts::infer_function_traits_t<Functor>::return_type
call_torchbind_method_from_stack(
Functor& functor,
jit::Stack& stack,
std::index_sequence<ivalue_arg_indices...>) {
std::index_sequence<ivalue_arg_indices...> /*unused*/) {
(void)(stack); // when sizeof...(ivalue_arg_indices) == 0, this argument would
// be unused and we have to silence the compiler warning.

@ -39,7 +39,9 @@ struct alignas(2) BFloat16 {
return from_bits_t();
}

constexpr C10_HOST_DEVICE BFloat16(unsigned short bits, from_bits_t)
constexpr C10_HOST_DEVICE BFloat16(
unsigned short bits,
from_bits_t /*unused*/)
: x(bits) {}
/* implicit */ inline C10_HOST_DEVICE BFloat16(float value);
inline C10_HOST_DEVICE operator float() const;

@ -44,7 +44,7 @@ struct alignas(1) Float8_e4m3fn {

Float8_e4m3fn() = default;

constexpr C10_HOST_DEVICE Float8_e4m3fn(uint8_t bits, from_bits_t)
constexpr C10_HOST_DEVICE Float8_e4m3fn(uint8_t bits, from_bits_t /*unused*/)
: x(bits) {}
inline C10_HOST_DEVICE Float8_e4m3fn(float value);
inline C10_HOST_DEVICE operator float() const;

@ -45,7 +45,9 @@ struct alignas(1) Float8_e4m3fnuz {

Float8_e4m3fnuz() = default;

constexpr C10_HOST_DEVICE Float8_e4m3fnuz(uint8_t bits, from_bits_t)
constexpr C10_HOST_DEVICE Float8_e4m3fnuz(
uint8_t bits,
from_bits_t /*unused*/)
: x(bits) {}
inline C10_HOST_DEVICE Float8_e4m3fnuz(float value);
inline C10_HOST_DEVICE operator float() const;

@ -30,7 +30,8 @@ struct alignas(1) Float8_e5m2 {

Float8_e5m2() = default;

constexpr C10_HOST_DEVICE Float8_e5m2(uint8_t bits, from_bits_t) : x(bits) {}
constexpr C10_HOST_DEVICE Float8_e5m2(uint8_t bits, from_bits_t /*unused*/)
: x(bits) {}
inline C10_HOST_DEVICE Float8_e5m2(float value);
inline C10_HOST_DEVICE operator float() const;
inline C10_HOST_DEVICE bool isnan() const;

@ -44,7 +44,9 @@ struct alignas(1) Float8_e5m2fnuz {

Float8_e5m2fnuz() = default;

constexpr C10_HOST_DEVICE Float8_e5m2fnuz(uint8_t bits, from_bits_t)
constexpr C10_HOST_DEVICE Float8_e5m2fnuz(
uint8_t bits,
from_bits_t /*unused*/)
: x(bits) {}
inline C10_HOST_DEVICE Float8_e5m2fnuz(float value);
inline C10_HOST_DEVICE operator float() const;

@ -39,7 +39,7 @@ struct alignas(1) Float8_e8m0fnu {

Float8_e8m0fnu() = default;

constexpr C10_HOST_DEVICE Float8_e8m0fnu(uint8_t bits, from_bits_t)
constexpr C10_HOST_DEVICE Float8_e8m0fnu(uint8_t bits, from_bits_t /*unused*/)
: x(bits) {}
inline C10_HOST_DEVICE Float8_e8m0fnu(float value);
inline C10_HOST_DEVICE operator float() const;
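BFloat16 and the Float8_* types above all share the from_bits_t tag: the extra argument forces callers to spell out that they are passing an already-encoded bit pattern rather than a value to convert, and the tag itself is never read. A standalone sketch of the pattern; BrainFloatLike is an invented type, not c10::BFloat16:

#include <cstdint>
#include <cstdio>

struct BrainFloatLike {
  struct from_bits_t {};
  static constexpr from_bits_t from_bits() {
    return from_bits_t();
  }

  // Reinterpret: the caller already has the encoded representation, so the
  // tag argument exists only to make that intent explicit at the call site.
  constexpr BrainFloatLike(uint16_t bits, from_bits_t /*unused*/) : x(bits) {}

  uint16_t x;
};

int main() {
  constexpr BrainFloatLike one(0x3F80, BrainFloatLike::from_bits());
  std::printf("0x%04X\n", static_cast<unsigned>(one.x));
}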
Some files were not shown because too many files have changed in this diff.