[2/N] Mark unused parameters in C++ code (#165121)

This is a follow-up to #164912, marking unused C++ parameters to improve code readability.
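To illustrate the pattern this series applies, here is a minimal, self-contained C++ sketch (the tag types and function below are hypothetical stand-ins, modeled on the `enumtype` padding overloads changed in this commit):

#include <iostream>
#include <string>

// Hypothetical tag types used purely for overload selection; their values
// carry no information, so the parameters are never read.
struct ValidTag {};
struct SameTag {};

// The parameter is left unnamed and annotated with a /*unused*/ comment.
// An unnamed parameter already compiles cleanly under -Wunused-parameter;
// the comment documents that the omission is deliberate and satisfies
// clang-tidy's readability-named-parameter check.
std::string padding_mode(ValidTag /*unused*/) {
  return "valid";
}

std::string padding_mode(SameTag /*unused*/) {
  return "same";
}

int main() {
  std::cout << padding_mode(ValidTag{}) << '\n'; // prints "valid"
  std::cout << padding_mode(SameTag{}) << '\n'; // prints "same"
  return 0;
}

Where the parameter has a meaningful role, the diff instead keeps its name in the comment (e.g. `/*self*/`, `/*memory_format*/`), which preserves the documentation value of the name without leaving it formally unused.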

Pull Request resolved: https://github.com/pytorch/pytorch/pull/165121
Approved by: https://github.com/Skylion007
Author: Yuanyuan Chen
Date: 2025-10-15 03:04:35 +00:00
Committed by: PyTorch MergeBot
Parent: b4fd47179e
Commit: 36871622f1
82 changed files with 371 additions and 310 deletions


@@ -65,7 +65,7 @@ struct default_constructible
 namespace impl {
   template <typename T>
-  constexpr bool supports_default_construction(const ::strong::default_constructible::modifier<T>*)
+  constexpr bool supports_default_construction(const ::strong::default_constructible::modifier<T>* /*unused*/)
   {
     return true;
   }
@@ -76,7 +76,7 @@ class type : public modifier<M, type<T, Tag, M...>>...
 {
 public:
   template <typename TT = T, typename = std::enable_if_t<std::is_trivially_constructible<TT>{}>>
-  explicit type(uninitialized_t)
+  explicit type(uninitialized_t /*unused*/)
     noexcept
   {
   }
@@ -138,7 +138,7 @@ private:
 namespace impl {
   template <typename T, typename Tag, typename ... Ms>
-  constexpr bool is_strong_type_func(const strong::type<T, Tag, Ms...>*) { return true;}
+  constexpr bool is_strong_type_func(const strong::type<T, Tag, Ms...>* /*unused*/) { return true;}
   constexpr bool is_strong_type_func(...) { return false;}
   template <typename T, typename Tag, typename ... Ms>
   constexpr T underlying_type(strong::type<T, Tag, Ms...>*);


@@ -252,10 +252,10 @@ PyWarningHandler::PyWarningHandler() noexcept(true)
 // Get the Python warning type for a warning
 static PyObject* map_warning_to_python_type(const c10::Warning& warning) {
   struct Visitor {
-    PyObject* operator()(const c10::UserWarning&) const {
+    PyObject* operator()(const c10::UserWarning& /*unused*/) const {
       return PyExc_UserWarning;
     }
-    PyObject* operator()(const c10::DeprecationWarning&) const {
+    PyObject* operator()(const c10::DeprecationWarning& /*unused*/) const {
       return PyExc_DeprecationWarning;
     }
   };


@@ -269,7 +269,8 @@ bool THPException_init(PyObject* module);
 namespace torch {

 // Set python current exception from a C++ exception
-TORCH_PYTHON_API void translate_exception_to_python(const std::exception_ptr&);
+TORCH_PYTHON_API void translate_exception_to_python(
+    const std::exception_ptr& /*e_ptr*/);

 TORCH_PYTHON_API std::string processErrorMsg(std::string str);
@@ -358,8 +359,8 @@ using Arg = typename invoke_traits<Func>::template arg<i>::type;
 template <typename Func, size_t... Is, bool release_gil>
 auto wrap_pybind_function_impl_(
     Func&& f,
-    std::index_sequence<Is...>,
-    std::bool_constant<release_gil>) {
+    std::index_sequence<Is...> /*unused*/,
+    std::bool_constant<release_gil> /*unused*/) {
   namespace py = pybind11;

   // f=f is needed to handle function references on older compilers
@@ -371,7 +372,7 @@ auto wrap_pybind_function_impl_(
   };
 }

-PyObject* _new_accelerator_error_object(const c10::AcceleratorError&);
+PyObject* _new_accelerator_error_object(const c10::AcceleratorError& /*e*/);

 } // namespace detail

 // Wrap a function with TH error and warning handling.


@@ -57,7 +57,7 @@ struct ConcretePyInterpreterVTable final
   void reportErrorCallback(PyObject* callback, DispatchKey key) const override;
   void python_dispatcher(
       const c10::OperatorHandle& op,
-      c10::DispatchKeySet,
+      c10::DispatchKeySet /*ks*/,
       torch::jit::Stack* stack) const override;
   // NB: this is defined in python_dispatch.cpp
   void python_op_registration_trampoline(
@@ -80,12 +80,15 @@ struct ConcretePyInterpreterVTable final
         opname, pymodule, context);
   }

-  bool is_contiguous(const c10::TensorImpl* self, at::MemoryFormat)
-      const override;
-  c10::SymBool sym_is_contiguous(const c10::TensorImpl* self, at::MemoryFormat)
-      const override;
-  bool is_strides_like(const c10::TensorImpl* self, at::MemoryFormat)
-      const override;
+  bool is_contiguous(
+      const c10::TensorImpl* self,
+      at::MemoryFormat /*memory_format*/) const override;
+  c10::SymBool sym_is_contiguous(
+      const c10::TensorImpl* self,
+      at::MemoryFormat /*memory_format*/) const override;
+  bool is_strides_like(
+      const c10::TensorImpl* self,
+      at::MemoryFormat /*memory_format*/) const override;
   bool is_non_overlapping_and_dense(const c10::TensorImpl* self) const override;
   c10::Device device(const c10::TensorImpl* self) const override;
   int64_t dim(const c10::TensorImpl* self) const override;


@@ -3,7 +3,8 @@
 namespace torch::detail {

-PyInterpreterHooks::PyInterpreterHooks(c10::impl::PyInterpreterHooksArgs) {}
+PyInterpreterHooks::PyInterpreterHooks(
+    c10::impl::PyInterpreterHooksArgs /*unused*/) {}

 c10::impl::PyInterpreter* PyInterpreterHooks::getPyInterpreter() const {
   // Delegate to the existing implementation


@@ -7,7 +7,7 @@ namespace torch::detail {
 // Concrete implementation of PyInterpreterHooks
 class PyInterpreterHooks : public c10::impl::PyInterpreterHooksInterface {
  public:
-  explicit PyInterpreterHooks(c10::impl::PyInterpreterHooksArgs);
+  explicit PyInterpreterHooks(c10::impl::PyInterpreterHooksArgs /*unused*/);
   c10::impl::PyInterpreter* getPyInterpreter() const override;
 };


@@ -117,7 +117,7 @@ static PyObject* THPDTypeInfo_compare(
   return Py_INCREF(Py_NotImplemented), Py_NotImplemented;
 }

-static PyObject* THPDTypeInfo_bits(THPDTypeInfo* self, void*) {
+static PyObject* THPDTypeInfo_bits(THPDTypeInfo* self, void* /*unused*/) {
   uint64_t bits = elementSize(self->type) * CHAR_BIT;
   return THPUtils_packUInt64(bits);
 }
@@ -133,7 +133,7 @@ static PyObject* THPDTypeInfo_bits(THPDTypeInfo* self, void*) {
       at::ScalarType::BFloat16, \
       AT_EXPAND(AT_FLOAT8_TYPES))

-static PyObject* THPFInfo_eps(THPFInfo* self, void*) {
+static PyObject* THPFInfo_eps(THPFInfo* self, void* /*unused*/) {
   HANDLE_TH_ERRORS
   return _AT_DISPATCH_FINFO_TYPES(self->type, "epsilon", [] {
     return PyFloat_FromDouble(
@@ -142,7 +142,7 @@ static PyObject* THPFInfo_eps(THPFInfo* self, void*) {
   END_HANDLE_TH_ERRORS
 }

-static PyObject* THPFInfo_max(THPFInfo* self, void*) {
+static PyObject* THPFInfo_max(THPFInfo* self, void* /*unused*/) {
   HANDLE_TH_ERRORS
   return _AT_DISPATCH_FINFO_TYPES(self->type, "max", [] {
     return PyFloat_FromDouble(
@@ -151,7 +151,7 @@ static PyObject* THPFInfo_max(THPFInfo* self, void*) {
   END_HANDLE_TH_ERRORS
 }

-static PyObject* THPFInfo_min(THPFInfo* self, void*) {
+static PyObject* THPFInfo_min(THPFInfo* self, void* /*unused*/) {
   HANDLE_TH_ERRORS
   return _AT_DISPATCH_FINFO_TYPES(self->type, "lowest", [] {
     return PyFloat_FromDouble(
@@ -164,7 +164,7 @@ static PyObject* THPFInfo_min(THPFInfo* self, void*) {
   AT_DISPATCH_V2( \
       TYPE, NAME, AT_WRAP(__VA_ARGS__), AT_EXPAND(AT_INTEGRAL_TYPES_V2))

-static PyObject* THPIInfo_max(THPIInfo* self, void*) {
+static PyObject* THPIInfo_max(THPIInfo* self, void* /*unused*/) {
   HANDLE_TH_ERRORS
   if (at::isIntegralType(self->type, /*includeBool=*/false)) {
     return AT_DISPATCH_IINFO_TYPES(self->type, "max", [] {
@@ -182,7 +182,7 @@ static PyObject* THPIInfo_max(THPIInfo* self, void*) {
   END_HANDLE_TH_ERRORS
 }

-static PyObject* THPIInfo_min(THPIInfo* self, void*) {
+static PyObject* THPIInfo_min(THPIInfo* self, void* /*unused*/) {
   HANDLE_TH_ERRORS
   if (at::isIntegralType(self->type, /*includeBool=*/false)) {
     return AT_DISPATCH_IINFO_TYPES(self->type, "min", [] {
@@ -200,7 +200,7 @@ static PyObject* THPIInfo_min(THPIInfo* self, void*) {
   END_HANDLE_TH_ERRORS
 }

-static PyObject* THPIInfo_dtype(THPIInfo* self, void*) {
+static PyObject* THPIInfo_dtype(THPIInfo* self, void* /*unused*/) {
   HANDLE_TH_ERRORS
   auto primary_name = c10::getDtypeNames(self->type).first;
   return AT_DISPATCH_IINFO_TYPES(self->type, "dtype", [&primary_name] {
@@ -209,7 +209,7 @@ static PyObject* THPIInfo_dtype(THPIInfo* self, void*) {
   END_HANDLE_TH_ERRORS
 }

-static PyObject* THPFInfo_smallest_normal(THPFInfo* self, void*) {
+static PyObject* THPFInfo_smallest_normal(THPFInfo* self, void* /*unused*/) {
   HANDLE_TH_ERRORS
   return _AT_DISPATCH_FINFO_TYPES(self->type, "min", [] {
     return PyFloat_FromDouble(
@@ -218,12 +218,12 @@ static PyObject* THPFInfo_smallest_normal(THPFInfo* self, void*) {
   END_HANDLE_TH_ERRORS
 }

-static PyObject* THPFInfo_tiny(THPFInfo* self, void*) {
+static PyObject* THPFInfo_tiny(THPFInfo* self, void* /*unused*/) {
   // see gh-70909, essentially the array_api prefers smallest_normal over tiny
   return THPFInfo_smallest_normal(self, nullptr);
 }

-static PyObject* THPFInfo_resolution(THPFInfo* self, void*) {
+static PyObject* THPFInfo_resolution(THPFInfo* self, void* /*unused*/) {
   HANDLE_TH_ERRORS
   return _AT_DISPATCH_FINFO_TYPES(self->type, "digits10", [] {
     return PyFloat_FromDouble(std::pow(
@@ -233,7 +233,7 @@ static PyObject* THPFInfo_resolution(THPFInfo* self, void*) {
   END_HANDLE_TH_ERRORS
 }

-static PyObject* THPFInfo_dtype(THPFInfo* self, void*) {
+static PyObject* THPFInfo_dtype(THPFInfo* self, void* /*unused*/) {
   HANDLE_TH_ERRORS
   auto primary_name = c10::getDtypeNames(self->type).first;
   return _AT_DISPATCH_FINFO_TYPES(self->type, "dtype", [&primary_name] {


@@ -76,18 +76,19 @@ struct PythonDeviceGuard final : public c10::impl::DeviceGuardImplInterface {
   }
   void setDevice(c10::Device device) const override {}
   void uncheckedSetDevice(c10::Device device) const noexcept override {}

-  c10::Stream getStream(c10::Device) const noexcept override {
+  c10::Stream getStream(c10::Device /*unused*/) const noexcept override {
     // no-op
     return c10::Stream(c10::Stream::DEFAULT, getDevice());
   }

-  c10::Stream getNewStream(c10::Device, int priority = 0) const override {
+  c10::Stream getNewStream(c10::Device /*unused*/, int priority = 0)
+      const override {
     // no-op
     (void)priority;
     return c10::Stream(c10::Stream::DEFAULT, getDevice());
   }

-  c10::Stream exchangeStream(c10::Stream) const noexcept override {
+  c10::Stream exchangeStream(c10::Stream /*unused*/) const noexcept override {
     // no-op
     return c10::Stream(c10::Stream::DEFAULT, getDevice());
   }


@@ -8,11 +8,11 @@ namespace torch::nn::functional {
 #ifndef DOXYGEN_SHOULD_SKIP_THIS
 namespace detail {

-inline std::string padding_unwrap(enumtype::kValid) {
+inline std::string padding_unwrap(enumtype::kValid /*unused*/) {
   return "valid";
 }

-inline std::string padding_unwrap(enumtype::kSame) {
+inline std::string padding_unwrap(enumtype::kSame /*unused*/) {
   return "same";
 }


@@ -185,11 +185,12 @@ class AnyModule {
       typename... ArgumentTypes>
   std::unique_ptr<AnyModulePlaceholder> make_holder(
       std::shared_ptr<ModuleType>&& module,
-      ReturnType (Class::*)(ArgumentTypes...));
+      ReturnType (Class::* /*unused*/)(ArgumentTypes...));

   /// Helper method invoked by const and non-const `get()`.
   template <typename ModuleType, typename ReturnType, typename... ArgumentTypes>
-  ModuleType& get_(ReturnType (ModuleType::*)(ArgumentTypes...)) const;
+  ModuleType& get_(
+      ReturnType (ModuleType::* /*unused*/)(ArgumentTypes...)) const;

   /// Helper method invoked by const and non-const `get()`.
   template <typename ModuleType>
@@ -320,7 +321,7 @@ template <
     typename... ArgumentTypes>
 std::unique_ptr<AnyModulePlaceholder> AnyModule::make_holder(
     std::shared_ptr<ModuleType>&& module,
-    ReturnType (Class::*)(ArgumentTypes...)) {
+    ReturnType (Class::* /*unused*/)(ArgumentTypes...)) {
   static_assert(
       torch::detail::check_not_lvalue_references<ArgumentTypes...>(),
       "Modules stored inside AnyModule must not take references. "
@@ -345,7 +346,7 @@ ModuleType& AnyModule::get_() const {
 template <typename ModuleType, typename ReturnType, typename... ArgumentTypes>
 ModuleType& AnyModule::get_(
-    ReturnType (ModuleType::*)(ArgumentTypes...)) const {
+    ReturnType (ModuleType::* /*unused*/)(ArgumentTypes...)) const {
   if (typeid(ModuleType).hash_code() == type_info().hash_code()) {
     return *static_cast<AnyModuleHolder<ModuleType, ArgumentTypes...>&>(
         *content_)


@@ -279,7 +279,7 @@ std::tuple<at::Tensor, at::Tensor> clamp_backward_min_max(
     const at::Tensor& self,
     const at::Tensor& min,
     const at::Tensor& max,
-    const std::array<bool, 2>&);
+    const std::array<bool, 2>& /*grad_input_mask*/);
 at::Tensor clamp_jvp(
     const Tensor& self_p,
     const Tensor& self_t,


@@ -517,8 +517,9 @@ struct GenericViewFunc : public ViewFunc {
   }

   std::unique_ptr<ViewFunc> clone_and_set(
-      std::optional<std::vector<c10::SymInt>> = std::nullopt,
-      std::optional<std::vector<at::Tensor>> = std::nullopt) const override {
+      std::optional<std::vector<c10::SymInt>> /*unused*/ = std::nullopt,
+      std::optional<std::vector<at::Tensor>> /*unused*/ =
+          std::nullopt) const override {
     return std::make_unique<GenericViewFunc>(
         non_tensor_stack_, aliased_input_idx_val_, op_);
   }


@@ -60,8 +60,8 @@ struct TORCH_API PostAccumulateGradHook {
   }

   virtual void apply_with_saved(
-      Variable&,
-      torch::dynamo::autograd::SwapSavedVariables&) {
+      Variable& /*unused*/,
+      torch::dynamo::autograd::SwapSavedVariables& /*unused*/) {
     TORCH_CHECK_NOT_IMPLEMENTED(
         false,
         std::string("compiled_args nyi, see [Note: Compiled Autograd] ") +


@@ -222,7 +222,7 @@ struct AddTensorboardFields : public MetadataBase {
   }

   template <typename T>
-  void operator()(const T&) {}
+  void operator()(const T& /*unused*/) {}
 };

 struct AddGenericMetadata : public MetadataBase {
@@ -346,7 +346,7 @@ struct AddGenericMetadata : public MetadataBase {
   }

   template <typename T>
-  void operator()(const T&) {}
+  void operator()(const T& /*unused*/) {}

  private:
   /* To get names of the performance events */


@@ -23,7 +23,7 @@ using extra_meta_t = std::unordered_map<std::string, std::string>;
 struct TORCH_API KinetoEvent {
   KinetoEvent(
-      const std::shared_ptr<const torch::profiler::impl::Result>&,
+      const std::shared_ptr<const torch::profiler::impl::Result>& /*result*/,
       const bool verbose);

   uint64_t startThreadId() const;
@@ -63,7 +63,7 @@ struct TORCH_API KinetoEvent {
   bool isPythonFunction() const;
   int64_t cudaElapsedUs() const;
   int64_t privateuse1ElapsedUs() const;
-  void getPerfEventCounters(torch::profiler::perf_counters_t&) const;
+  void getPerfEventCounters(torch::profiler::perf_counters_t& /*in*/) const;
   extra_meta_t extraMeta() const;
   std::string metadataJson() const;


@@ -328,7 +328,7 @@ struct TORCH_API ProfilerDisableOptions {
 // NOTE: profiler mode is thread local, with automatic propagation
 // across thread boundary (e.g. at::launch tasks)
 TORCH_API void enableProfilerLegacy(
-    const torch::profiler::impl::ProfilerConfig&);
+    const torch::profiler::impl::ProfilerConfig& /*new_config*/);
 using thread_event_lists = std::vector<std::vector<LegacyEvent>>;
 TORCH_API thread_event_lists disableProfilerLegacy(
     std::optional<ProfilerDisableOptions> profilerDisableOptions =


@@ -365,7 +365,9 @@ std::vector<std::pair<std::string, TensorMetadata>> ValueCache::unpackTensorMap(
 }

 template <>
-void ValueCache::store<CallType::PyCall>(const PyCallKey& key, no_ephemeral_t) {
+void ValueCache::store<CallType::PyCall>(
+    const PyCallKey& key,
+    no_ephemeral_t /*unused*/) {
   auto& locations = std::get<CallType::PyCall>(state_);
   if (C10_UNLIKELY(locations.find(key) == locations.end())) {
     locations[key] = {
@@ -1432,7 +1434,7 @@ struct PythonIDVisitor {
   }

   template <typename T>
-  void operator()(T&) {}
+  void operator()(T& /*unused*/) {}

   size_t current_python_id_{0};
   ska::flat_hash_map<PyModuleCls, ska::flat_hash_map<PyModuleSelf, size_t>>


@@ -686,7 +686,7 @@ static Tensor make_tensor_for_subclass_helper(
 }

 static PyObject* THPVariable_make_wrapper_subclass(
-    PyObject*,
+    PyObject* /*unused*/,
     PyObject* args,
     PyObject* kwargs) {
   HANDLE_TH_ERRORS
@@ -895,7 +895,7 @@ static c10::SymDimVector tuple_to_symintlist(PyObject* obj) {
 // DTensor-specific variant of make_wrapper_subclass to minimize DTensor
 // overhead.
 static PyObject* THPVariable_dtensor_new(
-    PyObject*,
+    PyObject* /*unused*/,
     PyObject* args,
     PyObject* kwargs) {
   HANDLE_TH_ERRORS


@@ -108,31 +108,35 @@ namespace impl {
 // WARNING: This may return a nullptr. If you require AutogradMeta to return
 // a materialized structure, use materialize_autograd_meta instead.
-TORCH_API AutogradMeta* get_autograd_meta(const at::TensorBase&);
+TORCH_API AutogradMeta* get_autograd_meta(const at::TensorBase& /*self*/);

 // WARNING: This will return a nullptr if the Tensor is not a view.
-TORCH_API DifferentiableViewMeta* get_view_autograd_meta(const at::TensorBase&);
+TORCH_API DifferentiableViewMeta* get_view_autograd_meta(
+    const at::TensorBase& /*self*/);

 // Returns the current autograd meta, materializing it if it was previously
 // none. This counts as a *mutating* operation, so do not call it on
 // "read-only" operators; in particular, this is NOT thread safe
-TORCH_API AutogradMeta* materialize_autograd_meta(const at::TensorBase&);
+TORCH_API AutogradMeta* materialize_autograd_meta(
+    const at::TensorBase& /*self*/);

 /// Set the gradient accumulator of the `Variable`. This is only applicable to
 /// leaf variables. Interior variables should call `set_gradient_edge()`.
 TORCH_API void set_grad_accumulator(
-    const Variable&,
+    const Variable& /*self*/,
     std::weak_ptr<Node> grad_accumulator);

 /// Attempts to get a pointer to the gradient accumulator of the `Variable`,
 /// if it still exists. If the gradient accumulator function has been
 /// destroyed, returns a `nullptr`.
-TORCH_API std::shared_ptr<Node> try_get_grad_accumulator(const Variable&);
-TORCH_API std::shared_ptr<Node> try_get_grad_accumulator(const at::TensorBase&);
+TORCH_API std::shared_ptr<Node> try_get_grad_accumulator(
+    const Variable& /*self*/);
+TORCH_API std::shared_ptr<Node> try_get_grad_accumulator(
+    const at::TensorBase& /*self*/);

 /// Gets the gradient accumulator of the `Variable` if it has one, or else
 /// create one on the fly and return it.
-TORCH_API std::shared_ptr<Node> grad_accumulator(const Variable&);
+TORCH_API std::shared_ptr<Node> grad_accumulator(const Variable& /*self*/);

 /// Returns the "canonical" gradient edge of this `Variable`, i.e. either the
 /// gradient function if this is an interior `Variable`, or the gradient
@@ -142,7 +146,7 @@ TORCH_API std::shared_ptr<Node> grad_accumulator(const Variable&);
 /// zero. Note that `set_gradient_edge` and `gradient_edge` are not
 /// symmetric. You must use `set_gradient_edge` to set the `grad_fn` and
 /// `set_grad_accumulator` to set the accumulator.
-TORCH_API Edge gradient_edge(const Variable&);
+TORCH_API Edge gradient_edge(const Variable& /*self*/);

 /// Set the gradient edge -- i.e. `grad_fn` and `input_nr` -- of the
 /// `Variable`.
@@ -150,7 +154,7 @@ TORCH_API Edge gradient_edge(const Variable&);
 /// and never the `grad_accumulator`. For the latter, use
 /// `set_grad_accumulator`. This allows late construction of an interior
 /// `Variable`.
-TORCH_API void set_gradient_edge(const Variable&, Edge edge);
+TORCH_API void set_gradient_edge(const Variable& /*self*/, Edge edge);

 // Autograd Graph Interaction
 //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -161,36 +165,37 @@ TORCH_API void set_gradient_edge(const Variable&, Edge edge);
 /// For View Variables:
 /// Called after in-place modifications. Modifies the grad_fn of the base
 /// Variable.
-TORCH_API void rebase_history(const Variable&, Edge gradient_edge);
+TORCH_API void rebase_history(const Variable& /*self*/, Edge gradient_edge);

 /// Gets the raw gradient function pointer, whatever it currently is.
-TORCH_API Node* grad_fn_unsafe(const Variable&);
+TORCH_API Node* grad_fn_unsafe(const Variable& /*self*/);

 /// Increments the version count of this `Variable`.
-TORCH_API void bump_version(const Variable&);
+TORCH_API void bump_version(const Variable& /*self*/);
 TORCH_API void set_version_counter(
-    const Variable&,
+    const Variable& /*self*/,
     const c10::VariableVersion& version_counter);

 /// Retrieves this `Variable`s version counter.
-TORCH_API const c10::VariableVersion& version_counter(const Variable&);
+TORCH_API const c10::VariableVersion& version_counter(const Variable& /*self*/);

-TORCH_API void set_name(const Variable&, const std::string& name);
+TORCH_API void set_name(const Variable& /*self*/, const std::string& name);

 TORCH_API void add_hook(
-    const at::TensorBase&,
+    const at::TensorBase& /*self*/,
     std::unique_ptr<FunctionPreHook> hook);
-TORCH_API std::vector<std::unique_ptr<FunctionPreHook>>& hooks(const Variable&);
-TORCH_API void clear_hooks(const at::TensorBase&);
+TORCH_API std::vector<std::unique_ptr<FunctionPreHook>>& hooks(
+    const Variable& /*self*/);
+TORCH_API void clear_hooks(const at::TensorBase& /*self*/);

 TORCH_API void set_post_acc_grad_hooks(
-    const at::TensorBase&,
+    const at::TensorBase& /*self*/,
     std::unique_ptr<PostAccumulateGradHook> dict);
 TORCH_API std::unique_ptr<PostAccumulateGradHook>& post_acc_grad_hooks(
-    const Variable&);
+    const Variable& /*self*/);

 TORCH_API void create_cpp_hook(
-    const at::TensorBase&,
+    const at::TensorBase& /*self*/,
     bool is_retains_grad_hooks = false);
 } // namespace impl
@@ -373,12 +378,12 @@ struct TORCH_API ViewFunc {
   /// must match the number of SymInts in the saved state (i.e. the size of the
   /// list returned by get_symints()).
   /// NOLINTNEXTLINE(performance-unnecessary-value-param)
-  virtual void set_symints(std::vector<c10::SymInt>) {}
+  virtual void set_symints(std::vector<c10::SymInt> /*unused*/) {}

   /// Sets the values of any Tensors in the saved state. The input vector size
   /// must match the number of Tensors in the saved state (i.e. the size of the
   /// list returned by get_tensors()).
   /// NOLINTNEXTLINE(performance-unnecessary-value-param)
-  virtual void set_tensors(std::vector<at::Tensor>) {}
+  virtual void set_tensors(std::vector<at::Tensor> /*unused*/) {}
 };

 /// ViewFunc that represents a chain of two ViewFuncs.
@@ -396,10 +401,13 @@ struct ChainedViewFunc : public ViewFunc {
   size_t num_tensors() const override {
     return first->num_tensors() + second->num_tensors();
   }
-  at::Tensor operator()(const at::Tensor&) const override;
+  at::Tensor operator()(
+      const at::Tensor& /*input_base*/ /*unused*/) const override;
   std::unique_ptr<ViewFunc> clone_and_set(
-      std::optional<std::vector<c10::SymInt>> = std::nullopt,
-      std::optional<std::vector<at::Tensor>> = std::nullopt) const override;
+      std::optional<std::vector<c10::SymInt>> /*symints*/ /*unused*/ =
+          std::nullopt,
+      std::optional<std::vector<at::Tensor>> /*tensors*/ /*unused*/ =
+          std::nullopt) const override;

  private:
   std::unique_ptr<ViewFunc> first;
@@ -410,12 +418,13 @@ struct ChainedViewFunc : public ViewFunc {
 struct ErroringViewFunc : public ViewFunc {
   ErroringViewFunc(std::string error_msg) : error_msg(std::move(error_msg)) {}
   ~ErroringViewFunc() override = default;
-  at::Tensor operator()(const at::Tensor&) const override {
+  at::Tensor operator()(const at::Tensor& /*unused*/) const override {
     TORCH_CHECK(false, error_msg);
   }
   std::unique_ptr<ViewFunc> clone_and_set(
-      std::optional<std::vector<c10::SymInt>> = std::nullopt,
-      std::optional<std::vector<at::Tensor>> = std::nullopt) const override {
+      std::optional<std::vector<c10::SymInt>> /*unused*/ = std::nullopt,
+      std::optional<std::vector<at::Tensor>> /*unused*/ =
+          std::nullopt) const override {
     return std::make_unique<ErroringViewFunc>(error_msg);
   }
@@ -923,19 +932,24 @@ inline Variable make_variable(
 }

 struct VariableHooks final : at::impl::VariableHooksInterface {
-  at::TensorBase tensor_data(const at::TensorBase&) const override;
-  at::TensorBase variable_data(const at::TensorBase&) const override;
+  at::TensorBase tensor_data(
+      const at::TensorBase& /*self*/ /*unused*/) const override;
+  at::TensorBase variable_data(
+      const at::TensorBase& /*self*/ /*unused*/) const override;
   const std::shared_ptr<torch::autograd::Node>& grad_fn(
-      const at::TensorBase&) const override;
+      const at::TensorBase& /*self*/ /*unused*/) const override;
   unsigned _register_hook(
-      const at::TensorBase&,
+      const at::TensorBase& /*self*/ /*unused*/,
       std::function<at::TensorBase(const at::TensorBase&)> hook) const override;
-  void remove_hook(const at::TensorBase&, unsigned pos) const override;
-  bool is_view(const at::TensorBase&) const override;
-  const at::TensorBase& base(const at::TensorBase&) const override;
-  const std::string& name(const at::TensorBase&) const override;
-  bool is_leaf(const at::TensorBase&) const override;
-  int64_t output_nr(const at::TensorBase&) const override;
+  void remove_hook(const at::TensorBase& /*self*/ /*unused*/, unsigned pos)
+      const override;
+  bool is_view(const at::TensorBase& /*self*/ /*unused*/) const override;
+  const at::TensorBase& base(
+      const at::TensorBase& /*self*/ /*unused*/) const override;
+  const std::string& name(
+      const at::TensorBase& /*self*/ /*unused*/) const override;
+  bool is_leaf(const at::TensorBase& /*self*/ /*unused*/) const override;
+  int64_t output_nr(const at::TensorBase& /*self*/ /*unused*/) const override;
   void set_data(const at::TensorBase& self, const at::TensorBase& new_data)
       const override;
   at::TensorBase data(const at::TensorBase& self) const override;
@@ -955,10 +969,11 @@ struct VariableHooks final : at::impl::VariableHooksInterface {
       c10::DispatchKeySet dispatch_keys,
       torch::jit::Stack* stack) const override;
   std::optional<c10::ScalarType> grad_dtype(
-      const at::TensorBase&) const override;
+      const at::TensorBase& /*self*/ /*unused*/) const override;
   void set_grad_dtype(
-      const at::TensorBase&,
-      const std::optional<c10::ScalarType>&) const override;
+      const at::TensorBase& /*self*/ /*unused*/,
+      const std::optional<c10::ScalarType>& /*grad_dtype*/ /*unused*/)
+      const override;
 };

 namespace utils {


@@ -135,7 +135,7 @@ class TORCH_API Work : public torch::CustomClassHolder {
   OpType retrieveOpType() const;

   static c10::intrusive_ptr<Work> create_from_future(
-      const c10::intrusive_ptr<c10::ivalue::Future>&);
+      const c10::intrusive_ptr<c10::ivalue::Future>& /*future*/);

  protected:
   // Completes the work object and optionally sets the exception in a


@@ -153,7 +153,7 @@ class TORCH_API C10dLogger {
   virtual ~C10dLogger() = default;
   virtual void log(const C10dLoggingData& data);
   static C10dLogger* getLogger();
-  static void registerLogger(std::unique_ptr<C10dLogger>);
+  static void registerLogger(std::unique_ptr<C10dLogger> /*logger*/);

  protected:
   // singletion, hide constructor from the public


@@ -225,7 +225,7 @@ class TORCH_API RRefContext {
     c10::intrusive_ptr<JitFuture> confirmationFuture_;
   };

-  RRefContext(std::shared_ptr<RpcAgent>);
+  RRefContext(std::shared_ptr<RpcAgent> /*agent*/);

   c10::intrusive_ptr<UserRRef> createUserRRef(
       worker_id_t ownerId,


@@ -232,11 +232,11 @@ class TORCH_API TensorPipeAgent : public RpcAgent {
   // messages by server, and write request messages by client. This
   // is a protected method since it is overwritten by FaultyTensorPipeAgent
   virtual void pipeWrite(
-      const std::shared_ptr<tensorpipe::Pipe>&,
+      const std::shared_ptr<tensorpipe::Pipe>& /*pipe*/,
       const c10::intrusive_ptr<Message>& message,
       std::vector<c10::Device>&& devices,
       std::vector<c10::Stream> streams,
-      std::function<void(const tensorpipe::Error&)>) noexcept;
+      std::function<void(const tensorpipe::Error&)> /*fn*/) noexcept;

  private:
   // Removes the given messageId with the given expirationTime from the
@@ -257,11 +257,11 @@ class TORCH_API TensorPipeAgent : public RpcAgent {
   // TensorPipe read function that could be used to read response messages
   // by client, and read request messages by server.
   void pipeRead(
-      const std::shared_ptr<tensorpipe::Pipe>&,
+      const std::shared_ptr<tensorpipe::Pipe>& /*pipe*/,
       std::function<void(
           const tensorpipe::Error&,
           c10::intrusive_ptr<Message>,
-          std::vector<c10::Stream>)>) noexcept;
+          std::vector<c10::Stream>)> /*fn*/) noexcept;

   // Callback of listener accept()
   void onListenerAccepted(


@@ -49,8 +49,8 @@ extern TORCH_API std::array<
 class TORCH_API TensorpipeDeviceTypeConverterRegistrar {
  public:
   TensorpipeDeviceTypeConverterRegistrar(
-      DeviceType,
-      const TensorpipeDeviceTypeConverter*);
+      DeviceType /*type*/,
+      const TensorpipeDeviceTypeConverter* /*impl*/);
 };

 #define C10_REGISTER_TENSORPIPE_DEVICE_TYPE_CONVERTER( \


@@ -32,7 +32,7 @@ struct TORCH_API GloballyUniqueId final {
   bool operator!=(const GloballyUniqueId& other) const;

   at::IValue toIValue() const;
-  static GloballyUniqueId fromIValue(const at::IValue&);
+  static GloballyUniqueId fromIValue(const at::IValue& /*ivalue*/);

   struct Hash {
     size_t operator()(const GloballyUniqueId& key) const {


@@ -105,7 +105,7 @@ class AOTIPythonKernelHolder : public c10::OperatorKernel {
   void init_aoti_kernel_cache();
   // Load the AOTIModelContainerRunner object from the given file path.
   std::shared_ptr<AOTIModelContainerRunner> load_aoti_model_runner(
-      const std::string&);
+      const std::string& /*so_path*/);
 };

 } // namespace torch::inductor


@@ -40,7 +40,7 @@ namespace torch::aot_inductor {
 using DeleterFnPtr = void (*)(void*);

-inline void noop_deleter(void*) {}
+inline void noop_deleter(void* /*unused*/) {}

 inline void delete_record_function_object(void* ptr) {
   AOTI_TORCH_ERROR_CODE_CHECK(aoti_record_function_end(


@@ -62,7 +62,7 @@ T& toGraphFunctionImpl(F& function) {
 } // namespace

-static void placeholderCreator(GraphFunction&) {
+static void placeholderCreator(GraphFunction& /*unused*/) {
   throw RecursiveMethodCallError();
 }


@@ -173,8 +173,8 @@ struct TORCH_API GraphFunction : public Function {
 };

 // Short hands for dynamic_cast<GraphFunction*>.
-TORCH_API GraphFunction* tryToGraphFunction(Function&) noexcept;
-TORCH_API GraphFunction& toGraphFunction(Function&);
-TORCH_API const GraphFunction& toGraphFunction(const Function&);
+TORCH_API GraphFunction* tryToGraphFunction(Function& /*function*/) noexcept;
+TORCH_API GraphFunction& toGraphFunction(Function& /*function*/);
+TORCH_API const GraphFunction& toGraphFunction(const Function& /*function*/);

 } // namespace torch::jit

 C10_DECLARE_bool(torch_jit_do_not_store_optimized_graph);


@@ -65,7 +65,9 @@ struct TORCH_API Method : public torch::IMethod {
   }

  private:
-  void setArgumentNames(std::vector<std::string>&) const override;
+  void setArgumentNames(
+      std::vector<std::string>& /*argumentNames*/ /*argumentNamesOut*/)
+      const override;

   // Methods are uniqued owned by a single module. This raw pointer allows
   // looking up the module.


@@ -93,7 +93,7 @@ struct TORCH_API Module : public Object {
   Module(Module&&) noexcept = default;
   Module& operator=(Module&&) noexcept = default;
   Module(
-      c10::QualifiedName,
+      c10::QualifiedName /*class_name*/,
       std::shared_ptr<CompilationUnit> cu,
       bool shouldMangle = false);
   Module(ModulePtr module_value) : Object(std::move(module_value)) {}


@@ -38,7 +38,7 @@ TORCH_API CudaFuserInterface* getFuserInterface();
 TORCH_API void compileFusionGroup(Node* fusion_node);
 TORCH_API void runFusionGroup(const Node* fusion_node, Stack& stack);
-TORCH_API void fuseGraph(std::shared_ptr<Graph>&);
+TORCH_API void fuseGraph(std::shared_ptr<Graph>& /*graph*/);
 TORCH_API bool canFuseNode(const Node* node);
 TORCH_API void InsertProfileNodesForCUDAFuser(ProfilingRecord* pr);
 TORCH_API bool profileNode(const Node* node);


@@ -388,7 +388,7 @@ template <
         !std::is_convertible_v<
             std::decay_t<T>,
             c10::intrusive_ptr<c10::ivalue::Object>>)>>
-void addOutput(Node* node, T&&) {
+void addOutput(Node* node, T&& /*unused*/) {
   TORCH_CHECK(
       false,
       "Found an unsupported argument type ",


@@ -190,7 +190,7 @@ struct TORCH_API InlinedCallStack : public c10::intrusive_ptr_target {
   // Return callstack as a vector of [Function, SourceRange] pairs.
   std::vector<InlinedCallStackEntry> vec();

-  void setCallee(std::optional<InlinedCallStackPtr>);
+  void setCallee(std::optional<InlinedCallStackPtr> /*callee*/);

   bool operator==(const InlinedCallStack& rhs) const {
     // No need to compare fn_, since source_range equivalence check


@@ -154,34 +154,34 @@ class FlatbufferLoader final {
 };

 IValue parseList(
-    FlatbufferLoader&,
+    FlatbufferLoader& /*loader*/,
     const mobile::serialization::IValue& ivalue);
 IValue parseTensor(
-    FlatbufferLoader&,
+    FlatbufferLoader& /*loader*/,
     const mobile::serialization::IValue& ivalue);
 IValue parseTuple(
-    FlatbufferLoader&,
+    FlatbufferLoader& /*loader*/,
     const mobile::serialization::IValue& ivalue);
 IValue parseDict(
-    FlatbufferLoader&,
+    FlatbufferLoader& /*loader*/,
     const mobile::serialization::IValue& ivalue);
 IValue parseObject(
-    FlatbufferLoader&,
+    FlatbufferLoader& /*loader*/,
     const mobile::serialization::IValue& ivalue);
 IValue parseIntList(
-    FlatbufferLoader&,
+    FlatbufferLoader& /*unused*/,
     const mobile::serialization::IValue& ivalue);
 IValue parseDoubleList(
-    FlatbufferLoader&,
+    FlatbufferLoader& /*unused*/,
     const mobile::serialization::IValue& ivalue);
 IValue parseBoolList(
-    FlatbufferLoader&,
+    FlatbufferLoader& /*unused*/,
     const mobile::serialization::IValue& ivalue);
 IValue parseBasic(
-    FlatbufferLoader&,
+    FlatbufferLoader& /*unused*/,
     const mobile::serialization::IValue& ivalue);
 IValue parseEnum(
-    FlatbufferLoader&,
+    FlatbufferLoader& /*loader*/,
     const mobile::serialization::IValue& ivalue);

 TypePtr resolveType(
@@ -442,7 +442,7 @@ IValue parseEnum(
 }

 IValue parseBasic(
-    FlatbufferLoader&,
+    FlatbufferLoader& /*unused*/,
     const mobile::serialization::IValue& ivalue) {
   switch (ivalue.val_type()) {
     case mobile::serialization::IValueUnion::NONE:
@@ -546,21 +546,21 @@ std::vector<T> parseListNative(const U* list) {
 }

 IValue parseIntList(
-    FlatbufferLoader&,
+    FlatbufferLoader& /*unused*/,
     const mobile::serialization::IValue& ivalue) {
   const auto& list = ivalue.val_as_IntList();
   return parseListNative<int64_t>(list);
 }

 IValue parseDoubleList(
-    FlatbufferLoader&,
+    FlatbufferLoader& /*unused*/,
     const mobile::serialization::IValue& ivalue) {
   const auto& list = ivalue.val_as_DoubleList();
   return parseListNative<double>(list);
 }

 IValue parseBoolList(
-    FlatbufferLoader&,
+    FlatbufferLoader& /*unused*/,
     const mobile::serialization::IValue& ivalue) {
   const auto& list = ivalue.val_as_BoolList();
   std::vector<uint8_t> res = parseListNative<uint8_t>(list);
@@ -690,8 +690,8 @@ IValue FlatbufferLoader::parseIValue(
       *this, *ivalue);
 }

-void deleteNothing2(void*);
-void deleteNothing2(void*) {}
+void deleteNothing2(void* /*unused*/);
+void deleteNothing2(void* /*unused*/) {}

 c10::Storage FlatbufferLoader::getStorage(uint32_t index) {
   TORCH_CHECK(index < storage_loaded_.size());
@@ -760,7 +760,7 @@ void FlatbufferLoader::extractJitSourceAndConstants(
 mobile::Module parse_and_initialize_mobile_module(
     void* data,
     size_t size,
-    std::optional<at::Device>,
+    std::optional<at::Device> /*unused*/,
     ExtraFilesMap* extra_files,
     bool should_copy_tensor_memory) {
   // TODO(T128189662): If not copying, enforce that data is aligned to
@@ -806,7 +806,7 @@ mobile::Module parse_and_initialize_mobile_module_for_jit(
     size_t size,
     ExtraFilesMap& jit_sources,
     std::vector<IValue>& jit_constants,
-    std::optional<at::Device>,
+    std::optional<at::Device> /*unused*/,
     ExtraFilesMap* extra_files) {
   TORCH_CHECK(
       mobile::serialization::ModuleBufferHasIdentifier(data), "Format error");


@@ -149,7 +149,9 @@ size_t Function::num_inputs() const {
   return schema_->arguments().size();
 }

-bool Function::call(Stack&, c10::function_ref<void(const mobile::Code&)> f) {
+bool Function::call(
+    Stack& /*unused*/,
+    c10::function_ref<void(const mobile::Code&)> f) {
   initialize_operators(true);
   f(code_);
   return true;


@@ -26,7 +26,9 @@ class TORCH_API Function : public torch::jit::Function {
   void ensure_defined() override {}
   size_t num_inputs() const override;
   const c10::QualifiedName& qualname() const override;
-  bool call(Stack&, c10::function_ref<void(const mobile::Code&)>) override;
+  bool call(
+      Stack& /*unused*/,
+      c10::function_ref<void(const mobile::Code&)> /*f*/ /*unused*/) override;

   // NOTE: the APIs below is dangerous: if you call append_instruction with
   // dbg_handle and then call it without; then the dbg_handle will become


@@ -12,7 +12,7 @@ struct InterpreterState {
   TORCH_API bool run(Stack& stack);

  private:
-  void enterFrame(const Code&);
+  void enterFrame(const Code& /*code*/);
   void leaveFrame();
   void saveExceptionDebugHandles();
   void callFunction(torch::jit::Function& f, Stack& stack);


@@ -67,26 +67,28 @@ class MobileModuleObserver {
  public:
   virtual ~MobileModuleObserver() = default;

-  virtual void onEnterRunMethod(const int32_t) {}
+  virtual void onEnterRunMethod(const int32_t /*unused*/) {}
   virtual void onExitRunMethod(
-      const std::unordered_map<std::string, std::string>&,
-      const std::string&,
-      const int32_t) {}
+      const std::unordered_map<std::string, std::string>& /*unused*/,
+      const std::string& /*unused*/,
+      const int32_t /*unused*/) {}
   virtual void onFailRunMethod(
-      const std::unordered_map<std::string, std::string>&,
-      const std::string&,
-      const int32_t,
-      const char*) {}
-  virtual void onEnterLoadModel(const int32_t) {}
+      const std::unordered_map<std::string, std::string>& /*unused*/,
+      const std::string& /*unused*/,
+      const int32_t /*unused*/,
+      const char* /*unused*/) {}
+  virtual void onEnterLoadModel(const int32_t /*unused*/) {}
   virtual void onExitLoadModel(
-      const int32_t,
-      const std::unordered_map<std::string, std::string>&) {
+      const int32_t /*unused*/,
+      const std::unordered_map<std::string, std::string>& /*unused*/) {
   } // key: filename, value: file content
-  virtual void onFailLoadModel(const int32_t, const char*) {}
   virtual void onFailLoadModel(
-      const int32_t,
-      const char*,
-      const std::unordered_map<std::string, std::string>&) {}
+      const int32_t /*unused*/,
+      const char* /*unused*/) {}
+  virtual void onFailLoadModel(
+      const int32_t /*unused*/,
+      const char* /*unused*/,
+      const std::unordered_map<std::string, std::string>& /*unused*/) {}
   virtual std::vector<std::string> getDefaultExtraFiles() = 0;
   virtual std::unordered_map<std::string, std::string> processMetadataFromExtra(
       const std::unordered_map<std::string, std::string>&) = 0;


@@ -87,14 +87,14 @@ struct FunctionExtractor {
       const std::shared_ptr<Graph>& graph);
   static void HandleNoScopeNodes(
-      scope_ctx_map&,
+      scope_ctx_map& /*scope_ctxs*/,
       const node_list& no_scope_nlist);
   std::tuple<scope_ctx_map, node_list> PartitionNodesByScope(Block* b);
   scope_ctx_map PartitionNodesByScope(const std::shared_ptr<Graph>& graph);
   static std::unordered_map<ScopePtr, scope_list> PartitionIdenticalScopes(
       scope_ctx_map& scope_ctxs);
   static scope_list SortScopesByMaxDepth(
-      std::unordered_map<ScopePtr, scope_list>&);
+      std::unordered_map<ScopePtr, scope_list>& /*identical_scope_map*/);
   Node* CreateFunctionDefNode(
       FunctionContext& func_ctx,
       const std::shared_ptr<Graph>& graph,
@@ -107,7 +107,7 @@ struct FunctionExtractor {
       const std::string& domain_name,
       const std::string& func_name);

-  static void DebugPrintScopeContexts(const scope_ctx_map&);
+  static void DebugPrintScopeContexts(const scope_ctx_map& /*scope_ctxs*/);
   static void DebugPrintGraphWithFunction(const std::shared_ptr<Graph>& g);
   static void DebugPrintConstantDiff(const FunctionContext&);


@@ -85,7 +85,7 @@ class NodeNameGenerator {
  protected:
   virtual void CreateNodeName(Node* n) = 0;
-  void PopulateNodeNames(Block*);
+  void PopulateNodeNames(Block* /*b*/);
   void UpdateOutputsNames(Node* n);
   bool IsGraphOutput(const Value* v, const std::shared_ptr<Graph>& graph) const;


@@ -750,7 +750,7 @@ class InsertQuantDeQuantHelper {
     }
   }

-  void collectObserverNodesAndValueToQuantize(Module& module, Value*);
+  void collectObserverNodesAndValueToQuantize(Module& module, Value* /*v*/);
   void cleanup(Module& module, Graph* g);
   void removeObserverNodes(Graph* g);
   void quantizeTensors(Module& module, Graph* g, Value* self);


@@ -113,7 +113,7 @@ struct type_caster<torch::jit::IValue> {
   // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
   PYBIND11_TYPE_CASTER(torch::jit::IValue, _("IValue"));
-  bool load(handle src, bool) {
+  bool load(handle src, bool /*unused*/) {
     try {
       value = torch::jit::toTypeInferredIValue(src);
       return true;
@@ -136,7 +136,7 @@ struct type_caster<torch::jit::Symbol> {
   // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
   PYBIND11_TYPE_CASTER(torch::jit::Symbol, _("Symbol"));
-  bool load(handle src, bool) {
+  bool load(handle src, bool /*unused*/) {
     // TODO: Is there a way to py::cast that doesn't raise an exception on
     // failure? Can we catch pybind11::cast_error here instead?
     std::string src_str;
@@ -164,7 +164,7 @@ struct type_caster<torch::jit::AttributeKind> {
   // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
   PYBIND11_TYPE_CASTER(torch::jit::AttributeKind, _("AttributeKind"));
-  bool load(handle src, bool) {
+  bool load(handle src, bool /*unused*/) {
     return false;
   }
@@ -186,7 +186,7 @@ template <>
 struct type_caster<std::vector<torch::jit::Node*>> : ListCasterBase {
   static handle cast(
       const std::vector<torch::jit::Node*>& src,
-      return_value_policy,
+      return_value_policy /*unused*/,
       handle parent) {
     return ListCasterBase::cast(src, return_value_policy::reference, parent);
   }
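
The unnamed `bool` in every `load(handle, bool)` above is pybind11's implicit-conversion flag; casters that decide purely from the Python type never read it, so only the /*unused*/ marker is added. A minimal sketch of the same caster shape for a hypothetical `Celsius` type (not part of this commit):

#include <pybind11/pybind11.h>

struct Celsius {
  double degrees;
};

namespace pybind11::detail {
template <>
struct type_caster<Celsius> {
  PYBIND11_TYPE_CASTER(Celsius, _("Celsius"));
  // The second parameter is the `convert` flag; this caster ignores it,
  // so its name is commented out exactly as in the casters above.
  bool load(handle src, bool /*convert*/) {
    PyObject* obj = src.ptr();
    if (!PyFloat_Check(obj)) {
      return false;
    }
    value.degrees = PyFloat_AsDouble(obj);
    return true;
  }
  // `policy` and `parent` are likewise unused when returning a new float.
  static handle cast(
      const Celsius& src,
      return_value_policy /*policy*/,
      handle /*parent*/) {
    return PyFloat_FromDouble(src.degrees);
  }
};
} // namespace pybind11::detail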

View File

@@ -62,7 +62,10 @@ void eraseAllOutputs(Node* opt_pn) {
   }
 }
-void insertTracingNodes(Block*, ProfilingRecord*, TracingData&);
+void insertTracingNodes(
+    Block* /*block*/,
+    ProfilingRecord* /*pr*/,
+    TracingData& /*td*/);
 // The subtlety in `createPropNodeForIfBlock` is that we need to create
 // a "propagate" node that will propagate the mapping between the outputs

View File

@@ -81,7 +81,8 @@ namespace torch::jit {
 using ::c10::TensorTypePtr;
 using Dimension = int64_t;
-TORCH_API void RegisterProfilingNode(const std::function<bool(const Node*)>&);
+TORCH_API void RegisterProfilingNode(
+    const std::function<bool(const Node*)>& /*func*/);
 struct ProfilingRecord;

View File

@@ -418,8 +418,8 @@ struct OperatorGeneratorArgs {
   template <typename... Args>
   explicit constexpr OperatorGeneratorArgs(
-      torch::detail::SelectiveStr<false>,
-      Args...)
+      torch::detail::SelectiveStr<false> /*unused*/,
+      Args... /*unused*/)
       : schema_str(nullptr),
         isOperationCreator(false),
         operation(nullptr),
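
`SelectiveStr<false>` is the tag for operators that selective build compiles out, so this constructor exists only to swallow its arguments and none of them can be named meaningfully. A generic sketch of the same tag-dispatch idiom (all names hypothetical):

// Compile-time selection result, modeled as a tag type.
template <bool selected>
struct SelectedStr {};

struct OpRegistration {
  const char* schema;

  // Selected: the schema string is kept.
  explicit constexpr OpRegistration(SelectedStr<true> /*unused*/, const char* s)
      : schema(s) {}

  // Not selected: every argument is intentionally ignored, so all
  // parameter names are commented out, matching the constructor above.
  template <typename... Args>
  explicit constexpr OpRegistration(
      SelectedStr<false> /*unused*/,
      Args&&... /*unused*/)
      : schema(nullptr) {}
};

constexpr OpRegistration kKept{SelectedStr<true>{}, "aten::add"};
constexpr OpRegistration kDropped{SelectedStr<false>{}, "aten::mul"};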

View File

@@ -24,7 +24,7 @@ struct Datapoint {
 class TORCH_API InstructionSpan {
  public:
-  explicit InstructionSpan(Node&);
+  explicit InstructionSpan(Node& /*node*/);
   ~InstructionSpan();
   InstructionSpan(InstructionSpan&&) = delete;
   InstructionSpan& operator=(InstructionSpan&&) = delete;
@@ -91,7 +91,7 @@ class TORCH_API ScriptProfile : public CustomClassHolder {
   void enable();
   void disable();
   const SourceMap& dumpStats();
-  void addDatapoint(std::shared_ptr<profiling::Datapoint>);
+  void addDatapoint(std::shared_ptr<profiling::Datapoint> /*datapoint*/);
   ~ScriptProfile() override;
  private:

View File

@@ -22,7 +22,7 @@ namespace torch::jit {
 using SROpFunctor = SROperator (*)(Node* n);
 struct SROperatorFunctor {
-  virtual SROperator Generate(Node*) {
+  virtual SROperator Generate(Node* /*unused*/) {
     SROperator out;
     return out;
   }
@@ -165,7 +165,7 @@ inline void LogAndDumpSchema(const Node* node) {
   VLOG(1) << "Found schema mismatch for: " << node->schema();
 }
-inline bool sr_schema_check(torch::jit::Node*) {
+inline bool sr_schema_check(torch::jit::Node* /*unused*/) {
   return true;
 }

View File

@@ -26,35 +26,35 @@ class TORCH_API CppPrinter : public IRPrinter {
   using IRPrinter::visit;
   // Binary expressions.
-  void visit(const ModPtr&) override;
-  void visit(const MaxPtr&) override;
-  void visit(const MinPtr&) override;
+  void visit(const ModPtr& /*v*/) override;
+  void visit(const MaxPtr& /*v*/) override;
+  void visit(const MinPtr& /*v*/) override;
   // Conditional expressions.
-  void visit(const CompareSelectPtr&) override;
-  void visit(const IfThenElsePtr&) override;
+  void visit(const CompareSelectPtr& /*v*/) override;
+  void visit(const IfThenElsePtr& /*v*/) override;
   // Tensor operations.
-  void visit(const AllocatePtr&) override;
-  void visit(const FreePtr&) override;
-  void visit(const LoadPtr&) override;
-  void visit(const StorePtr&) override;
+  void visit(const AllocatePtr& /*v*/) override;
+  void visit(const FreePtr& /*v*/) override;
+  void visit(const LoadPtr& /*v*/) override;
+  void visit(const StorePtr& /*v*/) override;
   // Casts.
-  void visit(const CastPtr&) override;
-  void visit(const BitCastPtr&) override;
+  void visit(const CastPtr& /*v*/) override;
+  void visit(const BitCastPtr& /*v*/) override;
   // Calls.
-  void visit(const IntrinsicsPtr&) override;
-  void visit(const ExternalCallPtr&) override;
+  void visit(const IntrinsicsPtr& /*v*/) override;
+  void visit(const ExternalCallPtr& /*v*/) override;
   // Vars.
-  void visit(const LetPtr&) override;
-  void visit(const VarPtr&) override;
+  void visit(const LetPtr& /*v*/) override;
+  void visit(const VarPtr& /*v*/) override;
   // Vector data types.
-  void visit(const RampPtr&) override;
-  void visit(const BroadcastPtr&) override;
+  void visit(const RampPtr& /*v*/) override;
+  void visit(const BroadcastPtr& /*v*/) override;
  private:
   int lane_;
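
CppPrinter is a classic visitor: one `visit` override per IR node type. Overrides and defaults that exist only to satisfy the interface leave their argument unread, hence the /*v*/ markers. A freestanding sketch of that shape (toy types, not the tensorexpr IR):

#include <iostream>

struct Add;
struct Load;

struct Visitor {
  virtual ~Visitor() = default;
  // Default visits ignore their argument, so the name is commented out.
  virtual void visit(const Add& /*v*/) {}
  virtual void visit(const Load& /*v*/) {}
};

struct Add {
  void accept(Visitor& v) const { v.visit(*this); }
};
struct Load {
  void accept(Visitor& v) const { v.visit(*this); }
};

struct Printer : Visitor {
  void visit(const Add& /*v*/) override { std::cout << "Add\n"; }
  void visit(const Load& /*v*/) override { std::cout << "Load\n"; }
};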

View File

@@ -14,8 +14,10 @@ class Stmt;
 // Forward declarations of functions
 namespace std {
-TORCH_API std::string to_string(const torch::jit::tensorexpr::ExprPtr&);
-TORCH_API std::string to_string(const torch::jit::tensorexpr::StmtPtr&);
+TORCH_API std::string to_string(
+    const torch::jit::tensorexpr::ExprPtr& /*expr*/);
+TORCH_API std::string to_string(
+    const torch::jit::tensorexpr::StmtPtr& /*stmt*/);
 } // namespace std
 namespace torch::jit::tensorexpr {

View File

@@ -378,7 +378,7 @@ void nnc_aten_quantized_conv1d(
     int64_t* buf_dims,
     int64_t* buf_strides,
     int8_t* buf_dtypes,
-    int64_t,
+    int64_t /*unused*/,
     int64_t* extra_args) {
   const double x_qscale = ((double*)extra_args)[0];
   const int64_t x_qzero = extra_args[1];
@@ -408,7 +408,7 @@ void nnc_aten_quantized_conv1d_out(
     int64_t* buf_dims,
     int64_t* buf_strides,
     int8_t* buf_dtypes,
-    int64_t,
+    int64_t /*unused*/,
     int64_t* extra_args) {
   const size_t bufs_out_num = 1u;
   const double x_qscale = ((double*)extra_args)[0];
@@ -442,7 +442,7 @@ void nnc_aten_quantized_conv2d(
     int64_t* buf_dims,
     int64_t* buf_strides,
     int8_t* buf_dtypes,
-    int64_t,
+    int64_t /*unused*/,
     int64_t* extra_args) {
   const double x_qscale = ((double*)extra_args)[0];
   const int64_t x_qzero = extra_args[1];
@@ -470,7 +470,7 @@ void nnc_aten_quantized_conv2d_out(
     int64_t* buf_dims,
     int64_t* buf_strides,
     int8_t* buf_dtypes,
-    int64_t,
+    int64_t /*unused*/,
     int64_t* extra_args) {
   const size_t bufs_out_num = 1u;
   const double x_qscale = ((double*)extra_args)[0];
@@ -502,7 +502,7 @@ void nnc_aten_quantized_conv2d_relu(
     int64_t* buf_dims,
     int64_t* buf_strides,
     int8_t* buf_dtypes,
-    int64_t,
+    int64_t /*unused*/,
     int64_t* extra_args) {
   const double x_qscale = ((double*)extra_args)[0];
   const int64_t x_qzero = extra_args[1];
@@ -530,7 +530,7 @@ void nnc_aten_quantized_conv2d_relu_out(
     int64_t* buf_dims,
     int64_t* buf_strides,
     int8_t* buf_dtypes,
-    int64_t,
+    int64_t /*unused*/,
     int64_t* extra_args) {
   const size_t bufs_out_num = 1u;
   const double x_qscale = ((double*)extra_args)[0];
@@ -562,7 +562,7 @@ void nnc_aten_quantized_linear(
     int64_t* buf_dims,
     int64_t* buf_strides,
     int8_t* buf_dtypes,
-    int64_t,
+    int64_t /*unused*/,
     int64_t* extra_args) {
   const double x_qscale = ((double*)extra_args)[0];
   const int64_t x_qzero = extra_args[1];
@@ -590,7 +590,7 @@ void nnc_aten_quantized_linear_out(
     int64_t* buf_dims,
     int64_t* buf_strides,
     int8_t* buf_dtypes,
-    int64_t,
+    int64_t /*unused*/,
     int64_t* extra_args) {
   const size_t bufs_out_num = 1u;
   const double x_qscale = ((double*)extra_args)[0];
@@ -622,7 +622,7 @@ void nnc_aten_quantized_linear_relu(
     int64_t* buf_dims,
     int64_t* buf_strides,
     int8_t* buf_dtypes,
-    int64_t,
+    int64_t /*unused*/,
     int64_t* extra_args) {
   const double x_qscale = ((double*)extra_args)[0];
   const int64_t x_qzero = extra_args[1];
@@ -651,7 +651,7 @@ void nnc_aten_quantized_add(
     int64_t* buf_dims,
     int64_t* buf_strides,
     int8_t* buf_dtypes,
-    int64_t,
+    int64_t /*unused*/,
     int64_t* extra_args) {
   // TORCH_INTERNAL_ASSERT(tensors.size() == 3);
@@ -684,7 +684,7 @@ void nnc_aten_quantized_mul(
     int64_t* buf_dims,
     int64_t* buf_strides,
     int8_t* buf_dtypes,
-    int64_t,
+    int64_t /*unused*/,
     int64_t* extra_args) {
   const double a_qscale = ((double*)extra_args)[0];
   const int64_t a_qzero = extra_args[1];
@@ -714,7 +714,7 @@ void nnc_aten_quantized_mul_out(
     int64_t* buf_dims,
     int64_t* buf_strides,
     int8_t* buf_dtypes,
-    int64_t,
+    int64_t /*unused*/,
     int64_t* extra_args) {
   const size_t bufs_out_num = 1u;
   const double a_qscale = ((double*)extra_args)[0];
@@ -748,7 +748,7 @@ void nnc_aten_quantized_mul_scalar(
     int64_t* buf_dims,
     int64_t* buf_strides,
     int8_t* buf_dtypes,
-    int64_t,
+    int64_t /*unused*/,
     int64_t* extra_args) {
   const double x_qscale = ((double*)extra_args)[0];
   const int64_t x_qzero = extra_args[1];
@@ -773,7 +773,7 @@ void nnc_aten_quantized_mul_scalar_out(
     int64_t* buf_dims,
     int64_t* buf_strides,
     int8_t* buf_dtypes,
-    int64_t,
+    int64_t /*unused*/,
     int64_t* extra_args) {
   const size_t bufs_out_num = 1u;
   const double x_qscale = ((double*)extra_args)[0];
@@ -802,7 +802,7 @@ void nnc_aten_quantized_relu(
     int64_t* buf_dims,
     int64_t* buf_strides,
     int8_t* buf_dtypes,
-    int64_t,
+    int64_t /*unused*/,
     int64_t* extra_args) {
   const double x_qscale = ((double*)extra_args)[0];
   const int64_t x_qzero = extra_args[1];
@@ -826,7 +826,7 @@ void nnc_aten_quantized_sigmoid(
     int64_t* buf_dims,
     int64_t* buf_strides,
     int8_t* buf_dtypes,
-    int64_t,
+    int64_t /*unused*/,
     int64_t* extra_args) {
   const double x_qscale = ((double*)extra_args)[0];
   const int64_t x_qzero = extra_args[1];
@@ -851,7 +851,7 @@ void nnc_aten_quantized_sigmoid_out(
     int64_t* buf_dims,
     int64_t* buf_strides,
     int8_t* buf_dtypes,
-    int64_t,
+    int64_t /*unused*/,
     int64_t* extra_args) {
   const double x_qscale = ((double*)extra_args)[0];
   const int64_t x_qzero = extra_args[1];
@@ -880,7 +880,7 @@ void nnc_aten_quantized_cat(
     int64_t* buf_dims,
     int64_t* buf_strides,
     int8_t* buf_dtypes,
-    int64_t,
+    int64_t /*unused*/,
     int64_t* extra_args) {
   std::vector<std::pair<size_t, QIData>> qdata;
   const auto in_bufs_num = bufs_num - 1;
@@ -914,7 +914,7 @@ void nnc_aten_upsample_nearest2d(
     int64_t* buf_dims,
     int64_t* buf_strides,
     int8_t* buf_dtypes,
-    int64_t,
+    int64_t /*unused*/,
     int64_t* extra_args) {
   // NOLINTNEXTLINE(facebook-hte-LocalUncheckedArrayBounds)
   const double x_qscale = ((double*)extra_args)[0];
@@ -956,7 +956,7 @@ void nnc_aten_upsample_nearest2d_out(
     int64_t* buf_dims,
     int64_t* buf_strides,
     int8_t* buf_dtypes,
-    int64_t,
+    int64_t /*unused*/,
     int64_t* extra_args) {
   const size_t bufs_out_num = 1u;
   // NOLINTNEXTLINE(facebook-hte-LocalUncheckedArrayBounds)
@@ -1008,7 +1008,7 @@ void nnc_aten_quantize_per_tensor(
     int64_t* buf_dims,
     int64_t* buf_strides,
     int8_t* buf_dtypes,
-    int64_t,
+    int64_t /*unused*/,
     int64_t* extra_args) {
   auto tensors = constructTensors(
       bufs_num, buf_data, buf_ranks, buf_dims, buf_strides, buf_dtypes);
@@ -1028,7 +1028,7 @@ void nnc_aten_quantize_per_tensor_out(
     int64_t* buf_dims,
     int64_t* buf_strides,
     int8_t* buf_dtypes,
-    int64_t,
+    int64_t /*unused*/,
     int64_t* extra_args) {
   const size_t bufs_out_num = 1u;
   auto tensors = constructTensors2(
@@ -1058,7 +1058,7 @@ void nnc_aten_dequantize(
     int64_t* buf_dims,
     int64_t* buf_strides,
     int8_t* buf_dtypes,
-    int64_t,
+    int64_t /*unused*/,
     int64_t* extra_args) {
   const double qscale = ((double*)extra_args)[0];
   const int64_t qzero = extra_args[1];
@@ -1083,7 +1083,7 @@ void nnc_aten_dequantize_out(
     int64_t* buf_dims,
     int64_t* buf_strides,
     int8_t* buf_dtypes,
-    int64_t,
+    int64_t /*unused*/,
     int64_t* extra_args) {
   const size_t bufs_out_num = 1u;
   const double qscale = ((double*)extra_args)[0];
@@ -1275,7 +1275,7 @@ void nnc_aten_max_red_out(
     int64_t* buf_dims,
     int64_t* buf_strides,
     int8_t* buf_dtypes,
-    int64_t,
+    int64_t /*unused*/,
     int64_t* extra_args) {
   size_t bufs_out_num = 1u;
   auto tensors = constructTensors2(
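
All of these nnc_aten_* wrappers share one calling convention so they can sit behind a single external-function pointer type; a slot that a given kernel does not read still has to appear in its signature, which is where the bare `int64_t /*unused*/` comes from. A generic sketch of unused slots in a uniform dispatch signature (names hypothetical):

#include <cstdint>

// One signature for every kernel in the table; kernels that don't need
// the `extra` slot still declare it and mark it unused.
using KernelFn = void (*)(int64_t n, double* data, int64_t extra);

void scale_kernel(int64_t n, double* data, int64_t /*unused*/) {
  for (int64_t i = 0; i < n; ++i) {
    data[i] *= 2.0;
  }
}

void shift_kernel(int64_t n, double* data, int64_t extra) {
  for (int64_t i = 0; i < n; ++i) {
    data[i] += static_cast<double>(extra);
  }
}

constexpr KernelFn kKernels[] = {scale_kernel, shift_kernel};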

View File

@@ -901,13 +901,13 @@ class TORCH_API Intrinsics : public ExprNode<Intrinsics> {
 };
 TORCH_API std::vector<ExprPtr> ExprHandleVectorToExprVector(
-    const std::vector<ExprHandle>&);
+    const std::vector<ExprHandle>& /*v*/);
 TORCH_API std::vector<ExprHandle> ExprVectorToExprHandleVector(
-    const std::vector<ExprPtr>&);
+    const std::vector<ExprPtr>& /*v*/);
 TORCH_API std::vector<VarPtr> VarHandleVectorToVarVector(
-    const std::vector<VarHandle>&);
+    const std::vector<VarHandle>& /*v*/);
 TORCH_API std::vector<VarHandle> VarVectorToVarHandleVector(
-    const std::vector<VarPtr>&);
+    const std::vector<VarPtr>& /*v*/);
 TORCH_API ExprPtr flatten_index(
     const std::vector<ExprPtr>& dims,
     const std::vector<ExprPtr>& indices,

View File

@@ -15,9 +15,9 @@ class TORCH_API IRPrinter : public IRVisitor {
  public:
   explicit IRPrinter(std::ostream& os) : printer_os_(this, os) {}
-  void print(ExprHandle);
-  void print(Expr&);
-  void print(Stmt&);
+  void print(ExprHandle /*expr*/);
+  void print(Expr& /*expr*/);
+  void print(Stmt& /*stmt*/);
   void visit(const AddPtr& v) override;
   void visit(const SubPtr& v) override;
   void visit(const MulPtr& v) override;
@@ -105,10 +105,12 @@ class TORCH_API IRPrinter : public IRVisitor {
   UniqueNameManager name_manager_;
 };
-TORCH_API std::ostream& operator<<(std::ostream& stream, const Expr&);
-TORCH_API std::ostream& operator<<(std::ostream& stream, const ExprHandle&);
-TORCH_API std::ostream& operator<<(std::ostream& stream, const Stmt&);
-TORCH_API std::ostream& operator<<(std::ostream& stream, const Tensor&);
+TORCH_API std::ostream& operator<<(std::ostream& stream, const Expr& /*expr*/);
+TORCH_API std::ostream& operator<<(
+    std::ostream& stream,
+    const ExprHandle& /*expr*/);
+TORCH_API std::ostream& operator<<(std::ostream& stream, const Stmt& /*stmt*/);
+TORCH_API std::ostream& operator<<(std::ostream& stream, const Tensor& /*t*/);
 TORCH_API void print(const ExprPtr& expr);
 TORCH_API void print(const StmtPtr& stmt);

View File

@@ -47,8 +47,8 @@ class TORCH_API IRVerifier : public IRVisitor {
   void visit(const BlockPtr& v) override;
 };
-TORCH_API void verify(const StmtPtr&);
-TORCH_API void verify(const ExprPtr&);
-TORCH_API void verify(const ExprHandle&);
+TORCH_API void verify(const StmtPtr& /*s*/);
+TORCH_API void verify(const ExprPtr& /*e*/);
+TORCH_API void verify(const ExprHandle& /*e*/);
 } // namespace torch::jit::tensorexpr

View File

@@ -43,11 +43,11 @@ class TORCH_API LoopNest {
     return root_stmt_;
   }
-  std::vector<ForPtr> getLoopStmtsFor(const Tensor&) const;
-  std::vector<ForPtr> getLoopStmtsFor(const BufPtr&) const;
-  std::vector<ForPtr> getLoopStmtsFor(StmtPtr) const;
-  StmtPtr getLoopBodyFor(const Tensor&) const;
-  StmtPtr getLoopBodyFor(BufPtr) const;
+  std::vector<ForPtr> getLoopStmtsFor(const Tensor& /*t*/) const;
+  std::vector<ForPtr> getLoopStmtsFor(const BufPtr& /*buf*/) const;
+  std::vector<ForPtr> getLoopStmtsFor(StmtPtr /*s*/) const;
+  StmtPtr getLoopBodyFor(const Tensor& /*t*/) const;
+  StmtPtr getLoopBodyFor(BufPtr /*buf*/) const;
   // Returns the For stmt indexed by 'indices' in the 'root' For stmt.
   //'indices' indicates the path to the returned loop from 'root' in AST, e.g.,
@@ -77,7 +77,7 @@ class TORCH_API LoopNest {
   static std::vector<ForPtr> getEnclosingLoopNest(const StmtPtr& st);
   // Returns a list of all Stmts that write to the given buf.
-  std::vector<StmtPtr> getAllWritesToBuf(BufPtr) const;
+  std::vector<StmtPtr> getAllWritesToBuf(BufPtr /*buf*/) const;
   // The following methods return the For loops that contain writes to
   // the given buf.
@@ -97,13 +97,14 @@ class TORCH_API LoopNest {
   // to buf.
   // For the above example:
   //   getAllInnermostLoopsWritingToBuf(a) => {j1, k2, j3}
-  std::vector<ForPtr> getAllInnermostLoopsWritingToBuf(BufPtr) const;
+  std::vector<ForPtr> getAllInnermostLoopsWritingToBuf(BufPtr /*buf*/) const;
   // Returns a list of For loopnests which contain a Stmt that writes to
   // the given buf. Each loopnest here is a vector For loops.
   // For the above example:
   //   getAllLoopNestsWritingToBuf(a) => {{i1,j1}, {i2,j2,k2}, {i2,j3}}
-  std::vector<std::vector<ForPtr>> getAllLoopNestsWritingToBuf(BufPtr) const;
+  std::vector<std::vector<ForPtr>> getAllLoopNestsWritingToBuf(
+      BufPtr /*buf*/) const;
   StmtPtr simplify();
@@ -561,7 +562,7 @@ class TORCH_API LoopNest {
   // Vectorize the given loop. This method requires that the given loop
   // does not perform a reduction.
   // It returns true if vectorization is successful and false otherwise.
-  static bool vectorize(const ForPtr&);
+  static bool vectorize(const ForPtr& /*f*/);
   // Find the inner-most loops and vectorize them. Currently, this only works
   // for the LLVM backend, when no reductions are involved.

View File

@@ -139,8 +139,8 @@ Tensor computeQuantizePerTensor(
     const std::vector<ArgValue>& inputs,
     const std::vector<ExprHandle>& outputShape,
     const std::vector<ExprHandle>& outputStrides,
-    const std::optional<ScalarType>&,
-    at::Device) {
+    const std::optional<ScalarType>& /*unused*/,
+    at::Device /*unused*/) {
   std::vector<VarPtr> vars;
   std::vector<ExprHandle> indices;
   for (const auto& os : outputShape) {
@@ -180,7 +180,7 @@ Tensor computeQuantizedAdd(
     const std::vector<ExprHandle>& outputShape,
     const std::vector<ExprHandle>& outputStrides,
     const std::optional<ScalarType>& outputType,
-    at::Device) {
+    at::Device /*unused*/) {
   const BufHandle& QA = std::get<BufHandle>(inputs[0]);
   const BufHandle& QB = std::get<BufHandle>(inputs[1]);
   auto qa_scale = ExprHandle(QA.node()->qscale());
@@ -223,7 +223,7 @@ Tensor computeQuantizePerTensorExternalCall(
     const std::vector<ExprHandle>& outputShape,
     const std::vector<ExprHandle>& outputStrides,
     const std::optional<ScalarType>& outputType,
-    at::Device) {
+    at::Device /*unused*/) {
   const BufHandle& x = std::get<BufHandle>(inputs[0]);
   const auto qscale = std::get<double>(inputs[1]);
   const auto qzero = std::get<int64_t>(inputs[2]);
@@ -255,7 +255,7 @@ Tensor computeDequantizeExternalCall(
     const std::vector<ExprHandle>& outputShape,
     const std::vector<ExprHandle>& outputStrides,
     const std::optional<ScalarType>& outputType,
-    at::Device) {
+    at::Device /*unused*/) {
   Dtype dtype = kFloat;
   if (outputType) {
     dtype = Dtype(*outputType);
@@ -280,7 +280,7 @@ Tensor computeQuantizedConv2dPrepack(
     const std::vector<ExprHandle>& outputShape,
     const std::vector<ExprHandle>& outputStrides,
     const std::optional<ScalarType>& outputType,
-    at::Device) {
+    at::Device /*unused*/) {
   Dtype dtype = kFloat;
   if (outputType) {
     dtype = Dtype(*outputType);
@@ -634,7 +634,7 @@ Tensor computeDequantize(
     const std::vector<ExprHandle>& outputShape,
     const std::vector<ExprHandle>& outputStrides,
     const std::optional<ScalarType>& outputType,
-    at::Device) {
+    at::Device /*unused*/) {
   Dtype dtype = kFloat;
   if (outputType) {
     dtype = Dtype(*outputType);
@@ -666,7 +666,7 @@ Tensor computeUpsampleNearest2d(
     const std::vector<ExprHandle>& outputShape,
     const std::vector<ExprHandle>& outputStrides,
     const std::optional<ScalarType>& outputType,
-    at::Device) {
+    at::Device /*unused*/) {
   const auto& A = std::get<BufHandle>(inputs[0]);
   const auto& output_height = outputShape[2];
   const auto& output_width = outputShape[3];
@@ -713,7 +713,7 @@ Tensor computeUpsampleNearest2dExternalCall(
     const std::vector<ExprHandle>& outputShape,
     const std::vector<ExprHandle>& outputStrides,
     const std::optional<ScalarType>& outputType,
-    at::Device) {
+    at::Device /*unused*/) {
   Dtype dtype = kFloat;
   if (outputType) {
     dtype = Dtype(*outputType);
@@ -772,7 +772,7 @@ Tensor computeQuantizedSigmoidExternalCall(
     const std::vector<ExprHandle>& outputShape,
     const std::vector<ExprHandle>& outputStrides,
     const std::optional<ScalarType>& outputType,
-    at::Device) {
+    at::Device /*unused*/) {
   const BufHandle& qx = std::get<BufHandle>(inputs[0]);
   const auto out_qdtype = immQDType(qx);

View File

@@ -145,5 +145,5 @@ TORCH_API Tensor computeQuantizedSigmoidExternalCall(
     const std::vector<ExprHandle>& outputShape,
     const std::vector<ExprHandle>& outputStrides,
     const std::optional<ScalarType>& outputType,
-    at::Device);
+    at::Device /*unused*/);
 } // namespace torch::jit::tensorexpr

View File

@@ -21,7 +21,7 @@ class TORCH_API LazyGraphExecutor {
   };
   // Register a lazy graph executor instance that can be retrieved using Get()
-  static void Register(LazyGraphExecutor*);
+  static void Register(LazyGraphExecutor* /*executor*/);
   static LazyGraphExecutor* Get();
   virtual ~LazyGraphExecutor() = default;

View File

@@ -253,7 +253,7 @@ TORCH_API at::Tensor to_lazy_tensor(
 template <size_t... Indices>
 auto TupleAtenFromLtcTensorsImpl(
     const std::vector<LazyTensorPtr>& tensors,
-    std::index_sequence<Indices...>) {
+    std::index_sequence<Indices...> /*unused*/) {
   return std::make_tuple(CreateAtenFromLtcTensor(tensors[Indices])...);
 }
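
The `std::index_sequence` parameter above exists only so the compiler can deduce the pack `Indices...`; the object itself is empty and never read, hence /*unused*/. A minimal standalone sketch of the same idiom:

#include <cstddef>
#include <tuple>
#include <utility>
#include <vector>

// The index_sequence argument only carries the pack Is...; its value is
// never used, so the parameter name is commented out.
template <typename T, std::size_t... Is>
auto TupleFromVectorImpl(
    const std::vector<T>& v,
    std::index_sequence<Is...> /*unused*/) {
  return std::make_tuple(v[Is]...);
}

template <std::size_t N, typename T>
auto TupleFromVector(const std::vector<T>& v) {
  return TupleFromVectorImpl(v, std::make_index_sequence<N>{});
}

// Usage: auto t = TupleFromVector<3>(std::vector<int>{1, 2, 3});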

View File

@@ -24,7 +24,7 @@ struct type_caster<torch::monitor::data_value_t> {
   PYBIND11_TYPE_CASTER(torch::monitor::data_value_t, _("data_value_t"));
   // Python -> C++
-  bool load(handle src, bool) {
+  bool load(handle src, bool /*unused*/) {
     PyObject* source = src.ptr();
     if (THPUtils_checkLong(source)) {
       this->value = THPUtils_unpackLong(source);

View File

@@ -1198,7 +1198,7 @@ class TransferEvents {
 class TransferEvents {
  public:
   template <class... Args>
-  TransferEvents(Args&&...) {}
+  TransferEvents(Args&&... /*unused*/) {}
 };
 #endif

View File

@@ -447,7 +447,7 @@ struct TORCH_API Result : public std::enable_shared_from_this<Result> {
         extra_fields_{std::move(extra_fields)} {}
   template <EventType E>
-  static EventType deduceTag(const ExtraFields<E>&) {
+  static EventType deduceTag(const ExtraFields<E>& /*unused*/) {
     return E;
   }
 };
@@ -689,21 +689,22 @@ class TORCH_API RecordQueue {
 };
 TORCH_API bool get_record_concrete_inputs_enabled();
-TORCH_API void set_record_concrete_inputs_enabled_fn(std::function<bool()>);
-TORCH_API void set_record_concrete_inputs_enabled_val(bool);
+TORCH_API void set_record_concrete_inputs_enabled_fn(
+    std::function<bool()> /*fn*/);
+TORCH_API void set_record_concrete_inputs_enabled_val(bool /*val*/);
 TORCH_API bool get_fwd_bwd_enabled();
-TORCH_API void set_fwd_bwd_enabled_fn(std::function<bool()>);
-TORCH_API void set_fwd_bwd_enabled_val(bool);
+TORCH_API void set_fwd_bwd_enabled_fn(std::function<bool()> /*fn*/);
+TORCH_API void set_fwd_bwd_enabled_val(bool /*val*/);
 TORCH_API bool get_cuda_sync_enabled();
-TORCH_API void set_cuda_sync_enabled_fn(std::function<bool()>);
-TORCH_API void set_cuda_sync_enabled_val(bool);
+TORCH_API void set_cuda_sync_enabled_fn(std::function<bool()> /*fn*/);
+TORCH_API void set_cuda_sync_enabled_val(bool /*val*/);
 // Comms related RecordFunctions will record information about tensor storage
 // locations.
 TORCH_API bool get_record_tensor_addrs_enabled();
-TORCH_API void set_record_tensor_addrs_enabled_fn(std::function<bool()>);
-TORCH_API void set_record_tensor_addrs_enabled_val(bool);
+TORCH_API void set_record_tensor_addrs_enabled_fn(std::function<bool()> /*fn*/);
+TORCH_API void set_record_tensor_addrs_enabled_val(bool /*val*/);
 } // namespace torch::profiler::impl

View File

@@ -50,7 +50,7 @@ struct RawTensors {
   }
   template <typename T>
-  void operator()(T&) {}
+  void operator()(T& /*unused*/) {}
   std::vector<RawTensorInfo> tensors_;
 };

View File

@@ -13,9 +13,9 @@ struct NoOpPythonTracer : public PythonTracerBase {
   void restart() override {}
   void register_gc_callback() override {}
   std::vector<std::shared_ptr<Result>> getEvents(
-      std::function<c10::time_t(c10::approx_time_t)>,
-      std::vector<CompressedEvent>&,
-      c10::time_t) override {
+      std::function<c10::time_t(c10::approx_time_t)> /*time_converter*/,
+      std::vector<CompressedEvent>& /*enters*/,
+      c10::time_t /*end_time_ns*/) override {
     return {};
   }
 };
@@ -25,7 +25,7 @@ struct NoOpMemoryPythonTracer : public PythonMemoryTracerBase {
   ~NoOpMemoryPythonTracer() override = default;
   void start() override {}
   void stop() override {}
-  void export_memory_history(const std::string&) override {}
+  void export_memory_history(const std::string& /*path*/) override {}
 };
 } // namespace

View File

@@ -88,7 +88,7 @@ class PerfProfiler {
   /* Disable counting and fill in the caller supplied container with delta
    * calculated from the start count values since last Enable() */
-  void Disable(perf_counters_t&);
+  void Disable(perf_counters_t& /*vals*/);
  private:
   uint64_t CalcDelta(uint64_t start, uint64_t end) const;

View File

@@ -89,7 +89,7 @@ struct type_caster<std::shared_ptr<torch::CapturedTraceback>> {
       std::shared_ptr<torch::CapturedTraceback>,
       _("torch._C._profiler.CapturedTraceback"));
-  bool load(handle src, bool) {
+  bool load(handle src, bool /*unused*/) {
     if (Py_TYPE(src.ptr()) == &THPCapturedTracebackType) {
       value = reinterpret_cast<THPCapturedTraceback*>(src.ptr())->data;
       return true;

View File

@@ -20,8 +20,12 @@ struct ITTThreadLocalState : ProfilerStateBase {
     return ActiveProfilerType::ITT;
   }
-  void reportMemoryUsage(void*, int64_t, size_t, size_t, c10::Device) override {
-  }
+  void reportMemoryUsage(
+      void* /*ptr*/,
+      int64_t /*alloc_size*/,
+      size_t /*total_allocated*/,
+      size_t /*total_reserved*/,
+      c10::Device /*device*/) override {}
   static ITTThreadLocalState* getTLS() {
     auto tls = ProfilerStateBase::get(/*global=*/false);

View File

@@ -20,8 +20,12 @@ struct NVTXThreadLocalState : ProfilerStateBase {
     return ActiveProfilerType::NVTX;
   }
-  void reportMemoryUsage(void*, int64_t, size_t, size_t, c10::Device) override {
-  }
+  void reportMemoryUsage(
+      void* /*ptr*/,
+      int64_t /*alloc_size*/,
+      size_t /*total_allocated*/,
+      size_t /*total_reserved*/,
+      c10::Device /*device*/) override {}
   static NVTXThreadLocalState* getTLS() {
     auto tls = ProfilerStateBase::get(/*global=*/false);

View File

@@ -354,7 +354,7 @@ std::string dispatch_keyset_string(c10::DispatchKeySet keyset) {
 namespace pybind11::detail {
-bool type_caster<at::Tensor>::load(handle src, bool) {
+bool type_caster<at::Tensor>::load(handle src, bool /*unused*/) {
   PyObject* obj = src.ptr();
   if (THPVariable_Check(obj)) {
     value = THPVariable_Unpack(obj);
@@ -370,7 +370,7 @@ handle type_caster<at::Tensor>::cast(
   return handle(THPVariable_Wrap(src));
 }
-bool type_caster<at::IntArrayRef>::load(handle src, bool) {
+bool type_caster<at::IntArrayRef>::load(handle src, bool /*unused*/) {
   PyObject* source = src.ptr();
   auto tuple = PyTuple_Check(source);
   if (tuple || PyList_Check(source)) {
@@ -403,7 +403,7 @@ handle type_caster<at::IntArrayRef>::cast(
   return handle(THPUtils_packInt64Array(src.size(), src.data()));
 }
-bool type_caster<at::SymIntArrayRef>::load(handle src, bool) {
+bool type_caster<at::SymIntArrayRef>::load(handle src, bool /*unused*/) {
   PyObject* source = src.ptr();
   auto tuple = PyTuple_Check(source);
@@ -444,7 +444,9 @@ handle type_caster<at::SymIntArrayRef>::cast(
   return t.release();
 }
-bool type_caster<at::ArrayRef<c10::SymNode>>::load(handle src, bool) {
+bool type_caster<at::ArrayRef<c10::SymNode>>::load(
+    handle src,
+    bool /*unused*/) {
   TORCH_INTERNAL_ASSERT(0, "NYI");
 }
 handle type_caster<at::ArrayRef<c10::SymNode>>::cast(

View File

@@ -172,7 +172,7 @@ template <>
 TORCH_API void THP_decodeBuffer<bool, bool>(
     bool* dst,
     const uint8_t* src,
-    bool,
+    bool /*unused*/,
     size_t len) {
   for (const auto i : c10::irange(len)) {
     dst[i] = (int)src[i] != 0 ? true : false;

View File

@@ -348,7 +348,7 @@ inline static bool array_has_torch_function(
   return false;
 }
-PyObject* THPModule_has_torch_function(PyObject*, PyObject* arg) {
+PyObject* THPModule_has_torch_function(PyObject* /*unused*/, PyObject* arg) {
   bool result = false;
   if (PyTuple_CheckExact(arg) || PyList_CheckExact(arg)) {
     // Fast path:
@@ -372,7 +372,9 @@ PyObject* THPModule_has_torch_function(PyObject*, PyObject* arg) {
   Py_RETURN_FALSE;
 }
-PyObject* THPModule_has_torch_function_unary(PyObject*, PyObject* obj) {
+PyObject* THPModule_has_torch_function_unary(
+    PyObject* /*unused*/,
+    PyObject* obj) {
   // Special case `THPModule_has_torch_function` for the single arg case.
   if (torch::check_has_torch_function(obj)) {
     Py_RETURN_TRUE;
@@ -381,7 +383,7 @@ PyObject* THPModule_has_torch_function_unary(PyObject*, PyObject* obj) {
 }
 PyObject* THPModule_has_torch_function_variadic(
-    PyObject*,
+    PyObject* /*unused*/,
     PyObject* const* args,
     Py_ssize_t nargs) {
   if (array_has_torch_function(args, nargs)) {

View File

@@ -37,9 +37,11 @@ PyObject* THPModule_DisableTorchFunctionType();
 PyObject* THPModule_DisableTorchFunctionSubclassType();
 PyObject* THPModule_disable_torch_function(PyObject* self, PyObject* args);
 PyObject* THPModule_disable_torch_dispatch(PyObject* self, PyObject* args);
-PyObject* THPModule_has_torch_function(PyObject*, PyObject* arg);
-PyObject* THPModule_has_torch_function_unary(PyObject*, PyObject* obj);
+PyObject* THPModule_has_torch_function(PyObject* /*unused*/, PyObject* arg);
+PyObject* THPModule_has_torch_function_unary(
+    PyObject* /*unused*/,
+    PyObject* obj);
 PyObject* THPModule_has_torch_function_variadic(
-    PyObject*,
+    PyObject* /*unused*/,
     PyObject* const* args,
     Py_ssize_t nargs);

View File

@@ -4,7 +4,7 @@
 namespace pybind11::detail {
-bool type_caster<c10::SymInt>::load(py::handle src, bool) {
+bool type_caster<c10::SymInt>::load(py::handle src, bool /*unused*/) {
   if (torch::is_symint(src)) {
     auto node = src.attr("node");
     if (py::isinstance<c10::SymNodeImpl>(node)) {
@@ -62,7 +62,7 @@ py::handle type_caster<c10::SymInt>::cast(
   }
 }
-bool type_caster<c10::SymFloat>::load(py::handle src, bool) {
+bool type_caster<c10::SymFloat>::load(py::handle src, bool /*unused*/) {
   if (torch::is_symfloat(src)) {
     value = c10::SymFloat(static_cast<c10::SymNode>(
         c10::make_intrusive<torch::impl::PythonSymNodeImpl>(src.attr("node"))));
@@ -92,7 +92,7 @@ py::handle type_caster<c10::SymFloat>::cast(
   }
 }
-bool type_caster<c10::SymBool>::load(py::handle src, bool) {
+bool type_caster<c10::SymBool>::load(py::handle src, bool /*unused*/) {
   if (torch::is_symbool(src)) {
     value = c10::SymBool(static_cast<c10::SymNode>(
         c10::make_intrusive<torch::impl::PythonSymNodeImpl>(src.attr("node"))));
@@ -122,7 +122,7 @@ py::handle type_caster<c10::SymBool>::cast(
   }
 }
-bool type_caster<c10::Scalar>::load(py::handle src, bool) {
+bool type_caster<c10::Scalar>::load(py::handle src, bool /*unused*/) {
   TORCH_INTERNAL_ASSERT(
       0, "pybind11 loading for c10::Scalar NYI (file a bug if you need it)");
 }

View File

@@ -38,7 +38,7 @@ struct TORCH_PYTHON_API type_caster<at::Tensor> {
   // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
   PYBIND11_TYPE_CASTER(at::Tensor, _("torch.Tensor"));
-  bool load(handle src, bool);
+  bool load(handle src, bool /*unused*/);
   static handle cast(
       const at::Tensor& src,
@@ -53,7 +53,7 @@ struct type_caster<at::Storage> {
   // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
   PYBIND11_TYPE_CASTER(at::Storage, _("torch.StorageBase"));
-  bool load(handle src, bool) {
+  bool load(handle src, bool /*unused*/) {
     PyObject* obj = src.ptr();
     if (torch::isStorage(obj)) {
      value = torch::createStorage(obj);
@@ -76,7 +76,7 @@ struct type_caster<at::Generator> {
   // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
   PYBIND11_TYPE_CASTER(at::Generator, _("torch.Generator"));
-  bool load(handle src, bool) {
+  bool load(handle src, bool /*unused*/) {
     PyObject* obj = src.ptr();
     if (THPGenerator_Check(obj)) {
       value = reinterpret_cast<THPGenerator*>(obj)->cdata;
@@ -99,7 +99,7 @@ struct TORCH_PYTHON_API type_caster<at::IntArrayRef> {
   // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
   PYBIND11_TYPE_CASTER(at::IntArrayRef, _("Tuple[int, ...]"));
-  bool load(handle src, bool);
+  bool load(handle src, bool /*unused*/);
   static handle cast(
       at::IntArrayRef src,
       return_value_policy /* policy */,
@@ -115,7 +115,7 @@ struct TORCH_PYTHON_API type_caster<at::SymIntArrayRef> {
   // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
   PYBIND11_TYPE_CASTER(at::SymIntArrayRef, _("List[int]"));
-  bool load(handle src, bool);
+  bool load(handle src, bool /*unused*/);
   static handle cast(
       at::SymIntArrayRef src,
       return_value_policy /* policy */,
@@ -131,7 +131,7 @@ struct TORCH_PYTHON_API type_caster<at::ArrayRef<c10::SymNode>> {
   // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
   PYBIND11_TYPE_CASTER(at::ArrayRef<c10::SymNode>, _("List[SymNode]"));
-  bool load(handle src, bool);
+  bool load(handle src, bool /*unused*/);
   static handle cast(
       at::ArrayRef<c10::SymNode> src,
       return_value_policy /* policy */,
@@ -147,7 +147,7 @@ struct type_caster<at::MemoryFormat> {
   // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
   PYBIND11_TYPE_CASTER(at::MemoryFormat, _("torch.memory_format"));
-  bool load(handle src, bool) {
+  bool load(handle src, bool /*unused*/) {
     PyObject* obj = src.ptr();
     if (THPMemoryFormat_Check(obj)) {
       value = reinterpret_cast<THPMemoryFormat*>(obj)->memory_format;
@@ -175,7 +175,7 @@ struct type_caster<at::Device> {
   // after a successful call to load.
   type_caster() : value(c10::kCPU) {}
-  bool load(handle src, bool) {
+  bool load(handle src, bool /*unused*/) {
     PyObject* obj = src.ptr();
     if (THPDevice_Check(obj)) {
       value = reinterpret_cast<THPDevice*>(obj)->device;
@@ -204,7 +204,7 @@ struct type_caster<at::ScalarType> {
   // after a successful call to load.
   type_caster() : value(at::kFloat) {}
-  bool load(handle src, bool) {
+  bool load(handle src, bool /*unused*/) {
     PyObject* obj = src.ptr();
     if (THPDtype_Check(obj)) {
      value = reinterpret_cast<THPDtype*>(obj)->scalar_type;
@@ -233,7 +233,7 @@ struct type_caster<c10::Stream> {
   // after a successful call to load.
   type_caster() : value(c10::Stream::DEFAULT, c10::Device(c10::kCPU, 0)) {}
-  bool load(handle src, bool) {
+  bool load(handle src, bool /*unused*/) {
     PyObject* obj = src.ptr();
     if (THPStream_Check(obj)) {
       value = c10::Stream::unpack3(
@@ -286,7 +286,7 @@ struct TORCH_PYTHON_API type_caster<c10::Scalar> {
   PYBIND11_TYPE_CASTER(
       c10::Scalar,
       _("Union[Number, torch.SymInt, torch.SymFloat, torch.SymBool]"));
-  bool load(py::handle src, bool);
+  bool load(py::handle src, bool /*unused*/);
   static py::handle cast(
       const c10::Scalar& si,
@@ -298,7 +298,7 @@ template <>
 struct TORCH_PYTHON_API type_caster<c10::SymInt> {
  public:
   PYBIND11_TYPE_CASTER(c10::SymInt, _("Union[int, torch.SymInt]"));
-  bool load(py::handle src, bool);
+  bool load(py::handle src, bool /*unused*/);
   static py::handle cast(
       const c10::SymInt& si,
@@ -310,7 +310,7 @@ template <>
 struct TORCH_PYTHON_API type_caster<c10::SymFloat> {
  public:
   PYBIND11_TYPE_CASTER(c10::SymFloat, _("float"));
-  bool load(py::handle src, bool);
+  bool load(py::handle src, bool /*unused*/);
   static py::handle cast(
       const c10::SymFloat& si,
@@ -322,7 +322,7 @@ template <>
 struct TORCH_PYTHON_API type_caster<c10::SymBool> {
  public:
   PYBIND11_TYPE_CASTER(c10::SymBool, _("Union[bool, torch.SymBool]"));
-  bool load(py::handle src, bool);
+  bool load(py::handle src, bool /*unused*/);
   static py::handle cast(
       const c10::SymBool& si,
@@ -336,7 +336,7 @@ struct type_caster<c10::complex<T>> {
   // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
   PYBIND11_TYPE_CASTER(c10::complex<T>, _("complex"));
-  bool load(handle src, bool) {
+  bool load(handle src, bool /*unused*/) {
     PyObject* obj = src.ptr();
     // Referred from `THPUtils_unpackComplexDouble`

View File

@@ -9,6 +9,7 @@ namespace torch::utils {
 void initializeMemoryFormats();
 // This methods returns a borrowed reference!
-TORCH_PYTHON_API PyObject* getTHPMemoryFormat(c10::MemoryFormat);
+TORCH_PYTHON_API PyObject* getTHPMemoryFormat(
+    c10::MemoryFormat /*memory_format*/);
 } // namespace torch::utils

View File

@@ -101,7 +101,10 @@ template <
     typename Function,
     typename Accessor,
     size_t... Is>
-ReturnType unpack(Function function, Accessor accessor, Indices<Is...>) {
+ReturnType unpack(
+    Function function,
+    Accessor accessor,
+    Indices<Is...> /*unused*/) {
   return ReturnType(function(accessor.template operator()<Ts>(Is)...));
 }

View File

@@ -36,7 +36,7 @@ class THManagedMapAllocator : private THManagedMapAllocatorInit,
       const char* filename,
       int flags,
      size_t size);
-  static THManagedMapAllocator* fromDataPtr(const at::DataPtr&);
+  static THManagedMapAllocator* fromDataPtr(const at::DataPtr& /*dptr*/);
   const char* manager_handle() const {
     return manager_handle_.c_str();

View File

@@ -27,7 +27,7 @@ int unistd_close(int fh) {
 #endif
 }
-inline void incr(ssize_t) {}
+inline void incr(ssize_t /*unused*/) {}
 template <typename Offset>
 inline void incr(ssize_t n, Offset& offset) {
   offset += static_cast<Offset>(n);

View File

@@ -111,8 +111,8 @@ class File {
   void swap(File& other) noexcept;
   // movable
-  File(File&&) noexcept;
-  File& operator=(File&&) noexcept;
+  File(File&& /*other*/) noexcept;
+  File& operator=(File&& /*other*/) noexcept;
  private:
   // unique

View File

@@ -32,7 +32,7 @@ using ITreeMapNoReturnFn =
 using IValueApplyFn =
     void (*)(ITreeMapNoReturnFn, const c10::IValue&, const ITreeSpec&);
-nlohmann::json defaultContextLoadFn(std::string_view);
+nlohmann::json defaultContextLoadFn(std::string_view /*context*/);
 struct NodeDef {
   ITreeFlattenFn flattenFn;

View File

@@ -138,8 +138,8 @@ void ExecutionFrame::updateMovableOutputs() {
 ExecutionFrame::ExecutionFrame(
     const Graph& graph,
     size_t numValues,
-    const std::vector<ValueId>&,
-    const std::vector<ValueId>&)
+    const std::vector<ValueId>& /*unused*/,
+    const std::vector<ValueId>& /*unused*/)
     : graph_(graph) {
   allValues_.resize(numValues);
 }

View File

@@ -71,7 +71,7 @@ class Type {
 // These are all the constant types that are allowed as attributes on Nodes.
 struct None {};
 // None always equals itself
-inline bool operator==(const None&, const None&) {
+inline bool operator==(const None& /*unused*/, const None& /*unused*/) {
   return true;
 }