[3/N] Fix Wunused-parameter warnings (#131271)

Follows #131170

Pull Request resolved: https://github.com/pytorch/pytorch/pull/131271
Approved by: https://github.com/ezyang
This commit is contained in:
cyy
2024-07-20 23:31:03 +00:00
committed by PyTorch MergeBot
parent d57af32e63
commit 1d1d074072
12 changed files with 42 additions and 30 deletions

View File

@@ -330,7 +330,7 @@ struct MetaAllocator final : public at::Allocator {
static void deleter(void* const pointer) { static void deleter(void* const pointer) {
TORCH_INTERNAL_ASSERT(!pointer); TORCH_INTERNAL_ASSERT(!pointer);
} }
DataPtr allocate(const size_t nbytes) override { DataPtr allocate(const size_t nbytes [[maybe_unused]]) override {
return {nullptr, nullptr, &deleter, at::Device(DeviceType::Meta)}; return {nullptr, nullptr, &deleter, at::Device(DeviceType::Meta)};
} }
DeleterFnPtr raw_deleter() const override { DeleterFnPtr raw_deleter() const override {

View File

@@ -28,7 +28,7 @@
#endif #endif
namespace { namespace {
void functionalizeFallback(const c10::OperatorHandle& op, c10::DispatchKeySet dispatchKeySet, torch::jit::Stack* stack) { void functionalizeFallback(const c10::OperatorHandle& op, c10::DispatchKeySet dispatchKeySet [[maybe_unused]], torch::jit::Stack* stack) {
const auto& schema = op.schema(); const auto& schema = op.schema();
// NB: auto_functionalize handles the case where outputs do not have alias info. // NB: auto_functionalize handles the case where outputs do not have alias info.
// This error message therefore suggests users to modify their custom op to the // This error message therefore suggests users to modify their custom op to the
@@ -125,7 +125,7 @@ namespace {
// - when we resize to a larger size, it acts as a mutation // - when we resize to a larger size, it acts as a mutation
// - when we resize to a smaller size, it acts as a view // - when we resize to a smaller size, it acts as a view
// See Note [resize_ in Functionalization] for more details // See Note [resize_ in Functionalization] for more details
static const at::Tensor & resize__functionalization(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, std::optional<at::MemoryFormat> memory_format) { static const at::Tensor & resize__functionalization(c10::DispatchKeySet dispatchKeySet [[maybe_unused]], const at::Tensor & self, at::IntArrayRef size, std::optional<at::MemoryFormat> memory_format) {
// First unwrap the tensor arguments // First unwrap the tensor arguments
at::Tensor self_; at::Tensor self_;
if (at::functionalization::impl::isFunctionalTensor(self)) { if (at::functionalization::impl::isFunctionalTensor(self)) {
@@ -169,14 +169,14 @@ static const at::Tensor & resize__functionalization(c10::DispatchKeySet dispatch
// We have to emulate this "slicing" with an as_strided call. // We have to emulate this "slicing" with an as_strided call.
auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS(); auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
[reapply_views = reapply_views, size = size.vec()](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { [reapply_views = reapply_views, size = size.vec()](const at::Tensor & base, int64_t mutated_view_idx [[maybe_unused]]) -> at::Tensor {
if (reapply_views) { if (reapply_views) {
return base.as_strided(size, c10::contiguous_strides(size)); return base.as_strided(size, c10::contiguous_strides(size));
} else { } else {
return at::as_strided_copy(base, size, c10::contiguous_strides(size)); return at::as_strided_copy(base, size, c10::contiguous_strides(size));
} }
}, },
[size = size.vec()](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { [size = size.vec()](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx [[maybe_unused]]) -> at::Tensor {
return base.as_strided_scatter(mutated_view, size, c10::contiguous_strides(size)); return base.as_strided_scatter(mutated_view, size, c10::contiguous_strides(size));
}, },
/*has_symbolic_inputs=*/false /*has_symbolic_inputs=*/false
@@ -302,10 +302,10 @@ static at::Tensor _unsafe_view_functionalize(const at::Tensor & self, at::SymInt
bool has_symbolic_inputs = std::any_of(size.begin(), size.end(), [=](auto& s) { return s.is_symbolic(); }); bool has_symbolic_inputs = std::any_of(size.begin(), size.end(), [=](auto& s) { return s.is_symbolic(); });
at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
[size = size.vec()](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor { [size = size.vec()](const at::Tensor & base, int64_t mutated_view_idx [[maybe_unused]]) -> at::Tensor {
return at::_unsafe_view_symint(base, size); return at::_unsafe_view_symint(base, size);
}, },
[size = size.vec()](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor { [size = size.vec()](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx [[maybe_unused]]) -> at::Tensor {
return at::_unsafe_view_symint(mutated_view, base.sym_sizes()); return at::_unsafe_view_symint(mutated_view, base.sym_sizes());
}, },
/*has_symbolic_inputs=*/has_symbolic_inputs /*has_symbolic_inputs=*/has_symbolic_inputs

View File

@@ -558,7 +558,7 @@ static void checkBasicAsStridedValidForSlice(
"rewrite the `as_strided` call as a sequence of PyTorch view operations"); "rewrite the `as_strided` call as a sequence of PyTorch view operations");
} }
Tensor _reshape_alias_batching_rule(const Tensor& self, IntArrayRef sizes, IntArrayRef strides) { Tensor _reshape_alias_batching_rule(const Tensor& self, IntArrayRef sizes, IntArrayRef strides [[maybe_unused]]) {
return reshape_batching_rule(self, sizes); return reshape_batching_rule(self, sizes);
} }

View File

@@ -93,11 +93,11 @@ struct TORCH_API MetaBase {
// output. If `strides` does not match the given output strides, proxy outputs // output. If `strides` does not match the given output strides, proxy outputs
// will be created and passed to the IMPL function. // will be created and passed to the IMPL function.
virtual void set_output_strided( virtual void set_output_strided(
int64_t output_idx, int64_t output_idx [[maybe_unused]],
IntArrayRef sizes, IntArrayRef sizes [[maybe_unused]],
IntArrayRef strides, IntArrayRef strides [[maybe_unused]],
TensorOptions options, TensorOptions options [[maybe_unused]],
DimnameList names = {}) { DimnameList names [[maybe_unused]] = {}) {
TORCH_INTERNAL_ASSERT(false, "set_output_strided not implemented."); TORCH_INTERNAL_ASSERT(false, "set_output_strided not implemented.");
} }
@@ -105,11 +105,11 @@ struct TORCH_API MetaBase {
// outputs. This function has the same behavior as the old `set_output`: it // outputs. This function has the same behavior as the old `set_output`: it
// will only re-stride if the given output was resized. // will only re-stride if the given output was resized.
virtual void set_output_raw_strided( virtual void set_output_raw_strided(
int64_t output_idx, int64_t output_idx [[maybe_unused]],
IntArrayRef sizes, IntArrayRef sizes [[maybe_unused]],
IntArrayRef strides_hint, IntArrayRef strides_hint [[maybe_unused]],
TensorOptions options, TensorOptions options [[maybe_unused]],
DimnameList names = {}) { DimnameList names [[maybe_unused]] = {}) {
TORCH_INTERNAL_ASSERT(false, "set_output_strided not implemented."); TORCH_INTERNAL_ASSERT(false, "set_output_strided not implemented.");
} }

View File

@@ -1348,7 +1348,7 @@ struct TORCH_API SymIntType : public Type {
std::string str() const override { std::string str() const override {
return "SymInt"; return "SymInt";
} }
std::string annotation_str_impl(const TypePrinter& printer = nullptr) const override { std::string annotation_str_impl(const TypePrinter& printer [[maybe_unused]] = nullptr) const override {
return "int"; return "int";
} }
static const TypeKind Kind = TypeKind::SymIntType; static const TypeKind Kind = TypeKind::SymIntType;
@@ -1368,7 +1368,7 @@ struct TORCH_API SymFloatType : public Type {
std::string str() const override { std::string str() const override {
return "SymFloat"; return "SymFloat";
} }
std::string annotation_str_impl(const TypePrinter& printer = nullptr) const override { std::string annotation_str_impl(const TypePrinter& printer [[maybe_unused]] = nullptr) const override {
return "float"; return "float";
} }
static const TypeKind Kind = TypeKind::SymFloatType; static const TypeKind Kind = TypeKind::SymFloatType;
@@ -1388,7 +1388,7 @@ struct TORCH_API SymBoolType : public Type {
std::string str() const override { std::string str() const override {
return "SymBool"; return "SymBool";
} }
std::string annotation_str_impl(const TypePrinter& printer = nullptr) const override { std::string annotation_str_impl(const TypePrinter& printer [[maybe_unused]] = nullptr) const override {
return "bool"; return "bool";
} }
static const TypeKind Kind = TypeKind::SymBoolType; static const TypeKind Kind = TypeKind::SymBoolType;

View File

@@ -9,7 +9,7 @@
#include <ATen/detail/AcceleratorHooksInterface.h> #include <ATen/detail/AcceleratorHooksInterface.h>
#include <string> #include <string>
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wunused-parameter")
namespace at { namespace at {
class Context; class Context;
} }
@@ -101,3 +101,4 @@ TORCH_API const MTIAHooksInterface& getMTIAHooks();
TORCH_API bool isMTIAHooksBuilt(); TORCH_API bool isMTIAHooksBuilt();
} // namespace detail } // namespace detail
} // namespace at } // namespace at
C10_DIAGNOSTIC_POP()

View File

@@ -6,6 +6,7 @@
#include <c10/core/Device.h> #include <c10/core/Device.h>
#include <c10/core/Storage.h> #include <c10/core/Storage.h>
#include <c10/util/Exception.h> #include <c10/util/Exception.h>
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wunused-parameter")
namespace at { namespace at {
struct TORCH_API PrivateUse1HooksInterface : AcceleratorHooksInterface { struct TORCH_API PrivateUse1HooksInterface : AcceleratorHooksInterface {
@@ -59,3 +60,4 @@ TORCH_API const at::PrivateUse1HooksInterface& getPrivateUse1Hooks();
} // namespace detail } // namespace detail
} // namespace at } // namespace at
C10_DIAGNOSTIC_POP()

View File

@@ -130,7 +130,7 @@ struct ZeroTensorAllocator final : public at::Allocator {
DeleterFnPtr raw_deleter() const override { DeleterFnPtr raw_deleter() const override {
return deleter; return deleter;
} }
void copy_data(void* dest, const void* src, std::size_t count) const final {} void copy_data(void* dest [[maybe_unused]], const void* src [[maybe_unused]], std::size_t count [[maybe_unused]]) const final {}
at::Device device_; at::Device device_;
}; };

View File

@@ -33,15 +33,21 @@ class C10_API ConstantSymNodeImpl : public SymNodeImpl {
bool is_float() override { bool is_float() override {
return false; return false;
} }
int64_t guard_int(const char* file, int64_t line) override { int64_t guard_int(
const char* file [[maybe_unused]],
int64_t line [[maybe_unused]]) override {
TORCH_CHECK(is_int(), "not an int"); TORCH_CHECK(is_int(), "not an int");
return int_(); return int_();
} }
bool guard_bool(const char* file, int64_t line) override { bool guard_bool(
const char* file [[maybe_unused]],
int64_t line [[maybe_unused]]) override {
TORCH_CHECK(is_bool(), "not a bool"); TORCH_CHECK(is_bool(), "not a bool");
return bool_(); return bool_();
} }
double guard_float(const char* file, int64_t line) override { double guard_float(
const char* file [[maybe_unused]],
int64_t line [[maybe_unused]]) override {
TORCH_CHECK(false, "not a float"); TORCH_CHECK(false, "not a float");
} }
int64_t int_() override { int64_t int_() override {

View File

@@ -90,7 +90,10 @@ C10_API std::ostream& operator<<(std::ostream& os, const SymBool& s);
#define TORCH_SYM_INTERNAL_ASSERT(cond, ...) \ #define TORCH_SYM_INTERNAL_ASSERT(cond, ...) \
TORCH_INTERNAL_ASSERT((cond).expect_true(__FILE__, __LINE__), __VA_ARGS__) TORCH_INTERNAL_ASSERT((cond).expect_true(__FILE__, __LINE__), __VA_ARGS__)
inline bool guard_size_oblivious(bool b, const char* file, int64_t line) { inline bool guard_size_oblivious(
bool b,
const char* file [[maybe_unused]],
int64_t line [[maybe_unused]]) {
return b; return b;
} }

View File

@@ -1891,7 +1891,7 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target {
* storage / storage_offset). See NOTE [ Metadata Change for a Detached Tensor * storage / storage_offset). See NOTE [ Metadata Change for a Detached Tensor
* ] for details. * ] for details.
*/ */
void set_allow_tensor_metadata_change(bool value) { void set_allow_tensor_metadata_change(bool value [[maybe_unused]]) {
// TODO: at some point, we should kill this field completely. // TODO: at some point, we should kill this field completely.
allow_tensor_metadata_change_ = true; allow_tensor_metadata_change_ = true;
} }

View File

@@ -737,14 +737,14 @@ class TORCH_API Library final {
// These overloads cover cases when a SelectiveStr (see Note [Selective // These overloads cover cases when a SelectiveStr (see Note [Selective
// build]) has been disabled at compile time. In that case, don't generate // build]) has been disabled at compile time. In that case, don't generate
// any code referencing the passed in functions at all. // any code referencing the passed in functions at all.
Library& def(detail::SelectiveStr<false>, const std::vector<at::Tag>& tags = {}) & { Library& def(detail::SelectiveStr<false>, const std::vector<at::Tag>& tags [[maybe_unused]] = {}) & {
return *this; return *this;
} }
Library& def(detail::SelectiveStr<true> raw_schema, const std::vector<at::Tag>& tags = {}) & { Library& def(detail::SelectiveStr<true> raw_schema, const std::vector<at::Tag>& tags = {}) & {
return def(raw_schema.operator const char*(), tags); return def(raw_schema.operator const char*(), tags);
} }
template <typename Func> template <typename Func>
Library& def(detail::SelectiveStr<false>, Func&& /*raw_f*/, const std::vector<at::Tag>& tags = {}) & { Library& def(detail::SelectiveStr<false>, Func&& /*raw_f*/, const std::vector<at::Tag>& tags [[maybe_unused]] = {}) & {
return *this; return *this;
} }
template <typename Func> template <typename Func>