Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 12:54:11 +08:00)
[3/N] Fix Wunused-parameter warnings (#131271)
Follows #131170

Pull Request resolved: https://github.com/pytorch/pytorch/pull/131271
Approved by: https://github.com/ezyang
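Most of the hunks below silence -Wunused-parameter by marking deliberately ignored parameters with the C++17 [[maybe_unused]] attribute rather than deleting the parameter names. A minimal, self-contained sketch of the idea (the function and names here are illustrative, not taken from the PyTorch sources):

// Build with warnings enabled, e.g.: g++ -std=c++17 -Wall -Wextra -Wunused-parameter demo.cpp
#include <cstddef>
#include <cstdio>

// A fixed interface can force us to accept parameters we never read;
// [[maybe_unused]] records that the omission is intentional and keeps
// -Wunused-parameter quiet without dropping the self-documenting names.
void copy_data(void* dest [[maybe_unused]],
               const void* src [[maybe_unused]],
               std::size_t count) {
  std::printf("would copy %zu bytes\n", count);
}

int main() {
  copy_data(nullptr, nullptr, 0); // compiles cleanly under -Wunused-parameter
  return 0;
}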
@@ -330,7 +330,7 @@ struct MetaAllocator final : public at::Allocator {
   static void deleter(void* const pointer) {
     TORCH_INTERNAL_ASSERT(!pointer);
   }
-  DataPtr allocate(const size_t nbytes) override {
+  DataPtr allocate(const size_t nbytes [[maybe_unused]]) override {
     return {nullptr, nullptr, &deleter, at::Device(DeviceType::Meta)};
   }
   DeleterFnPtr raw_deleter() const override {
@@ -28,7 +28,7 @@
 #endif

 namespace {
-void functionalizeFallback(const c10::OperatorHandle& op, c10::DispatchKeySet dispatchKeySet, torch::jit::Stack* stack) {
+void functionalizeFallback(const c10::OperatorHandle& op, c10::DispatchKeySet dispatchKeySet [[maybe_unused]], torch::jit::Stack* stack) {
   const auto& schema = op.schema();
   // NB: auto_functionalize handles the case where outputs do not have alias info.
   // This error message therefore suggests users to modify their custom op to the
@@ -125,7 +125,7 @@ namespace {
 // - when we resize to a larger size, it acts as a mutation
 // - when we resize to a smaller size, it acts as a view
 // See Note [resize_ in Functionalization] for more dtails
-static const at::Tensor & resize__functionalization(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, std::optional<at::MemoryFormat> memory_format) {
+static const at::Tensor & resize__functionalization(c10::DispatchKeySet dispatchKeySet [[maybe_unused]], const at::Tensor & self, at::IntArrayRef size, std::optional<at::MemoryFormat> memory_format) {
   // First unwrap the tensor arguments
   at::Tensor self_;
   if (at::functionalization::impl::isFunctionalTensor(self)) {
@@ -169,14 +169,14 @@ static const at::Tensor & resize__functionalization(c10::DispatchKeySet dispatch
   // We have to emulate this "slicing" with an as_strided call.
   auto reapply_views = at::functionalization::impl::getFunctionalizationReapplyViewsTLS();
   at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
-    [reapply_views = reapply_views, size = size.vec()](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
+    [reapply_views = reapply_views, size = size.vec()](const at::Tensor & base, int64_t mutated_view_idx [[maybe_unused]]) -> at::Tensor {
       if (reapply_views) {
         return base.as_strided(size, c10::contiguous_strides(size));
       } else {
         return at::as_strided_copy(base, size, c10::contiguous_strides(size));
       }
     },
-    [size = size.vec()](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
+    [size = size.vec()](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx [[maybe_unused]]) -> at::Tensor {
       return base.as_strided_scatter(mutated_view, size, c10::contiguous_strides(size));
     },
     /*has_symbolic_inputs=*/false
@@ -302,10 +302,10 @@ static at::Tensor _unsafe_view_functionalize(const at::Tensor & self, at::SymInt
   bool has_symbolic_inputs = std::any_of(size.begin(), size.end(), [=](auto& s) { return s.is_symbolic(); });

   at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
-    [size = size.vec()](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
+    [size = size.vec()](const at::Tensor & base, int64_t mutated_view_idx [[maybe_unused]]) -> at::Tensor {
       return at::_unsafe_view_symint(base, size);
     },
-    [size = size.vec()](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx) -> at::Tensor {
+    [size = size.vec()](const at::Tensor & base, const at::Tensor & mutated_view, int64_t mutated_view_idx [[maybe_unused]]) -> at::Tensor {
       return at::_unsafe_view_symint(mutated_view, base.sym_sizes());
     },
     /*has_symbolic_inputs=*/has_symbolic_inputs
@@ -558,7 +558,7 @@ static void checkBasicAsStridedValidForSlice(
       "rewrite the `as_strided` call as a sequence of PyTorch view operations");
 }

-Tensor _reshape_alias_batching_rule(const Tensor& self, IntArrayRef sizes, IntArrayRef strides) {
+Tensor _reshape_alias_batching_rule(const Tensor& self, IntArrayRef sizes, IntArrayRef strides [[maybe_unused]]) {
   return reshape_batching_rule(self, sizes);
 }

@@ -93,11 +93,11 @@ struct TORCH_API MetaBase {
   // output. If `strides` does not match the given output strides, proxy outputs
   // will be created and passed to the IMPL function.
   virtual void set_output_strided(
-      int64_t output_idx,
-      IntArrayRef sizes,
-      IntArrayRef strides,
-      TensorOptions options,
-      DimnameList names = {}) {
+      int64_t output_idx [[maybe_unused]],
+      IntArrayRef sizes [[maybe_unused]],
+      IntArrayRef strides [[maybe_unused]],
+      TensorOptions options [[maybe_unused]],
+      DimnameList names [[maybe_unused]] = {}) {
     TORCH_INTERNAL_ASSERT(false, "set_output_strided not implemented.");
   }

@@ -105,11 +105,11 @@ struct TORCH_API MetaBase {
   // outputs. This function has the same behavior as the old `set_output`: it
   // will only re-stride if the given output was resized.
   virtual void set_output_raw_strided(
-      int64_t output_idx,
-      IntArrayRef sizes,
-      IntArrayRef strides_hint,
-      TensorOptions options,
-      DimnameList names = {}) {
+      int64_t output_idx [[maybe_unused]],
+      IntArrayRef sizes [[maybe_unused]],
+      IntArrayRef strides_hint [[maybe_unused]],
+      TensorOptions options [[maybe_unused]],
+      DimnameList names [[maybe_unused]] = {}) {
     TORCH_INTERNAL_ASSERT(false, "set_output_strided not implemented.");
   }

@@ -1348,7 +1348,7 @@ struct TORCH_API SymIntType : public Type {
   std::string str() const override {
     return "SymInt";
   }
-  std::string annotation_str_impl(const TypePrinter& printer = nullptr) const override {
+  std::string annotation_str_impl(const TypePrinter& printer [[maybe_unused]] = nullptr) const override {
     return "int";
   }
   static const TypeKind Kind = TypeKind::SymIntType;
@@ -1368,7 +1368,7 @@ struct TORCH_API SymFloatType : public Type {
   std::string str() const override {
     return "SymFloat";
   }
-  std::string annotation_str_impl(const TypePrinter& printer = nullptr) const override {
+  std::string annotation_str_impl(const TypePrinter& printer [[maybe_unused]] = nullptr) const override {
     return "float";
   }
   static const TypeKind Kind = TypeKind::SymFloatType;
@@ -1388,7 +1388,7 @@ struct TORCH_API SymBoolType : public Type {
   std::string str() const override {
     return "SymBool";
   }
-  std::string annotation_str_impl(const TypePrinter& printer = nullptr) const override {
+  std::string annotation_str_impl(const TypePrinter& printer [[maybe_unused]] = nullptr) const override {
     return "bool";
   }
   static const TypeKind Kind = TypeKind::SymBoolType;
@@ -9,7 +9,8 @@
 #include <ATen/detail/AcceleratorHooksInterface.h>

 #include <string>

+C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wunused-parameter")
 namespace at {
 class Context;
 }
@@ -101,3 +101,4 @@ TORCH_API const MTIAHooksInterface& getMTIAHooks();
 TORCH_API bool isMTIAHooksBuilt();
 } // namespace detail
 } // namespace at
+C10_DIAGNOSTIC_POP()
@@ -6,6 +6,7 @@
 #include <c10/core/Device.h>
 #include <c10/core/Storage.h>
 #include <c10/util/Exception.h>
+C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wunused-parameter")
 namespace at {

 struct TORCH_API PrivateUse1HooksInterface : AcceleratorHooksInterface {
@@ -59,3 +60,4 @@ TORCH_API const at::PrivateUse1HooksInterface& getPrivateUse1Hooks();
 } // namespace detail

 } // namespace at
+C10_DIAGNOSTIC_POP()
@@ -130,7 +130,7 @@ struct ZeroTensorAllocator final : public at::Allocator {
   DeleterFnPtr raw_deleter() const override {
     return deleter;
   }
-  void copy_data(void* dest, const void* src, std::size_t count) const final {}
+  void copy_data(void* dest [[maybe_unused]], const void* src [[maybe_unused]], std::size_t count [[maybe_unused]]) const final {}
   at::Device device_;
 };

@@ -33,15 +33,21 @@ class C10_API ConstantSymNodeImpl : public SymNodeImpl {
   bool is_float() override {
     return false;
   }
-  int64_t guard_int(const char* file, int64_t line) override {
+  int64_t guard_int(
+      const char* file [[maybe_unused]],
+      int64_t line [[maybe_unused]]) override {
     TORCH_CHECK(is_int(), "not an int");
     return int_();
   }
-  bool guard_bool(const char* file, int64_t line) override {
+  bool guard_bool(
+      const char* file [[maybe_unused]],
+      int64_t line [[maybe_unused]]) override {
     TORCH_CHECK(is_bool(), "not a bool");
     return bool_();
   }
-  double guard_float(const char* file, int64_t line) override {
+  double guard_float(
+      const char* file [[maybe_unused]],
+      int64_t line [[maybe_unused]]) override {
     TORCH_CHECK(false, "not a float");
   }
   int64_t int_() override {
@@ -90,7 +90,10 @@ C10_API std::ostream& operator<<(std::ostream& os, const SymBool& s);
 #define TORCH_SYM_INTERNAL_ASSERT(cond, ...) \
   TORCH_INTERNAL_ASSERT((cond).expect_true(__FILE__, __LINE__), __VA_ARGS__)

-inline bool guard_size_oblivious(bool b, const char* file, int64_t line) {
+inline bool guard_size_oblivious(
+    bool b,
+    const char* file [[maybe_unused]],
+    int64_t line [[maybe_unused]]) {
   return b;
 }

@@ -1891,7 +1891,7 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target {
   * storage / storage_offset). See NOTE [ Metadata Change for a Detached Tensor
   * ] for details.
   */
-  void set_allow_tensor_metadata_change(bool value) {
+  void set_allow_tensor_metadata_change(bool value [[maybe_unused]]) {
     // TODO: at some point, we should kill this field completely.
     allow_tensor_metadata_change_ = true;
   }
@@ -737,14 +737,14 @@ class TORCH_API Library final {
   // These overloads cover cases when a SelectiveStr (see Note [Selective
   // build]) has been disabled at compile time. In that case, don't generate
   // any code referencing the passed in functions at all.
-  Library& def(detail::SelectiveStr<false>, const std::vector<at::Tag>& tags = {}) & {
+  Library& def(detail::SelectiveStr<false>, const std::vector<at::Tag>& tags [[maybe_unused]] = {}) & {
     return *this;
   }
   Library& def(detail::SelectiveStr<true> raw_schema, const std::vector<at::Tag>& tags = {}) & {
     return def(raw_schema.operator const char*(), tags);
   }
   template <typename Func>
-  Library& def(detail::SelectiveStr<false>, Func&& /*raw_f*/, const std::vector<at::Tag>& tags = {}) & {
+  Library& def(detail::SelectiveStr<false>, Func&& /*raw_f*/, const std::vector<at::Tag>& tags [[maybe_unused]] = {}) & {
     return *this;
   }
   template <typename Func>