Enable -Wunused on torch targets (#150077)
For GCC, ``-Wunused`` contains:

```
-Wunused-function
    Warn whenever a static function is declared but not defined or a
    non-inline static function is unused.
-Wunused-label
    Warn whenever a label is declared but not used. To suppress this
    warning use the unused attribute.
-Wunused-parameter
    Warn whenever a function parameter is unused aside from its
    declaration. To suppress this warning use the unused attribute.
-Wunused-variable
    Warn whenever a local variable or non-constant static variable is
    unused aside from its declaration. To suppress this warning use the
    unused attribute.
```

For Clang, some of the diagnostics controlled by ``-Wunused`` are enabled by default. It controls [-Wunused-argument](https://clang.llvm.org/docs/DiagnosticsReference.html#wunused-argument), [-Wunused-but-set-variable](https://clang.llvm.org/docs/DiagnosticsReference.html#wunused-but-set-variable), [-Wunused-function](https://clang.llvm.org/docs/DiagnosticsReference.html#wunused-function), [-Wunused-label](https://clang.llvm.org/docs/DiagnosticsReference.html#wunused-label), [-Wunused-lambda-capture](https://clang.llvm.org/docs/DiagnosticsReference.html#wunused-lambda-capture), [-Wunused-local-typedef](https://clang.llvm.org/docs/DiagnosticsReference.html#wunused-local-typedef), [-Wunused-private-field](https://clang.llvm.org/docs/DiagnosticsReference.html#wunused-private-field), [-Wunused-property-ivar](https://clang.llvm.org/docs/DiagnosticsReference.html#wunused-property-ivar), [-Wunused-value](https://clang.llvm.org/docs/DiagnosticsReference.html#wunused-value), and [-Wunused-variable](https://clang.llvm.org/docs/DiagnosticsReference.html#wunused-variable).

These checks are all useful. This PR aims to enable ``-Wunused`` without breaking code.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/150077
Approved by: https://github.com/zou3519, https://github.com/wdvr
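For reference, a minimal standalone sketch (hypothetical function names, not code from this PR) of the two per-declaration suppression idioms the changes below rely on: commenting out the parameter name, and the C++17 `[[maybe_unused]]` attribute.

```cpp
#include <cstdio>

// 1. Comment out the parameter name: the signature is unchanged, and an
//    unnamed parameter cannot trigger -Wunused-parameter.
void log_event(int /*verbosity*/) {}

// 2. [[maybe_unused]] (C++17): the name stays available for conditionally
//    compiled code paths, but produces no warning when it goes unused.
void trace([[maybe_unused]] int value) {
#ifdef ENABLE_TRACE
  std::printf("value = %d\n", value);
#endif
}

int main() {
  log_event(0);
  trace(42);
  return 0;
}
```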
```diff
@@ -86,7 +86,7 @@ struct TORCH_API CUDAHooksInterface : AcceleratorHooksInterface {
     TORCH_CHECK(false, "Cannot get device of pointer on CUDA without ATen_cuda library. ", CUDA_HELP);
   }
 
-  bool isPinnedPtr(const void* data) const override {
+  bool isPinnedPtr(const void* /*data*/) const override {
     return false;
   }
```
```diff
@@ -6,8 +6,6 @@
 #include <ATen/detail/AcceleratorHooksInterface.h>
 
 #include <memory>
 
 // NB: Class must live in `at` due to limitations of Registry.h.
 namespace at {
@@ -37,7 +35,7 @@ struct TORCH_API HIPHooksInterface : AcceleratorHooksInterface {
     return -1;
   }
 
-  bool isPinnedPtr(const void* data) const override {
+  bool isPinnedPtr(const void* /*data*/ ) const override {
     return false;
   }
@@ -49,7 +47,7 @@ struct TORCH_API HIPHooksInterface : AcceleratorHooksInterface {
     return 0;
  }
 
-  bool hasPrimaryContext(DeviceIndex device_index) const override {
+  bool hasPrimaryContext(DeviceIndex /*device_index*/ ) const override {
     TORCH_CHECK(false, "Cannot check primary context without ATen_hip library.");
   }
 };
```
```diff
@@ -15,7 +15,7 @@ struct TORCH_API IPUHooksInterface : AcceleratorHooksInterface {
     TORCH_CHECK(false, "Cannot initialize IPU without ATen_ipu library.");
   }
 
-  bool hasPrimaryContext(DeviceIndex device_index) const override {
+  bool hasPrimaryContext(DeviceIndex /*device_index*/) const override {
     TORCH_CHECK(false, "Cannot initialize IPU without ATen_ipu library.");
     return false;
   }
@@ -26,7 +26,7 @@ struct TORCH_API IPUHooksInterface : AcceleratorHooksInterface {
   }
 
   Generator getNewGenerator(
-      DeviceIndex device_index [[maybe_unused]] = -1) const override {
+      DeviceIndex /*device_index*/ = -1) const override {
     TORCH_CHECK(false, "Cannot initialize IPU without ATen_ipu library.");
   }
 };
```
```diff
@@ -17,7 +17,7 @@ struct TORCH_API MAIAHooksInterface : AcceleratorHooksInterface {
     TORCH_CHECK(false, "Cannot initialize MAIA without ATen_maia library.");
   }
 
-  bool hasPrimaryContext(DeviceIndex device_index) const override {
+  bool hasPrimaryContext(DeviceIndex /*device_index*/) const override {
     TORCH_CHECK(false, "Cannot initialize MAIA without ATen_maia library.");
     return false;
   }
```
```diff
@@ -12,7 +12,6 @@
 #include <ATen/detail/AcceleratorHooksInterface.h>
 
 #include <string>
+C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wunused-parameter")
 namespace at {
 class Context;
 }
@@ -46,7 +45,7 @@ struct TORCH_API MTIAHooksInterface : AcceleratorHooksInterface {
     return 0;
   }
 
-  virtual void deviceSynchronize(c10::DeviceIndex device_index) const {
+  virtual void deviceSynchronize(c10::DeviceIndex /*device_index*/) const {
     FAIL_MTIAHOOKS_FUNC(__func__);
   }
@@ -54,11 +53,11 @@ struct TORCH_API MTIAHooksInterface : AcceleratorHooksInterface {
     FAIL_MTIAHOOKS_FUNC(__func__);
   }
 
-  bool hasPrimaryContext(DeviceIndex device_index) const override {
+  bool hasPrimaryContext(DeviceIndex /*device_index*/) const override {
     return false;
   }
 
-  void setCurrentDevice(DeviceIndex device) const override {
+  void setCurrentDevice(DeviceIndex /*device*/) const override {
     FAIL_MTIAHOOKS_FUNC(__func__);
   }
@@ -67,36 +66,36 @@ struct TORCH_API MTIAHooksInterface : AcceleratorHooksInterface {
     return -1;
   }
 
-  DeviceIndex exchangeDevice(DeviceIndex device) const override {
+  DeviceIndex exchangeDevice(DeviceIndex /*device*/) const override {
     FAIL_MTIAHOOKS_FUNC(__func__);
     return -1;
   }
 
-  DeviceIndex maybeExchangeDevice(DeviceIndex device) const override {
+  DeviceIndex maybeExchangeDevice(DeviceIndex /*device*/) const override {
     FAIL_MTIAHOOKS_FUNC(__func__);
     return -1;
   }
 
-  virtual c10::Stream getCurrentStream(DeviceIndex device) const {
+  virtual c10::Stream getCurrentStream(DeviceIndex /*device*/) const {
     FAIL_MTIAHOOKS_FUNC(__func__);
     return c10::Stream::unpack3(-1, 0, c10::DeviceType::MTIA);
   }
 
-  virtual int64_t getCurrentRawStream(DeviceIndex device) const {
+  virtual int64_t getCurrentRawStream(DeviceIndex /*device*/) const {
     FAIL_MTIAHOOKS_FUNC(__func__);
     return -1;
   }
 
-  virtual c10::Stream getDefaultStream(DeviceIndex device) const {
+  virtual c10::Stream getDefaultStream(DeviceIndex /*device*/) const {
     FAIL_MTIAHOOKS_FUNC(__func__);
     return c10::Stream::unpack3(-1, 0, c10::DeviceType::MTIA);
   }
 
-  virtual void setCurrentStream(const c10::Stream& stream) const {
+  virtual void setCurrentStream(const c10::Stream& /*stream*/ ) const {
     FAIL_MTIAHOOKS_FUNC(__func__);
   }
 
-  bool isPinnedPtr(const void* data) const override {
+  bool isPinnedPtr(const void* /*data*/) const override {
     return false;
   }
@@ -105,12 +104,12 @@ struct TORCH_API MTIAHooksInterface : AcceleratorHooksInterface {
     return nullptr;
   }
 
-  virtual PyObject* memoryStats(DeviceIndex device) const {
+  virtual PyObject* memoryStats(DeviceIndex /*device*/) const {
     FAIL_MTIAHOOKS_FUNC(__func__);
     return nullptr;
   }
 
-  virtual PyObject* getDeviceCapability(DeviceIndex device) const {
+  virtual PyObject* getDeviceCapability(DeviceIndex /*device*/) const {
     FAIL_MTIAHOOKS_FUNC(__func__);
     return nullptr;
   }
@@ -121,9 +120,9 @@ struct TORCH_API MTIAHooksInterface : AcceleratorHooksInterface {
 
   virtual void recordMemoryHistory(
-      const std::optional<std::string>& enabled,
-      const std::string& stacks,
-      size_t max_entries) const {
+      const std::optional<std::string>& /*enabled*/,
+      const std::string& /*stacks*/,
+      size_t /*max_entries*/) const {
     FAIL_MTIAHOOKS_FUNC(__func__);
   }
@@ -137,7 +136,7 @@ struct TORCH_API MTIAHooksInterface : AcceleratorHooksInterface {
     return 0;
   }
 
-  virtual void resetPeakMemoryStats(DeviceIndex device) const {
+  virtual void resetPeakMemoryStats(DeviceIndex /*device*/) const {
     FAIL_MTIAHOOKS_FUNC(__func__);
   }
@@ -158,4 +157,3 @@ TORCH_API const MTIAHooksInterface& getMTIAHooks();
 TORCH_API bool isMTIAHooksBuilt();
 } // namespace detail
 } // namespace at
+C10_DIAGNOSTIC_POP()
```
```diff
@@ -4201,8 +4201,7 @@ static inline void handle_unflatten_exception(
     const std::runtime_error& e,
     const Tensor& self,
     int64_t dim,
-    SymIntArrayRef sizes,
-    std::optional<DimnameList> names) {
+    SymIntArrayRef sizes) {
   if (!strstr(e.what(), "is invalid for input of size")) {
     TORCH_CHECK(false, "unflatten got an unexpected error:\n", e.what());
   }
@@ -4256,7 +4255,7 @@ static Tensor unflatten_impl(
     // at::infer_size would throw std::runtime_error for invalid size,
     // catch the runtime_error and display the error message in a more
     // user-friendly way for both tensors and named tensors
-    handle_unflatten_exception(e, self, dim, sizes, names);
+    handle_unflatten_exception(e, self, dim, sizes);
   }
 
   SymDimVector shape(self.sym_sizes().begin(), self.sym_sizes().end());
```
```diff
@@ -48,7 +48,7 @@ struct static_unroll {
 template<template<int i> typename func, int end>
 struct static_unroll<func, end, end> {
   template<typename... Args>
-  static inline C10_HOST_DEVICE void with_args(Args... args) {}
+  static inline C10_HOST_DEVICE void with_args(Args... /*args*/) {}
 };
 
 // helper structs to be used with static_unroll to load arguments
@@ -516,7 +516,7 @@ inline C10_HOST_DEVICE int can_vectorize_up_to(char *pointer) {
 template<int i>
 struct can_vectorize_up_to_helper {
   template <typename array_t, typename traits>
-  static C10_HOST_DEVICE void apply(int &result, array_t pointers, traits _) {
+  static C10_HOST_DEVICE void apply(int &result, array_t pointers, traits /*_*/) {
     using arg_t = typename traits::template arg<i>::type;
     // `pointers` hold the data_ptr for tensors [output, input0, input1, ...], so we
     // need a +1 offset to get the input
```
```diff
@@ -331,6 +331,9 @@ if(NOT TARGET clog)
       "${CONFU_DEPENDENCIES_BINARY_DIR}/clog")
   # We build static version of clog but a dynamic library may indirectly depend on it
   set_property(TARGET clog PROPERTY POSITION_INDEPENDENT_CODE ON)
+  if(CMAKE_CXX_COMPILER_ID MATCHES "Clang" OR CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
+    target_compile_options(clog PRIVATE "-Wno-unused-result")
+  endif()
 endif()
 target_link_libraries(pytorch_qnnpack PUBLIC clog)
```
```diff
@@ -32,7 +32,7 @@ c10::intrusive_ptr<GeneratorImpl> GeneratorImpl::clone() const {
 }
 
 void GeneratorImpl::graphsafe_set_state(
-    const c10::intrusive_ptr<c10::GeneratorImpl>& state) {
+    const c10::intrusive_ptr<c10::GeneratorImpl>& /*state*/) {
   TORCH_CHECK_NOT_IMPLEMENTED(
       false, "graphsafe_set_state is not supported in this Generator");
 }
```
```diff
@@ -102,7 +102,7 @@ TensorImpl::TensorImpl(
 
 // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 TensorImpl::TensorImpl(
-    ImplType type,
+    ImplType /*type*/,
     Storage&& storage,
     DispatchKeySet key_set,
     const caffe2::TypeMeta data_type)
```
```diff
@@ -59,15 +59,15 @@ struct FakeGuardImpl final : public DeviceGuardImplInterface {
 
   // Event-related functions
   void record(
-      void** event,
-      const Stream& stream,
-      const DeviceIndex device_index,
-      const EventFlag flag) const override {}
-  void block(void* event, const Stream& stream) const override {}
-  bool queryEvent(void* event) const override {
+      void** /*event*/,
+      const Stream& /*stream*/,
+      const DeviceIndex /*device_index*/,
+      const EventFlag /*flag*/) const override {}
+  void block(void* /*event*/, const Stream& /*stream*/) const override {}
+  bool queryEvent(void* /*event*/) const override {
     return true;
   }
-  void destroyEvent(void* event, const DeviceIndex device_index)
+  void destroyEvent(void* /*event*/, const DeviceIndex /*device_index*/)
       const noexcept override {}
 
   // Convenience methods for testing
```
```diff
@@ -1,7 +1,7 @@
 #include <c10/core/SymIntArrayRef.h>
 #include <c10/core/TensorImpl.h>
 #include <c10/core/impl/PyInterpreter.h>
 
+C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wunused-parameter")
 namespace c10::impl {
 
 struct NoopPyInterpreterVTable final : public PyInterpreterVTable {
@@ -145,3 +145,4 @@ void PyInterpreter::disarm() noexcept {
 }
 
 } // namespace c10::impl
+C10_DIAGNOSTIC_POP()
```
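The `C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wunused-parameter")` / `C10_DIAGNOSTIC_POP()` pair used above suppresses the diagnostic for a whole region at once. Conceptually it corresponds to the raw GCC/Clang pragmas below (a sketch of the idea, not the macros' exact expansion):

```cpp
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"

// ... declarations whose intentionally unused parameters should not warn ...

#pragma GCC diagnostic pop
```

Region-level suppression fits files like these, where a vtable of no-op stubs would otherwise need every parameter name commented out individually.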
```diff
@@ -70,7 +70,7 @@ inline bool is_thp_alloc(size_t nbytes) {
 }
 
 #elif !defined(__ANDROID__) && !defined(_MSC_VER)
-constexpr size_t c10_compute_alignment([[maybe_unused]] size_t nbytes) {
+constexpr size_t c10_compute_alignment(size_t /*nbytes*/) {
   return gAlignment;
 }
```
```diff
@@ -227,7 +227,9 @@ class CUDAAllocator : public Allocator {
       c10::DeviceIndex device,
       MempoolId_t mempool_id) = 0;
   virtual void releasePool(c10::DeviceIndex device, MempoolId_t mempool_id) = 0;
-  virtual int getPoolUseCount(c10::DeviceIndex device, MempoolId_t mempool_id) {
+  virtual int getPoolUseCount(
+      c10::DeviceIndex /*device*/,
+      MempoolId_t /*mempool_id*/) {
     TORCH_CHECK(
         false,
         name(),
@@ -235,8 +237,8 @@ class CUDAAllocator : public Allocator {
         "If you need it, please file an issue describing your use case.");
   }
   virtual void ensureExistsAndIncrefPool(
-      c10::DeviceIndex device,
-      MempoolId_t mempool_id) {
+      c10::DeviceIndex /*device*/,
+      MempoolId_t /*mempool_id*/) {
     TORCH_CHECK(
         false,
         name(),
@@ -256,9 +258,9 @@ class CUDAAllocator : public Allocator {
 
   // returns true if the allocated blocks are equal to expected live allocations
   virtual bool checkPoolLiveAllocations(
-      c10::DeviceIndex device,
-      MempoolId_t mempool_id,
-      const std::unordered_set<void*>& expected_live_allocations) {
+      c10::DeviceIndex /*device*/,
+      MempoolId_t /*mempool_id*/,
+      const std::unordered_set<void*>& /*expected_live_allocations*/) {
     TORCH_CHECK(
         false,
         name(),
@@ -281,7 +283,7 @@ class CUDAAllocator : public Allocator {
       RecordContext when,
       bool clearHistory) = 0;
   virtual void recordAnnotation(
-      const std::vector<std::pair<std::string, std::string>>& md) {}
+      const std::vector<std::pair<std::string, std::string>>& /*md*/) {}
   virtual void attachOutOfMemoryObserver(OutOfMemoryObserver observer) = 0;
 
   // Attached AllocatorTraceTracker callbacks will be called while the
```
```diff
@@ -213,11 +213,11 @@ bool CUDAKernelLaunchRegistry::check_env_for_dsa_enabled() const {
 }
 
 uint32_t CUDAKernelLaunchRegistry::insert(
-    const char* launch_filename,
-    const char* launch_function,
-    const uint32_t launch_linenum,
-    const char* kernel_name,
-    const int32_t stream_id) {
+    const char* launch_filename [[maybe_unused]],
+    const char* launch_function [[maybe_unused]],
+    const uint32_t launch_linenum [[maybe_unused]],
+    const char* kernel_name [[maybe_unused]],
+    const int32_t stream_id [[maybe_unused]]) {
 #ifdef TORCH_USE_CUDA_DSA
   if (!enabled_at_runtime) {
     return 0;
```
```diff
@@ -10,9 +10,9 @@ namespace c10::cuda {
 
 void c10_cuda_check_implementation(
     const int32_t err,
-    const char* filename,
-    const char* function_name,
-    const int line_number,
+    const char* /*filename*/,
+    const char* /*function_name*/,
+    const int /*line_number*/,
     const bool include_device_assertions) {
   const auto cuda_error = static_cast<cudaError_t>(err);
   const auto cuda_kernel_failure = include_device_assertions
@@ -24,7 +24,6 @@ void c10_cuda_check_implementation(
   }
 
   [[maybe_unused]] auto error_unused = cudaGetLastError();
-  (void)error_unused;
 
   std::string check_message;
 #ifndef STRIP_ERROR_MESSAGES
```
```diff
@@ -213,7 +213,7 @@ Warning::Warning(
 Warning::Warning(
     warning_variant_t type,
     SourceLocation source_location,
-    detail::CompileTimeEmptyString msg,
+    detail::CompileTimeEmptyString /*msg*/,
     const bool verbatim)
     : Warning(type, source_location, "", verbatim) {}
```
```diff
@@ -139,7 +139,7 @@ APIUsageLoggerType* GetAPIUsageLogger() {
 APIUsageMetadataLoggerType* GetAPIUsageMetadataLogger() {
   static APIUsageMetadataLoggerType func =
       [](const std::string&,
-         const std::map<std::string, std::string>& metadata_map) {};
+         const std::map<std::string, std::string>& /*metadata_map*/) {};
   return &func;
 }
@@ -386,7 +386,7 @@ void initLogging() {
   detail::setLogLevelFlagFromEnv();
 }
 
-bool InitCaffeLogging(int* argc, char** argv) {
+bool InitCaffeLogging(int* argc, char** /*argv*/) {
   // When doing InitCaffeLogging, we will assume that caffe's flag parser has
   // already finished.
   if (*argc == 0)
```
```diff
@@ -216,9 +216,7 @@ struct DinicFlowGraph {
     return seen;
   }
 
-  std::pair<std::vector<size_t>, std::vector<size_t>> partition(
-      size_t s,
-      size_t t) {
+  std::pair<std::vector<size_t>, std::vector<size_t>> partition(size_t t) {
     // Note: the partitioning returns "reachable" / "unreachable",
     // but specifically, for "unreachable", it returns "all vertices
     // that are reachable from t in the reverse residual graph"
@@ -258,7 +256,7 @@ struct DinicFlowGraph {
     };
   }
 
-  auto [reachable_idxs, unreachable_idxs] = partition(s_int, t_int);
+  auto [reachable_idxs, unreachable_idxs] = partition(t_int);
   std::vector<std::string> reachable, unreachable;
 
   auto idxs_to_names = [&](std::vector<size_t>& src,
```
```diff
@@ -113,9 +113,9 @@ bool IsNUMAEnabled() {
   return false;
 }
 
-void NUMABind(int numa_node_id) {}
+void NUMABind(int /*numa_node_id*/) {}
 
-int GetNUMANode(const void* ptr) {
+int GetNUMANode(const void* /*ptr*/) {
   return -1;
 }
@@ -123,7 +123,7 @@ int GetNumNUMANodes() {
   return -1;
 }
 
-void NUMAMove(void* ptr, size_t size, int numa_node_id) {}
+void NUMAMove(void* /*ptr*/, size_t /*size*/, int /*numa_node_id*/) {}
 
 int GetCurrentNUMANode() {
   return -1;
```
```diff
@@ -383,6 +383,7 @@ function(torch_compile_options libname)
         -Wall
         -Wextra
         -Wdeprecated
+        -Wunused
         -Wno-unused-parameter
         -Wno-missing-field-initializers
         -Wno-array-bounds
@@ -390,13 +391,11 @@ function(torch_compile_options libname)
         -Wno-strict-overflow
         -Wno-strict-aliasing
     )
-  list(APPEND private_compile_options -Wunused-function)
-  list(APPEND private_compile_options -Wunused-variable)
   if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
-    list(APPEND private_compile_options -Wunused-but-set-variable -Wredundant-move)
+    list(APPEND private_compile_options -Wredundant-move)
   endif()
   if(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
-    list(APPEND private_compile_options -Wunused-private-field -Wextra-semi -Wno-error=extra-semi -Wmove)
+    list(APPEND private_compile_options -Wextra-semi -Wno-error=extra-semi -Wmove)
   else()
     list(APPEND private_compile_options
       # Considered to be flaky. See the discussion at
@@ -409,9 +408,9 @@ function(torch_compile_options libname)
       -Werror
       -Werror=inconsistent-missing-override
       -Werror=inconsistent-missing-destructor-override
-      -Werror=unused-function
-      -Werror=unused-variable
       -Werror=pedantic
+      -Werror=unused
+      -Wno-error=unused-parameter
     )
     if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
       list(APPEND private_compile_options -Werror=unused-but-set-variable)
```
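Taken together, the build changes replace the piecemeal `-Wunused-function` / `-Wunused-variable` / etc. flags with the whole `-Wunused` group, while opting back out of `-Wunused-parameter`. A minimal standalone sketch of the same pattern (hypothetical target name `mylib`, not from the PR):

```cmake
add_library(mylib STATIC mylib.cpp)
if(CMAKE_CXX_COMPILER_ID MATCHES "Clang" OR CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
  # Enable the whole -Wunused group, then disable the one sub-warning that
  # is still too noisy, mirroring the torch_compile_options() change above.
  target_compile_options(mylib PRIVATE -Wunused -Wno-unused-parameter)
endif()
```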