[2/N] Fix some violations of unused-function and unused-variable checks in torch_cpu (#129878)

Follows #128670

Pull Request resolved: https://github.com/pytorch/pytorch/pull/129878
Approved by: https://github.com/ezyang
Author: cyy
Date: 2024-07-04 00:39:28 +00:00
Committed by: PyTorch MergeBot
Parent: d95a019704
Commit: efb73eda51

23 changed files with 60 additions and 21 deletions

View File

@@ -275,7 +275,9 @@ MapAllocator::MapAllocator(WithFd, c10::string_view filename, int fd, int flags,
   struct stat file_stat{};
   if (fstat(fd, &file_stat) == -1) {
+#ifndef STRIP_ERROR_MESSAGES
     int last_err = errno;
+#endif
     if (!(flags_ & ALLOCATOR_MAPPED_FROMFD)) {
       ::close(fd);
     }
@@ -289,7 +291,9 @@ MapAllocator::MapAllocator(WithFd, c10::string_view filename, int fd, int flags,
       TORCH_CHECK(false, "unable to resize file <", filename_, "> to the right size: ", strerror(errno), " (", errno, ")");
     }
     if (fstat(fd, &file_stat) == -1 || file_stat.st_size < static_cast<int64_t>(size)) {
+#ifndef STRIP_ERROR_MESSAGES
       int last_err = errno;
+#endif
       ::close(fd);
       TORCH_CHECK(false, "unable to stretch file <", filename_, "> to the right size: ", strerror(last_err), " (", last_err, ")");
     }
@@ -298,7 +302,9 @@ MapAllocator::MapAllocator(WithFd, c10::string_view filename, int fd, int flags,
 */
 #ifndef __APPLE__
       if ((write(fd, "", 1)) != 1) /* note that the string "" contains the '\0' byte ... */ {
+#ifndef STRIP_ERROR_MESSAGES
         int last_err = errno;
+#endif
         ::close(fd);
         TORCH_CHECK(false, "unable to write to file <", filename_, ">: ", strerror(last_err), " (", last_err, ")");
       }
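This is the commit's central pattern: in production mobile builds, STRIP_ERROR_MESSAGES makes TORCH_CHECK discard its message operands, so a variable such as last_err that exists only to feed the message trips -Wunused-variable. Guarding the declaration with the same condition removes it from exactly the builds that no longer read it. A self-contained sketch of the mechanism, using a simplified stand-in macro rather than the real TORCH_CHECK from c10/util/Exception.h:

    #include <cerrno>
    #include <cstdlib>
    #include <cstring>
    #include <iostream>

    #ifdef STRIP_ERROR_MESSAGES
    // Message operands are never expanded, mirroring stripped builds: a
    // variable that exists only to feed them becomes unused.
    #define CHECK(cond, ...) \
      do { if (!(cond)) std::abort(); } while (0)
    #else
    template <typename... Args>
    [[noreturn]] void check_fail(const Args&... args) {
      (std::cerr << ... << args) << '\n';
      std::abort();
    }
    #define CHECK(cond, ...) \
      do { if (!(cond)) check_fail(__VA_ARGS__); } while (0)
    #endif

    void require_fstat_ok(int rc) {
    #ifndef STRIP_ERROR_MESSAGES
      int last_err = errno; // referenced only by the CHECK message below
    #endif
      CHECK(rc != -1, "fstat failed: ", std::strerror(last_err), " (", last_err, ")");
    }

Building this with and without -DSTRIP_ERROR_MESSAGES compiles cleanly under -Wunused-variable; deleting the #ifndef guard makes the stripped build warn.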

View File

@@ -7,6 +7,7 @@ namespace c10 {
 namespace impl {
 namespace {
+#ifndef STRIP_ERROR_MESSAGES
 std::string toString(std::optional<DispatchKey> k) {
   if (k.has_value()) {
     return toString(*k);
@@ -14,6 +15,7 @@ namespace {
     return "(catch all)";
   }
 }
+#endif
 }
 
 OperatorEntry::OperatorEntry(OperatorName&& operator_name)
@@ -510,6 +512,7 @@ void OperatorEntry::reportSignatureError(const CppSignature& call_signature, con
   );
 };
 
+#ifndef STRIP_ERROR_MESSAGES
 static std::string post_process_dispatch_key_str(std::string dispatch_key) {
   const std::string substr = "PrivateUse1";
   if (substr.size() <= dispatch_key.size() && std::equal(substr.rbegin(), substr.rend(), dispatch_key.rbegin())) {
@@ -525,6 +528,7 @@ static std::string post_process_dispatch_key_str(std::string dispatch_key) {
   }
   return dispatch_key;
 }
+#endif
 
 void OperatorEntry::reportError(DispatchKey dispatchKey) const {
   // If there is an invariant problem, report it now.
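Here the guarded entity is a whole function rather than a variable: toString and post_process_dispatch_key_str have internal linkage (anonymous namespace or static), and once every call site sits inside a stripped-out message they are defined but never called, which -Wunused-function reports. A reduced sketch, reusing the same simplified CHECK stand-in as above:

    #include <cstdio>
    #include <cstdlib>
    #include <string>

    #ifdef STRIP_ERROR_MESSAGES
    #define CHECK(cond, ...) do { if (!(cond)) std::abort(); } while (0)
    #else
    #define CHECK(cond, msg) \
      do { if (!(cond)) { std::puts((msg).c_str()); std::abort(); } } while (0)
    #endif

    namespace {
    #ifndef STRIP_ERROR_MESSAGES
    // Internal linkage: in a stripped build this helper would have no
    // remaining callers, so -Wunused-function fires without the guard.
    std::string describe(int key) {
      return "dispatch key " + std::to_string(key);
    }
    #endif
    } // namespace

    void report_if_invalid(int key) {
      CHECK(key >= 0, describe(key));
    }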

View File

@@ -98,7 +98,7 @@ at::Tensor& random_impl(at::Tensor& self, std::optional<Generator> generator) {
       "This warning will become an error in version 1.7 release, please fix the code in advance"); \
   }
 
-static void check_from_to_in_range(int64_t from, int64_t to_inc, caffe2::TypeMeta dtype) {
+inline void check_from_to_in_range(int64_t from, int64_t to_inc, caffe2::TypeMeta dtype) {
   const auto scalar_type = typeMetaToScalarType(dtype);
   if (isFloatingType(scalar_type)) {
     AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "check_random_fp_bounds", [&] {
@@ -290,7 +290,7 @@ at::Tensor& uniform_impl_(at::Tensor& self, double from, double to, std::optiona
     uniform_impl_<uniform_kernel, RNG>(float_tensor, from, to, generator);
   } else {
     AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "check_uniform_bounds", [&] {
-      const auto dtype = self.dtype();
+      [[maybe_unused]] const auto dtype = self.dtype();
       const auto min = static_cast<double>(std::numeric_limits<scalar_t>::lowest());
       const auto max = static_cast<double>(std::numeric_limits<scalar_t>::max());
       CHECK_OUT_OF_BOUNDS(from, "from", min, max, dtype);
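Two different fixes meet in this hunk, which lives in header-included template code. A static function at namespace scope in a header is stamped as a separate internal-linkage copy into every translation unit that includes it, and each TU that never calls it emits its own -Wunused-function; inline merges the copies into one entity, to which that warning does not apply. The second change uses C++17's [[maybe_unused]] for a local that only the strippable bounds-check messages read. A sketch under assumed, illustrative names:

    // bounds_checks.h -- a minimal sketch; names are illustrative, not
    // the real PyTorch declarations.
    #pragma once
    #include <cstdint>
    #include <stdexcept>
    #include <string>

    // 'inline', not 'static': one logical function across all including
    // TUs, so no per-TU unused-function warning.
    inline void check_from_to_in_range(int64_t from, int64_t to_inc) {
      // [[maybe_unused]]: the variable may go unreferenced in some
      // configurations (here, stripped builds) without drawing a warning.
      [[maybe_unused]] const char* arg_name = "from";
      if (from > to_inc) {
    #ifndef STRIP_ERROR_MESSAGES
        throw std::invalid_argument(std::string(arg_name) + " exceeds to_inc");
    #else
        throw std::invalid_argument("check failed");
    #endif
      }
    }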

View File

@@ -278,10 +278,12 @@ Tensor einsum(c10::string_view equation, TensorList operands, at::OptionalIntArr
     return std::isupper(label) ? label - 'A' : label - 'a' + NUM_OF_LETTERS;
   };
 
+#ifndef STRIP_ERROR_MESSAGES
   // Convert subscript in [0, TOTAL_LABELS) to label in [A-Za-z]
   auto subscript_to_label = [=](uint8_t s) -> unsigned char {
     return s < NUM_OF_LETTERS ? s + 'A' : s + 'a' - NUM_OF_LETTERS;
   };
+#endif
 
   // Find arrow (->) to split equation into lhs and rhs
   const auto arrow_pos = equation.find("->");
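The unused entity can also be a closure: subscript_to_label is an ordinary local variable holding a lambda, so -Wunused-variable applies to it like to any other local once its only call sites sit inside stripped messages. A small sketch, with an assumed WARN macro standing in for TORCH_WARN's argument-dropping behavior:

    #include <iostream>

    #ifdef STRIP_ERROR_MESSAGES
    #define WARN(...) (void)0 // arguments are discarded entirely
    #else
    #define WARN(msg) std::cerr << "Warning: " << (msg) << "\n"
    #endif

    void warn_bad_subscript(unsigned char s) {
    #ifndef STRIP_ERROR_MESSAGES
      // The closure object is a local variable; with its only use inside
      // WARN stripped away, -Wunused-variable would flag it.
      auto subscript_to_label = [](unsigned char x) -> unsigned char {
        return x < 26 ? 'A' + x : 'a' + (x - 26);
      };
    #endif
      WARN(subscript_to_label(s));
    }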

View File

@@ -3546,7 +3546,9 @@ Tensor _weight_int8pack_mm_cpu(
 }
 
 Tensor& _int_mm_out_cpu(const Tensor& self, const Tensor& mat2, Tensor& result) {
+#ifndef STRIP_ERROR_MESSAGES
   static constexpr c10::string_view func_name = "int_mm_out_cpu";
+#endif
   TORCH_CHECK(self.dim() == 2, func_name, ": Expected self to be of dimension 2 but got ", self.dim());
   TORCH_CHECK(mat2.dim() == 2, func_name, ": Expected mat2 to be of dimension 2 but got ", mat2.dim());
   TORCH_CHECK(self.size(1) == mat2.size(0), func_name, ": self.size(1) needs to match mat2.size(0) but got ", self.size(1), " and ", mat2.size(0));

View File

@@ -114,7 +114,9 @@ static void multilabel_margin_loss_forward_out_cpu_template(
     Tensor& output,
     Tensor& is_target,
     int64_t reduction) {
+#ifndef STRIP_ERROR_MESSAGES
   auto target_arg = TensorArg(target, "target", 2);
+#endif
   // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
   int64_t nframe, dim;
   const int64_t ndims = input.dim();
@@ -161,7 +163,9 @@ static void multilabel_margin_loss_backward_out_frame(
     const Tensor& is_target_contiguous,
     int64_t nframe,
     int64_t dim) {
+#ifndef STRIP_ERROR_MESSAGES
   auto is_target_arg = TensorArg(is_target_contiguous, "is_target", 5);
+#endif
   TORCH_CHECK(
       is_target_contiguous.min().item<scalar_t>() >= 0, is_target_arg, " is out of range");

View File

@@ -60,8 +60,10 @@ static void report_moving_unnamed_dim_error(
 static void report_not_a_subsequence_error(
     DimnameList names, DimnameList other, bool is_aligning_two_tensors) {
   if (is_aligning_two_tensors) {
+#ifndef STRIP_ERROR_MESSAGES
     auto shorter = names.size() > other.size() ? other : names;
     auto longer = names.size() > other.size() ? names : other;
+#endif
     TORCH_CHECK(false,
         "Could not align Tensor", shorter, " and Tensor", longer,
         " because ", shorter, " is not a subsequence of ", longer, ". ");

View File

@@ -126,7 +126,9 @@ pool2d_shape_check(
     int64_t outputHeight, int64_t outputWidth, MemoryFormat memory_format)
 {
   const int64_t ndim = input.ndimension();
+#ifndef STRIP_ERROR_MESSAGES
   const int64_t nOutputPlane = nInputPlane;
+#endif
 
   TORCH_CHECK(kW > 0 && kH > 0,
       "kernel size should be greater than zero, but got ",

View File

@@ -5,7 +5,8 @@
 namespace at::native {
 namespace {
-static std::string shapes_as_str(TensorList tensors) {
+#ifndef STRIP_ERROR_MESSAGES
+inline std::string shapes_as_str(TensorList tensors) {
   std::ostringstream os;
   bool first = true;
   for (auto& tensor : tensors) {
@@ -19,9 +20,10 @@ static std::string shapes_as_str(TensorList tensors) {
   }
   return os.str();
 }
+#endif
 } // anonymous namespace
 
-static std::tuple<bool, Tensor> canDispatchToMaskedFill(const Tensor& self, const torch::List<std::optional<at::Tensor>>& indices,
+inline std::tuple<bool, Tensor> canDispatchToMaskedFill(const Tensor& self, const torch::List<std::optional<at::Tensor>>& indices,
 const Tensor& value){
   if (!(value.numel() ==1 && value.device().is_cpu())){
     return std::make_tuple(false,Tensor());
@@ -54,7 +56,7 @@ const Tensor& value){
   return std::make_tuple(true, mask);
 }
 
-static AdvancedIndex make_info(Tensor self, IOptTensorListRef orig) {
+inline AdvancedIndex make_info(Tensor self, IOptTensorListRef orig) {
   checkIndexTensorTypes(orig, /*allow_int*/ true);
   // first expand BoolTensor (masks) or ByteTensor (masks) into 1 or more LongTensors
   auto indices = expandTensors(self, orig);

View File

@@ -673,7 +673,7 @@ Tensor sparse_compressed_to_dense(
   // dims into a single dim, so that the remaining dims are only block
   // dims eventually, and then dense dims.
   auto n_batch = values.size(0);
-  int64_t nrows, ncols;
+  int64_t nrows = 0, ncols = 0;
   auto dense_reshaped_sizes = dense.sizes().vec();
   if (!block_sparse) {
     nrows = self.size(batch_ndim);
@@ -928,7 +928,6 @@ static std::pair<Tensor, Tensor> _not_zero_mask_to_col_row_indices(
 static inline
 void _to_sparse_check_arguments(const std::string& funcname, const Tensor& self, const int64_t sparse_dim) {
   auto layout_from = self.layout();
-  auto layout_to = kSparse;
 
   auto layout_from_valid = layout_from == kStrided || layout_from == kSparse || at::sparse_csr::is_sparse_compressed(layout_from);
   if (!layout_from_valid) {
@@ -944,11 +943,11 @@ void _to_sparse_check_arguments(const std::string& funcname, const Tensor& self,
     }
   } else if (layout_from == kSparse) {
     if (sparse_dim != self.sparse_dim()) {
-      AT_ERROR(funcname, ": conversion from ", layout_from, " to ", layout_to, " with sparse_dim argument !=self.sparse_dim() is not supported");
+      AT_ERROR(funcname, ": conversion from ", layout_from, " to ", kSparse, " with sparse_dim argument !=self.sparse_dim() is not supported");
     }
   } else if (at::sparse_csr::is_sparse_compressed(layout_from)) {
     if (sparse_dim != 2) {
-      AT_ERROR(funcname, ": conversion from ", layout_from, " to ", layout_to, " with sparse_dim argument !=2 is not supported");
+      AT_ERROR(funcname, ": conversion from ", layout_from, " to ", kSparse, " with sparse_dim argument !=2 is not supported");
     }
   }
 }
@@ -1758,7 +1757,7 @@ Tensor _compressed_to_block_compressed_cpu(const Tensor& self, IntArrayRef block
   // First we determine the number of blocks needed. For each given
   // block, if it contains a non-zero element we will allocate values
   // and indices for it.
-  int64_t num_blocks;
+  int64_t num_blocks = 0;
   auto compressed_dim = (target_layout == Layout::SparseBsr) ? self.size(0) : self.size(1);
   auto plain_dim = (target_layout == Layout::SparseBsr) ? self.size(1) : self.size(0);
   auto compressed_blocksize = (target_layout == Layout::SparseBsr) ? blocksize[0] : blocksize[1];
@@ -1859,10 +1858,9 @@ Tensor sparse_compressed_to_sparse_bsc(const Tensor& self, IntArrayRef blocksize
 }
 
 Tensor sparse_coo_to_sparse(const Tensor& self, const int64_t sparse_dim) {
-  auto layout_to = kSparse;
   _to_sparse_check_arguments("sparse_coo_to_sparse", self, sparse_dim);
-  AT_ERROR("sparse_coo_to_sparse: ", self.layout(), " to ", layout_to, " conversion not supported");
+  AT_ERROR("sparse_coo_to_sparse: ", self.layout(), " to ", kSparse, " conversion not supported");
   return Tensor{};
 }
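Besides the message guards, this file gets two small hygiene fixes: locals such as nrows, ncols, and num_blocks are now initialized at declaration, so no maybe-uninitialized path is left for the compiler or clang-tidy's cppcoreguidelines-init-variables to flag, and the single-use local layout_to is folded into the kSparse expression it abbreviated. A reduced sketch of the initialization fix, with hypothetical names:

    #include <cstdint>

    enum class Layout { SparseBsr, SparseBsc };

    int64_t count_compressed_blocks(Layout target, int64_t rows, int64_t cols) {
      // Initialized at declaration: even if a later edit adds a branch
      // that forgets to assign, the value stays well defined and the
      // compiler has no uninitialized path to warn about.
      int64_t num_blocks = 0;
      if (target == Layout::SparseBsr) {
        num_blocks = rows;
      } else {
        num_blocks = cols;
      }
      return num_blocks;
    }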

View File

@@ -3026,6 +3026,7 @@ static inline Tensor sparse_compressed_transpose(
   const auto transpose_type = classify_dim(dim0);
   {
+#ifndef STRIP_ERROR_MESSAGES
     auto dim_type_name = [](const TransposeDim dim) {
       switch (dim) {
         case TransposeDim::Batch:
@@ -3041,6 +3042,7 @@ static inline Tensor sparse_compressed_transpose(
           static_cast<std::underlying_type_t<TransposeDim>>(dim));
       }
     };
+#endif
     const auto dim1_type = classify_dim(dim1);
     TORCH_CHECK(
         dim1_type == transpose_type,

View File

@@ -37,8 +37,8 @@ void cpu_max_unpool(
   // treat batch size and channels as one dimension
   // and the feature map as another dimension
-  int64_t channels, output_depth, output_height, output_width;
-  if (is_3d) {
+  [[maybe_unused]] int64_t channels, output_depth, output_height, output_width;
+  if constexpr (is_3d) {
     TORCH_CHECK(ndim == 4 || ndim == 5, "MaxUnpool3d: expect input to be 4d or 5d tensor.");
     channels = ndim == 4 ? input.size(0) : input.size(0) * input.size(1);
     output_depth = output.size(-3);
@@ -79,7 +79,7 @@ void cpu_max_unpool(
   });
 
   if (optional_error_index) {
-    if (is_3d) {
+    if constexpr (is_3d) {
       AT_ERROR("Found an invalid max index: ", optional_error_index.value(),
           " (output volumes are of size ", output_depth,
           "x", output_height, "x", output_width);

View File

@@ -91,7 +91,9 @@ bool can_use_native_serial_stack_impl(Tensor& result, TensorListType tensors, in
   }
 
   // check remainder of inputs
+#ifndef STRIP_ERROR_MESSAGES
   auto const &first_tensor_shape = first_tensor.sizes();
+#endif
   for (const auto i : c10::irange(1, tensors.size())) {
     auto const &tensor = tensors[i];
     TORCH_CHECK(tensors[i].sizes() == first_tensor.sizes(),

View File

@@ -1793,7 +1793,9 @@ class QConvAddInt8 final {
       const c10::intrusive_ptr<ConvPackedParamsBase<kSpatialDim>>& packed_weight,
       double output_scale,
       int64_t output_zero_point) {
+#if AT_MKLDNN_ENABLED() || !defined(STRIP_ERROR_MESSAGES)
     auto& ctx = at::globalContext();
+#endif
 #if AT_MKLDNN_ENABLED()
     if (ctx.qEngine() == at::QEngine::ONEDNN) {
       if (kReluFused) {
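Here ctx has two independent consumers: the MKLDNN fast path queries the quantization engine, and the fallback error message prints it. The guard is therefore the disjunction of the two conditions, so the variable exists exactly when at least one consumer does. A reduced sketch, with a plain #ifdef feature flag standing in for the 0/1-valued AT_MKLDNN_ENABLED() macro:

    #include <cstdlib>
    #include <iostream>

    int current_engine() { return 1; } // stand-in for ctx.qEngine()

    void dispatch() {
    #if defined(MY_FEATURE) || !defined(STRIP_ERROR_MESSAGES)
      // Live exactly when at least one consumer below is compiled in.
      int engine = current_engine();
    #endif
    #ifdef MY_FEATURE
      if (engine == 1) {
        std::cout << "fast path\n";
        return;
      }
    #endif
    #ifndef STRIP_ERROR_MESSAGES
      std::cerr << "quantized engine " << engine << " not supported\n";
    #endif
      std::abort();
    }

All four combinations of MY_FEATURE and STRIP_ERROR_MESSAGES compile without an unused or undeclared 'engine'.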

View File

@@ -1131,7 +1131,9 @@ class QLinearLeakyReluInt8 final {
       double output_scale,
       int64_t output_zero_point,
       double negative_slope) {
+#if AT_MKLDNN_ENABLED() || !defined(STRIP_ERROR_MESSAGES)
     auto& ctx = at::globalContext();
+#endif
 #if AT_MKLDNN_ENABLED()
     if (ctx.qEngine() == at::QEngine::ONEDNN) {
       return dynamic_cast<PackedLinearWeightsOnednn*>(packed_weight.get())->apply_leaky_relu(
@@ -1153,7 +1155,9 @@ class QLinearTanhInt8 final {
       const c10::intrusive_ptr<LinearPackedParamsBase>& packed_weight,
       double output_scale,
       int64_t output_zero_point) {
+#if AT_MKLDNN_ENABLED() || !defined(STRIP_ERROR_MESSAGES)
     auto& ctx = at::globalContext();
+#endif
 #if AT_MKLDNN_ENABLED()
     if (ctx.qEngine() == at::QEngine::ONEDNN) {
       return dynamic_cast<PackedLinearWeightsOnednn*>(packed_weight.get())->apply_tanh(

View File

@@ -938,7 +938,9 @@ Tensor empty_like_sparse_csr(
 
 template <bool require_view, bool require_copy>
 Tensor select_sparse_csr_worker(const Tensor& self, int64_t dim, int64_t index) {
+#ifndef STRIP_ERROR_MESSAGES
   constexpr const char* select_name = (require_view ? "select()" : "select_copy()");
+#endif
   AT_DISPATCH_ALL_SPARSE_COMPRESSED_LAYOUTS(
       self.layout(), "select", []() { return; });
   TORCH_CHECK_INDEX(

View File

@@ -264,11 +264,13 @@ static optional_variable_list _process_backward_mode_ad(
     const _view_as_self_fn_t& view_as_self_fn) {
   auto num_outputs = raw_outputs.size();
 
+#ifndef STRIP_ERROR_MESSAGES
   const char* error_msg_input_returned_as_is =
       "A input that has been returned as-is as output is being saved for backward. "
       "This is not supported if you override setup_context. You should return and "
       "save a view of the input instead, e.g. with x.view_as(x) or setup ctx inside "
       "the forward function itself.";
+#endif
 
   // Sets the grad_fn and output_nr of an output Variable.
   auto set_history = [&](Variable& var,

View File

@@ -358,8 +358,7 @@ bool InterpreterState::run(Stack& stack) {
           // when STRIP_ERROR_MESSAGES is defined (which happens for production
           // mobile builds). This will cause the stack to be in an inconsistent
           // state. It has previously resulted in a SEV (S22350).
-          const auto& sref = stack.back().toStringRef();
-          TORCH_WARN(sref);
+          TORCH_WARN(stack.back().toStringRef());
          stack.pop_back();
          frame.step();
        } break;
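TORCH_WARN drops its arguments in stripped builds just as TORCH_CHECK does, so binding the string to a named reference first leaves that name unused; passing the expression directly avoids the warning. Note that the side-effecting pop_back() stays outside the macro: as the comment above recounts, code placed inside the arguments vanishes when STRIP_ERROR_MESSAGES is defined, which previously caused a SEV. A minimal sketch with an assumed WARN stand-in:

    #include <iostream>
    #include <string>
    #include <vector>

    #ifdef STRIP_ERROR_MESSAGES
    #define WARN(...) (void)0 // arguments vanish entirely
    #else
    #define WARN(msg) std::cerr << "Warning: " << (msg) << "\n"
    #endif

    void pop_and_warn(std::vector<std::string>& stack) {
      // Passing the expression straight into WARN avoids a named
      // temporary that would be unused in stripped builds. The
      // side-effecting pop_back() stays OUTSIDE the macro, since code
      // inside the arguments is discarded when stripping is enabled.
      WARN(stack.back());
      stack.pop_back();
    }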

View File

@@ -845,9 +845,8 @@ struct InterpreterStateImpl : c10::intrusive_ptr_target {
         }
         stack.pop_back();
       } else {
-        const auto& msg = stack.back().toStringRef();
         if (need_warn) {
-          TORCH_WARN(msg);
+          TORCH_WARN(stack.back().toStringRef());
         }
         stack.pop_back();
       }

View File

@@ -55,12 +55,13 @@ C10_DEFINE_bool(
 namespace torch::jit {
 namespace {
 
+#ifndef STRIP_ERROR_MESSAGES
 std::string iValueToString(const c10::IValue& val) {
   std::ostringstream oss;
   oss << val;
   return oss.str();
 }
+#endif
 
 bool allArgsAreTensors(const Node* node) {
   const auto& inputs = node->inputs();

View File

@@ -44,7 +44,9 @@ static void postSetStateValidate(const IValue& v) {
   const auto& objType = obj->type();
   for (const auto i : c10::irange(objType->numAttributes())) {
     const auto& attrType = objType->getAttribute(i);
+#ifndef STRIP_ERROR_MESSAGES
     const auto& attrName = objType->getAttributeName(i);
+#endif
     const auto& slot = obj->getSlot(i);
     // const auto attrType = objType->getAttribute(i);
     // Verify that all the non-optional attributes have been initialized

View File

@@ -232,7 +232,7 @@ struct UnwindCache {
   const LibraryInfo& libraryFor(uint64_t addr) {
     auto* r = findLibraryFor(addr);
     if (!r) {
-      for (const auto& l : libraries_with_no_unwind_) {
+      for ([[maybe_unused]] const auto& l : libraries_with_no_unwind_) {
        TORCH_WARN("Did not find a PT_GNU_EH_FRAME segment for ", l);
      }
      libraries_with_no_unwind_.clear();
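The loop itself must still run (it precedes the clear()), but in a stripped build TORCH_WARN ignores its arguments and the binding l is never read; [[maybe_unused]] on the range-for declaration keeps both configurations warning-free without deleting the loop. A standalone sketch:

    #include <iostream>
    #include <string>
    #include <vector>

    #ifdef STRIP_ERROR_MESSAGES
    #define WARN(...) (void)0
    #else
    #define WARN(a, b) std::cerr << "Warning: " << (a) << (b) << "\n"
    #endif

    void report_missing(std::vector<std::string>& names) {
      // With warnings stripped, the body is a no-op and 'l' goes unread;
      // [[maybe_unused]] keeps the loop valid in both configurations.
      for ([[maybe_unused]] const auto& l : names) {
        WARN("no unwind info for ", l);
      }
      names.clear();
    }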

View File

@@ -335,11 +335,13 @@ class class_ : public ::torch::detail::class_base {
     // type validation
     auto getstate_schema = classTypePtr->getMethod("__getstate__").getSchema();
+#ifndef STRIP_ERROR_MESSAGES
     auto format_getstate_schema = [&getstate_schema]() {
       std::stringstream ss;
       ss << getstate_schema;
       return ss.str();
     };
+#endif
     TORCH_CHECK(
         getstate_schema.arguments().size() == 1,
         "__getstate__ should take exactly one argument: self. Got: ",