Fix clang-tidy warnings in jit code (#138974)

Fixes #ISSUE_NUMBER

Pull Request resolved: https://github.com/pytorch/pytorch/pull/138974
Approved by: https://github.com/ezyang
Author: cyy
Date: 2024-10-29 04:33:38 +00:00
Committed by: PyTorch MergeBot
Parent: 48b55ca1b1
Commit: 0274d16c01
19 changed files with 43 additions and 47 deletions

View File

@@ -1,6 +1,8 @@
 #pragma once
 #include <c10/util/Exception.h>
+#include <utility>
 namespace c10 {
 class TORCH_API BackendRuntimeException : public c10::Error {
  public:
@@ -9,7 +11,7 @@ class TORCH_API BackendRuntimeException : public c10::Error {
       SourceLocation loc,
       std::string msg,
       int64_t debug_handle)
-      : c10::Error(loc, msg) {
+      : c10::Error(loc, std::move(msg)) {
     debug_handles.push_back(debug_handle);
   }
   // If rethrowing, can push another debug_handle
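The fix above is the sink-parameter idiom that clang-tidy's performance checks push for: a by-value argument whose last use should be a move, not a copy. A minimal self-contained sketch, with hypothetical Error/BackendError stand-ins rather than the real c10 types:

#include <cstdint>
#include <string>
#include <utility>
#include <vector>

// Hypothetical stand-ins for c10::Error / BackendRuntimeException.
struct Error {
  explicit Error(std::string msg) : msg_(std::move(msg)) {}
  std::string msg_;
};

struct BackendError : Error {
  // The string parameter is a sink: taken by value, then moved into the
  // base class. Without std::move, its last use would make a second copy.
  BackendError(std::string msg, std::int64_t debug_handle)
      : Error(std::move(msg)) {
    debug_handles.push_back(debug_handle);
  }
  std::vector<std::int64_t> debug_handles;
};

int main() {
  BackendError e("lowering failed", 42);
  return e.debug_handles.size() == 1 ? 0 : 1;
}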

View File

@@ -5,6 +5,7 @@
 #include <oneapi/dnnl/dnnl_graph.hpp>
 #include <torch/csrc/jit/ir/ir.h>
+#include <utility>
 namespace torch::jit::fuser::onednn {
@@ -42,8 +43,8 @@ struct LlgaTensorDesc {
       desc::data_type dtype,
       desc::property_type property_type)
       : tid_(tid),
-        sizes_(sizes),
-        strides_(strides),
+        sizes_(std::move(sizes)),
+        strides_(std::move(strides)),
         dtype_(dtype),
         property_type_(property_type),
         layout_type_(desc::layout_type::strided),
@@ -221,7 +222,7 @@ struct LlgaTensorDesc {
  private:
   bool is_dimensionality_unknown() const {
-    return sizes_.size() == 0;
+    return sizes_.empty();
   }
   size_t tid_;
@@ -236,7 +237,7 @@ struct LlgaTensorDesc {
   // compute_inplace would be true, and input_tensor_index would be the index of
   // the corresponding input tensor in inputSpecs_ of the LlgaKernel object.
   bool compute_inplace_ = false;
-  size_t input_tensor_index_;
+  size_t input_tensor_index_{};
 };
 // Initially, oneDNN Graph also used to have blocked layout for tensors between
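Two more recurring patterns appear in this file: readability-container-size-empty prefers empty() over size() == 0, and a {} default initializer closes an uninitialized-member hole. A compilable sketch with a hypothetical Desc type:

#include <cstddef>
#include <cstdint>
#include <vector>

struct Desc {
  // readability-container-size-empty: empty() states the intent directly
  // and is O(1) on every standard container.
  bool is_dimensionality_unknown() const {
    return sizes_.empty(); // instead of sizes_.size() == 0
  }

  std::vector<std::int64_t> sizes_;
  bool compute_inplace_ = false;
  // cppcoreguidelines-pro-type-member-init: `{}` value-initializes the
  // index to 0, so a default-constructed Desc never reads indeterminate
  // memory.
  std::size_t input_tensor_index_{};
};

int main() {
  Desc d;
  return (d.is_dimensionality_unknown() && d.input_tensor_index_ == 0) ? 0 : 1;
}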

View File

@@ -126,7 +126,7 @@ std::tuple<RunArgs, RunArgs> LlgaKernel::prepareRunArgs(
   auto numInputs = runArgsIdx_.size();
   for (const auto i : c10::irange(numInputs)) {
     auto spec = inputSpecs_[i];
-    auto input = inputs[runArgsIdx_[i]];
+    const auto& input = inputs[runArgsIdx_[i]];
     runInputs.push_back(
         {spec.logical_tensor(), Engine::getEngine(), input.data_ptr()});
   }
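The prepareRunArgs change swaps a per-iteration copy for a const reference. A standalone illustration, with std::string elements standing in for at::Tensor:

#include <cstddef>
#include <cstdio>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> inputs{"a", "b", "c"};
  std::vector<std::size_t> runArgsIdx{2, 0};

  for (std::size_t i = 0; i < runArgsIdx.size(); ++i) {
    // `auto input = inputs[...]` would copy the element on every
    // iteration; a const reference reads it in place. For at::Tensor the
    // copy is "only" an atomic refcount bump, but clang-tidy still flags
    // it as an unnecessary copy.
    const auto& input = inputs[runArgsIdx[i]];
    std::printf("%s\n", input.c_str());
  }
  return 0;
}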

View File

@@ -339,8 +339,7 @@ void BytecodeDeserializer::parseMethods(
     auto element = std::move(vals[i]);
     auto m_tuple = std::move(element.toTupleRef()).elements();
     const std::string& function_name = m_tuple[0].toStringRef();
-    auto codeTableElements =
-        std::move(std::move(m_tuple[1]).toTupleRef()).elements();
+    auto codeTableElements = std::move(m_tuple[1].toTupleRef()).elements();
     IValue* schemaTable = // older files do not store function schema
         (bytecode_version_ > 0x4L ||
          (bytecode_version_ == 0x4L && m_tuple.size() >= 3))
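The deserializer fix drops a redundant inner std::move: one move on the tuple reference is all it takes to select an rvalue-qualified elements() overload. A sketch with a hypothetical Tuple type, modeled loosely on how an IValue-style wrapper exposes rvalue access:

#include <string>
#include <utility>
#include <vector>

// Hypothetical wrapper whose elements() has an rvalue overload that lets
// callers steal the storage.
struct Tuple {
  std::vector<std::string> elems;
  const std::vector<std::string>& elements() const& { return elems; }
  std::vector<std::string>&& elements() && { return std::move(elems); }
};

int main() {
  Tuple t{{"x", "y"}};
  // One std::move is enough to select the && overload; wrapping the whole
  // expression in a second std::move (as the old code did) adds nothing.
  auto stolen = std::move(t).elements();
  return stolen.size() == 2 ? 0 : 1;
}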

View File

@@ -196,7 +196,7 @@ c10::IValue Function::serialize() const {
 }
 void Function::init_execution_state() const {
-  if (execution_state_.get() != nullptr) {
+  if (execution_state_ != nullptr) {
     return;
   }
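init_execution_state now compares the smart pointer itself, per readability-redundant-smartptr-get; the same rule drives the *graph.get() and decomposition->get() cleanups later in this commit. A sketch with hypothetical Holder/State types:

#include <memory>

struct State {};

struct Holder {
  mutable std::shared_ptr<State> state_;

  void init() const {
    // readability-redundant-smartptr-get: smart pointers compare against
    // nullptr and dereference directly; spelling out .get() is noise.
    if (state_ != nullptr) { // not: state_.get() != nullptr
      return;
    }
    state_ = std::make_shared<State>();
  }
};

int main() {
  Holder h;
  h.init();
  // Dereferencing follows the same rule: *h.state_, not *h.state_.get().
  State& s = *h.state_;
  (void)s;
  return h.state_ != nullptr ? 0 : 1;
}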

View File

@@ -85,7 +85,7 @@ void merge_sets(
 }
 // no uses of tensors in container types
-void assertNonTensorTypeDoesNotContainTensors(TypePtr type) {
+void assertNonTensorTypeDoesNotContainTensors(const TypePtr& type) {
   if (type->cast<TensorType>()) {
     return;
   }
@@ -94,7 +94,7 @@ void assertNonTensorTypeDoesNotContainTensors(TypePtr type) {
   }
 }
-void InplaceMKLDNNSubgraph(std::shared_ptr<Graph> graph) {
+void InplaceMKLDNNSubgraph(const std::shared_ptr<Graph>& graph) {
   // This function first calculates aliasing sets,
   // then calculates the last node each aliasing set is alive for.
   // Then we go through each node, if it's a node which has an equivalent
@@ -234,7 +234,7 @@ void InplaceMKLDNNSubgraph(std::shared_ptr<Graph> graph) {
 // innermost dimension is padded with 0s. The precondition, `aten_op(0) == 0`
 // allows us to avoid any special casing of padded elements.
 Operation createUnaryOp(
-    std::function<void(at::Tensor output, at::Tensor input)> aten_op,
+    const std::function<void(at::Tensor output, at::Tensor input)>& aten_op,
     bool inplace = false) {
   return [aten_op, inplace](Stack& stack) {
     auto a = pop(stack).toTensor();
@@ -395,7 +395,7 @@ static std::function<void(at::Tensor output, at::Tensor input)> hardtanh_helper(
     const Node* n) {
   auto min_val = n->f(attr::min_val);
   auto max_val = n->f(attr::max_val);
-  return [min_val, max_val](at::Tensor output, at::Tensor input) {
+  return [min_val, max_val](at::Tensor output, const at::Tensor& input) {
     at::cpu::hardtanh_out(output, input, min_val, max_val);
   };
 }
@@ -404,7 +404,7 @@ static std::function<void(at::Tensor output, at::Tensor input)> clamp_helper(
     const Node* n) {
   auto min_val = n->f(attr::min_val);
   auto max_val = n->f(attr::max_val);
-  return [min_val, max_val](at::Tensor output, at::Tensor input) {
+  return [min_val, max_val](at::Tensor output, const at::Tensor& input) {
     at::cpu::clamp_out(output, input, min_val, max_val);
   };
 }
@@ -415,7 +415,7 @@ const RegisterOperators MKLDNNHardSwishOpReg({
     torch::jit::Operator(
         "prim::MKLDNNHardSwish_(Tensor(a!) self) -> Tensor(a!)",
         createUnaryOp(
-            [](at::Tensor output, at::Tensor input) {
+            [](at::Tensor output, const at::Tensor& input) {
               at::cpu::hardswish_out(output, input);
             },
             true),
@@ -423,7 +423,7 @@ const RegisterOperators MKLDNNHardSwishOpReg({
     torch::jit::Operator(
         "prim::MKLDNNHardSigmoid_(Tensor(a!) self) -> Tensor(a!)",
         createUnaryOp(
-            [](at::Tensor output, at::Tensor input) {
+            [](at::Tensor output, const at::Tensor& input) {
              at::cpu::hardsigmoid_out(output, input);
             },
             true),
@@ -443,7 +443,7 @@ const RegisterOperators MKLDNNHardSwishOpReg({
     torch::jit::Operator(
         "prim::MKLDNNHardSwish(Tensor a) -> Tensor",
         createUnaryOp(
-            [](at::Tensor output, at::Tensor input) {
+            [](at::Tensor output, const at::Tensor& input) {
              at::cpu::hardswish_out(output, input);
             },
             false),
@@ -451,7 +451,7 @@ const RegisterOperators MKLDNNHardSwishOpReg({
     torch::jit::Operator(
         "prim::MKLDNNHardSigmoid(Tensor a) -> Tensor",
        createUnaryOp(
-            [](at::Tensor output, at::Tensor input) {
+            [](at::Tensor output, const at::Tensor& input) {
              at::cpu::hardsigmoid_out(output, input);
             },
             false),
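This file applies two related fixes: std::function parameters become const references (copying a std::function can allocate), and read-only lambda parameters become const Tensor&. A compilable sketch, with std::vector<float> as a hypothetical stand-in for at::Tensor (the real type is a refcounted handle, so a by-value parameter costs an atomic refcount bump):

#include <functional>
#include <vector>

using Tensor = std::vector<float>;

using UnaryFn = std::function<void(Tensor& output, const Tensor& input)>;

// Taking the std::function by const reference avoids copying its
// (potentially heap-allocated) callable state on every call.
UnaryFn wrapUnary(const UnaryFn& op) {
  return [op](Tensor& out, const Tensor& in) {
    op(out, in); // input is read-only, so const Tensor& is enough
  };
}

int main() {
  auto relu = wrapUnary([](Tensor& out, const Tensor& in) {
    out.clear();
    for (float x : in) {
      out.push_back(x > 0 ? x : 0);
    }
  });
  Tensor in{-1.f, 2.f};
  Tensor out;
  relu(out, in);
  return (out[0] == 0.f && out[1] == 2.f) ? 0 : 1;
}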

View File

@@ -7,7 +7,7 @@ namespace torch::jit::onnx {
 namespace ONNXScopeName {
-using NameFunc = std::string (*)(torch::jit::ScopePtr scope);
+using NameFunc = std::string (*)(const torch::jit::ScopePtr& scope);
 const std::string name_separator = "::";
@@ -48,7 +48,7 @@ std::string createFullScopeName(
   return std::string(class_name).append(name_separator).append(variable_name);
 }
-std::string variableName(torch::jit::ScopePtr scope) {
+std::string variableName(const torch::jit::ScopePtr& scope) {
   return parseNameFromScope(scope).second;
 }
@@ -58,7 +58,7 @@ std::string variableNameFromRoot(
   return nameFromRoot(scope, layer_separator, &variableName);
 }
-std::string className(torch::jit::ScopePtr scope) {
+std::string className(const torch::jit::ScopePtr& scope) {
   return parseNameFromScope(scope).first;
 }
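ScopePtr is an intrusive refcounted pointer in PyTorch, so the NameFunc alias now threads it through by const reference, skipping a refcount bump per call; note the alias and every function assigned to it must change together. A sketch using std::shared_ptr as a hypothetical stand-in:

#include <memory>
#include <string>

struct Scope {
  std::string name;
};
// Stand-in for torch::jit::ScopePtr (an intrusive_ptr in PyTorch).
using ScopePtr = std::shared_ptr<Scope>;

// The alias fixes the convention for every function assigned to it:
// by-value would copy the pointer (an atomic refcount increment) on each
// call; const& passes it through untouched.
using NameFunc = std::string (*)(const ScopePtr& scope);

std::string variableName(const ScopePtr& scope) {
  return scope->name;
}

int main() {
  NameFunc f = &variableName;
  auto s = std::make_shared<Scope>(Scope{"conv1"});
  return f(s) == "conv1" ? 0 : 1;
}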

View File

@@ -9,11 +9,11 @@ namespace ONNXScopeName {
 std::string createFullScopeName(
     const std::string& class_name,
     const std::string& variable_name);
-std::string variableName(torch::jit::ScopePtr scope);
+std::string variableName(const torch::jit::ScopePtr& scope);
 std::string variableNameFromRoot(
     const torch::jit::ScopePtr& scope,
     const std::string& layer_separator);
-std::string className(torch::jit::ScopePtr scope);
+std::string className(const torch::jit::ScopePtr& scope);
 std::string classNameFromRoot(
     const torch::jit::ScopePtr& scope,
     const std::string& layer_separator);

View File

@@ -6,7 +6,6 @@
 #include <torch/csrc/jit/frontend/error_report.h>
 #include <torch/csrc/jit/jit_log.h>
 #include <torch/csrc/jit/passes/dead_code_elimination.h>
-#include <torch/csrc/jit/passes/onnx/helper.h>
 #include <torch/csrc/jit/passes/onnx/pattern_conversion/pattern_encapsulation.h>
 #include <c10/util/irange.h>

View File

@@ -25,7 +25,9 @@ std::string getExtraArgList(std::vector<std::string> extra_args) {
       extra_args.begin(),
       extra_args.end(),
       std::string(),
-      [](std::string acc, const std::string& arg) { return acc + ", " + arg; });
+      [](const std::string& acc, const std::string& arg) {
+        return acc + ", " + arg;
+      });
 }
 // Get the pattern we want to replace the match with
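The accumulate lambda now takes its accumulator by const reference, saving one string copy per element; std::accumulate already owns an accumulated value it can pass in. A self-contained version of the same helper (joinArgs is a hypothetical name):

#include <numeric>
#include <string>
#include <vector>

// Builds ", arg0, arg1, ..." like the pass above.
std::string joinArgs(const std::vector<std::string>& extra_args) {
  return std::accumulate(
      extra_args.begin(),
      extra_args.end(),
      std::string(),
      [](const std::string& acc, const std::string& arg) {
        return acc + ", " + arg;
      });
}

int main() {
  return joinArgs({"x", "y"}) == ", x, y" ? 0 : 1;
}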

View File

@@ -1732,7 +1732,7 @@ void initJITBindings(PyObject* module) {
         bool allow_numbers_as_tensors = opAllowsNumbersAsTensors(symbol);
         ToIValueAllowNumbersAsTensors g(allow_numbers_as_tensors);
         const auto overloads = getAllSortedOperatorsFor(symbol);
-        auto opWithStack = getOpWithStack(overloads, std::move(args), kwargs);
+        auto opWithStack = getOpWithStack(overloads, args, kwargs);
         std::shared_ptr<Operator> overload = std::get<0>(opWithStack);
         auto result = overload->schema().overload_name();
         if (result.empty()) {
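Here a std::move was removed rather than added: assuming getOpWithStack takes args by const reference (which the fix suggests), the move never fired and only implied, wrongly, that args was dead afterwards. A minimal illustration with a hypothetical callee:

#include <cstddef>
#include <string>
#include <vector>

// Hypothetical callee that only reads its argument.
std::size_t countArgs(const std::vector<std::string>& args) {
  return args.size();
}

int main() {
  std::vector<std::string> args{"self", "other"};
  // clang-tidy performance-move-const-arg: std::move(args) here would not
  // move anything (the parameter binds a const reference); it would only
  // mislead readers into thinking `args` is dead. Drop the move.
  auto n = countArgs(args);
  return (n == 2 && args.size() == 2) ? 0 : 1;
}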

View File

@@ -48,7 +48,6 @@
 #include <torch/csrc/jit/runtime/instruction.h>
 #include <torch/csrc/jit/runtime/interpreter.h>
 #include <torch/csrc/jit/runtime/logging.h>
-#include <torch/csrc/jit/serialization/export_bytecode.h>
 #include <torch/csrc/jit/serialization/import_source.h>
 #include <torch/csrc/jit/serialization/pickle.h>
 #include <torch/csrc/jit/serialization/python_print.h>

View File

@@ -10,9 +10,7 @@
 #include <c10/util/Exception.h>
 #include <torch/csrc/autograd/jit_decomp_interface.h>
 #include <torch/csrc/jit/ir/ir.h>
 #include <torch/csrc/jit/passes/constant_propagation.h>
 #include <torch/csrc/jit/passes/inliner.h>
 #include <torch/csrc/jit/passes/peephole.h>
 #include <torch/csrc/jit/runtime/graph_executor.h>
 #include <memory>
 #include <unordered_map>
@@ -79,8 +77,7 @@ static void DecomposeOp(Node* n) {
     return;
   }
   WithInsertPoint guard(n);
-  auto outputs =
-      insertGraph(*n->owningGraph(), *decomposition->get(), n->inputs());
+  auto outputs = insertGraph(*n->owningGraph(), **decomposition, n->inputs());
   TORCH_INTERNAL_ASSERT(outputs.size() == n->outputs().size());
   for (size_t i : c10::irange(outputs.size())) {
     n->outputs().at(i)->replaceAllUsesWith(outputs[i]);
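The **decomposition spelling reads naturally once the type is clear: assuming decomposition is an optional smart pointer (which the double dereference implies), the first * unwraps the optional and the second dereferences the pointer, with no need to route through ->get(). Sketch:

#include <memory>
#include <optional>

struct Graph {
  int nodes = 3;
};

int main() {
  std::optional<std::shared_ptr<Graph>> decomposition =
      std::make_shared<Graph>();
  // *decomposition yields the shared_ptr; the second * dereferences it.
  // Spelling it *decomposition->get() goes through the raw pointer for no
  // benefit (readability-redundant-smartptr-get).
  Graph& g = **decomposition;
  return g.nodes == 3 ? 0 : 1;
}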

View File

@@ -2,6 +2,7 @@
 #include <memory>
 #include <unordered_map>
+#include <utility>
 #include <vector>
 #include <c10/util/irange.h>
@@ -945,7 +946,11 @@ struct MobileCodeImpl : CodeImpl {
       bool support_default_args_before_out,
       bool emit_promoted_ops,
       size_t remaining_bailout_depth)
-      : CodeImpl(graph, function_name, remaining_bailout_depth, false),
+      : CodeImpl(
+            graph,
+            std::move(function_name),
+            remaining_bailout_depth,
+            false),
         emit_default_input_instructions_(emit_default_input_instructions),
         support_default_args_before_out_(support_default_args_before_out),
         emit_promoted_ops_(emit_promoted_ops) {

View File

@@ -209,6 +209,6 @@ PreprocessGraph::PreprocessGraph(Graph& g) : graph(g.copy()) {
   dropUnused(graph->block());
   // fill in move_flags by scanning blocks;
   insertLastUses(*graph);
-  can_emit_inline = std::move(CanEmitInline(*graph.get()).can_emit_inline_);
+  can_emit_inline = std::move(CanEmitInline(*graph).can_emit_inline_);
 }
 } // namespace torch::jit::interpreter

View File

@@ -46,8 +46,8 @@ class TORCH_API SourceStats : public CustomClassHolder {
  public:
   using LineMap = c10::Dict<int64_t, c10::intrusive_ptr<InstructionStats>>;
-  SourceStats(SourceRef source, LineMap lineMap)
-      : source_(std::move(source)), lineMap_(std::move(lineMap)) {}
+  SourceStats(SourceRef source, const LineMap& lineMap)
+      : source_(std::move(source)), lineMap_(lineMap) {}
   const SourceRef& getSourceRef() const {
     return source_;
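SourceStats moves in the opposite direction from most of this commit: c10::Dict is a shared handle whose copy is a shallow refcount bump, so a const reference plus one cheap copy reads more simply than the value-and-move ceremony. A hedged sketch with shared_ptr stand-ins for both types:

#include <memory>
#include <string>
#include <utility>

// Stand-ins: SourceRef owns an expensive resource, LineMap is a shared
// handle (like c10::Dict) whose copy is a shallow refcount bump.
using SourceRef = std::shared_ptr<const std::string>;
using LineMap = std::shared_ptr<int>;

class SourceStats {
 public:
  // `source` is a sink: take it by value and move it into place.
  // `lineMap` is cheap to copy, so const& plus one shallow copy in the
  // initializer is as fast as value-and-move and simpler to read.
  SourceStats(SourceRef source, const LineMap& lineMap)
      : source_(std::move(source)), lineMap_(lineMap) {}

 private:
  SourceRef source_;
  LineMap lineMap_;
};

int main() {
  SourceStats stats(
      std::make_shared<const std::string>("def f(): ..."),
      std::make_shared<int>(7));
  (void)stats;
  return 0;
}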

View File

@@ -859,12 +859,9 @@ class TORCH_API Intrinsics : public ExprNode<Intrinsics> {
     }
   }
-  Intrinsics(
-      IntrinsicsOp op_type,
-      Dtype dtype,
-      const std::vector<ExprPtr>& params)
+  Intrinsics(IntrinsicsOp op_type, Dtype dtype, std::vector<ExprPtr> params)
       : ExprNodeBase(IntrinsicsDtype(op_type, dtype)),
-        params_(params),
+        params_(std::move(params)),
         op_type_(op_type) {
     if (OpArgCount(op_type) != nparams()) {
       throw malformed_input("bad arg count in Intrinsics");

View File

@@ -25,11 +25,6 @@
 #include <torch/csrc/jit/tensorexpr/ir_verifier.h>
 #include <torch/csrc/jit/tensorexpr/tensor.h>
-#include <stdexcept>
-#include <unordered_map>
-#include <unordered_set>
-#include <vector>
 namespace torch::jit::tensorexpr {
 LoopNest::LoopNest(const LoopNest& other)

View File

@@ -7,14 +7,14 @@ namespace torch::jit {
 static ModuleHook emit_module_callback;
 void didFinishEmitModule(Module module) {
   if (emit_module_callback) {
-    emit_module_callback(module);
+    emit_module_callback(std::move(module));
   }
 }
 static FunctionHook emit_function_callback;
 void didFinishEmitFunction(StrongFunctionPtr fn) {
   if (emit_function_callback) {
-    emit_function_callback(fn);
+    emit_function_callback(std::move(fn));
   }
 }
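The emit hooks now move their by-value parameters into the callback, which is their last use; Module and StrongFunctionPtr are refcounted handles in PyTorch, so this trades a refcount bump for a pointer steal. A sketch with a hypothetical string-based hook standing in for the real types:

#include <functional>
#include <string>
#include <utility>

using ModuleHook = std::function<void(std::string)>;
static ModuleHook emit_module_callback;

void didFinishEmitModule(std::string module_name) {
  if (emit_module_callback) {
    // Last use of the parameter: hand the callback our copy instead of
    // making a second one.
    emit_module_callback(std::move(module_name));
  }
}

int main() {
  std::string seen;
  emit_module_callback = [&seen](std::string m) { seen = std::move(m); };
  didFinishEmitModule("my_module");
  return seen == "my_module" ? 0 : 1;
}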