remove useless clang-tidy suppression (#92287)

remove NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
remove NOLINTNEXTLINE(performance-move-const-arg)
remove NOLINTNEXTLINE(performance-no-automatic-move)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/92287
Approved by: https://github.com/albanD
authored by cyy on 2023-01-21 02:33:24 +00:00
committed by PyTorch MergeBot
parent 5489b32337
commit 85851b1e8f
23 changed files with 10 additions and 61 deletions
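
For context on the checks named in the commit message, here is a hedged illustration of performance-move-const-arg; the Params struct and make_params functions below are hypothetical, not code from the PyTorch tree. The check fires when std::move is applied to a const object or to a trivially copyable value, because the cast cannot enable a real move, so a NOLINT that silences it only hides a no-op.

#include <utility>

// Hypothetical example: performance-move-const-arg flags both moves below,
// since neither can transfer any resources.
struct Params {
  double scale;     // trivially copyable
  int zero_point;   // trivially copyable
};

Params make_params(double scale, int zero_point) {
  // warning: std::move of a trivially copyable argument performs a plain copy
  return Params{std::move(scale), std::move(zero_point)};
}

Params make_params_clean(double scale, int zero_point) {
  return Params{scale, zero_point};  // equivalent and warning-free
}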

View File

@@ -277,7 +277,7 @@ std::vector<Dimname> compute_diagonal_outnames(
 static void check_feature_names_are_distinct(
 DimnameList self_names,
 DimnameList other_names,
-DimnameList outnames) {
+const DimnameList& outnames) {
 if (self_names.size() < 2 || other_names.size() < 2) {
 // There are less than 2 feature dims in outnames so there is nothing to check
 return;
@@ -335,10 +335,9 @@ static std::vector<Dimname> compute_matmul_outnames(
 if (other_names.size() >= 2) {
 working_names.append(TensorName(other_names, -1));
 }
-const auto result = working_names.toDimnameVec();
+auto result = working_names.toDimnameVec();
 check_feature_names_are_distinct(self_names, other_names, result);
-// NOLINTNEXTLINE(performance-no-automatic-move)
 return result;
 }
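
The const-to-non-const change above is what makes the performance-no-automatic-move suppression unnecessary. A minimal sketch of the rule, using generic std::string code rather than the Dimname types from this file:

#include <string>
#include <vector>

// When the copy cannot be elided, a const local is copied on return because
// it cannot be implicitly moved; dropping const restores the implicit move.
std::vector<std::string> copy_forced() {
  const std::vector<std::string> result{"a", "b"};
  return result;  // performance-no-automatic-move: copies if elision fails
}

std::vector<std::string> move_enabled() {
  std::vector<std::string> result{"a", "b"};
  return result;  // implicitly moved (or elided); no suppression needed
}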

View File

@@ -83,7 +83,6 @@ void launch(std::function<void()> func) {
 // NOLINTNEXTLINE(modernize-avoid-bind)
 internal::launch_no_thread_state(std::bind([](
 std::function<void()> f, ThreadLocalState thread_locals) {
-// NOLINTNEXTLINE(performance-move-const-arg)
 ThreadLocalStateGuard guard(std::move(thread_locals));
 f();
 },

View File

@@ -260,7 +260,6 @@ RegistrationHandleRAII Dispatcher::registerImpl(
 *this,
 dispatch_key,
 std::move(kernel),
-// NOLINTNEXTLINE(performance-move-const-arg)
 std::move(cpp_signature),
 std::move(inferred_function_schema),
 std::move(debug)

View File

@@ -42,7 +42,6 @@ namespace {
 CppFunction::CppFunction(c10::KernelFunction func, c10::optional<c10::impl::CppSignature> cpp_signature, std::unique_ptr<c10::FunctionSchema> schema)
 : func_(std::move(func))
-// NOLINTNEXTLINE(performance-move-const-arg)
 , cpp_signature_(std::move(cpp_signature))
 , schema_(std::move(schema))
 , debug_()
@@ -171,7 +170,6 @@ Library& Library::_def(c10::either<c10::OperatorName, c10::FunctionSchema>&& nam
 std::move(name),
 dispatch_key,
 std::move(f.func_),
-// NOLINTNEXTLINE(performance-move-const-arg)
 std::move(f.cpp_signature_),
 std::move(f.schema_),
 debugString(std::move(f.debug_), file_, line_)
@@ -223,7 +221,6 @@ Library& Library::_impl(const char* name_str, CppFunction&& f, _RegisterOrVerify
 std::move(name),
 dispatch_key,
 std::move(f.func_),
-// NOLINTNEXTLINE(performance-move-const-arg)
 std::move(f.cpp_signature_),
 std::move(f.schema_),
 debugString(std::move(f.debug_), file_, line_)

View File

@@ -103,7 +103,6 @@ void RegisterOperators::registerOp_(Options&& options) {
 for (auto& kernel : options.kernels) {
 registrars_.emplace_back(
-// NOLINTNEXTLINE(performance-move-const-arg)
 Dispatcher::singleton().registerImpl(op_name, kernel.dispatch_key, std::move(kernel.func), std::move(kernel.cpp_signature), std::move(kernel.inferred_function_schema), "registered by RegisterOperators")
 );
 }

View File

@@ -301,14 +301,10 @@ struct QuantizedCellParams : public CellParamsBase {
 /*packed_hh=*/std::move(packed_hh),
 /*col_offsets_ih=*/std::move(col_offsets_ih),
 /*col_offsets_hh=*/std::move(col_offsets_hh),
-// NOLINTNEXTLINE(performance-move-const-arg)
-/*scale_ih=*/std::move(scale_ih),
-// NOLINTNEXTLINE(performance-move-const-arg)
-/*scale_hh=*/std::move(scale_hh),
-// NOLINTNEXTLINE(performance-move-const-arg)
-/*zero_point_ih=*/std::move(zero_point_ih),
-// NOLINTNEXTLINE(performance-move-const-arg)
-/*zero_point_hh=*/std::move(zero_point_hh));
+/*scale_ih=*/scale_ih,
+/*scale_hh=*/scale_hh,
+/*zero_point_ih=*/zero_point_ih,
+/*zero_point_hh=*/zero_point_hh);
 }
 };
@@ -342,13 +338,9 @@ c10::intrusive_ptr<CellParamsBase> make_quantized_cell_params(
 /*packed_hh=*/std::move(packed_hh),
 /*col_offsets_ih=*/std::move(col_offsets_ih),
 /*col_offsets_hh=*/std::move(col_offsets_hh),
-// NOLINTNEXTLINE(performance-move-const-arg)
 /*scale_ih=*/std::move(scale_ih),
-// NOLINTNEXTLINE(performance-move-const-arg)
 /*scale_hh=*/std::move(scale_hh),
-// NOLINTNEXTLINE(performance-move-const-arg)
 /*zero_point_ih=*/std::move(zero_point_ih),
-// NOLINTNEXTLINE(performance-move-const-arg)
 /*zero_point_hh=*/std::move(zero_point_hh));
 }
@@ -427,11 +419,9 @@ struct QuantizedCellParamsDynamic : public CellParamsBase {
 // reduce_range parameter is serialized along with the int field values.
 return CellParamsSerializationType(
 "quantized_dynamic",
-// NOLINTNEXTLINE(performance-move-const-arg)
 std::move(tensors_to_serialize),
 {},
 {reduce_range_},
-// NOLINTNEXTLINE(performance-move-const-arg)
 std::move(packed_params_to_serialize));
 }
 static c10::intrusive_ptr<CellParamsBase> __setstate__(
@@ -508,7 +498,6 @@ struct QuantizedCellParamsFP16 : public CellParamsBase {
 packed_params_to_serialize{packed_ih, packed_hh};
 return CellParamsSerializationType(
-// NOLINTNEXTLINE(performance-move-const-arg)
 "quantized_fp16", {}, {}, {}, std::move(packed_params_to_serialize));
 }
 static c10::intrusive_ptr<CellParamsBase> __setstate__(

View File

@@ -167,9 +167,10 @@ ScalarType result_type(ITensorListRef tensors) {
 }
 ScalarType result_type(const Tensor &tensor, const Tensor &other) {
-// NOLINTNEXTLINE(performance-move-const-arg)
-std::vector<Tensor> tensors{std::move(tensor), std::move(other)};
-return native::result_type(tensors);
+ResultTypeState state = {};
+state = update_result_type_state(tensor, state);
+state = update_result_type_state(other, state);
+return result_type(state);
 }
 ScalarType result_type(const Tensor &tensor, const Scalar& other) {
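
In the hunk above, tensor and other are const references, so the old std::move calls could never actually move; the replacement feeds both tensors through update_result_type_state and reads the promoted ScalarType from the resulting state, skipping the temporary vector entirely. A generic sketch of why moving from a const lvalue silently copies, with std::string standing in for at::Tensor:

#include <string>
#include <utility>
#include <vector>

// std::move(a) yields const std::string&&, which cannot bind to the move
// constructor, so overload resolution falls back to the copy constructor.
std::vector<std::string> pack(const std::string& a, const std::string& b) {
  return {std::move(a), std::move(b)};  // both elements are copied anyway
}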

View File

@@ -27,11 +27,8 @@ TORCH_LIBRARY(mkldnn, m) {
 std::move(std::get<2>(state)),
 std::move(std::get<3>(state)),
 std::move(std::get<4>(state)),
-// NOLINTNEXTLINE(performance-move-const-arg,cppcoreguidelines-avoid-magic-numbers)
 std::move(std::get<5>(state)),
-// NOLINTNEXTLINE(performance-move-const-arg,cppcoreguidelines-avoid-magic-numbers)
 std::move(std::get<6>(state)),
-// NOLINTNEXTLINE(performance-move-const-arg,cppcoreguidelines-avoid-magic-numbers)
 std::move(std::get<7>(state)));
 });

View File

@@ -27,9 +27,7 @@ TORCH_LIBRARY(xnnpack, m) {
 return createLinearClampPrePackOpContext(
 std::move(std::get<0>(state)),
 std::move(std::get<1>(state)),
-// NOLINTNEXTLINE(performance-move-const-arg)
 std::move(std::get<2>(state)),
-// NOLINTNEXTLINE(performance-move-const-arg)
 std::move(std::get<3>(state)));
 });
@@ -47,11 +45,8 @@ TORCH_LIBRARY(xnnpack, m) {
 std::move(std::get<2>(state)),
 std::move(std::get<3>(state)),
 std::move(std::get<4>(state)),
-// NOLINTNEXTLINE(performance-move-const-arg,cppcoreguidelines-avoid-magic-numbers)
 std::move(std::get<5>(state)),
-// NOLINTNEXTLINE(performance-move-const-arg,cppcoreguidelines-avoid-magic-numbers)
 std::move(std::get<6>(state)),
-// NOLINTNEXTLINE(performance-move-const-arg,cppcoreguidelines-avoid-magic-numbers)
 std::move(std::get<7>(state)));
 });
@@ -70,11 +65,8 @@ TORCH_LIBRARY(xnnpack, m) {
 std::move(std::get<3>(state)),
 std::move(std::get<4>(state)),
 std::move(std::get<5>(state)),
-// NOLINTNEXTLINE(performance-move-const-arg,cppcoreguidelines-avoid-magic-numbers)
 std::move(std::get<6>(state)),
-// NOLINTNEXTLINE(performance-move-const-arg,cppcoreguidelines-avoid-magic-numbers)
 std::move(std::get<7>(state)),
-// NOLINTNEXTLINE(performance-move-const-arg,cppcoreguidelines-avoid-magic-numbers)
 std::move(std::get<8>(state)));
 });

View File

@@ -645,13 +645,11 @@ bool hasThreadLocalCallbacks() {
 CallbackHandle addThreadLocalCallback(
 RecordFunctionCallback cb) {
-// NOLINTNEXTLINE(performance-move-const-arg)
 return LocalCallbackManager::get().addCallback(std::move(cb));
 }
 CallbackHandle addGlobalCallback(
 RecordFunctionCallback cb) {
-// NOLINTNEXTLINE(performance-move-const-arg)
 return GlobalCallbackManager::get().addCallback(std::move(cb));
 }

View File

@@ -430,7 +430,6 @@ void ThresholdImpl::pretty_print(std::ostream& stream) const {
 // ============================================================================
-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 MultiheadAttentionImpl::MultiheadAttentionImpl(
 const MultiheadAttentionOptions& options_)
 : Module("torch::nn::MultiheadAttention"), options(options_) {
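
From here on the hunks mostly drop cppcoreguidelines-pro-type-member-init suppressions. A hedged sketch with hypothetical types of when that check fires and when it stays silent, which is presumably why these NOLINT lines were judged useless:

// cppcoreguidelines-pro-type-member-init warns when a constructor can leave a
// member in an indeterminate state.
struct Flagged {
  int count;    // never initialized by the constructor below
  Flagged() {}  // warning: count is left indeterminate
};

// No warning once every member is covered by a default member initializer or
// by the constructor's init list.
struct Clean {
  int count = 0;
  bool ready;
  explicit Clean(bool r) : ready(r) {}
};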

View File

@@ -173,7 +173,6 @@ struct TORCH_API VariableInfo {
 // backward function for Function<T>. Calls to CppNode::apply are forward to
 // T::backward().
 template <class T>
-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 struct CppNode : public Node {
 variable_list apply(variable_list&& inputs) override;
 AutogradContext ctx_;

View File

@@ -254,7 +254,6 @@ struct TORCH_API Engine {
 // allocated inside Engine::execute and lives for the duration of execute
 std::queue<std::weak_ptr<GraphTask>> graphtasks_queue_;
-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 ThreadPoolShared() : num_workers_(0) {}
 };

View File

@@ -113,7 +113,6 @@ TORCH_API std::shared_ptr<Node> get_current_node();
 struct TORCH_API Node : std::enable_shared_from_this<Node> {
 public:
 /// Construct a new `Node` with the given `next_edges`
-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 explicit Node(uint64_t sequence_nr, edge_list&& next_edges = edge_list())
 : sequence_nr_(sequence_nr), next_edges_(std::move(next_edges)) {
 for (const Edge& edge : next_edges_) {
@@ -135,7 +134,6 @@ struct TORCH_API Node : std::enable_shared_from_this<Node> {
 thread_id_ = at::RecordFunction::currentThreadId();
 }
-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 explicit Node(edge_list&& next_edges = edge_list())
 : Node(
 /*sequence_nr=*/at::sequence_number::get_and_increment(),
@@ -659,7 +657,6 @@ struct TraceableFunction : public Node {
 namespace detail {
 // Implementation of `collect_next_edges` (see below).
-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 struct MakeNextFunctionList : IterArgs<MakeNextFunctionList> {
 edge_list next_edges;
 using IterArgs<MakeNextFunctionList>::operator();

View File

@@ -69,7 +69,6 @@ struct TORCH_API UndefinedGradBackward : public Node {
 };
 struct TORCH_API GraphRoot : public Node {
-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 GraphRoot(edge_list functions, variable_list inputs)
 : Node(std::move(functions)), outputs(std::move(inputs)) {
 // Ensures calls to stream() on a GraphRoot instance reflect current

View File

@@ -16,7 +16,6 @@
 namespace torch {
 namespace autograd {
-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 Scatter::Scatter(
 std::vector<at::Device> devices,
 c10::optional<std::vector<int64_t>> chunk_sizes,
@@ -95,10 +94,8 @@ variable_list Gather::apply(variable_list&& inputs) {
 std::shared_ptr<Node> grad_fn;
 // compute this before moving variables from `inputs`
 if (compute_requires_grad(inputs)) {
-// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
 std::vector<at::Device> source_devices;
 source_devices.reserve(inputs.size());
-// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
 std::vector<int64_t> input_sizes;
 input_sizes.reserve(inputs.size());
 for (auto& input : inputs) {
@@ -114,7 +111,6 @@ variable_list Gather::apply(variable_list&& inputs) {
 grad_fn->set_next_edges(collect_next_edges(inputs));
 }
-// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
 std::vector<at::Tensor> tensors;
 tensors.reserve(inputs.size());
 for (auto& variable : inputs) {

View File

@@ -49,13 +49,11 @@ struct GraphTask : std::enable_shared_from_this<GraphTask> {
 // executed through .grad(), or when inputs arg is specified for .backward(),
 // exec_info will be non-empty.
 //
-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 struct ExecInfo {
 struct Capture {
 Capture(const Capture&) = delete;
 Capture(Capture&&) = default;
-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 Capture(int input_idx, int output_idx)
 : input_idx_(input_idx), output_idx_(output_idx) {}
 int input_idx_; // within Node inputs
@@ -195,7 +193,6 @@ struct GraphTask : std::enable_shared_from_this<GraphTask> {
 uint64_t id_;
-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 GraphTask(
 bool keep_graph,
 bool grad_mode,

View File

@@ -17,11 +17,9 @@ namespace torch {
 namespace autograd {
 struct InputBuffer {
-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 explicit InputBuffer(size_t size) : buffer(size) {}
 InputBuffer(const InputBuffer& other) = delete;
 InputBuffer(InputBuffer&& other) = default;
-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 explicit InputBuffer(variable_list&& inputs) : buffer(std::move(inputs)){};
 InputBuffer& operator=(InputBuffer&& other) = default;

View File

@@ -27,7 +27,6 @@ namespace autograd {
 // A Function which is implemented by a Python object (i.e., a THPFunction).
 // Calls to 'apply' are forwarded to the Python method implementation.
 struct PyNode : public Node {
-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 PyNode(THPObjectPtr obj) : obj(obj.release()) {}
 variable_list apply(variable_list&& inputs) override;

View File

@@ -14,7 +14,6 @@
 namespace py = pybind11;
 // Python object that backs torch.autograd.Variable
-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 struct THPVariable {
 PyObject_HEAD;
 // Payload

View File

@@ -19,7 +19,6 @@ TORCH_API extern const char* ERR_BACKWARD_TWICE;
 /// A snapshot of a variable at a certain version. A `SavedVariable` stores
 /// enough information to reconstruct a variable from a certain point in time.
-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 class TORCH_API SavedVariable {
 public:
 SavedVariable() = default;

View File

@@ -286,7 +286,6 @@ struct TORCH_API AutogradMeta : public c10::AutogradMetaInterface {
 uint64_t level,
 bool is_inplace_op) override;
-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 AutogradMeta(
 at::TensorImpl* self_impl = nullptr,
 bool requires_grad = false,

View File

@@ -4,7 +4,6 @@
 #include <ATen/cuda/CUDAEvent.h>
 #include <torch/csrc/python_headers.h>
-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 struct THCPEvent {
 PyObject_HEAD at::cuda::CUDAEvent cuda_event;
 };