Fix clang-tidy warnings in torch/jit (#146963)

Fixes #ISSUE_NUMBER

Pull Request resolved: https://github.com/pytorch/pytorch/pull/146963
Approved by: https://github.com/davidberard98
Authored by: cyy
Committed by: PyTorch MergeBot
Date: 2025-02-15 03:36:59 +00:00
Parent: 4233a77960
Commit: 8f291e8c00

24 changed files with 42 additions and 60 deletions
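Most of the hunks below repeat a small set of clang-tidy-driven cleanups: dropping redundant = "" initializers from std::string variables (a default-constructed string is already empty), collapsing nested namespaces into the C++17 nested form, replacing hand-written constructors with default member initializers or = default, and writing '\n' instead of std::endl. A minimal before/after sketch of those patterns follows; the namespaces and the Example struct are hypothetical illustrations, not code from this PR, and the check names in the comments (readability-redundant-string-init, modernize-concat-nested-namespaces, modernize-use-default-member-init) are assumptions about which checks fired.

#include <cstddef>
#include <string>

// Before: the patterns being cleaned up ("before_style" is a hypothetical namespace).
namespace torch {
namespace jit {
namespace before_style {

struct Example { // hypothetical struct, not part of this PR
  std::string name = ""; // redundant: std::string is empty by default
  const char* label_;
  std::size_t line_;
  Example() : label_(nullptr), line_(0) {} // init list only repeats defaults
};

} // namespace before_style
} // namespace jit
} // namespace torch

// After: the style this commit moves the code toward.
namespace torch::jit::after_style {

struct Example {
  std::string name; // default construction already yields ""
  const char* label_{nullptr}; // default member initializers
  std::size_t line_{0};
  Example() = default; // no user-written constructor body needed
};

} // namespace torch::jit::after_style

These are intended as behavior-preserving cleanups; the std::endl replacements additionally skip the stream flush that endl performs, which is the point of the performance-avoid-endl check.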


@@ -6,7 +6,7 @@
 namespace torch::jit::mobile::coreml {
 struct TensorSpec {
-  std::string name = "";
+  std::string name;
   c10::ScalarType dtype = c10::ScalarType::Float;
 };


@@ -43,7 +43,7 @@ c10::IValue preprocess(
   // Test that method_compile_spec contains the necessary keys and
   // Tensor/TensorList input
   c10::IValue inp;
-  std::string error = "";
+  std::string error;
   if (!method_compile_spec.contains("forward")) {
     error = R"(method_compile_spec does not contain the "forward" key.)";
   } else {


@@ -8,10 +8,7 @@
 #include <memory>
 #include <vector>
-namespace torch {
-namespace jit {
-namespace xnnpack {
-namespace delegate {
+namespace torch::jit::xnnpack::delegate {
 class XNNCompiler {
  public:
@@ -24,7 +21,4 @@ class XNNCompiler {
       XNNExecutor* executor);
 };
-} // namespace delegate
-} // namespace xnnpack
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::xnnpack::delegate


@@ -8,10 +8,7 @@
 #include <ATen/core/List.h>
 #include <torch/csrc/jit/backends/xnnpack/xnnpack_graph_builder.h>
-namespace torch {
-namespace jit {
-namespace xnnpack {
-namespace delegate {
+namespace torch::jit::xnnpack::delegate {
 // Expected method_compile_spec should look something like this:
 // {
@@ -126,7 +123,4 @@ c10::IValue preprocess(
 constexpr auto backend_name = "xnnpack";
 static auto pre_reg = backend_preprocess_register(backend_name, preprocess);
-} // namespace delegate
-} // namespace xnnpack
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::xnnpack::delegate


@@ -58,12 +58,9 @@ struct TORCH_API KernelSpec {
       : key_{_key},
         graph_{_graph},
         code_{_graph, "<fused code>"},
-        nInputs_{_graph->inputs().size()},
-        inputBroadcastGroups_{},
-        inputChunks_{},
-        kernels_{} {
+        nInputs_{_graph->inputs().size()} {
     // No need to iterate over reference since n is pointer
     for (const auto n : graph_->nodes()) {
       static_assert(std::is_pointer_v<decltype(n)>, "n must be a pointer");


@@ -88,7 +88,7 @@ struct ParsedLiteral {
   AttributeKind k = AttributeKind::t;
   int64_t i = 0;
-  std::string s = "";
+  std::string s;
   double f = 0.0;
   c10::complex<double> c = c10::complex<double>(0, 0);
   TypePtr ty;


@@ -11,7 +11,7 @@
 #include <c10/util/irange.h>
 namespace torch::jit {
-std::ostream& operator<<(std::ostream& out, Instruction inst);
 namespace mobile {
 void CompilationUnit::register_function(std::unique_ptr<Function> fn) {
@@ -178,7 +178,7 @@ const std::vector<at::Tensor> Module::parameters() const {
 // loading of a mobile module. TODO
 const std::map<std::string, at::Tensor> Module::named_parameters() const {
   std::map<std::string, at::Tensor> params;
-  const std::string name = "";
+  const std::string name;
   slot_named_params_recurse(object_, &params, name);
   return params;
 }


@@ -31,7 +31,7 @@ std::vector<std::string> splitName(const std::string& name) {
 template <typename Iter>
 std::string concatName(const Iter& begin, const Iter& end) {
-  std::string combined_name = "";
+  std::string combined_name;
   for (Iter it = begin; it != end; ++it) {
     const std::string& sub_name = *it;
     if (!combined_name.empty()) {


@@ -57,7 +57,7 @@ static void hoistConvPackedParams(
   // create the new name
-  std::string suffix = "";
+  std::string suffix;
   for (const auto& attrName : rootToConvPath) {
     suffix += attrName + ".";
   }


@@ -8,14 +8,14 @@ namespace torch::jit {
 namespace {
-const std::string kTopModuleVariableName = "";
+const std::string kTopModuleVariableName;
 std::string TidyClassNameFromTorchScript(
     const std::optional<c10::QualifiedName>& class_name) {
   if (!class_name) {
     return "UNKNOWN_CLASS";
   }
-  std::string out = "";
+  std::string out;
   for (const auto& atom : class_name->atoms()) {
     bool is_internal_torch_atom = (atom == "__torch__");
     bool is_mangle_atom = (atom.find("__torch_mangle") != std::string::npos);


@@ -100,7 +100,7 @@ std::vector<IValue> getParamAttributes(
   auto attr = attrModule.attr(name);
   Value* paramConst = nullptr;
-  std::string fullName("");
+  std::string fullName;
   for (auto& name : moduleNames) {
     fullName += name + '.';
   }


@@ -654,7 +654,7 @@ void InplaceConverter::gatherAttrNameInitialValueMap(
   auto moduleNames =
       findSubModuleAttr(n->inputs().at(0), name, attrModule, graph_);
-  std::string fullName("");
+  std::string fullName;
   for (auto& name : moduleNames) {
     fullName += name + '.';
   }


@@ -240,7 +240,7 @@ struct CompleteArgumentInfo;
 struct CompleteArgumentSpec {
   CompleteArgumentSpec(bool with_grad, at::ArrayRef<IValue> inputs)
-      : hash_code(0), ninputs(inputs.size()) {
+      : ninputs(inputs.size()) {
     int32_t all_dims = 0;
     const auto num_inputs = inputs.size();
     for (const auto i : c10::irange(num_inputs)) {
@@ -325,7 +325,7 @@ struct CompleteArgumentSpec {
   int64_t* sizes_strides() {
     return data.data() + ninputs;
   }
-  size_t hash_code; // precomputed on construction
+  size_t hash_code{0}; // precomputed on construction
   size_t ninputs;
   // layout is ninputs of TensorPOD (each 64-bit) followed by their size and
   // stride info for 3 tensors:


@@ -19,8 +19,6 @@ TORCH_DECLARE_bool(torch_jit_enable_expanded_stacks);
 namespace torch::jit {
-std::ostream& operator<<(std::ostream& out, Instruction inst);
 namespace interpreter {
 template <class Ttarget, class Tsource>
@@ -62,10 +60,10 @@ struct WithCurrentNode {
 };
 struct NodeSourceInfo {
-  const char* func_name_;
-  const char* file_name_;
-  size_t line_;
-  NodeSourceInfo() : func_name_(nullptr), file_name_(nullptr), line_(0) {}
+  const char* func_name_{nullptr};
+  const char* file_name_{nullptr};
+  size_t line_{0};
+  NodeSourceInfo() {}
 };
 struct CodeImpl {


@@ -2,8 +2,8 @@
 namespace torch::jit {
-static thread_local std::string caughtOriginalMsg = "";
-static thread_local std::string caughtPythonClassName = "";
+static thread_local std::string caughtOriginalMsg;
+static thread_local std::string caughtPythonClassName;
 JITException::JITException(
     const std::string& msg,


@@ -48,9 +48,9 @@ class TORCH_API LockingLogger : public LoggerBase {
  private:
   mutable std::mutex m;
   struct RawCounter {
-    RawCounter() : sum(0), count(0) {}
-    int64_t sum;
-    size_t count;
+    RawCounter() = default;
+    int64_t sum{0};
+    size_t count{0};
   };
   std::unordered_map<std::string, RawCounter> raw_counters;
   std::unordered_map<std::string, AggregationType> agg_types;


@@ -37,7 +37,7 @@ std::string stringSlice(
       slice_indices_adjust(string.size(), &start_val, &end_val, step);
   int64_t i = start_val;
-  std::string result = "";
+  std::string result;
   for ([[maybe_unused]] const auto j : c10::irange(num_vals)) {
     result += string[i];
     i += step;


@@ -117,8 +117,7 @@ class ScriptModuleDeserializer final {
       : compilation_unit_(std::move(cu)),
         reader_(std::move(reader)),
         code_prefix_("code/"),
-        pickle_dir_prefix_(""),
-        tensor_dir_prefix_(""),
         source_importer_(
             compilation_unit_,
             &constants_table_,


@@ -1585,7 +1585,7 @@ struct PythonPrintImpl {
   } else if (auto enumType = type->cast<EnumType>()) {
     body_ << "class " << enumType->qualifiedClassName().name() << "(Enum):\n";
-    std::string value_wrapper = "";
+    std::string value_wrapper;
     if (enumType->getValueType() == StringType::get()) {
       value_wrapper = "\"";
     }


@@ -73,7 +73,7 @@ class TORCH_API Unpickler {
       TypeParserT type_parser = defaultTypeParser,
       std::shared_ptr<DeserializationStorageContext> storage_context = nullptr)
       : reader_(std::move(reader)),
-        tensor_table_(),
         type_resolver_(std::move(type_resolver)),
         obj_loader_(std::move(obj_loader)),
         read_record_(std::move(read_record)),


@@ -35,7 +35,7 @@ struct TORCH_API Bound {
   bool operator>(const Bound& other) const;
   bool operator>=(const Bound& other) const;
-  void swap() {
+  void swap() noexcept {
     std::swap(start, end);
     swapped = !swapped;
   }


@@ -411,7 +411,7 @@ class TORCH_API BufHandle : public ExprHandle {
 class TORCH_API VarHandle : public ExprHandle {
  public:
   // Creates an empty VarHandle whose base Var is set to nullptr.
-  VarHandle() : ExprHandle() {}
+  VarHandle() = default;
   explicit VarHandle(Dtype dtype) : ExprHandle(Var::make(dtype)) {}


@@ -101,7 +101,7 @@ static void printHistory(int index, std::string message) {
 template <typename T>
 std::string join(std::vector<T> indices, char sep = ',') {
-  std::string s = "";
+  std::string s;
   for (const auto& index : indices) {
     s += std::to_string(index) + sep;
   }
@@ -111,7 +111,7 @@ std::string join(std::vector<T> indices, char sep = ',') {
 static std::string join(
     const std::vector<std::string>& indices,
     char sep = ',') {
-  std::string s = "";
+  std::string s;
   for (const auto& index : indices) {
     s += index + sep;
   }
@@ -141,7 +141,7 @@ void loopnestRandomization(int64_t seed, LoopNest& l) {
   int max_allowed_transformations = 20;
   int n_transforms = randomization_helper::max_transformations(
       std::rand() % max_allowed_transformations);
-  std::string message = "";
+  std::string message;
   // clang-format off
   // Transformations list:
   //


@@ -100,8 +100,8 @@ size_t assertFind(
   std::stringstream ss;
   ss << "Expected to find ";
   c10::printQuotedString(ss, sub);
-  ss << " but did not find it" << std::endl;
-  ss << "Searched string:" << std::endl;
+  ss << " but did not find it" << '\n';
+  ss << "Searched string:" << '\n';
   found_range.highlight(ss);
   if (extra_msg) {
     extra_msg(ss);
@@ -139,8 +139,8 @@ size_t assertFindRegex(
   std::stringstream ss;
   ss << "Expected to find regex ";
   c10::printQuotedString(ss, sub);
-  ss << " but did not find it" << std::endl;
-  ss << "Searched string:" << std::endl;
+  ss << " but did not find it" << '\n';
+  ss << "Searched string:" << '\n';
   if (extra_msg) {
     extra_msg(ss);
   }
@@ -363,7 +363,7 @@ struct FileCheckImpl {
     std::stringstream ss;
     ss << "Expected to find ";
     c10::printQuotedString(ss, check.search_str_);
-    ss << "highlighted but it is not." << std::endl;
+    ss << "highlighted but it is not." << '\n';
     error_range.highlight(ss);
     throw std::runtime_error(ss.str());
   };