[1/N] Fix clang-tidy warnings in torch/csrc/jit/serialization (#129055)

Fixes #ISSUE_NUMBER

Pull Request resolved: https://github.com/pytorch/pytorch/pull/129055
Approved by: https://github.com/r-barnes
Author: cyy
Date: 2024-06-21 14:56:31 +00:00
Committed by: PyTorch MergeBot
Parent: 53be7ff0e4
Commit: 2c7c286fa4
32 changed files with 118 additions and 163 deletions


@ -231,8 +231,6 @@ exclude_patterns = [
'torch/csrc/dynamo/eval_frame.h',
'torch/csrc/inductor/**/*',
'torch/csrc/jit/**/*',
'torch/csrc/jit/serialization/import_legacy.cpp',
'torch/csrc/jit/serialization/export.cpp',
'torch/csrc/lazy/**/*',
]
init_command = [


@ -47,7 +47,7 @@ c10::IValue readArchive(
caffe2::serialize::PyTorchStreamReader& stream_reader);
bool check_zip_file(
std::shared_ptr<caffe2::serialize::ReadAdapterInterface> rai);
const std::shared_ptr<caffe2::serialize::ReadAdapterInterface>& rai);
// The family of methods below to get the root ops and information from a model
TORCH_API std::unordered_map<std::string, OperatorInfo> _get_model_ops_and_info(
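Taking the `shared_ptr` by const reference avoids an atomic refcount bump on every call when the callee never shares ownership; this is the pattern clang-tidy's performance-unnecessary-value-param targets, and the same reasoning applies to the `TypePtr` and `std::optional<std::string>` parameters changed later in this commit. A minimal before/after sketch with a stand-in type, not the real ReadAdapterInterface:

#include <memory>

// Stand-in for caffe2::serialize::ReadAdapterInterface.
struct Reader {};

// Before: the by-value parameter copies the shared_ptr, which bumps the
// atomic reference count on every call even though no ownership is shared.
bool check_v1(std::shared_ptr<Reader> rai) { return rai != nullptr; }

// After: a const& borrows the pointer. Same behavior for callers, no
// refcount traffic.
bool check_v2(const std::shared_ptr<Reader>& rai) { return rai != nullptr; }

int main() {
  auto r = std::make_shared<Reader>();
  return (check_v1(r) && check_v2(r)) ? 0 : 1;
}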


@ -14,8 +14,7 @@ namespace c10 {
struct IValue;
}
namespace torch {
namespace jit {
namespace torch::jit {
class Pickler;
class InlinedCallStackSerializer {
@ -87,5 +86,4 @@ class TORCH_API CallStackDebugInfoUnpickler {
InlinedCallStackDeserializer csds_;
};
} // namespace jit
} // namespace torch
} // namespace torch::jit
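Most of this commit's header churn is this rewrite, the modernize-concat-nested-namespaces pattern: since C++17 a nested namespace can be opened in a single declaration. A self-contained sketch showing that both spellings produce the same scope:

// Pre-C++17: each namespace opened and closed separately.
namespace torch {
namespace jit {
struct A {};
} // namespace jit
} // namespace torch

// C++17 nested namespace definition: one declaration, same scope.
namespace torch::jit {
struct B {};
} // namespace torch::jit

// Both structs live in the same torch::jit namespace.
static_assert(sizeof(torch::jit::A) == sizeof(torch::jit::B), "same scope");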


@ -20,7 +20,6 @@
#include <torch/csrc/onnx/back_compat.h>
#include <torch/csrc/onnx/onnx.h>
#include <torch/version.h>
#include <atomic>
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wnewline-eof")
#include <onnx/checker.h>
@ -31,11 +30,12 @@ C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wsuggest-override")
#include <onnx/shape_inference/implementation.h>
C10_DIAGNOSTIC_POP()
#include <fstream>
#include <memory>
#include <regex>
#include <set>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
namespace torch::jit {
@ -238,7 +238,7 @@ class GraphEncoder {
bool add_node_names,
bool use_external_data_format,
const std::string& onnx_file_path,
const NodeAttrNameMap& node_attr_to_name = {});
NodeAttrNameMap node_attr_to_name = {});
std::shared_ptr<onnx::ModelProto> get_model_proto() {
return model_proto_;
@ -343,7 +343,7 @@ class GraphEncoder {
void EncodeTensor(
onnx::TensorProto* tensor_proto,
const at::Tensor& tensor,
const std::optional<std::string> external_ref = {},
const std::optional<std::string>& external_ref = {},
const bool use_external_data_format = false,
const std::string& onnx_file_path = std::string());
@ -363,7 +363,7 @@ class GraphEncoder {
void EncodeValueInfoType(
onnx::TypeProto* onnx_type,
const TypePtr node_type,
const TypePtr& node_type,
const Value* n,
const std::unordered_map<
std::string,
@ -396,9 +396,9 @@ class GraphEncoder {
SymbolDimMap symbol_dim_map_;
std::shared_ptr<onnx::ModelProto> model_proto_;
size_t num_blocks_;
size_t num_op_nodes_;
size_t num_external_data_;
size_t num_blocks_{0};
size_t num_op_nodes_{0};
size_t num_external_data_{0};
onnx_torch::OperatorExportTypes operator_export_type_;
bool strip_doc_;
std::set<std::string> domains_;
@ -415,7 +415,7 @@ class GraphEncoder {
// tensor, beyond which the parameter is stored in a separate file (if
// use_external_data_format_ is True). This threshold is in place
// so as not to create too many external files.
const size_t ParamSizeThresholdForExternalStorage = 1024;
static constexpr size_t ParamSizeThresholdForExternalStorage = 1024;
};
onnx::TensorProto_DataType ATenTypeToOnnxType(at::ScalarType at_type) {
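Two cleanups here: the counters just above get in-class default initializers (the modernize-use-default-member-init pattern), which lets the constructor in the next hunk drop its redundant `num_blocks_(0)`-style entries, and the per-instance `const` threshold becomes `static constexpr`, a single compile-time constant instead of a member stored in every object. A condensed sketch with illustrative names, not the real GraphEncoder:

#include <cstddef>

class Encoder {
 public:
  Encoder() = default;  // counters start at zero via default member init

  // One compile-time constant shared by all instances, instead of a
  // per-object const member that enlarges every Encoder.
  static constexpr size_t kParamSizeThreshold = 1024;

 private:
  size_t num_blocks_{0};
  size_t num_op_nodes_{0};
  size_t num_external_data_{0};
};

int main() {
  Encoder e;
  (void)e;
  return Encoder::kParamSizeThreshold == 1024 ? 0 : 1;
}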
@ -514,11 +514,9 @@ GraphEncoder::GraphEncoder(
bool add_node_names,
bool use_external_data_format,
const std::string& onnx_file_path,
const NodeAttrNameMap& node_attr_to_name)
NodeAttrNameMap node_attr_to_name)
: model_proto_(std::make_shared<onnx::ModelProto>()),
num_blocks_(0),
num_op_nodes_(0),
num_external_data_(0),
operator_export_type_(operator_export_type),
strip_doc_(strip_doc),
defer_weight_export_(defer_weight_export),
@ -526,7 +524,7 @@ GraphEncoder::GraphEncoder(
onnx_opset_version_(onnx_opset_version),
custom_opsets_(custom_opsets),
graph_(graph),
node_attr_to_name_(node_attr_to_name) {
node_attr_to_name_(std::move(node_attr_to_name)) {
model_proto_->set_producer_name("pytorch");
TORCH_CHECK(
onnx_opset_version > 0 &&
@ -642,7 +640,7 @@ void GraphEncoder::TensorTypeToONNXType(
void GraphEncoder::EncodeValueInfoType(
onnx::TypeProto* onnx_type,
const TypePtr node_type,
const TypePtr& node_type,
const Value* n,
const std::unordered_map<
std::string,
@ -1228,7 +1226,7 @@ void GraphEncoder::EncodeLocalFunction(
// encode attributes names
if (n->hasAttribute(Symbol::attr("attributes"))) {
for (auto attr_name : n->ss(Symbol::attr("attributes"))) {
for (const auto& attr_name : n->ss(Symbol::attr("attributes"))) {
AddAttribute(func_proto, attr_name);
}
}
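The loop change is the performance-for-range-copy pattern: a plain `auto` range variable copies each `std::string` out of the container, while `const auto&` binds to the stored element. Sketch:

#include <cstddef>
#include <string>
#include <vector>

std::size_t total_length(const std::vector<std::string>& names) {
  std::size_t total = 0;
  // const auto& binds to the stored string; a plain `auto` loop variable
  // would deep-copy every element just to read its length.
  for (const auto& name : names) {
    total += name.size();
  }
  return total;
}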
@ -1280,7 +1278,7 @@ void GraphEncoder::EncodeTypeProto(
void GraphEncoder::EncodeTensor(
onnx::TensorProto* tensor_proto,
const at::Tensor& tensor,
const std::optional<std::string> external_ref,
const std::optional<std::string>& external_ref,
const bool use_external_data_format,
const std::string& onnx_file_path) {
for (auto d : tensor.sizes()) {
@ -1462,8 +1460,8 @@ void check_onnx_proto(const std::string& proto_string) {
try {
auto* schema_registry = onnx::OpSchemaRegistry::Instance();
onnx::ShapeInferenceOptions options{
/*check_type=*/true,
/*error_mode=*/true};
/*check_type_val=*/true,
/*strict_mode_val=*/true};
onnx::shape_inference::InferShapes(model, schema_registry, options);
} catch (const onnx::InferenceError& ex) {
TORCH_WARN(
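The `/*check_type_val=*/` and `/*strict_mode_val=*/` renames keep the argument comments in sync with the names ONNX actually declares; clang-tidy's bugprone-argument-comment flags comments that drift from the real parameter names. A sketch with a hypothetical callee:

// Hypothetical callee whose parameter names mirror the renamed ONNX options.
void configure(bool check_type_val, bool strict_mode_val) {
  (void)check_type_val;
  (void)strict_mode_val;
}

void caller() {
  // bugprone-argument-comment verifies that /*name=*/ comments match the
  // declared parameter names, so stale comments are caught when an API
  // renames its parameters.
  configure(/*check_type_val=*/true, /*strict_mode_val=*/true);
}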


@ -16,8 +16,7 @@ namespace ONNX_NAMESPACE {
class ModelProto;
}
namespace torch {
namespace jit {
namespace torch::jit {
// This map is used to keep track of parameters that should be exported
// externally. When `defer_weight_export` is true, the returned map contains
@ -72,7 +71,7 @@ class TORCH_API ScriptModuleSerializer {
public:
explicit ScriptModuleSerializer(
caffe2::serialize::PyTorchStreamWriter& export_writer)
: writer_(export_writer), current_source_range_tag_(0) {}
: writer_(export_writer) {}
void writeFiles(const std::string& code_dir);
void serialize(
@ -138,7 +137,7 @@ class TORCH_API ScriptModuleSerializer {
// just source information about where the node is, since bytecode inlines the
// graph before saving it.
SourceRangeTagMap source_range_tags_;
int64_t current_source_range_tag_;
int64_t current_source_range_tag_{0};
};
// For testing purposes
@ -277,5 +276,4 @@ TORCH_API void save_jit_module_to_write_func(
bool save_mobile_debug_info,
const std::function<size_t(const void*, size_t)>& writer_func);
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -32,7 +32,7 @@
namespace torch::jit {
static std::vector<Method> gatherGetSetStates(ObjectPtr obj) {
static std::vector<Method> gatherGetSetStates(const ObjectPtr& obj) {
std::vector<Method> methods;
// Use DFS on IValue's to traverse dependencies of module._ivalue and
// add all setstate/getstates to initial stack.


@ -13,8 +13,7 @@
#include <torch/csrc/jit/runtime/interpreter.h>
#include <torch/csrc/jit/serialization/type_name_uniquer.h>
namespace torch {
namespace jit {
namespace torch::jit {
struct TORCH_API CompilationOptions {
bool incl_interface_call = false;
@ -42,5 +41,4 @@ TORCH_API IValue convertMobileFunctionToCodeTable(
const mobile::Function& func,
const CompilationOptions& compilation_options);
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -36,6 +36,7 @@
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
namespace torch::jit {
@ -344,8 +345,8 @@ void pushMobileFunctionsToIValues(
}
struct ModuleMethod {
ModuleMethod(const Module& m, const GraphFunction& f, c10::QualifiedName n)
: module(m), function(f), exportName(std::move(n)) {}
ModuleMethod(Module m, const GraphFunction& f, c10::QualifiedName n)
: module(std::move(m)), function(f), exportName(std::move(n)) {}
Module module;
const GraphFunction& function;
c10::QualifiedName exportName;
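`ModuleMethod` now takes the `Module` by value and moves it into the member, the sink-parameter idiom (modernize-pass-by-value); the `NodeAttrNameMap` constructor parameter in export.cpp above gets the same treatment. A simplified sketch:

#include <string>
#include <utility>

struct Method {
  // Sink parameter: take by value and move into the member. Callers with
  // an rvalue pay one move; callers with an lvalue pay exactly one copy.
  // A const& parameter would force a copy in both cases.
  explicit Method(std::string name) : name_(std::move(name)) {}
  std::string name_;
};

int main() {
  std::string n = "forward";
  Method a(n);             // copy; n remains valid
  Method b(std::move(n));  // move; n left in a valid-but-unspecified state
  return (a.name_ == b.name_) ? 0 : 1;
}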
@ -481,7 +482,7 @@ void ScriptModuleSerializer::serialize(
/*archive_dir=*/"",
/*tensor_dir=*/"constants/");
}
if (module.retrieve_traced_inputs().size() > 0) {
if (!module.retrieve_traced_inputs().empty()) {
writeArchive(
module.retrieve_traced_inputs(),
/*archive_name=*/"traced_inputs",
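The `empty()` change is the readability-container-size-empty pattern: it states the intent directly and is never slower than comparing `size()` against zero. Sketch:

#include <vector>

bool has_traced_inputs(const std::vector<int>& traced_inputs) {
  // Equivalent to traced_inputs.size() > 0, but clearer, and size() is not
  // guaranteed O(1) for every container.
  return !traced_inputs.empty();
}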
@ -543,7 +544,6 @@ void ScriptModuleSerializer::writeArchive(
data_pickle.stop();
// write out tensor data
size_t i = 0;
std::string prefix = archive_name + "/";
TORCH_INTERNAL_ASSERT(tensor_names.size() == data_pickle.tensorData().size());
@ -870,8 +870,7 @@ void ExportModule(
if (errno == ENOENT) {
message << "Parent directory of " << filename << " does not exist.\n";
} else {
message << "Error while opening file: " << errno << std::endl;
;
message << "Error while opening file: " << errno << '\n';
}
TORCH_CHECK(false, message.str());
}
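Besides deleting a stray empty statement, the message now ends with `'\n'` instead of `std::endl`; on a string stream the flush that `std::endl` requests is pure overhead (the performance-avoid-endl pattern). Sketch:

#include <cerrno>
#include <sstream>
#include <string>

std::string open_error_message(const std::string& filename) {
  std::ostringstream message;
  // '\n' appends a newline without requesting a flush; std::endl would
  // also flush, which is meaningless extra work on a string stream.
  message << "Error while opening file " << filename << ": " << errno << '\n';
  return message.str();
}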


@ -129,11 +129,11 @@ class FlatbufferSerializer {
flatbuffers::FlatBufferBuilder& fbb,
const std::vector<Argument>& args,
const std::vector<Argument>& returns,
c10::TypePrinter type_printer);
const c10::TypePrinter& type_printer);
flatbuffers::Offset<mobile::serialization::ObjectType> classTypeToFB(
flatbuffers::FlatBufferBuilder& fbb,
ClassTypePtr class_ptr);
const ClassTypePtr& class_ptr);
uint32_t storeIValueAndGetIndex(
flatbuffers::FlatBufferBuilder& fbb,
@ -145,7 +145,7 @@ class FlatbufferSerializer {
uint32_t storeClassTypeAndGetIndex(
flatbuffers::FlatBufferBuilder& fbb,
ClassTypePtr class_type);
const ClassTypePtr& class_type);
flatbuffers::Offset<flatbuffers::Vector<
flatbuffers::Offset<mobile::serialization::ExtraFile>>>
@ -210,7 +210,7 @@ flatbuffers::Offset<jit::mobile::serialization::Schema> FlatbufferSerializer::
flatbuffers::FlatBufferBuilder& fbb,
const std::vector<Argument>& args,
const std::vector<Argument>& returns,
c10::TypePrinter type_printer) {
const c10::TypePrinter& type_printer) {
std::vector<flatbuffers::Offset<jit::mobile::serialization::Arg>> arg_vec;
arg_vec.reserve(args.size());
std::vector<flatbuffers::Offset<jit::mobile::serialization::Arg>> return_vec;
@ -499,7 +499,7 @@ flatbuffers::Offset<mobile::serialization::Dict> FlatbufferSerializer::dictToFB(
}
flatbuffers::Offset<mobile::serialization::ObjectType> FlatbufferSerializer::
classTypeToFB(FlatBufferBuilder& fbb, ClassTypePtr class_ptr) {
classTypeToFB(FlatBufferBuilder& fbb, const ClassTypePtr& class_ptr) {
mobile::serialization::TypeType typetype =
mobile::serialization::TypeType::UNSET;
@ -552,7 +552,7 @@ uint32_t FlatbufferSerializer::storeFunctionAndGetIndex(
uint32_t FlatbufferSerializer::storeClassTypeAndGetIndex(
FlatBufferBuilder& fbb,
ClassTypePtr class_ptr) {
const ClassTypePtr& class_ptr) {
const auto& type_str = class_ptr->name()->qualifiedName();
auto iter = qn_to_serialized_values_.find(type_str);
if (iter != qn_to_serialized_values_.end()) {


@ -16,8 +16,7 @@
* types, to avoid leaking those details to PyTorch clients.
*/
namespace torch {
namespace jit {
namespace torch::jit {
/// Maps file names to file contents.
using ExtraFilesMap = std::unordered_map<std::string, std::string>;
@ -90,5 +89,4 @@ TORCH_API void save_mobile_module_to_func(
// TODO(qihan): delete
TORCH_API bool register_flatbuffer_serializer();
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -2,10 +2,8 @@
#include <torch/csrc/jit/serialization/flatbuffer_serializer.h>
namespace torch {
namespace jit {
namespace torch::jit {
TORCH_API bool register_flatbuffer_all();
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -32,6 +32,7 @@
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
namespace torch::jit {
@ -180,7 +181,7 @@ IValue ScriptModuleDeserializer::readArchive(const std::string& archive_name) {
type_resolver,
ObjLoaderFunc,
device_,
*reader_.get(),
*reader_,
nullptr,
storage_context_);
}
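`*reader_` replaces `*reader_.get()`: dereferencing the smart pointer directly is equivalent, which is what readability-redundant-smart-pointer-get asks for. Sketch:

#include <memory>

int read_version(const std::unique_ptr<int>& reader) {
  // *reader and *reader.get() yield the same reference; the direct
  // dereference drops the redundant .get() call.
  return *reader;
}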
@ -312,7 +313,7 @@ Module import_ir_module(
}
static Module _load_jit_module_from_bytes(
std::shared_ptr<char> data,
const std::shared_ptr<char>& data,
size_t size,
std::shared_ptr<CompilationUnit> cu,
std::optional<c10::Device> device,
@ -320,7 +321,7 @@ static Module _load_jit_module_from_bytes(
bool restore_shapes);
Module parse_and_initialize_jit_module(
std::shared_ptr<char> data,
const std::shared_ptr<char>& data,
size_t size,
ExtraFilesMap& extra_files,
std::optional<at::Device> device) {
@ -345,7 +346,7 @@ Module load_jit_module_from_file(
std::optional<at::Device> device) {
auto data = get_file_content(filename.c_str());
return parse_and_initialize_jit_module(
std::move(std::get<0>(data)), std::get<1>(data), extra_files, device);
std::get<0>(data), std::get<1>(data), extra_files, device);
}
Module load_jit_module_from_stream(
@ -354,7 +355,7 @@ Module load_jit_module_from_stream(
std::optional<at::Device> device) {
auto data = get_stream_content(in);
return parse_and_initialize_jit_module(
std::move(std::get<0>(data)), std::get<1>(data), extra_files, device);
std::get<0>(data), std::get<1>(data), extra_files, device);
}
Module import_ir_module(
@ -384,13 +385,13 @@ Module import_ir_module(
std::shared_ptr<PyTorchStreamReader> reader,
std::shared_ptr<DeserializationStorageContext> storage_context,
std::optional<at::Device> device,
std::string ts_id) {
const std::string& ts_id) {
ScriptModuleDeserializer deserializer(
std::move(cu),
std::move(reader),
/* pickle_dir_prefix = */ ".data/ts_code/" + ts_id + "/",
/* tensor_dir_prefix = */ ".data/",
storage_context);
std::move(storage_context));
ExtraFilesMap extra_files;
return deserializer.deserialize(device, extra_files);
}
@ -443,7 +444,7 @@ Module import_ir_module(
bool load_debug_files) {
std::shared_ptr<ReadAdapterInterface> rai_shared = std::move(rai);
return import_ir_module(
cu, rai_shared, device, extra_files, load_debug_files);
std::move(cu), rai_shared, device, extra_files, load_debug_files);
}
Module import_ir_module(
@ -515,7 +516,7 @@ Module load(
}
Module _load_jit_module_from_bytes(
std::shared_ptr<char> data,
const std::shared_ptr<char>& data,
size_t size,
std::shared_ptr<CompilationUnit> cu,
std::optional<c10::Device> device,
@ -544,7 +545,7 @@ Module _load_jit_module_from_bytes(
// methods are attached to type; we need to replace it's type.
// Non-objects are unchanged; however, nested structures such as list, dict
// are also reconstructed because they might contain an object.
static IValue recreateObject(IValue ivalue, TypeResolver resolver) {
static IValue recreateObject(IValue ivalue, const TypeResolver& resolver) {
if (ivalue.isObject()) {
auto obj = ivalue.toObject();
auto classtype_old = obj->type();


@ -7,14 +7,11 @@
#include <istream>
namespace caffe2 {
namespace serialize {
namespace caffe2::serialize {
class ReadAdapterInterface;
} // namespace serialize
} // namespace caffe2
} // namespace caffe2::serialize
namespace torch {
namespace jit {
namespace torch::jit {
class DeserializationStorageContext;
@ -50,7 +47,7 @@ TORCH_API Module import_ir_module(
std::shared_ptr<caffe2::serialize::PyTorchStreamReader> reader,
std::shared_ptr<torch::jit::DeserializationStorageContext> storage_context,
std::optional<at::Device> device,
std::string ts_id /* torchscript identifier inside package */);
const std::string& ts_id /* torchscript identifier inside package */);
TORCH_API Module import_ir_module(
std::shared_ptr<CompilationUnit> cu,
@ -128,7 +125,7 @@ TORCH_API Module jitModuleFromSourceAndConstants(
int32_t version);
TORCH_API Module parse_and_initialize_jit_module(
std::shared_ptr<char> data,
const std::shared_ptr<char>& data,
size_t size,
ExtraFilesMap& extra_files,
std::optional<at::Device> device = c10::nullopt);
@ -144,7 +141,7 @@ TORCH_API Module load_jit_module_from_stream(
std::optional<at::Device> device = c10::nullopt);
TORCH_API Module parse_and_initialize_jit_module(
std::shared_ptr<char> data,
const std::shared_ptr<char>& data,
size_t size,
ExtraFilesMap& extra_files,
std::optional<at::Device> device);
@ -153,5 +150,4 @@ TORCH_API c10::intrusive_ptr<c10::ivalue::Object> ObjLoaderFunc(
const at::StrongTypePtr& type,
IValue input);
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -1,8 +1,7 @@
#pragma once
#include <cstddef>
namespace torch {
namespace jit {
namespace torch::jit {
constexpr size_t BYTECODE_INDEX_INSTRUCTION = 0;
constexpr size_t BYTECODE_INDEX_OPERATOR = 1;
constexpr size_t BYTECODE_INDEX_CONSTANT = 2;
@ -17,5 +16,4 @@ constexpr size_t BYTECODE_INDEX_ARGUMENT_TYPE = 1;
constexpr size_t BYTECODE_INDEX_ARGUMENT_DEFAULT_VALUE = 2;
constexpr size_t BYTECODE_INDEX_MODULE_DEBUG_HANDLES = 0;
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -2,8 +2,8 @@
#include <ATen/core/ivalue.h>
// Functions that are used in both import and export processes
namespace torch {
namespace jit {
namespace torch::jit {
using c10::IValue;
IValue expect_field(
c10::ivalue::TupleElements& elements,
@ -12,5 +12,4 @@ IValue expect_field(
std::string operator_str(
const std::string& name,
const std::string& overloadname);
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -3,14 +3,11 @@
#include <memory>
#include <string>
namespace caffe2 {
namespace serialize {
namespace caffe2::serialize {
class PyTorchStreamReader;
}
} // namespace caffe2
namespace torch {
namespace jit {
namespace torch::jit {
struct Source;
@ -28,5 +25,4 @@ std::shared_ptr<Source> findSourceInArchiveFromQualifier(
const std::string& export_prefix,
const std::string& qualifier);
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -1,6 +1,8 @@
#include <caffe2/serialize/inline_container.h>
#include <torch/csrc/jit/serialization/import_read.h>
#include <utility>
namespace torch::jit {
IValue readArchiveAndTensors(
@ -48,13 +50,13 @@ IValue readArchiveAndTensors(
device,
false,
type_parser,
storage_context);
std::move(storage_context));
unpickler.set_version(stream_reader.version());
return unpickler.parse_ivalue();
}
bool check_zip_file(
std::shared_ptr<caffe2::serialize::ReadAdapterInterface> rai) {
const std::shared_ptr<caffe2::serialize::ReadAdapterInterface>& rai) {
std::array<uint8_t, 2> first_short{};
static constexpr uint8_t first_slot = 0x80;
static constexpr uint8_t second_slot = 0x02;


@ -3,14 +3,11 @@
#include <torch/csrc/jit/serialization/unpickler.h>
#include <memory>
namespace caffe2 {
namespace serialize {
namespace caffe2::serialize {
class PyTorchStreamReader;
} // namespace serialize
} // namespace caffe2
} // namespace caffe2::serialize
namespace torch {
namespace jit {
namespace torch::jit {
TORCH_API IValue readArchiveAndTensors(
const std::string& archive_name,
@ -25,7 +22,6 @@ TORCH_API IValue readArchiveAndTensors(
std::shared_ptr<DeserializationStorageContext> storage_context = nullptr);
bool check_zip_file(
std::shared_ptr<caffe2::serialize::ReadAdapterInterface> rai);
const std::shared_ptr<caffe2::serialize::ReadAdapterInterface>& rai);
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -17,8 +17,7 @@
#include <string>
#include <vector>
namespace torch {
namespace jit {
namespace torch::jit {
using SourceLoader = std::function<std::shared_ptr<Source>(const std::string&)>;
@ -99,5 +98,4 @@ struct TORCH_API SourceImporter {
std::shared_ptr<SourceImporterImpl> pImpl;
};
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -8,7 +8,6 @@
namespace torch::jit {
namespace {
namespace onnx_torch = ::torch::onnx;
namespace onnx = ::ONNX_NAMESPACE;
// Pretty printing for ONNX


@ -3,10 +3,8 @@
#include <onnx/onnx_pb.h>
#include <torch/csrc/jit/ir/ir.h>
namespace torch {
namespace jit {
namespace torch::jit {
TORCH_API std::string prettyPrint(const ::ONNX_NAMESPACE::ModelProto& model);
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -7,8 +7,7 @@
#include <torch/csrc/jit/serialization/pickler.h>
#include <torch/csrc/jit/serialization/unpickler.h>
namespace torch {
namespace jit {
namespace torch::jit {
/// Pickle an IValue by calling a function to handle writing the data.
///
@ -119,5 +118,4 @@ class VectorReader : public caffe2::serialize::ReadAdapterInterface {
std::vector<char> data_;
};
#endif
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -65,25 +65,27 @@ void Pickler::pushIValueImpl(const IValue& ivalue) {
} else if (ivalue.isNone()) {
push<PickleOpCode>(PickleOpCode::NONE);
} else if (ivalue.isIntList()) {
pushSpecializedList(ivalue, "build_intlist", [=](const IValue& ivalue) {
pushSpecializedList(ivalue, "build_intlist", [this](const IValue& ivalue) {
for (const int64_t item : ivalue.toIntVector()) {
pushInt(item);
}
});
} else if (ivalue.isTensorList()) {
pushSpecializedList(ivalue, "build_tensorlist", [=](const IValue& ivalue) {
for (const at::Tensor& item : ivalue.toTensorVector()) {
pushIValue(item);
}
});
pushSpecializedList(
ivalue, "build_tensorlist", [this](const IValue& ivalue) {
for (const at::Tensor& item : ivalue.toTensorVector()) {
pushIValue(item);
}
});
} else if (ivalue.isDoubleList()) {
pushSpecializedList(ivalue, "build_doublelist", [=](const IValue& ivalue) {
for (double item : ivalue.toDoubleVector()) {
pushDouble(item);
}
});
pushSpecializedList(
ivalue, "build_doublelist", [this](const IValue& ivalue) {
for (double item : ivalue.toDoubleVector()) {
pushDouble(item);
}
});
} else if (ivalue.isBoolList()) {
pushSpecializedList(ivalue, "build_boollist", [=](const IValue& ivalue) {
pushSpecializedList(ivalue, "build_boollist", [this](const IValue& ivalue) {
for (bool item : ivalue.toBoolList()) {
pushBool(item);
}
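These lambdas swap the default copy capture `[=]` for an explicit `[this]`. They only call back into the enclosing Pickler, and implicit capture of `this` via `[=]` is deprecated since C++20. A stand-alone sketch with stand-in names:

#include <functional>

class Writer {
 public:
  void pushAll() {
    // [this] captures exactly what the lambda needs: the object pointer.
    // [=] would capture `this` implicitly (deprecated in C++20) and copy
    // any local variable the body happened to mention.
    forEach([this](int v) { push(v); });
  }

 private:
  void push(int) {}
  void forEach(const std::function<void(int)>& f) {
    for (int i = 0; i < 3; ++i) {
      f(i);
    }
  }
};

int main() {
  Writer w;
  w.pushAll();
}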
@ -638,7 +640,7 @@ void Pickler::pushDict(const IValue& ivalue) {
push<PickleOpCode>(PickleOpCode::EMPTY_DICT);
static_assert(
std::is_unsigned<decltype(dict.size())>::value,
std::is_unsigned_v<decltype(dict.size())>,
"Expected size to be non-negative.");
push<PickleOpCode>(PickleOpCode::MARK);
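`std::is_unsigned_v<T>` is the C++17 variable-template shorthand for `std::is_unsigned<T>::value`, and the `std::common_type_t` change in pickler.h below is the matching `_t` alias for `::type` (the modernize-type-traits rewrite). Sketch:

#include <type_traits>

// The verbose C++11 spellings...
static_assert(std::is_unsigned<unsigned>::value, "old spelling");
using Widened = std::common_type<int, long>::type;

// ...and the _v / _t aliases that the modernize rewrite prefers.
static_assert(std::is_unsigned_v<unsigned>, "new spelling");
using Widened2 = std::common_type_t<int, long>;

static_assert(std::is_same_v<Widened, Widened2>, "identical types");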


@ -14,8 +14,7 @@
#include <c10/util/string_view.h>
#include <torch/csrc/Export.h>
namespace torch {
namespace jit {
namespace torch::jit {
// See Python's pickletools.py for a detailed description of each of these codes
enum class PickleOpCode : char {
@ -221,7 +220,7 @@ class TORCH_API Pickler {
// does not)
static CONSTEXPR_EXCEPT_WIN_CUDA size_t kBufferSize = 256;
template <typename T>
void push(typename std::common_type<T>::type value) {
void push(std::common_type_t<T> value) {
const char* begin = reinterpret_cast<const char*>(&value);
if (bufferPos_ + sizeof(T) > buffer_.size()) {
flushNonEmpty();
@ -425,5 +424,4 @@ inline void setTensorMetadata(
setTensorMetadata(t, std::move(metadata));
}
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -1677,7 +1677,7 @@ uint64_t PythonPrint::minVersion() const {
return pImpl->min_version_;
}
static std::vector<IValue> traverseIValueAndGetObjects(IValue ivalue) {
static std::vector<IValue> traverseIValueAndGetObjects(const IValue& ivalue) {
std::vector<IValue> result;
std::vector<IValue> stack;
stack.emplace_back(ivalue);


@ -4,8 +4,7 @@
#include <torch/csrc/jit/ir/ir.h>
#include <vector>
namespace torch {
namespace jit {
namespace torch::jit {
struct Method;
struct Module;
@ -54,5 +53,4 @@ TORCH_API void jitModuleToPythonCodeAndConstants(
std::vector<IValue>* constants // output
);
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -6,6 +6,7 @@
#include <torch/csrc/jit/mobile/type_parser.h>
#include <torch/csrc/jit/serialization/pickle.h>
#include <algorithm>
#include <memory>
namespace torch::jit {
@ -210,15 +211,15 @@ void ConcreteSourceRangeUnpickler::unpickle() {
const auto& ivalues = ivaluesTuple->elements();
TORCH_CHECK(
ivalues.size(), "Invalid unpickle operation: empty ivalues tuple");
!ivalues.empty(), "Invalid unpickle operation: empty ivalues tuple");
unpickled_records = std::make_shared<SourceRangeRecords>();
IValue lines;
if (ivalues[0].isString() &&
kFormatWithStringTable == ivalues[0].toStringRef()) {
deserializer.reset(new SourceRangeDeserializer(ivalues[1]));
deserializer = std::make_shared<SourceRangeDeserializer>(ivalues[1]);
lines = ivalues[2];
} else {
deserializer.reset(new SourceRangeDeserializer());
deserializer = std::make_shared<SourceRangeDeserializer>();
lines = ivaluesTuple;
}
for (auto& val : lines.toTuple()->elements()) {
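`std::make_shared` replaces `reset(new ...)`: one combined allocation for the object and its control block, and no raw `new` left to leak if anything throws in between (the modernize-make-shared pattern). Sketch with a stand-in type, not the real SourceRangeDeserializer:

#include <memory>

struct Deserializer {
  explicit Deserializer(int format) : format_(format) {}
  int format_;
};

int main() {
  std::shared_ptr<Deserializer> d;
  d.reset(new Deserializer(1));          // two allocations: object, then control block
  d = std::make_shared<Deserializer>(2); // one combined allocation, exception-safe
  return d->format_ == 2 ? 0 : 1;
}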


@ -12,8 +12,7 @@ namespace c10 {
struct IValue;
}
namespace torch {
namespace jit {
namespace torch::jit {
class Pickler;
class SourceRangeSerializer;
@ -64,5 +63,4 @@ class SourceRangeUnpickler {
TORCH_API void setShouldUseFormatWithStringTable(
bool should_use_format_with_string_table);
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -2,8 +2,7 @@
#include <torch/csrc/jit/serialization/source_range_serialization.h>
namespace torch {
namespace jit {
namespace torch::jit {
// Do this clownyness with virtual functions because of the split
// between ATen core and torch
@ -26,5 +25,4 @@ class ConcreteSourceRangeUnpickler : public SourceRangeUnpickler {
std::shared_ptr<SourceRangeRecords> unpickled_records;
};
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -2,8 +2,7 @@
#include <ATen/core/ivalue.h>
namespace torch {
namespace jit {
namespace torch::jit {
// Used in torch.package and TorchScript serialization to coordinate
// sharing of storages between models. Also used to create deterministic
@ -81,5 +80,4 @@ class TORCH_API DeserializationStorageContext {
std::unordered_map<std::string, c10::Storage> name_storage_map_;
};
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -3,8 +3,7 @@
#include <torch/csrc/jit/frontend/name_mangler.h>
#include <torch/csrc/jit/ir/type_hashing.h>
namespace torch {
namespace jit {
namespace torch::jit {
/**
* class TypeNameUniquer
@ -29,5 +28,4 @@ class TORCH_API TypeNameUniquer {
EqualType>
name_map_;
};
} // namespace jit
} // namespace torch
} // namespace torch::jit


@ -7,8 +7,7 @@
#include <torch/csrc/jit/frontend/script_type_parser.h>
#include <torch/csrc/jit/serialization/pickler.h>
namespace torch {
namespace jit {
namespace torch::jit {
using TypeResolver =
std::function<c10::StrongTypePtr(const c10::QualifiedName&)>;
@ -199,5 +198,4 @@ class TORCH_API Unpickler {
void restoreAccurateTypeTags(const IValue& root, const c10::TypePtr& type_tag);
} // namespace jit
} // namespace torch
} // namespace torch::jit