Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 21:14:14 +08:00
[7/N] Fix clang-tidy warnings in jit (#131996)
Follows #131986.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/131996
Approved by: https://github.com/ezyang
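The bulk of the diff below merges the old two-level `namespace torch { namespace jit {` blocks into single C++17 nested-namespace definitions, the rewrite suggested by clang-tidy's modernize-concat-nested-namespaces check. A minimal sketch of the pattern, with a hypothetical function name:

// Before: one line (and one closing brace) per namespace level.
namespace torch {
namespace jit {
void runPass();
} // namespace jit
} // namespace torch

// After (C++17): a single nested-namespace definition.
namespace torch::jit {
void runPass();
} // namespace torch::jit

The remaining hunks address individual warnings: member initializers, const-reference parameters, redundant std::move on trivially copyable values, and parenthesized throw expressions.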
@@ -229,6 +229,7 @@ AliasDb::AliasDb(
   analyze(graph_);
 
   memoryDAG_ = std::move(*memoryDAGBuilder_).createMemoryDAG();
+  // NOLINTNEXTLINE(cppcoreguidelines-prefer-member-initializer)
   memoryDAGBuilder_ = nullptr; // to make further access a hard error
 
   memoryDAG_->setWildcards(
@@ -278,6 +279,7 @@ AliasDb::AliasDb(
   // Now that we've built the write index, we can null out the WriteRegistry to
   // make future access an error. In this way we prevent the index from getting
   // out of sync (since we have no way of registering new writes)
+  // NOLINTNEXTLINE(cppcoreguidelines-prefer-member-initializer)
   writeRegistry_ = nullptr;
 
   // Initialize the write cache
@@ -995,7 +997,9 @@ void AliasDb::analyzeGradOf(Node* node) {
   mapAliases(node->outputs(), grad_of_block->outputs());
 }
 
-void AliasDb::analyzeSubgraph(Node* node, std::shared_ptr<Graph> subgraph) {
+void AliasDb::analyzeSubgraph(
+    Node* node,
+    const std::shared_ptr<Graph>& subgraph) {
   const auto subgraphBlock = subgraph->block();
   // CallFunction nodes have an extra first parameter
   if (node->kind() == prim::CallFunction) {
@@ -1569,8 +1573,8 @@ bool AliasDb::safeToChangeAliasingRelationship(
 // Helper for topologically-safe node moves. See `tryMove()` for details.
 class AliasDb::WorkingSet {
  public:
-  explicit WorkingSet(Node* mover, const AliasDb& aliasDb) : aliasDb_(aliasDb) {
-    mover_ = mover;
+  explicit WorkingSet(Node* mover, const AliasDb& aliasDb)
+      : aliasDb_(aliasDb), mover_(mover) {
     for (const auto user : getUsersSameBlock(mover_)) {
       moverUsers_.insert(user);
     }
@@ -1581,7 +1585,7 @@ class AliasDb::WorkingSet {
   // Add `n` to the working set
   void add(Node* n) {
     nodes_.push_back(n);
-    node_to_index_[n] = nodes_.size() - 1;
+    node_to_index_[n] = static_cast<int64_t>(nodes_.size()) - 1;
     for (const auto user : getUsersSameBlock(n)) {
       users_.insert(user);
     }
@@ -1705,6 +1709,7 @@ class AliasDb::WorkingSet {
     }
   }
 
+  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
   const AliasDb& aliasDb_;
   std::vector<Node*> nodes_;
   // Extra data structure for nodes for faster look up
@@ -1976,7 +1981,7 @@ std::optional<Element*> AliasDb::setWildcard(const Value* v) {
   // invariant that all mutable values have an Element
   getOrCreateElement(v);
   wildcards_.insert(v);
-  return *maybe_wildcardElement;
+  return maybe_wildcardElement;
 }
 
 void AliasDb::buildWrittenToLocationsIndex() {
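The two NOLINTNEXTLINE(cppcoreguidelines-prefer-member-initializer) suppressions added above mark assignments that the check would otherwise want hoisted into the constructor's initializer list, but that must stay in the body because they run only after the member has been consumed. A reduced, hypothetical illustration of why the ordering matters:

#include <memory>

struct Builder {
  int build() && { return 42; } // consumes the builder
};

struct Db {
  Db() : builder_(std::make_unique<Builder>()) {
    result_ = std::move(*builder_).build();
    // Must happen after build(); hoisting this into the initializer list
    // would null the builder before it is used, so the check is suppressed.
    // NOLINTNEXTLINE(cppcoreguidelines-prefer-member-initializer)
    builder_ = nullptr; // make further access a hard error
  }
  std::unique_ptr<Builder> builder_;
  int result_ = 0;
};

int main() { return Db().result_ == 42 ? 0 : 1; }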
@@ -7,8 +7,7 @@
 #include <torch/csrc/jit/passes/create_functional_graphs.h>
 #include <torch/csrc/jit/passes/utils/memory_dag.h>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 /**
  * Alias analysis pass.
@@ -217,7 +216,7 @@ class AliasDb {
   void analyzeImpl(Node* node);
   void analyzeIf(Node* node);
   void analyzeLoop(Node* node);
-  void analyzeSubgraph(Node* node, std::shared_ptr<Graph> subgraph);
+  void analyzeSubgraph(Node* node, const std::shared_ptr<Graph>& subgraph);
   void analyzeSubgraph(Node* node);
   void analyzeCreator(Node* node);
   void analyzeExtractor(Node* node);
@@ -318,5 +317,4 @@ class AliasDb {
 // the right thing.
 TORCH_API void Lint(const AliasDb* db);
 
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
@@ -8,8 +8,7 @@
 
 #include <torch/csrc/Export.h>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 using ::c10::Symbol;
 
@@ -180,5 +179,4 @@ struct IRAttributeError : public std::exception {
  private:
  std::string msg;
 };
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
@@ -9,8 +9,8 @@
 // - create constant nodes from ints, floats, complex, intlist, Tensors, and
 //   other types
 // - implement primitive constant ops.
-namespace torch {
-namespace jit {
-
+namespace torch::jit {
+
 using ::c10::IValue;
 
@@ -57,5 +57,4 @@ std::optional<T> constant_as(const Value* v) {
   }
   return std::nullopt;
 }
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
@@ -2,8 +2,7 @@
 
 #include <c10/util/Exception.h>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 // Intrusive doubly linked lists with sane reverse iterators.
 // The header file is named generic_graph_node_list.h because it is ONLY
@@ -184,8 +183,7 @@ static inline bool operator!=(
   return *a != *b;
 }
 
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
 
 namespace std {
 
@@ -1,7 +1,6 @@
 #include <torch/csrc/jit/ir/graph_utils.h>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 TypePtr getTensorType(const at::Tensor& t, bool complete) {
   auto r = TensorType::create(t);
@@ -89,5 +88,4 @@ void setInputTensorTypes(
   }
 }
 
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
@@ -4,8 +4,7 @@
 
 #include <vector>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 TORCH_API TypePtr getTensorType(const at::Tensor& t, bool complete);
 
@@ -21,5 +20,4 @@ TORCH_API void setInputTensorTypes(
     bool complete,
     const std::vector<int>& param_count_list = {});
 
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
@@ -1298,8 +1298,7 @@ Node::Node(Graph* graph_, NodeKind kind_)
       owning_block_(nullptr),
       scope_(graph_->current_scope_),
       callstack_(std::nullopt),
-      op_(nullptr),
-      topo_position_(0) {
+      op_(nullptr) {
   graph_->all_nodes.emplace(this);
 }
 
@@ -2044,14 +2043,14 @@ void inlineCallStackOfNode(
     std::unordered_map<InlinedCallStack*, InlinedCallStackPtr>& new_cs_entries,
     Function* callee,
     Node* to_replace,
-    std::optional<ModuleInstanceInfo> m_info);
+    const std::optional<ModuleInstanceInfo>& m_info);
 
 static void inlineCallStackOfBlock(
     Block* b,
     std::unordered_map<InlinedCallStack*, InlinedCallStackPtr>& new_cs_entries,
     Function* callee,
     Node* to_replace,
-    std::optional<ModuleInstanceInfo> m_info) {
+    const std::optional<ModuleInstanceInfo>& m_info) {
   for (auto n : b->nodes()) {
     inlineCallStackOfNode(n, new_cs_entries, callee, to_replace, m_info);
   }
@@ -2062,7 +2061,7 @@ void inlineCallStackOfNode(
     std::unordered_map<InlinedCallStack*, InlinedCallStackPtr>& new_cs_entries,
     Function* callee,
     Node* to_replace,
-    std::optional<ModuleInstanceInfo> m_info) {
+    const std::optional<ModuleInstanceInfo>& m_info) {
   auto new_node_cs = new_node->callstack();
 
   InlinedCallStack* raw_callstack_ptr =
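The `inlineCallStackOf*` signatures above now take the `std::optional<ModuleInstanceInfo>` by const reference instead of by value, so recursive walks over blocks no longer copy the optional (and its payload) at every call. A standalone sketch of the difference, with hypothetical types:

#include <iostream>
#include <optional>
#include <string>

struct ModuleInfo {
  std::string name; // non-trivial payload: copying the optional copies this too
};

// By value: every call copies the optional and its string.
void visitByValue(std::optional<ModuleInfo> info);

// By const reference: no copy, and the body reads it the same way.
void visitByRef(const std::optional<ModuleInfo>& info) {
  if (info) {
    std::cout << info->name << '\n';
  }
}

int main() {
  std::optional<ModuleInfo> m{ModuleInfo{"top.submodule"}};
  visitByRef(m);
}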
@@ -33,8 +33,7 @@ class THPPointer;
 using THPObjectPtr = THPPointer<PyObject>;
 using pyobj_list = std::vector<THPObjectPtr>;
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 namespace utils {
 TORCH_API std::string getNodesModuleHierarchy(const Node& n);
 } // namespace utils
@@ -165,7 +164,7 @@ struct OperatorMap;
 // access the same graph
 template <typename T>
 struct Wrap {
-  explicit Wrap(T* p) : elem(p), clear_cb(nullptr) {}
+  explicit Wrap(T* p) : elem(p) {}
   void clear() {
     if (clear_cb) {
       clear_cb(elem);
@@ -173,7 +172,7 @@ struct Wrap {
     elem = nullptr;
   }
   T* elem;
-  void (*clear_cb)(void*);
+  void (*clear_cb)(void*){nullptr};
 };
 
 struct Value {
@@ -1192,7 +1191,7 @@ struct Graph : std::enable_shared_from_this<Graph> {
   std::unordered_set<const Node*> all_nodes;
   std::unordered_set<const Value*> all_values;
   std::unordered_set<const Block*> all_blocks;
-  size_t next_unique_;
+  size_t next_unique_{0};
 
   std::unordered_map<std::string, Value*> unique_names_;
   // name_base_suffix tracks largest suffix currently used by all names sharing
@@ -1212,8 +1211,7 @@ struct Graph : std::enable_shared_from_this<Graph> {
 
  public:
   Graph(ScopePtr scope_root = c10::make_intrusive<Scope>())
-      : next_unique_(0),
-        current_scope_(std::move(scope_root)),
+      : current_scope_(std::move(scope_root)),
         block_(new Block(this, nullptr)),
         insert_before_(return_node()) {}
 
@@ -1837,5 +1835,4 @@ struct FunctionSchemaMap {
   MapType map;
 };
 
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
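`next_unique_{0}` and `clear_cb{nullptr}` above switch to in-class default member initializers, which is what cppcoreguidelines-prefer-member-initializer asks for: every constructor starts from a well-defined value, and redundant entries can be dropped from initializer lists, as the `Graph` constructor does. A minimal sketch with hypothetical names:

#include <cstddef>

struct Graphish {
  // In-class defaults apply to every constructor that does not
  // explicitly initialize the member.
  std::size_t next_unique_{0};
  void (*clear_cb)(void*){nullptr};

  Graphish() = default; // next_unique_ == 0, clear_cb == nullptr
  explicit Graphish(std::size_t n) : next_unique_(n) {} // overrides the default
};

int main() {
  Graphish g;
  return (g.next_unique_ == 0 && g.clear_cb == nullptr) ? 0 : 1;
}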
@@ -3,8 +3,7 @@
 #include <c10/util/irange.h>
 #include <torch/csrc/jit/ir/ir.h>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 struct IfView {
   explicit IfView(Node* node) : node_(node) {
@@ -160,5 +159,4 @@ struct LoopView {
     return adjusted;
   }
 };
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
@@ -74,6 +74,7 @@ class IRParser {
 
   torch::jit::Lexer L;
   torch::jit::Graph* g = nullptr;
+  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
   std::unordered_map<std::string, Value*>& vmap;
   SchemaTypeParser type_parser;
   bool parse_tensor_constants_;
@@ -140,7 +141,7 @@ VarWithType IRParser::parseVarWithType(bool allow_optional) {
 std::string IRParser::parseVar() {
   L.expect('%');
   std::string name;
-  bool continue_parsing;
+  bool continue_parsing = false;
   do {
     if (L.cur().kind == TK_IDENT) {
       name += L.expect(TK_IDENT).text();
@@ -184,8 +185,9 @@ ParsedLiteral IRParser::parseScalarLiteral(Node* n) {
       str = "-";
       L.next();
       if (L.cur().kind != TK_NUMBER) {
-        throw ErrorReport(token.range)
-            << "Expected a number after '-' but got:" << token.text();
+        throw(
+            ErrorReport(token.range)
+            << "Expected a number after '-' but got:" << token.text());
       }
       [[fallthrough]];
     case TK_NUMBER:
@@ -196,11 +198,13 @@ ParsedLiteral IRParser::parseScalarLiteral(Node* n) {
         try {
           imag = std::stod(str.substr(0, str.size() - 1));
         } catch (const std::invalid_argument& e) {
-          throw ErrorReport(token.range)
-              << "Number cannot be converted to double";
+          throw(
+              ErrorReport(token.range)
+              << "Number cannot be converted to double");
         } catch (const std::out_of_range& e) {
-          throw ErrorReport(token.range)
-              << "Number is too long to be represented in type double";
+          throw(
+              ErrorReport(token.range)
+              << "Number is too long to be represented in type double");
         }
         r.c = c10::complex<double>(0, imag);
       } else if (
@@ -210,21 +214,24 @@ ParsedLiteral IRParser::parseScalarLiteral(Node* n) {
         try {
           r.f = std::stod(str);
         } catch (const std::invalid_argument& e) {
-          throw ErrorReport(token.range)
-              << "Number cannot be converted to double";
+          throw(
+              ErrorReport(token.range)
+              << "Number cannot be converted to double");
         } catch (const std::out_of_range& e) {
-          throw ErrorReport(token.range)
-              << "Number is too long to be represented in type double";
+          throw(
+              ErrorReport(token.range)
+              << "Number is too long to be represented in type double");
         }
       } else {
         r.k = AttributeKind::i;
         try {
          r.i = std::stoll(str);
        } catch (const std::invalid_argument& e) {
-          throw ErrorReport(token.range)
-              << "Number cannot be converted to integer";
+          throw(
+              ErrorReport(token.range)
+              << "Number cannot be converted to integer");
        } catch (const std::out_of_range& e) {
-          throw ErrorReport(token.range) << "Number is too big";
+          throw(ErrorReport(token.range) << "Number is too big");
        }
      }
      L.next();
@@ -240,13 +247,15 @@ ParsedLiteral IRParser::parseScalarLiteral(Node* n) {
       L.next();
       auto text = L.expect(TK_IDENT);
       if (text.text() != "Tensor") {
-        throw ErrorReport(token.range)
-            << "Could not parse literal" << token.text();
+        throw(
+            ErrorReport(token.range)
+            << "Could not parse literal" << token.text());
       }
       if (!parse_tensor_constants_) {
-        throw ErrorReport(token.range)
+        throw(
+            ErrorReport(token.range)
             << "Tensor constant encountered but `parse_tensor_constants` set to false"
-            << token.text();
+            << token.text());
       }
       L.expect('>');
       // these values will be set with randomly initialized data in
@@ -262,9 +271,10 @@ ParsedLiteral IRParser::parseScalarLiteral(Node* n) {
       }
       auto text = L.expect(TK_NUMBER);
       if (!parse_tensor_constants_) {
-        throw ErrorReport(token.range)
+        throw(
+            ErrorReport(token.range)
            << "Single-element tensor constant encountered but "
-            << "`parse_tensor_constants` is set to false " << token.text();
+            << "`parse_tensor_constants` is set to false " << token.text());
       }
       L.expect('}');
       deferred_tensor_value_initializations_.push_back(n);
@@ -272,8 +282,9 @@ ParsedLiteral IRParser::parseScalarLiteral(Node* n) {
       return r;
     }
     default:
-      throw ErrorReport(token.range)
-          << "Could not parse literal" << token.text();
+      throw(
+          ErrorReport(token.range)
+          << "Could not parse literal" << token.text());
   }
 }
 
@@ -340,7 +351,7 @@ void IRParser::parseAttr(Node* n) {
         k = AttributeKind::tys;
         break;
       default:
-        throw ErrorReport(L.cur().range) << "Unexpected attr type";
+        throw(ErrorReport(L.cur().range) << "Unexpected attr type");
     }
   });
   switch (k) {
@@ -363,15 +374,16 @@ void IRParser::parseAttr(Node* n) {
        n->tys_(Symbol::attr(attrname), tys);
        break;
      default:
-        throw ErrorReport(L.cur().range) << "Unexpected attr type";
+        throw(ErrorReport(L.cur().range) << "Unexpected attr type");
    }
  } else if (L.cur().text() == "annotate") {
    L.next();
    L.expect('(');
    auto type = L.cur().text();
    if (type != "List" && type != "Dict") {
-      throw ErrorReport(L.cur().range)
-          << "Unexpected annotation (only List and Dict can be parsed)";
+      throw(
+          ErrorReport(L.cur().range)
+          << "Unexpected annotation (only List and Dict can be parsed)");
    }
    L.next();
    // ignore the annotations on the IValue constants, and instead recover
@@ -412,7 +424,7 @@ void IRParser::parseAttr(Node* n) {
       // initialized with random data later
       break;
     default:
-      throw ErrorReport(L.cur().range) << "Unexpected attr type";
+      throw(ErrorReport(L.cur().range) << "Unexpected attr type");
   }
   return;
 }
@@ -542,10 +554,11 @@ void IRParser::parseOperator(Block* b) {
       // TODO: support?
       if (!schema_return_type->hasFreeVariables() &&
           !v.type->isSubtypeOf(*schema_return_type)) {
-        throw ErrorReport(source_range)
-            << "Annotated type " << v.type->repr_str()
-            << " does not match schema type "
-            << schema_return_type->repr_str() << " for operator " << *schema;
+        throw(
+            ErrorReport(source_range)
+            << "Annotated type " << v.type->repr_str()
+            << " does not match schema type "
+            << schema_return_type->repr_str() << " for operator " << *schema);
       }
       vmap[v.name]->setType(v.type);
     }
@@ -609,7 +622,7 @@ void IRParser::parseReturnOperator() {
 void IRParser::parse() {
   // Parse graph definition, it should look like the following:
   // graphName (input1, input2, ... inputN):
-  std::string graphName = L.expect(TK_IDENT).text();
+  L.expect(TK_IDENT);
   parseGraphInputs();
   L.expect(':');
 
@@ -631,7 +644,7 @@ void IRParser::parse() {
   TORCH_INTERNAL_ASSERT(device);
   auto dtype = tt->scalarType();
   TORCH_INTERNAL_ASSERT(dtype);
-  auto options = at::TensorOptions(*device).dtype(*dtype);
+  auto options = at::TensorOptions(*device).dtype(dtype);
   auto t = n->t_(attr::value, at::empty_strided(*sizes, *strides, options));
   (void)t;
 }
@@ -669,8 +682,9 @@ void IRParser::parseList(
 
 Value* IRParser::findValueInVMap(const std::string& name) {
   if (!vmap.count(name)) {
-    throw ErrorReport(L.cur().range)
-        << "Cannot find a variable with name '" << name << "'";
+    throw(
+        ErrorReport(L.cur().range)
+        << "Cannot find a variable with name '" << name << "'");
   }
   return vmap.at(name);
 }
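The repeated rewrite in the parser hunks above wraps each streamed `ErrorReport` in parentheses before throwing it. Semantically nothing changes: the whole `operator<<` chain is evaluated first and its result is what gets thrown, with or without the parentheses. A self-contained sketch with a hypothetical stream-style exception type standing in for `ErrorReport`:

#include <sstream>
#include <stdexcept>
#include <string>

// Stand-in for ErrorReport: an exception builder whose operator<<
// appends to the message and returns a new report.
struct Report : std::runtime_error {
  explicit Report(std::string ctx) : std::runtime_error(std::move(ctx)) {}
};

template <typename T>
Report operator<<(const Report& r, const T& v) {
  std::ostringstream ss;
  ss << r.what() << v;
  return Report(ss.str());
}

int main() {
  try {
    // The parenthesized form used by the new code; the operand is the
    // fully evaluated stream expression either way.
    throw(Report("parse error: ") << "expected a number");
  } catch (const Report& e) {
    return std::string(e.what()) == "parse error: expected a number" ? 0 : 1;
  }
}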
@@ -6,8 +6,7 @@
 
 #include <torch/csrc/Export.h>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 struct Graph;
 struct Value;
@@ -35,5 +34,4 @@ TORCH_API void parseIR(
     std::unordered_map<std::string, Value*>& vmap,
     bool parse_tensor_constants = false);
 
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
@@ -4,8 +4,7 @@
 #include <torch/csrc/jit/ir/constants.h>
 #include <torch/csrc/utils/variadic.h>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 struct Value;
 
@@ -22,8 +21,7 @@ struct NamedValue {
   NamedValue(const std::string& name, Value* value)
       : name_(name), value_(value) {}
 
-  /* implicit */ NamedValue(IValue value)
-      : value_(nullptr), ivalue_(std::move(value)) {}
+  /* implicit */ NamedValue(IValue value) : ivalue_(std::move(value)) {}
 
   NamedValue(const std::string& name, IValue value)
       : name_(name), ivalue_(std::move(value)) {}
@@ -80,5 +78,4 @@ struct NamedValue {
   IValue ivalue_;
 };
 
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
@@ -2,8 +2,7 @@
 
 #include <torch/csrc/jit/ir/ir.h>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 struct TORCH_API HashNode {
   size_t operator()(const Node* k) const;
@@ -13,5 +12,4 @@ struct TORCH_API EqualNode {
   bool operator()(const Node* lhs, const Node* rhs) const;
 };
 
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
@@ -7,8 +7,7 @@
 #include <optional>
 #include <unordered_map>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 struct ModuleInstanceInfo;
 constexpr size_t kModuleInstanceInfo = 2;
 
@@ -216,5 +215,4 @@ using DebugInfoTuple =
 constexpr size_t kDebugInfoTupleSourceRangeIndex{0};
 constexpr size_t kDebugInfoTupleNodeNameIndex{1};
 constexpr size_t kDebugInfoTupleInlinedCSIndex{2};
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
@@ -5,8 +5,7 @@
 #include <unordered_map>
 #include <vector>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 /**
  * \brief A structure describing a match of a pattern in a graph.
@@ -70,5 +69,4 @@ struct Match {
 std::vector<Match> TORCH_API
 findPatternMatches(const Graph& pattern, Graph& graph);
 
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
@@ -3,8 +3,7 @@
 #include <ATen/core/jit_type.h>
 #include <torch/csrc/jit/ir/ir.h>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 struct TORCH_API HashType {
   size_t operator()(const TypePtr& type) const;
@@ -16,5 +15,4 @@ struct EqualType {
   bool operator()(const c10::ConstTypePtr& a, const c10::ConstTypePtr& b) const;
 };
 
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
@@ -18,15 +18,10 @@
 #include <torch/custom_class.h>
 
 #include <caffe2/serialize/in_memory_adapter.h>
-#include <exception>
-#include <fstream>
-#include <string>
-#include <vector>
 
-namespace torch {
-namespace jit {
-using caffe2::serialize::FileAdapter;
-using caffe2::serialize::IStreamAdapter;
+namespace torch::jit {
 using caffe2::serialize::MemoryReadAdapter;
 using caffe2::serialize::PyTorchStreamReader;
 using caffe2::serialize::ReadAdapterInterface;
@@ -59,8 +54,7 @@ IValueUnpickler::IValueUnpickler(std::unique_ptr<PyTorchStreamReader> reader)
 c10::IValue IValueUnpickler::deserialize(std::optional<at::Device> device) {
   auto mcu = std::make_shared<mobile::CompilationUnit>();
 
-  // NOLINTNEXTLINE(performance-move-const-arg)
-  return readArchive("data", mcu, std::move(device));
+  return readArchive("data", mcu, device);
 }
 
 c10::IValue IValueUnpickler::readArchive(
@@ -157,8 +151,7 @@ c10::IValue IValueUnpickler::readArchive(
       std::move(type_resolver),
       std::move(obj_loader),
       std::move(read_record),
-      // NOLINTNEXTLINE(performance-move-const-arg)
-      std::move(device),
+      device,
       false,
       nullptr);
   return unpickler.parse_ivalue();
@@ -239,7 +232,7 @@ std::map<std::string, at::Tensor> mobile_module_to_parameter_map(
 }
 
 static std::map<std::string, at::Tensor> _load_parameters_bytes(
-    std::shared_ptr<char> data,
+    const std::shared_ptr<char>& data,
     size_t size,
     std::optional<at::Device> device) {
   TORCH_CHECK(size >= kFileFormatHeaderSize, "Unrecognized data format");
@@ -270,15 +263,14 @@ std::map<std::string, at::Tensor> _load_parameters(
     std::istream& in,
     std::optional<at::Device> device) {
   auto [data, size] = get_stream_content(in);
-  return _load_parameters_bytes(std::move(data), size, device);
+  return _load_parameters_bytes(data, size, device);
 }
 
 std::map<std::string, at::Tensor> _load_parameters(
     const std::string& filename,
     std::optional<at::Device> device) {
   auto [data, size] = get_file_content(filename.c_str());
-  return _load_parameters_bytes(std::move(data), size, device);
+  return _load_parameters_bytes(data, size, device);
 }
 
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
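The hunks above drop `std::move` on `std::optional<at::Device>` arguments (and the NOLINTs that guarded them), and pass the `std::shared_ptr<char>` buffer by const reference, which also skips an atomic refcount bump per call. clang-tidy's performance-move-const-arg flags moves that cannot actually move anything: for a trivially copyable optional the "move" copies the same bytes, so the plain name is clearer. A reduced illustration with a hypothetical device type:

#include <optional>

struct Device {
  int index; // trivially copyable, like a small device descriptor
};

static int use(std::optional<Device> d) {
  return d ? d->index : -1;
}

int main() {
  std::optional<Device> device{Device{7}};
  // std::move(device) here would still copy the bytes, because
  // std::optional<Device> is trivially copyable; performance-move-const-arg
  // flags the move as misleading. Passing the name is equivalent and clearer.
  return use(device) == 7 ? 0 : 1;
}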
@@ -23,11 +23,9 @@ IValue deepCopy(const IValue& self) {
 
   // Lists of ivalues should recursively deep copy their contents
   if (self.isList()) {
-    // NOLINTNEXTLINE(performance-move-const-arg)
-    auto source = std::move(self).toList();
+    auto source = self.toList();
     auto newList = c10::impl::GenericList(source.elementType());
     newList.reserve(source.size());
-    // NOLINTNEXTLINE(performance-implicit-conversion-in-loop)
     for (const IValue& value : source) {
       newList.push_back(deepCopy(value));
     }
@@ -946,12 +946,7 @@ void initJitScriptBindings(PyObject* module) {
         if (!method) {
           return py::str("ScriptObject <" + self.type()->str() + ">");
         }
-        return invokeScriptMethodFromPython(
-            *method,
-            // NOLINTNEXTLINE(performance-move-const-arg)
-            std::move(args),
-            // NOLINTNEXTLINE(performance-move-const-arg)
-            std::move(kwargs));
+        return invokeScriptMethodFromPython(*method, std::move(args), kwargs);
       });
 
   special_magic_methods.emplace(
@@ -965,12 +960,7 @@ void initJitScriptBindings(PyObject* module) {
           ss << std::hex << static_cast<const void*>(&self);
           return py::str("<torch.ScriptObject object at " + ss.str() + ">");
         }
-        return invokeScriptMethodFromPython(
-            *method,
-            // NOLINTNEXTLINE(performance-move-const-arg)
-            std::move(args),
-            // NOLINTNEXTLINE(performance-move-const-arg)
-            std::move(kwargs));
+        return invokeScriptMethodFromPython(*method, std::move(args), kwargs);
       });
 
   for (const char* mm_name : magic_method_names) {
@@ -1142,7 +1132,7 @@ void initJitScriptBindings(PyObject* module) {
             const ResolutionCallback& rcb) {
           const auto self = ModuleSelf(std::move(concreteType));
           m._ivalue()->compilation_unit()->define(
-              m.type()->name().value(), script, pythonResolver(rcb), &self);
+              m.type()->name(), script, pythonResolver(rcb), &self);
           didFinishEmitModule(m);
         })
       .def(
@@ -1447,10 +1437,7 @@ void initJitScriptBindings(PyObject* module) {
           auto strongPtr = py::cast<StrongFunctionPtr>(args[0]);
           Function& callee = *strongPtr.function_;
           py::object result = invokeScriptFunctionFromPython(
-              callee,
-              // NOLINTNEXTLINE(performance-move-const-arg)
-              tuple_slice(std::move(args), 1),
-              kwargs);
+              callee, tuple_slice(std::move(args), 1), kwargs);
           return result;
           END_HANDLE_TH_ERRORS_PYBIND
         })
@@ -487,7 +487,6 @@ struct DifferentiableGraphOp {
       for (auto& tensor : lst) {
         tensor = detach(tensor);
       }
-      // NOLINTNEXTLINE(performance-move-const-arg)
       v = std::move(lst);
     }
   }
@@ -45,6 +45,7 @@ class TORCH_API Unpickler {
         type_parser_(type_parser),
         version_(caffe2::serialize::kProducedFileFormatVersion) {}
 
+  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
   Unpickler(
       std::function<size_t(char*, size_t)> reader,
       TypeResolver type_resolver,
@@ -76,8 +77,7 @@ class TORCH_API Unpickler {
         type_resolver_(std::move(type_resolver)),
         obj_loader_(std::move(obj_loader)),
         read_record_(std::move(read_record)),
-        // NOLINTNEXTLINE(performance-move-const-arg)
-        device_(std::move(device)),
+        device_(device),
         use_storage_device_(use_storage_device),
         type_parser_(type_parser),
         storage_context_(std::move(storage_context)),