Apply modernize-use-emplace to aten, c10, torch (#91077)
Apply the clang-tidy check modernize-use-emplace. Constructing container elements in place is slightly more efficient than inserting a temporary, and it is already the recommended style in the parts of the codebase covered by clang-tidy; this change manually applies the check to the rest of the codebase. Pinging @ezyang, as this is related to my other PRs he reviewed, like #89000.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/91077
Approved by: https://github.com/ezyang
committed by PyTorch MergeBot
parent 944519a468
commit 3916d7a575
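For context, the rewrite the check performs is illustrated by the minimal standalone sketch below (an illustrative example, not code from this diff): push_back constructs a temporary object and then moves or copies it into the container, while emplace/emplace_back forward their arguments so the element is constructed directly in the container's storage. An argument-free emplace_back() value-initializes the element, which is why calls like flat_weights_.emplace_back(Tensor()) become flat_weights_.emplace_back() in the hunks below.

#include <string>
#include <utility>
#include <vector>

int main() {
  std::vector<std::pair<int, std::string>> v;

  // Before: std::make_pair builds a temporary std::pair, which is then
  // move-constructed into the vector.
  v.push_back(std::make_pair(1, std::string("one")));

  // After: the arguments are forwarded to the pair's constructor and the
  // element is built in place inside the vector's storage.
  v.emplace_back(2, "two");
}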
@@ -1,6 +1,7 @@
 #include <ATen/SavedTensorHooks.h>
 #include <c10/util/Exception.h>
 #include <stack>
+#include <utility>
 
 namespace at {
 
@@ -55,7 +56,7 @@ void SavedTensorDefaultHooks::push_hooks(PyObject* pack_hook, PyObject* unpack_h
   TORCH_INTERNAL_ASSERT(is_initialized);
   TORCH_INTERNAL_ASSERT(pack_hook != nullptr && unpack_hook != nullptr);
   assertSavedTensorHooksNotDisabled();
-  tls.stack.push(std::make_pair(pack_hook, unpack_hook));
+  tls.stack.emplace(pack_hook, unpack_hook);
 }
 
 void SavedTensorDefaultHooks::pop_hooks() {
@@ -76,7 +77,7 @@ std::stack<std::pair<PyObject*, PyObject*>> SavedTensorDefaultHooks::get_stack()
 }
 
 void SavedTensorDefaultHooks::set_stack(std::stack<std::pair<PyObject*, PyObject*>> stack_) {
-  tls.stack = stack_;
+  tls.stack = std::move(stack_);
 }
 
 }
@@ -32,7 +32,7 @@ TorchVitalAttr& TorchVital::create(const std::string& attr, bool force) {
   }
   auto iter = attrs.find(attr);
   if (iter == attrs.end()) {
-    auto r = attrs.emplace(std::make_pair(attr, TorchVitalAttr()));
+    auto r = attrs.emplace(attr, TorchVitalAttr());
     return r.first->second;
   }
   return iter->second;
@@ -79,7 +79,7 @@ bool APIVitals::setVital(
   TorchVital* vital = nullptr;
   if (iter == name_map_.end()) {
     auto r =
-        name_map_.emplace(std::make_pair(vital_name, TorchVital(vital_name)));
+        name_map_.emplace(vital_name, TorchVital(vital_name));
     vital = &r.first->second;
   } else {
     vital = &iter->second;
@@ -179,8 +179,8 @@ OptionalType::OptionalType(TypePtr contained)
   } else if (contained == NumberType::get() || is_numbertype) {
     contained_ = NumberType::get();
     types_.clear();
-    types_.push_back(NumberType::get());
-    types_.push_back(NoneType::get());
+    types_.emplace_back(NumberType::get());
+    types_.emplace_back(NoneType::get());
   } else {
     std::vector<TypePtr> to_subtract{NoneType::get()};
     auto without_none = subtractTypeSetFrom(to_subtract, types_);
@@ -150,7 +150,7 @@ static Tensor sumproduct_pair(const Tensor& left_, const Tensor& right_, IntArra
   out_size.reserve(out_num_dim);
   for (auto& d : lro) out_size.push_back(left.sym_size(d));
   for (auto& d : lo) out_size.push_back(left.sym_size(d));
-  for (auto& d : sum_dims_) { out_size.push_back(1); (void)(d); }; // avoid warning about not using d
+  for (auto& d : sum_dims_) { out_size.emplace_back(1); (void)(d); }; // avoid warning about not using d
   for (auto& d : ro) out_size.push_back(right.sym_size(d));
 
   std::vector<int64_t> lpermutation(lro);
@@ -167,7 +167,7 @@ void RNNImplBase<Derived>::reset() {
     if (named_parameters.contains(wn)) {
       flat_weights_.emplace_back(named_parameters[wn]);
     } else {
-      flat_weights_.emplace_back(Tensor());
+      flat_weights_.emplace_back();
     }
   }
 
@@ -246,7 +246,7 @@ void RNNImplBase<Derived>::reset_flat_weights() {
     if (named_parameters.contains(wn)) {
       flat_weights_.emplace_back(named_parameters[wn]);
     } else {
-      flat_weights_.emplace_back(Tensor());
+      flat_weights_.emplace_back();
     }
   }
 }
@@ -46,7 +46,7 @@ auto UndefinedGradBackward::apply(variable_list&& output_grads)
   output_grads.reserve(input_grads.size());
   for (auto& grad : output_grads) {
     (void)grad; // Suppress unused variable warning
-    input_grads.emplace_back(at::Tensor());
+    input_grads.emplace_back();
   }
   return input_grads;
 }
@@ -358,7 +358,8 @@ std::vector<std::pair<std::string, TensorMetadata>> ValueCache::unpackTensorMap(
   for (auto& it : tensor_map) {
     auto* value = it.second.ptr();
     if (py::isinstance<py::str>(it.first) && THPVariable_CheckExact(value)) {
-      out.push_back({py::cast<std::string>(it.first), toTensorMetadata(value)});
+      out.emplace_back(
+          py::cast<std::string>(it.first), toTensorMetadata(value));
     }
   }
   return out;
@@ -77,8 +77,7 @@ c10::intrusive_ptr<rpc::Message> RpcWithProfilingResp::toMessageImpl() && {
   // Create ivalues to send over
   std::vector<at::IValue> ivalues{wrappedMsgType, profilingId_.toIValue()};
   // Attach the serialized events.
-  ivalues.emplace_back(
-      at::IValue(static_cast<int32_t>(profiledEvents_.size())));
+  ivalues.emplace_back(static_cast<int32_t>(profiledEvents_.size()));
   for (const auto& e : profiledEvents_) {
     ivalues.emplace_back(e.toIValue());
   }
@@ -2101,7 +2101,7 @@ void verify_params_across_processes(
 
   // Allgather and verify parameter size.
   std::vector<std::vector<at::Tensor>> param_size_output_tensors;
-  param_size_output_tensors.emplace_back(std::vector<at::Tensor>{});
+  param_size_output_tensors.emplace_back();
   auto world_size = process_group->getSize();
   for (size_t i = 0; i < world_size; ++i) {
     param_size_output_tensors.front().emplace_back(
@@ -882,7 +882,7 @@ c10::intrusive_ptr<JitFuture> TensorPipeAgent::send(
   {
     std::unique_lock<std::mutex> lock(timeoutMapMutex_);
     auto& timeoutFuturesVector = timeoutMap_[expirationTime];
-    messageIdToTimeout_.emplace(std::make_pair(messageId, expirationTime));
+    messageIdToTimeout_.emplace(messageId, expirationTime);
     timeoutFuturesVector.emplace_back(
         messageId, futureResponseMessage, timeout);
   }
@@ -298,7 +298,7 @@ parseWireSections(const void* data, size_t data_size) {
      break;
    }
    size_t sz = c10::stoll(std::string(sizePtr, ptr - sizePtr));
-    headerEnts.emplace_back(std::make_pair(name, sz));
+    headerEnts.emplace_back(name, sz);
    ++ptr; // past the '\n'
  }
  if (!ok) {
@@ -185,8 +185,8 @@ static int TensorGuards_init(
       PyErr_SetString(PyExc_TypeError, "expected Tensor()");
       return -1;
     }
-    checks.emplace_back(TensorCheck(
-        state, Py_TYPE(item), THPVariable_Unpack(item), dynamic_shapes));
+    checks.emplace_back(
+        state, Py_TYPE(item), THPVariable_Unpack(item), dynamic_shapes);
   }
   return 0;
 }
@@ -55,7 +55,7 @@ int64_t store(std::shared_ptr<Graph> graph) {
       std::piecewise_construct,
       std::forward_as_tuple(key),
       std::forward_as_tuple(key, graph));
-  cache.graphToKey_.emplace(std::make_pair(std::move(repr), key));
+  cache.graphToKey_.emplace(std::move(repr), key);
   return key;
 }
 
@@ -274,8 +274,7 @@ void ConcreteModuleTypeBuilder::addBuiltinFunction(
 void ConcreteModuleTypeBuilder::addModule(
     std::string name,
     std::shared_ptr<ConcreteModuleType> meta) {
-  modules_.emplace_back(
-      ConcreteModuleTypeBuilder::ModuleInfo{std::move(name), std::move(meta)});
+  modules_.emplace_back(std::move(name), std::move(meta));
 }
 
 void ConcreteModuleTypeBuilder::addForwardHook(py::object hook) {
@@ -370,7 +369,7 @@ ConcreteModuleType::getModulesPy() const {
 
   ret.reserve(data_.modules_.size());
   for (const auto& info : data_.modules_) {
-    ret.emplace_back(std::make_pair(info.name_, info.meta_));
+    ret.emplace_back(info.name_, info.meta_);
   }
   return ret;
 }
@@ -2159,7 +2159,7 @@ struct to_ir {
       if (lhs_type == AnyType::get()) {
         isinstance_types.insert(
             isinstance_types.end(), rhs_types.begin(), rhs_types.end());
-        not_isinstance_types.push_back(AnyType::get());
+        not_isinstance_types.emplace_back(AnyType::get());
         // Edge case: we can still say that all lhs types subtype some
         // rhs type if `lhs` is `Any` and `rhs` is `Any`
         if (isinstance_types.size() != 1 ||
@@ -5472,8 +5472,8 @@ void CompilationUnit::define_hooks(
       typeParser.parseSchemaFromDef(hook_def, true /* skip_self*/);
   // need to add self as the first because we skipped it
   std::vector<Argument> arguments;
-  arguments.emplace_back(Argument(
-      hook_def.decl().params()[0].ident().name(), self->getClassType()));
+  arguments.emplace_back(
+      hook_def.decl().params()[0].ident().name(), self->getClassType());
   arguments.insert(
       arguments.end(), schema.arguments().begin(), schema.arguments().end());
   return schema.cloneWithArguments(arguments);
@@ -5651,8 +5651,7 @@ void CompilationUnit::define_interface(
       typeParser.parseSchemaFromDef(method_def, /* skip_self*/ true);
   // need to add self as the first because we skipped it
   std::vector<Argument> arguments;
-  arguments.emplace_back(
-      Argument(method_def.decl().params()[0].ident().name(), iface));
+  arguments.emplace_back(method_def.decl().params()[0].ident().name(), iface);
   arguments.insert(
       arguments.end(), schema.arguments().begin(), schema.arguments().end());
   iface->addMethod(schema.cloneWithArguments(std::move(arguments)));
@@ -187,10 +187,10 @@ std::vector<InlinedCallStackEntry> InlinedCallStack::vec() {
   std::vector<InlinedCallStackEntry> r;
   c10::optional<InlinedCallStackPtr> current = intrusive_from_this();
   while (current) {
-    r.emplace_back(std::make_tuple(
+    r.emplace_back(
         (*current)->fn_,
         (*current)->source_range_,
-        (*current)->module_instance_info_));
+        (*current)->module_instance_info_);
     current = (*current)->callee_;
   }
   return r;
@@ -647,7 +647,7 @@ IValue parseObject(
   mobile::Function* setstate = loader.getFunction(object->setstate_func());
   auto obj =
       c10::ivalue::Object::create(at::StrongTypePtr(loader.cu_, cls), 0);
-  stack.push_back(obj);
+  stack.emplace_back(obj);
   stack.emplace_back(std::move(input));
   setstate->run(stack);
   return obj;
@@ -658,7 +658,7 @@ IValue parseObject(
   IValue input = loader.getIValue(object->state());
   auto obj = c10::ivalue::Object::create(
       c10::StrongTypePtr(nullptr, custom_class_type), 1);
-  stack.push_back(obj);
+  stack.emplace_back(obj);
   stack.emplace_back(std::move(input));
   custom_class_type->getMethod("__setstate__").run(stack);
   return obj;
@@ -228,7 +228,7 @@ c10::optional<std::function<void(Stack&)>> makeOperatorFunction(
           args[i].name(),
           " does not have a specified value or default value. ");
 
-      stack.push_back(args[i].default_value());
+      stack.emplace_back(args[i].default_value());
     }
     stack.insert(stack.end(), out_args.rbegin(), out_args.rend());
     fn(stack);
@@ -135,7 +135,7 @@ bool InterpreterState::run(Stack& stack) {
         frame.step();
       } break;
       case OPN: {
-        stack.push_back(inst.N);
+        stack.emplace_back(inst.N);
         RECORD_EDGE_SCOPE_WITH_DEBUG_HANDLE_AND_INPUTS(
             code.op_names_[inst.X].name, debug_handle, stack);
         code.operators_[inst.X](stack);
@@ -310,8 +310,7 @@ void FoldConvBatchNormHelper::analyze(
           "Conv and BN modules didn't have all required parameters or attributes...");
       continue;
     }
-    conv_bn_paths_[g].push_back(
-        std::make_tuple(conv_module_path, bn_module_path));
+    conv_bn_paths_[g].emplace_back(conv_module_path, bn_module_path);
     // We are using a separate vector for saving Values we want to rewrite
     // to make sure that the order in which we perform these
     // transformations is deterministic. Iterating through keys of
@@ -52,7 +52,7 @@ std::deque<std::string> findSubModuleAttr(
 Value* addParamAsArgument(Function* function, std::string& name, IValue& attr) {
   auto schema = function->getSchema();
   auto args = schema.arguments();
-  args.emplace_back(Argument(name, nullptr, c10::nullopt, attr));
+  args.emplace_back(name, nullptr, c10::nullopt, attr);
   auto new_schema = FunctionSchema(
       schema.name(),
       schema.overload_name(),
@@ -17,7 +17,7 @@ std::vector<GraphPassEntry>& getCustomPrePasses() {
 }
 
 GraphPassNameType registerPostPass(GraphPass p) {
-  getCustomPostPasses().emplace_back(GraphPassEntry{std::move(p), graphPassID});
+  getCustomPostPasses().emplace_back(std::move(p), graphPassID);
   return graphPassID++;
 }
 
@@ -26,7 +26,7 @@ GraphPassNameType registerPass(GraphPass p) {
 }
 
 GraphPassNameType registerPrePass(GraphPass p) {
-  getCustomPrePasses().emplace_back(GraphPassEntry{std::move(p), graphPassID});
+  getCustomPrePasses().emplace_back(std::move(p), graphPassID);
   return graphPassID++;
 }
 
@@ -929,7 +929,7 @@ ModuleMethodVector InsertObserversHelper::getInvokedMethods(
     if (n->kind() == prim::CallMethod) {
       auto m_opt = getInvokedModuleOpt(module, n, graph->inputs()[0]);
       if (m_opt.has_value()) {
-        invoked_methods.push_back(std::make_pair(*m_opt, n->s(attr::name)));
+        invoked_methods.emplace_back(*m_opt, n->s(attr::name));
       }
     }
 
@@ -956,7 +956,7 @@ void InsertObserversHelper::insertObserverFor(
     observer_name = "_observer_" + c10::to_string(uid_++);
   }
   module.register_module(observer_name, observer);
-  observer_name_and_modules.push_back(std::make_pair(observer_name, observer));
+  observer_name_and_modules.emplace_back(observer_name, observer);
 
   auto* g = v->owningGraph();
   // Get handle of observer module
@@ -522,8 +522,8 @@ void ReplicateChooseQParamsQuantDequant(std::shared_ptr<Graph>& graph) {
     Node* matched_quantize = match.nodes_map.at(pattern_quant);
     Node* matched_choose_qparam = match.nodes_map.at(pattern_choose_qparam);
     if (matched_dequantize->output()->uses().size() > 1) {
-      nodes_to_rewrite.emplace_back(std::make_tuple(
-          matched_choose_qparam, matched_quantize, matched_dequantize));
+      nodes_to_rewrite.emplace_back(
+          matched_choose_qparam, matched_quantize, matched_dequantize);
     }
   }
   for (const auto& nodes : nodes_to_rewrite) {
@@ -1077,12 +1077,11 @@ std::tuple<c10::QScheme, QParamVector> InsertQuantDeQuantHelper::
     // get compute_dtype for dynamic quantization
     if (observer_module.hasattr("is_dynamic") &&
         observer_module.attr("is_dynamic").toBool()) {
-      qparams.push_back(
-          std::make_pair(kScalarType, observer_module.attr("dtype")));
+      qparams.emplace_back(kScalarType, observer_module.attr("dtype"));
     }
-    return std::make_tuple(qscheme, qparams);
+    return std::make_tuple(qscheme, std::move(qparams));
   } else if (scalar_type == at::ScalarType::Half) {
-    return std::make_tuple(qscheme, qparams);
+    return std::make_tuple(qscheme, std::move(qparams));
   }
   auto calculate_qparams = observer_module.get_method("calculate_qparams");
   IValue result = calculate_qparams(std::vector<IValue>());
@@ -1099,16 +1098,15 @@
   qscheme = observer_module.attr("qscheme").toQScheme();
   if (isPerChannel(qscheme)) {
     auto axis = observer_module.attr("ch_axis");
-    qparams.push_back(std::make_pair("_scale", scale));
-    qparams.push_back(std::make_pair("_zero_point", zero_point));
-    qparams.push_back(std::make_pair("_axis", axis.toInt()));
+    qparams.emplace_back("_scale", scale);
+    qparams.emplace_back("_zero_point", zero_point);
+    qparams.emplace_back("_axis", axis.toInt());
   } else {
-    qparams.push_back(std::make_pair("_scale", scale.item<double>()));
-    qparams.push_back(
-        std::make_pair("_zero_point", zero_point.item<int64_t>()));
+    qparams.emplace_back("_scale", scale.item<double>());
+    qparams.emplace_back("_zero_point", zero_point.item<int64_t>());
   }
-  qparams.push_back(std::make_pair(kScalarType, scalar_type));
-  return std::make_tuple(qscheme, qparams);
+  qparams.emplace_back(kScalarType, scalar_type);
+  return std::make_tuple(qscheme, std::move(qparams));
 }
 
 ModuleMethodVector InsertQuantDeQuantHelper::getInvokedMethods(
@@ -1137,7 +1135,7 @@ ModuleMethodVector InsertQuantDeQuantHelper::getInvokedMethods(
      m = getInvokedModuleOpt(module, n, graph->inputs()[0]);
    }
    if (m) {
-      invoked_methods.push_back({*m, module_method_name});
+      invoked_methods.emplace_back(*m, module_method_name);
    }
  }
 
@@ -20,9 +20,9 @@ CanonicalArgVec cannonicalizeVec(
   for (auto& arg : arg_vec) {
     if (const IValue* iv = c10::get_if<IValue>(&arg)) {
       if (deep_copy) {
-        canonical_args.push_back(iv->deepcopy());
+        canonical_args.emplace_back(iv->deepcopy());
       } else {
-        canonical_args.push_back(*iv);
+        canonical_args.emplace_back(*iv);
       }
     } else {
       auto& ss = c10::get<at::SymbolicShape>(arg);
@@ -38,7 +38,7 @@ std::vector<CanonicalizedSymbolicShape> cannonicalizeVec(
   std::vector<CanonicalizedSymbolicShape> canonical_rets;
   canonical_rets.reserve(ret_vec.size());
   for (auto& ss : ret_vec) {
-    canonical_rets.emplace_back(CanonicalizedSymbolicShape(ss, ss_map));
+    canonical_rets.emplace_back(ss, ss_map);
   }
   return canonical_rets;
 }
@@ -358,7 +358,7 @@ void insertDynamicShapesGuard(
       continue;
     }
     inputs_to_check.push_back(node_input);
-    guard_types.push_back(
+    guard_types.emplace_back(
         subgraph->inputs().at(i)->type()->expect<TensorType>()->withStrides(
             c10::VaryingShape<c10::Stride>()));
   }
@@ -320,7 +320,8 @@ void insertTypeGuard(
      continue;
    }
    inputs_to_check.push_back(input);
-    guard_types.push_back(type_converter(input->type()->expect<TensorType>()));
+    guard_types.emplace_back(
+        type_converter(input->type()->expect<TensorType>()));
   }
   if (!inputs_to_check.size()) {
     return;
@@ -642,9 +642,9 @@ void initJITBindings(PyObject* module) {
         std::vector<TypePtr> input_types;
         for (Value* v : g->inputs()) {
           if (auto tt = v->type()->cast<TensorType>()) {
-            input_types.push_back(tt);
+            input_types.emplace_back(tt);
           } else {
-            input_types.push_back(nullptr);
+            input_types.emplace_back(nullptr);
           }
         }
         EraseShapeInformation(g);
@@ -1076,7 +1076,7 @@ void initPythonIRBindings(PyObject* module_) {
         for (const auto& enum_name_value : enum_names_values) {
           auto enum_name = py::cast<std::string>(enum_name_value.attr("name"));
           auto enum_value = toIValue(enum_name_value.attr("value"), value_type);
-          names_values.emplace_back(std::make_pair(enum_name, enum_value));
+          names_values.emplace_back(enum_name, enum_value);
         }
         return EnumType::create(
             c10::QualifiedName(qualified_name),
@@ -64,12 +64,12 @@ FunctionSchema PythonValue::getSchema(
   // No type signature was provided on the callable, so make a default
   // signature where each argument is typed as a Tensor
   for (; names_it != param_names.end(); ++names_it) {
-    args.emplace_back(Argument(
+    args.emplace_back(
         /*name=*/*names_it,
         /*type=*/TensorType::get(),
         /*N=*/c10::nullopt,
         /*default_value=*/c10::nullopt,
-        /*kwarg_only=*/false));
+        /*kwarg_only=*/false);
   }
 
   // Use as many outputs as are requested to make the return type
@@ -123,7 +123,7 @@ std::shared_ptr<SugaredValue> PythonValue::call(
     size_t n_binders) {
   std::vector<NamedValue> argsWithSelf;
   if (moduleSelf_) {
-    argsWithSelf.emplace_back(NamedValue("self", moduleSelf_));
+    argsWithSelf.emplace_back("self", moduleSelf_);
   }
   argsWithSelf.insert(argsWithSelf.end(), args.begin(), args.end());
 
@@ -795,7 +795,7 @@ std::shared_ptr<SugaredValue> ModuleValue::call(
             ->insertNode(calling_graph->createTupleUnpack(forward_input))
             ->outputs();
     for (auto& output_node : output_nodes) {
-      pre_hook_result.emplace_back(NamedValue(output_node));
+      pre_hook_result.emplace_back(output_node);
     }
     if (args.size() != 0) { // only replace input if it existed
       args = pre_hook_result;
@@ -1260,8 +1260,7 @@ void initJitScriptBindings(PyObject* module) {
           [](const Module& m) {
             std::vector<StrongFunctionPtr> funcs;
             for (auto& hook : m.type()->getForwardHooks()) {
-              funcs.emplace_back(
-                  StrongFunctionPtr(m.type()->compilation_unit(), hook));
+              funcs.emplace_back(m.type()->compilation_unit(), hook);
             }
             return funcs;
           })
@@ -1270,8 +1269,7 @@ void initJitScriptBindings(PyObject* module) {
           [](const Module& m) {
             std::vector<StrongFunctionPtr> funcs;
             for (auto& pre_hook : m.type()->getForwardPreHooks()) {
-              funcs.emplace_back(
-                  StrongFunctionPtr(m.type()->compilation_unit(), pre_hook));
+              funcs.emplace_back(m.type()->compilation_unit(), pre_hook);
             }
             return funcs;
           })
@@ -1777,7 +1775,7 @@ void initJitScriptBindings(PyObject* module) {
                 "definitions. File an issue on GitHub if you want "
                 "something else!";
           }
-          methodDefs.emplace_back(Def(def));
+          methodDefs.emplace_back(def);
          methodRcbs.push_back(
              pythonResolver(rcb, classDef.name().name(), classType));
        }
@@ -222,7 +222,7 @@ struct UnpackInstructions {
         stack.emplace_back(lst);
       } break;
       case PUSH_NONE: {
-        stack.emplace_back(IValue());
+        stack.emplace_back();
       }
     }
   }
@@ -297,7 +297,7 @@ struct InterpreterStateImpl : c10::intrusive_ptr_target {
         INST_NEXT;
       case INST(OPN): {
         INST_GUARD;
-        stack.push_back(inst.N);
+        stack.emplace_back(inst.N);
 #ifndef NDEBUG
         size_t init_size = stack.size();
 #endif
@@ -1275,7 +1275,7 @@ static std::vector<c10::optional<Operator>> createOperators(
 
 RegisterOperators reg(([]() {
   auto v = createOperators(opGenArgs);
-  v.push_back(Operator(
+  v.emplace_back(Operator(
       prim::tolist,
       // This operator has to be unschematized because the return type
       // depends on the type hint and input. The implementation of this
@@ -272,7 +272,7 @@ void listUnpack(Stack& stack, size_t num_outputs) {
 void tupleConstruct(Stack& stack, size_t num_inputs) {
   switch (num_inputs) {
     case 0:
-      stack.push_back(c10::ivalue::Tuple::create());
+      stack.emplace_back(c10::ivalue::Tuple::create());
       break;
     case 1:
       stack.back() = c10::ivalue::Tuple::create(std::move(stack.back()));
@@ -336,7 +336,7 @@ void listConstruct(
     drop(stack, num_inputs);
     return vals;
   };
-  stack.push_back(makeList(stack, list_type, num_inputs));
+  stack.emplace_back(makeList(stack, list_type, num_inputs));
 }
 
 void dictConstruct(
@@ -46,7 +46,7 @@ c10::IValue InlinedCallStackSerializer::serialize(
     elements.emplace_back(
         serialize(cs_ptr->callee().value(), source_range_tags));
   } else {
-    elements.emplace_back(c10::IValue());
+    elements.emplace_back();
   }
   auto fn_name = cs_ptr->function_name();
   if (!fn_name.empty()) {
@@ -660,7 +660,7 @@ void SourceImporterImpl::importEnum(
           << ". Only Integers, Floats and Strings are supported.";
     }
 
-    names_values.emplace_back(std::make_pair(name, ivalue));
+    names_values.emplace_back(name, ivalue);
   }
 
   if (!value_type) {
@@ -310,7 +310,7 @@ PickleOpCode Unpickler::readInstruction() {
       stack_.emplace_back(false);
     } break;
     case PickleOpCode::NONE: {
-      stack_.emplace_back(IValue());
+      stack_.emplace_back();
     } break;
     case PickleOpCode::BININT1: {
       uint8_t value = read<uint8_t>();
@@ -144,7 +144,7 @@ std::vector<std::pair<BufPtr, BufPtr>> AllocBufsWithMemReuse(
     // it, and there are no future reuses on its memory).
     // TODO: reuse memory for bufs with dynamic shapes
     if (!bufSize(buf)) {
-      buf_allocs.emplace_back(std::make_pair(buf, buf));
+      buf_allocs.emplace_back(buf, buf);
      continue;
    }
 
@@ -193,7 +193,7 @@ std::vector<std::pair<BufPtr, BufPtr>> AllocBufsWithMemReuse(
     // it.
     if (!allocated) {
       buf_mem_map[buf] = buf;
-      buf_allocs.emplace_back(std::make_pair(buf, buf));
+      buf_allocs.emplace_back(buf, buf);
     }
   }
 
@@ -1445,7 +1445,7 @@ void TensorExprKernel::bindConstant(const torch::jit::Value* v) {
   std::vector<ExprHandle> te_sizes;
   te_sizes.reserve(sizes.size());
   for (auto s : sizes) {
-    te_sizes.push_back(s);
+    te_sizes.emplace_back(s);
   }
   BufPtr buf = alloc<Buf>(
       "const_" + sanitizeName(v->debugName()),
@@ -2486,7 +2486,7 @@ bool LoopNest::flatten(const std::vector<ForPtr>& loops, ForPtr* flattened) {
     auto curr_loop = normalized_loops[idx];
     ExprPtr div = alloc<Div>(flat_var, stop);
     ExprPtr sub_expr = idx == 0 ? div : alloc<Mod>(div, curr_loop->stop());
-    var_mapping.push_back(std::make_pair(curr_loop->var(), sub_expr));
+    var_mapping.emplace_back(curr_loop->var(), sub_expr);
     stop = alloc<Mul>(curr_loop->stop(), stop);
   }
   auto flattened_body =
@@ -3197,8 +3197,8 @@ void LoopNest::computeAt(StmtPtr s, ForPtr f) {
   }
 
   for (const auto i : c10::irange(prod_indices.size())) {
-    rewrite_indices_map.push_back(
-        {prod_indices[i], alloc<Add>(temp_indices[i], offsets[i])});
+    rewrite_indices_map.emplace_back(
+        prod_indices[i], alloc<Add>(temp_indices[i], offsets[i]));
   }
 
   // Construct the temp statement
@@ -1747,7 +1747,7 @@ int nnc_lowerings_lazy_registration() {
         int64_t i = 0;
         for (const auto& a : axes) {
           if (i++ != dim) {
-            indices.emplace_back(ExprHandle(a.node()));
+            indices.emplace_back(a.node());
          }
        }
 
@@ -1043,9 +1043,8 @@ void MemDependencyChecker::insertBuffers(
     VarPtr var = b->base_handle();
     IndexBounds bounds;
     for (const auto& d : b->dims()) {
-      bounds.push_back(
-          {immLike(d, 0),
-           IRSimplifier::simplify(alloc<Sub>(d, immLike(d, 1)))});
+      bounds.emplace_back(
+          immLike(d, 0), IRSimplifier::simplify(alloc<Sub>(d, immLike(d, 1))));
     }
     auto info =
         std::make_shared<AccessInfo>(nextAccess_++, type, nullptr, var, bounds);
@@ -1135,7 +1134,7 @@ void MemDependencyChecker::visit(AllocatePtr v) {
   ExprPtr flat_size = buf_flat_size(v->buf());
   flat_size =
       IRSimplifier::simplify(alloc<Sub>(flat_size, immLike(flat_size, 1)));
-  bounds.push_back({immLike(flat_size, 0), flat_size});
+  bounds.emplace_back(immLike(flat_size, 0), flat_size);
 
   auto info = std::make_shared<AccessInfo>(
       nextAccess_++, AccessType::Alloc, nullptr, var, bounds);
@@ -1235,7 +1234,7 @@ void MemDependencyChecker::updateWriteHistory(
   }
 
   if (insert && isWrite) {
-    writeHistory.emplace_back(std::make_pair(info->bounds(), info));
+    writeHistory.emplace_back(info->bounds(), info);
   }
 }
 
@@ -136,7 +136,7 @@ std::vector<std::pair<Backend, ScalarType>> all_declared_types() {
         (backend == Backend::SparseCUDA || backend == Backend::SparseCPU)) {
       continue;
     }
-    ret.emplace_back(std::make_pair(backend, scalar_type));
+    ret.emplace_back(backend, scalar_type);
   }
 }
 