Avoid some dangling reference warnings (#132535)

Pull Request resolved: https://github.com/pytorch/pytorch/pull/132535
Approved by: https://github.com/aaronenyeshi
Author: Isuru Fernando
Date: 2024-10-15 13:55:21 +00:00
Committed by: PyTorch MergeBot
Parent: 0c63de9755
Commit: f3d7a02716

22 changed files with 74 additions and 42 deletions
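
The changes below all follow one pattern: a reference was bound to the result
of a member call on a temporary object, which GCC 13's -Wdangling-reference
diagnostic flags; the fix binds the temporary to a named local first, making
the lifetime of the object the reference points into explicit. Here is a
minimal sketch of the before/after shape, using hypothetical Param/Ident
stand-ins rather than the actual JIT types:

#include <string>

// Hypothetical stand-ins: ident() returns a wrapper by value, and name()
// returns a reference into that wrapper.
struct Ident {
  std::string name_;
  const std::string& name() const { return name_; }
};

struct Param {
  Ident ident() const { return Ident{"x"}; }  // returns a temporary
};

int main() {
  Param p;

  // Before: `name` refers into the temporary Ident returned by ident().
  // The temporary dies at the end of the full-expression, so the reference
  // dangles here; -Wdangling-reference warns on this shape (including in
  // cases where the referenced data actually does live on elsewhere).
  // const auto& name = p.ident().name();

  // After: name the temporary so the referenced object outlives its uses.
  auto ident = p.ident();
  const auto& name = ident.name();
  return name == "x" ? 0 : 1;
}

In the PyTorch hunks the referenced data is usually owned by a longer-lived
tree or list, so many of these changes silence a conservative warning rather
than fix a live bug, but the rewritten form is equivalent and easier to
reason about.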

@@ -66,7 +66,8 @@ namespace at {
     } else if (ivalue.isTensorList()) {
       auto tensors = std::move(ivalue).toTensorList();
       for(const auto j : c10::irange(tensors.size())) {
-        const Tensor& tensor = tensors[j];
+        const auto& tensor_ref = tensors[j];
+        const Tensor& tensor = tensor_ref;
         if (tensor._is_zerotensor()) {
           // TODO: assert requires_grad=False
           //_like should not propagate zerotensor dispatch key
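
The c10::List hunks, like the one above, are a variant of the same pattern:
operator[] on c10::List returns a small proxy object by value, so converting
straight to const Tensor& reads through a temporary, and -Wdangling-reference
flags it even though the element itself is owned by the list. A sketch with a
hypothetical ProxyList standing in for c10::List:

#include <cstddef>
#include <vector>

// Hypothetical stand-in for c10::List: operator[] returns a proxy by value
// rather than a direct reference to the element.
template <typename T>
class ProxyList {
 public:
  struct Proxy {
    const T& ref;  // points at storage owned by the list
    operator const T&() const { return ref; }
  };
  explicit ProxyList(std::vector<T> v) : data_(std::move(v)) {}
  Proxy operator[](std::size_t i) const { return Proxy{data_[i]}; }

 private:
  std::vector<T> data_;
};

int main() {
  ProxyList<int> xs({1, 2, 3});

  // Before: the reference is obtained through a temporary Proxy, which the
  // warning flags even though the int is owned by xs and stays alive.
  // const int& x = xs[0];

  // After: the two-step binding used in the diff. The first line
  // lifetime-extends the proxy; the second converts via a named object,
  // which the warning accepts.
  const auto& x_ref = xs[0];
  const int& x = x_ref;
  return x == 1 ? 0 : 1;
}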

@@ -168,7 +168,8 @@ class IListRefTagImpl<IListRefTag::Boxed, at::OptionalTensorRef>
   */
  static IListRefConstRef<at::OptionalTensorRef> iterator_get(
      const typename list_type::const_iterator& it) {
-    const auto& ivalue = (*it).get();
+    const auto& elem = *it;
+    const auto& ivalue = elem.get();
     if (!ivalue.isNone()) {
       const auto& tensor = ivalue.toTensor();
       return (tensor.defined()) ? tensor : at::OptionalTensorRef{};

@@ -151,7 +151,9 @@ public:
       // no safe toTensorRef method, alas)
       ks = ks | ivalue.unsafeToTensorImpl()->key_set();
     } else if (C10_UNLIKELY(ivalue.isTensorList())) {
-      for (const at::Tensor& tensor : ivalue.toTensorList()) {
+      const c10::List<at::Tensor> tensorlist = ivalue.toTensorList();
+      for (const auto& tensor_ref : tensorlist) {
+        const at::Tensor& tensor = tensor_ref;
         ks = ks | tensor.key_set();
       }
     }

@@ -21,8 +21,9 @@ void OSSProxyExecutor::prefill_stack_with_static_arguments(
   auto& dynamic_args = op_kernel.dynamic_args_;
 
   TORCH_CHECK(serialized_arg.size() == 1);
-  std::string serialized_arg_type = serialized_arg.begin().key();
-  auto& serialized_arg_val = serialized_arg.begin().value();
+  auto serialized_arg_elem = serialized_arg.begin();
+  std::string serialized_arg_type = serialized_arg_elem.key();
+  auto& serialized_arg_val = serialized_arg_elem.value();
 
   switch (schema_arg_type->kind()) {
     case c10::TypeKind::TensorType: {
@@ -255,8 +256,9 @@ void OSSProxyExecutor::get_output_info_from_serialized(
   size_t output_index = 0;
   for (const auto& serialized_output : serialized_node["outputs"]) {
     TORCH_CHECK(serialized_output.size() == 1);
-    std::string serialized_output_type = serialized_output.begin().key();
-    auto& serialized_output_val = serialized_output.begin().value();
+    auto serialized_output_elem = serialized_output.begin();
+    std::string serialized_output_type = serialized_output_elem.key();
+    auto& serialized_output_val = serialized_output_elem.value();
 
     auto& schema_return = schema_returns[output_index];
     const at::TypePtr& schema_return_type = schema_return.real_type();

@@ -854,7 +854,8 @@ struct to_ir {
 
     if (self) {
       AT_ASSERT(it != end);
-      const auto& name = (*it).ident().name();
+      auto ident = (*it).ident();
+      const auto& name = ident.name();
       Value* new_input = block->addInput()->setDebugName(name);
       environment_stack->setSugaredVar(
           (*it).ident().range(),
@@ -872,7 +873,8 @@
     bool shouldDeriveType = shouldDeriveSetStateType(def, schema);
     size_t arg_annotation_idx = 0;
     for (; it != end; ++it) {
-      auto& name = (*it).ident().name();
+      auto ident = (*it).ident();
+      auto& name = ident.name();
       // Add the input to the graph
       Value* new_input = block->addInput();
       if (meaningfulName(name)) {
@@ -1017,7 +1019,8 @@
           " (see https://github.com/pytorch/pytorch/issues/31430)");
     }
     const SugaredValuePtr sv = emitSugaredExpr(subscript.value(), 1);
-    const SourceRange& val_range = subscript.value().range();
+    auto subscript_value = subscript.value();
+    const SourceRange& val_range = subscript_value.range();
     Value* idx = emitExpr(subscript_exprs[0]);
     Value* val = sv->asValue(val_range, method);
 
@@ -1190,7 +1193,8 @@
       return {};
     }
     // statement must be var {is, is not} None
-    const std::string& name = Var(lhs).name().name();
+    auto var_lhs_name = Var(lhs).name();
+    const std::string& name = var_lhs_name.name();
     // While it should in theory be possible to specialize
     // the `x is None` to know x has type NoneType, we have previously
     // not done this. Unfortunately, doing this will make the type None
@@ -2169,7 +2173,8 @@
           ErrorReport(attrExpr)
           << "hasattr's second argument must be a string literal");
     }
-    const std::string& name = StringLiteral(attrExpr).text();
+    auto literal = StringLiteral(attrExpr);
+    const std::string& name = literal.text();
     const bool hasAttr = obj->hasAttr(objExpr.range(), method, name);
     return CondValue(*graph, objExpr.range(), hasAttr, {});
   }
@@ -3502,7 +3507,8 @@
           ErrorReport(apply)
           << "getattr's second argument must be a string literal");
     }
-    const std::string& name = StringLiteral(selector).text();
+    auto literal = StringLiteral(selector);
+    const std::string& name = literal.text();
 
     if (apply.inputs().size() == 2) {
       return obj->attr(apply.range(), method, name);
@@ -5287,7 +5293,8 @@
     const SugaredValuePtr sv = emitSugaredExpr(subscript.value(), 1);
     const List<Expr>& subscript_exprs = subscript.subscript_exprs();
     const SourceRange& range = subscript.range();
-    const SourceRange& val_range = subscript.value().range();
+    const auto& val = subscript.value();
+    const SourceRange& val_range = val.range();
     if (subscript_exprs.size() != 1) {
       return std::make_shared<SimpleValue>(emitMultidimSlicing(
           range, sv->asValue(val_range, method), subscript_exprs));

@@ -124,7 +124,8 @@ std::optional<std::pair<TypePtr, int32_t>> ScriptTypeParser::parseBroadcastList(
   // Alias torch.nn._common_types._size_?_t to BroadcastingList?[int]
   if (expr.kind() == TK_VAR) {
     auto var = Var(expr);
-    auto& name = var.name().name();
+    auto var_name = var.name();
+    auto& name = var_name.name();
     constexpr auto _size_prefix = "_size_";
     constexpr auto _size_suffix = "_t";
     constexpr auto _size_n_len = 9; // strlen("_size_X_t")
@@ -206,7 +207,8 @@ std::optional<std::string> ScriptTypeParser::parseBaseTypeName(
     }
     case '.': {
       auto select = Select(expr);
-      const std::string& name = select.selector().name();
+      auto selector = select.selector();
+      const std::string& name = selector.name();
       // Special case for torch.Tensor and its' subclasses
       const std::unordered_set<std::string> tensor_subtypes = {
           "Tensor",
@@ -262,7 +264,8 @@ TypePtr ScriptTypeParser::parseTypeFromExprImpl(const Expr& expr) const {
     return subscriptToType(*value_name, subscript);
 
   } else if (expr.kind() == TK_STRINGLITERAL) {
-    const auto& type_name = StringLiteral(expr).text();
+    auto literal = StringLiteral(expr);
+    const auto& type_name = literal.text();
 
     // Check if the type is a custom class. This is done by checking
     // if type_name starts with "torch.classes."

@@ -828,7 +828,8 @@ void AliasDb::analyzeImpl(Node* node) {
   std::unordered_map<Symbol, Value*> formalToActual;
   for (const auto i : c10::irange(schema.arguments().size())) {
     const at::AliasInfo* formal = schema.arguments()[i].alias_info();
-    const auto& actualValue = node->inputs().at(i);
+    auto node_inputs = node->inputs();
+    const auto& actualValue = node_inputs.at(i);
 
     // Skip if there's no alias annotation
     if (!formal) {

@@ -132,24 +132,24 @@ Function::Function(const c10::IValue& value) {
   nnc_kernel_id_ = dict.at("nnc_kernel_id").toStringRef();
   parameters_ = dict.at("parameters").toList();
 
+  auto input_tuple_ref = dict.at("input_specs").toTupleRef();
   // input_specs_
-  for (const auto& input_value :
-       dict.at("input_specs").toTupleRef().elements()) {
+  for (const auto& input_value : input_tuple_ref.elements()) {
     input_specs_.emplace_back(input_value);
   }
 
+  auto output_tuple_ref = dict.at("output_specs").toTupleRef();
   // output_specs_
-  for (const auto& output_value :
-       dict.at("output_specs").toTupleRef().elements()) {
+  for (const auto& output_value : output_tuple_ref.elements()) {
     output_specs_.emplace_back(output_value);
   }
 
   // memory_plan_
   memory_plan_ = MemoryPlan(dict.at("memory_plan"));
 
+  auto pos_tuple_ref = dict.at("sym_shape_pos").toTupleRef();
   // symbolic shape positions
-  for (const auto& sym_shape_pos :
-       dict.at("sym_shape_pos").toTupleRef().elements()) {
+  for (const auto& sym_shape_pos : pos_tuple_ref.elements()) {
     auto sym_shape_elements = sym_shape_pos.toTupleRef().elements();
     sym_shape_positions_.emplace_back(
         sym_shape_elements[0].toInt(), sym_shape_elements[1].toInt());

@@ -49,8 +49,8 @@ void PTQQuanizationHelper::quantize_dynamic(
 
   m.compareMethodSchemas(method_name, quantized_method_name);
   m.unsafeRemoveMethod(method_name);
-  const Function& to_be_copied =
-      m.find_method(quantized_method_name).value().function();
+  const auto& quantized_method = m.find_method(quantized_method_name);
+  const Function& to_be_copied = quantized_method.value().function();
   m.unsafeCopyMethod(method_name, to_be_copied);
   m.unsafeRemoveMethod(quantized_method_name);
   m.unsafeRemoveMethod(quantize_method_name);

@@ -332,7 +332,8 @@ void cloneMethod(
     Module& module,
     const std::string& orig_method_name,
     const std::string& new_method_name) {
-  const Function& method = module.get_method(orig_method_name).function();
+  const auto& orig_method = module.get_method(orig_method_name);
+  const Function& method = orig_method.function();
   auto graph = toGraphFunction(method).graph()->copy();
   const auto& schema = method.getSchema();
   const auto this_method_name =

@@ -1444,7 +1444,8 @@ void InsertQuantDeQuantHelper::run(
   // observing a potentially mutated value due to some in-place operation
   std::vector<Value*> input_values;
   for (const auto idx : c10::irange(1, method.num_inputs())) {
-    auto& v = graph->inputs()[idx];
+    auto inputs = graph->inputs();
+    const auto v = inputs[idx];
     if (v->type()->isSubtypeOf(*TensorType::get())) {
       input_values.push_back(v);
     }
@@ -1651,7 +1652,8 @@ void InsertQuantDeQuantHelper::runForOnDevicePTQ(
   // observing a potentially mutated value due to some in-place operation
   std::vector<Value*> input_values;
   for (const auto idx : c10::irange(1, method.num_inputs())) {
-    auto& v = graph->inputs()[idx];
+    auto inputs = graph->inputs();
+    auto& v = inputs[idx];
     if (v->type()->isSubtypeOf(*TensorType::get())) {
       input_values.push_back(v);
     }

@@ -26,7 +26,8 @@ IValue deepCopy(const IValue& self) {
   auto source = self.toList();
   auto newList = c10::impl::GenericList(source.elementType());
   newList.reserve(source.size());
-  for (const IValue& value : source) {
+  for (const auto& value_ref : source) {
+    const IValue& value = value_ref;
     newList.push_back(deepCopy(value));
   }
   return newList;

@@ -330,7 +330,8 @@ struct DifferentiableGraphBackward : public autograd::Node {
   void addOutputForIValue(const IValue& value) {
     if (value.isTensorList()) {
       input_tensor_lists_.insert({index_, value.toTensorList().size()});
-      for (const at::Tensor& tensor : value.toTensorList()) {
+      for (const auto& tensor_ref : value.toTensorList()) {
+        const at::Tensor& tensor = tensor_ref;
         addOutputForTensor(tensor);
         index_++;
       }
@@ -361,7 +362,8 @@ struct DifferentiableGraphBackward : public autograd::Node {
     if (v.isTensorList()) {
       auto tensors = v.toTensorList();
       input_instructions_.pushTensorList(tensors.size());
-      for (const at::Tensor& tensor : tensors) {
+      for (const auto& tensor_ref : tensors) {
+        const at::Tensor& tensor = tensor_ref;
         addInputVariable(tensor);
       }
     } else if (v.isTensor()) {

@@ -329,7 +329,8 @@ void listContains(Stack& stack) {
   auto key = pop(stack).to<T>();
   auto list = pop(stack).to<c10::List<T>>();
   // NOLINTNEXTLINE(performance-implicit-conversion-in-loop)
-  for (const T& item : list) {
+  for (const auto& item_ref : list) {
+    const T& item = item_ref;
     if (item == key) {
       push(stack, true);
       return;

@@ -1175,7 +1175,8 @@ REGISTER_NATIVE_OPERATOR_FUNCTOR(
         p_node->Output(0) = future->value();
         return;
       }
-      auto& elems = future->value().toTupleRef().elements();
+      auto tuple_ref = future->value().toTupleRef();
+      auto& elems = tuple_ref.elements();
       TORCH_DCHECK_EQ(elems.size(), p_node->num_outputs());
       for (const auto i : c10::irange(elems.size())) {
         p_node->Output(i) = elems[i];

@@ -204,8 +204,9 @@ std::pair<IValue, IValue> getFunctionTuple(
     // For DictType, there are two items in t->containedTypes(), the first one
     // is key and the second one is value. Both of them could be NamedTuple
     // type.
-    const TypePtr& key_type = t->containedTypes()[0];
-    const TypePtr& value_type = t->containedTypes()[1];
+    const auto& contained_types = t->containedTypes();
+    const TypePtr& key_type = contained_types[0];
+    const TypePtr& value_type = contained_types[1];
     std::string key_type_str = get_named_tuple_str_or_default(
         compilation_unit, key_type, key_type->annotation_str());
     std::string value_type_str = get_named_tuple_str_or_default(

@@ -283,7 +283,8 @@ void SourceImporterImpl::importNamedType(
   if (!class_def.superclass().present()) {
     return importClass(qualified_name, class_def, /*is_module=*/false);
   }
-  const auto& superclass_name = Var(class_def.superclass().get()).name().name();
+  auto superclass_name_var = Var(class_def.superclass().get()).name();
+  const auto& superclass_name = superclass_name_var.name();
   if (superclass_name == "Module") {
     importClass(qualified_name, class_def, /*is_module=*/true);
   } else if (superclass_name == "NamedTuple") {

@@ -1139,7 +1139,8 @@ struct PythonPrintImpl {
         stmt << ")";
       } break;
       case prim::CallMethod: {
-        const auto& self = node->inputs().at(0);
+        auto node_inputs = node->inputs();
+        const auto& self = node_inputs.at(0);
         const auto& methodName = node->s(attr::name);
         stmt << "(" << useOf(self) << ")"
              << "." << methodName << "(";

@@ -142,7 +142,8 @@ void restoreAccurateTypeTags(const IValue& root, const TypePtr& type_tag) {
         auto elem_type = w.type->containedType(0);
         auto lst = w.value.toList();
         lst.unsafeSetElementType(elem_type);
-        for (const IValue& item : lst) {
+        for (const auto& item_ref : lst) {
+          const IValue& item = item_ref;
           Work elem = {elem_type, item};
           to_process.emplace_back(std::move(elem));
         }

@@ -1763,7 +1763,8 @@ void TensorExprKernel::compile() {
 
   // Move output operands from `bufs_` to `bufOutputs_`
   for (auto i : c10::irange(graph_->outputs().size())) {
-    auto& output = graph_->outputs().at(i);
+    auto outputs = graph_->outputs();
+    auto& output = outputs.at(i);
     if (!bufs_.count(output)) {
       throw malformed_input("cannot find output Tensor");
     }

@@ -68,7 +68,8 @@ static std::list<std::pair<at::RecordFunctionHandle, int>> flattenOpIdList(
   std::list<std::pair<at::RecordFunctionHandle, int>> input_op_id_list;
   auto state_ptr = NVTXThreadLocalState::getTLS();
   TORCH_INTERNAL_ASSERT(state_ptr, "Expected profiler state set");
-  for (const c10::IValue& input : list) {
+  for (const auto& input_ref : list) {
+    const c10::IValue& input = input_ref;
     if (input.isTensor()) {
       const at::Tensor& tensor = input.toTensor();
       auto producer_op_pair = state_ptr->getOpIdFromInput(tensor);

@@ -163,7 +163,8 @@ std::string stacksToStr(
 static std::vector<std::vector<int64_t>> flattenList(
     const c10::List<c10::IValue>& list) {
   std::vector<std::vector<int64_t>> tensor_dims;
-  for (const c10::IValue& input : list) {
+  for (const auto& input_ref : list) {
+    const c10::IValue& input = input_ref;
     if (input.isTensor()) {
       const at::Tensor& tensor = input.toTensor();
       if (tensor.defined()) {