Move TypeAndSize out of /generated/ (#105195)

This avoids a circular import in the next PR.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/105195
Approved by: https://github.com/albanD
Author: Jason Ansel
Date: 2023-07-16 16:32:55 -07:00
Committed by: PyTorch MergeBot
Parent: 28d018dafd
Commit: 3fdf365397
2 changed files with 16 additions and 12 deletions

@@ -43,18 +43,7 @@ inline c10::List<c10::optional<Tensor>> unpack_opt_list(at::ArrayRef<SavedVariab
   return result;
 }
 
-struct TypeAndSize {
-  TypeAndSize() : options(at::TensorOptions()) {}
-  /* implicit */
-  TypeAndSize(const Tensor & t)
-    : sym_sizes(t.sym_sizes().vec())
-    , options(t.options()) {}
-
-  Tensor zeros() { return at::zeros_symint(sym_sizes, options); }
-
-  std::vector<c10::SymInt> sym_sizes;
-  at::TensorOptions options;
-};
+using torch::autograd::TypeAndSize;
 
 ${autograd_function_declarations}

@@ -730,6 +730,21 @@ edge_list collect_next_edges(Variables&&... variables) {
   make.apply(std::forward<Variables>(variables)...);
   return std::move(make.next_edges);
 }
 
+struct TypeAndSize {
+  TypeAndSize() : options(at::TensorOptions()) {}
+  /* implicit */
+  TypeAndSize(const at::Tensor& t)
+      : sym_sizes(t.sym_sizes().vec()), options(t.options()) {}
+
+  at::Tensor zeros() {
+    return at::zeros_symint(sym_sizes, options);
+  }
+
+  std::vector<c10::SymInt> sym_sizes;
+  at::TensorOptions options;
+};
+
 } // namespace autograd
 } // namespace torch
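
For context, a minimal sketch of how a backward node can use the relocated helper. This is illustrative only and not part of the commit: the node name ExampleBackward and the field self_info are hypothetical, but the pattern follows how TypeAndSize is typically used; it records a saved tensor's symbolic sizes and TensorOptions so the backward pass can materialize a matching zeros tensor without keeping the original tensor alive.

#include <torch/csrc/autograd/function.h>

// Hypothetical hand-written backward node, for illustration only.
struct ExampleBackward : public torch::autograd::Node {
  // Captured during the forward pass: shape + dtype/device of the input,
  // but not the tensor's storage.
  torch::autograd::TypeAndSize self_info;

  torch::autograd::variable_list apply(
      torch::autograd::variable_list&& grads) override {
    auto& grad = grads[0];
    // If no gradient flowed back, return zeros of the saved shape/options
    // instead of propagating an undefined tensor.
    return {grad.defined() ? grad : self_info.zeros()};
  }
};

Since the struct now lives in the torch::autograd header rather than in the generated Functions.h template, code like the sketch above can refer to it without including the generated header, which appears to be the circular include the commit message is avoiding.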