mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-20 21:14:14 +08:00
Clang-Tidy: Improve ctors by removing unnecessary copies and initializations (#91538)
Apply clang-tidy fixups to prefer member initializers and modernize-pass-by-value. This is mostly a no-op, but it should make a few ctors slightly more readable and more efficient. Also drops in some missing moves that prevent a lot of unnecessary copying. Pull Request resolved: https://github.com/pytorch/pytorch/pull/91538 Approved by: https://github.com/ezyang
This commit is contained in:
committed by
PyTorch MergeBot
parent
b407d98dbe
commit
77c2a8a11f
@@ -280,13 +280,12 @@ struct TORCH_API AutogradMeta : public c10::AutogradMetaInterface {
   AutogradMeta(
       at::TensorImpl* self_impl = nullptr,
       bool requires_grad = false,
-      Edge gradient_edge = Edge()) {
-    grad_fn_ = std::move(gradient_edge.function);
-    requires_grad_ = false;
-    retains_grad_ = -1;
-    is_view_ = false;
-    output_nr_ = gradient_edge.input_nr;
+      Edge gradient_edge = Edge())
+      : grad_fn_(std::move(gradient_edge.function)),
+        requires_grad_(false),
+        retains_grad_(-1),
+        is_view_(false),
+        output_nr_(gradient_edge.input_nr) {
     // set_requires_grad also checks error conditions.
     if (requires_grad) {
       TORCH_INTERNAL_ASSERT(self_impl);
Reference in New Issue
Block a user