Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 21:14:14 +08:00)
Clang-Tidy: Improve ctors by removing unnecessary copies and initializations (#91538)
Apply clang-tidy fixups to prefer member initializers and modernize-pass-by-value. This is mostly a no-op, but it should make a few ctors slightly more readable and more efficient. It also drops in some missing moves that prevent a lot of unnecessary copying. Pull Request resolved: https://github.com/pytorch/pytorch/pull/91538 Approved by: https://github.com/ezyang
committed by: PyTorch MergeBot
parent: b407d98dbe
commit: 77c2a8a11f
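For context before the diff: the two patterns applied throughout are the "prefer member initializer" fix, which hoists constructor-body assignments into the member-initializer list, and modernize-pass-by-value, which takes sink parameters by value and std::moves them into members. A minimal before/after sketch of both patterns follows; the Widget types are illustrative, not code from this PR.

    // Illustrative only; "Widget" is not from this PR.
    #include <string>
    #include <utility>
    #include <vector>

    // Before: the member is default-constructed, then assigned in the body,
    // and the const-ref parameter forces a copy even when the caller has an
    // rvalue to give away.
    struct WidgetBefore {
      explicit WidgetBefore(const std::vector<std::string>& names) {
        names_ = names;  // extra work: default-construct, then copy-assign
      }
      std::vector<std::string> names_;
    };

    // After: pass by value and move into the member-initializer list. An
    // lvalue argument is copied once at the call site; an rvalue is only moved.
    struct WidgetAfter {
      explicit WidgetAfter(std::vector<std::string> names)
          : names_(std::move(names)) {}
      std::vector<std::string> names_;
    };

    int main() {
      std::vector<std::string> v{"a", "b"};
      WidgetBefore before(v);           // copies inside the ctor
      WidgetAfter copied(v);            // one copy (at the call site), one move
      WidgetAfter moved(std::move(v));  // two moves, zero copies
      return 0;
    }

This is why the change is "mostly a no-op": behavior is unchanged, but constructors stop paying for a default-construct-then-assign, and callers holding rvalues stop paying for copies.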
@@ -248,8 +248,8 @@ TORCH_API void setFunctionalizationReapplyViewsTLS(bool reapply_views);

 class TORCH_API FunctionalizationReapplyViewsGuard {
  public:
-  FunctionalizationReapplyViewsGuard(bool reapply_views) {
-    prev_ = getFunctionalizationReapplyViewsTLS();
+  FunctionalizationReapplyViewsGuard(bool reapply_views)
+      : prev_(getFunctionalizationReapplyViewsTLS()) {
     setFunctionalizationReapplyViewsTLS(reapply_views);
   }
@@ -6,6 +6,8 @@
 #include <ATen/TensorGeometry.h>
 #include <ATen/Utils.h>

+#include <utility>
+
 // These functions are NOT in Utils.h, because this file has a dep on Tensor.h

 namespace at {
@@ -37,7 +39,7 @@ struct TORCH_API TensorGeometryArg {
   /* implicit */ TensorGeometryArg(TensorArg arg)
       : tensor(TensorGeometry{arg.tensor}), name(arg.name), pos(arg.pos) {}
   TensorGeometryArg(TensorGeometry tensor, const char* name, int pos)
-      : tensor(tensor), name(name), pos(pos) {}
+      : tensor(std::move(tensor)), name(name), pos(pos) {}
   const TensorGeometry* operator->() const {
     return &tensor;
   }
@@ -13,17 +13,11 @@ namespace at {
 ThreadLocalState::ThreadLocalState()
     : dispatch_key_(c10::impl::tls_local_dispatch_key_set()),
       debug_info_(c10::ThreadLocalDebugInfo::current()),
-      functorch_tls_(functorch::getCopyOfFuncTorchTLS()),
+      rf_tls_(at::get_record_function_tls_()), functorch_tls_(functorch::getCopyOfFuncTorchTLS()),
       autograd_tls_(c10::AutogradState::get_tls_state()),
-      python_dispatcher_state_(c10::impl::PythonDispatcherTLS::get_state()),
+      torch_dispatch_mode_state_(c10::impl::TorchDispatchModeTLS::get_state()), python_dispatcher_state_(c10::impl::PythonDispatcherTLS::get_state()),
       python_torch_function_state_(at::impl::PythonTorchFunctionTLS::get_state()),
-      functionalization_reapply_views_state_(at::functionalization::impl::getFunctionalizationReapplyViewsTLS()) {
-  rf_tls_ = at::get_record_function_tls_();
-
-  saved_tensors_default_hooks_state_ = at::SavedTensorDefaultHooks::get_tls_state();
-
-  torch_dispatch_mode_state_ = c10::impl::TorchDispatchModeTLS::get_state();
-}
+      saved_tensors_default_hooks_state_(at::SavedTensorDefaultHooks::get_tls_state()), functionalization_reapply_views_state_(at::functionalization::impl::getFunctionalizationReapplyViewsTLS()) {}

 void ThreadLocalState::set_grad_mode(bool enabled) {
   autograd_tls_.set_grad_mode(enabled);
@@ -37,10 +37,7 @@ namespace {
 template <typename T>
 struct uniform_int_from_to_distribution {

-  C10_HOST_DEVICE inline uniform_int_from_to_distribution(uint64_t range, int64_t base) {
-    range_ = range;
-    base_ = base;
-  }
+  C10_HOST_DEVICE inline uniform_int_from_to_distribution(uint64_t range, int64_t base) : range_(range), base_(base) {}

   template <typename RNG>
   C10_HOST_DEVICE inline T operator()(RNG generator) {
@@ -281,9 +278,7 @@ struct geometric_distribution {
 template <typename T>
 struct exponential_distribution {

-  C10_HOST_DEVICE inline exponential_distribution(T lambda_in) {
-    lambda = lambda_in;
-  }
+  C10_HOST_DEVICE inline exponential_distribution(T lambda_in) : lambda(lambda_in) {}

   template <typename RNG>
   C10_HOST_DEVICE inline T operator()(RNG generator) {
@@ -301,10 +296,7 @@ struct exponential_distribution {
 template <typename T>
 struct cauchy_distribution {

-  C10_HOST_DEVICE inline cauchy_distribution(T median_in, T sigma_in) {
-    median = median_in;
-    sigma = sigma_in;
-  }
+  C10_HOST_DEVICE inline cauchy_distribution(T median_in, T sigma_in) : median(median_in), sigma(sigma_in) {}

   template <typename RNG>
   C10_HOST_DEVICE inline T operator()(RNG generator) {
@@ -2,6 +2,8 @@

 #include <ATen/core/ivalue.h>

+#include <utility>
+
 namespace c10 {

 struct EnumType;
@@ -83,7 +85,7 @@ struct TORCH_API EnumType : public NamedType {
       : NamedType(TypeKind::EnumType, std::move(qualified_class_name)),
         value_type_(std::move(value_type)),
         enum_names_values_(std::move(enum_names_values)),
-        cu_(cu) {}
+        cu_(std::move(cu)) {}

   std::string annotation_str_impl(
       TypePrinter printer = nullptr) const override {
@@ -1015,18 +1015,13 @@ c10::intrusive_ptr<ivalue::Object> ivalue::Object::deepcopy(IValue::HashAliasedI

 StrongTypePtr::StrongTypePtr(
     std::shared_ptr<torch::jit::CompilationUnit> cu,
-    TypePtr type) {
-  cu_ = std::move(cu);
-  type_ = type;
+    TypePtr type) : cu_(std::move(cu)), type_(std::move(type)) {
   TORCH_INTERNAL_ASSERT(type_);
 }

 WeakTypePtr::WeakTypePtr(
     std::weak_ptr<torch::jit::CompilationUnit> cu,
-    TypePtr type) {
-  cu_ = std::move(cu);
-  type_ = type;
-}
+    TypePtr type) : cu_(std::move(cu)), type_(std::move(type)) {}

 WeakTypePtr WeakOrStrongTypePtr::asWeakTypePtr() const {
   if (!holds_strong_ref()) {
@@ -8,11 +8,12 @@
 #include <ATen/core/jit_type_base.h>
 #include <ATen/core/type_factory.h>
 #include <c10/core/SymFloat.h>
+#include <c10/macros/Export.h>
 #include <c10/util/C++17.h>
 #include <c10/util/MaybeOwned.h>
 #include <c10/util/intrusive_ptr.h>
-#include <c10/macros/Export.h>
 #include <typeindex>
+#include <utility>

 namespace torch {
 class TORCH_API CustomClassHolder : public c10::intrusive_ptr_target {};
@@ -1387,16 +1388,10 @@ struct TORCH_API WeakTypePtr {
 // internal build errors with std::variant :/
 struct WeakOrStrongCompilationUnit {
   explicit WeakOrStrongCompilationUnit(
-      std::shared_ptr<torch::jit::CompilationUnit> shared_cu) {
-    strong_ptr_ = shared_cu;
-    weak_ptr_ = c10::nullopt;
-  }
+      std::shared_ptr<torch::jit::CompilationUnit> shared_cu) : strong_ptr_(std::move(shared_cu)), weak_ptr_(c10::nullopt) {}

   explicit WeakOrStrongCompilationUnit(
-      std::weak_ptr<torch::jit::CompilationUnit> weak_cu) {
-    strong_ptr_ = c10::nullopt;
-    weak_ptr_ = weak_cu;
-  }
+      std::weak_ptr<torch::jit::CompilationUnit> weak_cu) : strong_ptr_(c10::nullopt), weak_ptr_(std::move(weak_cu)) {}

   std::shared_ptr<torch::jit::CompilationUnit> getStrongRefOrThrow() const {
     TORCH_INTERNAL_ASSERT(strong_ptr_ != c10::nullopt);
@@ -1424,17 +1419,11 @@ struct WeakOrStrongCompilationUnit {
 // Constant in the graph and a Owning reference otherwise
 struct TORCH_API WeakOrStrongTypePtr {
   explicit WeakOrStrongTypePtr(WeakTypePtr weak)
-      : cu_(WeakOrStrongCompilationUnit(weak.cu_)) {
-    type_ = weak.type_;
-  }
+      : cu_(WeakOrStrongCompilationUnit(std::move(weak.cu_))), type_(std::move(weak.type_)) {}
   explicit WeakOrStrongTypePtr(StrongTypePtr strong)
-      : cu_(WeakOrStrongCompilationUnit(strong.cu_)) {
-    type_ = strong.type_;
-  }
+      : cu_(WeakOrStrongCompilationUnit(std::move(strong.cu_))), type_(std::move(strong.type_)) {}
   explicit WeakOrStrongTypePtr(WeakOrStrongCompilationUnit cu, TypePtr type)
-      : cu_(cu) {
-    type_ = type;
-  }
+      : cu_(std::move(cu)), type_(std::move(type)) {}
   WeakTypePtr asWeakTypePtr() const;

   WeakOrStrongCompilationUnit cu_;
@@ -2152,7 +2152,7 @@ inline IValue IValue::make_capsule(
 template <
     typename T,
     std::enable_if_t<std::is_base_of<torch::CustomClassHolder, T>::value, int>>
-IValue::IValue(c10::intrusive_ptr<T> custom_class) {
+IValue::IValue(c10::intrusive_ptr<T> custom_class) : tag(Tag::Object) {
   auto classType = []() {
     try {
       return c10::getCustomClassType<c10::intrusive_ptr<T>>();
@@ -2166,7 +2166,7 @@ IValue::IValue(c10::intrusive_ptr<T> custom_class) {
   auto ivalue_obj = c10::ivalue::Object::create(std::move(classType), /* numSlots */1);
   ivalue_obj->setSlot(0, IValue::make_capsule(std::move(custom_class)));
   payload.u.as_intrusive_ptr = null_to_undefined_tensor(ivalue_obj.release());
-  tag = Tag::Object;
+
 }

 inline IValue::IValue(c10::intrusive_ptr<ivalue::Future> v)
@@ -36,14 +36,14 @@ struct QualifiedName {
     cacheAccessors();
   }

-  explicit QualifiedName(std::vector<std::string> atoms) {
-    for (const auto& atom : atoms) {
+  explicit QualifiedName(std::vector<std::string> atoms) : atoms_(std::move(atoms)) {
+    for (const auto& atom : atoms_) {
       TORCH_CHECK(!atom.empty(), "Atom cannot be empty");
       TORCH_CHECK(
           atom.find(delimiter_) == std::string::npos,
           "Delimiter not allowed in atom");
     }
-    atoms_ = std::move(atoms);

     cacheAccessors();
   }
   // Unnecessary copy. Ideally we'd use something like std::string_view.
@@ -781,14 +781,13 @@ TupleType::TupleType(
     std::shared_ptr<FunctionSchema> schema)
     : NamedType(TypeKind::TupleType, std::move(name)),
       elements_(std::move(elements)),
-      schema_(std::move(schema)) {
-  has_free_variables_ =
-      std::any_of(elements_.begin(), elements_.end(), [](TypePtr v) {
+      has_free_variables_(std::any_of(elements_.begin(), elements_.end(), [](const TypePtr& v) {
         if (!v) {
           throw std::runtime_error("Can not create tuple with None type");
         }
         return v->hasFreeVariables();
-      });
+      })), schema_(std::move(schema)) {

   if (schema_) {
     for (const Argument& arg : schema_->arguments()) {
       checkNoAny(*this, "attribute", arg.name(), arg.type());
@@ -1,3 +1,5 @@
+#include <utility>
+
 #pragma once

 namespace at { namespace native {
@@ -60,7 +62,7 @@ public:

   C10_HOST_DEVICE
   references_holder(references refs)
-  : refs{refs}
+  : refs{std::move(refs)}
   {}

   C10_HOST_DEVICE
@@ -562,12 +562,11 @@ class BroadcastLinearIndices {
   BroadcastLinearIndices(
       int64_t numel,
       IntArrayRef original_shape,
-      IntArrayRef broadcast_shape) {
+      IntArrayRef broadcast_shape) : is_broadcasting_(!original_shape.equals(broadcast_shape)) {
     // The assumption is that the broadcast_shape is a materialized broadcast
     // shape of the original_shape. We need to compute the linear indices
     // compatible with the original_shape to access the elements in the original
     // tensor corresponding to the broadcast tensor.
-    is_broadcasting_ = !original_shape.equals(broadcast_shape);
     if (is_broadcasting_) {
       linear_indices_ =
           get_linear_indices(numel, original_shape, broadcast_shape);
@@ -10,6 +10,8 @@
 #include <ATen/Functions.h>
 #else
 #include <ATen/ops/clone.h>
+
+#include <utility>
 #endif

 namespace at {
@@ -21,7 +23,7 @@ namespace native {

 // NOTE: To use this fallback, `clone` and `copy_` should fully understand and be able to correctly handle the semantic of your math bit.
 struct MathOpFallback {
-  MathOpFallback(DispatchKey key_, string op_name_) : key(key_), op_name(op_name_) {}
+  MathOpFallback(DispatchKey key_, string op_name_) : key(key_), op_name(std::move(op_name_)) {}
   virtual bool is_bit_set(const Tensor&) = 0;
   void fallback_impl(const c10::OperatorHandle& op, DispatchKeySet dispatch_keys, torch::jit::Stack* stack) {
     /*
@@ -22,6 +22,7 @@
 #include <ATen/ops/tensor.h>
 #endif

+#include <utility>
 #include <vector>

 namespace at {
@@ -201,7 +202,7 @@ struct NestedNode {
   // NestedNode(NestedNode&) = delete;
   // NestedNode(const NestedNode&) = delete;
   // NestedNode& operator=(NestedNode) = delete;
-  explicit NestedNode(T payload) : _is_leaf(true), _payload(payload) {}
+  explicit NestedNode(T payload) : _is_leaf(true), _payload(std::move(payload)) {}
   inline bool is_leaf() const {
     return _is_leaf;
   }
@@ -47,7 +47,7 @@ struct PackedLinearWeightsQnnp : public LinearPackedParamsBase {
             bias, bias.suggest_memory_format())),
         per_channel_(this->orig_weight.qscheme() == at::kPerChannelAffine),
         input_scale(std::move(input_scale)),
-        w_scales(w_scales),
+        w_scales(std::move(w_scales)),
         w_zero_points(std::move(w_zps)) {}

   std::unique_ptr<qnnpack::PackBMatrix> w;
@@ -137,7 +137,7 @@ struct PackedConvWeightsQnnp : public ConvPackedParamsBase<kSpatialDim> {
         is_per_channel_(is_per_channel),
         input_scale(input_scale),
         kernel_(std::move(kernel)),
-        w_scales(w_scale),
+        w_scales(std::move(w_scale)),
         w_zero_points(std::move(w_zps)) {
     const bool any_padding = std::any_of(
         padding_.begin(), padding_.end(), [](const auto& e) { return e != 0; });
@@ -26,10 +26,7 @@ struct ContextLinear final {

   ContextLinear() = delete;

-  ContextLinear(Operator&& o, int64_t o_channels) {
-    op = std::move(o);
-    output_channels = o_channels;
-  }
+  ContextLinear(Operator&& o, int64_t o_channels) : op(std::move(o)), output_channels(o_channels) {}

   static constexpr float kMin = -std::numeric_limits<float>::infinity();
   static constexpr float kMax = std::numeric_limits<float>::infinity();
 };
@@ -15,6 +15,7 @@

 #include <cmath>
 #include <memory>
+#include <utility>

 namespace at {
@@ -132,8 +133,8 @@ struct TORCH_API PerChannelAffineQuantizer : public AffineQuantizer {
       Tensor zero_points,
       int64_t axis)
       : AffineQuantizer(scalar_type),
-        scales_(scales),
-        zero_points_(zero_points),
+        scales_(std::move(scales)),
+        zero_points_(std::move(zero_points)),
         axis_(axis) {}

   QScheme qscheme() const override {
@@ -167,11 +167,11 @@ struct python_error : public std::exception {
     Py_XINCREF(traceback);
   }

-  python_error(python_error&& other) {
-    type = other.type;
-    value = other.value;
-    traceback = other.traceback;
-    message = std::move(other.message);
+  python_error(python_error&& other)
+      : type(other.type),
+        value(other.value),
+        traceback(other.traceback),
+        message(std::move(other.message)) {
     other.type = nullptr;
     other.value = nullptr;
     other.traceback = nullptr;
@@ -45,7 +45,7 @@ struct InputMetadata {
       MetadataShape input_shape,
       bool is_tensor_subclass)
       : options_{options},
-        shape_{input_shape},
+        shape_{std::move(input_shape)},
         is_tensor_subclass_{is_tensor_subclass} {
     auto device_ = options.device();
     stream_ = c10::impl::getDeviceGuardImpl(device_.type())->getStream(device_);
@@ -848,7 +848,7 @@ class PostProcess {
       std::deque<ThreadLocalResults>& tls,
       const ValueCache& value_cache,
       time_t end_time_ns)
-      : end_time_{end_time_ns}, time_converter_{time_converter} {
+      : end_time_{end_time_ns}, time_converter_{std::move(time_converter)} {
     for (size_t python_tid : c10::irange(tls.size())) {
       CallTypeHelper<TraceKeyCacheState>::map(
           tls[python_tid].trace_keys_, *this, value_cache, python_tid);
@@ -280,13 +280,12 @@ struct TORCH_API AutogradMeta : public c10::AutogradMetaInterface {
   AutogradMeta(
       at::TensorImpl* self_impl = nullptr,
       bool requires_grad = false,
-      Edge gradient_edge = Edge()) {
-    grad_fn_ = std::move(gradient_edge.function);
-    requires_grad_ = false;
-    retains_grad_ = -1;
-    is_view_ = false;
-    output_nr_ = gradient_edge.input_nr;
-
+      Edge gradient_edge = Edge())
+      : grad_fn_(std::move(gradient_edge.function)),
+        requires_grad_(false),
+        retains_grad_(-1),
+        is_view_(false),
+        output_nr_(gradient_edge.input_nr) {
     // set_requires_grad also checks error conditions.
     if (requires_grad) {
       TORCH_INTERNAL_ASSERT(self_impl);
@@ -5,6 +5,7 @@
 #include <mutex>
 #include <stdexcept>
 #include <unordered_map>
+#include <utility>
 #include <vector>

 #include <ATen/ATen.h>
@@ -32,7 +33,7 @@ class TORCH_API Backend : public torch::CustomClassHolder {
     explicit Options(
         std::string backend,
         std::chrono::milliseconds timeout = kBackendDefaultTimeout)
-        : timeout(timeout), backend(backend) {}
+        : timeout(timeout), backend(std::move(backend)) {}
     virtual ~Options() = default;

     std::chrono::milliseconds timeout;
@@ -6,6 +6,7 @@
 #include <mutex>
 #include <stdexcept>
 #include <unordered_map>
+#include <utility>
 #include <vector>

 #include <ATen/ATen.h>
@@ -55,7 +56,7 @@ class TORCH_API ProcessGroup : public torch::CustomClassHolder {
     explicit Options(
         std::string backend,
         std::chrono::milliseconds timeout = kProcessGroupDefaultTimeout)
-        : timeout(timeout), backend(backend) {}
+        : timeout(timeout), backend(std::move(backend)) {}
     virtual ~Options() = default;

     std::chrono::milliseconds timeout;
@@ -2,8 +2,9 @@

 #include <ATen/ATen.h>
 #include <ATen/core/ivalue.h>
-#include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
 #include <torch/csrc/Export.h>
+#include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
+#include <utility>

 namespace c10d {
@@ -20,18 +21,18 @@ class TORCH_API GradBucket {
   explicit GradBucket(
       size_t index,
       size_t bucket_count,
-      const at::Tensor& tensor,
-      const std::vector<size_t>& offsets,
-      const std::vector<size_t>& lengths,
-      const std::vector<c10::IntArrayRef>& sizes_vec,
-      const std::vector<at::Tensor>& parameters)
+      at::Tensor tensor,
+      std::vector<size_t> offsets,
+      std::vector<size_t> lengths,
+      std::vector<c10::IntArrayRef> sizes_vec,
+      std::vector<at::Tensor> parameters)
       : index_(index),
         bucket_count_(bucket_count),
-        buffer_(tensor),
-        offsets_(offsets),
-        lengths_(lengths),
-        sizes_vec_(sizes_vec),
-        parameters_(parameters) {}
+        buffer_(std::move(tensor)),
+        offsets_(std::move(offsets)),
+        lengths_(std::move(lengths)),
+        sizes_vec_(std::move(sizes_vec)),
+        parameters_(std::move(parameters)) {}

   // Returns the index of the bucket, which is unique across all the buckets.
   size_t getIndex() const {
@@ -114,7 +115,7 @@ namespace detail {
 template <typename T>
 class CppCommHookInterface : public CommHookInterface {
  public:
-  explicit CppCommHookInterface(const T& state) : state_(state) {}
+  explicit CppCommHookInterface(T state) : state_(std::move(state)) {}

   ~CppCommHookInterface() override = default;
@@ -384,10 +384,7 @@ class TORCH_API Reducer {

     VariableLocator() = default;

-    VariableLocator(size_t bucket_index_, size_t intra_bucket_index_) {
-      bucket_index = bucket_index_;
-      intra_bucket_index = intra_bucket_index_;
-    }
+    VariableLocator(size_t bucket_index_, size_t intra_bucket_index_) : bucket_index(bucket_index_), intra_bucket_index(intra_bucket_index_) {}
   };

   // Map the index of a variable to its location in the bucket structure.
@@ -18,9 +18,8 @@ struct TORCH_API GraphFunction : public Function {
           c10::nullopt)
       : name_(std::move(name)),
         graph_(std::move(graph)),
-        function_creator_(std::move(function_creator)) {
-    executor_execution_mode_ = executor_execution_mode;
-  }
+        executor_execution_mode_(executor_execution_mode),
+        function_creator_(std::move(function_creator)) {}

   bool isGraphFunction() const override {
     return true;
@@ -1,6 +1,8 @@
 #include <torch/csrc/jit/passes/onnx/naming.h>
 #include <torch/csrc/onnx/onnx.h>

+#include <utility>
+
 namespace torch {
 namespace jit {
 namespace onnx {
@@ -79,7 +81,7 @@ namespace {

 class NodeNameGenerator {
  public:
-  NodeNameGenerator(std::shared_ptr<Graph> g) : graph_(g){};
+  NodeNameGenerator(std::shared_ptr<Graph> g) : graph_(std::move(g)){};
   virtual ~NodeNameGenerator() = 0;
   void PopulateNodeNames();
@@ -4,6 +4,7 @@
 #include <torch/csrc/Export.h>
 #include <torch/csrc/jit/ir/ir.h>
 #include <unordered_map>
+#include <utility>

 namespace torch {
 namespace jit {
@@ -23,11 +24,11 @@ struct ShapeComputeGraphMapping {
       std::unordered_map<Value*, Value*>
           enclosing_graph_value_to_shape_graph_input,
       std::unordered_map<Value*, int64_t> graph_output_to_symbolic_shape_dim)
-      : partial_eval_shape_graph(partial_eval_shape_graph),
+      : partial_eval_shape_graph(std::move(partial_eval_shape_graph)),
         enclosing_graph_value_to_shape_graph_input_(
-            enclosing_graph_value_to_shape_graph_input),
+            std::move(enclosing_graph_value_to_shape_graph_input)),
         graph_output_to_symbolic_shape_dim_(
-            graph_output_to_symbolic_shape_dim){};
+            std::move(graph_output_to_symbolic_shape_dim)){};

   std::shared_ptr<Graph> partial_eval_shape_graph;
   std::unordered_map<Value*, Value*>
@@ -5,12 +5,14 @@
 #include <torch/csrc/jit/tensorexpr/stmt.h>
 #include <torch/csrc/jit/tensorexpr/tensor.h>

+#include <utility>
+
 namespace torch {
 namespace jit {
 namespace tensorexpr {
 class HasRand : public IRVisitor {
  public:
-  HasRand(StmtPtr stmt) : stmt_(stmt) {
+  HasRand(StmtPtr stmt) : stmt_(std::move(stmt)) {
     stmt_->accept(this);
   }
@@ -113,7 +115,7 @@ class BufFinder : public IRVisitor {
 class WritesToBuf : public IRVisitor {
  public:
   // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
-  WritesToBuf(BufPtr target) : target_(target) {}
+  WritesToBuf(BufPtr target) : target_(std::move(target)) {}

   std::vector<StmtPtr> writes() {
     return writes_;
@@ -145,7 +147,7 @@ class WritesToBuf : public IRVisitor {
 class StmtsReadingBuf : public IRVisitor {
  public:
   // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
-  StmtsReadingBuf(BufPtr target) : target_(target) {}
+  StmtsReadingBuf(BufPtr target) : target_(std::move(target)) {}

   std::vector<StmtPtr> reads() {
     return reads_;
@@ -227,7 +229,7 @@ class ExternalAllocBufFinder : public IRVisitor {
 // Traverses the IR to determine if a particular Var is modified within it.
 class ModifiesVarChecker : public IRVisitor {
  public:
-  ModifiesVarChecker(VarPtr v) : var_(v) {}
+  ModifiesVarChecker(VarPtr v) : var_(std::move(v)) {}

   static bool check(StmtPtr s, VarPtr v) {
     ModifiesVarChecker checker(v);
@@ -281,7 +283,7 @@ class ModifiesVarChecker : public IRVisitor {
 // stmt in block stmts that access to the buf.
 class BufLiveRange : public IRVisitor {
  public:
-  BufLiveRange(BufPtr b) : buf_(b) {}
+  BufLiveRange(BufPtr b) : buf_(std::move(b)) {}

   static std::tuple<int32_t, int32_t> liveRange(StmtPtr s, BufPtr b) {
     BlockPtr block = to<Block>(s);
@@ -4,6 +4,7 @@
 #include <torch/csrc/jit/tensorexpr/ir.h>

 #include <deque>
+#include <utility>
 #include <vector>

 namespace torch {
@@ -22,7 +23,7 @@ struct TORCH_API Bound {
   bool swapped{false};

   Bound() = default;
-  Bound(ExprPtr s, ExprPtr e) : start(s), end(e) {}
+  Bound(ExprPtr s, ExprPtr e) : start(std::move(s)), end(std::move(e)) {}

   void print() const;
   bool equals(const Bound& other) const;
@@ -4,6 +4,8 @@
 #include <torch/csrc/jit/tensorexpr/ir.h>
 #include <torch/csrc/jit/tensorexpr/tensor.h>

+#include <utility>
+
 namespace torch {
 namespace jit {
 namespace tensorexpr {
@@ -19,7 +21,7 @@ class TORCH_API CodeGen {
   template <typename... Ts>
   // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
   CodeGen(StmtPtr stmt, Ts... ts)
-      : stmt_(stmt), buffer_args_({BufferArg(ts)...}) {}
+      : stmt_(std::move(stmt)), buffer_args_({BufferArg(ts)...}) {}

   // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
   CodeGen(
@@ -4,6 +4,8 @@

 #include <c10/util/irange.h>

+#include <utility>
+
 namespace torch {
 namespace jit {
 namespace tensorexpr {
@@ -41,7 +43,7 @@ void castIndicesToInts(std::vector<ExprPtr>& indices) {
 }

 Load::Load(Dtype dtype, BufPtr buf, std::vector<ExprPtr> indices)
-    : ExprNodeBase(dtype), buf_(buf), indices_(std::move(indices)) {
+    : ExprNodeBase(dtype), buf_(std::move(buf)), indices_(std::move(indices)) {
   castIndicesToInts(indices_);
 }
@@ -63,7 +65,9 @@ ExprHandle Load::make(
 }

 Store::Store(BufPtr buf, std::vector<ExprPtr> indices, ExprPtr value)
-    : buf_(buf), indices_(std::move(indices)), value_(value) {
+    : buf_(std::move(buf)),
+      indices_(std::move(indices)),
+      value_(std::move(value)) {
   castIndicesToInts(indices_);
 }
@@ -1,6 +1,7 @@
 #pragma once

 #include <string>
+#include <utility>
 #include <vector>

 #include <c10/util/string_utils.h>
@@ -72,14 +73,14 @@ class TORCH_API Cast : public ExprNode<Cast> {
   }

   void set_src_value(ExprPtr src_value) {
-    src_value_ = src_value;
+    src_value_ = std::move(src_value);
   }

   static ExprHandle make(Dtype dtype, const ExprHandle& src_value) {
     return ExprHandle(alloc<Cast>(dtype, src_value.node()));
   }
   Cast(Dtype dtype, ExprPtr src_value)
-      : ExprNodeBase(dtype, kCast), src_value_(src_value) {}
+      : ExprNodeBase(dtype, kCast), src_value_(std::move(src_value)) {}

   bool isConstant() const override {
     return src_value_->isConstant();
@@ -102,14 +103,14 @@ class TORCH_API BitCast : public ExprNode<BitCast> {
   }

   void set_src_value(ExprPtr src_value) {
-    src_value_ = src_value;
+    src_value_ = std::move(src_value);
   }

   static ExprHandle make(Dtype dtype, const ExprHandle& src_value) {
     return ExprHandle(alloc<BitCast>(dtype, src_value.node()));
   }
   BitCast(Dtype dtype, ExprPtr src_value)
-      : ExprNodeBase(dtype, kBitCast), src_value_(src_value) {
+      : ExprNodeBase(dtype, kBitCast), src_value_(std::move(src_value)) {
     TORCH_CHECK(src_value_->dtype().byte_size() == dtype.byte_size());
   }
@@ -140,11 +141,11 @@ class BinaryOpNode : public ExprNode<Op> {
   }

   void set_lhs(ExprPtr lhs) {
-    lhs_ = lhs;
+    lhs_ = std::move(lhs);
   }

   void set_rhs(ExprPtr rhs) {
-    rhs_ = rhs;
+    rhs_ = std::move(rhs);
   }

   static ExprHandle make(const ExprHandle& lhs, const ExprHandle& rhs) {
@@ -184,34 +185,39 @@ bool bin_op_deducer(...);

 class TORCH_API Add : public BinaryOpNode<Add> {
  public:
-  Add(ExprPtr lhs, ExprPtr rhs) : BinaryOpNode(lhs, rhs, IRNodeType::kAdd) {}
+  Add(ExprPtr lhs, ExprPtr rhs)
+      : BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kAdd) {}
 };

 class TORCH_API Sub : public BinaryOpNode<Sub> {
  public:
-  Sub(ExprPtr lhs, ExprPtr rhs) : BinaryOpNode(lhs, rhs, IRNodeType::kSub) {}
+  Sub(ExprPtr lhs, ExprPtr rhs)
+      : BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kSub) {}
 };

 class TORCH_API Mul : public BinaryOpNode<Mul> {
  public:
-  Mul(ExprPtr lhs, ExprPtr rhs) : BinaryOpNode(lhs, rhs, IRNodeType::kMul) {}
+  Mul(ExprPtr lhs, ExprPtr rhs)
+      : BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kMul) {}
 };

 class TORCH_API Div : public BinaryOpNode<Div> {
  public:
-  Div(ExprPtr lhs, ExprPtr rhs) : BinaryOpNode(lhs, rhs, IRNodeType::kDiv) {}
+  Div(ExprPtr lhs, ExprPtr rhs)
+      : BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kDiv) {}
 };

 class TORCH_API Mod : public BinaryOpNode<Mod> {
  public:
-  Mod(ExprPtr lhs, ExprPtr rhs) : BinaryOpNode(lhs, rhs, IRNodeType::kMod) {}
+  Mod(ExprPtr lhs, ExprPtr rhs)
+      : BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kMod) {}
 };

 template <typename Op>
 class BitwiseOpNode : public BinaryOpNode<Op> {
  public:
   BitwiseOpNode(ExprPtr lhs, ExprPtr rhs, IRNodeType type)
-      : BinaryOpNode<Op>(lhs, rhs, type) {}
+      : BinaryOpNode<Op>(std::move(lhs), std::move(rhs), type) {}

   static ExprHandle make(const ExprHandle& lhs, const ExprHandle& rhs) {
     if (!lhs.dtype().is_integral()) {
@@ -259,7 +265,7 @@ class Max : public BinaryOpNode<Max> {

  public:
   Max(ExprPtr lhs, ExprPtr rhs, bool propagate_nans)
-      : BinaryOpNode(lhs, rhs, IRNodeType::kMax),
+      : BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kMax),
         propagate_nans_(propagate_nans) {}

   bool propagate_nans() const {
@@ -283,7 +289,7 @@ class Min : public BinaryOpNode<Min> {

  public:
   Min(ExprPtr lhs, ExprPtr rhs, bool propagate_nans)
-      : BinaryOpNode(lhs, rhs, IRNodeType::kMin),
+      : BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kMin),
         propagate_nans_(propagate_nans) {}

   bool propagate_nans() const {
@@ -413,11 +419,11 @@ class TORCH_API Ramp : public ExprNode<Ramp> {
   }

   void set_base(ExprPtr base) {
-    base_ = base;
+    base_ = std::move(base);
   }

   void set_stride(ExprPtr stride) {
-    stride_ = stride;
+    stride_ = std::move(stride);
   }

   static ExprHandle make(
@@ -436,7 +442,7 @@ class TORCH_API Ramp : public ExprNode<Ramp> {
   Ramp(ExprPtr base, ExprPtr stride, int lanes)
       : ExprNodeBase(Dtype(base->dtype(), lanes)),
         base_(base),
-        stride_(stride),
+        stride_(std::move(stride)),
         lanes_(lanes) {}

  private:
@@ -462,11 +468,11 @@ class TORCH_API Load : public ExprNode<Load> {
   }

   void set_buf(BufPtr buf) {
-    buf_ = buf;
+    buf_ = std::move(buf);
   }

   void set_indices(std::vector<ExprPtr> indices) {
-    indices_ = indices;
+    indices_ = std::move(indices);
   }

   static ExprHandle make(
@@ -492,7 +498,7 @@ class TORCH_API Broadcast : public ExprNode<Broadcast> {
   }

   void set_value(ExprPtr value) {
-    value_ = value;
+    value_ = std::move(value);
   }

   int lanes() const {
@@ -528,15 +534,15 @@ class TORCH_API IfThenElse : public ExprNode<IfThenElse> {
   }

   void set_condition(ExprPtr condition) {
-    condition_ = condition;
+    condition_ = std::move(condition);
   }

   void set_true_value(ExprPtr true_value) {
-    true_ = true_value;
+    true_ = std::move(true_value);
   }

   void set_false_value(ExprPtr false_value) {
-    false_ = false_value;
+    false_ = std::move(false_value);
   }

   static ExprHandle make(
@@ -556,7 +562,10 @@ class TORCH_API IfThenElse : public ExprNode<IfThenElse> {
   }

   IfThenElse(ExprPtr c, ExprPtr t, ExprPtr f)
-      : ExprNodeBase(t->dtype()), condition_(c), true_(t), false_(f) {}
+      : ExprNodeBase(t->dtype()),
+        condition_(std::move(c)),
+        true_(t),
+        false_(std::move(f)) {}

  private:
   ExprPtr condition_;
@@ -583,19 +592,19 @@ class TORCH_API CompareSelect : public ExprNode<CompareSelect> {
   }

   void set_lhs(ExprPtr lhs) {
-    lhs_ = lhs;
+    lhs_ = std::move(lhs);
   }

   void set_rhs(ExprPtr rhs) {
-    rhs_ = rhs;
+    rhs_ = std::move(rhs);
   }

   void set_ret_val1(ExprPtr ret_val1) {
-    ret_val1_ = ret_val1;
+    ret_val1_ = std::move(ret_val1);
   }

   void set_ret_val2(ExprPtr ret_val2) {
-    ret_val2_ = ret_val2;
+    ret_val2_ = std::move(ret_val2);
   }

   CompareSelectBias bias() const {
@@ -646,10 +655,10 @@ class TORCH_API CompareSelect : public ExprNode<CompareSelect> {
       CompareSelectOperation cmp_op,
       CompareSelectBias bias = kUnbiased)
       : ExprNodeBase(ret_val1->dtype()),
-        lhs_(lhs),
-        rhs_(rhs),
+        lhs_(std::move(lhs)),
+        rhs_(std::move(rhs)),
         ret_val1_(ret_val1),
-        ret_val2_(ret_val2),
+        ret_val2_(std::move(ret_val2)),
         compare_op_(cmp_op),
         bias_(bias) {}
@@ -660,8 +669,8 @@ class TORCH_API CompareSelect : public ExprNode<CompareSelect> {
       CompareSelectOperation cmp_op,
       CompareSelectBias bias = kUnbiased)
       : ExprNodeBase(kInt),
-        lhs_(lhs),
-        rhs_(rhs),
+        lhs_(std::move(lhs)),
+        rhs_(std::move(rhs)),
         ret_val1_(alloc<IntImm>(1)),
         ret_val2_(alloc<IntImm>(0)),
         compare_op_(cmp_op),
@@ -3,6 +3,8 @@
 #include <torch/csrc/jit/tensorexpr/ir_printer.h>
 #include <torch/csrc/jit/tensorexpr/ir_simplifier.h>

+#include <utility>
+
 namespace torch {
 namespace jit {
 namespace tensorexpr {
@@ -1857,10 +1859,10 @@ ExprPtr polyGCD(PolynomialPtr poly) {
 class ModRound {
  public:
   ModRound(ExprPtr scalar, ExprPtr denom, ExprPtr divisor, ExprPtr mod_divisor)
-      : scalar(scalar),
-        denom(denom),
-        divisor(divisor),
-        mod_divisor(mod_divisor) {}
+      : scalar(std::move(scalar)),
+        denom(std::move(denom)),
+        divisor(std::move(divisor)),
+        mod_divisor(std::move(mod_divisor)) {}
   ExprPtr scalar;
   ExprPtr denom;
   ExprPtr divisor;
@@ -5,6 +5,7 @@
 #include <typeinfo>
 #include <unordered_map>
 #include <unordered_set>
+#include <utility>
 #include <vector>

 #include <c10/util/Logging.h>
@@ -42,7 +43,7 @@ LoopNest::LoopNest(const LoopNest& other)
 }

 LoopNest::LoopNest(StmtPtr stmt, std::unordered_set<BufPtr> output_bufs)
-    : root_stmt_(stmt), output_bufs_(std::move(output_bufs)) {
+    : root_stmt_(std::move(stmt)), output_bufs_(std::move(output_bufs)) {
   GRAPH_DEBUG("Origin Stmt in LoopNest:\n", std::to_string(root_stmt_));
   verify(root_stmt_);
 }
@@ -1223,7 +1224,7 @@ namespace {
 class IfThenElseReplacer : public IRCloner {
  public:
   IfThenElseReplacer(IfThenElsePtr to_replace, ExprPtr new_expr)
-      : to_replace_(to_replace), new_expr_(new_expr) {}
+      : to_replace_(std::move(to_replace)), new_expr_(std::move(new_expr)) {}

   ExprPtr mutate(IfThenElsePtr i) override {
     if (i == to_replace_) {
@@ -2757,7 +2758,9 @@ class LoopComputeAtRewriter : public IRMutator {
       BufPtr buf,
       BufPtr new_buf,
       std::vector<ExprPtr> offsets)
-      : buf_(buf), new_buf_(new_buf), offsets_(std::move(offsets)) {}
+      : buf_(std::move(buf)),
+        new_buf_(std::move(new_buf)),
+        offsets_(std::move(offsets)) {}

  private:
   BufPtr buf_;
@@ -2806,7 +2809,7 @@ static std::vector<VarPtr> getOuterLoopIndexes(StmtPtr s) {
 class CacheReplacer : public IRMutator {
  public:
   CacheReplacer(BufPtr buffer, BufPtr cache, std::vector<ExprPtr>& offsets)
-      : buf_(buffer), cache_(cache), offsets_(offsets) {}
+      : buf_(std::move(buffer)), cache_(std::move(cache)), offsets_(offsets) {}

  private:
   ExprPtr mutate(LoadPtr v) override {
@@ -3238,10 +3241,10 @@ class RfactorStoreRewriter : public IRMutator {
       const std::vector<ExprPtr>& old_indices,
       BufPtr new_buf,
       VarPtr reduction_var)
-      : old_buf_(old_buf),
+      : old_buf_(std::move(old_buf)),
         old_indices_(old_indices),
-        new_buf_(new_buf),
-        reduction_var_(reduction_var),
+        new_buf_(std::move(new_buf)),
+        reduction_var_(std::move(reduction_var)),
         new_indices_(old_indices) {
     new_indices_.push_back(reduction_var_);
   }
@@ -1,6 +1,7 @@
 #pragma once
 #include <c10/core/ScalarType.h>
 #include <torch/csrc/Export.h>
+#include <utility>
 #include <vector>

 #include <torch/csrc/jit/tensorexpr/bounds_overlap.h>
@@ -45,9 +46,9 @@ class TORCH_API AccessInfo {
       IndexBounds bounds)
       : id_(id),
         type_(type),
-        stmt_(stmt),
+        stmt_(std::move(stmt)),
         expr_(nullptr),
-        var_(var),
+        var_(std::move(var)),
         bounds_(std::move(bounds)) {}

   AccessInfo(
@@ -59,9 +60,9 @@ class TORCH_API AccessInfo {
       IndexBounds bounds)
       : id_(id),
         type_(type),
-        stmt_(stmt),
-        expr_(expr),
-        var_(var),
+        stmt_(std::move(stmt)),
+        expr_(std::move(expr)),
+        var_(std::move(var)),
         bounds_(std::move(bounds)) {}

   // Id is a unique int representing the order this access occured in the graph.
@@ -275,7 +276,7 @@ class TORCH_API MemDependencyChecker : public IRVisitor {
   // An internal struct holding the accesses found within a scope Block.
   struct Scope {
     Scope(BlockPtr b, std::shared_ptr<Scope> p)
-        : block(b), parent(std::move(p)) {}
+        : block(std::move(b)), parent(std::move(p)) {}

     BlockPtr block;
     std::shared_ptr<Scope> parent;
@@ -6,6 +6,7 @@
 #include <torch/csrc/jit/tensorexpr/types.h>

 #include <functional>
+#include <utility>
 #include <vector>

 namespace torch {
@@ -164,9 +165,9 @@ class TORCH_API ReduceOp : public ExprNode<ReduceOp> {
       : ExprNodeBase(body->dtype()),
         body_(body),
         reduce_args_(std::move(reduce_args)),
-        result_buf_(result_buf),
-        acc_buf_(acc_buf),
-        ri_operand_(ri_operand),
+        result_buf_(std::move(result_buf)),
+        acc_buf_(std::move(acc_buf)),
+        ri_operand_(std::move(ri_operand)),
         reducer_(reducer) {}

   static ExprHandle make(
@@ -8,6 +8,7 @@
 #include <torch/csrc/jit/tensorexpr/ir_simplifier.h>
 #include <torch/csrc/jit/tensorexpr/ir_visitor.h>

+#include <utility>
 #include <vector>

 namespace torch {
@@ -58,7 +59,7 @@ class AccessInfo {
       std::vector<ExprPtr> i,
       size_t accessOrder)
       : hash_(h),
-        buf_(b),
+        buf_(std::move(b)),
         indices_(std::move(i)),
         store_cost_(alloc<IntImm>(0)),
         load_cost_(alloc<IntImm>(0)),
@@ -223,7 +224,9 @@ class Scope {
  public:
   // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
   Scope(BlockPtr b, std::shared_ptr<Scope> parent, size_t conditionId = 0)
-      : block_(b), parent_(std::move(parent)), conditionId_(conditionId) {}
+      : block_(std::move(b)),
+        parent_(std::move(parent)),
+        conditionId_(conditionId) {}

   AccessHashMap& getAccessMapByBuf(BufPtr b);
@@ -4,6 +4,7 @@
 #include <list>
 #include <string>
 #include <unordered_set>
+#include <utility>
 #include <vector>

 #include <torch/csrc/jit/tensorexpr/expr.h>
@@ -381,7 +382,7 @@ class TORCH_API Allocate : public StmtNode<Allocate> {
     buf_ = buf;
   }

-  explicit Allocate(BufPtr buf) : buf_(buf) {}
+  explicit Allocate(BufPtr buf) : buf_(std::move(buf)) {}

  private:
   BufPtr buf_;
@@ -417,7 +418,7 @@ class TORCH_API PlacementAllocate : public StmtNode<PlacementAllocate> {
   }

   explicit PlacementAllocate(BufPtr buf, BufPtr buf_to_reuse)
-      : buf_(buf), buf_to_reuse_(buf_to_reuse) {}
+      : buf_(std::move(buf)), buf_to_reuse_(std::move(buf_to_reuse)) {}

  private:
   BufPtr buf_;
@@ -443,7 +444,7 @@ class TORCH_API Free : public StmtNode<Free> {
     buf_ = buf;
   }

-  explicit Free(BufPtr buf) : buf_(buf) {}
+  explicit Free(BufPtr buf) : buf_(std::move(buf)) {}

  private:
   BufPtr buf_;
@@ -473,7 +474,7 @@ class TORCH_API Let : public StmtNode<Let> {
     return alloc<Let>(var.node(), val.node());
   }

-  Let(VarPtr var, ExprPtr val) : var_(var), val_(val) {}
+  Let(VarPtr var, ExprPtr val) : var_(std::move(var)), val_(std::move(val)) {}

   VarPtr var() const {
     return var_;
@@ -544,7 +545,7 @@ class TORCH_API Cond : public StmtNode<Cond> {
   }

   Cond(ExprPtr condition, StmtPtr true_stmt, StmtPtr false_stmt)
-      : condition_(condition) {
+      : condition_(std::move(condition)) {
     set_true_stmt(true_stmt);
     set_false_stmt(false_stmt);
   }
@@ -735,7 +736,7 @@ class TORCH_API For : public StmtNode<For> {
   }

   For(VarPtr var, ExprPtr start, ExprPtr stop, StmtPtr body)
-      : var_(var), start_(start), stop_(stop) {
+      : var_(std::move(var)), start_(std::move(start)), stop_(std::move(stop)) {
     BlockPtr b = to<Block>(body);
     if (!b) {
       b = alloc<Block>(std::vector<StmtPtr>({body}));
@@ -839,7 +840,9 @@ class TORCH_API AtomicAdd : public StmtNode<AtomicAdd> {
  public:
   // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
   AtomicAdd(BufPtr buf, std::vector<ExprPtr> indices, ExprPtr value)
-      : buf_(buf), indices_(std::move(indices)), value_(value) {}
+      : buf_(std::move(buf)),
+        indices_(std::move(indices)),
+        value_(std::move(value)) {}

   VarPtr base_handle() const {
     return buf_->base_handle();
@@ -946,7 +949,7 @@ class TORCH_API ExternalCall : public StmtNode<ExternalCall> {
       std::string func_name,
       std::vector<BufPtr> buf_args,
       std::vector<ExprPtr> args)
-      : buf_(buf),
+      : buf_(std::move(buf)),
         func_name_(std::move(func_name)),
         buf_args_(std::move(buf_args)),
         args_(std::move(args)) {}
@@ -2,6 +2,7 @@

 #include <torch/csrc/Export.h>
 #include <functional>
+#include <utility>
 #include <vector>

 #include <torch/csrc/jit/tensorexpr/expr.h>
@@ -15,7 +16,7 @@ class TORCH_API Tensor {
  public:
   // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
   Tensor(BufPtr buf, const std::vector<VarPtr>& args, ExprPtr body)
-      : buf_(buf) {
+      : buf_(std::move(buf)) {
     stmt_ = constructStmt(args, body, {}, {});
   }
   Tensor(BufHandle buf, const std::vector<VarHandle>& args, ExprHandle body)
@@ -28,7 +29,7 @@ class TORCH_API Tensor {
       const std::vector<ExprPtr>& reduce_dims,
       const std::vector<VarPtr>& reduce_args,
       ExprPtr body)
-      : buf_(buf) {
+      : buf_(std::move(buf)) {
     stmt_ = constructStmt(args, body, reduce_dims, reduce_args);
   }
   Tensor(
@@ -44,7 +45,8 @@ class TORCH_API Tensor {
             VarHandleVectorToVarVector(reduce_args),
             body.node()) {}

-  Tensor(BufPtr buf, StmtPtr stmt) : buf_(buf), stmt_(stmt) {}
+  Tensor(BufPtr buf, StmtPtr stmt)
+      : buf_(std::move(buf)), stmt_(std::move(stmt)) {}

   BufPtr buf() const {
     return buf_;
@@ -7,6 +7,7 @@
 #include <memory>
 #include <queue>
 #include <type_traits>
+#include <utility>

 #include <fmt/format.h>
@@ -528,7 +529,7 @@ ThreadLocalSubqueue::ThreadLocalSubqueue(
 RecordQueue::RecordQueue(
     const ProfilerConfig& config,
     std::set<ActivityType> activities)
-    : id_(++queue_id_), config_{config}, activities_{activities} {
+    : id_(++queue_id_), config_{config}, activities_{std::move(activities)} {
   if (tracePython()) {
     python_tracer_ = python_tracer::PythonTracerBase::make(this);
   }
@@ -264,7 +264,9 @@ struct OptimizerInfo {

 struct PyExtraFieldsBase {
   PyExtraFieldsBase(time_t end_time_ns, size_t python_tid, PyFrameState caller)
-      : end_time_ns_{end_time_ns}, python_tid_{python_tid}, caller_{caller} {}
+      : end_time_ns_{end_time_ns},
+        python_tid_{python_tid},
+        caller_{std::move(caller)} {}

   time_t end_time_ns_;
   size_t python_tid_;
@@ -307,7 +309,7 @@ struct ExtraFields<EventType::PyCCall> : public PyExtraFieldsBase {
       PyFrameState caller,
       args_t args)
       : PyExtraFieldsBase(end_time_ns, python_tid, caller),
-        function_name_{args} {}
+        function_name_{std::move(args)} {}

   at::StringView function_name_;
 };
@@ -2,6 +2,8 @@

 #include <torch/csrc/profiler/util.h>

+#include <utility>
+
 namespace torch {
 namespace profiler {
 namespace impl {
@@ -17,7 +19,7 @@ ExperimentalConfig::ExperimentalConfig(
     bool verbose,
     std::vector<std::string> performance_events,
     bool adjust_timestamps)
-    : profiler_metrics{profiler_metrics},
+    : profiler_metrics{std::move(profiler_metrics)},
       profiler_measure_per_kernel{profiler_measure_per_kernel},
       verbose{verbose},
       performance_events(std::move(performance_events)),