Add modernize-* checks to clang-tidy (#13196)

Summary:
Enables almost all `modernize-*` checks in clang-tidy. These checks warn against patterns such as the following (a short illustrative sketch follows the list):

- Passing `const std::string&` and then copying it, instead of taking the argument by value and moving it (`modernize-pass-by-value`),
- Old-style index or iterator loops where a range-based `for` loop would do (`modernize-loop-convert`),
- Raw `new` when constructing a `std::unique_ptr`/`std::shared_ptr` (`modernize-make-unique`, `modernize-make-shared`),
- `push_back` where `emplace_back` would avoid constructing a temporary (`modernize-use-emplace`),
- `virtual` together with `override` (`override` alone is sufficient; `modernize-use-override`).
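A minimal before/after sketch, not taken from this PR, of the rewrites these checks suggest (the check names in the comments are the actual clang-tidy checks; the types and functions are hypothetical):

```cpp
// Hypothetical example illustrating patterns flagged by modernize-* checks.
#include <memory>
#include <string>
#include <utility>
#include <vector>

struct Item {
  // Before: Item(const std::string& key) : key_(key) {}  // extra copy
  // After (modernize-pass-by-value): take by value, then move into the member.
  explicit Item(std::string key) : key_(std::move(key)) {}
  std::string key_;
};

struct Base {
  virtual ~Base() = default;
  virtual void run() = 0;
};

struct Derived : Base {
  // Before: virtual void run() override {}  // redundant `virtual`
  void run() override {}  // modernize-use-override: `override` alone suffices
};

int main() {
  std::vector<Item> items;
  // Before: items.push_back(Item("a"));  // constructs a temporary Item
  items.emplace_back("a");  // modernize-use-emplace

  // Before: std::unique_ptr<Base> p(new Derived());  // raw `new`
  std::unique_ptr<Base> p = std::make_unique<Derived>();  // modernize-make-unique

  // Before: for (size_t i = 0; i < items.size(); ++i) { ... items[i] ... }
  for (const auto& item : items) {  // modernize-loop-convert
    (void)item.key_;
  }
  p->run();
  return 0;
}
```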

ezyang
Pull Request resolved: https://github.com/pytorch/pytorch/pull/13196

Differential Revision: D12891837

Pulled By: goldsborough

fbshipit-source-id: 4d0f782a09eb391ee718d3d66f74c095ee121c09
Author: Peter Goldsborough (2018-11-02 20:24:35 -07:00)
Committed by: Facebook GitHub Bot
Parent: 4bca51e3e7
Commit: 0479517325
34 changed files with 174 additions and 156 deletions

View File

@ -2,7 +2,6 @@
# NOTE there must be no spaces before the '-', so put the comma first.
Checks: '
-*
,modernize-deprecated-headers
,bugprone-*
,-bugprone-macro-parentheses
,-bugprone-forward-declaration-namespace
@ -18,6 +17,10 @@ Checks: '
,hicpp-signed-bitwise
,hicpp-exception-baseclass
,hicpp-avoid-goto
,modernize-*
,-modernize-use-default-member-init
,-modernize-return-braced-init-list
,-modernize-use-auto
'
WarningsAsErrors: '*'
HeaderFilterRegex: 'torch/csrc/.*'
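Because this config treats every warning as an error (`WarningsAsErrors: '*'`), a check that is unwanted at a single site has to be silenced either globally (like the `-modernize-*` exclusions above) or locally with clang-tidy's standard `NOLINT`/`NOLINTNEXTLINE` comments, as the interpreter change later in this diff does. A minimal, hypothetical sketch of the local form (the check names are real; the code is illustrative only):

```cpp
// Illustrative only: suppressing a specific clang-tidy check at one site.
#include <cstddef>
#include <memory>
#include <vector>

void example() {
  std::vector<int> v = {1, 2, 3};

  // Silences modernize-loop-convert for the following line only.
  // NOLINTNEXTLINE(modernize-loop-convert)
  for (std::size_t i = 0; i < v.size(); ++i) {
    v[i] += 1;
  }

  std::unique_ptr<int> p;
  p.reset(new int(42));  // NOLINT(modernize-make-unique)
}
```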

View File

@ -34,7 +34,7 @@ using c10::optional;
struct TORCH_API VariableType final : public at::TypeDefault {
VariableType(Context* context, at::TypeExtendedInterface* baseType);
at::ScalarType scalarType() const override;
virtual caffe2::TypeMeta typeMeta() const override;
caffe2::TypeMeta typeMeta() const override;
at::Backend backend() const override;
at::Allocator* allocator() const override;
at::Device getDeviceFromPtr(void * data) const override;

View File

@ -86,7 +86,7 @@ class Cloneable : public virtual Module {
}
private:
void clone_(Module& other, optional<Device> device) final override {
void clone_(Module& other, optional<Device> device) final {
// Here we are *pretty* certain that `other's` type is `Derived` (because it
// was registered under the same name as `this`), but you never know what
// crazy things `reset()` does, so `dynamic_cast` just to be safe.

View File

@ -51,7 +51,7 @@ class CursorBase {
/// A `(key, value)` pair exposed by cursor iterators.
struct Item {
Item(const std::string& key_, T& value_);
Item(std::string key_, T& value_);
T& operator*();
const T& operator*() const;

View File

@ -15,8 +15,8 @@ namespace detail {
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CursorBase::Item ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename T>
CursorBase<T>::Item::Item(const std::string& key_, T& value_)
: key(key_), value(value_) {}
CursorBase<T>::Item::Item(std::string key_, T& value_)
: key(std::move(key_)), value(value_) {}
template <typename T>
T& CursorBase<T>::Item::operator*() {

View File

@ -34,7 +34,7 @@ RNNImplBase<Derived>::RNNImplBase(
int64_t number_of_gates)
: options(options_),
number_of_gates_(number_of_gates),
cudnn_mode_(cudnn_mode) {
cudnn_mode_(std::move(cudnn_mode)) {
reset();
}

View File

@ -1,5 +1,6 @@
#include "c10/util/Optional.h"
#include "torch/csrc/autograd/VariableTypeUtils.h"
#include "torch/csrc/utils/memory.h"
#include <torch/csrc/utils/memory.h>
@ -73,11 +74,12 @@ std::vector<std::unique_ptr<Type>> type_to_variable_type;
// XXX - this is not threadsafe with uses of Variables
void register_variable_type_for(TypeExtendedInterface* baseType) {
AT_ASSERT(baseType);
size_t base_id = static_cast<size_t>(baseType->ID());
const auto base_id = static_cast<size_t>(baseType->ID());
if(type_to_variable_type.size() <= base_id) {
type_to_variable_type.resize(base_id + 1);
}
type_to_variable_type[base_id] = torch::make_unique<VariableType>(&at::globalContext(), baseType);
type_to_variable_type[base_id] =
make_unique<VariableType>(&at::globalContext(), baseType);
}
struct VariableTypeRegistry {

View File

@ -5,6 +5,7 @@
#include "torch/csrc/autograd/grad_mode.h"
#include "torch/csrc/autograd/anomaly_mode.h"
#include "torch/csrc/autograd/variable.h"
#include "torch/csrc/utils/memory.h"
#include <ATen/DeviceGuard.h>
#include <ATen/ExpandUtils.h>
@ -634,7 +635,7 @@ void GraphTask::init_to_execute(Function& graph_root, const edge_list& outputs)
Function *output = output_edge.function.get();
auto & info = exec_info[output];
if (!info.captures)
info.captures.reset(new std::vector<ExecInfo::Capture>());
info.captures = make_unique<std::vector<ExecInfo::Capture>>();
info.captures->emplace_back(output_edge.input_nr, output_idx++);
}
captured_vars.resize(output_idx);

View File

@ -63,8 +63,8 @@ struct ArgumentSpec {
hash_code = num_flat_inputs;
args.resize(num_flat_inputs);
size_t offset = 0;
for (size_t i = 0; i < inputs.size(); ++i) {
addInput(inputs[i], offset, with_grad);
for (const auto& i : inputs) {
addInput(i, offset, with_grad);
}
JIT_ASSERT(offset == num_flat_inputs);
}
@ -192,7 +192,7 @@ struct CompleteArgumentSpec {
data.resize(ninputs + all_dims*2);
// and reinterpret our data array as these structs
CompleteArgumentInfoPOD * pods = reinterpret_cast<CompleteArgumentInfoPOD*>(data.data());
auto* pods = reinterpret_cast<CompleteArgumentInfoPOD*>(data.data());
int64_t * next_dim = sizes_strides();
int32_t total_dims = 0;
for(int32_t i = 0; i < num_inputs; i++) {

View File

@ -308,8 +308,9 @@ static std::vector<Value*> gradientForNode(Node* node, ArrayRef<Value*> grad_val
squeezed_dims.push_back(i);
}
SymbolicVariable returned_grad = grads.at(0);
for (auto it = squeezed_dims.begin(); it != squeezed_dims.end(); ++it)
returned_grad = returned_grad.unsqueeze(*it);
for (const auto& dim : squeezed_dims) {
returned_grad = returned_grad.unsqueeze(dim);
}
return {returned_grad};
} else if (node->matches("aten::squeeze(Tensor self, int dim) -> Tensor", /*const_inputs=*/attr::dim)) {
@ -333,7 +334,7 @@ static std::vector<Value*> gradientForNode(Node* node, ArrayRef<Value*> grad_val
// of equal sizes. We can use a single split operation to handle that.
if (std::all_of(tensor_inputs.begin(), tensor_inputs.end(), has_first_sizes)) {
auto tensor_grads = grads.at(0).chunk(tensor_inputs.size(), dim);
tensor_grads.push_back(nullptr); // for attr::dim
tensor_grads.emplace_back(nullptr); // for attr::dim
return tensor_grads;
} else {
size_t offset = 0;
@ -343,7 +344,7 @@ static std::vector<Value*> gradientForNode(Node* node, ArrayRef<Value*> grad_val
tensor_grads.push_back(grad.narrow(dim, offset, input.sizes()[dim]));
offset += input.sizes()[dim];
}
tensor_grads.push_back(nullptr); // for attr::dim
tensor_grads.emplace_back(nullptr); // for attr::dim
return tensor_grads;
}
} else if (comparison_ops.find(node)) {

View File

@ -96,8 +96,8 @@ private:
# if this list is not empty and ${foo,} will insert one after.
*/
struct CodeTemplate {
/* implicit */ CodeTemplate(const std::string & t)
: template_text(t) {}
/* implicit */ CodeTemplate(std::string t)
: template_text(std::move(t)) {}
std::string format(const TemplateEnv & env) {
std::stringstream out;

View File

@ -371,7 +371,7 @@ class GraphEncoder: public EncoderBase {
}
private:
virtual void EncodeTensor(
void EncodeTensor(
onnx::TensorProto* tensor_proto,
const at::Tensor& tensor,
const c10::optional<std::string> external_ref = {}) override;
@ -450,15 +450,15 @@ class ModuleEncoder: public EncoderBase {
script::Method &method,
const std::string prefix);
virtual void EncodeTensor(
void EncodeTensor(
onnx::TensorProto* tensor_proto,
const at::Tensor& tensor,
const c10::optional<std::string> external_ref = {}) override;
virtual void EncodeIntermediateValueInfo(onnx::GraphProto *graph_proto,
void EncodeIntermediateValueInfo(onnx::GraphProto *graph_proto,
const Value* n) override;
virtual void EncodeValueInfo(onnx::GraphProto *graph_proto,
void EncodeValueInfo(onnx::GraphProto *graph_proto,
onnx::ValueInfoProto* v,
const Value* n) override;

View File

@ -4,6 +4,7 @@
#include "torch/csrc/jit/code_template.h"
#include "torch/csrc/jit/fuser/cpu/temp_file.h"
#include "torch/csrc/jit/fuser/cpu/dynamic_library.h"
#include "torch/csrc/utils/memory.h"
#include <sstream>
#include <cstdlib>
@ -37,7 +38,7 @@ struct CompilerConfig {
if (!programExists(cxx)) {
cxx = "";
}
const char* debug_env = getenv("PYTORCH_FUSION_DEBUG");
debug = debug_env && atoi(debug_env) != 0;
}
@ -101,14 +102,21 @@ static void disas(const std::string& so_file) {
}
FusedKernelCPU::FusedKernelCPU(
const std::string& _name
, const std::string& _code
, const std::vector<TensorDesc> _input_desc
, const std::vector<TensorDesc> _output_desc
, const std::vector<PartitionDesc> _chunk_desc
, const std::vector<PartitionDesc> _concat_desc
, const bool _has_random)
: FusedKernel{_name, _code, _input_desc, _output_desc, _chunk_desc, _concat_desc, _has_random} {
std::string name,
std::string code,
std::vector<TensorDesc> input_desc,
std::vector<TensorDesc> output_desc,
std::vector<PartitionDesc> chunk_desc,
std::vector<PartitionDesc> concat_desc,
bool has_random)
: FusedKernel(
std::move(name),
std::move(code),
std::move(input_desc),
std::move(output_desc),
std::move(chunk_desc),
std::move(concat_desc),
has_random) {
auto& config = getConfig();
TempFile so_file(so_template, 3);
TempFile cpp_file(cpp_template, 4);
@ -116,7 +124,7 @@ FusedKernelCPU::FusedKernelCPU(
cpp_file.sync();
runCompiler(cpp_file.name(), so_file.name());
if (config.debug) disas(so_file.name());
so_lib.reset(new DynamicLibrary(so_file.name().c_str()));
so_lib = make_unique<DynamicLibrary>(so_file.name().c_str());
#pragma GCC diagnostic ignored "-Wpedantic"
kernel = reinterpret_cast<void(*)(uint32_t, void**)>(so_lib->sym(name_.c_str()));
#pragma GCC diagnostic pop

View File

@ -17,21 +17,20 @@ namespace torch { namespace jit { namespace fuser { namespace cpu {
// Represents a compiled CPU kernel and the metadata necessary to run it
struct TORCH_API FusedKernelCPU : public ::torch::jit::fuser::FusedKernel {
FusedKernelCPU(
const std::string& _name
, const std::string& _code
, const std::vector<TensorDesc> _input_desc
, const std::vector<TensorDesc> _output_desc
, const std::vector<PartitionDesc> _chunk_desc
, const std::vector<PartitionDesc> _concat_desc
, const bool _has_random);
std::string name,
std::string code,
std::vector<TensorDesc> input_desc,
std::vector<TensorDesc> output_desc,
std::vector<PartitionDesc> chunk_desc,
std::vector<PartitionDesc> concat_desc,
bool has_random);
virtual at::Backend backend() const override {
at::Backend backend() const override {
return at::Backend::CPU;
}
virtual void launch_raw(
const uint32_t numel
, std::vector<void*>& arguments) const override {
void launch_raw(const uint32_t numel, std::vector<void*>& arguments)
const override {
kernel(numel, arguments.data());
}
@ -42,7 +41,7 @@ private:
} // namespace cpu
} // namespace fuser
} // namespace jit
} // namespace torch
#endif // USE_CPU_FUSER

View File

@ -67,16 +67,23 @@ static void getMajorMinor(const cudaDeviceProp* const prop, int& major, int& min
// Compiles the specified kernel and stores the metadata required to run it
FusedKernelCUDA::FusedKernelCUDA(
const int16_t _device
, const std::string& _name
, const std::string& _code
, const std::vector<TensorDesc> _input_desc
, const std::vector<TensorDesc> _output_desc
, const std::vector<PartitionDesc> _chunk_desc
, const std::vector<PartitionDesc> _concat_desc
, const bool _has_random)
: FusedKernel{_name, _code, _input_desc, _output_desc, _chunk_desc, _concat_desc, _has_random}
, device_{_device} {
int16_t device,
std::string name,
std::string code,
std::vector<TensorDesc> input_desc,
std::vector<TensorDesc> output_desc,
std::vector<PartitionDesc> chunk_desc,
std::vector<PartitionDesc> concat_desc,
bool has_random)
: FusedKernel(
std::move(name),
std::move(code),
std::move(input_desc),
std::move(output_desc),
std::move(chunk_desc),
std::move(concat_desc),
has_random),
device_(device) {
// Initializes driver's API context (if necessary)
CUcontext pctx = 0;
TORCH_CU_CHECK(cuCtxGetCurrent(&pctx));

View File

@ -20,24 +20,23 @@ namespace torch { namespace jit { namespace fuser { namespace cuda {
// Note: CUDA functions are per device.
struct TORCH_API FusedKernelCUDA : public ::torch::jit::fuser::FusedKernel {
FusedKernelCUDA(
const int16_t _device
, const std::string& _name
, const std::string& _code
, const std::vector<TensorDesc> _input_desc
, const std::vector<TensorDesc> _output_desc
, const std::vector<PartitionDesc> _chunk_desc
, const std::vector<PartitionDesc> _concat_desc
, const bool _has_random);
int16_t device,
std::string name,
std::string code,
std::vector<TensorDesc> input_desc,
std::vector<TensorDesc> output_desc,
std::vector<PartitionDesc> chunk_desc,
std::vector<PartitionDesc> concat_desc,
bool has_random);
virtual ~FusedKernelCUDA() override {
~FusedKernelCUDA() override {
cuModuleUnload(module_);
}
virtual void launch_raw(
const uint32_t numel
, std::vector<void*>& arguments) const override;
void launch_raw(const uint32_t numel, std::vector<void*>& arguments)
const override;
virtual at::Backend backend() const override {
at::Backend backend() const override {
return at::Backend::CUDA;
}

View File

@ -27,7 +27,7 @@ static c10::optional<std::vector<int64_t>> getMapSize(
const KernelSpec& spec
, at::TensorList args
, at::IntList arg_subset) {
int64_t dim_after_broadcast = 0;
for (const auto arg_idx : arg_subset) {
dim_after_broadcast = std::max(dim_after_broadcast, args[arg_idx].dim());
@ -38,9 +38,9 @@ static c10::optional<std::vector<int64_t>> getMapSize(
// should be straightforward.
// Note: left uninitialized since empty shape is broadcastable to any shape
std::vector<int64_t> map_size;
for (size_t i = 0; i < arg_subset.size(); ++i) {
auto& arg = args.at(arg_subset[i]);
auto& chunk_desc = spec.inputChunks().at(arg_subset[i]);
for (const auto arg_idx : arg_subset) {
auto& arg = args.at(arg_idx);
auto& chunk_desc = spec.inputChunks().at(arg_idx);
if (chunk_desc.nSubTensors() == 1) {
try {
map_size = at::infer_size(map_size, arg.sizes());
@ -91,7 +91,7 @@ static c10::optional<std::vector<int64_t>> canRunKernel(
}
// Arguments are expanded to a common shape, referred to as the "map size,"
// (see above).
// Note: Arguments are mutated by this call, although map_size is restored
// to its original value.
static void expandArgs(
@ -117,8 +117,8 @@ static void expandArgs(
// Note: assumes that inputs are 32-bit addressable
static uint32_t computeNumel(const at::ArrayRef<int64_t>& sizes) {
uint32_t result = 1;
for (const auto& size : sizes)
result *= size;
return result;
@ -177,7 +177,7 @@ void launchFusion(
else // CPU
outputs.push_back(at::empty({0}, ref_type.options().dtype(od.scalar_type).device(at::Device{at::DeviceType::CPU})));
}
// Fails if fusion and given inputs disagree
JIT_ASSERT(inputs.size() == fusion.inputDesc().size());
@ -188,8 +188,8 @@ void launchFusion(
flat_inputs_size += c.nSubTensors();
for (const auto& c : fusion.concatDesc())
flat_outputs_size += c.nSubTensors();
// Fails if the elements of the first (any) tensor are not expressible as
// a 32-bit integer.
// Note: this code assumes that inputs are 32-bit addressable
// Note: this code assumes that all inputs are of the same size
@ -321,7 +321,7 @@ bool runFusion(
// Validates sizes and expands inputs as needed
auto maybe_map_size = canRunKernel(spec, inputs);
// Tries to run fallback if map size can't be computed
if (!maybe_map_size) return false;
expandArgs(spec, inputs, *maybe_map_size);

View File

@ -17,24 +17,23 @@ struct FusedKernel {
TH_DISALLOW_COPY_AND_ASSIGN(FusedKernel);
FusedKernel(
const std::string& _name
, const std::string& _code
, const std::vector<TensorDesc>& _input_desc
, const std::vector<TensorDesc>& _output_desc
, const std::vector<PartitionDesc>& _chunk_desc
, const std::vector<PartitionDesc>& _concat_desc
, const bool _has_random)
: name_{_name}
, code_{_code}
, input_desc_{_input_desc}
, output_desc_{_output_desc}
, chunk_desc_{_chunk_desc}
, concat_desc_{_concat_desc}
, has_random_{_has_random}
{ }
std::string name,
std::string code,
std::vector<TensorDesc> input_desc,
std::vector<TensorDesc> output_desc,
std::vector<PartitionDesc> chunk_desc,
std::vector<PartitionDesc> concat_desc,
bool has_random)
: name_(std::move(name)),
code_(std::move(code)),
input_desc_(std::move(input_desc)),
output_desc_(std::move(output_desc)),
chunk_desc_(std::move(chunk_desc)),
concat_desc_(std::move(concat_desc)),
has_random_(has_random) {}
virtual ~FusedKernel() = default;
// arguments is a list of pointers to the arguments for the compiled CUDA/CPU
// code.
@ -57,7 +56,7 @@ struct FusedKernel {
const std::vector<TensorDesc>& outputDesc() const { return output_desc_; }
const std::vector<PartitionDesc>& chunkDesc() const { return chunk_desc_; }
const std::vector<PartitionDesc>& concatDesc() const { return concat_desc_; }
bool hasRandom() const { return has_random_; }
protected:
@ -65,7 +64,7 @@ protected:
const std::string code_;
const std::vector<TensorDesc> input_desc_;
const std::vector<TensorDesc> output_desc_;
// same size as input_desc, describes whether an
// input should be broken into subtensors (chunks)
// to be consumed by the fusion group
@ -80,7 +79,7 @@ protected:
};
} // namespace fuser
} // namespace jit
} // namespace torch
#endif // USE_CUDA_FUSER || USE_CPU_FUSER

View File

@ -81,7 +81,7 @@ struct DifferentiableGraphBackward : public autograd::Function {
ivalue_captures.reserve(capture_size);
}
virtual variable_list apply(variable_list&& inputs) override {
variable_list apply(variable_list&& inputs) override {
Stack stack;
stack.reserve(is_var_capture.size() + inputs.size());
stack.insert(stack.end(), std::make_move_iterator(inputs.begin()),
@ -90,7 +90,7 @@ struct DifferentiableGraphBackward : public autograd::Function {
auto ivalue_capture_it = ivalue_captures.begin();
for (bool is_var : is_var_capture) {
if (is_var) {
stack.push_back(var_capture_it->unpack(this->shared_from_this()));
stack.emplace_back(var_capture_it->unpack(this->shared_from_this()));
++var_capture_it;
} else {
stack.push_back(*ivalue_capture_it);
@ -108,9 +108,9 @@ struct DifferentiableGraphBackward : public autograd::Function {
auto output = std::move(stack[i]).toTensor();
const auto & edge = next_edge(i);
if (output.defined()) {
outputs.push_back(std::move(output));
outputs.emplace_back(std::move(output));
} else if (edge.is_valid()) {
outputs.push_back(edge.function->input_metadata(edge.input_nr).zeros_like());
outputs.emplace_back(edge.function->input_metadata(edge.input_nr).zeros_like());
} else {
outputs.emplace_back();
}

View File

@ -314,17 +314,17 @@ public:
ContainerTensor()
: TensorImpl(at::UndefinedTensorId(), caffe2::TypeMeta(), nullptr, /* is_variable */ false) {}
virtual ~ContainerTensor() = default;
virtual at::IntList sizes() const override {
~ContainerTensor() override = default;
at::IntList sizes() const override {
throw std::runtime_error("sizes() on ContainerTensor");
}
virtual at::IntList strides() const override {
at::IntList strides() const override {
throw std::runtime_error("strides() on ContainerTensor");
}
virtual int64_t dim() const override {
int64_t dim() const override {
throw std::runtime_error("dim() on ContainerTensor");
}
virtual const at::Storage& storage() const override {
const at::Storage& storage() const override {
throw std::runtime_error("storage() on ContainerTensor");
}
};
@ -348,6 +348,7 @@ struct UseList {
};
// one instruction plus meta-data
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
struct Instruction {
Operation callback;
UseList inputs;

View File

@ -225,7 +225,7 @@ private:
// the schema.
// note: mutable because schema_ is effectively a cache
mutable const FunctionSchema* schema_;
topo_position_t topo_position_;
topo_position_t topo_position_ = 0;
protected:
TORCH_API Node(Graph * graph_, NodeKind kind_); //defined after graph
public:

View File

@ -203,10 +203,10 @@ static void reorder_according_to_dag(Block * block, const detail::DynamicDAG<Nod
if (!vertex.has_value()) continue;
auto& nodes = vertex.value()->data;
for (auto it = nodes.begin(); it != nodes.end(); ++it) {
for (Node* node : nodes) {
// Move all nodes according to the topological order in dep_graph. A lot
// of the moves are unnecessary but this is a quick & easy solution.
(*it)->moveBefore(block->return_node());
node->moveBefore(block->return_node());
}
}
}

View File

@ -745,13 +745,13 @@ struct GraphFuser {
chunked_op->copyAttributes(*producer_for_chunk_node);
chunked_op->output()->setType(chunk_sel->type());
auto chunked_inputs_it = chunked_inputs.begin();
for (size_t i = 0; i < original_inputs.size(); ++i) {
if (original_inputs[i]->type()->isSubtypeOf(DynamicType::get())) {
for (Value* original_input : original_inputs) {
if (original_input->type()->isSubtypeOf(DynamicType::get())) {
JIT_ASSERT(chunked_inputs_it != chunked_inputs.end());
chunked_op->addInput(chunked_inputs_it->at(chunk_sel->offset()));
++chunked_inputs_it;
} else {
chunked_op->addInput(original_inputs[i]);
chunked_op->addInput(original_input);
}
}
insertAt(&insertion_point, chunked_op);

View File

@ -66,9 +66,9 @@ void PropagateRequiresGradSimpleNode(Node* node) {
auto inputs = node->inputs();
auto outputs = node->outputs();
bool should_require = std::any_of(inputs.begin(), inputs.end(), getRequiresGrad);
for (size_t i = 0; i < outputs.size(); ++i) {
if (auto type = outputs[i]->type()->cast<TensorType>()) {
setRequiresGrad(outputs[i], should_require && at::isFloatingType(type->scalarType()));
for (Value* output : outputs) {
if (auto type = output->type()->cast<TensorType>()) {
setRequiresGrad(output, should_require && at::isFloatingType(type->scalarType()));
}
}
}

View File

@ -14,15 +14,14 @@ void specializeUndef(Graph & g) {
enum class State { Defined, Undefined, Unknown };
std::unordered_map<Value*, State> state;
auto inputs = g.inputs();
for (size_t i = 0; i < inputs.size(); i++) {
auto tp = inputs[i]->type();
for (Value* input : g.inputs()) {
auto tp = input->type();
if (tp->isSubtypeOf(UndefinedTensorType::get())) {
state[inputs[i]] = State::Undefined;
state[input] = State::Undefined;
} else if (tp->isSubtypeOf(DynamicType::get())) {
state[inputs[i]] = State::Defined;
state[input] = State::Defined;
} else {
state[inputs[i]] = State::Unknown;
state[input] = State::Unknown;
}
}

View File

@ -28,8 +28,8 @@ using AttributeMap = std::unordered_map<std::string, Const>;
using ListAttributeMap = std::unordered_map<std::string, std::vector<Const>>;
struct NoneValue : SugaredValue {
NoneValue() {}
virtual std::string kind() const override {
NoneValue() = default;
std::string kind() const override {
return "None";
}
};
@ -96,7 +96,7 @@ static Value* typeCast(const SourceRange& loc, Value* value, TypePtr dst) {
// expressions like int(x)
struct CastValue : public SugaredValue {
CastValue(TypePtr type)
: type(type) {}
: type(std::move(type)) {}
std::string kind() const override {
std::stringstream ss;
ss << "<" << type->str() << " cast primitive>";
@ -152,7 +152,7 @@ private:
// delete unnecessary ones later with replaceAllusesWith().
struct Environment {
Environment(Method & method, Resolver resolver, Block* b, std::shared_ptr<Environment> next = nullptr)
: method(method), resolver(std::move(resolver)), b(b), next(next) {}
: method(method), resolver(std::move(resolver)), b(b), next(std::move(next)) {}
Method & method;
Resolver resolver;
@ -872,7 +872,7 @@ struct to_ir {
}
return_type_idx++;
}
returns.push_back({"", type});
returns.emplace_back("", type);
}
}
@ -1523,12 +1523,11 @@ private:
auto starred = Starred(tree);
auto entries = emitSugaredExpr(starred.expr(), 1)->asTuple(starred.range(), method);
for(auto entry : entries) {
values.push_back(NamedValue(
tree->range(), entry->asValue(starred.range(), method)));
values.emplace_back(
tree->range(), entry->asValue(starred.range(), method));
}
} else {
values.push_back(NamedValue(
tree->range(), emitExpr(Expr(tree))));
values.emplace_back(tree->range(), emitExpr(Expr(tree)));
}
}
return values;

View File

@ -89,17 +89,17 @@ struct SugaredValue : public std::enable_shared_from_this<SugaredValue> {
struct TORCH_API SimpleValue : public SugaredValue {
SimpleValue(Value * value)
: value(value) {}
virtual std::string kind() const override {
std::string kind() const override {
return "value";
}
virtual Value * asValue(SourceRange range, Method & m) override {
Value * asValue(SourceRange range, Method & m) override {
return value;
}
virtual std::vector<std::shared_ptr<SugaredValue>> asTuple(
std::vector<std::shared_ptr<SugaredValue>> asTuple(
SourceRange loc,
Method& m,
c10::optional<size_t> size_hint = {}) override;
virtual std::shared_ptr<SugaredValue> attr(SourceRange loc, Method & m, const std::string& field) override;
std::shared_ptr<SugaredValue> attr(SourceRange loc, Method & m, const std::string& field) override;
Value* getValue() const {
return value;
}
@ -129,8 +129,8 @@ struct TORCH_API BuiltinFunction : public SugaredValue {
};
struct TORCH_API BuiltinModule : public SugaredValue {
BuiltinModule(const std::string& name)
: name(name) {}
BuiltinModule(std::string name)
: name(std::move(name)) {}
std::string name;
std::string kind() const override {
@ -143,7 +143,7 @@ struct TORCH_API BuiltinModule : public SugaredValue {
};
struct TORCH_API ForkValue : public SugaredValue {
ForkValue() {}
ForkValue() = default;
std::string kind() const override {
return "fork";
@ -194,7 +194,7 @@ struct MethodValue : public SugaredValue {
std::string kind() const override {
return "method";
}
virtual std::shared_ptr<SugaredValue> call(SourceRange loc, Method & caller, at::ArrayRef<NamedValue> inputs, at::ArrayRef<NamedValue> attributes, size_t n_binders) override {
std::shared_ptr<SugaredValue> call(SourceRange loc, Method & caller, at::ArrayRef<NamedValue> inputs, at::ArrayRef<NamedValue> attributes, size_t n_binders) override {
return std::make_shared<SimpleValue>(packOutputs(*caller.graph(), caller.emit_call_to(loc, method, inputs, attributes)));
}
private:

View File

@ -17,7 +17,7 @@ struct ErrorReport : public std::exception {
: context(std::move(loc)) {}
explicit ErrorReport(const TreeRef& tree) : ErrorReport(tree->range()) {}
explicit ErrorReport(const Token& tok) : ErrorReport(tok.range) {}
virtual const char* what() const noexcept override {
const char* what() const noexcept override {
std::stringstream msg;
msg << "\n" << ss.str();
if (context != nullptr) {

View File

@ -352,7 +352,7 @@ SharedParserData& sharedParserData();
struct Token {
int kind;
SourceRange range;
Token(int kind, const SourceRange& range) : kind(kind), range(range) {}
Token(int kind, SourceRange range) : kind(kind), range(std::move(range)) {}
std::string text() {
return range.text();
}

View File

@ -48,7 +48,7 @@ struct Method {
, graph_(std::move(graph))
, optimize(optimize)
, member_inputs(std::move(initial_members))
, method_creator(method_creator) {
, method_creator(std::move(method_creator)) {
JIT_ASSERT(graph_->inputs().size() >= member_inputs.size());
int i = graph_->inputs().size() - member_inputs.size();
for(at::Tensor* member : member_inputs) {
@ -153,13 +153,13 @@ struct Method {
}
Method& setSchema(FunctionSchema schema_) {
schema = torch::make_unique<FunctionSchema>(std::move(schema_));
schema = make_unique<FunctionSchema>(std::move(schema_));
return *this;
}
const FunctionSchema& getSchema() const {
if(schema == nullptr) {
schema.reset(new FunctionSchema(defaultSchemaFor(*this)));
schema = make_unique<FunctionSchema>(defaultSchemaFor(*this));
}
return *schema;
}
@ -193,10 +193,10 @@ private:
for(size_t i = 0; i < num_inputs; ++i) {
const Value* v = g.inputs().at(i);
std::string name = v->hasUniqueName() ? v->uniqueName() : ("argument_" + std::to_string(i));
args.push_back({std::move(name), unshapedType(g.inputs()[i]->type())});
args.emplace_back(std::move(name), unshapedType(g.inputs()[i]->type()));
}
for(size_t i = 0; i < g.outputs().size(); ++i) {
returns.push_back({"", unshapedType(g.outputs()[i]->type())});
returns.emplace_back("", unshapedType(g.outputs()[i]->type()));
}
return { method.name(), std::move(args), std::move(returns) };
}

View File

@ -96,8 +96,8 @@ struct Tree : std::enable_shared_from_this<Tree> {
};
struct String : public Tree {
String(const std::string& value_) : Tree(TK_STRING), value_(value_) {}
virtual const std::string& stringValue() const override {
String(std::string value) : Tree(TK_STRING), value_(std::move(value)) {}
const std::string& stringValue() const override {
return value_;
}
template <typename... Args>
@ -121,22 +121,22 @@ static SourceRange mergeRanges(SourceRange c, const TreeList& others) {
}
struct Compound : public Tree {
Compound(int kind, const SourceRange& range_) : Tree(kind), range_(range_) {}
Compound(int kind, SourceRange range) : Tree(kind), range_(std::move(range)) {}
Compound(int kind, const SourceRange& range_, TreeList&& trees_)
: Tree(kind),
range_(mergeRanges(range_, trees_)),
trees_(std::move(trees_)) {}
virtual const TreeList& trees() const override {
const TreeList& trees() const override {
return trees_;
}
static TreeRef
create(int kind, const SourceRange& range_, TreeList&& trees_) {
return std::make_shared<Compound>(kind, range_, std::move(trees_));
}
virtual bool isAtom() const override {
bool isAtom() const override {
return false;
}
virtual TreeRef map(std::function<TreeRef(TreeRef)> fn) override {
TreeRef map(std::function<TreeRef(TreeRef)> fn) override {
TreeList trees_;
for (auto& t : trees()) {
trees_.push_back(fn(t));

View File

@ -88,7 +88,7 @@ namespace script {
// than both in the parser and in this code.
// XXX: these structs should have no fields to prevent slicing when passing by value
struct TreeView {
explicit TreeView(const TreeRef& tree_) : tree_(tree_) {}
explicit TreeView(TreeRef tree) : tree_(std::move(tree)) {}
TreeRef tree() const {
return tree_;
}

View File

@ -284,7 +284,7 @@ private:
// shorter method so that toVar(v) + toVar(c) is short.
static inline SymbolicVariable toVar(Value * v) {
return SymbolicVariable(v);
return {v};
}
template<typename T, typename = typename std::enable_if<std::is_arithmetic<T>::value>::type>

View File

@ -125,7 +125,7 @@ public:
// list of types this type contains, e.g. for a List then element type of a list
// for a tuple, the types of the tuple elements
virtual at::ArrayRef<TypePtr> containedTypes() const {
return at::ArrayRef<TypePtr>();
return {};
}
// create a new version of this type, replacing its contained types with
// contained_types
@ -815,7 +815,7 @@ struct VarType : public Type {
}
private:
VarType(std::string name_)
: Type(TypeKind::VarType), name_(name_) {}
: Type(TypeKind::VarType), name_(std::move(name_)) {}
std::string name_;
};