Bag of clang tidy fixes for torch/csrc/ and torch/csrc/autograd (#11050)

Summary:
Linting `torch/csrc/` (non-recursive) and `torch/csrc/autograd` (non-recursive).

Fixed things like:
- `typedef` vs `using`
- Use `.empty()` instead of comparing with an empty string or using `.size() == 0`
- Use range for loops instead of old style loops (`modernize-`)
- Remove some `virtual` + `override`
- Replace `stdint.h` with `cstdint`
- Replace `return Type(x, y)` with `return {x, y}`
- Use boolean values (`true`/`false`) instead of numbers (`1`/`0`)
- More ...

ezyang apaszke cpuhrsch
Pull Request resolved: https://github.com/pytorch/pytorch/pull/11050

Differential Revision: D9597505

Pulled By: goldsborough

fbshipit-source-id: cb0fb4793ade885a8dbf4b10484487b84c64c7f2
This commit is contained in:
Peter Goldsborough
2018-09-05 19:41:28 -07:00
committed by Facebook Github Bot
parent 83a1ab2136
commit dccd0f2de6
31 changed files with 138 additions and 122 deletions

View File

@ -4,6 +4,7 @@ Checks: '
*
,clang-analyzer-*
,modernize-*
,-cert-dcl21-cpp
,-cert-err58-cpp
,-cert-err60-cpp
,-clang-diagnostic-*
@ -12,10 +13,12 @@ Checks: '
,-cppcoreguidelines-pro-bounds-constant-array-index
,-cppcoreguidelines-pro-type-member-init
,-cppcoreguidelines-pro-type-static-cast-downcast
,-cppcoreguidelines-pro-type-union-access
,-cppcoreguidelines-pro-type-vararg
,-cppcoreguidelines-special-member-functions
,-fuchsia-*
,-google-build-using-namespace
,-google-default-arguments
,-google-explicit-constructor
,-google-readability-braces-around-statements
,-google-readability-namespace-comments
@ -24,6 +27,7 @@ Checks: '
,-google-runtime-references
,-hicpp-braces-around-statements
,-hicpp-explicit-conversions
,-hicpp-member-init
,-hicpp-no-array-decay
,-hicpp-signed-bitwise
,-hicpp-special-member-functions

View File

@ -93,7 +93,7 @@ ${return_type} TypeDefault::${api_name}(${type_method_formals}) const {
return at::native::${api_name}(${type_method_actuals}, options());
}
""")
# 4. add virtual override to TypeDerived.h
# 4. add override to TypeDerived.h
TYPE_DERIVED_DECLARATION = CodeTemplate("""\
${return_type} ${method_prefix_derived}${api_name}(${type_method_formals}) const override;
""")

View File

@ -35,8 +35,7 @@ bool test_optimizer_xor(Options options) {
const int64_t kBatchSize = 4;
const int64_t kMaximumNumberOfEpochs = 3000;
auto optimizer = OptimizerClass(std::vector<torch::Tensor>(), options);
optimizer.add_parameters(model->parameters());
OptimizerClass optimizer(model->parameters(), options);
float running_loss = 1;
int epoch = 0;
@ -152,6 +151,9 @@ TEST_CASE("Optim/BasicInterface") {
REQUIRE(optimizer.size() == 0);
optimizer.add_parameters(parameters);
REQUIRE(optimizer.size() == parameters.size());
for (size_t p = 0; p < parameters.size(); ++p) {
REQUIRE(optimizer.parameters()[p].allclose(parameters[p]));
}
}
{
Linear linear(3, 4);

View File

@ -80,7 +80,7 @@ DONT_REQUIRE_DERIVATIVE = {
}
METHOD_DECLARATION = CodeTemplate("""\
virtual ${return_type} ${method_prefix_derived}${api_name}(${type_method_formals}) const override;
${return_type} ${method_prefix_derived}${api_name}(${type_method_formals}) const override;
""")
METHOD_DEFINITION = CodeTemplate("""\

View File

@ -34,30 +34,30 @@ void register_variable_type_for(at::Type* baseType);
struct TORCH_API VariableType final : public at::TypeDefault {
VariableType(Context* context, at::Type* baseType);
virtual at::ScalarType scalarType() const override;
virtual at::Backend backend() const override;
virtual at::Allocator* allocator() const override;
virtual at::Device getDeviceFromPtr(void * data) const override;
virtual Storage storage(bool resizable = false) const override;
virtual Storage storage(size_t size, bool resizable = false) const override;
virtual Storage storageFromBlob(void * data, int64_t size, const std::function<void(void*)> & deleter) const override;
virtual Storage storageWithAllocator(int64_t size, at::Allocator* allocator) const override;
virtual std::unique_ptr<at::Generator> generator() const override;
virtual const char * toString() const override;
virtual at::TypeID ID() const override;
virtual size_t elementSizeInBytes() const override;
virtual at::Type & toBackend(at::Backend b) const override;
virtual at::Type & toScalarType(at::ScalarType s) const override;
virtual Storage unsafeStorageFromTH(void * th_pointer, bool retain) const override;
virtual at::Tensor unsafeTensorFromTH(void * th_pointer, bool retain) const override;
at::ScalarType scalarType() const override;
at::Backend backend() const override;
at::Allocator* allocator() const override;
at::Device getDeviceFromPtr(void * data) const override;
Storage storage(bool resizable = false) const override;
Storage storage(size_t size, bool resizable = false) const override;
Storage storageFromBlob(void * data, int64_t size, const std::function<void(void*)> & deleter) const override;
Storage storageWithAllocator(int64_t size, at::Allocator* allocator) const override;
std::unique_ptr<at::Generator> generator() const override;
const char * toString() const override;
at::TypeID ID() const override;
size_t elementSizeInBytes() const override;
at::Type & toBackend(at::Backend b) const override;
at::Type & toScalarType(at::ScalarType s) const override;
Storage unsafeStorageFromTH(void * th_pointer, bool retain) const override;
at::Tensor unsafeTensorFromTH(void * th_pointer, bool retain) const override;
static at::Type* getVariableTypeFromBaseType(const at::Type& baseType);
static bool isVariableType(const at::Type& type);
static std::vector<at::Type*> allCUDATypes();
static std::vector<at::Type*> allCPUTypes();
virtual Tensor & s_copy_(Tensor & self, const Tensor & src, bool non_blocking) const override;
virtual Tensor & _s_copy_from(const Tensor & self, Tensor & dst, bool non_blocking) const override;
Tensor & s_copy_(Tensor & self, const Tensor & src, bool non_blocking) const override;
Tensor & _s_copy_from(const Tensor & self, Tensor & dst, bool non_blocking) const override;
${type_derived_method_declarations}
private:

View File

@ -210,7 +210,7 @@ def parse_options():
def main():
options = parse_options()
paths = map(normalize_directory_path, options.paths)
paths = list(map(normalize_directory_path, options.paths))
if options.revision:
files = get_changed_files(options.revision, paths, options.verbose)
else:

View File

@ -2,7 +2,7 @@
#include "torch/csrc/python_headers.h"
#include "torch/csrc/autograd/variable.h"
#include "stdint.h"
#include "cstdint"
extern PyTypeObject THPSizeType;

View File

@ -5,7 +5,7 @@
#include <TH/TH.h>
#ifndef INT64_MAX
#include "stdint.h"
#include "cstdint"
#endif
template <typename T> struct THPTypeInfo {};

View File

@ -72,7 +72,9 @@ class OrderedDict {
// Move works by default, because you can move-construct vectors of const
// values..
OrderedDict(OrderedDict&& other) = default;
OrderedDict(OrderedDict&& other) noexcept(
noexcept(std::unordered_map<Key, size_t>()) &&
noexcept(std::vector<Item>())) = default;
OrderedDict& operator=(OrderedDict&& other) = default;
~OrderedDict() = default;

View File

@ -155,31 +155,31 @@ class ModuleHolder : torch::detail::ModuleHolderIndicator {
} // namespace nn
} // namespace torch
#define TORCH_ARG(T, name) \
auto name(const T& new_##name)->decltype(*this) { \
this->name##_ = new_##name; \
return *this; \
} \
auto name(T&& new_##name)->decltype(*this) { \
this->name##_ = std::move(new_##name); \
return *this; \
} \
const T& name() const noexcept { \
return this->name##_; \
} \
T name##_
#define TORCH_ARG(T, name) \
auto name(const T& new_##name)->decltype(*this) { /* NOLINT */ \
this->name##_ = new_##name; \
return *this; \
} \
auto name(T&& new_##name)->decltype(*this) { /* NOLINT */ \
this->name##_ = std::move(new_##name); \
return *this; \
} \
const T& name() const noexcept { /* NOLINT */ \
return this->name##_; \
} \
T name##_ /* NOLINT */
/// Defines a class `Name` which inherits from `nn::ModuleHolder` to provide a
/// wrapper over a `std::shared_ptr<Impl>`.
#define TORCH_MODULE_IMPL(Name, Impl) \
class Name : public torch::nn::ModuleHolder<Impl> { \
public: \
using torch::nn::ModuleHolder<Impl>::ModuleHolder; \
Name(const Name&) = default; \
Name(Name&&) = default; \
Name(Name& other) : Name(static_cast<const Name&>(other)) {} \
Name& operator=(const Name&) = default; \
Name& operator=(Name&&) = default; \
#define TORCH_MODULE_IMPL(Name, Impl) \
class Name : public torch::nn::ModuleHolder<Impl> { /* NOLINT */ \
public: \
using torch::nn::ModuleHolder<Impl>::ModuleHolder; \
Name(const Name&) = default; /* NOLINT */ \
Name(Name&&) = default; /* NOLINT */ \
Name(Name& other) : Name(static_cast<const Name&>(other)) {} /* NOLINT */ \
Name& operator=(const Name&) = default; /* NOLINT */ \
Name& operator=(Name&&) = default; /* NOLINT */ \
}
/// Like `TORCH_MODULE_IMPL`, but defaults the `Impl` name to `<Name>Impl`.

View File

@ -31,21 +31,20 @@ class OptimizerBase {
virtual ~OptimizerBase() = default;
/// Adds the given vector of parameters to the optimizer's parameter list.
/// Override this method if you want to modify the way parameters are added to
/// the `Optimizer`.
virtual void add_parameters(const std::vector<Tensor>& parameters);
void add_parameters(const std::vector<Tensor>& parameters);
/// Adds the `ParameterCursor`'s parameters to the optimizer's parameter list.
/// NOTE: Calls the `vector<Tensor>` overload of `add_parameters` -- override
/// that method if you want to modify the behavior of `add_parameters`.
virtual void add_parameters(const ParameterCursor& cursor);
void add_parameters(const ParameterCursor& cursor);
/// Zeros out the gradients of all parameters.
virtual void zero_grad();
/// Provides a reference to the parameters this optimizer holds.
/// Provides a const reference to the parameters this optimizer holds.
const std::vector<Tensor>& parameters() const noexcept;
/// Provides a reference to the parameters this optimizer holds.
std::vector<Tensor>& parameters() noexcept;
/// Returns the number of parameters referenced by the optimizer.
size_t size() const noexcept;

View File

@ -36,6 +36,14 @@ void OptimizerBase::zero_grad() {
}
}
const std::vector<Tensor>& OptimizerBase::parameters() const noexcept {
return parameters_;
}
std::vector<Tensor>& OptimizerBase::parameters() noexcept {
return parameters_;
}
size_t OptimizerBase::size() const noexcept {
return parameters_.size();
}

View File

@ -2,6 +2,6 @@
namespace torch { namespace autograd {
bool AnomalyMode::_enabled = 0;
bool AnomalyMode::_enabled = false;
}}

View File

@ -63,7 +63,7 @@ struct FunctionTask {
FunctionTask(GraphTask* base, std::shared_ptr<Function> fn, InputBuffer inputs)
: base(base)
, fn(fn)
, fn(std::move(fn))
, inputs(std::move(inputs)) {}
};
@ -170,15 +170,10 @@ struct GraphTask {
}
GraphTask(bool keep_graph, bool grad_mode)
: exception()
, has_error(false)
: has_error(false)
, outstanding_tasks(0)
, keep_graph(keep_graph)
, grad_mode(grad_mode)
, mutex()
, not_done()
, not_ready()
, dependencies()
, owner(NO_DEVICE) {}
};
@ -194,12 +189,12 @@ auto ReadyQueue::push(FunctionTask item) -> void {
auto ReadyQueue::pop() -> FunctionTask {
std::unique_lock<std::mutex> lock(mutex);
not_empty.wait(lock, [this]{ return !heap.empty(); });
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
auto task = std::move(const_cast<FunctionTask&>(heap.top())); heap.pop();
return task;
}
Engine::Engine() : ready_queues() {
}
Engine::Engine() = default;
// This Engine's ReadyQueues and their corresponding threads are leaked here
Engine::~Engine() = default;
@ -376,6 +371,7 @@ static variable_list call_function(FunctionTask& task) {
checkpoint_valid = prev_checkpoint_valid_state;
if(has_post_hooks){
// NOLINTNEXTLINE(bugprone-use-after-move)
return call_post_hooks(fn, std::move(outputs), std::move(inputs));
}
return outputs;
@ -478,7 +474,7 @@ auto Engine::compute_dependencies(Function* root, GraphTask& task) -> void {
// Queue contains all nodes that will start propagating gradients.
// We no longer have to expand functions that don't require grad.
auto& dependencies = task.dependencies;
while (queue.size() > 0) {
while (!queue.empty()) {
auto fn = queue.back(); queue.pop_back();
for (const auto& edge : fn->next_edges()) {
if (auto next_ptr = edge.function.get()) {
@ -513,6 +509,7 @@ auto Engine::execute(const edge_list& roots,
const edge_list& outputs) -> variable_list {
std::call_once(start_threads_flag, &Engine::start_threads, this);
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
validate_outputs(roots, const_cast<variable_list&>(inputs), [](const std::string& msg) {
return msg;
});
@ -559,6 +556,9 @@ auto Engine::execute(const edge_list& roots,
// more callbacks (or they can be registered from other threads
// while it's waiting.
std::unique_lock<std::mutex> cb_lock(post_callbacks_lock);
// WARNING: Don't use a range-for loop here because more callbacks may be
// added in between callback calls, so iterators may become invalidated.
// NOLINTNEXTLINE(modernize-loop-convert)
for (size_t i = 0; i < final_callbacks.size(); ++i) {
cb_lock.unlock();
final_callbacks[i]();

View File

@ -67,7 +67,7 @@ protected:
};
// allow python_engine to override the default engine when it loads
typedef Engine& (*EngineStub)(void);
using EngineStub = Engine& (*)();
TORCH_API void set_default_engine_stub(EngineStub stub);
}} // namespace torch::autograd

View File

@ -114,14 +114,14 @@ void deleteFunction(Function* function) {
delete function;
if (deleteFunctionQueue.size() == 0) {
if (deleteFunctionQueue.empty()) {
return;
}
if (recursion_depth.value() != kDeleteFunctionMaxRecursionDepth) {
AT_ERROR("Only one deleter per thread should be able to process "
"the delete queue. Please open an issue.");
}
while (deleteFunctionQueue.size() > 0) {
while (!deleteFunctionQueue.empty()) {
auto queued_function = deleteFunctionQueue.front();
deleteFunctionQueue.pop_front();
delete queued_function;

View File

@ -2,7 +2,7 @@
namespace torch { namespace autograd {
thread_local bool GradMode_enabled = 1;
thread_local bool GradMode_enabled = true;
bool GradMode::is_enabled() {
return GradMode_enabled;

View File

@ -1,7 +1,7 @@
#ifndef THP_BYTE_ORDER_H
#define THP_BYTE_ORDER_H
#include <stdint.h>
#include <cstdint>
#include <stddef.h>
#include <THHalf.h>

View File

@ -1,6 +1,6 @@
#pragma once
#include <vector>
#include <stdint.h>
#include <cstdint>
#include <string>
#include <memory>
#include <vector>
@ -36,7 +36,7 @@ struct ScalarAttributeValue : public AttributeValue {
using ConstructorType = T;
using ValueType = T;
ScalarAttributeValue(Symbol name, ConstructorType value_)
: AttributeValue(name), value_(value_) {}
: AttributeValue(name), value_(std::move(value_)) {}
ValueType & value() {
return value_;
}
@ -222,7 +222,7 @@ private:
typename T::ValueType & get(Symbol name) const {
JIT_ASSERT(name.is_attr());
auto it = find(name, true);
T* child = dynamic_cast<T*>(it->get());
auto* child = dynamic_cast<T*>(it->get());
if(child == nullptr) {
throw AttributeError(name, true);
}

View File

@ -18,8 +18,8 @@ struct Argument {
bool kwarg_only = false)
: name(std::move(name)),
type(type? type : DynamicType::get()),
N(N),
default_value(default_value),
N(std::move(N)),
default_value(std::move(default_value)),
kwarg_only(kwarg_only) {}
std::string name;
TypePtr type;

View File

@ -1,5 +1,5 @@
#include "torch/csrc/jit/interned_strings.h"
#include <stdint.h>
#include <cstdint>
#include <iostream>
#include <mutex>
#include <sstream>

View File

@ -1,6 +1,6 @@
#pragma once
#include <vector>
#include <stdint.h>
#include <cstdint>
#include <string>
#include <unordered_map>
#include <algorithm>

View File

@ -1,4 +1,4 @@
#include <stdint.h>
#include <cstdint>
#include <iostream>
#include <mutex>
#include <sstream>

View File

@ -150,6 +150,7 @@ public:
}
Scope* parent = this->parent_;
while (!parent->isRoot()) {
// NOLINTNEXTLINE(performance-inefficient-string-concatenation)
out = std::string(parent->name_.toUnqualString()) + separator + out;
parent = parent->parent_;
}
@ -181,7 +182,7 @@ private:
std::string unique_name_;
TypePtr type_;
public:
Value* setType(const TypePtr type);
Value* setType(TypePtr type);
void inferTypeFrom(const at::Tensor& output) {
setType(CompleteTensorType::create(output));
}
@ -368,7 +369,7 @@ public:
}
bool hasUses() const {
for(auto o : outputs()) {
if(o->uses().size() > 0)
if(!o->uses().empty())
return true;
}
return false;
@ -890,7 +891,7 @@ public:
Graph(std::shared_ptr<Scope> scope_root)
: next_unique_(0)
, new_node_stage_(0)
, scope_root_(scope_root)
, scope_root_(std::move(scope_root))
, current_scope_(scope_root_.get())
, block_(new Block(this, nullptr))
, insert_before_(return_node()) {}
@ -1261,7 +1262,7 @@ inline Node::Node(Graph * graph_, NodeKind kind_) :
inline void Node::eraseOutput(size_t i) {
JIT_ASSERT(i < outputs_.size());
JIT_ASSERT(outputs_[i]->uses().size() == 0);
JIT_ASSERT(outputs_[i]->uses().empty());
schema_ = nullptr;
Value * n = outputs_[i];
outputs_.erase(outputs_.begin() + i);
@ -1286,9 +1287,9 @@ inline void Node::eraseBlock(size_t i) {
}
inline void Node::destroy() {
while(outputs().size() > 0)
while(!outputs().empty())
eraseOutput(outputs().size() - 1);
while(blocks().size() > 0)
while(!blocks().empty())
eraseBlock(blocks().size() - 1);
removeAllInputs();
if(inBlockList())
@ -1422,13 +1423,13 @@ inline Node* Graph::createPythonOp(
}
inline graph_node_list_iterator Node::iterator() {
return graph_node_list_iterator(this, 0);
return {this, 0};
}
inline graph_node_list_iterator Node::reverseIterator() {
return iterator().reverse();
}
inline const_graph_node_list_iterator Node::iterator() const {
return const_graph_node_list_iterator(this, 0);
return {this, 0};
}
inline const_graph_node_list_iterator Node::reverseIterator() const {
return iterator().reverse();

View File

@ -17,8 +17,8 @@ struct TORCH_API ConstantString : c10::intrusive_ptr_target {
private:
const std::string str_;
public:
ConstantString(const std::string & str)
: str_(str) {}
ConstantString(std::string str)
: str_(std::move(str)) {}
static c10::intrusive_ptr<ConstantString> create(const std::string str_) {
return c10::make_intrusive<ConstantString>(str_);
}
@ -88,7 +88,7 @@ struct TORCH_API IValue {
c10::raw::intrusive_ptr::decref(as_intrusive_ptr);
}
}
IValue & operator=(IValue && rhs) & {
IValue & operator=(IValue && rhs) & noexcept {
rhs.swap(*this);
return *this;
}
@ -96,7 +96,7 @@ struct TORCH_API IValue {
IValue(rhs).swap(*this);
return *this;
}
void swap(IValue & rhs) {
void swap(IValue & rhs) noexcept {
std::swap(payload, rhs.payload);
std::swap(is_intrusive_ptr, rhs.is_intrusive_ptr);
std::swap(tag, rhs.tag);

View File

@ -17,7 +17,7 @@ struct SourceLocation {
void wrapAndRethrowException(const std::exception & e, const std::string & additional = "") {
std::stringstream msg;
msg << "\n" << e.what() << ":\n";
if(additional.size() != 0) {
if(!additional.empty()) {
msg << additional << ":\n";
}
highlight(msg);

View File

@ -10,10 +10,10 @@ namespace jit {
// range.
struct SourceRange : public SourceLocation {
SourceRange(
const std::shared_ptr<std::string>& file_,
std::shared_ptr<std::string> file_,
size_t start_,
size_t end_)
: file_(file_), start_(start_), end_(end_) {}
: file_(std::move(file_)), start_(start_), end_(end_) {}
const std::string text() const {
return file().substr(start(), end() - start());
}
@ -22,7 +22,7 @@ struct SourceRange : public SourceLocation {
}
static const size_t CONTEXT = 10;
virtual void highlight(std::ostream& out) const override {
void highlight(std::ostream& out) const override {
const std::string& str = file();
size_t begin_line = start(); // beginning of line to highlight
size_t end_line = start(); // end of line to highlight
@ -57,7 +57,7 @@ struct SourceRange : public SourceLocation {
out << std::string(len, '~')
<< (len < size() ? "... <--- HERE" : " <--- HERE");
out << str.substr(end_line, end_highlight - end_line);
if (str.size() > 0 && str.back() != '\n')
if (!str.empty() && str.back() != '\n')
out << "\n";
}
const std::string& file() const {

View File

@ -129,7 +129,7 @@ struct TORCH_API DynamicType : public Type {
static constexpr bool is_singleton = true;
template<typename ... T>
static DynamicTypePtr create( T&& ... all ) {
return DynamicTypePtr(new DynamicType( std::forward<T>(all)... ));
return DynamicTypePtr(new DynamicType( std::forward<T>(all)... )); // NOLINT(modernize-make-shared)
}
bool operator==(const Type& rhs) const override {
@ -156,7 +156,7 @@ struct TORCH_API TensorType : public Type {
template<typename ... T>
static TensorTypePtr create( T&& ... all ) {
return TensorTypePtr(new TensorType( std::forward<T>(all)... ));
return TensorTypePtr(new TensorType( std::forward<T>(all)... )); // NOLINT(modernize-make-shared)
}
at::ScalarType scalarType() const { return scalar_type_; }
@ -215,15 +215,15 @@ struct TORCH_API CompleteTensorType : public TensorType {
friend struct Type;
template<typename ... T>
static CompleteTensorTypePtr create( T&& ... all ) {
return CompleteTensorTypePtr(new CompleteTensorType( std::forward<T>(all)... ));
return CompleteTensorTypePtr(new CompleteTensorType( std::forward<T>(all)... )); // NOLINT(modernize-make-shared)
}
// overloaded create variadic template argument as it could not distinguish initializer list
static CompleteTensorTypePtr create(at::ScalarType scalar_type, int device, at::IntList sizes) {
return CompleteTensorTypePtr(new CompleteTensorType(scalar_type, device, sizes));
return CompleteTensorTypePtr(new CompleteTensorType(scalar_type, device, sizes)); // NOLINT(modernize-make-shared)
}
static CompleteTensorTypePtr create(at::ScalarType scalar_type, int device, at::IntList sizes, at::IntList strides) {
return CompleteTensorTypePtr(new CompleteTensorType(scalar_type, device, sizes, strides));
return CompleteTensorTypePtr(new CompleteTensorType(scalar_type, device, sizes, strides)); // NOLINT(modernize-make-shared)
}
static const TypeKind Kind = TypeKind::CompleteTensorType;
@ -295,7 +295,7 @@ private:
static std::vector<int64_t> contiguousStridesOf(at::IntList sizes) {
std::vector<int64_t> strides(sizes.size());
if(sizes.size() == 0) // zero-dim case
if(sizes.empty()) // zero-dim case
return strides;
strides.back() = 1;
for(size_t i = strides.size() - 1; i > 0; i--) {
@ -318,7 +318,7 @@ struct TORCH_API ListType : public Type {
friend struct Type;
template<typename ... T>
static ListTypePtr create( T&& ... all ) {
return ListTypePtr(new ListType( std::forward<T>(all)... ));
return ListTypePtr(new ListType( std::forward<T>(all)... )); // NOLINT(modernize-make-shared)
}
bool operator==(const Type& rhs) const override {
if(auto rhs_ = rhs.cast<ListType>()) {
@ -340,7 +340,7 @@ struct TORCH_API ListType : public Type {
static ListTypePtr ofFloats();
private:
ListType(TypePtr elem)
: Type(TypeKind::ListType), elem(elem) {}
: Type(TypeKind::ListType), elem(std::move(elem)) {}
static const TypeKind Kind = TypeKind::ListType;
TypePtr elem;
};
@ -352,7 +352,7 @@ struct TORCH_API TupleType : public Type {
static constexpr bool is_singleton = false;
friend struct Type;
static TupleTypePtr create(std::vector<TypePtr> types) {
return TupleTypePtr(new TupleType( std::move(types) ));
return TupleTypePtr(new TupleType( std::move(types) )); // NOLINT(modernize-make-shared)
}
at::ArrayRef<TypePtr> elements() const {
return elements_;
@ -408,7 +408,7 @@ struct TORCH_API NumberType : public Type {
static constexpr bool is_singleton = true;
template<typename ... T>
static NumberTypePtr create( T&& ... all ) {
return NumberTypePtr(new NumberType( std::forward<T>(all)... ));
return NumberTypePtr(new NumberType( std::forward<T>(all)... )); // NOLINT(modernize-make-shared)
}
bool operator==(const Type& rhs) const override {
return rhs.kind() == kind();
@ -431,7 +431,7 @@ struct TORCH_API FloatType : public Type {
static constexpr bool is_singleton = true;
template<typename ... T>
static FloatTypePtr create( T&& ... all ) {
return FloatTypePtr(new FloatType( std::forward<T>(all)... ));
return FloatTypePtr(new FloatType( std::forward<T>(all)... )); // NOLINT(modernize-make-shared)
}
bool operator==(const Type& rhs) const override {
return rhs.kind() == kind();
@ -457,7 +457,7 @@ struct TORCH_API IntType : public Type {
static constexpr bool is_singleton = true;
template<typename ... T>
static IntTypePtr create( T&& ... all ) {
return IntTypePtr(new IntType( std::forward<T>(all)... ));
return IntTypePtr(new IntType( std::forward<T>(all)... )); // NOLINT(modernize-make-shared)
}
bool operator==(const Type& rhs) const override {
return rhs.kind() == kind();
@ -483,7 +483,7 @@ struct TORCH_API StringType : public Type {
static constexpr bool is_singleton = true;
template<typename ... T>
static StringTypePtr create( T&& ... all ) {
return StringTypePtr(new StringType( std::forward<T>(all)... ));
return StringTypePtr(new StringType( std::forward<T>(all)... )); // NOLINT(modernize-make-shared)
}
bool operator==(const Type& rhs) const override {
return rhs.kind() == kind();
@ -509,12 +509,12 @@ struct NoneType : public Type {
static constexpr bool is_singleton = true;
template<typename ... T>
static NoneTypePtr create( T&& ... all ) {
return NoneTypePtr(new NoneType( std::forward<T>(all)... ));
return NoneTypePtr(new NoneType( std::forward<T>(all)... )); // NOLINT(modernize-make-shared)
}
virtual bool operator==(const Type& rhs) const override {
bool operator==(const Type& rhs) const override {
return rhs.kind() == kind();
}
virtual std::string str() const override {
std::string str() const override {
return "None";
}
static const TypeKind Kind = TypeKind::NoneType;
@ -531,12 +531,12 @@ struct GeneratorType : public Type {
static constexpr bool is_singleton = true;
template<typename ... T>
static GeneratorTypePtr create( T&& ... all) {
return GeneratorTypePtr(new GeneratorType( std::forward<T>(all)... ));
return GeneratorTypePtr(new GeneratorType( std::forward<T>(all)... )); // NOLINT(modernize-make-shared)
}
virtual bool operator==(const Type& rhs) const override {
bool operator==(const Type& rhs) const override {
return rhs.kind() == kind();
}
virtual std::string str() const override {
std::string str() const override {
return "Generator";
}
static const TypeKind Kind = TypeKind::GeneratorType;

View File

@ -6,16 +6,16 @@ template<class T>
class THPPointer {
public:
THPPointer(): ptr(nullptr) {};
explicit THPPointer(T *ptr): ptr(ptr) {};
THPPointer(THPPointer &&p) { free(); ptr = p.ptr; p.ptr = nullptr; };
explicit THPPointer(T *ptr) noexcept : ptr(ptr) {};
THPPointer(THPPointer &&p) noexcept { free(); ptr = p.ptr; p.ptr = nullptr; };
~THPPointer() { free(); };
T * get() { return ptr; }
const T * get() const { return ptr; }
T * release() { T *tmp = ptr; ptr = nullptr; return tmp; }
operator T*() { return ptr; }
THPPointer& operator =(T *new_ptr) { free(); ptr = new_ptr; return *this; }
THPPointer& operator =(THPPointer &&p) { free(); ptr = p.ptr; p.ptr = nullptr; return *this; }
THPPointer& operator =(T *new_ptr) noexcept { free(); ptr = new_ptr; return *this; }
THPPointer& operator =(THPPointer &&p) noexcept { free(); ptr = p.ptr; p.ptr = nullptr; return *this; }
T * operator ->() { return ptr; }
explicit operator bool() const { return ptr != nullptr; }
@ -35,4 +35,4 @@ private:
* out the GIL itself. Easiest way to avoid this problem is to
* not use THPPointer in this situation.
*/
typedef THPPointer<PyObject> THPObjectPtr;
using THPObjectPtr = THPPointer<PyObject>;

View File

@ -1,7 +1,7 @@
#pragma once
#include "torch/csrc/python_headers.h"
#include <stdint.h>
#include <cstdint>
#include <stdexcept>
#include "torch/csrc/Exceptions.h"
#include "torch/csrc/utils/tensor_numpy.h"

View File

@ -1,4 +1,4 @@
#pragma once
struct _object;
typedef _object PyObject;
using PyObject = _object;