Use Wextra-semi (#140236)
Fixes #ISSUE_NUMBER

Pull Request resolved: https://github.com/pytorch/pytorch/pull/140236
Approved by: https://github.com/ezyang
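As background (not part of the original commit message), here is a minimal sketch of what the warning catches; the file and names below are made up for illustration. -Wextra-semi in GCC and Clang reports a semicolon that follows something which is already a complete definition, which is exactly the character the hunks below delete after "{}" constructor bodies and after macros such as REGISTER_DISPATCH and C10_DEFINE_bool that already expand to complete statements.

// sketch.cpp -- illustrative only, not a file touched by the patch
struct Widget {
  // "{}" already closes the definition; the extra ';' after it is what
  // -Wextra-semi reports ("extra ';' after member function definition").
  Widget() : count_(0) {};  // warns under -Wextra-semi
  void reset() {}           // fine: no redundant ';'
  int count_ = 0;
};

Compiling the sketch with something like clang++ -std=c++17 -Wextra-semi -c sketch.cpp flags the constructor line; the torch_compile_options hunk below wires the same flag into private_compile_options (together with -Wno-error=extra-semi, so it warns rather than fails the build).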
@@ -106,10 +106,11 @@ StatementMacros:
- C10_DEFINE_int32
- C10_DEFINE_int64
- C10_DEFINE_string
- DEFINE_BINARY
- PyObject_HEAD
- PyObject_VAR_HEAD
- PyException_HEAD
- DEFINE_BINARY
- TORCH_DECLARE_bool

TabWidth: 8
UseTab: Never

@@ -20,7 +20,7 @@
//
// In native/MyKernel.h:
// using fn_type = void(*)(const Tensor& x);
// DECLARE_DISPATCH(fn_type, stub);
// DECLARE_DISPATCH(fn_type, stub)
//
// In native/MyKernel.cpp
// DEFINE_DISPATCH(stub);

@@ -476,8 +476,8 @@ void bf16_gemv_trans(
#if !defined(C10_MOBILE)
REGISTER_DISPATCH(fp16_dot_with_fp32_arith_stub, &fp16_dot_with_fp32_arith)
REGISTER_DISPATCH(fp16_gemv_trans_stub, &fp16_gemv_trans)
REGISTER_DISPATCH(bf16_dot_with_fp32_arith_stub, &bf16_dot_with_fp32_arith);
REGISTER_DISPATCH(bf16_gemv_trans_stub, &bf16_gemv_trans);
REGISTER_DISPATCH(bf16_dot_with_fp32_arith_stub, &bf16_dot_with_fp32_arith)
REGISTER_DISPATCH(bf16_gemv_trans_stub, &bf16_gemv_trans)
#endif //!defined(C10_MOBILE)

} // namespace at::native

@@ -387,9 +387,8 @@ function(torch_compile_options libname)
list(APPEND private_compile_options -Wunused-but-set-variable)
endif()
if("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
list(APPEND private_compile_options -Wunused-private-field)
endif()
if(NOT "${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
list(APPEND private_compile_options -Wunused-private-field -Wextra-semi -Wno-error=extra-semi)
else()
list(APPEND private_compile_options
# Considered to be flaky. See the discussion at
# https://github.com/pytorch/pytorch/pull/9608
@@ -70,15 +70,15 @@ SIGNAL_HANDLER(
SIGBUS,
handler_SIGBUS,
"ERROR: Unexpected bus error encountered in worker. "
"This might be caused by insufficient shared memory (shm).\n");
"This might be caused by insufficient shared memory (shm).\n")
SIGNAL_HANDLER(
SIGSEGV,
handler_SIGSEGV,
"ERROR: Unexpected segmentation fault encountered in worker.\n");
"ERROR: Unexpected segmentation fault encountered in worker.\n")
SIGNAL_HANDLER(
SIGFPE,
handler_SIGFPE,
"ERROR: Unexpected floating-point exception encountered in worker.\n");
"ERROR: Unexpected floating-point exception encountered in worker.\n")

// When an error happened in DataLoader methods and Python starts to exit, the
// error trace will keep the loader alive, and Python may kill the children

@@ -339,7 +339,7 @@ struct noop_gil_scoped_release {
// user-defined constructor (i.e. not defaulted) to avoid
// unused-variable warnings at usage sites of this class
// NOLINTNEXTLINE(modernize-use-equals-default)
noop_gil_scoped_release(){};
noop_gil_scoped_release() {}
};

template <bool release_gil>

@@ -273,14 +273,14 @@ void ConcretePyInterpreterVTable::decref(PyObject* pyobj, bool has_pyobj_slot)
}
}
Py_DECREF(pyobj);
};
}

void ConcretePyInterpreterVTable::incref(PyObject* pyobj) const {
if (!Py_IsInitialized())
return;
pybind11::gil_scoped_acquire gil;
Py_INCREF(pyobj);
};
}

bool isPythonTensor(const at::Tensor& tensor) {
return tensor.unsafeGetTensorImpl()->key_set().has(c10::DispatchKey::Python);
@@ -10,7 +10,7 @@
#define THPStorageStr "torch.UntypedStorage"

struct THPStorage {
PyObject_HEAD;
PyObject_HEAD
c10::MaybeOwned<c10::Storage> cdata;
bool is_hermetic;
};

@@ -5249,7 +5249,7 @@ static Tensor apply_simple_transformation(
return condition_with_I ? K - transformation : -transformation;
}
}
};
}

std::tuple<Tensor, Tensor> householder_product_backward(
const Tensor& grad,

@@ -77,7 +77,7 @@ PyCodeObject* getCode<CallType::PyModuleCall>() {
return (PyCodeObject*)res;
}();
return module_call_code;
};
}

template <>
PyCodeObject* getCode<CallType::PyOptimizerCall>() {

@@ -92,7 +92,7 @@ PyCodeObject* getCode<CallType::PyOptimizerCall>() {
return (PyCodeObject*)res;
}();
return optimizer_step_code;
};
}

} // namespace
} // namespace torch::profiler::impl
@@ -548,7 +548,7 @@ struct TraceKeyCacheState {
// `PyEval_SetProfile`.
struct ThreadLocalResults;
struct TraceContext {
PyObject_HEAD;
PyObject_HEAD
ThreadLocalResults* thread_local_results_;
};

@@ -795,7 +795,7 @@ PythonTracer::PythonTracer(torch::profiler::impl::RecordQueue* queue)
// cannot be round tripped via `sys.settrace(sys.gettrace())`
PyEval_SetProfile(PythonTracer::pyProfileFn, (PyObject*)ctx);
}
};
}

void PythonTracer::stop() {
gil_and_restore_thread gil;

@@ -15,7 +15,7 @@ namespace py = pybind11;

// Python object that backs torch.autograd.Variable
struct THPVariable {
PyObject_HEAD;
PyObject_HEAD
// Payload
c10::MaybeOwned<at::Tensor> cdata;
// Hooks to be run on backwards pass (corresponds to Python attr
@@ -5,7 +5,7 @@
namespace torch::distributed::autograd {

CleanupAutogradContextReq::CleanupAutogradContextReq(int64_t context_id)
: context_id_(context_id){};
: context_id_(context_id) {}

int64_t CleanupAutogradContextReq::getContextId() {
return context_id_;

@@ -65,7 +65,7 @@ class DetectorMap {
cached_;
};

}; // namespace
} // namespace

namespace c10d {

@@ -139,7 +139,7 @@ class IntrusivePtrNoGilDestructor {

} // anonymous namespace

PYBIND11_DECLARE_HOLDER_TYPE(T, IntrusivePtrNoGilDestructor<T>, true);
PYBIND11_DECLARE_HOLDER_TYPE(T, IntrusivePtrNoGilDestructor<T>, true)

namespace torch::distributed::c10d {

@@ -43,7 +43,7 @@ C10_DEFINE_TYPED_REGISTRY( // NOLINT
c10::DeviceType,
Timer,
std::unique_ptr,
c10::Device);
c10::Device)

namespace {
@@ -67,7 +67,7 @@ class CpuTimer : public Timer {
}
};

C10_REGISTER_TYPED_CLASS(TimerRegistry, c10::kCPU, CpuTimer);
C10_REGISTER_TYPED_CLASS(TimerRegistry, c10::kCPU, CpuTimer)

std::vector<at::Tensor> extractTensors(const c10::IValue& result) {
if (result.isPyObject()) {

@@ -3,5 +3,5 @@
namespace torch::distributed::rpc {
C10_DEFINE_REGISTRY(
RpcMetricsHandlerRegistry,
torch::distributed::rpc::RpcMetricsHandler);
torch::distributed::rpc::RpcMetricsHandler)
} // namespace torch::distributed::rpc

@@ -229,7 +229,7 @@ namespace {
typedef std::vector<TensorCheck> ChecksList;

typedef struct {
PyObject_HEAD;
PyObject_HEAD
ChecksList* checks;
} TensorGuards;

@@ -510,7 +510,7 @@ static PyTypeObject TensorGuardsType = { PyVarObject_HEAD_INIT(nullptr, 0)
// merged.
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
struct GlobalStateGuard {
PyObject_HEAD;
PyObject_HEAD

inline void init() {
auto& ctx = at::globalContext();
@@ -15,7 +15,7 @@
static struct PyModuleDef _module =
{PyModuleDef_HEAD_INIT, "torch._C._dynamo", "", -1, nullptr};

PYBIND11_MAKE_OPAQUE(std::vector<uint8_t>);
PYBIND11_MAKE_OPAQUE(std::vector<uint8_t>)

namespace torch::dynamo {

@@ -13,6 +13,7 @@
#include <torch/csrc/jit/passes/autocast.h>
#endif

// clang-format off
C10_DEFINE_bool(
torch_jit_do_not_store_optimized_graph,
false,

@@ -61,7 +61,7 @@ std::unique_ptr<Stack> MTensorArgumentCreator(Node* n) {
}
}
return stack;
};
}

bool MTensorNodeArgValid(Value* value) {
auto tensor_type = value->type()->cast<TensorType>();

@@ -79,7 +79,7 @@ namespace {

class NodeNameGenerator {
public:
NodeNameGenerator(std::shared_ptr<Graph> g) : graph_(std::move(g)){};
NodeNameGenerator(std::shared_ptr<Graph> g) : graph_(std::move(g)) {}
virtual ~NodeNameGenerator() = 0;
void PopulateNodeNames();
@@ -105,7 +105,7 @@ NodeNameGenerator::~NodeNameGenerator() = default;
class ScopedNodeNameGenerator : public NodeNameGenerator {
public:
ScopedNodeNameGenerator(std::shared_ptr<Graph> g)
: NodeNameGenerator(std::move(g)){};
: NodeNameGenerator(std::move(g)) {}

protected:
void CreateNodeName(Node* n) override;

@@ -205,5 +205,5 @@ bool operator==(
const CanonicalizedSymbolicShape& a,
const CanonicalizedSymbolicShape& b) {
return a.values_ == b.values_;
};
}
} // namespace torch::jit

@@ -154,7 +154,7 @@ static std::vector<StrideInput> summarizeInputStrides(const TensorType& tt) {
summarizeStrideDim(sizes, strides, dim, stride_inputs, 0));
}
return stride_inputs;
};
}

// Todo: incorporate in codegen
static StrideInput summarizeOutputStrides(const TensorType& tt) {

@@ -26,6 +26,7 @@

#include <utility>

// clang-format off
C10_DEFINE_bool(
torch_jit_disable_cat,
false,
@@ -65,7 +65,7 @@ class unwrapping_shared_ptr {

} // namespace torch::jit

PYBIND11_DECLARE_HOLDER_TYPE(T, torch::jit::unwrapping_shared_ptr<T>, true);
PYBIND11_DECLARE_HOLDER_TYPE(T, torch::jit::unwrapping_shared_ptr<T>, true)

namespace pybind11::detail {

@@ -98,12 +98,12 @@ class ScriptDict final {
// not exist.
at::IValue getItem(const at::IValue& key) {
return dict_.at(key);
};
}

// Set the value for the given key.
void setItem(const at::IValue& key, const at::IValue& value) {
dict_.insert_or_assign(key, value);
};
}

// Check whether the dictionary contains the given key.
bool contains(const at::IValue& key) {

@@ -92,7 +92,7 @@ class ScriptList final {
at::IValue getItem(diff_type idx) {
idx = wrap_index(idx);
return list_.get(idx);
};
}

// Set the value corresponding to the given index.
void setItem(diff_type idx, const at::IValue& value) {

@@ -127,7 +127,7 @@ struct VISIBILITY_HIDDEN ConstantParameterList : public SugaredValue {

struct VISIBILITY_HIDDEN ModuleDictMethod : public SugaredValue {
explicit ModuleDictMethod(SugaredValuePtr iterable, std::string name)
: iterable_(std::move(iterable)), name_(std::move(name)){};
: iterable_(std::move(iterable)), name_(std::move(name)) {}

std::string kind() const override {
return name_;
@@ -286,7 +286,7 @@ struct VISIBILITY_HIDDEN SugaredDict : public SugaredValue {

SugaredValuePtr iter(const SourceRange& loc, GraphFunction& m) override {
return keys_;
};
}

std::shared_ptr<ModuleValue> self_;
std::shared_ptr<SugaredTupleValue> keys_;

@@ -66,7 +66,7 @@ void ArgumentSpecCreator::scan(
} else {
instructions_.emplace_back(SKIP);
}
};
}

// this is a coarse-grained guarantee that the slots of a class will not be
// modified by the function. It works fine for things that used be read-only

@@ -53,6 +53,7 @@
#include <utility>
#include <vector>

// clang-format off
C10_DEFINE_bool(
torch_jit_execution_plan_reuse_code_graph,
false,

@@ -46,6 +46,7 @@ using torch::distributed::autograd::DistAutogradContainer;
#include <utility>
#include <vector>

// clang-format off
C10_DEFINE_bool(
torch_jit_enable_rethrow_caught_exception,
false,

@@ -38,6 +38,7 @@
#include <mutex>
#include <optional>

// clang-format off
C10_DEFINE_bool(
torch_jit_enable_new_executor,
true,
@@ -47,6 +47,7 @@
#endif

// used in test only
// clang-format off
C10_DEFINE_bool(
static_runtime_disable_debug_memory_overlap_check,
false,

@@ -40,6 +40,7 @@

#include <ATen/CompositeExplicitAutogradFunctions.h>

// clang-format off
C10_DEFINE_bool(
static_runtime_enable_fast_math,
true,

@@ -9,6 +9,7 @@
#include <torch/csrc/jit/runtime/graph_iterator.h>
#include <torch/csrc/jit/runtime/static/ops.h>

// clang-format off
C10_DEFINE_bool(
enable_clip_ranges_gather_fusions,
true,

@@ -85,7 +85,7 @@ std::ostream& operator<<(std::ostream& out, const Check& c) {
}
out << ": " << c.search_str_;
return out;
};
}

namespace {

@@ -1,7 +1,7 @@
#include <c10/util/env.h>
#include <torch/csrc/lazy/core/config.h>

C10_DEFINE_bool(torch_lazy_ir_debug, false, "Enable lazy tensor IR debugging");
C10_DEFINE_bool(torch_lazy_ir_debug, false, "Enable lazy tensor IR debugging")

C10_DEFINE_bool(
torch_lazy_param_aliasing,
@@ -7,6 +7,7 @@

// Enables caching on for dynamic shapes (aka disable hash on shapes)
// NOLINTNEXTLINE(misc-use-internal-linkage)
// clang-format off
C10_DEFINE_bool(
ltc_enable_dynamic_shapes,
false,

@@ -9,7 +9,7 @@
C10_DEFINE_bool(
ltc_enable_symbolic_shapes,
false,
"Enables calculation of if dims are symbolic");
"Enables calculation of if dims are symbolic")

namespace torch::lazy {

@@ -5,7 +5,7 @@
C10_DEFINE_bool(
torch_lazy_ts_tensor_update_sync,
true,
"Use synchronous copy inside _copy_from op");
"Use synchronous copy inside _copy_from op")

// TODO(whc) we need to hook up these flags in a more useful way
// possibly also keep LTC_TS_CUDA env working?

@@ -13,4 +13,4 @@ C10_DEFINE_bool(
C10_DEFINE_bool(
torch_lazy_ts_cuda,
false,
"Use cuda device for torchscript backend (instead of CPU)");
"Use cuda device for torchscript backend (instead of CPU)")
@@ -12,12 +12,12 @@ using torch::profiler::impl::TensorID;
template <> \
struct type_caster<T> : public strong_pointer_type_caster<T> {};

STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::StorageImplData);
STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::AllocationID);
STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::TensorImplAddress);
STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::PyModuleSelf);
STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::PyModuleCls);
STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::PyOptimizerSelf);
STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::StorageImplData)
STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::AllocationID)
STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::TensorImplAddress)
STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::PyModuleSelf)
STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::PyModuleCls)
STRONG_POINTER_TYPE_CASTER(torch::profiler::impl::PyOptimizerSelf)
#undef STRONG_POINTER_TYPE_CASTER

template <>

@@ -27,7 +27,7 @@ struct Type {
};

struct SimpleType : public Type {
SimpleType(std::string& name) : name(name){};
SimpleType(std::string& name) : name(name) {}

bool is_matching(PyObject* object) override {
return py_typename(object) == name;

@@ -38,7 +38,7 @@ struct SimpleType : public Type {

struct MultiType : public Type {
MultiType(std::initializer_list<std::string> accepted_types)
: types(accepted_types){};
: types(accepted_types) {}

bool is_matching(PyObject* object) override {
auto it = std::find(types.begin(), types.end(), py_typename(object));
@@ -49,7 +49,7 @@ struct MultiType : public Type {
};

struct NullableType : public Type {
NullableType(std::unique_ptr<Type> type) : type(std::move(type)){};
NullableType(std::unique_ptr<Type> type) : type(std::move(type)) {}

bool is_matching(PyObject* object) override {
return object == Py_None || type->is_matching(object);

@@ -60,7 +60,7 @@ struct NullableType : public Type {

struct TupleType : public Type {
TupleType(std::vector<std::unique_ptr<Type>> types)
: types(std::move(types)){};
: types(std::move(types)) {}

bool is_matching(PyObject* object) override {
if (!PyTuple_Check(object))

@@ -79,7 +79,7 @@ struct TupleType : public Type {
};

struct SequenceType : public Type {
SequenceType(std::unique_ptr<Type> type) : type(std::move(type)){};
SequenceType(std::unique_ptr<Type> type) : type(std::move(type)) {}

bool is_matching(PyObject* object) override {
if (!PySequence_Check(object))

@@ -99,7 +99,7 @@ struct SequenceType : public Type {

struct Argument {
Argument(std::string name, std::unique_ptr<Type> type)
: name(std::move(name)), type(std::move(type)){};
: name(std::move(name)), type(std::move(type)) {}

std::string name;
std::unique_ptr<Type> type;
@@ -109,9 +109,9 @@ struct Option {
Option(std::vector<Argument> arguments, bool is_variadic, bool has_out)
: arguments(std::move(arguments)),
is_variadic(is_variadic),
has_out(has_out){};
has_out(has_out) {}
Option(bool is_variadic, bool has_out)
: arguments(), is_variadic(is_variadic), has_out(has_out){};
: arguments(), is_variadic(is_variadic), has_out(has_out) {}
Option(const Option&) = delete;
Option(Option&& other) noexcept = default;
Option& operator=(const Option&) = delete;

@@ -7,15 +7,15 @@
template <class T>
class TORCH_PYTHON_API THPPointer {
public:
THPPointer() : ptr(nullptr){};
explicit THPPointer(T* ptr) noexcept : ptr(ptr){};
THPPointer() : ptr(nullptr) {}
explicit THPPointer(T* ptr) noexcept : ptr(ptr) {}
THPPointer(THPPointer&& p) noexcept : ptr(std::exchange(p.ptr, nullptr)) {}
THPPointer(const THPPointer& p) = delete;
THPPointer& operator=(const THPPointer&) = delete;

~THPPointer() {
free();
};
}
T* get() {
return ptr;
}
@@ -24,10 +24,10 @@ namespace py = pybind11;
// This makes intrusive_ptr to be available as a custom pybind11 holder type,
// see
// https://pybind11.readthedocs.io/en/stable/advanced/smart_ptrs.html#custom-smart-pointers
PYBIND11_DECLARE_HOLDER_TYPE(T, c10::intrusive_ptr<T>, true);
PYBIND11_DECLARE_HOLDER_TYPE(T, c10::intrusive_ptr<T>, true)

PYBIND11_DECLARE_HOLDER_TYPE(T, c10::SingletonOrSharedTypePtr<T>);
PYBIND11_DECLARE_HOLDER_TYPE(T, c10::SingletonTypePtr<T>, true);
PYBIND11_DECLARE_HOLDER_TYPE(T, c10::SingletonOrSharedTypePtr<T>)
PYBIND11_DECLARE_HOLDER_TYPE(T, c10::SingletonTypePtr<T>, true)

namespace pybind11::detail {

@@ -35,7 +35,7 @@ class PythonSymNodeImpl : public c10::SymNodeImpl {
PythonSymNodeImpl(py::object pyobj) : c10::SymNodeImpl() {
pyobj_ = std::make_shared<c10::SafePyObject>(
pyobj.release().ptr(), getPyInterpreter());
};
}

c10::SymNode wrap_int(int64_t num) override {
py::gil_scoped_acquire acquire;